insight/NEWS.md

# insight 0.14.6

## Breaking changes

* Data management functions (like `reshape_longer()`, or `data_match()`) have been moved to the *datawizard* package.

## New supported model classes

* Support for `bfsl` (*bfsl*)

## New functions

* New `standardize_column_order()` function can be used to standardize the column order in output dataframes.

## General

* Improved speed performance for some functions.
* Improved handling of table captions and footers in `export_table()`.
  See also the new vignette on exporting data frames into human readable tables here: https://easystats.github.io/insight/articles/export.html
* Revised `width` argument in `export_table()`, which now allows to set different column widths across table columns. See examples in `?export_table`.
* `export_table()` gets a `table_width` argument to split wide tables into two parts.
* `get_varcov()` for `MixMod` (package *GLMMadaptive*) was revised, and now allows to return a robust variance-covariance matrix.
* Added more `get_df()` methods.

## Bug fixes

* Fixed issues with manual sigma computation to handle dispersion models in `get_sigma()`.
* Fixed issue in `find_formula()` for `BayesFactor::lmBF()` with multiple random effects.
* Fixed issue in `get_parameters.BFBayesFactor()` with wrong sign of difference estimate for t-tests.
* Argument `width` in `format_value()` was ignored when formatting integer values and `protect_integers` was set to `TRUE`.

# insight 0.14.5

## New functions

* `find_transformation()` and `get_transformation()` to find or get any function that was used to transform the response variable in a regression model.

## General

* Improved support for models of class `sampleSelection`.
* Improved documentation.
* `get_modelmatrix()` now supports: `rms::lrm`
* `get_predicted()` supports: `MASS::polr`, `MASS::rlm`, `rms::lrm`, `fixest`, `bife::bife`, `ordinal::clm`.
* `get_predicted()` standard errors are often much faster to compute.
* `get_predicted()` supports models with "grouped" or "level" outcomes (e.g., multinomial logit).
* `get_predicted()` handles factors better.
* Improved documentation

## Changes to functions

* `check_if_installed()` gains a `quietly` argument, if neither stopping nor a warning message for non-installed packages is requested.
* `get_predicted()`'s `predict` argument now accepts these values: "link", "expectation", "prediction", "classification", or NULL.
* `get_predicted()` accepts `predict=NULL`, which allows users to push a `type` argument through the `...` ellipsis, forward to the `predict()` method of the modelling package.

## Bug fixes

* Fixed issue with parameter names from *emmeans* objects in `get_parameters()`.
* Fixed issues with unknown arguments in `get_predicted()`.

# insight 0.14.4

## Bug fixes

* Fixed issues due to latest *brms* update.

# insight 0.14.3

## New supported model classes

* `systemfit` (*systemfit*)

## General

* Minor improvements for functions that support printing outputs.

## Changes to functions

* `get_predicted()` gains a new option, `predict = "response"` for binomial models.
* Improved stability of `get_variance()` when computing random-slope-intercept correlation with categorical random slopes.
* Improved `get_priors()` for *brms* models.

## Bug fixes

* Fixed issue in `get_data()` for *brms* models with auxiliary parameters.
* Fixed issue in `find_formula()` for *brms* models with auxiliary parameters.
* Fixed issue where `get_data()` for *htest* objects did not always preserve factors.
* Fixed issue in `format_table()` for ci-levels with longer fractional part.

# insight 0.14.2

## Changes to functions

* `check_if_installed()` gains a `minimum_version` argument, to check if an installed package is not older than the specified version number.
* The `package` argument in `check_if_installed()` is now vectorized, so you can check for multiple packages in one function call.
* Value formatting functions (like `format_value()` or `format_ci()`) can now round to significant digits using `digits = "signif"`.
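As a rough sketch of how these additions might be used (illustrative only; the package names, version number and value below are made up, not taken from the package documentation):

``` r
# check one package against a minimum version, or several packages at once
check_if_installed("lme4", minimum_version = "1.1.27")
check_if_installed(c("lme4", "glmmTMB"))

# round to significant (rather than fixed) digits
format_value(0.004687, digits = "signif")
```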
## Bug fixes

* Fixed issue in `model_info()` with `stan_polr()` models.
* Fixed issue in `find_parameters()` for *brms* when model contained parameters for the priors on sigma.
* Fixed issue in `n_obs()` for `stats4::mle()` models.
* Fixed failing tests due to latest *fixest* update.
* Fixed issues due to latest *epiR* update.

# insight 0.14.1

## New functions

* Added several data management and preparation functions: `data_to_long()`, `data_match()`, `data_relocate()`, `data_restoretype()`, `force_numeric()`.

## New supported model classes

* Support for `pgmm` (*plm*)

## Changes to functions

* Improved handling of auxiliary parameters for *stanreg* models.

## Bug fixes

* Stability improvements to `get_predicted()`.
* Fixed issues when accessing information from an `afex_aov` model with an empty `aov` slot (in anticipation for `{afex}` v.1.0.0).
* Fixed issue in `model_info()` for *stanreg* object with non-standard model-family specification.

# insight 0.14.0

## General

* Better support for accessing auxiliary parameters (via `get_sigma()` and `get_auxiliary()`, as well as `get_parameters(component = "all")`) for `brmsfit` models.

## New functions

* `get_modelmatrix()` as a robust alternative to `model.matrix()` for different model classes.
* `format_message()` to format warnings and messages by adjusting the maximum line_length, possibly to the width of the console window.
* `format_string()` to shorten a string to a maximum length.
* `check_if_installed()` to see if the needed package is installed.

## New supported model classes

* Support for `mvord` (*mvord*), `SemiParBIV` (*GJRM*), `selection` (*sampleSelection*)

## Changes to functions

* `find_formula()` now warns when data name is present in formula, since this can result in unexpected behaviour in other package functions.
* `model_info()` returns `is_bernoulli = TRUE` for Bernoulli models.
* Add `get_statistic()` for *lavaan* models.
* `get_df()` supports more models/objects.
* `get_sigma()` supports more models/objects.
* `get_sigma()` and `get_deviance()` for `lrm` models (package *rms*) now only return one value, sigma or deviance for the model with intercept and predictors.
* `get_deviance()` now works for `glmerMod`, `MixMod` and `glmmTMB` models.
* The behaviour and documentation of the `effects` and `component` arguments, in particular for `brmsfit` models, were revised to be more consistent.
* `export_table()` now correctly prints the footer if the input was a list of data frames.

## Bug fixes

* Fixed issue (warning) in `get_loglikelihood()` for binomial models with non-numeric response variables.
* `find_statistic()` correctly distinguishes t- and z-statistic for *emmGrid* objects.
* Fixed issue in `model_info()` for `BGGM` and `mgcv::gam()`.
* Fixed issue in `find_formula()` for `gamlss` models with `random()` function in formula.
* Fixed issue with `find_parameters()` for *brmsfit* models when auxiliary parameters are directly modelled.
* Fixed issue with `get_parameters()` and `find_parameters()` for multi-group *blavaan* models.
* Fixed issue in `ellipsis_info()` when detecting nested models with poly-terms.
* Fixed issue in `find_response()` for *brmsfit* models that used the `resp_thres()` function in the model formula.
* Fixed issue in `get_predicted_ci()` for models with rank-deficient model matrix.
* Argument `zap_small` in `format_value()` did not work properly over vectorized vectors.

# insight 0.13.2

## General

* `get_predicted()` has been revamped with a new API and a stable output form (a vector).
  In the course of this revision, a new function `get_predicted_ci()` was added to calculate uncertainty intervals for model predictions.
* Improved support for `orm` (*rms*).

## New supported model classes

* Support for `svy_vglm` (*svyVGAM*), `mjoint` (*joineRML*), `mhurdle` (*mhurdle*), `sarlm` (*spatialreg*), `model_fit` (*tidymodels*)

## New functions

* `is_gam_model()` as a small helper to check if a model is a generalized additive model with smooth terms.

## Changes to functions

* Added `iterations` argument to `get_predicted()` to control the number of draws returned for Bayesian models.
* `model_info()` now returns `$is_gam` if model is a generalized additive model with smooth terms.
* `format_table()` and `export_table()` now check for valid input (e.g., non-empty data frame) and give an informative message.
* Improved support for `MixMod` (*GLMMadaptive*) in `get_variance()`.
* Improved `print_parameters()`, to allow more flexibility and better cope with different output formats.
* `get_parameters()`, `find_parameters()` and `clean_parameters()` for *emmGrid* and *emm_list* objects were revised and now better match the actual parameter names (also for contrasts).

## Bug fixes

* Fixed issue in `get_variance()` for models without intercept.
* Fixed labelling issue in `get_parameters()` and `clean_parameters()` for `blavaan` models.
* `clean_parameters()` for *MCMCglmm* objects did not include random parameters.
* Fixed minor issue with unintended sub-titles for `print_html()`.
* Fixed issue in `get_prior()` for `rstanarm::R2()` priors.

# insight 0.13.1

## General

* Improved handling for GAMs.

## New supported model classes

* Support for `elm`, `eglm` (*eflm*)

## Changes to functions

* `get_residuals(..., weighted = TRUE)` doesn't throw warnings if weights are 1 (no weights specified).
* `n_parameters()` gains an `only_estimable` argument, to remove non-estimable parameters from counting the number of parameters for models with rank-deficient model matrix.
* `format_ci()` also gains a `zap_small` argument.

## Bug fixes

* Fix or disable failing tests on Mac OS.
* Fixed issues in `get_variance()` with non-correlated random-slope-intercepts for *lme4* models.

# insight 0.13.0

## General

* Roll-back R dependency to R >= 3.4.

## New supported model classes

* Support for `crr` (*cmprsk*), `ergm` (*ergm*), `btergm` (*btergm*), `Rchoice` (*Rchoice*), `garch` (*tseries*)

## Changes to functions

* Slightly improved handling of different CI-columns in `format_table()`.
* `model_info()` now returns `$is_leventest` if model is an object returned by `car::leveneTest()`.
* `get_parameters()` supports `htest` objects.

## Bug fixes

* `get_varcov()` did not properly remove `NA` from rank-deficient models.
* Fixed issue/warning in `get_data()` for some *htest* objects, where the environment was not properly evaluated.
* Fixed issue in `format_table()` with p-value formatting, when input data frame contained a column named `"p"`, which was not numeric.
* (Hopefully) fixed issue with failing CRAN checks.

# insight 0.12.0

## Breaking changes

* `format_table()` is an alias for `parameters_table()`, and is no longer referring to `export_table()`.

## New supported model classes

* Support for `coxr` (*coxrobust*), `coeftest` (*lmtest*), `ivFixed` (*ivFixed*), `ivprobit` (*ivprobit*), `riskRegression` (*riskRegression*), `summary.lm`, `lmodel2` (*lmodel2*), improved support for `bamlss` (*bamlss*).
## New functions

* Added `get_deviance()` function that returns the model deviance as a robust alternative to `stats::deviance()`.
* Added `model_name()` function that returns the model's "name".
* Added `format()` method for `find_formula()` output to flatten it.
* Added `null_as_ones = TRUE` argument to `get_weights()` to return a vector of 1s instead of `NULL`.
* Added `get_intercept()` as a helper function to easily retrieve the value at the intercept.
* Added `get_df()` as a robust alternative to `stats::df.residual()`.
* Added `get_predicted()` as a robust alternative to `stats::fitted()`.
* Added `get_loglikelihood()` (and its alias `loglikelihood()`) function as a robust alternative to `stats::logLik()`.
* Added `get_residuals()` as a robust alternative to extract model residuals.
* Added `ellipsis_info()` to specify the nature of ellipsis (`...`) inputs.
* Added `is_nested_models()` to check if multiple regression models are nested (decreasing or increasing).
* Added generic `print_html()`, to allow other packages to create tables in HTML format (via `export_table()`) when not printing the output to console.
* Added `is_mixed_model()`, to safely check if a model is a mixed effects model. This function also works for multivariate response models.
* `n_parameters()` was moved from *parameters* to *insight*.

## Changes to functions

* `find_formula()`, `find_predictor()`, `find_random()` and related functions now also return names of random effects from generalized additive mixed models (`gamm`, `gamm4`, `stan_gamm4`).
* Added support for more BFBayesFactor objects.
* `model_info()` now returns `$is_xtab` for `chisq.test()` and `BayesFactor::contingencyTableBF()`. Furthermore, the `$family` element for those objects is set to `"categorical"`.
* `n_obs()` now handles number of observations from models with binomial family correctly when these have matrix-columns as response variable.

## Bug fixes

* Fixed issue in `find_statistic()` for *fixest* models, which did not return the correct value `"t-statistic"` for `feols()`.
* Fixed inconsistencies in `get_priors()` for (linear) `BFBayesFactor` models.

# insight 0.11.1

## General

* Warnings that formerly were printed using `print_color()` now use `warning()`, to better suppress warning messages if required.

## New functions

* `find_smooth()`, to return in particular smooth terms used in a model.

## Changes to functions

* `get_variance()` and `get_variance_random()` gain a `tolerance`-argument, to set the tolerance level for singularity checks when computing random effect variances.
* `parameters_table()` formats more objects from the *easystats* packages, like ROPE-range or `p_rope()`.
* `find_statistic()` now supports models of class *scam*.
* `get_data()` now also supports `htest`-objects, where possible.

## Bug fixes

* Fix CRAN check issues.
* `find_formula()` for `stan_gamm4()` now correctly includes random effects.

# insight 0.11.0

## Breaking changes

* `model_info()` now also detects models from `oneway.test()`, `binom.test()`, `chisq.test()`, `mcnemar.test()` and `prop.test()`. Furthermore, `model_info()` better deals with objects from `BFBayesFactor`, and censored regression models no longer return `TRUE` for `$is_linear`.
* `format_table()` is going to be renamed in a future update. Please use its alias `export_table()`.

## New supported model classes

* Support for `scam` (*scam*), `meta_random` and `meta_fixed` (*metaBMA*), `Glm` (*rms*), `ridgelm` (*MASS*), `mediate` (*mediation*). Partial support for `mcmc.list` (e.g.
  *bayesGARCH*).

## New functions

* `parameters_table()`, which was moved from package *parameters* to *insight*. Note that this function is going to be renamed into `format_table()` in a future update.
* `find_offset()`, to find the name of offset-terms.
* Added generics for `display()` and `print_md()`, to allow other packages to create tables in other formats when not printing the output to console.

## Changes to functions

* `standardize_names()` tries to be as loyal to the *broom*-naming conventions as possible.
* The function of the `brackets`-argument in `format_ci()` was changed. It is now also possible to provide a length-two character vector, to define own brackets that encompass the CI-values.
* Related to the change in `format_ci()`, the function of the `brackets`-argument in `parameters_table()` was changed accordingly. Furthermore, `parameters_table()` gains a `preserve_attributes`-argument, to preserve any attributes from the input data frame.
* `export_table()` gains several new arguments that allow to create tables in markdown-format.
* `print_parameters()` gains a `keep_parameter_column`-argument, to keep (default) both the `"Cleaned_Parameter"` and `"Parameter"` columns, or - if `FALSE` - use `"Cleaned_Parameter"` as new `"Parameter"` column.

## Bug fixes

### `get_data()`

* Fixed issue in `get_data()` for `MixMod` objects, which was caused by internal changes in *GLMMadaptive*.
* `get_data()` for zero-inflated models from *pscl* did not include the offset-term in cases where the offset was defined as argument, not inside the model formula.
* Fixed issue in `get_data()` for `coxph` models with survival-objects with `event`-argument as response.
* Fixed edge case in `get_data()` for column name of response values that were log-transformed using `log(x+1)`.

### Other bug fixes

* Fixed issue with `survreg` models that included `strata()` in their formula.
* Fixed warning in CRAN checks for forthcoming R-devel.

# insight 0.10.0

## New functions

* `get_sigma()` to return the residual standard deviation.
* `standardize_names()`, which was moved from package *parameters* to *insight*.

## New supported model classes

* Support for `maov` (*stats*), `HLfit` (*spaMM*), preliminary support for `margins` (*margins*), `merModList` (*merTools*).

## General

* Better support for (weighted) multivariate response models of class `mlm` for functions like `get_varcov()` or `clean_parameters()`.
* Make `find_formula()` work with t-tests from *BayesFactor*.
* Improved handling for *mira* objects.

## Changes to functions

* `format_bf()` gains a `na_reference` argument, to set the "reference" for Bayes factor values that are `NA`, and an `exact` argument for returning scientific formatted extreme values.
* `format_value()` gains a `zap_small` argument, to prevent scientific printing of numbers if these have more decimal places than indicated by `digits`.
* `get_weights()` now also returns `NULL` when all weights were 1.
* `get_parameters()` for *BFBayesFactor* objects gets a `verbose` argument.
* `get_parameters()` for *emmGrid* and *emm_list* objects gets a `summary` argument, to either return the full posterior samples or the summarized centrality indices for Bayesian models.
* `find_formula()` for `MuMIn::model.avg()` now tries to retrieve the random effects part of a formula, when present.
* `get_weights()` gains a `na_rm` argument to remove possible missing values.

## Bug fixes

* Fix issues with one-sample Bayesian t-tests (https://github.com/easystats/parameters/issues/297).
* Fix issue in `format_value()` that printed `"100%"` as `"1e+02%"`.
* Removed unnecessary white-spaces in `format_ci()` when upper or lower interval was larger than 1e+5.
* `has_intercept()` did not work correctly when intercept was removed from formula using `-1`.
* `find_terms()` now shows removal of intercept formula using `-1` as term `"-1"`.
* Fix issues with `get_statistic()` for *vgam* models.

# insight 0.9.6

## Changes to functions

* `get_data()` now works for models from `afex_aov()`.
* `get_parameters()` returns a more informative message for `BFBayesFactor` objects when a model other than the first one is indexed.
* `clean_names()` now also removes `exp()`-pattern.
* `clean_names()` for character-objects now works with "interaction patterns" (like `clean_names("scale(a):scale(b)")`).
* `format_bf()` gains a `protect_ratio` argument, to print numbers smaller than 1 as ratios.

## Bug fixes

* Fix issues in CRAN checks.
* `get_priors()` now works for more complex `BFBayesFactor` objects that have multiple custom priors.

# insight 0.9.5

## Breaking changes

* `get_data()` did not always "back-transform" log-transformed or scaled variables to return the original values. Now this bug has been fixed, and `get_data()` should return all variables on the original scale (as if these variables were not transformed), as stated in the docs.

## Bug fixes

* `get_data()` now returns the correct original data for "empty" polynomials (i.e. `poly(x, 1)`).
* Fix CRAN check issues due to latest _estimatr_ update.

# insight 0.9.1

## New supported model classes

* Support for `mipo` (*mice*), `lqmm` and `lqm` (*lqmm*). Preliminary support for `semLME` (*smicd*), `mle` (*stats4*) and `mle2` (*bbmle*).

## Changes to functions

* `model_info()` returns `$is_meta = TRUE` for *brms*-meta-analysis models.
* Make `find_statistic()` work with `mgcv::bam()`.
* `get_variance()` now also supports the `truncated_nbinom2()` family from *glmmTMB*.

## Bug fixes

* Fixed issue with correctly detecting sigma-parameters in `find_parameters()` for multiple-response `brmsfit`-models.
* Fixed issue with `find_formula()` for models from `stan_nlmer()`.
* Fixed issues with `find_terms()` when response variable included a namespace, like `survival::Surv()`.
* Fixed issues with `get_priors()` for _stanreg_ models, probably caused by the latest update to *rstanarm 2.21.2*.
* Fixed issues in `get_variance()` for *brmsfit* models.
* Fixed some issues around `crq` objects (package *quantreg*).

# insight 0.9.0

## New supported model classes

* `BGGM` (*BGGM*), `metaplus` (*metaplus*), `glht` (*multcomp*), `glmm` (*glmm*), improved support for `manova` (*stats*)

## New functions

* Value formatting functions `format_bf()`, `format_pd()`, `format_p()`, `format_rope()` and `format_number()` were moved from package *parameters* to *insight*.

## Changes to functions

* `get_variance()` now also returns the correlation among random slopes.
* `get_variance()` now also (partially) supports `brmsfit` models.
* `get_parameters()` for models that return (posterior or simulated) samples of model parameters gains a `summary`-argument, which - if `TRUE` - returns a point-estimate (mean of samples) instead of the full samples.
* `format_p()` returns `"> .999"` for p-values equal to or greater than 0.999.

## Bug fixes

* Fixed issue in `find_formula()` that did not properly work for models with random effects in formula (in *lme4* notation), when random effects were in between fixed effects parts.
* `get_variance()` did not return variance components for random effects for null-models with random slopes.
* Fixed issue with `get_variance()` for `lme`-models with categorical random slope.
* Fixed issue that occurred since R 4.0.0 in `find_weights()` when the function call had no `weights`-argument.
* Fixed issue in `get_data()` for models with `cbind()`-response variables and matrix-like variables in the model frame (e.g. when using `poly()`).
* Fixed issues with `PROreg::BBmm()`, due to changes in the latest package update.

insight/DESCRIPTION

Type: Package
Package: insight
Title: Easy Access to Model Information for Various Model Objects
Version: 0.15.0
Authors@R: c(person(given = "Daniel", family = "Lüdecke", role = c("aut", "cre"),
             email = "d.luedecke@uke.de",
             comment = c(ORCID = "0000-0002-8895-3206", Twitter = "@strengejacke")),
      person(given = "Dominique", family = "Makowski", role = c("aut", "ctb"),
             email = "dom.makowski@gmail.com",
             comment = c(ORCID = "0000-0001-5375-9967", Twitter = "@Dom_Makowski")),
      person(given = "Indrajeet", family = "Patil", role = c("aut", "ctb"),
             email = "patilindrajeet.science@gmail.com",
             comment = c(ORCID = "0000-0003-1995-6531", Twitter = "@patilindrajeets")),
      person(given = "Philip", family = "Waggoner", role = c("aut", "ctb"),
             email = "philip.waggoner@gmail.com",
             comment = c(ORCID = "0000-0002-7825-7573")),
      person(given = "Mattan S.", family = "Ben-Shachar", role = c("aut", "ctb"),
             email = "matanshm@post.bgu.ac.il",
             comment = c(ORCID = "0000-0002-4287-4801")),
      person(given = "Brenton M.", family = "Wiernik", role = c("aut"),
             email = "brenton@wiernik.org",
             comment = c(ORCID = "0000-0001-9560-6336", Twitter = "@bmwiernik")),
      person(given = "Vincent", family = "Arel-Bundock",
             email = "vincent.arel-bundock@umontreal.ca", role = c("ctb"),
             comment = c(ORCID = "0000-0003-2042-7063")),
      person(given = "Alex", family = "Hayes", role = c("rev"),
             email = "alexpghayes@gmail.com",
             comment = c(ORCID = "0000-0002-4985-5160")))
Maintainer: Daniel Lüdecke <d.luedecke@uke.de>
Description: A tool to provide an easy, intuitive and consistent access to
    information contained in various R models, like model formulas, model
    terms, information about random effects, data that was used to fit the
    model or data from response variables. 'insight' mainly revolves around
    two types of functions: Functions that find (the names of) information,
    starting with 'find_', and functions that get the underlying data,
    starting with 'get_'. The package has a consistent syntax and works with
    many different model objects, where otherwise functions to access this
    information are missing.
License: GPL-3
URL: https://easystats.github.io/insight/
BugReports: https://github.com/easystats/insight/issues
Depends: R (>= 3.4)
Imports: methods, stats, utils
Suggests: AER, afex, aod, BayesFactor, bayestestR, bbmle, bdsmatrix, betareg,
    bife, biglm, blavaan, blme, boot, brms, censReg, cgam, clubSandwich,
    coxme, cplm, crch, datawizard, effectsize, emmeans, epiR, estimatr,
    feisr, fixest, fungible, gam, gamlss, gamm4, gbm, gee, geepack,
    GLMMadaptive, glmmTMB, gmnl, gt, httr, ivreg, JM, knitr, lavaan, lfe,
    lme4, lmtest, logistf, MASS, Matrix, mclust, MCMCglmm, merTools, metaBMA,
    mgcv, mice, mlogit, multgee, nlme, nnet, nonnest2, ordinal, panelr,
    parameters, parsnip, performance, plm, poorman, pscl, psych, quantreg,
    rmarkdown, rms, robustbase, robustlmm, rstanarm (>= 2.21.1), rstantools,
    rstudioapi, sandwich, speedglm, spelling, splines, statmod, survey,
    survival, testthat, tripack, truncreg, VGAM
VignetteBuilder: knitr
Encoding: UTF-8
Language: en-US
RoxygenNote: 7.1.2
Config/testthat/edition: 3
NeedsCompilation: no
Packaged: 2022-01-06 09:03:54 UTC; Daniel
Author: Daniel Lüdecke [aut, cre] (<https://orcid.org/0000-0002-8895-3206>, @strengejacke),
    Dominique Makowski [aut, ctb] (<https://orcid.org/0000-0001-5375-9967>, @Dom_Makowski),
    Indrajeet Patil [aut, ctb] (<https://orcid.org/0000-0003-1995-6531>, @patilindrajeets),
    Philip Waggoner [aut, ctb] (<https://orcid.org/0000-0002-7825-7573>),
    Mattan S. Ben-Shachar [aut, ctb] (<https://orcid.org/0000-0002-4287-4801>),
    Brenton M. Wiernik [aut] (<https://orcid.org/0000-0001-9560-6336>, @bmwiernik),
    Vincent Arel-Bundock [ctb] (<https://orcid.org/0000-0003-2042-7063>),
    Alex Hayes [rev] (<https://orcid.org/0000-0002-4985-5160>)
Repository: CRAN
Date/Publication: 2022-01-07 16:22:42 UTC

insight/README.md

# insight

[![DOI](https://joss.theoj.org/papers/10.21105/joss.01412/status.svg)](https://doi.org/10.21105/joss.01412)
[![downloads](https://cranlogs.r-pkg.org/badges/insight)](https://cranlogs.r-pkg.org/)
[![total](https://cranlogs.r-pkg.org/badges/grand-total/insight)](https://cranlogs.r-pkg.org/)
[![status](https://tinyverse.netlify.com/badge/insight)](https://CRAN.R-project.org/package=insight)

**Gain insight into your models!**

When fitting any statistical model, there are many useful pieces of information that are simultaneously calculated and stored beyond coefficient estimates and general model fit statistics. Although there exist some generic functions to obtain model information and data, many package-specific modelling functions do not provide such methods to allow users to access such valuable information.

**insight** is an R-package that fills this important gap by providing a suite of functions to support almost any model (see a list of the many models supported below in the **List of Supported Packages and Models** section). The goal of **insight**, then, is to provide tools that give *easy*, *intuitive*, and *consistent* access to information contained in model objects. These tools aid applied researchers in virtually any field who fit, diagnose, and present statistical models, by streamlining access to every aspect of many model objects via consistent syntax and output.

## Installation

[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/insight)](https://cran.r-project.org/package=insight)
[![insight status badge](https://easystats.r-universe.dev/badges/insight)](https://easystats.r-universe.dev)
[![R check](https://github.com/easystats/insight/workflows/R-check/badge.svg?branch=master)](https://github.com/easystats/insight/actions)

The *insight* package is available on CRAN, while its latest development version is available on R-universe (from *rOpenSci*).
| Type        | Source     | Command                                                                    |
|-------------|------------|----------------------------------------------------------------------------|
| Release     | CRAN       | `install.packages("insight")`                                              |
| Development | R-universe | `install.packages("insight", repos = "https://easystats.r-universe.dev")`  |

Once you have downloaded the package, you can then load it using:

``` r
library("insight")
```

## Documentation

[![Documentation](https://img.shields.io/badge/documentation-insight-orange.svg?colorB=E91E63)](https://easystats.github.io/insight/)
[![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/)
[![Features](https://img.shields.io/badge/features-insight-orange.svg?colorB=2196F3)](https://easystats.github.io/insight/reference/index.html)

Built with non-programmers in mind, **insight** offers a broad toolbox for making model and data information easily accessible. While **insight** offers many useful functions for working with and understanding model objects (discussed below), we suggest users start with `model_info()`, as this function provides a clean and consistent overview of model objects (e.g., functional form of the model, the model family, link function, number of observations, variables included in the specification, etc.). With a clear understanding of the model introduced, users are able to adapt other functions for more nuanced exploration of and interaction with virtually any model object. Please visit <https://easystats.github.io/insight/> for documentation.

### Definition of Model Components

The functions from **insight** address different components of a model. In an effort to avoid confusion about specific “targets” of each function, in this section we provide a short explanation of **insight**’s definitions of regression model components.

#### Data

The dataset used to fit the model.

#### Parameters

Values estimated or learned from data that capture the relationship between variables. In regression models, these are usually referred to as *coefficients*.

#### Response and Predictors

- **response**: the outcome or response variable (dependent variable) of a regression model.
- **predictor**: independent variables of (the *fixed* part of) a regression model. For mixed models, variables that are only in the *random effects* part (i.e. grouping factors) of the model are not returned as predictors by default. However, these can be included using additional arguments in the function call. Predictors are treated as “unique”; as such, if a variable appears as a fixed effect and a random slope, it is treated as one (the same) predictor.

#### Variables

Any unique variable names that appear in a regression model, e.g., response variable, predictors or random effects. A “variable” only relates to the unique occurrence of a term, or the term name. For instance, the expression `x + poly(x, 2)` has only the variable `x`.

#### Terms

Terms themselves consist of variable and factor names separated by operators, or involve arithmetic expressions. For instance, the expression `x + poly(x, 2)` has *one* variable `x`, but *two* terms `x` and `poly(x, 2)`.

#### Random Effects

- **random slopes**: variables that are specified as random slopes in a mixed effects model.
- **random or grouping factors**: variables that are specified as grouping variables in a mixed effects model.

*Aren’t the predictors, terms and parameters the same thing?* In some cases, yes. But not in all cases.
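The variables-versus-terms distinction can be made concrete with a small sketch (a toy model chosen for illustration, not taken from the package documentation; the commented output is approximate):

``` r
library(insight)

# one unique variable ("Petal.Width"), but two terms
m <- lm(Sepal.Length ~ Petal.Width + poly(Petal.Width, 2), data = iris)

find_variables(m)$conditional
#> [1] "Petal.Width"

find_terms(m)$conditional
#> [1] "Petal.Width"          "poly(Petal.Width, 2)"
```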
Find out more by [**clicking here to access the documentation**](https://easystats.github.io/insight/articles/insight.html).

### Functions

The package revolves around two key prefixes: `get_*` and `find_*`. The `get_*` prefix extracts *values* (or *data*) associated with model-specific objects (e.g., parameters or variables), while the `find_*` prefix *lists* model-specific objects (e.g., priors or predictors). These are powerful families of functions allowing for great flexibility in use, whether at a high, descriptive level (`find_*`) or narrower level of statistical inspection and reporting (`get_*`).

![](https://raw.githubusercontent.com/easystats/insight/master/paper/figure1_small.png)

In total, the **insight** package includes 16 core functions:
[get_data()](https://easystats.github.io/insight/reference/get_data.html),
[get_priors()](https://easystats.github.io/insight/reference/get_priors.html),
[get_variance()](https://easystats.github.io/insight/reference/get_variance.html),
[get_parameters()](https://easystats.github.io/insight/reference/get_parameters.html),
[get_predictors()](https://easystats.github.io/insight/reference/get_predictors.html),
[get_random()](https://easystats.github.io/insight/reference/get_random.html),
[get_response()](https://easystats.github.io/insight/reference/get_response.html),
[find_algorithm()](https://easystats.github.io/insight/reference/find_algorithm.html),
[find_formula()](https://easystats.github.io/insight/reference/find_formula.html),
[find_variables()](https://easystats.github.io/insight/reference/find_variables.html),
[find_terms()](https://easystats.github.io/insight/reference/find_terms.html),
[find_parameters()](https://easystats.github.io/insight/reference/find_parameters.html),
[find_predictors()](https://easystats.github.io/insight/reference/find_predictors.html),
[find_random()](https://easystats.github.io/insight/reference/find_random.html),
[find_response()](https://easystats.github.io/insight/reference/find_response.html), and
[model_info()](https://easystats.github.io/insight/reference/model_info.html).

In all cases, users must supply, at a minimum, the name of the model fit object. In several functions, there are additional arguments that allow for more targeted returns of model information. For example, the `find_terms()` function’s `effects` argument allows for the extraction of “fixed effects” terms, “random effects” terms, or by default, “all” terms in the model object. We point users to the package documentation or the complementary package website, <https://easystats.github.io/insight/>, for a detailed list of the arguments associated with each function as well as the returned values from each function.

### Examples of Use Cases in R

We now would like to provide examples of use cases of the **insight** package. These examples probably do not cover typical real-world problems, but serve as illustration of the core idea of this package: the unified interface to access model information. **insight** should help both users and package developers by reducing the hassle with the many exceptions from various modelling packages when accessing model information.

#### Making Predictions at Specific Values of a Term of Interest

Say, the goal is to make predictions for a certain term, holding remaining co-variates constant. This is achieved by calling `predict()` and feeding the `newdata`-argument with the values of the term of interest as well as the “constant” values for remaining co-variates.
The functions `get_data()` and `find_predictors()` are used to get this information, which then can be used in the call to `predict()`. In this example, we fit a simple linear model, but it could be replaced by (m)any other models, so this approach is “universal” and applies to many different model objects.

``` r
library(insight)
m <- lm(
  Sepal.Length ~ Species + Petal.Width + Sepal.Width,
  data = iris
)

dat <- get_data(m)
pred <- find_predictors(m, flatten = TRUE)

l <- lapply(pred, function(x) {
  if (is.numeric(dat[[x]])) mean(dat[[x]]) else unique(dat[[x]])
})

names(l) <- pred
l <- as.data.frame(l)

cbind(l, predictions = predict(m, newdata = l))
#>      Species Petal.Width Sepal.Width predictions
#> 1     setosa         1.2         3.1         5.1
#> 2 versicolor         1.2         3.1         6.1
#> 3  virginica         1.2         3.1         6.3
```

#### Printing Model Coefficients

The next example should emphasize the possibilities to generalize functions to many different model objects using **insight**. The aim is simply to print coefficients in a complete, human readable sentence.

The first approach uses the functions that are available for some, but obviously not for all models, to access the information about model coefficients.

``` r
print_params <- function(model){
  paste0(
    "My parameters are ",
    paste0(row.names(summary(model)$coefficients), collapse = ", "),
    ", thank you for your attention!"
  )
}

m1 <- lm(Sepal.Length ~ Petal.Width, data = iris)
print_params(m1)
#> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!"

# obviously, something is missing in the output
m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris)
print_params(m2)
#> [1] "My parameters are , thank you for your attention!"
```

As we can see, the function fails for *gam*-models. As the access to models depends on the type of the model in the R ecosystem, we would need to create specific functions for all model types. With **insight**, users can write a function without having to worry about the model type.

``` r
print_params <- function(model){
  paste0(
    "My parameters are ",
    paste0(insight::find_parameters(model, flatten = TRUE), collapse = ", "),
    ", thank you for your attention!"
  )
}

m1 <- lm(Sepal.Length ~ Petal.Width, data = iris)
print_params(m1)
#> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!"

m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris)
print_params(m2)
#> [1] "My parameters are (Intercept), Petal.Width, s(Petal.Length), thank you for your attention!"
```

## Contributing and Support

In case you want to file an issue or contribute in another way to the package, please follow [this guide](https://github.com/easystats/insight/blob/master/.github/CONTRIBUTING.md). For questions about the functionality, you may either contact us via email or also file an issue.

## List of Supported Models by Class

Currently, 207 model classes are supported.
``` r supported_models() #> [1] "aareg" "afex_aov" "AKP" #> [4] "Anova.mlm" "aov" "aovlist" #> [7] "Arima" "averaging" "bamlss" #> [10] "bamlss.frame" "bayesQR" "bayesx" #> [13] "BBmm" "BBreg" "bcplm" #> [16] "betamfx" "betaor" "betareg" #> [19] "BFBayesFactor" "bfsl" "BGGM" #> [22] "bife" "bifeAPEs" "bigglm" #> [25] "biglm" "blavaan" "blrm" #> [28] "bracl" "brglm" "brmsfit" #> [31] "brmultinom" "btergm" "censReg" #> [34] "cgam" "cgamm" "cglm" #> [37] "clm" "clm2" "clmm" #> [40] "clmm2" "clogit" "coeftest" #> [43] "complmrob" "confusionMatrix" "coxme" #> [46] "coxph" "coxph.penal" "coxr" #> [49] "cpglm" "cpglmm" "crch" #> [52] "crq" "crqs" "crr" #> [55] "dep.effect" "DirichletRegModel" "drc" #> [58] "eglm" "elm" "epi.2by2" #> [61] "ergm" "feglm" "feis" #> [64] "felm" "fitdistr" "fixest" #> [67] "flexsurvreg" "gam" "Gam" #> [70] "gamlss" "gamm" "gamm4" #> [73] "garch" "gbm" "gee" #> [76] "geeglm" "glht" "glimML" #> [79] "glm" "Glm" "glmm" #> [82] "glmmadmb" "glmmPQL" "glmmTMB" #> [85] "glmrob" "glmRob" "glmx" #> [88] "gls" "gmnl" "HLfit" #> [91] "htest" "hurdle" "iv_robust" #> [94] "ivFixed" "ivprobit" "ivreg" #> [97] "lavaan" "lm" "lm_robust" #> [100] "lme" "lmerMod" "lmerModLmerTest" #> [103] "lmodel2" "lmrob" "lmRob" #> [106] "logistf" "logitmfx" "logitor" #> [109] "LORgee" "lqm" "lqmm" #> [112] "lrm" "manova" "MANOVA" #> [115] "margins" "maxLik" "mclogit" #> [118] "mcmc" "mcmc.list" "MCMCglmm" #> [121] "mcp1" "mcp12" "mcp2" #> [124] "med1way" "mediate" "merMod" #> [127] "merModList" "meta_bma" "meta_fixed" #> [130] "meta_random" "metaplus" "mhurdle" #> [133] "mipo" "mira" "mixed" #> [136] "MixMod" "mixor" "mjoint" #> [139] "mle" "mle2" "mlm" #> [142] "mlogit" "mmlogit" "model_fit" #> [145] "multinom" "mvord" "negbinirr" #> [148] "negbinmfx" "ols" "onesampb" #> [151] "orm" "pgmm" "plm" #> [154] "PMCMR" "poissonirr" "poissonmfx" #> [157] "polr" "probitmfx" "psm" #> [160] "Rchoice" "ridgelm" "riskRegression" #> [163] "rjags" "rlm" "rlmerMod" #> [166] "RM" "rma" "rma.uni" #> [169] "robmixglm" "robtab" "rq" #> [172] "rqs" "rqss" "Sarlm" #> [175] "scam" "selection" "sem" #> [178] "SemiParBIV" "semLm" "semLme" #> [181] "slm" "speedglm" "speedlm" #> [184] "stanfit" "stanmvreg" "stanreg" #> [187] "summary.lm" "survfit" "survreg" #> [190] "svy_vglm" "svyglm" "svyolr" #> [193] "t1way" "tobit" "trimcibt" #> [196] "truncreg" "vgam" "vglm" #> [199] "wbgee" "wblm" "wbm" #> [202] "wmcpAKP" "yuen" "yuend" #> [205] "zcpglm" "zeroinfl" "zerotrunc" ``` - **Didn’t find a model?** [File an issue](https://github.com/easystats/insight/issues) and request additional model-support in *insight*! ## Credits If this package helped you, please consider citing as follows: Lüdecke D, Waggoner P, Makowski D. insight: A Unified Interface to Access Information from Model Objects in R. Journal of Open Source Software 2019;4:1412. doi: [10.21105/joss.01412](https://doi.org/10.21105/joss.01412) insight/man/0000755000175000017500000000000014164637175012642 5ustar nileshnileshinsight/man/get_predicted.Rd0000644000175000017500000001731214125336207015724 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_predicted.R \name{get_predicted} \alias{get_predicted} \alias{get_predicted.lm} \alias{get_predicted.stanreg} \title{Model Predictions (robust)} \usage{ get_predicted(x, ...) \method{get_predicted}{lm}( x, data = NULL, predict = "expectation", iterations = NULL, verbose = TRUE, ... 
) \method{get_predicted}{stanreg}( x, data = NULL, predict = "expectation", iterations = NULL, include_random = TRUE, include_smooth = TRUE, verbose = TRUE, ... ) } \arguments{ \item{x}{A statistical model (can also be a data.frame, in which case the second argument has to be a model).} \item{...}{Other arguments to be passed, for instance, to \code{\link[=get_predicted_ci]{get_predicted_ci()}}.} \item{data}{An optional data frame in which to look for variables with which to predict. If omitted, the data used to fit the model is used.} \item{predict}{String or \code{NULL}. \itemize{ \item \code{"link"} returns predictions on the model's link-scale (for logistic models, that means the log-odds scale) with a confidence interval (CI). \item \code{"expectation"} (default) also returns confidence intervals, but this time the output is on the response scale (for logistic models, that means probabilities). \item \code{"prediction"} also gives an output on the response scale, but this time associated with a prediction interval (PI), which is larger than a confidence interval (though it mostly makes sense for linear models). \item \code{"classification"} only differs from \code{"prediction"} for binomial models, where it additionally transforms the predictions into the original response's type (for instance, to a factor). \item Other strings are passed directly to the \code{type} argument of the \code{predict()} method supplied by the modelling package. \item When \code{predict = NULL}, alternative arguments such as \code{type} will be captured by the \code{...} ellipsis and passed directly to the \code{predict()} method supplied by the modelling package. \item Notes: You can see the four options for predictions as lying on a gradient from "close to the model" to "close to the response data": "link", "expectation", "prediction", "classification". The \code{predict} argument modulates two things: the scale of the output and the type of certainty interval. Read more about this in the \strong{Details} section below. }} \item{iterations}{For Bayesian models, this corresponds to the number of posterior draws. If \code{NULL}, will return all the draws (one for each iteration of the model). For frequentist models, if not \code{NULL}, will generate bootstrapped draws, from which bootstrapped CIs will be computed. Iterations can be accessed by running \code{as.data.frame()} on the output.} \item{verbose}{Toggle warnings.} \item{include_random}{If \code{TRUE} (default), include all random effects in the prediction. If \code{FALSE}, don't take them into account. Can also be a formula to specify which random effects to condition on when predicting (passed to the \code{re.form} argument). If \code{include_random = TRUE} and \code{newdata} is provided, make sure to include the random effect variables in \code{newdata} as well.} \item{include_smooth}{For Generalized Additive Models (GAMs). If \code{FALSE}, will fix the value of the smooth to its average, so that the predictions do not depend on it.} } \value{ The fitted values (i.e. predictions for the response). For Bayesian or bootstrapped models (when \code{iterations != NULL}), the iterations (as columns, with observations as rows) can be accessed via \code{as.data.frame()}. } \description{ The \code{get_predicted()} function is a robust, flexible and user-friendly alternative to the base R \code{\link[=predict]{predict()}} function.
Additional features and advantages include the availability of uncertainty intervals (CI), bootstrapping, a more intuitive API, and support for more models than base R's \code{predict} function. However, although the interface is simplified, it is still very important to read the documentation of the arguments. This is because making "predictions" (a loose term for a variety of things) is a non-trivial process, with lots of caveats and complications. Read the \code{Details} section for more information. } \details{ In \code{insight::get_predicted()}, the \code{predict} argument jointly modulates two separate concepts, the \strong{scale} and the \strong{uncertainty interval}. \subsection{Confidence Interval (CI) vs. Prediction Interval (PI)}{ \itemize{ \item \strong{Linear models} - \code{lm()}: For linear models, prediction intervals (\code{predict="prediction"}) show the range that likely contains the value of a new observation (in what range it is likely to fall), whereas confidence intervals (\code{predict="expectation"} or \code{predict="link"}) reflect the uncertainty around the estimated parameters (and give the range of uncertainty of the regression line). In general, prediction intervals (PIs) account for both the uncertainty in the model's parameters and the random variation of the individual values. Thus, prediction intervals are always wider than confidence intervals. Moreover, prediction intervals will not necessarily become narrower as the sample size increases (as they do not reflect only the quality of the fit, but also the variability within the data). \item \strong{Generalized Linear models} - \code{glm()}: For binomial models, prediction intervals are somewhat useless (for instance, for a binomial (Bernoulli) model for which the dependent variable is a vector of 1s and 0s, the prediction interval is... \verb{[0, 1]}). }} \subsection{Link scale vs. Response scale}{ When users set the \code{predict} argument to \code{"expectation"}, the predictions are returned on the response scale, which is arguably the most convenient way to understand and visualize relationships of interest. When users set the \code{predict} argument to \code{"link"}, predictions are returned on the link scale, and no transformation is applied. For instance, for a logistic regression model, the response scale corresponds to the predicted probabilities, whereas the link scale makes predictions of log-odds (probabilities on the logit scale). Note that when users select \code{predict="classification"} in binomial models, the \code{get_predicted()} function will first calculate predictions as if the user had selected \code{predict="expectation"}. Then, it will round the responses in order to return the most likely outcome.
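As a minimal sketch of the relation between the two scales (for a logistic regression, the inverse link function is \code{plogis()}):
\preformatted{
# predictions on the link scale (log-odds) vs. the response scale
m <- glm(am ~ mpg, data = mtcars, family = binomial())
on_link <- get_predicted(m, predict = "link")
on_resp <- get_predicted(m, predict = "expectation")

# applying the inverse link to the link-scale predictions should
# recover the response-scale predictions (probabilities)
all.equal(plogis(as.numeric(on_link)), as.numeric(on_resp))
}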
} } \examples{ data(mtcars) x <- lm(mpg ~ cyl + hp, data = mtcars) predictions <- get_predicted(x) predictions # Options and methods --------------------- get_predicted(x, predict = "prediction") # Get CI as.data.frame(predictions) # Bootstrapped as.data.frame(get_predicted(x, iterations = 4)) summary(get_predicted(x, iterations = 4)) # Same as as.data.frame(..., keep_iterations = F) # Different predicttion types ------------------------ data(iris) data <- droplevels(iris[1:100, ]) # Fit a logistic model x <- glm(Species ~ Sepal.Length, data = data, family = "binomial") # Expectation (default): response scale + CI pred <- get_predicted(x, predict = "expectation") head(as.data.frame(pred)) # Prediction: response scale + PI pred <- get_predicted(x, predict = "prediction") head(as.data.frame(pred)) # Link: link scale + CI pred <- get_predicted(x, predict = "link") head(as.data.frame(pred)) # Classification: classification "type" + PI pred <- get_predicted(x, predict = "classification") head(as.data.frame(pred)) } \seealso{ \code{\link[=get_predicted_ci]{get_predicted_ci()}} } insight/man/format_ci.Rd0000644000175000017500000000516114077615665015102 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_ci.R \name{format_ci} \alias{format_ci} \title{Confidence/Credible Interval (CI) Formatting} \usage{ format_ci( CI_low, CI_high, ci = 0.95, digits = 2, brackets = TRUE, width = NULL, width_low = width, width_high = width, missing = "", zap_small = FALSE ) } \arguments{ \item{CI_low}{Lower CI bound.} \item{CI_high}{Upper CI bound.} \item{ci}{CI level in percentage.} \item{digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. \code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{brackets}{Either a logical, and if \code{TRUE} (default), values are encompassed in square brackets. If \code{FALSE} or \code{NULL}, no brackets are used. Else, a character vector of length two, indicating the opening and closing brackets.} \item{width}{Minimum width of the returned string. If not \code{NULL} and \code{width} is larger than the string's length, leading whitespaces are added to the string. If \code{width="auto"}, width will be set to the length of the longest string.} \item{width_low, width_high}{Like \code{width}, but only applies to the lower or higher confidence interval value. This can be used when the values for the lower and upper CI are of very different length.} \item{missing}{Value by which \code{NA} values are replaced. By default, an empty string (i.e. \code{""}) is returned for \code{NA}.} \item{zap_small}{Logical, if \code{TRUE}, small values are rounded after \code{digits} decimal places. If \code{FALSE}, values with more decimal places than \code{digits} are printed in scientific notation.} } \value{ A formatted string. 
} \description{ Confidence/Credible Interval (CI) Formatting } \examples{ format_ci(1.20, 3.57, ci = 0.90) format_ci(1.20, 3.57, ci = NULL) format_ci(1.20, 3.57, ci = NULL, brackets = FALSE) format_ci(1.20, 3.57, ci = NULL, brackets = c("(", ")")) format_ci(c(1.205645, 23.4), c(3.57, -1.35), ci = 0.90) format_ci(c(1.20, NA, NA), c(3.57, -1.35, NA), ci = 0.90) # automatic alignment of width, useful for printing multiple CIs in columns x <- format_ci(c(1.205, 23.4, 100.43), c(3.57, -13.35, 9.4)) cat(x, sep = "\n") x <- format_ci(c(1.205, 23.4, 100.43), c(3.57, -13.35, 9.4), width = "auto") cat(x, sep = "\n") } insight/man/get_transformation.Rd0000644000175000017500000000242214132504734017023 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_transformation.R \name{get_transformation} \alias{get_transformation} \title{Return function of transformed response variables} \usage{ get_transformation(x) } \arguments{ \item{x}{A regression model.} } \value{ A list of two functions: \verb{$transformation}, the function that was used to transform the response variable; \verb{$inverse}, the inverse-function of \verb{$transformation} (can be used for "back-transformation"). If no transformation was applied, both list-elements \verb{$transformation} and \verb{$inverse} just return \code{function(x) x}. } \description{ This function checks whether any transformation, such as log- or exp-transforming, was applied to the response variable (dependent variable) in a regression formula, and returns the related function that was used for transformation. } \examples{ # identity, no transformation model <- lm(Sepal.Length ~ Species, data = iris) get_transformation(model) # log-transformation model <- lm(log(Sepal.Length) ~ Species, data = iris) get_transformation(model) # log-function get_transformation(model)$transformation(.3) log(.3) # inverse function is exp() get_transformation(model)$inverse(.3) exp(.3) } insight/man/get_auxiliary.Rd0000644000175000017500000000622214077615665016004 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_auxiliary.R \name{get_auxiliary} \alias{get_auxiliary} \title{Get auxiliary parameters from models} \usage{ get_auxiliary( x, type = "sigma", summary = TRUE, centrality = "mean", verbose = TRUE, ... ) } \arguments{ \item{x}{A model.} \item{type}{The name of the auxiliary parameter that should be retrieved. \code{"sigma"} is available for most models, \code{"dispersion"} for models of class \code{glm}, \code{glmerMod} or \code{glmmTMB} as well as \code{brmsfit}. \code{"beta"} and other parameters are currently only returned for \code{brmsfit} models. See 'Details'.} \item{summary}{Logical, indicates whether the full posterior samples (\code{summary = FALSE}) or the summarized centrality indices of the posterior samples (\code{summary = TRUE}) should be returned as estimates.} \item{centrality}{Only for models with posterior samples, and when \code{summary = TRUE}. In this case, \code{centrality = "mean"} would calculate means of posterior samples for each parameter, while \code{centrality = "median"} would use the more robust median value as the measure of central tendency.} \item{verbose}{Toggle warnings.} \item{...}{Currently not used.} } \value{ The requested auxiliary parameter, or \code{NULL} if this information could not be accessed. } \description{ Returns the requested auxiliary parameters from models, like dispersion, sigma, or beta...
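As a rough sketch of a typical call (here, sigma from a simple linear model, which should match \code{\link[=get_sigma]{get_sigma()}}):
\preformatted{
# retrieve the residual standard deviation (sigma) of a linear model
m <- lm(mpg ~ wt + cyl, data = mtcars)
get_auxiliary(m, type = "sigma")
}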
} \details{ Currently, only sigma and the dispersion parameter are returned, and only for a limited set of models. \subsection{Sigma Parameter}{ See \code{\link[=get_sigma]{get_sigma()}}. } \subsection{Dispersion Parameter}{ There are many different definitions of "dispersion", depending on the context. \code{get_auxiliary()} returns the dispersion parameters that usually can be considered as variance-to-mean ratio for generalized (linear) mixed models. Exceptions are models of class \code{glmmTMB}, where the dispersion equals \ifelse{html}{\out{σ2}}{\eqn{\sigma^2}}. In detail, the computation of the dispersion parameter for generalized linear models is the ratio of the sum of the squared working-residuals and the residual degrees of freedom. For mixed models of class \code{glmer}, the dispersion parameter is also called \ifelse{html}{\out{φ}}{\eqn{\phi}} and is the ratio of the sum of the squared Pearson-residuals and the residual degrees of freedom. For models of class \code{glmmTMB}, dispersion is \ifelse{html}{\out{σ2}}{\eqn{\sigma^2}}. } \subsection{\pkg{brms} models}{ For models of class \code{brmsfit}, there are different options for the \code{type} argument. See a list of supported auxiliary parameters here: \code{\link[=find_parameters.BGGM]{find_parameters.BGGM()}}. } } \examples{ # from ?glm clotting <- data.frame( u = c(5, 10, 15, 20, 30, 40, 60, 80, 100), lot1 = c(118, 58, 42, 35, 27, 25, 21, 19, 18), lot2 = c(69, 35, 26, 21, 18, 16, 13, 12, 12) ) model <- glm(lot1 ~ log(u), data = clotting, family = Gamma()) get_auxiliary(model, type = "dispersion") # same as summary(model)$dispersion } insight/man/format_value.Rd0000644000175000017500000000476414144234777015627 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_value.R \name{format_value} \alias{format_value} \alias{format_value.data.frame} \alias{format_value.numeric} \title{Numeric Values Formatting} \usage{ format_value(x, ...) \method{format_value}{data.frame}( x, digits = 2, protect_integers = FALSE, missing = "", width = NULL, as_percent = FALSE, zap_small = FALSE, ... ) \method{format_value}{numeric}( x, digits = 2, protect_integers = FALSE, missing = "", width = NULL, as_percent = FALSE, zap_small = FALSE, ... ) } \arguments{ \item{x}{Numeric value.} \item{...}{Arguments passed to or from other methods.} \item{digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. \code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{protect_integers}{Should integers be kept as integers (i.e., without decimals)?} \item{missing}{Value by which \code{NA} values are replaced. By default, an empty string (i.e. \code{""}) is returned for \code{NA}.} \item{width}{Minimum width of the returned string. If not \code{NULL} and \code{width} is larger than the string's length, leading whitespaces are added to the string.} \item{as_percent}{Logical, if \code{TRUE}, value is formatted as percentage value.} \item{zap_small}{Logical, if \code{TRUE}, small values are rounded after \code{digits} decimal places. If \code{FALSE}, values with more decimal places than \code{digits} are printed in scientific notation.} } \value{ A formatted string. 
} \description{ Numeric Values Formatting } \examples{ format_value(1.20) format_value(1.2) format_value(1.2012313) format_value(c(0.0045, 234, -23)) format_value(c(0.0045, .12, .34)) format_value(c(0.0045, .12, .34), as_percent = TRUE) format_value(c(0.0045, .12, .34), digits = "scientific") format_value(c(0.0045, .12, .34), digits = "scientific2") # default format_value(c(0.0045, .123, .345)) # significant figures format_value(c(0.0045, .123, .345), digits = "signif") format_value(as.factor(c("A", "B", "A"))) format_value(iris$Species) format_value(3) format_value(3, protect_integers = TRUE) format_value(head(iris)) } insight/man/clean_parameters.Rd0000644000175000017500000000424214077615665016443 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clean_parameters.R \name{clean_parameters} \alias{clean_parameters} \title{Get clean names of model parameters} \usage{ clean_parameters(x, ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} } \value{ A data frame with "cleaned" parameter names and information on the effects, component and group to which the parameters belong. To be consistent across different models, the returned data frame always has at least the four columns \code{Parameter}, \code{Effects}, \code{Component} and \code{Cleaned_Parameter}. See 'Details'. } \description{ This function "cleans" names of model parameters by removing patterns like \code{"r_"} or \code{"b[]"} (mostly applicable to Stan models) and adding columns indicating to which group or component parameters belong (i.e. fixed or random, count or zero-inflated...) \cr \cr The main purpose of this function is to easily filter and select model parameters, in particular of - but not limited to - posterior samples from Stan models, depending on certain characteristics. This might be useful when only selective results should be reported or results from all parameters should be filtered to return only certain results (see \code{\link[=print_parameters]{print_parameters()}}). } \details{ The \code{Effects} column indicates whether a parameter is a \emph{fixed} or \emph{random} effect. The \code{Component} can either be \emph{conditional} or \emph{zero_inflated}. For models with random effects, the \code{Group} column indicates the grouping factor of the random effects. For multivariate response models from \pkg{brms} or \pkg{rstanarm}, an additional \emph{Response} column is included, to indicate which parameters belong to which response formula. Furthermore, a \emph{Cleaned_Parameter} column is returned that contains "human readable" parameter names (which are mostly identical to \code{Parameter}, except for models from \pkg{brms} or \pkg{rstanarm}, or for specific terms like smooth- or spline-terms). } \examples{ \dontrun{ library(brms) model <- download_model("brms_zi_2") clean_parameters(model) } } insight/man/find_random_slopes.Rd0000644000175000017500000000161014077615665016777 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_random_slopes.R \name{find_random_slopes} \alias{find_random_slopes} \title{Find names of random slopes} \usage{ find_random_slopes(x) } \arguments{ \item{x}{A fitted mixed model.} } \value{ A list of character vectors with the name(s) of the random slopes, or \code{NULL} if the model has no random slopes.
Depending on the model, the returned list has following elements: \itemize{ \item \code{random}, the random slopes from the conditional part of model \item \code{zero_inflated_random}, the random slopes from the zero-inflation component of the model } } \description{ Return the name of the random slopes from mixed effects models. } \examples{ if (require("lme4")) { data(sleepstudy) m <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) find_random_slopes(m) } } insight/man/n_parameters.Rd0000644000175000017500000000476314032422572015606 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_parameters.R \name{n_parameters} \alias{n_parameters} \alias{n_parameters.default} \alias{n_parameters.merMod} \alias{n_parameters.glmmTMB} \alias{n_parameters.zeroinfl} \alias{n_parameters.gam} \alias{n_parameters.brmsfit} \title{Count number of parameters in a model} \usage{ n_parameters(x, ...) \method{n_parameters}{default}(x, remove_nonestimable = FALSE, ...) \method{n_parameters}{merMod}( x, effects = c("fixed", "random"), remove_nonestimable = FALSE, ... ) \method{n_parameters}{glmmTMB}( x, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), remove_nonestimable = FALSE, ... ) \method{n_parameters}{zeroinfl}( x, component = c("all", "conditional", "zi", "zero_inflated"), remove_nonestimable = FALSE, ... ) \method{n_parameters}{gam}( x, component = c("all", "conditional", "smooth_terms"), remove_nonestimable = FALSE, ... ) \method{n_parameters}{brmsfit}(x, effects = "all", component = "all", ...) } \arguments{ \item{x}{A statistical model.} \item{...}{Arguments passed to or from other methods.} \item{remove_nonestimable}{Logical, if \code{TRUE}, removes (i.e. does not count) non-estimable parameters (which may occur for models with rank-deficient model matrix).} \item{effects}{Should number of parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should total number of parameters, number parameters for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated.} } \value{ The number of parameters in the model. } \description{ Returns the number of parameters (coefficients) of a model. } \note{ This function returns the number of parameters for the fixed effects by default, as returned by \code{find_parameters(x, effects = "fixed")}. It does not include \emph{all} estimated model parameters, i.e. auxiliary parameters like sigma or dispersion are not counted. To get the number of \emph{all estimated} parameters, use \code{get_df(x, type = "model")}. 
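A brief sketch of this distinction:
\preformatted{
m <- lm(mpg ~ wt + cyl, data = mtcars)

# counts the fixed-effect coefficients only
n_parameters(m)

# counts all estimated parameters, including auxiliary
# parameters such as sigma
get_df(m, type = "model")
}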
} \examples{ data(iris) model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) n_parameters(model) } insight/man/get_statistic.Rd0000644000175000017500000000664314077615666016014 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_statistic.R \name{get_statistic} \alias{get_statistic} \alias{get_statistic.default} \alias{get_statistic.glmmTMB} \alias{get_statistic.clm2} \alias{get_statistic.betamfx} \alias{get_statistic.logitmfx} \alias{get_statistic.mjoint} \alias{get_statistic.emmGrid} \alias{get_statistic.gee} \alias{get_statistic.betareg} \alias{get_statistic.DirichletRegModel} \title{Get statistic associated with estimates} \usage{ get_statistic(x, ...) \method{get_statistic}{default}(x, column_index = 3, verbose = TRUE, ...) \method{get_statistic}{glmmTMB}( x, component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), ... ) \method{get_statistic}{clm2}(x, component = c("all", "conditional", "scale"), ...) \method{get_statistic}{betamfx}( x, component = c("all", "conditional", "precision", "marginal"), ... ) \method{get_statistic}{logitmfx}(x, component = c("all", "conditional", "marginal"), ...) \method{get_statistic}{mjoint}(x, component = c("all", "conditional", "survival"), ...) \method{get_statistic}{emmGrid}(x, ci = 0.95, adjust = "none", merge_parameters = FALSE, ...) \method{get_statistic}{gee}(x, robust = FALSE, ...) \method{get_statistic}{betareg}(x, component = c("all", "conditional", "precision"), ...) \method{get_statistic}{DirichletRegModel}(x, component = c("all", "conditional", "precision"), ...) } \arguments{ \item{x}{A model.} \item{...}{Currently not used.} \item{column_index}{For model objects that have no defined \code{get_statistic()} method yet, the default method is called. This method tries to extract the statistic column from \code{coef(summary())}, where the index of the column that is being pulled is \code{column_index}. Defaults to 3, which is the default statistic column for most models' summary-output.} \item{verbose}{Toggle messages and warnings.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"} or \code{"all"} (default). For models with smooth terms, \code{component = "smooth_terms"} is also possible. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}). Currently only applies to objects of class \code{emmGrid}.} \item{adjust}{Character value naming the method used to adjust p-values or confidence intervals. See \code{?emmeans::summary.emmGrid} for details.} \item{merge_parameters}{Logical, if \code{TRUE} and \code{x} has multiple columns for parameter names (like \code{emmGrid} objects may have), these are merged into a single parameter column, with parameters names and values as values.} \item{robust}{Logical, if \code{TRUE}, test statistic based on robust standard errors is returned.} } \value{ A data frame with the model's parameter names and the related test statistic. } \description{ Returns the statistic (\emph{t}, \code{z}, ...) for model estimates. In most cases, this is the related column from \code{coef(summary())}. 
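For instance (a small sketch; the type of the returned statistic depends on the model):
\preformatted{
# a logistic regression returns z statistics
m <- glm(vs ~ wt + mpg, data = mtcars, family = binomial())
get_statistic(m)

# find_statistic() reports which statistic a model uses
find_statistic(m)
}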
} \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_statistic(m) } insight/man/format_rope.Rd0000644000175000017500000000172114062425757015445 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_rope.R \name{format_rope} \alias{format_rope} \title{Percentage in ROPE formatting} \usage{ format_rope(rope_percentage, name = "in ROPE", digits = 2) } \arguments{ \item{rope_percentage}{Value or vector of percentages in ROPE.} \item{name}{Name prefixing the text. Can be \code{NULL}.} \item{digits}{Number of significant digits. May also be \code{"scientific"} to return exact p-values in scientific notation, or \code{"apa"} to use an APA 7th edition-style for p-values (equivalent to \code{digits = 3}). If \code{"scientific"}, control the number of digits by adding the value as a suffix, e.g.m \code{digits = "scientific4"} to have scientific notation with 4 decimal places.} } \value{ A formatted string. } \description{ Percentage in ROPE formatting } \examples{ format_rope(c(0.02, 0.12, 0.357, 0)) format_rope(c(0.02, 0.12, 0.357, 0), name = NULL) } insight/man/get_parameters.glmm.Rd0000644000175000017500000000505714077615666017101 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_mixed.R \name{get_parameters.glmm} \alias{get_parameters.glmm} \alias{get_parameters.coxme} \alias{get_parameters.merMod} \alias{get_parameters.glmmTMB} \alias{get_parameters.glimML} \title{Get model parameters from mixed models} \usage{ \method{get_parameters}{glmm}(x, effects = c("all", "fixed", "random"), ...) \method{get_parameters}{coxme}(x, effects = c("fixed", "random"), ...) \method{get_parameters}{merMod}(x, effects = c("fixed", "random"), ...) \method{get_parameters}{glmmTMB}( x, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), ... ) \method{get_parameters}{glimML}(x, effects = c("fixed", "random", "all"), ...) } \arguments{ \item{x}{A fitted model.} \item{effects}{Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{...}{Currently not used.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model or the dispersion term? Applies to models with zero-inflated and/or dispersion formula. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional} or \code{zero_inflated} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma} or \code{dispersion} (and other auxiliary parameters) are returned.} } \value{ If \code{effects = "fixed"}, a data frame with two columns: the parameter names and the related point estimates. If \code{effects = "random"}, a list of data frames with the random effects (as returned by \code{ranef()}), unless the random effects have the same simplified structure as fixed effects (e.g. for models from \pkg{MCMCglmm}). } \description{ Returns the coefficients from a model. 
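A small sketch (assuming \pkg{lme4} is installed) of how the \code{effects} argument switches between fixed and random effects:
\preformatted{
if (require("lme4")) {
  m <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy)

  # fixed-effect coefficients as a data frame
  get_parameters(m, effects = "fixed")

  # random-effect estimates, as returned by ranef()
  get_parameters(m, effects = "random")
}
}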
} \details{ In most cases when models either return different "effects" (fixed, random) or "components" (conditional, zero-inflated, ...), the arguments \code{effects} and \code{component} can be used. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_parameters(m) } insight/man/fish.Rd0000644000175000017500000000035013620020163014033 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fish.R \docType{data} \name{fish} \alias{fish} \title{Sample data set} \description{ A sample data set, used in tests and some examples. } \keyword{data} insight/man/is_gam_model.Rd0000644000175000017500000000166414015406135015540 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_gam_model.R \name{is_gam_model} \alias{is_gam_model} \title{Checks if a model is a generalized additive model} \usage{ is_gam_model(x) } \arguments{ \item{x}{A model object.} } \value{ A logical, \code{TRUE} if \code{x} is a generalized additive model \emph{and} has smooth-terms } \description{ Small helper that checks if a model is a generalized additive model. } \note{ This function only returns \code{TRUE} when the model inherits from a typical GAM model class \emph{and} when smooth terms are present in the model formula. If model has no smooth terms or is not from a typical gam class, \code{FALSE} is returned. } \examples{ if (require("mgcv")) { data(iris) model1 <- lm(Petal.Length ~ Petal.Width + Sepal.Length, data = iris) model2 <- gam(Petal.Length ~ Petal.Width + s(Sepal.Length), data = iris) is_gam_model(model1) is_gam_model(model2) } } insight/man/get_parameters.gamm.Rd0000644000175000017500000000274614077615666017070 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_gam.R \name{get_parameters.gamm} \alias{get_parameters.gamm} \alias{get_parameters.gam} \alias{get_parameters.rqss} \title{Get model parameters from generalized additive models} \usage{ \method{get_parameters}{gamm}( x, component = c("all", "conditional", "smooth_terms", "location"), ... ) \method{get_parameters}{gam}( x, component = c("all", "conditional", "smooth_terms", "location"), ... ) \method{get_parameters}{rqss}(x, component = c("all", "conditional", "smooth_terms"), ...) } \arguments{ \item{x}{A fitted model.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{...}{Currently not used.} } \value{ For models with smooth terms or zero-inflation component, a data frame with three columns: the parameter names, the related point estimates and the component. } \description{ Returns the coefficients from a model. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_parameters(m) } insight/man/find_algorithm.Rd0000644000175000017500000000262114077615665016123 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_algorithm.R \name{find_algorithm} \alias{find_algorithm} \title{Find sampling algorithm and optimizers} \usage{ find_algorithm(x, ...) 
} \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} } \value{ A list with elements depending on the model. \cr For frequentist models: \itemize{ \item \code{algorithm}, for instance \code{"OLS"} or \code{"ML"} \item \code{optimizer}, name of optimizing function, only applies to specific models (like \code{gam}) } For frequentist mixed models: \itemize{ \item \code{algorithm}, for instance \code{"REML"} or \code{"ML"} \item \code{optimizer}, name of optimizing function } For Bayesian models: \itemize{ \item \code{algorithm}, the algorithm \item \code{chains}, number of chains \item \code{iterations}, number of iterations per chain \item \code{warmup}, number of warmups per chain } } \description{ Returns information on the sampling or estimation algorithm as well as optimization functions, or for Bayesian model information on chains, iterations and warmup-samples. } \examples{ if (require("lme4")) { data(sleepstudy) m <- lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) find_algorithm(m) } \dontrun{ library(rstanarm) m <- stan_lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) find_algorithm(m) } } insight/man/get_predictors.Rd0000644000175000017500000000106113761646436016146 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_predictors.R \name{get_predictors} \alias{get_predictors} \title{Get the data from model predictors} \usage{ get_predictors(x, verbose = TRUE) } \arguments{ \item{x}{A fitted model.} \item{verbose}{Toggle messages and warnings.} } \value{ The data from all predictor variables, as data frame. } \description{ Returns the data from all predictor variables (fixed effects). } \examples{ m <- lm(mpg ~ wt + cyl + vs, data = mtcars) head(get_predictors(m)) } insight/man/find_parameters.zeroinfl.Rd0000644000175000017500000000460014077615665020126 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameter_zi.R \name{find_parameters.zeroinfl} \alias{find_parameters.zeroinfl} \alias{find_parameters.mhurdle} \title{Find names of model parameters from zero-inflated models} \usage{ \method{find_parameters}{zeroinfl}( x, component = c("all", "conditional", "zi", "zero_inflated"), flatten = FALSE, ... ) \method{find_parameters}{mhurdle}( x, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), flatten = FALSE, ... ) } \arguments{ \item{x}{A fitted model.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables or marginal effects be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so called fixed-effects regressions), or models with marginal effects from \pkg{mfx}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). 
For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{...}{Currently not used.} } \value{ A list of parameter names. The returned list may have following elements: \itemize{ \item \code{conditional}, the "fixed effects" part from the model. \item \code{zero_inflated}, the "fixed effects" part from the zero-inflation component of the model. } } \description{ Returns the names of model parameters, like they typically appear in the \code{summary()} output. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_parameters(m) } insight/man/ellipsis_info.Rd0000644000175000017500000000245614014733011015753 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ellipsis_info.R \name{ellipsis_info} \alias{ellipsis_info} \alias{ellipsis_info.default} \title{Gather information about objects in ellipsis (dot dot dot)} \usage{ ellipsis_info(objects, ...) \method{ellipsis_info}{default}(..., only_models = TRUE) } \arguments{ \item{objects, ...}{Arbitrary number of objects.} \item{only_models}{Only keep supported models (default to \code{TRUE}).} } \value{ The list with objects that were passed to the function, including additional information as attributes (e.g. if models have same response or are nested). } \description{ Provides information regarding the models entered in an ellipsis. It detects whether all are models, regressions, nested regressions etc., assigning different classes to the list of objects. } \examples{ m1 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) m2 <- lm(Sepal.Length ~ Species, data = iris) m3 <- lm(Sepal.Length ~ Petal.Width, data = iris) m4 <- lm(Sepal.Length ~ 1, data = iris) m5 <- lm(Petal.Width ~ 1, data = iris) objects <- ellipsis_info(m1, m2, m3, m4) class(objects) objects <- ellipsis_info(m1, m2, m4) attributes(objects)$is_nested objects <- ellipsis_info(m1, m2, m5) attributes(objects)$same_response } insight/man/model_name.Rd0000644000175000017500000000173614014543421015220 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_name.R \name{model_name} \alias{model_name} \alias{model_name.default} \title{Name the model} \usage{ model_name(x, ...) \method{model_name}{default}(x, include_formula = FALSE, include_call = FALSE, ...) } \arguments{ \item{x}{A model.} \item{...}{Currently not used.} \item{include_formula}{Should the name include the model's formula.} \item{include_call}{If \code{TRUE}, will return the function call as a name.} } \value{ A character string of a name (which usually equals the model's class attribute). } \description{ Returns the "name" (class attribute) of a model, possibly including further information. 
} \examples{ m <- lm(Sepal.Length ~ Petal.Width, data = iris) model_name(m) model_name(m, include_formula = TRUE) model_name(m, include_call = TRUE) if (require("lme4")) { model_name(lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris)) } } insight/man/standardize_names.Rd0000644000175000017500000000515214077615666016633 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/standardize_names.R \name{standardize_names} \alias{standardize_names} \alias{standardize_names.parameters_model} \title{Standardize column names} \usage{ standardize_names(data, ...) \method{standardize_names}{parameters_model}( data, style = c("easystats", "broom"), ignore_estimate = FALSE, ... ) } \arguments{ \item{data}{A data frame. In particular, objects from \emph{easystats} package functions like \code{\link[parameters:model_parameters]{parameters::model_parameters()}} or \code{\link[effectsize:effectsize]{effectsize::effectsize()}} are accepted, but also data frames returned by \code{broom::tidy()} are valid objects.} \item{...}{Currently not used.} \item{style}{Standardization can either be based on the naming conventions from the \href{https://easystats.github.io/easystats/}{easystats-project}, or on \pkg{broom}'s naming scheme.} \item{ignore_estimate}{Logical, if \code{TRUE}, column names like \code{"mean"} or \code{"median"} will \emph{not} be converted to \code{"Coefficient"} resp. \code{"estimate"}.} } \value{ A data frame, with standardized column names. } \description{ Standardize column names from data frames, in particular objects returned from \code{\link[parameters:model_parameters]{parameters::model_parameters()}}, so column names are consistent and the same for any model object. } \details{ This method is in particular useful for package developers or users who use, e.g., \code{\link[parameters:model_parameters]{parameters::model_parameters()}} in their own code or functions to retrieve model parameters for further processing. As \code{model_parameters()} returns a data frame with varying column names (depending on the input), accessing the required information is probably not quite straightforward. In such cases, \code{standardize_names()} can be used to get consistent, i.e. always the same column names, no matter what kind of model was used in \code{model_parameters()}. \cr \cr For \code{style = "broom"}, column names are renamed to match \pkg{broom}'s naming scheme, i.e. \code{Parameter} is renamed to \code{term}, \code{Coefficient} becomes \code{estimate} and so on. \cr \cr For \code{style = "easystats"}, when \code{data} is an object from \code{broom::tidy()}, column names are converted from "broom"-style into "easystats"-style. } \examples{ if (require("parameters")) { model <- lm(mpg ~ wt + cyl, data = mtcars) mp <- model_parameters(model) as.data.frame(mp) standardize_names(mp) standardize_names(mp, style = "broom") } } insight/man/display.Rd0000644000175000017500000000237514077615665014610 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/display.R \name{display} \alias{display} \alias{print_md} \alias{print_html} \alias{display.data.frame} \alias{print_md.data.frame} \alias{print_html.data.frame} \title{Generic export of data frames into formatted tables} \usage{ display(object, ...) print_md(x, ...) print_html(x, ...) \method{display}{data.frame}(object, format = "markdown", ...) \method{print_md}{data.frame}(x, ...) \method{print_html}{data.frame}(x, ...) 
} \arguments{ \item{object, x}{A data frame.} \item{...}{Arguments passed to other methods.} \item{format}{String, indicating the output format. Can be \code{"markdown"} or \code{"html"}.} } \value{ Depending on \code{format}, either an object of class \code{gt_tbl} or a character vector of class \code{knitr_kable}. } \description{ \code{display()} is a generic function to export data frames into various table formats (like plain text, markdown, ...). \code{print_md()} usually is a convenient wrapper for \code{display(format = "markdown")}. Similar, \code{print_html()} is a shortcut for \code{display(format = "html")}. See the documentation for the specific objects' classes. } \examples{ display(iris[1:5, ]) } insight/man/find_parameters.betamfx.Rd0000644000175000017500000000460114077615665017725 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameters_mfx.R \name{find_parameters.betamfx} \alias{find_parameters.betamfx} \alias{find_parameters.logitmfx} \title{Find names of model parameters from marginal effects models} \usage{ \method{find_parameters}{betamfx}( x, component = c("all", "conditional", "precision", "marginal", "location", "distributional", "auxiliary"), flatten = FALSE, ... ) \method{find_parameters}{logitmfx}( x, component = c("all", "conditional", "marginal", "location"), flatten = FALSE, ... ) } \arguments{ \item{x}{A fitted model.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables or marginal effects be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so called fixed-effects regressions), or models with marginal effects from \pkg{mfx}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{...}{Currently not used.} } \value{ A list of parameter names. The returned list may have following elements: \itemize{ \item \code{conditional}, the "fixed effects" part from the model. \item \code{marginal}, the marginal effects. \item \code{precision}, the precision parameter. } } \description{ Returns the names of model parameters, like they typically appear in the \code{summary()} output. 
} \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_parameters(m) } insight/man/get_parameters.betareg.Rd0000644000175000017500000000411014077615666017543 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_others.R \name{get_parameters.betareg} \alias{get_parameters.betareg} \alias{get_parameters.DirichletRegModel} \alias{get_parameters.averaging} \alias{get_parameters.glmx} \alias{get_parameters.clm2} \alias{get_parameters.mvord} \alias{get_parameters.mjoint} \title{Get model parameters from models with special components} \usage{ \method{get_parameters}{betareg}( x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), ... ) \method{get_parameters}{DirichletRegModel}( x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), ... ) \method{get_parameters}{averaging}(x, component = c("conditional", "full"), ...) \method{get_parameters}{glmx}( x, component = c("all", "conditional", "extra", "location", "distributional", "auxiliary"), ... ) \method{get_parameters}{clm2}(x, component = c("all", "conditional", "scale"), ...) \method{get_parameters}{mvord}( x, component = c("all", "conditional", "thresholds", "correlation"), ... ) \method{get_parameters}{mjoint}(x, component = c("all", "conditional", "survival"), ...) } \arguments{ \item{x}{A fitted model.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{...}{Currently not used.} } \value{ A data frame with three columns: the parameter names, the related point estimates and the component. } \description{ Returns the coefficients from a model. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_parameters(m) } insight/man/print_color.Rd0000644000175000017500000000250314077615666015467 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print_color.R \name{print_color} \alias{print_color} \alias{print_colour} \alias{color_text} \alias{colour_text} \alias{color_theme} \title{Coloured console output} \usage{ print_color(text, color) print_colour(text, colour) color_text(text, color) colour_text(text, colour) color_theme() } \arguments{ \item{text}{The text to print.} \item{color, colour}{Character vector, indicating the colour for printing. May be one of \code{"red"}, \code{"yellow"}, \code{"green"}, \code{"blue"}, \code{"violet"}, \code{"cyan"} or \code{"grey"}. Formatting is also possible with \code{"bold"} or \code{"italic"}.} } \value{ Nothing. } \description{ Convenient function that allows coloured output in the console. Mainly implemented to reduce package dependencies. } \details{ This function prints \code{text} directly to the console using \code{cat()}, so no string is returned. \code{color_text()}, however, returns only the formatted string, without using \code{cat()}. \code{color_theme()} either returns \code{"dark"} when RStudio is used with dark color scheme, \code{"light"} when it's used with light theme, and \code{NULL} if the theme could not be detected. 
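A short sketch of the difference:
\preformatted{
# print_color() prints directly to the console (returns nothing)
print_color("All good!", "green")

# color_text() only returns the formatted string, so it can be
# embedded in other output
msg <- color_text("All good!", "green")
message("Status: ", msg)
}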
} \examples{ print_color("I'm blue dabedi dabedei", "blue") } insight/man/format_message.Rd0000644000175000017500000000175114077615665016134 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_message.R \name{format_message} \alias{format_message} \title{Format messages and warnings} \usage{ format_message(string, ..., line_length = options()$width) } \arguments{ \item{string}{A string.} \item{...}{Further strings that will be concatenated as indented new lines.} \item{line_length}{Numeric, the maximum length of a line.} } \value{ A formatted string. } \description{ Inserts line breaks into a longer message or warning string. Line length is adjusted to maximum length of the console, if the width can be accessed. By default, new lines are indented by two whitespace. } \examples{ msg <- format_message("Much too long string for just one line, I guess!", line_length = 15 ) message(msg) msg <- format_message("Much too long string for just one line, I guess!", "First new line", "Second new line", "(both indented)", line_length = 30 ) message(msg) } insight/man/get_parameters.emmGrid.Rd0000644000175000017500000000331314077615666017522 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_emmeans.R \name{get_parameters.emmGrid} \alias{get_parameters.emmGrid} \alias{get_parameters.emm_list} \title{Get model parameters from estimated marginal means objects} \usage{ \method{get_parameters}{emmGrid}(x, summary = FALSE, merge_parameters = FALSE, ...) \method{get_parameters}{emm_list}(x, summary = FALSE, ...) } \arguments{ \item{x}{A fitted model.} \item{summary}{Logical, indicates whether the full posterior samples (\code{summary = FALSE})) or the summarized centrality indices of the posterior samples (\code{summary = TRUE})) should be returned as estimates.} \item{merge_parameters}{Logical, if \code{TRUE} and \code{x} has multiple columns for parameter names (like \code{emmGrid} objects may have), these are merged into a single parameter column, with parameters names and values as values.} \item{...}{Currently not used.} } \value{ A data frame with two columns: the parameter names and the related point estimates. } \description{ Returns the coefficients from a model. } \note{ Note that \code{emmGrid} or \code{emm_list} objects returned by functions from \pkg{emmeans} have a different structure compared to usual regression models. Hence, the \code{Parameter} column does not always contain names of \emph{variables}, but may rather contain \emph{values}, e.g. for contrasts. See an example for pairwise comparisons below. } \examples{ data(mtcars) model <- lm(mpg ~ wt * factor(cyl), data = mtcars) if (require("emmeans", quietly = TRUE)) { emm <- emmeans(model, "cyl") get_parameters(emm) emm <- emmeans(model, pairwise ~ cyl) get_parameters(emm) } } insight/man/find_parameters.gamlss.Rd0000644000175000017500000000432214077615665017565 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameters_gam.R \name{find_parameters.gamlss} \alias{find_parameters.gamlss} \alias{find_parameters.gam} \title{Find names of model parameters from generalized additive models} \usage{ \method{find_parameters}{gamlss}(x, flatten = FALSE, ...) \method{find_parameters}{gam}( x, component = c("all", "conditional", "smooth_terms", "location"), flatten = FALSE, ... 
) } \arguments{ \item{x}{A fitted model.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{...}{Currently not used.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables or marginal effects be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so called fixed-effects regressions), or models with marginal effects from \pkg{mfx}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned.} } \value{ A list of parameter names. The returned list may have following elements: \itemize{ \item \code{conditional}, the "fixed effects" part from the model. \item \code{smooth_terms}, the smooth parameters. } } \description{ Returns the names of model parameters, like they typically appear in the \code{summary()} output. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_parameters(m) } insight/man/get_residuals.Rd0000644000175000017500000000253414077615666015773 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_residuals.R \name{get_residuals} \alias{get_residuals} \alias{get_residuals.default} \title{Extract model residuals} \usage{ get_residuals(x, ...) \method{get_residuals}{default}(x, weighted = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{A model.} \item{...}{Passed down to \code{residuals()}, if possible.} \item{weighted}{Logical, if \code{TRUE}, returns weighted residuals.} \item{verbose}{Toggle warnings and messages.} } \value{ The residuals, or \code{NULL} if this information could not be accessed. } \description{ Returns the residuals from regression models. } \note{ This function returns the default type of residuals, i.e. for the response from linear models, the deviance residuals for models of class \code{glm} etc. To access different types, pass down the \code{type} argument (see 'Examples'). \cr \cr This function is a robust alternative to \code{residuals()}, as it works for some special model objects that otherwise do not respond properly to calling \code{residuals()}. 
} \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_residuals(m) m <- glm(vs ~ wt + cyl + mpg, data = mtcars, family = binomial()) get_residuals(m) # type = "deviance" by default get_residuals(m, type = "response") } insight/man/find_parameters.glmmTMB.Rd0000644000175000017500000000471214077615665017601 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameters_mixed.R \name{find_parameters.glmmTMB} \alias{find_parameters.glmmTMB} \alias{find_parameters.merMod} \title{Find names of model parameters from mixed models} \usage{ \method{find_parameters}{glmmTMB}( x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), flatten = FALSE, ... ) \method{find_parameters}{merMod}(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) } \arguments{ \item{x}{A fitted model.} \item{effects}{Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model or the dispersion term? Applies to models with zero-inflated and/or dispersion formula. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional} or \code{zero_inflated} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma} or \code{dispersion} (and other auxiliary parameters) are returned.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{...}{Currently not used.} } \value{ A list of parameter names. The returned list may have following elements: \itemize{ \item \code{conditional}, the "fixed effects" part from the model. \item \code{random}, the "random effects" part from the model. \item \code{zero_inflated}, the "fixed effects" part from the zero-inflation component of the model. \item \code{zero_inflated_random}, the "random effects" part from the zero-inflation component of the model. \item \code{dispersion}, the dispersion parameters (auxiliary parameter) } } \description{ Returns the names of model parameters, like they typically appear in the \code{summary()} output. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_parameters(m) } insight/man/is_multivariate.Rd0000644000175000017500000000177614077615666016351 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_multivariate.R \name{is_multivariate} \alias{is_multivariate} \title{Checks if an object stems from a multivariate response model} \usage{ is_multivariate(x) } \arguments{ \item{x}{A model object, or an object returned by a function from this package.} } \value{ A logical, \code{TRUE} if either \code{x} is a model object and is a multivariate response model, or \code{TRUE} if a return value from a function of \pkg{insight} is from a multivariate response model. } \description{ Small helper that checks if a model is a multivariate response model, i.e. a model with multiple outcomes. 
} \examples{ \dontrun{ library(rstanarm) data("pbcLong") model <- stan_mvmer( formula = list( logBili ~ year + (1 | id), albumin ~ sex + year + (year | id) ), data = pbcLong, chains = 1, cores = 1, seed = 12345, iter = 1000 ) f <- find_formula(model) is_multivariate(model) is_multivariate(f) } } insight/man/null_model.Rd0000644000175000017500000000140414077615666015266 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/null_model.R \name{null_model} \alias{null_model} \title{Compute intercept-only model for regression models} \usage{ null_model(model, verbose = TRUE, ...) } \arguments{ \item{model}{A (mixed effects) model.} \item{verbose}{Toggle off warnings.} \item{...}{Arguments passed to or from other methods.} } \value{ The null-model of \code{x} } \description{ This function computes the null-model (i.e. \code{(y ~ 1)}) of a model. For mixed models, the null-model takes random effects into account. } \examples{ if (require("lme4")) { data(sleepstudy) m <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) summary(m) summary(null_model(m)) } } insight/man/clean_names.Rd0000644000175000017500000000360514077615665015405 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clean_names.R \name{clean_names} \alias{clean_names} \alias{clean_names.character} \title{Get clean names of model terms} \usage{ clean_names(x, ...) \method{clean_names}{character}(x, include_names = FALSE, ...) } \arguments{ \item{x}{A fitted model, or a character vector.} \item{...}{Currently not used.} \item{include_names}{Logical, if \code{TRUE}, returns a named vector where names are the original values of \code{x}.} } \value{ The "cleaned" variable names as character vector, i.e. pattern like \code{s()} for splines or \code{log()} are removed from the model terms. } \description{ This function "cleans" names of model terms (or a character vector with such names) by removing patterns like \code{log()} or \code{as.factor()} etc. } \note{ Typically, this method is intended to work on character vectors, in order to remove patterns that obscure the variable names. For convenience reasons it is also possible to call \code{clean_names()} also on a model object. If \code{x} is a regression model, this function is (almost) equal to calling \code{find_variables()}. The main difference is that \code{clean_names()} always returns a character vector, while \code{find_variables()} returns a list of character vectors, unless \code{flatten = TRUE}. See 'Examples'. } \examples{ # example from ?stats::glm counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) outcome <- as.numeric(gl(3, 1, 9)) treatment <- gl(3, 3) m <- glm(counts ~ log(outcome) + as.factor(treatment), family = poisson()) clean_names(m) # difference "clean_names()" and "find_variables()" if (require("lme4")) { m <- glmer( cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial ) clean_names(m) find_variables(m) find_variables(m, flatten = TRUE) } } insight/man/get_response.Rd0000644000175000017500000000217614077615666015640 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_response.R \name{get_response} \alias{get_response} \title{Get the values from the response variable} \usage{ get_response(x, select = NULL, verbose = TRUE) } \arguments{ \item{x}{A fitted model.} \item{select}{Optional name(s) of response variables for which to extract values. 
Can be used in case of regression models with multiple response variables.} \item{verbose}{Toggle warnings.} } \value{ The values of the response variable, as vector, or a data frame if \code{x} has more than one defined response variable. } \description{ Returns the values the response variable(s) from a model object. If the model is a multivariate response model, a data frame with values from all response variables is returned. } \examples{ if (require("lme4")) { data(cbpp) cbpp$trials <- cbpp$size - cbpp$incidence m <- glm(cbind(incidence, trials) ~ period, data = cbpp, family = binomial) head(get_response(m)) get_response(m, select = "incidence") } data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_response(m) } insight/man/get_priors.Rd0000644000175000017500000000141714077615666015315 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_priors.R \name{get_priors} \alias{get_priors} \alias{get_priors.brmsfit} \title{Get summary of priors used for a model} \usage{ get_priors(x, ...) \method{get_priors}{brmsfit}(x, verbose = TRUE, ...) } \arguments{ \item{x}{A Bayesian model.} \item{...}{Currently not used.} \item{verbose}{Toggle warnings and messages.} } \value{ A data frame with a summary of the prior distributions used for the parameters in a given model. } \description{ Provides a summary of the prior distributions used for the parameters in a given model. } \examples{ \dontrun{ library(rstanarm) model <- stan_glm(Sepal.Width ~ Species * Petal.Length, data = iris) get_priors(model) } } insight/man/is_nested_models.Rd0000644000175000017500000000166614077615666016466 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_nested_models.R \name{is_nested_models} \alias{is_nested_models} \title{Checks whether a list of models are nested models} \usage{ is_nested_models(...) } \arguments{ \item{...}{Multiple regression model objects.} } \value{ \code{TRUE} if models are nested, \code{FALSE} otherwise. If models are nested, also returns two attributes that indicate whether nesting of models is in decreasing or increasing order. } \description{ Checks whether a list of models are nested models, strictly following the order they were passed to the function. } \examples{ m1 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) m2 <- lm(Sepal.Length ~ Species, data = iris) m3 <- lm(Sepal.Length ~ Petal.Width, data = iris) m4 <- lm(Sepal.Length ~ 1, data = iris) is_nested_models(m1, m2, m4) is_nested_models(m4, m2, m1) is_nested_models(m1, m2, m3) } insight/man/find_interactions.Rd0000644000175000017500000000360014077615665016635 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_interactions.R \name{find_interactions} \alias{find_interactions} \title{Find interaction terms from models} \usage{ find_interactions( x, component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "instruments"), flatten = FALSE ) } \arguments{ \item{x}{A fitted model.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated. 
Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} } \value{ A list of character vectors that represent the interaction terms. Depending on \code{component}, the returned list has following elements (or \code{NULL}, if model has no interaction term): \itemize{ \item \code{conditional}, interaction terms that belong to the "fixed effects" terms from the model \item \code{zero_inflated}, interaction terms that belong to the "fixed effects" terms from the zero-inflation component of the model \item \code{instruments}, for fixed-effects regressions like \code{ivreg}, \code{felm} or \code{plm}, interaction terms that belong to the instrumental variables } } \description{ Returns all lowest to highest order interaction terms from a model. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_interactions(m) m <- lm(mpg ~ wt * cyl + vs * hp * gear + carb, data = mtcars) find_interactions(m) } insight/man/get_loglikelihood.Rd0000644000175000017500000000410714077615666016623 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_loglikelihood.R \name{get_loglikelihood} \alias{get_loglikelihood} \alias{loglikelihood} \alias{get_loglikelihood.lm} \title{Log-Likelihood} \usage{ get_loglikelihood(x, ...) loglikelihood(x, ...) \method{get_loglikelihood}{lm}(x, estimator = "ML", REML = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{A model.} \item{...}{Passed down to \code{logLik()}, if possible.} \item{estimator}{Corresponds to the different estimators for the standard deviation of the errors. If \code{estimator="ML"} (default), the scaling is done by n (the biased ML estimator), which is then equivalent to using \code{stats::logLik()}. If \code{estimator="OLS"}, it returns the unbiased OLS estimator.} \item{REML}{Only for linear models. This argument is present for compatibility with \code{stats::logLik()}. Setting it to \code{TRUE} will overwrite the \code{estimator} argument and is thus equivalent to setting \code{estimator="REML"}. It will give the same results as \code{stats::logLik(..., REML=TRUE)}. Note that individual log-likelihoods are not available under REML.} \item{verbose}{Toggle warnings and messages.} } \value{ An object of class \code{"logLik"}, also containing the log-likelihoods for each observation as a \code{per_observation} attribute (\code{attributes(get_loglikelihood(x))$per_observation}) when possible. The code was partly inspired from the \CRANpkg{nonnest2} package. } \description{ A robust function to compute the log-likelihood of a model, as well as individual log-likelihoods (for each observation) whenever possible. Can be used as a replacement for \code{stats::logLik()} out of the box, as the returned object is of the same class (and it gives the same results by default). 
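For instance, a minimal sketch of accessing both the total and the per-observation log-likelihoods (see 'Value'; the attribute is only available when individual log-likelihoods can be computed): \preformatted{
m <- lm(mpg ~ wt, data = mtcars)
ll <- get_loglikelihood(m)

# total log-likelihood, same class and value as stats::logLik(m)
ll

# individual log-likelihoods, stored as an attribute
head(attributes(ll)$per_observation)
}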
} \examples{ x <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) get_loglikelihood(x, estimator = "ML") # Equivalent to stats::logLik(x) get_loglikelihood(x, estimator = "REML") # Equivalent to stats::logLik(x, REML=TRUE) get_loglikelihood(x, estimator = "OLS") } insight/man/get_data.Rd0000644000175000017500000000751514137207374014703 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_data.R \name{get_data} \alias{get_data} \alias{get_data.gee} \alias{get_data.rqss} \alias{get_data.hurdle} \alias{get_data.zcpglm} \alias{get_data.glmmTMB} \alias{get_data.merMod} \alias{get_data.glmmadmb} \alias{get_data.rlmerMod} \alias{get_data.clmm} \alias{get_data.mixed} \alias{get_data.afex_aov} \alias{get_data.lme} \alias{get_data.MixMod} \alias{get_data.brmsfit} \alias{get_data.stanreg} \alias{get_data.MCMCglmm} \title{Get the data that was used to fit the model} \usage{ get_data(x, ...) \method{get_data}{gee}(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) \method{get_data}{rqss}( x, component = c("all", "conditional", "smooth_terms"), verbose = TRUE, ... ) \method{get_data}{hurdle}( x, component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = TRUE, ... ) \method{get_data}{zcpglm}( x, component = c("all", "conditional", "zi", "zero_inflated"), verbose = TRUE, ... ) \method{get_data}{glmmTMB}( x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = TRUE, ... ) \method{get_data}{merMod}(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) \method{get_data}{glmmadmb}(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) \method{get_data}{rlmerMod}(x, effects = c("all", "fixed", "random"), ...) \method{get_data}{clmm}(x, effects = c("all", "fixed", "random"), ...) \method{get_data}{mixed}(x, effects = c("all", "fixed", "random"), ...) \method{get_data}{afex_aov}(x, shape = c("long", "wide"), ...) \method{get_data}{lme}(x, effects = c("all", "fixed", "random"), ...) \method{get_data}{MixMod}( x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = TRUE, ... ) \method{get_data}{brmsfit}(x, effects = "all", component = "all", verbose = TRUE, ...) \method{get_data}{stanreg}(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) \method{get_data}{MCMCglmm}(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{effects}{Should model data for fixed effects, random effects or both be returned? Only applies to mixed models.} \item{verbose}{Toggle messages and warnings.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{shape}{Return long or wide data? Only applicable in repeated measures designs.} } \value{ The data that was used to fit the model. } \description{ This functions tries to get the data that was used to fit the model and returns it as data frame. 
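As a minimal sketch (see also the 'Note' below on untransformed variables): \preformatted{
m <- lm(mpg ~ log(wt), data = mtcars)

# returns the data used to fit the model; the column is named
# "wt" (the original variable), not "log(wt)"
head(get_data(m))
}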
} \note{ Unlike \code{model.frame()}, which may contain transformed variables (e.g. if \code{poly()} or \code{scale()} was used inside the formula to specify the model), \code{get_data()} aims at returning the "original", untransformed data (if possible). Consequently, column names are changed accordingly, i.e. \code{"log(x)"} will become \code{"x"} etc. for all data columns with transformed values. } \examples{ if (require("lme4")) { data(cbpp, package = "lme4") cbpp$trials <- cbpp$size - cbpp$incidence m <- glm(cbind(incidence, trials) ~ period, data = cbpp, family = binomial) head(get_data(m)) } } insight/man/find_transformation.Rd0000644000175000017500000000230414132251724017161 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_transformation.R \name{find_transformation} \alias{find_transformation} \title{Find possible transformation of response variables} \usage{ find_transformation(x) } \arguments{ \item{x}{A regression model.} } \value{ A string with the name of the applied transformation function. Returns \code{"identity"} for no transformation, and e.g. \code{"log(x+3)"} when a specific value was added to the response variable before log-transforming. } \description{ This function checks whether any transformation, such as log- or exp-transforming, was applied to the response variable (dependent variable) in a regression formula. Currently, the following patterns are detected: \code{log}, \code{log1p}, \code{exp}, \code{expm1}, \code{sqrt}, \verb{log(x+)} and \code{log-log}. } \examples{ # identity, no transformation model <- lm(Sepal.Length ~ Species, data = iris) find_transformation(model) # log-transformation model <- lm(log(Sepal.Length) ~ Species, data = iris) find_transformation(model) # log+2 model <- lm(log(Sepal.Length + 2) ~ Species, data = iris) find_transformation(model) } insight/man/dot-colour_detect.Rd0000644000175000017500000000040513633445677016553 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/color_if.R \name{.colour_detect} \alias{.colour_detect} \title{Detect coloured cells} \usage{ .colour_detect(x) } \description{ Detect coloured cells } \keyword{internal} insight/man/find_parameters.Rd0000644000175000017500000000337414077615665016276 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameters.R \name{find_parameters} \alias{find_parameters} \alias{find_parameters.default} \title{Find names of model parameters} \usage{ find_parameters(x, ...) \method{find_parameters}{default}(x, flatten = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{verbose}{Toggle messages and warnings.} } \value{ A list of parameter names. For simple models, only one list-element, \code{conditional}, is returned. } \description{ Returns the names of model parameters, like they typically appear in the \code{summary()} output. For Bayesian models, the parameter names equal the column names of the posterior samples after coercion from \code{as.data.frame()}.
See the documentation for your object's class: \itemize{ \item{\link[=find_parameters.BGGM]{Bayesian models} (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, ...)} \item{\link[=find_parameters.gamlss]{Generalized additive models} (\pkg{mgcv}, \pkg{VGAM}, ...)} \item{\link[=find_parameters.betamfx]{Marginal effects models} (\pkg{mfx})} \item{\link[=find_parameters.emmGrid]{Estimated marginal means} (\pkg{emmeans})} \item{\link[=find_parameters.glmmTMB]{Mixed models} (\pkg{lme4}, \pkg{glmmTMB}, \pkg{GLMMadaptive}, ...)} \item{\link[=find_parameters.zeroinfl]{Zero-inflated and hurdle models} (\pkg{pscl}, ...)} \item{\link[=find_parameters.averaging]{Models with special components} (\pkg{betareg}, \pkg{MuMIn}, ...)} } } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_parameters(m) } insight/man/get_variance.Rd0000644000175000017500000001737714100215451015552 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_variances.R \name{get_variance} \alias{get_variance} \alias{get_variance_residual} \alias{get_variance_fixed} \alias{get_variance_random} \alias{get_variance_distribution} \alias{get_variance_dispersion} \alias{get_variance_intercept} \alias{get_variance_slope} \alias{get_correlation_slope_intercept} \alias{get_correlation_slopes} \title{Get variance components from random effects models} \usage{ get_variance( x, component = c("all", "fixed", "random", "residual", "distribution", "dispersion", "intercept", "slope", "rho01", "rho00"), verbose = TRUE, ... ) get_variance_residual(x, verbose = TRUE, ...) get_variance_fixed(x, verbose = TRUE, ...) get_variance_random(x, verbose = TRUE, tolerance = 1e-05, ...) get_variance_distribution(x, verbose = TRUE, ...) get_variance_dispersion(x, verbose = TRUE, ...) get_variance_intercept(x, verbose = TRUE, ...) get_variance_slope(x, verbose = TRUE, ...) get_correlation_slope_intercept(x, verbose = TRUE, ...) get_correlation_slopes(x, verbose = TRUE, ...) } \arguments{ \item{x}{A mixed effects model.} \item{component}{Character value, indicating the variance component that should be returned. By default, all variance components are returned. The distribution-specific (\code{"distribution"}) and residual (\code{"residual"}) variance are the most computational intensive components, and hence may take a few seconds to calculate.} \item{verbose}{Toggle off warnings.} \item{...}{Currently not used.} \item{tolerance}{Tolerance for singularity check of random effects, to decide whether to compute random effect variances or not. Indicates up to which value the convergence result is accepted. The larger tolerance is, the stricter the test will be. 
See \code{\link[performance:check_singularity]{performance::check_singularity()}}.} } \value{ A list with following elements: \itemize{ \item \code{var.fixed}, variance attributable to the fixed effects \item \code{var.random}, (mean) variance of random effects \item \code{var.residual}, residual variance (sum of dispersion and distribution) \item \code{var.distribution}, distribution-specific variance \item \code{var.dispersion}, variance due to additive dispersion \item \code{var.intercept}, the random-intercept-variance, or between-subject-variance (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}) \item \code{var.slope}, the random-slope-variance (\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) \item \code{cor.slope_intercept}, the random-slope-intercept-correlation (\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) \item \code{cor.slopes}, the correlation between random slopes (\ifelse{html}{\out{ρ00}}{\eqn{\rho_{00}}}) } } \description{ This function extracts the different variance components of a mixed model and returns the result as list. Functions like \code{get_variance_residual(x)} or \code{get_variance_fixed(x)} are shortcuts for \code{get_variance(x, component = "residual")} etc. } \details{ This function returns different variance components from mixed models, which are needed, for instance, to calculate r-squared measures or the intraclass-correlation coefficient (ICC). \subsection{Fixed effects variance}{ The fixed effects variance, \ifelse{html}{\out{σ2f}}{\eqn{\sigma^2_f}}, is the variance of the matrix-multiplication \ifelse{html}{\out{β∗X}}{\eqn{\beta*X}} (parameter vector by model matrix). } \subsection{Random effects variance}{ The random effect variance, \ifelse{html}{\out{σ2i}}{\eqn{\sigma^2_i}}, represents the \emph{mean} random effect variance of the model. Since this variance reflect the "average" random effects variance for mixed models, it is also appropriate for models with more complex random effects structures, like random slopes or nested random effects. Details can be found in \cite{Johnson 2014}, in particular equation 10. For simple random-intercept models, the random effects variance equals the random-intercept variance. } \subsection{Distribution-specific variance}{ The distribution-specific variance, \ifelse{html}{\out{σ2d}}{\eqn{\sigma^2_d}}, depends on the model family. For Gaussian models, it is \ifelse{html}{\out{σ2}}{\eqn{\sigma^2}} (i.e. \code{sigma(model)^2}). For models with binary outcome, it is \eqn{\pi^2 / 3} for logit-link, \code{1} for probit-link, and \eqn{\pi^2 / 6} for cloglog-links. Models from Gamma-families use \eqn{\mu^2} (as obtained from \code{family$variance()}). For all other models, the distribution-specific variance is based on lognormal approximation, \eqn{log(1 + var(x) / \mu^2)} (see \cite{Nakagawa et al. 2017}). The expected variance of a zero-inflated model is computed according to \cite{Zuur et al. 2012, p277}. } \subsection{Variance for the additive overdispersion term}{ The variance for the additive overdispersion term, \ifelse{html}{\out{σ2e}}{\eqn{\sigma^2_e}}, represents \dQuote{the excess variation relative to what is expected from a certain distribution} (Nakagawa et al. 2017). In (most? many?) cases, this will be \code{0}. } \subsection{Residual variance}{ The residual variance, \ifelse{html}{\out{σ2ε}}{\eqn{\sigma^2_\epsilon}}, is simply \ifelse{html}{\out{σ2d + σ2e}}{\eqn{\sigma^2_d + \sigma^2_e}}. 
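As a rough sketch of how these components relate in practice (assuming \pkg{lme4} is available, as in the 'Examples' below): \preformatted{
m <- lme4::lmer(Reaction ~ Days + (1 + Days | Subject), data = lme4::sleepstudy)
v <- get_variance(m)

# the residual variance is the sum of the distribution-specific
# and the dispersion component
all.equal(v$var.residual, v$var.distribution + v$var.dispersion)
}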
} \subsection{Random intercept variance}{ The random intercept variance, or \emph{between-subject} variance (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}), is obtained from \code{VarCorr()}. It indicates how much groups or subjects differ from each other, while the residual variance \ifelse{html}{\out{σ2ε}}{\eqn{\sigma^2_\epsilon}} indicates the \emph{within-subject variance}. } \subsection{Random slope variance}{ The random slope variance (\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) is obtained from \code{VarCorr()}. This measure is only available for mixed models with random slopes. } \subsection{Random slope-intercept correlation}{ The random slope-intercept correlation (\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) is obtained from \code{VarCorr()}. This measure is only available for mixed models with random intercepts and slopes. } } \note{ This function supports models of class \code{merMod} (including models from \pkg{blme}), \code{clmm}, \code{cpglmm}, \code{glmmadmb}, \code{glmmTMB}, \code{MixMod}, \code{lme}, \code{mixed}, \code{rlmerMod}, \code{stanreg}, \code{brmsfit} or \code{wbm}. Support for objects of class \code{MixMod} (\pkg{GLMMadaptive}), \code{lme} (\pkg{nlme}) or \code{brmsfit} (\pkg{brms}) is experimental and may not work for all models. } \examples{ \dontrun{ library(lme4) data(sleepstudy) m <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) get_variance(m) get_variance_fixed(m) get_variance_residual(m) } } \references{ \itemize{ \item Johnson, P. C. D. (2014). Extension of Nakagawa & Schielzeth’s R2 GLMM to random slopes models. Methods in Ecology and Evolution, 5(9), 944–946. \doi{10.1111/2041-210X.12225} \item Nakagawa, S., Johnson, P. C. D., & Schielzeth, H. (2017). The coefficient of determination R2 and intra-class correlation coefficient from generalized linear mixed-effects models revisited and expanded. Journal of The Royal Society Interface, 14(134), 20170213. \doi{10.1098/rsif.2017.0213} \item Zuur, A. F., Savel'ev, A. A., & Ieno, E. N. (2012). Zero inflated models and generalized linear mixed models with R. Newburgh, United Kingdom: Highland Statistics. } } insight/man/find_variables.Rd0000644000175000017500000000613514120175277016076 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_variables.R \name{find_variables} \alias{find_variables} \title{Find names of all variables} \usage{ find_variables( x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "instruments", "smooth_terms"), flatten = FALSE, verbose = TRUE ) } \arguments{ \item{x}{A fitted model.} \item{effects}{Should variables for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. 
Duplicated values are removed.} \item{verbose}{Toggle warnings.} } \value{ A list with (depending on the model) following elements (character vectors): \itemize{ \item \code{response}, the name of the response variable \item \code{conditional}, the names of the predictor variables from the \emph{conditional} model (as opposed to the zero-inflated part of a model) \item \code{cluster}, the names of cluster or grouping variables \item \code{dispersion}, the name of the dispersion terms \item \code{instruments}, the names of instrumental variables \item \code{random}, the names of the random effects (grouping factors) \item \code{zero_inflated}, the names of the predictor variables from the \emph{zero-inflated} part of the model \item \code{zero_inflated_random}, the names of the random effects (grouping factors) } } \description{ Returns a list with the names of all variables, including response value and random effects. } \note{ The difference to \code{\link[=find_terms]{find_terms()}} is that \code{find_variables()} returns each variable name only once, while \code{find_terms()} may return a variable multiple times in case of transformations or when arithmetic expressions were used in the formula. } \examples{ if (require("lme4")) { data(cbpp) data(sleepstudy) # some data preparation... cbpp$trials <- cbpp$size - cbpp$incidence sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } m1 <- glmer( cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial ) find_variables(m1) m2 <- lmer( Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy ) find_variables(m2) find_variables(m2, flatten = TRUE) } } insight/man/get_intercept.Rd0000644000175000017500000000143514077615666015774 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_intercept.R \name{get_intercept} \alias{get_intercept} \title{Get the value at the intercept} \usage{ get_intercept(x, ...) } \arguments{ \item{x}{A model.} \item{...}{Not used.} } \value{ The value of the intercept. } \description{ Returns the value at the intercept (i.e., the intercept parameter), and \code{NA} if there isn't one. } \examples{ get_intercept(lm(Sepal.Length ~ Petal.Width, data = iris)) get_intercept(lm(Sepal.Length ~ 0 + Petal.Width, data = iris)) if (require("lme4")) { get_intercept(lme4::lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris)) } if (require("gamm4")) { get_intercept(gamm4::gamm4(Sepal.Length ~ s(Petal.Width), data = iris)) } } insight/man/to_numeric.Rd0000644000175000017500000000072314164615020015260 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datawizard.R \name{to_numeric} \alias{to_numeric} \title{Convert to Numeric (if possible)} \usage{ to_numeric(x) } \arguments{ \item{x}{A vector to be converted.} } \value{ Numeric vector (if possible) } \description{ Tries to convert vector to numeric if possible (if no warnings or errors). Otherwise, leaves it as is. 
} \examples{ to_numeric(c("1", "2")) to_numeric(c("1", "2", "A")) } insight/man/standardize_column_order.Rd0000644000175000017500000000342314151371005020172 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/standardize_column_order.R \name{standardize_column_order} \alias{standardize_column_order} \alias{standardize_column_order.parameters_model} \title{Standardize column order} \usage{ standardize_column_order(data, ...) \method{standardize_column_order}{parameters_model}(data, style = c("easystats", "broom"), ...) } \arguments{ \item{data}{A data frame. In particular, objects from \emph{easystats} package functions like \code{\link[parameters:model_parameters]{parameters::model_parameters()}} or \code{\link[effectsize:effectsize]{effectsize::effectsize()}} are accepted, but also data frames returned by \code{broom::tidy()} are valid objects.} \item{...}{Currently not used.} \item{style}{Standardization can either be based on the naming conventions from the \href{https://easystats.github.io/easystats/}{easystats-project}, or on \pkg{broom}'s naming scheme.} } \value{ A data frame, with standardized column order. } \description{ Standardizes order of columns for dataframes and other objects from \emph{easystats} and \emph{broom} ecosystem packages. } \examples{ # easystats conventions df1 <- cbind.data.frame( CI_low = -2.873, t = 5.494, CI_high = -1.088, p = 0.00001, Parameter = -1.980, CI = 0.95, df = 29.234, Method = "Student's t-test" ) standardize_column_order(df1, style = "easystats") # broom conventions df2 <- cbind.data.frame( conf.low = -2.873, statistic = 5.494, conf.high = -1.088, p.value = 0.00001, estimate = -1.980, conf.level = 0.95, df = 29.234, method = "Student's t-test" ) standardize_column_order(df2, style = "broom") } insight/man/get_family.Rd0000644000175000017500000000130114021623400015216 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_family.R \name{get_family} \alias{get_family} \title{A robust alternative to stats::family} \usage{ get_family(x, ...) } \arguments{ \item{x}{A statistical model.} \item{...}{Further arguments passed to methods.} } \description{ A robust and resilient alternative to \code{stats::family}. To avoid issues with models like \code{gamm4}. } \examples{ data(mtcars) x <- glm(vs ~ wt, data = mtcars, family = "binomial") get_family(x) if (require("mgcv")) { x <- mgcv::gamm( vs ~ am + s(wt), random = list(cyl = ~1), data = mtcars, family = "binomial" ) get_family(x) } } insight/man/find_statistic.Rd0000644000175000017500000000137614077615665016152 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_statistic.R \name{find_statistic} \alias{find_statistic} \title{Find statistic for model} \usage{ find_statistic(x, ...) } \arguments{ \item{x}{An object.} \item{...}{Currently not used.} } \value{ A character describing the type of statistic. If there is no statistic available with a distribution, \code{NULL} will be returned. } \description{ Returns the statistic for a regression model (\emph{t}-statistic, \emph{z}-statistic, etc.). Small helper that checks if a model is a regression model object and return the statistic used. 
} \examples{ # regression model object data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_statistic(m) } insight/man/find_response.Rd0000644000175000017500000000211514077615665015771 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_response.R \name{find_response} \alias{find_response} \title{Find name of the response variable} \usage{ find_response(x, combine = TRUE, ...) } \arguments{ \item{x}{A fitted model.} \item{combine}{Logical, if \code{TRUE} and the response is a matrix-column, the name of the response matches the notation in formula, and would for instance also contain patterns like \code{"cbind(...)"}. Else, the original variable names from the matrix-column are returned. See 'Examples'.} \item{...}{Currently not used.} } \value{ The name(s) of the response variable(s) from \code{x} as character vector, or \code{NULL} if response variable could not be found. } \description{ Returns the name(s) of the response variable(s) from a model object. } \examples{ if (require("lme4")) { data(cbpp) cbpp$trials <- cbpp$size - cbpp$incidence m <- glm(cbind(incidence, trials) ~ period, data = cbpp, family = binomial) find_response(m, combine = TRUE) find_response(m, combine = FALSE) } } insight/man/is_model_supported.Rd0000644000175000017500000000172314077615666017040 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_model_supported.R \name{is_model_supported} \alias{is_model_supported} \alias{supported_models} \title{Checks if an object is a regression model object supported in \pkg{insight} package.} \usage{ is_model_supported(x) supported_models() } \arguments{ \item{x}{An object.} } \value{ A logical, \code{TRUE} if \code{x} is a (supported) model object. } \description{ Small helper that checks if a model is a \emph{supported} (regression) model object. \code{supported_models()} prints a list of currently supported model classes. } \details{ This function returns \code{TRUE} if \code{x} is a model object that works with the package's functions. A list of supported models can also be found here: \url{https://github.com/easystats/insight}. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) is_model_supported(m) is_model_supported(mtcars) } insight/man/is_model.Rd0000644000175000017500000000161314077615666014731 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_model.R \name{is_model} \alias{is_model} \alias{is_regression_model} \title{Checks if an object is a regression model or statistical test object} \usage{ is_model(x) is_regression_model(x) } \arguments{ \item{x}{An object.} } \value{ A logical, \code{TRUE} if \code{x} is a (supported) model object. } \description{ Small helper that checks if a model is a regression model or a statistical object. \code{is_regression_model()} is stricter and only returns \code{TRUE} for regression models, but not for, e.g., \code{htest} objects. } \details{ This function returns \code{TRUE} if \code{x} is a model object. 
} \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) is_model(m) is_model(mtcars) test <- t.test(1:10, y = c(7:20)) is_model(test) is_regression_model(test) } insight/man/format_number.Rd0000644000175000017500000000136414077615665016000 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_number.R \name{format_number} \alias{format_number} \title{Convert number to words} \usage{ format_number(x, textual = TRUE, ...) } \arguments{ \item{x}{Number.} \item{textual}{Return words. If \code{FALSE}, will run \code{\link[=format_value]{format_value()}}.} \item{...}{Arguments to be passed to \code{\link[=format_value]{format_value()}} if \code{textual} is \code{FALSE}.} } \value{ A formatted string. } \description{ Convert number to words } \note{ The code has been adapted from https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r } \examples{ format_number(2) format_number(45) format_number(324.68765) } insight/man/get_parameters.zeroinfl.Rd0000644000175000017500000000307114077615666017767 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_zi.R \name{get_parameters.zeroinfl} \alias{get_parameters.zeroinfl} \alias{get_parameters.zcpglm} \alias{get_parameters.mhurdle} \title{Get model parameters from zero-inflated and hurdle models} \usage{ \method{get_parameters}{zeroinfl}( x, component = c("all", "conditional", "zi", "zero_inflated"), ... ) \method{get_parameters}{zcpglm}( x, component = c("all", "conditional", "zi", "zero_inflated"), ... ) \method{get_parameters}{mhurdle}( x, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ... ) } \arguments{ \item{x}{A fitted model.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so-called fixed-effects regressions). May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{...}{Currently not used.} } \value{ For models with smooth terms or zero-inflation component, a data frame with three columns: the parameter names, the related point estimates and the component. } \description{ Returns the coefficients from a model. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_parameters(m) } insight/man/get_deviance.Rd0000644000175000017500000000205114077615666015550 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_deviance.R \name{get_deviance} \alias{get_deviance} \alias{get_deviance.default} \title{Model Deviance} \usage{ get_deviance(x, ...) \method{get_deviance}{default}(x, verbose = TRUE, ...) } \arguments{ \item{x}{A model.} \item{...}{Not used.} \item{verbose}{Toggle warnings and messages.} } \value{ The model deviance. } \description{ Returns model deviance (see \code{stats::deviance()}). } \details{ For GLMMs of class \code{glmerMod}, \code{glmmTMB} or \code{MixMod}, the \emph{absolute unconditional} deviance is returned (see 'Details' in \code{?lme4::merMod-class}), i.e. minus twice the log-likelihood.
To get the \emph{relative conditional} deviance (relative to a saturated model, conditioned on the conditional modes of random effects), use \code{deviance()}. The value returned by \code{get_deviance()} usually equals the deviance value reported in the \code{summary()} output. } \examples{ data(mtcars) x <- lm(mpg ~ cyl, data = mtcars) get_deviance(x) } insight/man/get_varcov.Rd0000644000175000017500000000673714144234777015300 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_varcov.R \name{get_varcov} \alias{get_varcov} \alias{get_varcov.default} \alias{get_varcov.betareg} \alias{get_varcov.DirichletRegModel} \alias{get_varcov.clm2} \alias{get_varcov.truncreg} \alias{get_varcov.gamlss} \alias{get_varcov.hurdle} \alias{get_varcov.zcpglm} \alias{get_varcov.glmmTMB} \alias{get_varcov.MixMod} \alias{get_varcov.brmsfit} \alias{get_varcov.betamfx} \alias{get_varcov.aov} \alias{get_varcov.mixor} \title{Get variance-covariance matrix from models} \usage{ get_varcov(x, ...) \method{get_varcov}{default}(x, verbose = TRUE, ...) \method{get_varcov}{betareg}( x, component = c("conditional", "precision", "all"), verbose = TRUE, ... ) \method{get_varcov}{DirichletRegModel}( x, component = c("conditional", "precision", "all"), verbose = TRUE, ... ) \method{get_varcov}{clm2}(x, component = c("all", "conditional", "scale"), ...) \method{get_varcov}{truncreg}(x, component = c("conditional", "all"), ...) \method{get_varcov}{gamlss}(x, component = c("conditional", "all"), ...) \method{get_varcov}{hurdle}(x, component = c("conditional", "zero_inflated", "zi", "all"), ...) \method{get_varcov}{zcpglm}(x, component = c("conditional", "zero_inflated", "zi", "all"), ...) \method{get_varcov}{glmmTMB}( x, component = c("conditional", "zero_inflated", "zi", "dispersion", "all"), ... ) \method{get_varcov}{MixMod}( x, effects = c("fixed", "random"), component = c("conditional", "zero_inflated", "zi", "dispersion", "auxiliary", "all"), robust = FALSE, verbose = TRUE, ... ) \method{get_varcov}{brmsfit}(x, component = "conditional", ...) \method{get_varcov}{betamfx}(x, component = c("conditional", "precision", "all"), ...) \method{get_varcov}{aov}(x, complete = FALSE, ...) \method{get_varcov}{mixor}(x, effects = c("all", "fixed", "random"), ...) } \arguments{ \item{x}{A model.} \item{...}{Currently not used.} \item{verbose}{Toggle warnings.} \item{component}{Should the complete variance-covariance matrix of the model be returned, or only for specific model components (like count or zero-inflated model parts)? Applies to models with zero-inflated component, or models with precision (e.g. \code{betareg}) component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero_inflated"}, \code{"dispersion"}, \code{"precision"}, or \code{"all"}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{effects}{Should the complete variance-covariance matrix of the model be returned, or only for specific model parameters? Currently only applies to models of class \code{mixor}.} \item{robust}{Logical, if \code{TRUE}, returns a robust variance-covariance matrix using sandwich estimation.} \item{complete}{Logical, if \code{TRUE}, for \code{aov}, returns the full variance-covariance matrix.} } \value{ The variance-covariance matrix, as a \code{matrix} object.
} \description{ Returns the variance-covariance, as retrieved by \code{stats::vcov()}, but works for more model objects that probably don't provide a \code{vcov()}-method. } \note{ \code{get_varcov()} tries to return the nearest positive definite matrix in case of a negative variance-covariance matrix. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_varcov(m) } insight/man/format_p.Rd0000644000175000017500000000337614157046541014742 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_p.R \name{format_p} \alias{format_p} \title{p-values formatting} \usage{ format_p( p, stars = FALSE, stars_only = FALSE, whitespace = TRUE, name = "p", missing = "", decimal_separator = NULL, digits = 3, ... ) } \arguments{ \item{p}{value or vector of p-values.} \item{stars}{Add significance stars (e.g., p < .001***).} \item{stars_only}{Return only significance stars.} \item{whitespace}{Logical, if \code{TRUE} (default), preserves whitespaces. Else, all whitespace characters are removed from the returned string.} \item{name}{Name prefixing the text. Can be \code{NULL}.} \item{missing}{Value by which \code{NA} values are replaced. By default, an empty string (i.e. \code{""}) is returned for \code{NA}.} \item{decimal_separator}{Character, if not \code{NULL}, will be used as decimal separator.} \item{digits}{Number of significant digits. May also be \code{"scientific"} to return exact p-values in scientific notation, or \code{"apa"} to use an APA 7th edition-style for p-values (equivalent to \code{digits = 3}). If \code{"scientific"}, control the number of digits by adding the value as a suffix, e.g.m \code{digits = "scientific4"} to have scientific notation with 4 decimal places.} \item{...}{Arguments from other methods.} } \value{ A formatted string. } \description{ Format p-values. } \examples{ format_p(c(.02, .065, 0, .23)) format_p(c(.02, .065, 0, .23), name = NULL) format_p(c(.02, .065, 0, .23), stars_only = TRUE) model <- lm(mpg ~ wt + cyl, data = mtcars) p <- coef(summary(model))[, 4] format_p(p, digits = "apa") format_p(p, digits = "scientific") format_p(p, digits = "scientific2") } insight/man/find_predictors.Rd0000644000175000017500000000525714101704662016303 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_predictors.R \name{find_predictors} \alias{find_predictors} \alias{find_predictors.default} \title{Find names of model predictors} \usage{ find_predictors(x, ...) \method{find_predictors}{default}( x, effects = c("fixed", "random", "all"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "instruments", "correlation", "smooth_terms"), flatten = FALSE, verbose = TRUE, ... ) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{effects}{Should variables for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. 
Duplicated values are removed.} \item{verbose}{Toggle warnings.} } \value{ A list of character vectors that represent the name(s) of the predictor variables. Depending on the combination of the arguments \code{effects} and \code{component}, the returned list has following elements: \itemize{ \item \code{conditional}, the "fixed effects" terms from the model \item \code{random}, the "random effects" terms from the model \item \code{zero_inflated}, the "fixed effects" terms from the zero-inflation component of the model \item \code{zero_inflated_random}, the "random effects" terms from the zero-inflation component of the model \item \code{dispersion}, the dispersion terms \item \code{instruments}, for fixed-effects regressions like \code{ivreg}, \code{felm} or \code{plm}, the instrumental variables \item \code{correlation}, for models with correlation-component like \code{gls}, the variables used to describe the correlation structure } } \description{ Returns the names of the predictor variables for the different parts of a model (like fixed or random effects, zero-inflated component, ...). Unlike \code{\link[=find_parameters]{find_parameters()}}, the names from \code{find_predictors()} match the original variable names from the data that was used to fit the model. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_predictors(m) } insight/man/export_table.Rd0000644000175000017500000001442614163102430015604 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/export_table.R \name{export_table} \alias{export_table} \title{Data frame and Tables Pretty Formatting} \usage{ export_table( x, sep = " | ", header = "-", empty_line = NULL, digits = 2, protect_integers = TRUE, missing = "", width = NULL, format = NULL, title = NULL, caption = title, subtitle = NULL, footer = NULL, align = NULL, group_by = NULL, zap_small = FALSE, table_width = NULL, verbose = TRUE ) } \arguments{ \item{x}{A data frame. May also be a list of data frames, to export multiple data frames into multiple tables.} \item{sep}{Column separator.} \item{header}{Header separator. Can be \code{NULL}.} \item{empty_line}{Separator used for empty lines. If \code{NULL}, line remains empty (i.e. filled with whitespaces).} \item{digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. \code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{protect_integers}{Should integers be kept as integers (i.e., without decimals)?} \item{missing}{Value by which \code{NA} values are replaced. By default, an empty string (i.e. \code{""}) is returned for \code{NA}.} \item{width}{Refers to the width of columns (with numeric values). Can be either \code{NULL}, a number or a named numeric vector. If \code{NULL}, the width for each column is adjusted to the minimum required width. If a number, columns with numeric values will have the minimum width specified in \code{width}. If a named numeric vector, value names are matched against column names, and for each match, the specified width is used (see 'Examples'). Only applies to text-format (see \code{format}).} \item{format}{Name of output-format, as string. If \code{NULL} (or \code{"text"}), returned output is used for basic printing. 
Can be one of \code{NULL} (the default) resp. \code{"text"} for plain text, \code{"markdown"} (or \code{"md"}) for markdown and \code{"html"} for HTML output.} \item{title, caption, subtitle}{Table title (same as caption) and subtitle, as strings. If \code{NULL}, no title or subtitle is printed, unless it is stored as attributes (\code{table_title}, or its alias \code{table_caption}, and \code{table_subtitle}). If \code{x} is a list of data frames, \code{caption} may be a list of table captions, one for each table.} \item{footer}{Table footer, as string. For markdown-formatted tables, table footers, due to the limitation in markdown rendering, are actually just a new text line under the table. If \code{x} is a list of data frames, \code{footer} may be a list of table captions, one for each table.} \item{align}{Column alignment. For markdown-formatted tables, the default \code{align = NULL} will right-align numeric columns, while all other columns will be left-aligned. If \code{format = "html"}, the default is left-align first column and center all remaining. May be a string to indicate alignment rules for the complete table, like \code{"left"}, \code{"right"}, \code{"center"} or \code{"firstleft"} (to left-align first column, center remaining); or maybe a string with abbreviated alignment characters, where the length of the string must equal the number of columns, for instance, \code{align = "lccrl"} would left-align the first column, center the second and third, right-align column four and left-align the fifth column. For HTML-tables, may be one of \code{"center"}, \code{"left"} or \code{"right"}.} \item{group_by}{Name of column in \code{x} that indicates grouping for tables. Only applies when \code{format = "html"}. \code{group_by} is passed down to \code{gt::gt(groupname_col = group_by)}.} \item{zap_small}{Logical, if \code{TRUE}, small values are rounded after \code{digits} decimal places. If \code{FALSE}, values with more decimal places than \code{digits} are printed in scientific notation.} \item{table_width}{Numeric, or \code{"auto"}, indicating the width of the complete table. If \code{table_width = "auto"} and the table is wider than the current width (i.e. line length) of the console (or any other source for textual output, like markdown files), the table is split into two parts. Else, if \code{table_width} is numeric and table rows are larger than \code{table_width}, the table is split into two parts.} \item{verbose}{Toggle messages and warnings.} } \value{ A data frame in character format. } \description{ Data frame and Tables Pretty Formatting } \note{ The values for \code{caption}, \code{subtitle} and \code{footer} can also be provided as attributes of \code{x}, e.g. if \code{caption = NULL} and \code{x} has attribute \code{table_caption}, the value for this attribute will be used as table caption. \code{table_subtitle} is the attribute for \code{subtitle}, and \code{table_footer} for \code{footer}. 
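For instance, a minimal sketch of this attribute-based approach (plain strings are assumed here; the 'Examples' below show colored, multi-part footers): \preformatted{
x <- head(iris)
attr(x, "table_caption") <- "First rows of the iris data"
attr(x, "table_footer") <- "A short footer line"
export_table(x)
}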
} \examples{ export_table(head(iris)) export_table(head(iris), sep = " ", header = "*", digits = 1) # split longer tables export_table(head(iris), table_width = 30) \dontrun{ # colored footers data(iris) x <- as.data.frame(iris[1:5, ]) attr(x, "table_footer") <- c("This is a yellow footer line.", "yellow") export_table(x) attr(x, "table_footer") <- list( c("\nA yellow line", "yellow"), c("\nAnd a red line", "red"), c("\nAnd a blue line", "blue") ) export_table(x) attr(x, "table_footer") <- list( c("Without the ", "yellow"), c("new-line character ", "red"), c("we can have multiple colors per line.", "blue") ) export_table(x) } # column-width d <- data.frame( x = c(1, 2, 3), y = c(100, 200, 300), z = c(10000, 20000, 30000) ) export_table(d) export_table(d, width = 8) export_table(d, width = c(x = 5, z = 10)) export_table(d, width = c(x = 5, y = 5, z = 10), align = "lcr") } \seealso{ Vignettes \href{https://easystats.github.io/insight/articles/display.html}{Formatting, printing and exporting tables} and \href{https://easystats.github.io/parameters/articles/model_parameters_formatting.html}{Formatting model parameters}. } insight/man/data_relocate.Rd0000644000175000017500000000207114164615020015701 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datawizard.R \name{data_relocate} \alias{data_relocate} \title{Relocate (reorder) columns of a data frame} \usage{ data_relocate(data, cols, before = NULL, after = NULL, safe = TRUE, ...) } \arguments{ \item{data}{A data frame to pivot.} \item{cols}{A character vector indicating the names of the columns to move.} \item{before, after}{Destination of columns. Supplying neither will move columns to the left-hand side; specifying both is an error.} \item{safe}{If \code{TRUE}, will disregard non-existing columns.} \item{...}{Additional arguments passed on to methods.} } \value{ A data frame with reordered columns. } \description{ Relocate (reorder) columns of a data frame } \examples{ # Reorder columns head(data_relocate(iris, cols = "Species", before = "Sepal.Length")) head(data_relocate(iris, cols = "Species", before = "Sepal.Width")) head(data_relocate(iris, cols = "Sepal.Width", after = "Species")) head(data_relocate(iris, cols = c("Species", "Petal.Length"), after = "Sepal.Width")) } insight/man/get_parameters.BGGM.Rd0000644000175000017500000001264514077615666016662 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_bayesian.R \name{get_parameters.BGGM} \alias{get_parameters.BGGM} \alias{get_parameters.MCMCglmm} \alias{get_parameters.BFBayesFactor} \alias{get_parameters.stanmvreg} \alias{get_parameters.brmsfit} \alias{get_parameters.stanreg} \alias{get_parameters.bayesx} \alias{get_parameters.bamlss} \alias{get_parameters.sim.merMod} \alias{get_parameters.sim} \title{Get model parameters from Bayesian models} \usage{ \method{get_parameters}{BGGM}( x, component = c("correlation", "conditional", "intercept", "all"), summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{MCMCglmm}( x, effects = c("fixed", "random", "all"), summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{BFBayesFactor}( x, effects = c("all", "fixed", "random"), component = c("all", "extra"), iterations = 4000, progress = FALSE, verbose = TRUE, summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{stanmvreg}( x, effects = c("fixed", "random", "all"), parameters = NULL, summary = FALSE, centrality = "mean", ... 
) \method{get_parameters}{brmsfit}( x, effects = "fixed", component = "all", parameters = NULL, summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{stanreg}( x, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{bayesx}( x, component = c("conditional", "smooth_terms", "all"), summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{bamlss}( x, component = c("all", "conditional", "smooth_terms", "location", "distributional", "auxiliary"), parameters = NULL, summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{sim.merMod}( x, effects = c("fixed", "random", "all"), parameters = NULL, summary = FALSE, centrality = "mean", ... ) \method{get_parameters}{sim}(x, parameters = NULL, summary = FALSE, centrality = "mean", ...) } \arguments{ \item{x}{A fitted model.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables, or marginal effects. Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so-called fixed-effects regressions), or models with marginal effects from \pkg{mfx}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned.} \item{summary}{Logical, indicates whether the full posterior samples (\code{summary = FALSE}) or the summarized centrality indices of the posterior samples (\code{summary = TRUE}) should be returned as estimates.} \item{centrality}{Only for models with posterior samples, and when \code{summary = TRUE}. In this case, \code{centrality = "mean"} would calculate means of posterior samples for each parameter, while \code{centrality = "median"} would use the more robust median value as the measure of central tendency.} \item{...}{Currently not used.} \item{effects}{Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{iterations}{Number of posterior draws.} \item{progress}{Display progress.} \item{verbose}{Toggle messages and warnings.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned.} } \value{ The posterior samples from the requested parameters as a data frame. If \code{summary = TRUE}, returns a data frame with two columns: the parameter names and the related point estimates (based on \code{centrality}). } \description{ Returns the coefficients (or posterior samples for Bayesian models) from a model. } \details{ In most cases when models either return different "effects" (fixed, random) or "components" (conditional, zero-inflated, ...), the arguments \code{effects} and \code{component} can be used.
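As an illustrative sketch (not run here; it assumes an internet connection and uses the pre-compiled \code{"brms_zi_2"} model from the \emph{circus} repository, see \code{\link[=download_model]{download_model()}}):
\preformatted{
model <- download_model("brms_zi_2")
# posterior samples of the fixed effects from the conditional (count) component
get_parameters(model, effects = "fixed", component = "conditional")
# summarized posterior (medians) instead of full samples
get_parameters(model, summary = TRUE, centrality = "median")
}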
} \section{BFBayesFactor Models}{ Note that for \code{BFBayesFactor} models (from the \pkg{BayesFactor} package), posteriors are only extracted from the first numerator model (i.e., \code{model[1]}). If you want to apply some function \code{foo()} to another model stored in the \code{BFBayesFactor} object, index it directly, e.g. \code{foo(model[2])}, \code{foo(1/model[5])}, etc. See also \code{\link[bayestestR:weighted_posteriors]{bayestestR::weighted_posteriors()}}. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_parameters(m) } insight/man/get_sigma.Rd0000644000175000017500000000515014077615666015075 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_sigma.R \name{get_sigma} \alias{get_sigma} \title{Get residual standard deviation from models} \usage{ get_sigma(x, ci = NULL, verbose = TRUE) } \arguments{ \item{x}{A model.} \item{ci}{Scalar, the CI level. The default (\code{NULL}) returns no CI.} \item{verbose}{Toggle messages and warnings.} } \value{ The residual standard deviation (sigma), or \code{NULL} if this information could not be accessed. } \description{ Returns \code{sigma}, which corresponds to the estimated standard deviation of the residuals. This function extends the \code{sigma()} base R generic for models that do not implement it. It also computes the confidence interval (CI), which is stored as an attribute. Sigma is a key component of regression models, and part of the so-called auxiliary parameters that are estimated. Indeed, linear models, for instance, assume that the residuals come from a normal distribution with mean 0 and standard deviation \code{sigma}. See the details section below for more information about its interpretation and calculation. } \details{ \subsection{Interpretation of Sigma}{ The residual standard deviation, \ifelse{html}{\out{σ}}{\eqn{\sigma}}, indicates that the predicted outcome will be within +/- \ifelse{html}{\out{σ}}{\eqn{\sigma}} units of the linear predictor for approximately \verb{68\%} of the data points (\cite{Gelman, Hill & Vehtari 2020, p.84}). In other words, the residual standard deviation indicates how accurately the model predicts scores, thus it can be thought of as \dQuote{a measure of the average distance each observation falls from its prediction from the model} (\cite{Gelman, Hill & Vehtari 2020, p.168}). \ifelse{html}{\out{σ}}{\eqn{\sigma}} can be considered as a measure of the unexplained variation in the data, or of the precision of inferences about regression coefficients. } \subsection{Calculation of Sigma}{ By default, \code{get_sigma()} tries to extract sigma by calling \code{stats::sigma()}. If the model-object has no \code{sigma()} method, the next step is calculating sigma as the square root of the model deviance divided by the residual degrees of freedom. Finally, if even this approach fails, and \code{x} is a mixed model, the residual standard deviation is accessed using the square root of \code{get_variance_residual()}. } } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_sigma(m) } \references{ Gelman, A., Hill, J., & Vehtari, A. (2020). Regression and Other Stories. Cambridge University Press.
} insight/man/get_predicted_ci.Rd0000644000175000017500000001117014155570474016404 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_predicted_ci.R \name{get_predicted_ci} \alias{get_predicted_ci} \alias{get_predicted_ci.default} \title{Confidence and Prediction Interval for Model Predictions} \usage{ get_predicted_ci(x, predictions = NULL, ...) \method{get_predicted_ci}{default}( x, predictions = NULL, data = NULL, ci = 0.95, ci_type = "confidence", vcov_estimation = NULL, vcov_type = NULL, vcov_args = NULL, dispersion_method = "sd", ci_method = "quantile", ... ) } \arguments{ \item{x}{A statistical model (can also be a data.frame, in which case the second argument has to be a model).} \item{predictions}{A vector of predicted values (as obtained by \code{stats::fitted()}, \code{stats::predict()} or \code{\link[=get_predicted]{get_predicted()}}).} \item{...}{Not used for now.} \item{data}{An optional data frame in which to look for variables with which to predict. If omitted, the data used to fit the model is used.} \item{ci}{The interval level (default \code{0.95}, i.e., \verb{95\%} CI).} \item{ci_type}{Can be \code{"prediction"} or \code{"confidence"}. Prediction intervals show the range that likely contains the value of a new observation (in what range it would fall), whereas confidence intervals reflect the uncertainty around the estimated parameters (and give the range of the link; for instance, of the regression line in a linear regression). Prediction intervals account for both the uncertainty in the model's parameters and the random variation of the individual values. Thus, prediction intervals are always wider than confidence intervals. Moreover, prediction intervals will not necessarily become narrower as the sample size increases (as they do not reflect only the quality of the fit). This applies mostly to "simple" linear models (like \code{lm}), as for other models (e.g., \code{glm}), prediction intervals are somewhat useless (for instance, for a binomial model for which the dependent variable is a vector of 1s and 0s, the prediction interval is... \verb{[0, 1]}).} \item{vcov_estimation}{Either a matrix, or a string, indicating the suffix of the \verb{vcov*()}-function from the \pkg{sandwich} or \pkg{clubSandwich} package, e.g. \code{vcov_estimation = "CL"} (which calls \code{\link[sandwich:vcovCL]{sandwich::vcovCL()}} to compute clustered covariance matrix estimators), or \code{vcov_estimation = "HC"} (which calls \code{\link[sandwich:vcovHC]{sandwich::vcovHC()}} to compute heteroskedasticity-consistent covariance matrix estimators).} \item{vcov_type}{Character vector, specifying the estimation type for the robust covariance matrix estimation (see \code{\link[sandwich:vcovHC]{sandwich::vcovHC()}} or \code{clubSandwich::vcovCR()} for details). Only applies if \code{vcov_estimation} is a string, and not a matrix.} \item{vcov_args}{List of named vectors, used as additional arguments that are passed down to the \pkg{sandwich}-function specified in \code{vcov_estimation}. Only applies if \code{vcov_estimation} is a string, and not a matrix.} \item{dispersion_method, ci_method}{These arguments are only used in the context of bootstrapped and Bayesian models. Possible values are \code{dispersion_method = c("sd", "mad")} and \code{ci_method = c("quantile", "hdi", "eti")}. For the latter, the \pkg{bayestestR} package is required.} } \value{ The Confidence (or Prediction) Interval (CI).
} \description{ Returns the Confidence (or Prediction) Interval (CI) associated with predictions made by a model. } \examples{ data(mtcars) # Linear model # ------------ x <- lm(mpg ~ cyl + hp, data = mtcars) predictions <- predict(x) ci_vals <- get_predicted_ci(x, predictions, ci_type = "prediction") head(ci_vals) ci_vals <- get_predicted_ci(x, predictions, ci_type = "confidence") head(ci_vals) ci_vals <- get_predicted_ci(x, predictions, ci = c(0.8, 0.9, 0.95)) head(ci_vals) # Bootstrapped # ------------ predictions <- get_predicted(x, iterations = 500) get_predicted_ci(x, predictions) if (require("datawizard")) { ci_vals <- get_predicted_ci(x, predictions, ci = c(0.80, 0.95)) head(ci_vals) datawizard::reshape_ci(ci_vals) ci_vals <- get_predicted_ci(x, predictions, dispersion_method = "MAD", ci_method = "HDI" ) head(ci_vals) } # Logistic model # -------------- x <- glm(vs ~ wt, data = mtcars, family = "binomial") predictions <- predict(x, type = "link") ci_vals <- get_predicted_ci(x, predictions, ci_type = "prediction") head(ci_vals) ci_vals <- get_predicted_ci(x, predictions, ci_type = "confidence") head(ci_vals) } insight/man/find_weights.Rd0000644000175000017500000000121014077615665015600 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_weights.R \name{find_weights} \alias{find_weights} \title{Find names of model weights} \usage{ find_weights(x, ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} } \value{ The name of the weighting variable as character vector, or \code{NULL} if no weights were specified. } \description{ Returns the name of the variable that describes the weights of a model. } \examples{ data(mtcars) mtcars$weight <- rnorm(nrow(mtcars), 1, .3) m <- lm(mpg ~ wt + cyl + vs, data = mtcars, weights = weight) find_weights(m) } insight/man/find_smooth.Rd0000644000175000017500000000124013763646773015447 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_smooth.R \name{find_smooth} \alias{find_smooth} \title{Find smooth terms from a model object} \usage{ find_smooth(x, flatten = FALSE) } \arguments{ \item{x}{A (gam) model.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} } \value{ A character vector with the name(s) of the smooth terms. } \description{ Return the names of smooth terms from a model object. } \examples{ if (require("mgcv")) { data(iris) model <- gam(Petal.Length ~ Petal.Width + s(Sepal.Length), data = iris) find_smooth(model) } } insight/man/get_parameters.betamfx.Rd0000644000175000017500000000244014077615666017564 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_mfx.R \name{get_parameters.betamfx} \alias{get_parameters.betamfx} \alias{get_parameters.logitmfx} \title{Get model parameters from marginal effects models} \usage{ \method{get_parameters}{betamfx}( x, component = c("all", "conditional", "precision", "marginal"), ... ) \method{get_parameters}{logitmfx}(x, component = c("all", "conditional", "marginal"), ...) } \arguments{ \item{x}{A fitted model.} \item{component}{Should all predictor variables, predictor variables for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). 
May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} \item{...}{Currently not used.} } \value{ A data frame with three columns: the parameter names, the related point estimates and the component. } \description{ Returns the coefficients from a model. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_parameters(m) } insight/man/find_terms.Rd0000644000175000017500000000373414100062306015246 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_terms.R \name{find_terms} \alias{find_terms} \title{Find all model terms} \usage{ find_terms(x, flatten = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{A fitted model.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{verbose}{Toggle warnings.} \item{...}{Currently not used.} } \value{ A list with (depending on the model) following elements (character vectors): \itemize{ \item \code{response}, the name of the response variable \item \code{conditional}, the names of the predictor variables from the \emph{conditional} model (as opposed to the zero-inflated part of a model) \item \code{random}, the names of the random effects (grouping factors) \item \code{zero_inflated}, the names of the predictor variables from the \emph{zero-inflated} part of the model \item \code{zero_inflated_random}, the names of the random effects (grouping factors) \item \code{dispersion}, the name of the dispersion terms \item \code{instruments}, the names of instrumental variables } Returns \code{NULL} if no terms could be found (for instance, due to problems in accessing the formula). } \description{ Returns a list with the names of all terms, including response value and random effects, "as is". This means that on-the-fly transformations or arithmetic expressions like \code{log()}, \code{I()}, \code{as.factor()} etc. are preserved. } \note{ The difference to \code{\link[=find_variables]{find_variables()}} is that \code{find_terms()} may return a variable multiple times in case of multiple transformations (see examples below), while \code{find_variables()} returns each variable name only once. } \examples{ if (require("lme4")) { data(sleepstudy) m <- lmer( log(Reaction) ~ Days + I(Days^2) + (1 + Days + exp(Days) | Subject), data = sleepstudy ) find_terms(m) } } insight/man/download_model.Rd0000644000175000017500000000170414077615665016125 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/download_model.R \name{download_model} \alias{download_model} \title{Download circus models} \usage{ download_model(name, url = NULL) } \arguments{ \item{name}{Model name.} \item{url}{String with the URL from where to download the model data. Optional, and should only be used in case the repository-URL is changing. By default, models are downloaded from \verb{https://raw.github.com/easystats/circus/master/data/}.} } \value{ A model from the \emph{circus}-repository. } \description{ Downloads pre-compiled models from the \emph{circus}-repository. The \emph{circus}-repository contains a variety of fitted models to help the systematic testing of other packages. } \details{ The code that generated the model is available at \url{https://easystats.github.io/circus/reference/index.html}.
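A minimal usage sketch (not run by default, as it requires an internet connection; \code{"brms_zi_2"} is one of the pre-compiled models used elsewhere in this documentation):
\preformatted{
# download a pre-compiled example model from the circus repository
model <- download_model("brms_zi_2")
model
}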
} \references{ \url{https://easystats.github.io/circus/} } insight/man/n_obs.Rd0000644000175000017500000000353714142011662014220 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_obs.R \name{n_obs} \alias{n_obs} \alias{n_obs.glm} \alias{n_obs.svyolr} \alias{n_obs.afex_aov} \alias{n_obs.stanmvreg} \title{Get number of observations from a model} \usage{ n_obs(x, ...) \method{n_obs}{glm}(x, disaggregate = FALSE, ...) \method{n_obs}{svyolr}(x, weighted = FALSE, ...) \method{n_obs}{afex_aov}(x, shape = c("long", "wide"), ...) \method{n_obs}{stanmvreg}(x, select = NULL, ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{disaggregate}{For binomial models with aggregated data, \code{n_obs()} returns the number of data rows by default. If \code{disaggregate = TRUE}, the total number of trials is returned instead (determined by summing the results of \code{weights()} for aggregated data, which will be either the weights input for proportion success response or the row sums of the response matrix if matrix response, see 'Examples').} \item{weighted}{For survey designs, returns the weighted sample size.} \item{shape}{Return long or wide data? Only applicable in repeated measures designs.} \item{select}{Optional name(s) of response variables for which to extract values. Can be used in case of regression models with multiple response variables.} } \value{ The number of observations used to fit the model, or \code{NULL} if this information is not available. } \description{ This method returns the number of observations that were used to fit the model, as a numeric value. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) n_obs(m) if (require("lme4")) { data(cbpp, package = "lme4") m <- glm( cbind(incidence, size - incidence) ~ period, data = cbpp, family = binomial(link = "logit") ) n_obs(m) n_obs(m, disaggregate = TRUE) } } insight/man/has_intercept.Rd0000644000175000017500000000141214077731217015752 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/has_intercept.R \name{has_intercept} \alias{has_intercept} \title{Checks if model has an intercept} \usage{ has_intercept(x, verbose = TRUE) } \arguments{ \item{x}{A model object.} \item{verbose}{Toggle warnings.} } \value{ \code{TRUE} if \code{x} has an intercept, \code{FALSE} otherwise. } \description{ Checks if model has an intercept. } \examples{ model <- lm(mpg ~ 0 + gear, data = mtcars) has_intercept(model) model <- lm(mpg ~ gear, data = mtcars) has_intercept(model) if (require("lme4")) { model <- lmer(Reaction ~ 0 + Days + (Days | Subject), data = sleepstudy) has_intercept(model) model <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) has_intercept(model) } } insight/man/find_parameters.averaging.Rd0000644000175000017500000000547614077615665020245 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameters_other.R \name{find_parameters.averaging} \alias{find_parameters.averaging} \alias{find_parameters.betareg} \alias{find_parameters.DirichletRegModel} \alias{find_parameters.mjoint} \alias{find_parameters.glmx} \title{Find model parameters from models with special components} \usage{ \method{find_parameters}{averaging}(x, component = c("conditional", "full"), flatten = FALSE, ...) \method{find_parameters}{betareg}( x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), flatten = FALSE, ...
) \method{find_parameters}{DirichletRegModel}( x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), flatten = FALSE, ... ) \method{find_parameters}{mjoint}( x, component = c("all", "conditional", "survival"), flatten = FALSE, ... ) \method{find_parameters}{glmx}( x, component = c("all", "conditional", "extra"), flatten = FALSE, ... ) } \arguments{ \item{x}{A fitted model.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables, or marginal effects. Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so-called fixed-effects regressions), or models with marginal effects from \pkg{mfx}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{...}{Currently not used.} } \value{ A list of parameter names. The returned list may have following elements: \itemize{ \item \code{conditional}, the "fixed effects" part from the model. \item \code{full}, parameters from the full model. } } \description{ Returns the names of model parameters, as they typically appear in the \code{summary()} output. } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_parameters(m) } insight/man/data_match.Rd0000644000175000017500000000135614164615020015204 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datawizard.R \name{data_match} \alias{data_match} \title{Find row indices of a data frame matching a specific condition} \usage{ data_match(x, to, ...) } \arguments{ \item{x}{A data frame.} \item{to}{A data frame matching the specified conditions.} \item{...}{Other arguments passed to or from other functions.} } \value{ A data frame containing rows that match the specified configuration. } \description{ Find row indices of a data frame that match a specific condition. } \examples{ matching_rows <- data_match(mtcars, data.frame(vs = 0, am = 1)) mtcars[matching_rows, ] matching_rows <- data_match(mtcars, data.frame(vs = 0, am = c(0, 1))) mtcars[matching_rows, ] } insight/man/get_call.Rd0000644000175000017500000000103313775005507014673 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_call.R \name{get_call} \alias{get_call} \title{Get the model's function call} \usage{ get_call(x) } \arguments{ \item{x}{A fitted model.} } \value{ A function call. } \description{ Returns the model's function call when available.
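As a small sketch of how the returned call can be inspected or re-used (assuming the model data are still available in the workspace):
\preformatted{
m <- lm(mpg ~ wt, data = mtcars)
cl <- get_call(m)
cl$formula   # the formula component of the call
eval(cl)     # re-evaluating the call refits the model
}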
} \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_call(m) if (require("lme4")) { m <- lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris) get_call(m) } } insight/man/data_restoretype.Rd0000644000175000017500000000151214164615020016471 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datawizard.R \name{data_restoretype} \alias{data_restoretype} \title{Restore the type of columns according to a reference data frame} \usage{ data_restoretype(data, reference = NULL, ...) } \arguments{ \item{data}{A data frame.} \item{reference}{A reference data frame from which to find the correct column types.} \item{...}{Additional arguments passed on to methods.} } \value{ A data frame with columns whose types have been restored based on the reference data frame. } \description{ Restore the type of columns according to a reference data frame } \examples{ data <- data.frame( Sepal.Length = c("1", "3", "2"), Species = c("setosa", "versicolor", "setosa"), New = c("1", "3", "4") ) fixed <- data_restoretype(data, reference = iris) summary(fixed) } insight/man/get_df.Rd0000644000175000017500000000212314125542655014352 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_df.R \name{get_df} \alias{get_df} \alias{get_df.default} \title{Extract degrees of freedom} \usage{ get_df(x, ...) \method{get_df}{default}(x, type = "residual", verbose = TRUE, ...) } \arguments{ \item{x}{A statistical model.} \item{...}{Currently not used.} \item{type}{Can be \code{"residual"}, \code{"model"} or \code{"analytical"}. \code{"residual"} tries to extract residual degrees of freedom. If residual degrees of freedom could not be extracted, returns analytical degrees of freedom, i.e. \code{n-k} (number of observations minus number of parameters). \code{"model"} returns model-based degrees of freedom, i.e. the number of (estimated) parameters.} \item{verbose}{Toggle warnings.} } \description{ Estimate or extract residual or model-based degrees of freedom from regression models. } \examples{ model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris) get_df(model) # same as df.residual(model) get_df(model, type = "model") # same as attr(logLik(model), "df") } insight/man/check_if_installed.Rd0000644000175000017500000000345114144234777016725 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_if_installed.R \name{check_if_installed} \alias{check_if_installed} \title{Checking if needed package is installed} \usage{ check_if_installed( package, reason = "for this function to work", stop = TRUE, minimum_version = NULL, quietly = FALSE, ... ) } \arguments{ \item{package}{A character vector naming the package(s) whose installation needs to be checked in any of the libraries.} \item{reason}{A phrase describing why the package is needed. The default is a generic description.} \item{stop}{Logical that decides whether the function should stop if the needed package is not installed.} \item{minimum_version}{String, representing the minimum package version that is required. If \code{NULL}, no check for minimum version is done. Note that \code{minimum_version} only works when \code{package} is of length 1.} \item{quietly}{Logical, if \code{TRUE}, invisibly returns either \code{TRUE} if all packages are installed, \code{FALSE} otherwise, and does not stop or throw a warning. If \code{quietly = TRUE}, argument \code{stop} is ignored.
Use this argument to internally check for package dependencies without stopping or warnings.} \item{...}{Currently ignored} } \value{ If \code{stop = TRUE}, and \code{package} is not yet installed, the function stops and throws an error. Else, a named logical vector is returned, indicating which of the packages are installed, and which are not. } \description{ Checking if needed package is installed } \examples{ \dontrun{ check_if_installed("inexistent_package") check_if_installed("insight") check_if_installed("insight", minimum_version = "99.8.7") x <- check_if_installed(c("inexistent", "also_not_here"), stop = FALSE) x } } insight/man/find_random.Rd0000644000175000017500000000303114077615665015411 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_random.R \name{find_random} \alias{find_random} \title{Find names of random effects} \usage{ find_random(x, split_nested = FALSE, flatten = FALSE) } \arguments{ \item{x}{A fitted mixed model.} \item{split_nested}{Logical, if \code{TRUE}, terms from nested random effects will be returned as separated elements, not as single string with colon. See 'Examples'.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} } \value{ A list of character vectors that represent the name(s) of the random effects (grouping factors). Depending on the model, the returned list has following elements: \itemize{ \item \code{random}, the "random effects" terms from the conditional part of model \item \code{zero_inflated_random}, the "random effects" terms from the zero-inflation component of the model } } \description{ Return the name of the grouping factors from mixed effects models. } \examples{ if (require("lme4")) { data(sleepstudy) sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } m <- lmer( Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy ) find_random(m) find_random(m, split_nested = TRUE) } } insight/man/print_parameters.Rd0000644000175000017500000001412214077615666016504 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print_parameters.R \name{print_parameters} \alias{print_parameters} \title{Prepare summary statistics of model parameters for printing} \usage{ print_parameters( x, ..., split_by = c("Effects", "Component", "Group", "Response"), format = "text", parameter_column = "Parameter", keep_parameter_column = TRUE, remove_empty_column = FALSE, titles = NULL, subtitles = NULL ) } \arguments{ \item{x}{A fitted model, or a data frame returned by \code{\link[=clean_parameters]{clean_parameters()}}.} \item{...}{One or more objects (data frames), which contain information about the model parameters and related statistics (like confidence intervals, HDI, ROPE, ...).} \item{split_by}{\code{split_by} should be a character vector with one or more of the following elements: \code{"Effects"}, \code{"Component"}, \code{"Response"} and \code{"Group"}. These are the column names returned by \code{\link[=clean_parameters]{clean_parameters()}}, which is used to extract the information about which group or component the model parameters belong to. If \code{NULL}, the merged data frame is returned.
Else, the data frame is split into a list, split by the values from those columns defined in \code{split_by}.} \item{format}{Name of output-format, as string. If \code{NULL} (or \code{"text"}), assumed use for output is basic printing. If \code{"markdown"}, markdown-format is assumed. This only affects the style of title- and table-caption attributes, which are used in \code{\link[=export_table]{export_table()}}.} \item{parameter_column}{String, name of the column that contains the parameter names. Usually, for data frames returned by functions the easystats-packages, this will be \code{"Parameter"}.} \item{keep_parameter_column}{Logical, if \code{TRUE}, the data frames in the returned list have both a \code{"Cleaned_Parameter"} and \code{"Parameter"} column. If \code{FALSE}, the (unformatted) \code{"Parameter"} is removed, and the column with cleaned parameter names (\code{"Cleaned_Parameter"}) is renamed into \code{"Parameter"}.} \item{remove_empty_column}{Logical, if \code{TRUE}, columns with completely empty character values will be removed.} \item{titles, subtitles}{By default, the names of the model components (like fixed or random effects, count or zero-inflated model part) are added as attributes \code{"table_title"} and \code{"table_subtitle"} to each list element returned by \code{print_parameters()}. These attributes are then extracted and used as table (sub) titles in \code{\link[=export_table]{export_table()}}. Use \code{titles} and \code{subtitles} to override the default attribute values for \code{"table_title"} and \code{"table_subtitle"}. \code{titles} and \code{subtitles} may be any length from 1 to same length as returned list elements. If \code{titles} and \code{subtitles} are shorter than existing elements, only the first default attributes are overwritten.} } \value{ A data frame or a list of data frames (if \code{split_by} is not \code{NULL}). If a list is returned, the element names reflect the model components where the extracted information in the data frames belong to, e.g. \code{random.zero_inflated.Intercept: persons}. This is the data frame that contains the parameters for the random effects from group-level "persons" from the zero-inflated model component. } \description{ This function takes a data frame, typically a data frame with information on summaries of model parameters like \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}, \code{\link[bayestestR:hdi]{bayestestR::hdi()}} or \code{\link[parameters:model_parameters]{parameters::model_parameters()}}, as input and splits this information into several parts, depending on the model. See details below. } \details{ This function prepares data frames that contain information about model parameters for clear printing. \cr \cr First, \code{x} is required, which should either be a model object or a prepared data frame as returned by \code{\link[=clean_parameters]{clean_parameters()}}. If \code{x} is a model, \code{clean_parameters()} is called on that model object to get information with which model components the parameters are associated. \cr \cr Then, \code{...} take one or more data frames that also contain information about parameters from the same model, but also have additional information provided by other methods. 
For instance, a data frame in \code{...} might be the result of \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}, or \code{\link[parameters:model_parameters]{parameters::model_parameters()}}, where we have a) a \code{Parameter} column and b) columns with other parameter values (like CI, HDI, test statistic, etc.). \cr \cr Now we have a data frame with model parameters and information about the association with the different model components, a data frame with model parameters, and some summary statistics. \code{print_parameters()} then merges these data frames, so the parameters or statistics of interest are also associated with the different model components. The data frame is split into a list, which allows for clearer printing. Users can loop over this list and print each component for a better overview. Further, parameter names are "cleaned", if necessary, to make the printed output easier to read. See also 'Examples'. } \examples{ \dontrun{ library(bayestestR) model <- download_model("brms_zi_2") x <- hdi(model, effects = "all", component = "all") # hdi() returns a data frame; here we use only the # information on parameter names and HDI values tmp <- as.data.frame(x)[, 1:4] tmp # Based on the "split_by" argument, we get a list of data frames that # is split into several parts that reflect the model components. print_parameters(model, tmp) # This is the standard print()-method for "bayestestR::hdi"-objects. # For printing methods, it is easy to print complex summary statistics # in a clean way to the console by splitting the information into # different model components. x } } insight/man/link_function.Rd0000644000175000017500000000262014077615666015775 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/link_function.R \name{link_function} \alias{link_function} \alias{link_function.betamfx} \alias{link_function.gamlss} \alias{link_function.betareg} \alias{link_function.DirichletRegModel} \title{Get link-function from model object} \usage{ link_function(x, ...) \method{link_function}{betamfx}(x, what = c("mean", "precision"), ...) \method{link_function}{gamlss}(x, what = c("mu", "sigma", "nu", "tau"), ...) \method{link_function}{betareg}(x, what = c("mean", "precision"), ...) \method{link_function}{DirichletRegModel}(x, what = c("mean", "precision"), ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{what}{For \code{gamlss} models, indicates for which distribution parameter the link (inverse) function should be returned; for \code{betareg} or \code{DirichletRegModel}, can be \code{"mean"} or \code{"precision"}.} } \value{ A function, describing the link-function from a model-object. For multivariate-response models, a list of functions is returned. } \description{ Returns the link-function from a model object.
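As an additional sketch, for a logistic regression the returned link is the logit function:
\preformatted{
m <- glm(vs ~ wt, data = mtcars, family = "binomial")
link_function(m)(0.3)  # same as qlogis(0.3)
}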
} \examples{ # example from ?stats::glm counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) outcome <- gl(3, 1, 9) treatment <- gl(3, 3) m <- glm(counts ~ outcome + treatment, family = poisson()) link_function(m)(.3) # same as log(.3) } insight/man/format_pd.Rd0000644000175000017500000000131113677310525015073 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_pd.R \name{format_pd} \alias{format_pd} \title{Probability of direction (pd) formatting} \usage{ format_pd(pd, stars = FALSE, stars_only = FALSE, name = "pd") } \arguments{ \item{pd}{Probability of direction (pd).} \item{stars}{Add significance stars (e.g., p < .001***).} \item{stars_only}{Return only significance stars.} \item{name}{Name prefixing the text. Can be \code{NULL}.} } \value{ A formatted string. } \description{ Probability of direction (pd) formatting } \examples{ format_pd(0.12) format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), name = NULL) format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), stars = TRUE) } insight/man/is_nullmodel.Rd0000644000175000017500000000147714077615666015634 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_nullmodel.R \name{is_nullmodel} \alias{is_nullmodel} \title{Checks if model is a null-model (intercept-only)} \usage{ is_nullmodel(x) } \arguments{ \item{x}{A model object.} } \value{ \code{TRUE} if \code{x} is a null-model, \code{FALSE} otherwise. } \description{ Checks if model is a null-model (intercept-only), i.e. if the conditional part of the model has no predictors. } \examples{ model <- lm(mpg ~ 1, data = mtcars) is_nullmodel(model) model <- lm(mpg ~ gear, data = mtcars) is_nullmodel(model) if (require("lme4")) { model <- lmer(Reaction ~ 1 + (Days | Subject), data = sleepstudy) is_nullmodel(model) model <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) is_nullmodel(model) } } insight/man/get_random.Rd0000644000175000017500000000162514077615666015260 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_random.R \name{get_random} \alias{get_random} \title{Get the data from random effects} \usage{ get_random(x) } \arguments{ \item{x}{A fitted mixed model.} } \value{ The data from all random effects terms, as data frame. Or \code{NULL} if model has no random effects. } \description{ Returns the data from all random effects terms. } \examples{ if (require("lme4")) { data(sleepstudy) # prepare some data... sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } m <- lmer( Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy ) head(get_random(m)) } } insight/man/find_formula.Rd0000644000175000017500000000536714164336653015606 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_formula.R \name{find_formula} \alias{find_formula} \alias{formula_ok} \title{Find model formula} \usage{ find_formula(x, verbose = TRUE, ...) formula_ok(x, verbose = TRUE, ...) } \arguments{ \item{x}{A fitted model.} \item{verbose}{Toggle warnings.} \item{...}{Currently not used.} } \value{ A list of formulas that describe the model. For simple models, only one list-element, \code{conditional}, is returned. 
For more complex models, the returned list may have following elements: \itemize{ \item \code{conditional}, the "fixed effects" part from the model (in the context of fixed-effects or instrumental variable regression, also called \emph{regressors}). One exception are \code{DirichletRegModel} models from \pkg{DirichletReg}, which have two or three components, depending on \code{model}. \item \code{random}, the "random effects" part from the model (or the \code{id} for gee-models and similar) \item \code{zero_inflated}, the "fixed effects" part from the zero-inflation component of the model \item \code{zero_inflated_random}, the "random effects" part from the zero-inflation component of the model \item \code{dispersion}, the dispersion formula \item \code{instruments}, for fixed-effects or instrumental variable regressions like \code{ivreg::ivreg()}, \code{lfe::felm()} or \code{plm::plm()}, the instrumental variables \item \code{cluster}, for fixed-effects regressions like \code{lfe::felm()}, the cluster specification \item \code{correlation}, for models with correlation-component like \code{nlme::gls()}, the formula that describes the correlation structure \item \code{slopes}, for fixed-effects individual-slope models like \code{feisr::feis()}, the formula for the slope parameters \item \code{precision}, for \code{DirichletRegModel} models from \pkg{DirichletReg}, when parametrization (i.e. \code{model}) is \code{"alternative"}. } } \description{ Returns the formula(s) for the different parts of a model (like fixed or random effects, zero-inflated component, ...). \code{formula_ok()} checks if a model formula has valid syntax regarding writing \code{TRUE} instead of \code{T} inside \code{poly()} and that no data names are used (i.e. no \code{data$variable}, but rather \code{variable}). } \note{ For models of class \code{lme} or \code{gls} the correlation-component is only returned, when it is explicitly defined as named argument (\code{form}), e.g. \code{corAR1(form = ~1 | Mare)} } \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_formula(m) if (require("lme4")) { m <- lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris) f <- find_formula(m) f format(f) } } insight/man/color_if.Rd0000644000175000017500000000501614077615665014730 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/color_if.R \name{color_if} \alias{color_if} \alias{colour_if} \title{Color-formatting for data columns based on condition} \usage{ color_if( x, columns, predicate = `>`, value = 0, color_if = "green", color_else = "red", digits = 2 ) colour_if( x, columns, predicate = `>`, value = 0, colour_if = "green", colour_else = "red", digits = 2 ) } \arguments{ \item{x}{A data frame.} \item{columns}{Character vector with column names of \code{x} that should be formatted.} \item{predicate}{A function that takes \code{columns} and \code{value} as input and which should return \code{TRUE} or \code{FALSE}, based on whether the condition (in comparison with \code{value}) is met.} \item{value}{The comparator. May be used in conjunction with \code{predicate} to quickly set up a function which compares elements in \code{columns} to \code{value}. May be ignored when \code{predicate} is a function that internally computes other comparisons. See 'Examples'.} \item{color_if, colour_if}{Character vector, indicating the color code used to format values in \code{x} that meet the condition of \code{predicate} and \code{value}.
May be one of \code{"red"}, \code{"yellow"}, \code{"green"}, \code{"blue"}, \code{"violet"}, \code{"cyan"} or \code{"grey"}. Formatting is also possible with \code{"bold"} or \code{"italic"}.} \item{color_else, colour_else}{See \code{color_if}, but only for conditions that are \emph{not} met.} \item{digits}{Digits for rounded values.} } \value{ The . } \description{ Convenient function that formats columns in data frames with color codes, where the color is chosen based on certain conditions. Columns are then printed in color in the console. } \details{ The predicate-function simply works like this: \code{which(predicate(x[, columns], value))} } \examples{ # all values in Sepal.Length larger than 5 in green, all remaining in red x <- color_if(iris[1:10, ], columns = "Sepal.Length", predicate = `>`, value = 5) x cat(x$Sepal.Length) # all levels "setosa" in Species in green, all remaining in red x <- color_if(iris, columns = "Species", predicate = `==`, value = "setosa") cat(x$Species) # own function, argument "value" not needed here p <- function(x, y) { x >= 4.9 & x <= 5.1 } # all values in Sepal.Length between 4.9 and 5.1 in green, all remaining in red x <- color_if(iris[1:10, ], columns = "Sepal.Length", predicate = p) cat(x$Sepal.Length) } insight/man/get_parameters.Rd0000644000175000017500000000413114077615666016136 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters.R \name{get_parameters} \alias{get_parameters} \alias{get_parameters.default} \title{Get model parameters} \usage{ get_parameters(x, ...) \method{get_parameters}{default}(x, verbose = TRUE, ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{verbose}{Toggle messages and warnings.} } \value{ \itemize{ \item for non-Bayesian models, a data frame with two columns: the parameter names and the related point estimates. \item for Anova (\code{aov()}) with error term, a list of parameters for the conditional and the random effects parameters } } \description{ Returns the coefficients (or posterior samples for Bayesian models) from a model. See the documentation for your object's class: \itemize{ \item{\link[=get_parameters.BGGM]{Bayesian models} (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, ...)} \item{\link[=get_parameters.emmGrid]{Estimated marginal means} (\pkg{emmeans})} \item{\link[=get_parameters.gamm]{Generalized additive models} (\pkg{mgcv}, \pkg{VGAM}, ...)} \item{\link[=get_parameters.betamfx]{Marginal effects models} (\pkg{mfx})} \item{\link[=get_parameters.glmm]{Mixed models} (\pkg{lme4}, \pkg{glmmTMB}, \pkg{GLMMadaptive}, ...)} \item{\link[=get_parameters.zeroinfl]{Zero-inflated and hurdle models} (\pkg{pscl}, ...)} \item{\link[=get_parameters.betareg]{Models with special components} (\pkg{betareg}, \pkg{MuMIn}, ...)} \item{\link[=get_parameters.htest]{Hypothesis tests} (\code{htest})} } } \details{ In most cases when models either return different "effects" (fixed, random) or "components" (conditional, zero-inflated, ...), the arguments \code{effects} and \code{component} can be used. \cr \cr \code{get_parameters()} is comparable to \code{coef()}, however, the coefficients are returned as data frame (with columns for names and point estimates of coefficients). For Bayesian models, the posterior samples of parameters are returned. 
} \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) get_parameters(m) } insight/man/all_models_equal.Rd0000644000175000017500000000217214077615665016440 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/all_equal_models.R \name{all_models_equal} \alias{all_models_equal} \alias{all_models_same_class} \title{Checks if all objects are models of same class} \usage{ all_models_equal(..., verbose = FALSE) all_models_same_class(..., verbose = FALSE) } \arguments{ \item{...}{A list of objects.} \item{verbose}{Toggle off warnings.} } \value{ A logical, \code{TRUE} if \code{x} are all supported model objects of same class. } \description{ Small helper that checks if all objects are \emph{supported} (regression) model objects and of same class. } \examples{ if (require("lme4")) { data(mtcars) data(sleepstudy) m1 <- lm(mpg ~ wt + cyl + vs, data = mtcars) m2 <- lm(mpg ~ wt + cyl, data = mtcars) m3 <- lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) m4 <- glm(formula = vs ~ wt, family = binomial(), data = mtcars) all_models_same_class(m1, m2) all_models_same_class(m1, m2, m3) all_models_same_class(m1, m4, m2, m3, verbose = TRUE) all_models_same_class(m1, m4, mtcars, m2, m3, verbose = TRUE) } } insight/man/format_bf.Rd0000644000175000017500000000224413745240431015057 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_bf.R \name{format_bf} \alias{format_bf} \title{Bayes Factor formatting} \usage{ format_bf( bf, stars = FALSE, stars_only = FALSE, name = "BF", protect_ratio = FALSE, na_reference = NA, exact = FALSE ) } \arguments{ \item{bf}{Bayes Factor.} \item{stars}{Add significance stars (e.g., p < .001***).} \item{stars_only}{Return only significance stars.} \item{name}{Name prefixing the text. Can be \code{NULL}.} \item{protect_ratio}{Should values smaller than 1 be represented as ratios?} \item{na_reference}{How to format missing values (\code{NA}).} \item{exact}{Should very large or very small values be reported with a scientific format (e.g., 4.24e5), or as truncated values (as "> 1000" and "< 1/1000").} } \value{ A formatted string. } \description{ Bayes Factor formatting } \examples{ format_bf(bfs <- c(0.000045, 0.033, NA, 1557, 3.54)) format_bf(bfs, exact = TRUE, name = NULL) format_bf(bfs, stars = TRUE) format_bf(bfs, protect_ratio = TRUE) format_bf(bfs, protect_ratio = TRUE, exact = TRUE) format_bf(bfs, na_reference = 1) } insight/man/link_inverse.Rd0000644000175000017500000000263314077615666015631 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/link_inverse.R \name{link_inverse} \alias{link_inverse} \alias{link_inverse.betareg} \alias{link_inverse.DirichletRegModel} \alias{link_inverse.betamfx} \alias{link_inverse.gamlss} \title{Get link-inverse function from model object} \usage{ link_inverse(x, ...) \method{link_inverse}{betareg}(x, what = c("mean", "precision"), ...) \method{link_inverse}{DirichletRegModel}(x, what = c("mean", "precision"), ...) \method{link_inverse}{betamfx}(x, what = c("mean", "precision"), ...) \method{link_inverse}{gamlss}(x, what = c("mu", "sigma", "nu", "tau"), ...) 
} \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{what}{For \code{gamlss} models, indicates for which distribution parameter the link (inverse) function should be returned; for \code{betareg} or \code{DirichletRegModel}, can be \code{"mean"} or \code{"precision"}.} } \value{ A function, describing the inverse-link function from a model-object. For multivariate-response models, a list of functions is returned. } \description{ Returns the link-inverse function from a model object. } \examples{ # example from ?stats::glm counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) outcome <- gl(3, 1, 9) treatment <- gl(3, 3) m <- glm(counts ~ outcome + treatment, family = poisson()) link_inverse(m)(.3) # same as exp(.3) } insight/man/find_parameters.emmGrid.Rd0000644000175000017500000000221114077615665017656 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameters_emmeans.R \name{find_parameters.emmGrid} \alias{find_parameters.emmGrid} \title{Find model parameters from estimated marginal means objects} \usage{ \method{find_parameters}{emmGrid}(x, flatten = FALSE, merge_parameters = FALSE, ...) } \arguments{ \item{x}{A fitted model.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{merge_parameters}{Logical, if \code{TRUE} and \code{x} has multiple columns for parameter names (like \code{emmGrid} objects may have), these are merged into a single parameter column, with parameters names and values as values.} \item{...}{Currently not used.} } \value{ A list of parameter names. For simple models, only one list-element, \code{conditional}, is returned. } \description{ Returns the parameter names from a model. } \examples{ data(mtcars) model <- lm(mpg ~ wt * factor(cyl), data = mtcars) if (require("emmeans", quietly = TRUE)) { emm <- emmeans(model, c("wt", "cyl")) find_parameters(emm) } } insight/man/find_parameters.BGGM.Rd0000644000175000017500000001134414077615665017015 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_parameters_bayesian.R \name{find_parameters.BGGM} \alias{find_parameters.BGGM} \alias{find_parameters.BFBayesFactor} \alias{find_parameters.MCMCglmm} \alias{find_parameters.bamlss} \alias{find_parameters.brmsfit} \alias{find_parameters.bayesx} \alias{find_parameters.stanreg} \alias{find_parameters.sim.merMod} \title{Find names of model parameters from Bayesian models} \usage{ \method{find_parameters}{BGGM}( x, component = c("correlation", "conditional", "intercept", "all"), flatten = FALSE, ... ) \method{find_parameters}{BFBayesFactor}( x, effects = c("all", "fixed", "random"), component = c("all", "extra"), flatten = FALSE, ... ) \method{find_parameters}{MCMCglmm}(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) \method{find_parameters}{bamlss}( x, flatten = FALSE, component = c("all", "conditional", "location", "distributional", "auxiliary"), parameters = NULL, ... ) \method{find_parameters}{brmsfit}( x, effects = "all", component = "all", flatten = FALSE, parameters = NULL, ... ) \method{find_parameters}{bayesx}( x, component = c("all", "conditional", "smooth_terms"), flatten = FALSE, parameters = NULL, ... ) \method{find_parameters}{stanreg}( x, effects = c("all", "fixed", "random"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), flatten = FALSE, parameters = NULL, ... 
) \method{find_parameters}{sim.merMod}( x, effects = c("all", "fixed", "random"), flatten = FALSE, parameters = NULL, ... ) } \arguments{ \item{x}{A fitted model.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables, or marginal effects. Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so-called fixed-effects regressions), or models with marginal effects from \pkg{mfx}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned.} \item{flatten}{Logical, if \code{TRUE}, the values are returned as character vector, not as list. Duplicated values are removed.} \item{...}{Currently not used.} \item{effects}{Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned.} } \value{ A list of parameter names. For simple models, only one list-element, \code{conditional}, is returned. For more complex models, the returned list may have following elements: \itemize{ \item \code{conditional}, the "fixed effects" part from the model \item \code{random}, the "random effects" part from the model \item \code{zero_inflated}, the "fixed effects" part from the zero-inflation component of the model \item \code{zero_inflated_random}, the "random effects" part from the zero-inflation component of the model \item \code{smooth_terms}, the smooth parameters } Furthermore, some models, especially from \pkg{brms}, can also return auxiliary parameters. These may be one of the following: \itemize{ \item \code{sigma}, the residual standard deviation (auxiliary parameter) \item \code{dispersion}, the dispersion parameters (auxiliary parameter) \item \code{beta}, the beta parameter (auxiliary parameter) \item \code{simplex}, simplex parameters of monotonic effects (\pkg{brms} only) \item \code{mix}, mixture parameters (\pkg{brms} only) \item \code{shiftprop}, shifted proportion parameters (\pkg{brms} only) } } \description{ Returns the names of model parameters, as they typically appear in the \code{summary()} output. For Bayesian models, the parameter names equal the column names of the posterior samples after coercion from \code{as.data.frame()}.
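An illustrative sketch (not run here; it assumes an internet connection and uses the pre-compiled \code{"brms_zi_2"} example model from the \emph{circus} repository):
\preformatted{
model <- download_model("brms_zi_2")
# list of parameter names, split by effects and model components
find_parameters(model)
# as a single character vector
find_parameters(model, flatten = TRUE)
}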
} \examples{ data(mtcars) m <- lm(mpg ~ wt + cyl + vs, data = mtcars) find_parameters(m) } insight/man/format_table.Rd0000644000175000017500000000707114144234777015574 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_table.R \name{format_table} \alias{format_table} \alias{parameters_table} \title{Parameter table formatting} \usage{ format_table( x, pretty_names = TRUE, stars = FALSE, digits = 2, ci_width = "auto", ci_brackets = TRUE, ci_digits = 2, p_digits = 3, rope_digits = 2, zap_small = FALSE, preserve_attributes = FALSE, verbose = TRUE, ... ) parameters_table( x, pretty_names = TRUE, stars = FALSE, digits = 2, ci_width = "auto", ci_brackets = TRUE, ci_digits = 2, p_digits = 3, rope_digits = 2, zap_small = FALSE, preserve_attributes = FALSE, verbose = TRUE, ... ) } \arguments{ \item{x}{A data frame of model's parameters, as returned by various functions of the \strong{easystats}-packages. May also be a result from \code{broom::tidy()}.} \item{pretty_names}{Return "pretty" (i.e. more human readable) parameter names.} \item{stars}{Add significance stars (e.g., p < .001***).} \item{digits, ci_digits, p_digits, rope_digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. \code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{ci_width}{Minimum width of the returned string for confidence intervals. If not \code{NULL} and width is larger than the string's length, leading whitespaces are added to the string. If \code{width="auto"}, width will be set to the length of the longest string.} \item{ci_brackets}{Logical, if \code{TRUE} (default), CI-values are encompassed in square brackets (else in parentheses).} \item{zap_small}{Logical, if \code{TRUE}, small values are rounded after \code{digits} decimal places. If \code{FALSE}, values with more decimal places than \code{digits} are printed in scientific notation.} \item{preserve_attributes}{Logical, if \code{TRUE}, preserves all attributes from the input data frame.} \item{verbose}{Toggle messages and warnings.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. Note that \code{format_table()} converts all columns into character vectors! } \description{ This functions takes a data frame with model parameters as input and formats certain columns into a more readable layout (like collapsing separate columns for lower and upper confidence interval values). Furthermore, column names are formatted as well. Note that \code{format_table()} converts all columns into character vectors! 
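For instance, a minimal sketch of the digit options described above (assuming \code{x} is a data frame of model parameters, e.g. as returned by \code{parameters::model_parameters()}): \preformatted{# round to three significant digits and add significance stars
format_table(x, digits = "signif3", stars = TRUE)}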
} \examples{ format_table(head(iris), digits = 1) if (require("parameters")) { x <- model_parameters(lm(Sepal.Length ~ Species * Sepal.Width, data = iris)) as.data.frame(format_table(x)) as.data.frame(format_table(x, p_digits = "scientific")) } \donttest{ if (require("rstanarm", warn.conflicts = FALSE) && require("parameters", , warn.conflicts = FALSE)) { model <- stan_glm(Sepal.Length ~ Species, data = iris, refresh = 0, seed = 123) x <- model_parameters(model, ci = c(0.69, 0.89, 0.95)) as.data.frame(format_table(x)) } } } \seealso{ Vignettes \href{https://easystats.github.io/insight/articles/display.html}{Formatting, printing and exporting tables} and \href{https://easystats.github.io/parameters/articles/model_parameters_formatting.html}{Formatting model parameters}. } insight/man/format_string.Rd0000644000175000017500000000230514046165422015775 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_string.R \name{format_string} \alias{format_string} \alias{format_string.character} \title{String Values Formatting} \usage{ format_string(x, ...) \method{format_string}{character}(x, length = NULL, abbreviate = "...", ...) } \arguments{ \item{x}{String value.} \item{...}{Arguments passed to or from other methods.} \item{length}{Numeric, maximum length of the returned string. If not \code{NULL}, will shorten the string to a maximum \code{length}, however, it will not truncate inside words. I.e. if the string length happens to be inside a word, this word is removed from the returned string, so the returned string has a \emph{maximum} length of \code{length}, but might be shorter.} \item{abbreviate}{String that will be used as suffix, if \code{x} was shortened.} } \value{ A formatted string. } \description{ String Values Formatting } \examples{ s <- "This can be considered as very long string!" # string is shorter than max.length, so returned as is format_string(s, 60) # string is shortened to as many words that result in # a string of maximum 20 chars format_string(s, 20) } insight/man/find_offset.Rd0000644000175000017500000000171513751534033015411 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/find_offset.R \name{find_offset} \alias{find_offset} \title{Find possible offset terms in a model} \usage{ find_offset(x) } \arguments{ \item{x}{A fitted model.} } \value{ A character vector with the name(s) of offset terms. } \description{ Returns a character vector with the name(s) of offset terms. } \examples{ # Generate some zero-inflated data set.seed(123) N <- 100 # Samples x <- runif(N, 0, 10) # Predictor off <- rgamma(N, 3, 2) # Offset variable yhat <- -1 + x * 0.5 + log(off) # Prediction on log scale dat <- data.frame(y = NA, x, logOff = log(off)) dat$y <- rpois(N, exp(yhat)) # Poisson process dat$y <- ifelse(rbinom(N, 1, 0.3), 0, dat$y) # Zero-inflation process if (require("pscl")) { m1 <- zeroinfl(y ~ offset(logOff) + x | 1, data = dat, dist = "poisson") find_offset(m1) m2 <- zeroinfl(y ~ x | 1, data = dat, offset = logOff, dist = "poisson") find_offset(m2) } } insight/man/model_info.Rd0000644000175000017500000001107014120142353015220 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_info.R \name{model_info} \alias{model_info} \alias{model_info.default} \title{Access information from model objects} \usage{ model_info(x, ...) \method{model_info}{default}(x, verbose = TRUE, ...) 
} \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} \item{verbose}{Toggle off warnings.} } \value{ A list with information about the model, like family, link-function etc. (see 'Details'). } \description{ Retrieve information from model objects. } \details{ \code{model_info()} returns a list with information about the model for many different model objects. The following information is returned, where all values starting with \code{is_} are logicals. \itemize{ \item \code{is_binomial}: family is binomial (but not negative binomial) \item \code{is_bernoulli}: special case of binomial models: family is Bernoulli \item \code{is_poisson}: family is poisson \item \code{is_negbin}: family is negative binomial \item \code{is_count}: model is a count model (i.e. family is either poisson or negative binomial) \item \code{is_beta}: family is beta \item \code{is_betabinomial}: family is beta-binomial \item \code{is_dirichlet}: family is dirichlet \item \code{is_exponential}: family is exponential (e.g. Gamma or Weibull) \item \code{is_logit}: model has logit link \item \code{is_probit}: model has probit link \item \code{is_linear}: family is gaussian \item \code{is_tweedie}: family is tweedie \item \code{is_ordinal}: family is ordinal or cumulative link \item \code{is_cumulative}: family is ordinal or cumulative link \item \code{is_multinomial}: family is multinomial or categorical link \item \code{is_categorical}: family is categorical link \item \code{is_censored}: model is a censored model (has a censored response, including survival models) \item \code{is_truncated}: model is a truncated model (has a truncated response) \item \code{is_survival}: model is a survival model \item \code{is_zero_inflated}: model has a zero-inflation component \item \code{is_hurdle}: model has a zero-inflation component and is a hurdle-model (truncated family distribution) \item \code{is_dispersion}: model has a dispersion component \item \code{is_mixed}: model is a mixed effects model (with random effects) \item \code{is_multivariate}: model is a multivariate response model (currently only works for \emph{brmsfit} objects) \item \code{is_trial}: model response contains additional information about the trials \item \code{is_bayesian}: model is a Bayesian model \item \code{is_gam}: model is a generalized additive model \item \code{is_anova}: model is an Anova object \item \code{is_ttest}: model is an object of class \code{htest}, returned by \code{t.test()} \item \code{is_correlation}: model is an object of class \code{htest}, returned by \code{cor.test()} \item \code{is_ranktest}: model is an object of class \code{htest}, returned by \code{cor.test()} (if Spearman's rank correlation), \code{wilcox.test()} or \code{kruskal.test()}. \item \code{is_levenetest}: model is an object of class \code{anova}, returned by \code{car::leveneTest()}. \item \code{is_onewaytest}: model is an object of class \code{htest}, returned by \code{oneway.test()} \item \code{is_proptest}: model is an object of class \code{htest}, returned by \code{prop.test()} \item \code{is_binomtest}: model is an object of class \code{htest}, returned by \code{binom.test()} \item \code{is_chi2test}: model is an object of class \code{htest}, returned by \code{chisq.test()} \item \code{is_xtab}: model is an object of class \code{htest} or \code{BFBayesFactor}, and the test-statistic stems from a contingency table (i.e. \code{chisq.test()} or \code{BayesFactor::contingencyTableBF()}).
\item \code{link_function}: the link-function \item \code{family}: the family-object \item \code{n_obs}: number of observations \item \code{model_terms}: a list with all model terms, including terms such as random effects or from zero-inflated model parts. } } \examples{ ldose <- rep(0:5, 2) numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) sex <- factor(rep(c("M", "F"), c(6, 6))) SF <- cbind(numdead, numalive = 20 - numdead) dat <- data.frame(ldose, sex, SF, stringsAsFactors = FALSE) m <- glm(SF ~ sex * ldose, family = binomial) model_info(m) \dontrun{ library(glmmTMB) data("Salamanders") m <- glmmTMB( count ~ spp + cover + mined + (1 | site), ziformula = ~ spp + mined, dispformula = ~DOY, data = Salamanders, family = nbinom2 ) model_info(m) } } insight/man/get_parameters.htest.Rd % Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_parameters_htest.R \name{get_parameters.htest} \alias{get_parameters.htest} \title{Get model parameters from htest-objects} \usage{ \method{get_parameters}{htest}(x, ...) } \arguments{ \item{x}{A fitted model.} \item{...}{Currently not used.} } \value{ A data frame with two columns: the parameter names and the related point estimates. } \description{ Returns the parameters from a hypothesis test. } \examples{ get_parameters(t.test(1:10, y = c(7:20))) } insight/man/data_to_long.Rd % Generated by roxygen2: do not edit by hand % Please edit documentation in R/datawizard.R \name{data_to_long} \alias{data_to_long} \alias{data_to_wide} \title{Reshape (pivot) data from wide to long} \usage{ data_to_long( data, cols = "all", colnames_to = "Name", values_to = "Value", rows_to = NULL, ..., names_to = colnames_to ) data_to_wide( data, values_from = "Value", colnames_from = "Name", rows_from = NULL, sep = "_", ..., names_from = colnames_from ) } \arguments{ \item{data}{A data frame to pivot.} \item{cols}{A vector of column names or indices to pivot into longer format.} \item{colnames_to}{The name of the new column that will contain the column names.} \item{values_to}{The name of the new column that will contain the values of the pivoted variables.} \item{rows_to}{The name of the column that will contain the row-number from the original data. If \code{NULL}, will be removed.} \item{...}{Additional arguments passed on to methods.} \item{names_to, names_from}{Same as \code{colnames_to}; provided for compatibility with \code{tidyr::pivot_longer()}.} \item{values_from}{The name of the column that contains the values to be put in the new columns.} \item{colnames_from}{The name of the column that contains the levels to be used as future column names.} \item{rows_from}{The name of the column that identifies the rows. If \code{NULL}, will use all the unique rows.} \item{sep}{The character used as separator in the variable names in the wide format.} } \value{ data.frame } \description{ This function "lengthens" data, increasing the number of rows and decreasing the number of columns. This is a dependency-free base-R equivalent of \code{tidyr::pivot_longer()}.
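A minimal sketch of that reshaping (the column names \code{"Name"} and \code{"Value"} are the documented defaults): \preformatted{d <- data.frame(a = 1:2, b = 3:4)
# 2 rows x 2 pivoted columns yield 4 rows with a "Name" and a "Value" column
data_to_long(d, colnames_to = "Name", values_to = "Value")}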
} \examples{ wide_data <- data.frame(replicate(5, rnorm(10))) # From wide to long # ------------------ # Default behaviour (equivalent to tidyr::pivot_longer(wide_data, cols = 1:5)) data_to_long(wide_data) # Customizing the names data_to_long(wide_data, cols = c(1, 2), colnames_to = "Column", values_to = "Numbers", rows_to = "Row" ) # From long to wide # ----------------- long_data <- data_to_long(wide_data, rows_to = "Row_ID") # Save row number data_to_wide(long_data, colnames_from = "Name", values_from = "Value", rows_from = "Row_ID" ) # Full example # ------------------ if (require("psych")) { data <- psych::bfi # Wide format with one row per participant's personality test # Pivot long format long <- data_to_long(data, cols = "\\\\d", # Select all columns that contain a digit colnames_to = "Item", values_to = "Score", rows_to = "Participant" ) # Separate facet and question number long$Facet <- gsub("\\\\d", "", long$Item) long$Item <- gsub("[A-Z]", "", long$Item) long$Item <- paste0("I", long$Item) wide <- data_to_wide(long, colnames_from = "Item", values_from = "Score" ) head(wide) } } insight/man/figures/ insight/man/figures/logo.png [binary PNG image data omitted] insight/vignettes/display.Rmd --- title: "Formatting, printing and exporting tables" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Formatting, printing and exporting tables} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) if (!requireNamespace("poorman", quietly = TRUE) || !requireNamespace("gt", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(poorman) library(gt) } ``` ## The difference between a data frame and its render Most objects encountered throughout the **easystats** packages are "tables", i.e., 2D matrices with columns and rows. In R, these objects are often, at their core, *data frames*. Let's create one to use as an example: ```{r, warning=FALSE, message=FALSE} library(insight) df <- data.frame( Variable = c(1, 3, 5, 3, 1), Group = c("A", "A", "A", "B", "B"), CI = c(0.95, 0.95, 0.95, 0.95, 0.95), CI_low = c(3.35, 2.425, 6.213, 12.1, 1.23), CI_high = c(4.23, 5.31, 7.123, 13.5, 3.61), p = c(0.001, 0.0456, 0.45, 0.0042, 0.34) ) df ``` When I display it in the console (calling an object - e.g. `df` - is actually equivalent to calling `print(df)`), the output looks alright, but it could be improved. Some packages, such as **knitr**, have functions to create a nicer output.
For instance, in markdown, so that it can be nicely rendered in markdown documents when copied: ```{r, eval=FALSE} knitr::kable(df, format = "markdown") ``` ``` | Variable|Group | CI| CI_low| CI_high| p| |--------:|:-----|----:|------:|-------:|------:| | 1|A | 0.95| 3.350| 4.230| 0.0010| | 3|A | 0.95| 2.425| 5.310| 0.0456| | 5|A | 0.95| 6.213| 7.123| 0.4500| | 3|B | 0.95| 12.100| 13.500| 0.0042| | 1|B | 0.95| 1.230| 3.610| 0.3400| ``` Or HTML, which again makes it look great in HTML files (such as this webpage you're reading): ```{r, results='asis'} knitr::kable(df, format = "html") ``` ## The *insight* workflow The **insight** package also contains functions to improve the "printing", or rendering, of tables. Its design dissociates two separate and independent steps: *formatting* and *exporting*. ### Formatting The purpose of formatting is to improve a given table, while still keeping it as a regular R data frame, so that it can, for instance, be further modified by the user. ```{r} format_table(df) ``` As you can see, `format_table()` modifies columns, turning numbers into characters (so that they have the same number of digits), and detecting confidence intervals. This is usually combined with column-specific formatting functions, like `format_p()`: ```{r} df %>% mutate(p = format_p(p, stars = TRUE)) %>% format_table() ``` ### Exporting The next step is *exporting*, which takes a data frame and renders it in a given format, so that it looks good in the console, or in markdown, HTML or LaTeX. For console output, we need to `cat()` the returned result to get nicely printed output: ```{r} cat(export_table(df)) ``` For markdown or HTML, simply modify the `format` argument to markdown ("md")... ```{r} export_table(df, format = "md") ``` ...or HTML format. ```{r} export_table(df, format = "html") ``` This can be combined with `format_table()`. ```{r} df %>% format_table(ci_brackets = c("(", ")")) %>% export_table(format = "html") ``` TODO: What about display? insight/vignettes/export_table.png [binary PNG image data omitted] insight/vignettes/figure3b.png [binary PNG image data omitted] insight/vignettes/figure3d.png [binary PNG image data omitted] insight/vignettes/export.Rmd --- title: "Exporting tables with captions and footers" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Exporting tables with captions and footers} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", warning = FALSE, message = FALSE ) ``` ## Exporting data frames into nice tables The `export_table()` function creates nicely formatted tables in text, markdown or HTML format. You can add (coloured) captions or footer lines to the table as well, and you can even create multiple tables from a *list* of data frames. This vignette shows some examples of how to do this (focusing on text output). Note that `export_table()` returns a *formatted string*, which prints nicely (printing essentially just calls `cat()`). ***Note:*** This vignette includes examples with coloured text output. The coloured text is not rendered in this vignette.
Rather, try out these examples and look at the results in your console! ```{r} library(insight) x <- iris[1:3, c(1, 2, 5)] # the table as "readable" output export_table(x) # see the underlying string unclass(export_table(x)) ``` ## Adding (coloured) titles A title can be added by either using the `caption` argument, or by adding a string as `table_caption` attribute. ```{r} # a simple caption export_table(x, caption = "Title") # we use a new object, so "x" has no attributes yet out <- x attr(out, "table_caption") <- "Another title" export_table(out) ``` `caption` can also be character vector of length 2, with the first element being the caption, and the second being the name of a colour (see `?print_colour` for available options). This is helpful for printing coloured table captions. ```{r} # A red caption export_table(x, caption = c("# Title", "red")) # same for attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") export_table(out) ``` Since the coloured text is not rendered, we provide a screenshot as example here: ```{r echo=FALSE, out.width="50%"} knitr::include_graphics("export_table.png", dpi = 72) ``` ## Adding (coloured) table footers Use the `footer` argument to add a footer line to the table. It is also possible to add a string as `table_footer` attribute. ```{r} # colored caption, simple footer export_table( x, caption = c("# Title", "red"), footer = "Footer line" ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_footer") <- "A simple footer" export_table(out) ``` Coloured footers can be added in the same way as for captions. ```{r} # colored caption and footer export_table( x, caption = c("# Title", "red"), footer = c("Footer line in blue", "blue") ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_footer") <- c("Footer line in blue", "blue") export_table(out) ``` ## Adding subtitles Subtitles can be added using the `subtitle` argument, or the `table_subtitle` attribute. Note that you must take care of adding new-line characters. ```{r} # colored caption, subtitle and footer export_table( x, caption = c("# Title", "red"), subtitle = c("\n A subtitle in yellow", "yellow"), footer = c("Footer line in blue", "blue") ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_subtitle") <- c("\nA yellow subtitle", "yellow") attr(out, "table_footer") <- c("Footer line in blue", "blue") export_table(out) ``` ## Exporting multiple data frames into multiple tables Multiple data frames saved in a `list()` can be used to create multiple tables at once. ```{r} x <- list( data.frame(iris[1:3, c(1, 2, 5)]), data.frame(iris[51:53, c(1, 3, 5)]), data.frame(iris[111:113, c(1, 4, 5)]) ) # three different tables export_table(x) ``` ## Adding table captions and footers For multiple tables, it is also possible to add a caption for each table. Simply use a `list()` of strings for the `caption` argument, or add a `table_caption` attribute. to *each* data frame in the list. ```{r} # one caption for each table export_table(x, caption = list("Table 1", "Table 2", "Table 3")) # add attribute to *each* data frame out <- x for (i in 1:length(out)) { attr(out[[i]], "table_caption") <- paste("Table", i) } export_table(out) ``` In the same way you can add multiple footers. Note, however, that you have to take care about adding new-line characters. 
```{r} # add captions and footers for each table export_table( x, caption = list("Table 1", "Table 2", "Table 3"), footer = list("Footer 1\n\n", "Footer 2\n\n", "Footer 3\n\n") ) out <- x for (i in 1:length(out)) { attr(out[[i]], "table_caption") <- paste("Table", i) attr(out[[i]], "table_footer") <- paste("Footer", i, "\n\n") } export_table(out) ``` Finally, you can even add multiple footer lines to each table, giving each a different color. In this case, each list element has to be a character vector of length 2 (the first element being the caption, and the second being the name of a colour). ```{r} # Colored table captions and multiple footers per table export_table( x, caption = list(c("Red Table 1", "red"), c("Blue Table 2", "bue"), c("Green Table 3", "green")), footer = list( list(c("Footer line 1\n", "green"), c("Second line\n\n", "red")), list(c("Footer line A\n", "blue"), c("Second line\n\n", "green")), list(c("Footer line I\n", "yellow"), c("Second line\n\n", "blue")) ) ) ``` insight/vignettes/figure3a.png0000644000175000017500000010352413502774207016310 0ustar nileshnileshPNG  IHDRYͨ sRGBgAMA a pHYs+IDATx^ Uՙ/UUTQ FШ81h0pĨ $>1iNn4>\qj1-(H4p,G0TEU}jm9k=>.s:{wku""""""""""""""^$""""""""""""""d%""""""""""""" AV"""""""""""""d%""""""""""""" AV"""""""""""""d%""""""""""""" AV"""""""""""""d%""""""""""""" AV"""""""""""""d%""""""""""""" AV"2K.U%"""""hxAD-[5k֨%""qY3fS' "c~ԨQbܸq|(lod%"*Aǂ dyʕ!""""xADdI&۷2|;w,Q>ގu2QA`6lؠ~mʔ)bɒ%jȌD!;eРA1cƈYf" nذa2*8DT9]CXz;vZ""""""} Nz 2fϞM0AXB-mҎs`"Ix|< """"" 8yÀT(8dA# rG AIDDvK; +՜5k'M𴬩'""""" pii?QvЎ3JD5 W°DDDDDDD& J3Y)s̑/7jlj(;㜓WZ% "./G?ΑDDDDDD~(5СC'+vAVN@:2C]f̘!֭[' $ZZZbF75x4F;u 1 / +ë́ Ċ+=?NvAV &N('|:mۦ ^cbO_-Ȋy1QhM4gcפIT7LLȁ0BqČ3_ld%Bj09  Ve&۳OQ][N| R-UIDa&+iT [(K19V+W7 WV8d=~$(a›4"*W^lj`\g7|_46_W5MH%f̶E%ڀ tQMi¨&LPK0ܤ;(I[V8d=~$ϴtSpBU15Q бkڅfQdk[jIeԦN#F/?K/Y`F]۪83_e1Pږ,YJz"'&xK8DdKX/^JDDwyh]rMxuMMDDyu+0=^Ɵ=2uяTxA=fRy XU,hMy$ю3J$3\<Λ7O :T>iNDWr>W=\+Dk;L0DiNCDhq|i55(f ֨v0j/LVʂ(gV%ȿ)S:? Yz|$y}nO}̙DD?Z3EOߘ9l")2Z>/}^$=N"d6tZDTL 3# hiS`zt@6Վ#5N Qy]s$"ʣW^.,_)YxLުDDE hwUUsƩva7>Dv`&k1!xQFaÆsʗMMJհ/,bJgdž.yyhW\խ/|=Ts嗜CE`ㅗ%߮S%b=p>R1YFvh#:p@1cƌl;vͲeTYՎ/\Pcdž.y}lMꦧVlhs{Ք9Y%֥W7"21lpJzÔ =ɚHEPN7N4>Jv0 IHDq WW; +=4z—4^Zbԩv 8q/UY^Xt7'V؇XG+/9Vl^yS#G/"r~}C,LVVE/h_-o"N!z2_ix_tA=1* ;cz9zU$𷸿Zc- z ׇsTdj`VZJDsbCd](o.{=Px+#( ғ}a. |7@'2'h`ہm5kM37iVa+<&9Nݘ1͛Ւ}Cy͟?_-cUbFlg/>O-%wST/P Z"WgVKzH9&v~L4JxΏ>,ZʪA@떹Z OmSK=>ZP]ۇST|^2_^uΜ9j^0 Q{>? ߿+?,Ytܣ« 6 {"ùpQFΒ=EQOqQI;m#w, :$a]` ]y׺EB=p/&L+VPKYҢJ]w| *84Fl8 x {OVؿA@Q~c˓:κE~q In1cƨ] pﰄUs[ϨRcr-۳\=wR$j:\#FP% 4H'Ag1equľ=ӧ8:ν?7mP|l7):8ǝЬms=Zi p$?|#=M;ۆB%$ı~LSrHu Yw$h WI{ 7q{A<9 Xg+|n7 D`78I$8磍Bpdgjk{AlZJ3 JDCy]jI}sb'R47g557|_-YKM9Ye p>`}n5S70.ۇDyGzQuLfܷ :~}IS3aLFҙN]Ys 1=d6Z"lӇDxZssGIzn;WE?v1BHZ++ +4vuV *M 8 *] I&R<WgxN5g`3 dIl_ZZmYv*<9NV% g{K?|+ۖ6 #sBD ,b=\z_s1+M^ 3b}Oami6;h&]!Qy(&խo*LViA =daq)n/ 18&ض>(mRvp>%ߏqiBy <S znڴINB J؍?^snf͛WuH =V\Ts9ia4x9Ay%0?(1%#Gj߻u{_uv#ZxyR8~c7Am޼O>dbRlM"ðM3TKvᜬh$y81\9탗>JŵVN}F!/|0=nܧ 7w)L'bۇs7uc 'l C_\yymUm7yp.*ོ3~WVtJ4jA#dR.G 'MJzxz閸47m;8c1JY<I)E9NV%1g.*Ma A/dW%U{;U>Lֺ YDTˎsuM&-d7&Jѱ}Ffti&m p? TGաD/oULy^I~0 5N=EO2iK%ol=FQ%ݎ[;\0ҝ〆̫b3\ܡ5? 42SoQ@ |1oG5|&mĠjZCϔUKG^.6xSk߹A)U( T˖gӱy^tc^T!^zyG+WTKݐe㸏niiQ%Z8>(Cݼ:LlWy3JQpV?C 鬙3gRtXwrԋ\yHq^\q3+|1oCdHO G1dp\0.:r*d`ߺT8 O]Lk{h1TIoϭ7 l1l\6㜬bZ|.>6Ӽۇl[Njy˭'~ ڵkUI/7ӹlV6uxJvx=X)Γ +^A[yLx} x T4:8ɓ݇YK;޴p]\e*E5S%r !/#@;`w~LzM[u :<bJږpY✬bZ|v\b Wf3Iktz=]έ'gzZܳau)H-=EOѣUZmjECtyRyX3#ʉLLðhLAM$fkqq1m]g7#&=X-U[T)L6AhvW;ZRֶ1kFљk: [㕗Կd! 
insight/vignettes/figure3a.png [binary PNG image data omitted]
insight/vignettes/figure3c.png [binary PNG image data omitted]
insight/vignettes/insight.Rmd

---
title: "Getting Started with Accessing Model Information"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{insight}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
if (!requireNamespace("lme4", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}
```

When fitting any statistical model, there are many useful pieces of information that are calculated and stored alongside the coefficient estimates and general model fit statistics. Although some generic functions to obtain model information and data exist, many package-specific modeling functions do not provide such methods, leaving users without easy access to this valuable information.

**insight** is an R package that fills this important gap by providing a suite of functions that support almost any model. The goal of **insight** is to provide tools that offer *easy*, *intuitive*, and *consistent* access to the information contained in model objects. These tools aid applied researchers in virtually any field who fit, diagnose, and present statistical models, by streamlining access to every aspect of many model objects via consistent syntax and output.

Built with non-programmers in mind, **insight** offers a broad toolbox for making model and data information easily accessible. While **insight** offers many useful functions for working with and understanding model objects (discussed below), we suggest users start with `model_info()`, as this function provides a clean and consistent overview of model objects (*e.g.*, functional form of the model, the model family, link function, number of observations, variables included in the specification, etc.). With a clear understanding of the model, users are able to adapt other functions for more nuanced exploration of, and interaction with, virtually any model object.

## Overview of Core Functions

A statistical model is an object describing the relationship between variables. Although there are many *different types* of models, each with its own specificities, most of them also share some *common components*. The goal of **insight** is to help you retrieve these components.

The `get_*` prefix extracts *values* (or *data*) associated with model-specific objects (e.g., parameters or variables), while the `find_*` prefix *lists* model-specific objects (e.g., priors or predictors). These are powerful families of functions allowing for great flexibility in use, whether at a high, descriptive level (`find_*`) or at a narrower level of statistical inspection and reporting (`get_*`). We point users to the package documentation or the complementary package website, https://easystats.github.io/insight/, for a detailed list of the arguments associated with each function as well as the values returned by each function.

```{r out.width="100%", echo=FALSE}
knitr::include_graphics("insight_design_1.png", dpi = 72)
```

## Definition of Model Components

The functions from **insight** address different components of a model. To avoid confusion about the specific "target" of each function, this section provides a short explanation of **insight**'s definitions of regression model components.

### Data

The dataset used to fit the model.
### Parameters

Values estimated or learned from data that capture the relationship between variables. In regression models, these are usually referred to as *coefficients*.

### Response and Predictors

* **response**: the outcome or response variable (dependent variable) of a regression model.
* **predictor**: independent variables of (the _fixed_ part of) a regression model. For mixed models, variables that appear only in the _random effects_ part of the model (i.e. grouping factors) are not returned as predictors by default. However, they can be included using additional arguments in the function call. Predictors are treated as "unique"; as such, if a variable appears as both a fixed effect and a random slope, it is treated as one (the same) predictor.

```{r out.width="65%", echo=FALSE}
knitr::include_graphics("figure3a.png", dpi = 72)
```

### Variables

Any unique variable names that appear in a regression model, e.g., the response variable, predictors, or random effects. A "variable" only relates to the unique occurrence of a term, i.e. the term name. For instance, the expression `x + poly(x, 2)` has only the variable `x`.

```{r out.width="80%", echo=FALSE}
knitr::include_graphics("figure3b.png", dpi = 72)
```

### Terms

Terms themselves consist of variable and factor names separated by operators, or involve arithmetic expressions. For instance, the expression `x + poly(x, 2)` has _one_ variable `x`, but _two_ terms `x` and `poly(x, 2)`.

```{r out.width="80%", echo=FALSE}
knitr::include_graphics("figure3c.png", dpi = 72)
```

### Random Effects

* **random slopes**: variables that are specified as random slopes in a mixed effects model.
* **random or grouping factors**: variables that are specified as grouping variables in a mixed effects model.

```{r out.width="65%", echo=FALSE}
knitr::include_graphics("figure3d.png", dpi = 72)
```

## Examples

*Aren't the predictors, terms, and parameters the same thing?*

In some cases, yes. But not in all cases, and sometimes it is useful to have the "bare" variable names, while at other times it is also useful to have the information about a possible transformation of variables. That is the main reason for having functions that cover similar aspects of a model object (like `find_terms()` and `find_predictors()` or `find_variables()`). Here are some examples that demonstrate the differences between these functions:

```{r echo=TRUE,message=FALSE,warning=FALSE}
library(insight)
library(lme4)

data(sleepstudy)
sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE)
sleepstudy$mysubgrp <- NA
sleepstudy$Weeks <- sleepstudy$Days / 7
sleepstudy$cat <- as.factor(sample(letters[1:4], nrow(sleepstudy), replace = TRUE))

for (i in 1:5) {
  filter_group <- sleepstudy$mygrp == i
  sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE)
}

model <- lmer(
  Reaction ~ Days + I(Days^2) + log1p(Weeks) + cat + (1 | mygrp / mysubgrp) + (1 + Days | Subject),
  data = sleepstudy
)
```

```{r echo=TRUE,message=FALSE,warning=FALSE}
# find the response variable
find_response(model)

# find all predictors, fixed part by default
find_predictors(model)

# find random effects, grouping factors only
find_random(model)

# find random slopes
find_random_slopes(model)

# find all predictors, including random effects
find_predictors(model, effects = "all", component = "all")

# find all terms, including response and random effects
# this is essentially the same as the previous example plus response
find_terms(model)
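# as an additional illustration (not in the original vignette): the
# `effects` argument also accepts "random", which restricts the result
# to the random-effects part of the model
find_predictors(model, effects = "random")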
# find all variables, i.e. also quadratic or log-transformed predictors
find_variables(model)
```

Finally, there is `find_parameters()`. Parameters are also known as *coefficients*, and `find_parameters()` does exactly that: it returns the model coefficients.

```{r echo=TRUE,message=FALSE,warning=FALSE}
# find model parameters, i.e. coefficients
find_parameters(model)
```

## Examples of Use Cases in R

We now provide some examples of use cases for the **insight** package. These examples probably do not cover typical real-world problems, but they serve as an illustration of the core idea of this package: the unified interface to access model information. **insight** should help both users and package developers by reducing the hassle caused by the many exceptions across modelling packages when accessing model information.

### Making Predictions at Specific Values of a Term of Interest

Say the goal is to make predictions for a certain term, holding the remaining covariates constant. This is achieved by calling `predict()` and feeding the `newdata` argument with the values of the term of interest as well as the "constant" values for the remaining covariates. The functions `get_data()` and `find_predictors()` are used to get this information, which can then be used in the call to `predict()`. In this example, we fit a simple linear model, but it could be replaced by (m)any other models, so this approach is "universal" and applies to many different model objects.

``` r
library(insight)
m <- lm(
  Sepal.Length ~ Species + Petal.Width + Sepal.Width,
  data = iris
)

dat <- get_data(m)
pred <- find_predictors(m, flatten = TRUE)

l <- lapply(pred, function(x) {
  if (is.numeric(dat[[x]])) mean(dat[[x]]) else unique(dat[[x]])
})

names(l) <- pred
l <- as.data.frame(l)

cbind(l, predictions = predict(m, newdata = l))
#>      Species Petal.Width Sepal.Width predictions
#> 1     setosa    1.199333    3.057333    5.101427
#> 2 versicolor    1.199333    3.057333    6.089557
#> 3  virginica    1.199333    3.057333    6.339015
```

### Printing Model Coefficients

The next example emphasizes the possibility of generalizing functions to many different model objects using **insight**. The aim is simply to print coefficients in a complete, human-readable sentence. The first approach uses the functions that are available for some, but obviously not for all, models to access the information about model coefficients.

``` r
print_params <- function(model) {
  paste0(
    "My parameters are ",
    paste0(row.names(summary(model)$coefficients), collapse = ", "),
    ", thank you for your attention!"
  )
}

m1 <- lm(Sepal.Length ~ Petal.Width, data = iris)
print_params(m1)
#> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!"

# obviously, something is missing in the output
m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris)
print_params(m2)
#> [1] "My parameters are , thank you for your attention!"
```

As we can see, the function fails for *gam*-models. Because access to model information depends on the model type, we would need to create specific functions for all model types in the R ecosystem. With **insight**, users can write such a function without having to worry about the model type.

``` r
print_params <- function(model) {
  paste0(
    "My parameters are ",
    paste0(insight::find_parameters(model, flatten = TRUE), collapse = ", "),
    ", thank you for your attention!"
  )
}

m1 <- lm(Sepal.Length ~ Petal.Width, data = iris)
print_params(m1)
#> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!"
m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris)
print_params(m2)
#> [1] "My parameters are (Intercept), Petal.Width, s(Petal.Length), thank you for your attention!"
```

## Examples of Use Cases in R packages

**insight** is already used by different packages to solve problems that typically occur when the users' inputs are model objects of varying type and complexity.

For example, [**ggeffects**](https://strengejacke.github.io/ggeffects/), a package that computes and visualizes marginal effects of regression models, requires extraction of the data (`get_data()`) that was used to fit the models, and also the retrieval of all model predictors (`find_predictors()`) to decide which covariates are held constant when computing marginal effects. All of this information is required in order to create a data frame for `predict(newdata=)`. Furthermore, the models' link functions (`link_function()`) and link-inverse functions (`link_inverse()`) are required to obtain predictions on the model's response scale.

The [**sjPlot**](https://strengejacke.github.io/sjPlot/) package creates plots or summary tables from regression models, and uses **insight** functions to get model information (`model_info()` or `find_response()`), which is used to build the components of the final plot or table. This information helps, for example, in labeling table columns by providing information on the effect type (odds ratio, incidence rate ratio, etc.), or on the different model components, which are used to split plots and tables into the "conditional" and "zero-inflated" parts of a model in the case of models with zero-inflation.

[**bayestestR**](https://easystats.github.io/bayestestR/) mainly relies on `get_priors()` and `get_parameters()` to retrieve the necessary information to compute various indices or statistics of Bayesian models (like HDI, Credible Interval, MCSE, effective sample size, Bayes factors, etc.). The advantage of `get_parameters()` in this context is that, regardless of the number of parameters the posterior distribution has, the necessary data can be easily accessed from the model objects. There is no need to write original, complicated code or regular expressions.

A last example is the [**performance**](https://easystats.github.io/performance/) package, which provides functions for computing measures to assess model quality. Many of these indices (e.g. checks for overdispersion or zero-inflation, predictive accuracy, logloss, RMSE, etc.) require the number of observations (`n_obs()`) or the data from the response variable (`get_response()`). Again, in this context, functions from **insight** are helpful, because they offer unified access to this information.

insight/build/
insight/build/vignette.rds [binary data omitted]
insight/build/partial.rdb [binary data omitted]
insight/tests/
insight/tests/testthat/
insight/tests/testthat/test-ellipses_info.R

if (requiet("testthat") && requiet("insight")) {
  data(iris)
  m1 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris)
  m2 <- lm(Sepal.Length ~ Species, data = iris)
  m3 <- lm(Sepal.Length ~ Species, data = iris)
  m4 <- lm(Sepal.Length ~ 1, data = iris)

  test_that("ellipses_info", {
    expect_message(ellipsis_info(m1, m2, m3, m4))
  })

  info <- ellipsis_info(m1, m2, m4)
  test_that("ellipses_info", {
    expect_equal(attributes(info)$is_nested, TRUE)
    expect_equal(attributes(info)$is_nested_decreasing, TRUE)
    expect_equal(attributes(info)$is_nested_increasing, FALSE)
  })

  info <- ellipsis_info(m4, m3, m1)
  test_that("ellipses_info", {
    expect_equal(attributes(info)$is_nested, TRUE)
    expect_equal(attributes(info)$is_nested_decreasing, FALSE)
    expect_equal(attributes(info)$is_nested_increasing, TRUE)
  })

  test_that("ellipses_info - single model", {
    out <- ellipsis_info(m1)
    expect_equal(out, m1)
  })
}

insight/tests/testthat/test-export_table.R

if (requiet("testthat") && requiet("insight")) {
  d <- data.frame(a = c(1.3, 2, 543), b = c("ab", "cd", "abcde"), stringsAsFactors = FALSE)

  test_that("export_table", {
    out <- capture.output(cat(export_table(d)))
    expect_equal(out, c(
      " a | b",
      "--------------",
      " 1.30 | ab",
      " 2.00 | cd",
      "543.00 | abcde"
    ))
  })

  test_that("export_table", {
    out <- capture.output(cat(export_table(d, sep = " ", header = "*", digits = 1)))
    expect_equal(out, c(
      " a b",
      "***********",
      " 1.3 ab",
      " 2.0 cd",
      "543.0 abcde"
    ))
  })

  test_that("export_table", {
    out <- export_table(d, format = "md")
    expect_equal(out, structure(c(
      "| a| b|",
      "|------:|-----:|",
      "| 1.30| ab|",
      "| 2.00| cd|",
      "| 543.00| abcde|"
    ),
    format = "pipe", class = c("knitr_kable", "character")
    ))
  })

  d <- data.frame(a = c(1.3, 2, 543), b = c("ab", "cd", "abcde"), stringsAsFactors = FALSE)
  attr(d, "table_caption") <- "Table Title"
  test_that("export_table", {
    out <- export_table(d, format = "md")
    expect_equal(
      out,
      structure(c(
        "Table: Table Title",
        "",
        "| a| b|",
        "|------:|-----:|",
        "| 1.30| ab|",
        "| 2.00| cd|",
        "| 543.00| abcde|"
      ),
      format = "pipe", class = c("knitr_kable", "character")
      )
    )
  })

  d <- data.frame(a = c(1.3, 2, 543), b = c("ab", "cd", "abcde"), stringsAsFactors = FALSE)
  attr(d, "table_title") <- "Table Title"
  test_that("export_table", {
    out <- export_table(d, format = "md")
    expect_equal(
      out,
      structure(c(
        "Table: Table Title",
        "",
        "| a| b|",
        "|------:|-----:|",
        "| 1.30| ab|",
        "| 2.00| cd|",
        "| 543.00| abcde|"
      ),
      format = "pipe", class = c("knitr_kable", "character")
      )
    )
  })

  d <- data.frame(a = c(1.3, 2, 543), b = c("ab", "cd", "abcde"), stringsAsFactors = FALSE)
  test_that("export_table", {
    out <- export_table(d, format = "md", title = "Table Title")
    expect_equal(
      out,
      structure(c(
        "Table: Table Title",
        "",
        "| a| b|",
        "|------:|-----:|",
        "| 1.30| ab|",
        "| 2.00| cd|",
        "| 543.00| abcde|"
      ),
      format = "pipe", class = c("knitr_kable", "character")
      )
    )
  })

  d <-
data.frame(a = c(1.3, 2, 543), b = c("ab", "cd", "abcde"), stringsAsFactors = FALSE) attr(d, "table_caption") <- "Table Title" attr(d, "table_footer") <- list("first", "second", "third") test_that("export_table", { out <- export_table(d, format = "md") expect_equal( out, structure(c( "Table: Table Title", "", "| a| b|", "|------:|-----:|", "| 1.30| ab|", "| 2.00| cd|", "| 543.00| abcde|", "first", "second", "third" ), format = "pipe", class = c("knitr_kable", "character") ) ) }) } insight/tests/testthat/test-mvrstanarm.R0000644000175000017500000006103214122064326020347 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" .runStanTest <- Sys.getenv("RunAllinsightStanTests") == "yes" if (.runThisTest && .runStanTest && suppressWarnings(requiet("testthat") && requiet("insight") && requiet("rstanarm"))) { data("pbcLong") m1 <- download_model("stanmvreg_1") test_that("clean_names", { expect_identical( clean_names(m1), c("logBili", "albumin", "year", "id", "sex") ) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( y1 = list(conditional = "year"), y2 = list(conditional = c("sex", "year")) ) ) expect_identical(find_predictors(m1, flatten = TRUE), c("year", "sex")) expect_identical( find_predictors(m1, effects = "all", component = "all"), list( y1 = list(conditional = "year", random = "id"), y2 = list( conditional = c("sex", "year"), random = "id" ) ) ) expect_identical( find_predictors( m1, effects = "all", component = "all", flatten = TRUE ), c("year", "id", "sex") ) }) test_that("find_response", { expect_equal( find_response(m1, combine = TRUE), c(y1 = "logBili", y2 = "albumin") ) expect_equal( find_response(m1, combine = FALSE), c(y1 = "logBili", y2 = "albumin") ) }) test_that("get_response", { expect_equal(nrow(get_response(m1)), 304) expect_equal(colnames(get_response(m1)), c("logBili", "albumin")) }) test_that("find_statistic", { expect_null(find_statistic(m1)) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = c(y1 = "logBili", y2 = "albumin"), y1 = list(conditional = "year", random = "id"), y2 = list( conditional = c("sex", "year"), random = "id" ) ) ) expect_identical( find_variables(m1, flatten = TRUE), c("logBili", "albumin", "year", "id", "sex") ) expect_identical( find_variables(m1, effects = "random"), list( response = c(y1 = "logBili", y2 = "albumin"), y1 = list(random = "id"), y2 = list(random = "id") ) ) }) test_that("find_terms", { expect_identical( find_terms(m1), list( y1 = list( response = "logBili", conditional = "year", random = "id" ), y2 = list( response = "albumin", conditional = c("sex", "year"), random = c("year", "id") ) ) ) expect_identical( find_terms(m1, flatten = TRUE), c("logBili", "year", "id", "albumin", "sex") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 304) }) test_that("find_paramaters", { expect_equal( find_parameters(m1), structure(list( y1 = list( conditional = c("(Intercept)", "year"), random = sprintf("b[(Intercept) id:%i]", 1:40), sigma = "sigma" ), y2 = list( conditional = c("(Intercept)", "sexf", "year"), random = sprintf( c("b[(Intercept) id:%i]", "b[year id:%i]"), rep(1:40, each = 2) ), sigma = "sigma" ) ), is_mv = "1" ) ) expect_equal( find_parameters(m1, effects = "fixed"), structure(list( y1 = list( conditional = c("(Intercept)", "year"), sigma = "sigma" ), y2 = list( conditional = c("(Intercept)", "sexf", "year"), sigma = "sigma" ) ), is_mv = "1" ) ) expect_equal( find_parameters(m1, effects = "random"), structure(list( y1 = list(random = 
sprintf("b[(Intercept) id:%i]", 1:40)), y2 = list(random = sprintf( c("b[(Intercept) id:%i]", "b[year id:%i]"), rep(1:40, each = 2) )) ), is_mv = "1" ) ) }) test_that("get_paramaters", { expect_equal( colnames(get_parameters(m1)), c( "y1|(Intercept)", "y1|year", "y2|(Intercept)", "y2|sexf", "y2|year" ) ) expect_equal( colnames(get_parameters(m1, effects = "all")), c( "y1|(Intercept)", "y1|year", sprintf("b[y1|(Intercept) id:%i]", 1:40), "y2|(Intercept)", "y2|sexf", "y2|year", sprintf( c("b[y2|(Intercept) id:%i]", "b[y2|year id:%i]"), rep(1:40, each = 2) ) ) ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_length(link_function(m1), 2) }) test_that("linkinv", { expect_false(is.null(link_inverse(m1))) expect_length(link_inverse(m1), 2) }) test_that("is_multivariate", { expect_true(is_multivariate(m1)) }) test_that("clean_parameters", { expect_identical( clean_parameters(m1), structure( list( Parameter = c( "(Intercept)", "year", "(Intercept)", "sexf", "year", "b[(Intercept) id:1]", "b[(Intercept) id:2]", "b[(Intercept) id:3]", "b[(Intercept) id:4]", "b[(Intercept) id:5]", "b[(Intercept) id:6]", "b[(Intercept) id:7]", "b[(Intercept) id:8]", "b[(Intercept) id:9]", "b[(Intercept) id:10]", "b[(Intercept) id:11]", "b[(Intercept) id:12]", "b[(Intercept) id:13]", "b[(Intercept) id:14]", "b[(Intercept) id:15]", "b[(Intercept) id:16]", "b[(Intercept) id:17]", "b[(Intercept) id:18]", "b[(Intercept) id:19]", "b[(Intercept) id:20]", "b[(Intercept) id:21]", "b[(Intercept) id:22]", "b[(Intercept) id:23]", "b[(Intercept) id:24]", "b[(Intercept) id:25]", "b[(Intercept) id:26]", "b[(Intercept) id:27]", "b[(Intercept) id:28]", "b[(Intercept) id:29]", "b[(Intercept) id:30]", "b[(Intercept) id:31]", "b[(Intercept) id:32]", "b[(Intercept) id:33]", "b[(Intercept) id:34]", "b[(Intercept) id:35]", "b[(Intercept) id:36]", "b[(Intercept) id:37]", "b[(Intercept) id:38]", "b[(Intercept) id:39]", "b[(Intercept) id:40]", "b[(Intercept) id:1]", "b[year id:1]", "b[(Intercept) id:2]", "b[year id:2]", "b[(Intercept) id:3]", "b[year id:3]", "b[(Intercept) id:4]", "b[year id:4]", "b[(Intercept) id:5]", "b[year id:5]", "b[(Intercept) id:6]", "b[year id:6]", "b[(Intercept) id:7]", "b[year id:7]", "b[(Intercept) id:8]", "b[year id:8]", "b[(Intercept) id:9]", "b[year id:9]", "b[(Intercept) id:10]", "b[year id:10]", "b[(Intercept) id:11]", "b[year id:11]", "b[(Intercept) id:12]", "b[year id:12]", "b[(Intercept) id:13]", "b[year id:13]", "b[(Intercept) id:14]", "b[year id:14]", "b[(Intercept) id:15]", "b[year id:15]", "b[(Intercept) id:16]", "b[year id:16]", "b[(Intercept) id:17]", "b[year id:17]", "b[(Intercept) id:18]", "b[year id:18]", "b[(Intercept) id:19]", "b[year id:19]", "b[(Intercept) id:20]", "b[year id:20]", "b[(Intercept) id:21]", "b[year id:21]", "b[(Intercept) id:22]", "b[year id:22]", "b[(Intercept) id:23]", "b[year id:23]", "b[(Intercept) id:24]", "b[year id:24]", "b[(Intercept) id:25]", "b[year id:25]", "b[(Intercept) id:26]", "b[year id:26]", "b[(Intercept) id:27]", "b[year id:27]", "b[(Intercept) id:28]", "b[year id:28]", "b[(Intercept) id:29]", "b[year id:29]", "b[(Intercept) id:30]", "b[year id:30]", "b[(Intercept) id:31]", "b[year id:31]", "b[(Intercept) id:32]", "b[year id:32]", "b[(Intercept) id:33]", "b[year id:33]", "b[(Intercept) id:34]", "b[year id:34]", "b[(Intercept) id:35]", "b[year id:35]", "b[(Intercept) id:36]", "b[year id:36]", "b[(Intercept) id:37]", "b[year id:37]", "b[(Intercept) id:38]", "b[year id:38]", "b[(Intercept) id:39]", "b[year id:39]", "b[(Intercept) id:40]", 
"b[year id:40]", "sigma", "sigma" ), Effects = c( "fixed", "fixed", "fixed", "fixed", "fixed", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "fixed", "fixed" ), Component = c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "sigma", "sigma" ), Group = c( "", "", "", "", "", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", 
"Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "Intercept: id", "year: id", "", "" ), Response = c( "y1", "y1", "y2", "y2", "y2", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y1", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y2", "y1", "y2" ), Cleaned_Parameter = c( "(Intercept)", "year", "(Intercept)", "sexf", "year", "id:1", "id:2", "id:3", "id:4", "id:5", "id:6", "id:7", "id:8", "id:9", "id:10", "id:11", "id:12", "id:13", "id:14", "id:15", "id:16", "id:17", "id:18", "id:19", "id:20", "id:21", "id:22", "id:23", "id:24", "id:25", "id:26", "id:27", "id:28", "id:29", "id:30", "id:31", "id:32", "id:33", "id:34", "id:35", "id:36", "id:37", "id:38", "id:39", "id:40", "id:1", "id:1", "id:2", "id:2", "id:3", "id:3", "id:4", "id:4", "id:5", "id:5", "id:6", "id:6", "id:7", "id:7", "id:8", "id:8", "id:9", "id:9", "id:10", "id:10", "id:11", "id:11", "id:12", "id:12", "id:13", "id:13", "id:14", "id:14", "id:15", "id:15", "id:16", "id:16", "id:17", "id:17", "id:18", "id:18", "id:19", "id:19", "id:20", "id:20", "id:21", "id:21", "id:22", "id:22", "id:23", "id:23", "id:24", "id:24", "id:25", "id:25", "id:26", "id:26", "id:27", "id:27", "id:28", "id:28", "id:29", "id:29", "id:30", "id:30", "id:31", "id:31", "id:32", "id:32", "id:33", "id:33", "id:34", "id:34", "id:35", "id:35", "id:36", "id:36", "id:37", "id:37", "id:38", "id:38", "id:39", "id:39", "id:40", "id:40", "sigma", "sigma" ) ), class = c("clean_parameters", "data.frame"), row.names = c(NA, 
-127L) ) ) }) } insight/tests/testthat/test-gee.R0000644000175000017500000000535714122064326016725 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("gee")) { data(warpbreaks) void <- capture.output(suppressMessages( m1 <- gee(breaks ~ tension, id = wool, data = warpbreaks) )) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = "tension")) expect_identical(find_predictors(m1, flatten = TRUE), "tension") expect_identical( find_predictors(m1, effects = "random"), list(random = "wool") ) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("tension", "wool") ) }) test_that("find_response", { expect_identical(find_response(m1), "breaks") }) test_that("get_response", { expect_equal(get_response(m1), warpbreaks$breaks) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "wool")) }) test_that("get_random", { expect_equal(get_random(m1), warpbreaks[, "wool", drop = FALSE]) }) test_that("get_predictors", { expect_equal(get_predictors(m1), warpbreaks[, "tension", drop = FALSE]) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 54) expect_equal(colnames(get_data(m1)), c("breaks", "tension", "wool")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("breaks ~ tension"), random = as.formula("~wool") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "breaks", conditional = "tension", random = "wool" ) ) expect_equal( find_terms(m1, flatten = TRUE), c("breaks", "tension", "wool") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 54) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c( "(Intercept)", "tensionM", "tensionH" )) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "tensionM", "tensionH") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-epiR.R0000644000175000017500000000213714122064326017055 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (!osx && requiet("testthat") && requiet("insight") && requiet("epiR")) { dat <- matrix(c(13, 2163, 5, 3349), nrow = 2, byrow = TRUE) rownames(dat) <- c("DF+", "DF-") colnames(dat) <- c("FUS+", "FUS-") # model m <- epi.2by2( dat = as.table(dat), method = "cohort.count", conf.level = 0.95, units = 100, outcome = "as.columns" ) params <- get_parameters(m) test_that("get_parameters", { expect_equal( params$Estimate, c(4.00754, 4.02561, 0.44835, 0.75047, 0.17642, 0.54201), tolerance = 1e-3 ) expect_equal( params$Parameter, c("RR", "OR", "ARisk", "AFRisk", "PARisk", "PAFRisk") ) }) stat <- get_statistic(m) test_that("get_statistic", { expect_equal(stat$Statistic, 8.177135, tolerance = 1e-3) expect_equal(stat$Parameter, "Chi2") }) } insight/tests/testthat/test-cpglmm.R0000644000175000017500000001032714155570717017451 0ustar nileshnileshif 
(requiet("testthat") && requiet("insight") && requiet("cplm")) { data("FineRoot") m1 <- cpglmm(RLD ~ Stock + Spacing + (1 | Plant), data = FineRoot) test_that("model_info", { expect_true(model_info(m1)$is_count) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_equal( find_predictors(m1, effects = "all"), list(conditional = c("Stock", "Spacing"), random = "Plant") ) expect_equal( find_predictors(m1, effects = "all", flatten = TRUE), c("Stock", "Spacing", "Plant") ) expect_equal( find_predictors(m1, effects = "fixed"), list(conditional = c("Stock", "Spacing")) ) expect_equal( find_predictors(m1, effects = "fixed", flatten = TRUE), c("Stock", "Spacing") ) expect_equal( find_predictors(m1, effects = "random"), list(random = "Plant") ) expect_equal( find_predictors(m1, effects = "random", flatten = TRUE), "Plant" ) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "Plant")) expect_equal(find_random(m1, flatten = TRUE), "Plant") }) test_that("find_response", { expect_identical(find_response(m1), "RLD") }) test_that("get_response", { expect_equal(get_response(m1), FineRoot$RLD) }) test_that("get_data", { expect_equal(colnames(get_data(m1)), c("RLD", "Stock", "Spacing", "Plant")) expect_equal(colnames(get_data(m1, effects = "all")), c("RLD", "Stock", "Spacing", "Plant")) expect_equal(colnames(get_data(m1, effects = "random")), "Plant") }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1, component = "conditional"), list( conditional = as.formula("RLD ~ Stock + Spacing"), random = as.formula("~1 | Plant") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_identical( find_terms(m1), list( response = "RLD", conditional = c("Stock", "Spacing"), random = "Plant" ) ) expect_identical( find_terms(m1, flatten = TRUE), c("RLD", "Stock", "Spacing", "Plant") ) }) test_that("link_function", { expect_equal(link_function(m1)(.2), log(.2), tolerance = 1e-3) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-3) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = "RLD", conditional = c("Stock", "Spacing"), random = "Plant" ) ) expect_identical( find_variables(m1, flatten = TRUE), c("RLD", "Stock", "Spacing", "Plant") ) }) test_that("get_predictors", { expect_identical(colnames(get_predictors(m1)), c("Stock", "Spacing")) }) test_that("get_random", { expect_identical(colnames(get_random(m1)), "Plant") }) test_that("clean_names", { expect_identical(clean_names(m1), c("RLD", "Stock", "Spacing", "Plant")) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "StockMM106", "StockMark", "Spacing5x3"), random = list(Plant = c("(Intercept)")) ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "StockMM106", "StockMark", "Spacing5x3")) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) if (getRversion() > "3.6.3") { test_that("get_variance", { skip_on_cran() expect_equal( suppressWarnings(get_variance(m1)), list( var.fixed = 0.1687617, var.random = 0.0002706301, var.residual = 2.763129, var.distribution = 2.763129, var.dispersion = 0, var.intercept = c(Plant = 0.0002706301) ), tolerance = 1e-3 ) }) } test_that("find_random_slopes", { expect_null(find_random_slopes(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } 
insight/tests/testthat/test-panelr.R0000644000175000017500000001524314145421074017443 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("panelr")) { data("WageData") wages <- panel_data(WageData, id = id, wave = t) m1 <- wbm(lwage ~ lag(union) + wks | blk + fem | blk * lag(union), data = wages) m2 <- wbm(lwage ~ lag(union) + wks | blk + t | (t | id), data = wages) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m2)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("union", "wks"), instruments = c("blk", "fem"), interactions = c("blk", "union") ) ) expect_identical( find_predictors(m1, flatten = TRUE), c("union", "wks", "blk", "fem") ) expect_null(find_predictors(m1, effects = "random")) expect_identical( find_predictors(m2), list( conditional = c("union", "wks"), instruments = c("blk", "t") ) ) expect_identical(find_predictors(m2, effects = "random"), list(random = "id")) }) test_that("find_random", { expect_null(find_random(m1)) expect_identical(find_random(m2), list(random = "id")) }) test_that("get_random", { expect_warning(expect_null(get_random(m1))) expect_equal(get_random(m2)[[1]], model.frame(m2)$id) }) test_that("find_response", { expect_identical(find_response(m1), "lwage") }) test_that("get_response", { expect_equal(get_response(m1), model.frame(m1)$lwage) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c("lag(union)", "wks", "blk", "fem") ) expect_equal( colnames(get_predictors(m2)), c("lag(union)", "wks", "blk", "t") ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("clean_parameters", { cp <- clean_parameters(m1) expect_equal( cp$Cleaned_Parameter, c( "union", "wks", "(Intercept)", "imean(lag(union))", "imean(wks)", "blk", "fem", "union:blk" ) ) expect_equal( cp$Component, c( "conditional", "conditional", "instruments", "instruments", "instruments", "instruments", "instruments", "interactions" ) ) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 3570) expect_equal( colnames(get_data(m1)), c( "lwage", "id", "t", "lag(union)", "wks", "blk", "fem", "imean(lag(union))", "imean(wks)", "imean(lag(union):blk)", "lag(union):blk" ) ) expect_equal( colnames(get_data(m2)), c( "lwage", "id", "t", "lag(union)", "wks", "blk", "imean(lag(union))", "imean(wks)" ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 3) expect_equal( find_formula(m1), list( conditional = as.formula("lwage ~ lag(union) + wks"), instruments = as.formula("~blk + fem"), interactions = as.formula("~blk * lag(union)") ), ignore_attr = TRUE ) expect_equal( find_formula(m2), list( conditional = as.formula("lwage ~ lag(union) + wks"), instruments = as.formula("~blk + t"), random = as.formula("~t | id") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "lwage", conditional = c("union", "wks"), instruments = c("blk", "fem"), interactions = c("blk", "union") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("lwage", "union", "wks", "blk", "fem") ) expect_equal( find_variables(m2), list( response = "lwage", conditional = c("union", "wks"), instruments = c("blk", "t"), random = "id" ) ) expect_equal( find_variables(m2, flatten = TRUE), c("lwage", "union", "wks", "blk", "t", "id") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 3570) expect_equal(n_obs(m2), 3570) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) 
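  # The checks below verify that parameters of a wbm() model are assigned to the
  # correct model parts: within-effects ("conditional"), between-effects
  # ("instruments"), and the cross-level interaction terms.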
test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("lag(union)", "wks"), instruments = c("(Intercept)", "imean(lag(union))", "imean(wks)", "blk", "fem"), random = "lag(union):blk" ) ) expect_equal(nrow(get_parameters(m1)), 8) expect_equal( find_parameters(m2), list( conditional = c("lag(union)", "wks"), instruments = c("(Intercept)", "imean(lag(union))", "imean(wks)", "blk", "t") ) ) }) test_that("get_parameters", { expect_equal( get_parameters(m1), data.frame( Parameter = c( "lag(union)", "wks", "(Intercept)", "imean(lag(union))", "imean(wks)", "blk", "fem", "lag(union):blk" ), Estimate = c( 0.0582474262882615, -0.00163678667081885, 6.59813245629044, -0.0279959204722801, 0.00438047648390025, -0.229414915661438, -0.441756913071962, -0.127319623945541 ), Component = c( "within", "within", "between", "between", "between", "between", "between", "interactions" ), stringsAsFactors = FALSE ), tolerance = 1e-4 ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "lwage", conditional = c("lag(union)", "wks"), instruments = c("blk", "fem"), interactions = c("blk", "lag(union)") ) ) expect_equal( find_terms(m2), list( response = "lwage", conditional = c("lag(union)", "wks"), instruments = c("blk", "t"), random = c("t", "id") ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") expect_identical(find_statistic(m2), "t-statistic") }) .runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { v <- get_variance(m1) expect_equal(v$var.intercept, c(id = 0.125306895731005), tolerance = 1e-4) expect_equal(v$var.fixed, 0.0273792999320531, tolerance = 1e-4) } } insight/tests/testthat/test-model_info.R0000644000175000017500000000077414122064326020276 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("BayesFactor")) { model <- BayesFactor::proportionBF(15, 25, p = 0.5) mi <- insight::model_info(model) test_that("model_info-BF-proptest", { expect_true(mi$is_binomial) expect_false(mi$is_linear) }) model <- prop.test(15, 25, p = 0.5) mi <- insight::model_info(model) test_that("model_info-BF-proptest", { expect_true(mi$is_binomial) expect_false(mi$is_linear) expect_false(mi$is_correlation) }) } insight/tests/testthat/test-get_priors.R0000644000175000017500000000116514122064326020333 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" .runStanTest <- Sys.getenv("RunAllinsightStanTests") == "yes" if (.runThisTest && .runStanTest && requiet("testthat") && requiet("insight") && requiet("brms")) { data(mtcars) set.seed(123) model <- brms::brm(mpg ~ wt, data = mtcars, seed = 1, refresh = 0) priors <- insight::get_priors(model) test_that("get_priors", { expect_equal(priors$Location, c(19.2, NA, 0), tolerance = 1e-3) expect_equal(priors$Distribution, c("student_t", "uniform", "student_t")) expect_equal(priors$Parameter, c("b_Intercept", "b_wt", "sigma")) }) } insight/tests/testthat/test-format.R0000644000175000017500000001500114122064326017440 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { test_that("format_value", { expect_equal(nchar(format_value(1.2012313)), 4) expect_equal(format_value(4.2, protect_integers = TRUE), "4.20") expect_equal(format_value(4.0, protect_integers = TRUE), "4") expect_equal(format_value(0, protect_integers = TRUE), "0") expect_equal(format_value(0), "0.00") expect_equal(format_value(1234565789101112), "1.23e+15") 
expect_equal(format_value(1234565789101112, protect_integers = TRUE), "1234565789101112") expect_equal(format_value(0.0000000123), "1.23e-08") expect_equal(format_value(0.0000000123, zap_small = TRUE), "0.00") expect_equal(format_value(0.0000000123, digits = 8), "0.00000001") expect_equal(format_value(0.95, as_percent = TRUE), "95.00%") expect_equal(format_value(0.000001, as_percent = TRUE), "1.00e-04%") expect_equal(format_value(0.000001, as_percent = TRUE, zap_small = TRUE), "0.00%") }) test_that("format_value", { expect_equal(format_value(0.0045, zap_small = TRUE), "0.00") expect_equal(format_value(0.0045), "4.50e-03") expect_equal(format_value(0.00045), "4.50e-04") expect_equal(format_value(0.00045, digits = 3), "4.500e-04") expect_equal(format_value(0.00045, digits = 4), "0.0004") }) test_that("format_ci", { expect_equal( format_ci(c(123, 123, 123, 123), c(123, 12345, 123456, 123456789012), width = "auto"), c("95% CI [123.00, 123.00]", "95% CI [123.00, 12345.00]", "95% CI [123.00, 1.23e+05]", "95% CI [123.00, 1.23e+11]") ) expect_equal( format_ci(c(123, 123, 123, 123), c(123, 12345, 123456, 123456789012), width = "auto", digits = 5), c( "95% CI [123.00000, 123.00000]", "95% CI [123.00000, 12345.00000]", "95% CI [123.00000, 1.23456e+05]", "95% CI [123.00000, 1.23457e+11]" ) ) expect_equal( format_ci(c(123, 123, 123, 123), c(123, 12345, 123456, 123456789012), width = "auto", digits = 0), c("95% CI [123, 123]", "95% CI [123, 12345]", "95% CI [123, 1e+05]", "95% CI [123, 1e+11]") ) expect_equal(format_ci(1.24, 0.0000054), "95% CI [1.24, 5.40e-06]") expect_equal(format_ci(1.24, 0.0000054, digits = 0), "95% CI [1, 5e-06]") expect_equal(format_ci(1.24, 0.0000054, zap_small = TRUE), "95% CI [1.24, 0.00]") expect_equal(format_ci(1.24, 0.0000054, zap_small = TRUE, digits = 0), "95% CI [1, 0]") }) test_that("format others", { expect_true(is.character(insight::format_pd(0.02))) expect_equal(nchar(format_bf(4)), 9) expect_true(is.character(format_rope(0.02))) }) test_that("format_number", { expect_equal(format_number(2), "two") expect_equal(format_number(45), "forty five") expect_equal(format_number(2), "two") }) test_that("format_p", { expect_equal(nchar(format_p(0.02)), 9) expect_equal(nchar(format_p(0.02, stars = TRUE)), 10) expect_equal(nchar(format_p(0.02, stars_only = TRUE)), 1) }) test_that("format_table, other CI columns", { x <- data.frame(test_CI = .9, test_CI_low = .1, test_CI_high = 1.3) test <- utils::capture.output(format_table(x)) expect_equal(test, c(" test 90% CI", "1 [0.10, 1.30]")) x <- data.frame( CI = .8, CI_low = 2.43, CI_high = 5.453, test_CI = .9, test_CI_low = .1, test_CI_high = 1.3 ) test <- utils::capture.output(format_table(x)) expect_equal(test, c(" 80% CI test 90% CI", "1 [2.43, 5.45] [0.10, 1.30]")) x <- data.frame(CI_low = 2.43, CI_high = 5.453, test_CI_low = .1, test_CI_high = 1.3) attr(x, "ci") <- .8 attr(x, "ci_test") <- .9 test <- utils::capture.output(format_table(x)) expect_equal(test, c(" 80% CI test 90% CI", "1 [2.43, 5.45] [0.10, 1.30]")) x <- data.frame( CI_low = 2.43, CI_high = 5.453, test_CI_low = .1, test_CI_high = 1.3, other_CI_low = .12, other_CI_high = 1.4 ) attr(x, "ci") <- .8 attr(x, "ci_test") <- .9 test <- utils::capture.output(format_table(x)) expect_equal(test, c(" 80% CI test 80% CI other 80% CI", "1 [2.43, 5.45] [0.10, 1.30] [0.12, 1.40]")) }) test_that("format_table, multiple CI columns", { d <- data.frame( Parameter = c("(Intercept)", "wt", "cyl"), Coefficient = c(39.69, -3.19, -1.51), SE = c(1.71, 0.76, 0.41), CI_low_0.8 = c(37.44, -4.18, 
-2.05), CI_high_0.8 = c(41.94, -2.2, -0.96), CI_low_0.9 = c(36.77, -4.48, -2.21), CI_high_0.9 = c(42.6, -1.9, -0.8), t = c(23.14, -4.22, -3.64), df_error = c(29, 29, 29), stringsAsFactors = FALSE ) attr(d, "ci") <- c(.8, .9) expect_equal( format_table(d), data.frame( Parameter = c("(Intercept)", "wt", "cyl"), Coefficient = c("39.69", "-3.19", "-1.51"), SE = c("1.71", "0.76", "0.41"), `80% CI` = c("[37.44, 41.94]", "[-4.18, -2.20]", "[-2.05, -0.96]"), `90% CI` = c("[36.77, 42.60]", "[-4.48, -1.90]", "[-2.21, -0.80]"), `t(29)` = c("23.14", "-4.22", "-3.64"), stringsAsFactors = FALSE ), ignore_attr = TRUE ) # d <- data.frame( # Parameter = c("(Intercept)", "wt", "cyl"), # Coefficient = c(39.69, -3.19, -1.51), # SE = c(1.71, 0.76, 0.41), # CI_low_0.8 = c(37.44, -4.18, -2.05), # CI_high_0.8 = c(41.94, -2.2, -0.96), # CI_low_0.9 = c(36.77, -4.48, -2.21), # CI_high_0.9 = c(42.6, -1.9, -0.8), # t = c(23.14, -4.22, -3.64), # df_error = c(29, 29, 29), # stringsAsFactors = FALSE # ) # expect_equal( # format_table(d), # data.frame( # Parameter = c("(Intercept)", "wt", "cyl"), # Coefficient = c("39.69", "-3.19", "-1.51"), # SE = c("1.71", "0.76", "0.41"), # `80% CI` = c("[37.44, 41.94]", "[-4.18, -2.20]", "[-2.05, -0.96]"), # `90% CI` = c("[36.77, 42.60]", "[-4.48, -1.90]", "[-2.21, -0.80]"), # `t(29)` = c("23.14", "-4.22", "-3.64"), # stringsAsFactors = FALSE # ), # ignore_attr = TRUE # ) }) test_that("format_table, preserve attributes", { d <- mtcars[1:3, 1:3] attr(d, "table_footer") <- "This is a footer" attr(d, "table_caption") <- "And the caption" d2 <- insight::format_table(d, digits = 3, preserve_attributes = TRUE) expect_equal(names(attributes(d2)), c("names", "row.names", "class", "table_footer", "table_caption")) expect_equal(attributes(d2)$table_caption, "And the caption") }) } insight/tests/testthat/test-gamm.R0000644000175000017500000001273614122064326017105 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { unloadNamespace("gam") if (requiet("testthat") && requiet("insight") && requiet("mgcv")) { set.seed(0) dat <- gamSim(6, n = 200, scale = .2, dist = "poisson") m1 <- gamm( y ~ s(x0) + s(x1) + s(x2), family = poisson, data = dat, random = list(fac = ~1), verbosePQL = FALSE ) test_that("model_info", { expect_true(model_info(m1)$is_poisson) expect_false(model_info(m1)$is_linear) }) test_that("clean_names", { expect_equal(clean_names(m1), c("y", "x0", "x1", "x2", "fac")) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("x0", "x1", "x2"))) expect_identical( find_predictors(m1, effects = "all"), list( conditional = c("x0", "x1", "x2"), random = "fac" ) ) expect_identical(find_predictors(m1, flatten = TRUE), c("x0", "x1", "x2")) expect_identical(find_predictors(m1, effects = "random"), list(random = "fac")) }) test_that("find_response", { expect_identical(find_response(m1), "y") }) test_that("get_response", { expect_equal(get_response(m1), dat$y) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 200) expect_equal(colnames(get_data(m1)), c("y", "x0", "x1", "x2", "fac", "g", "g.0", "g.1", "y.0", "Xr.V1", "Xr.V2", "Xr.V3", "Xr.V4", "Xr.V5", "Xr.V6", "Xr.V7", "Xr.V8", "Xr.0.V1", "Xr.0.V2", "Xr.0.V3", "Xr.0.V4", "Xr.0.V5", "Xr.0.V6", "Xr.0.V7", "Xr.0.V8", "Xr.1.V1", "Xr.1.V2", "Xr.1.V3", "Xr.1.V4", "Xr.1.V5", "Xr.1.V6", "Xr.1.V7", "Xr.1.V8", "X.(Intercept)", "X.s(x0)Fx1", "X.s(x1)Fx1", "X.s(x2)Fx1")) }) 
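    # gamm() returns a list holding both an $lme and a $gam part; the checks below
    # verify that find_formula() still recovers the conditional (smooth) formula and
    # the random-effects formula from such objects.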
test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("y ~ s(x0) + s(x1) + s(x2)"), random = as.formula("~1 | fac") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list(response = "y", conditional = c("s(x0)", "s(x1)", "s(x2)"), random = "fac")) expect_equal(find_terms(m1, flatten = TRUE), c("y", "s(x0)", "s(x1)", "s(x2)", "fac")) }) test_that("find_variables", { expect_equal(find_variables(m1), list(response = "y", conditional = c("x0", "x1", "x2"), random = "fac")) expect_equal(find_variables(m1, flatten = TRUE), c("y", "x0", "x1", "x2", "fac")) }) test_that("n_obs", { expect_equal(n_obs(m1), 200) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = "(Intercept)", smooth_terms = c("s(x0)", "s(x1)", "s(x2)") ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "s(x0)", "s(x1)", "s(x2)")) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) # test formula random effects ----------------------- n <- 200 sig <- 2 set.seed(0) n.g <- 10 n <- n.g * 10 * 4 dat <- gamSim(1, n = n, scale = 2) f <- dat$f ## simulate nested random effects.... fa <- as.factor(rep(1:10, rep(4 * n.g, 10))) ra <- rep(rnorm(10), rep(4 * n.g, 10)) fb <- as.factor(rep(rep(1:4, rep(n.g, 4)), 10)) rb <- rep(rnorm(4), rep(n.g, 4)) for (i in 1:9) { rb <- c(rb, rep(rnorm(4), rep(n.g, 4))) } ## simulate auto-correlated errors within groups e <- array(0, 0) for (i in 1:40) { eg <- rnorm(n.g, 0, sig) for (j in 2:n.g) { eg[j] <- eg[j - 1] * 0.6 + eg[j] } e <- c(e, eg) } dat$y <- f + ra + rb + e dat$fa <- fa dat$fb <- fb ## fit model .... 
m1 <- gamm( y ~ s(x0, bs = "cr") + s(x1, bs = "cr"), data = dat, random = list(fa = ~1, fb = ~1), correlation = corAR1() ) set.seed(0) void <- capture.output( dat <- gamSim(6, n = 200, scale = .2, dist = "poisson") ) m2 <- gamm( y ~ s(x0) + s(x1) + s(x2), family = poisson, data = dat, verbosePQL = FALSE ) dat$g <- dat$fac m3 <- gamm( y ~ s(x0) + s(x1) + s(x2), family = poisson, data = dat, random = list(g = ~1), verbosePQL = FALSE ) test_that("find_formula-gamm-1", { expect_equal( find_formula(m1), list( conditional = as.formula("y ~ s(x0, bs = \"cr\") + s(x1, bs = \"cr\")"), random = list(as.formula("~1 | fa"), as.formula("~1 | fb")) ), ignore_attr = TRUE ) }) test_that("find_formula-gamm-2", { expect_equal( find_formula(m2), list(conditional = as.formula("y ~ s(x0) + s(x1) + s(x2)")), ignore_attr = TRUE ) }) test_that("find_formula-gamm-3", { expect_equal( find_formula(m3), list( conditional = as.formula("y ~ s(x0) + s(x1) + s(x2)"), random = as.formula("~1 | g") ), ignore_attr = TRUE ) }) } } insight/tests/testthat/test-polr.R0000644000175000017500000000713214144235000017123 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("MASS")) { data(housing, package = "MASS") m1 <- polr(Sat ~ Infl + Type + Cont, data = housing, weights = Freq) test_that("model_info", { expect_true(model_info(m1)$is_ordinal) expect_false(model_info(m1)$is_multinomial) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("Infl", "Type", "Cont"))) expect_identical( find_predictors(m1, flatten = TRUE), c("Infl", "Type", "Cont") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "Sat") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 72) expect_equal( colnames(get_data(m1)), c("Sat", "Infl", "Type", "Cont", "(weights)", "Freq") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("Sat ~ Infl + Type + Cont")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "Sat", conditional = c("Infl", "Type", "Cont") )) expect_equal( find_terms(m1, flatten = TRUE), c("Sat", "Infl", "Type", "Cont") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 1681) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "Intercept: Low|Medium", "Intercept: Medium|High", "InflMedium", "InflHigh", "TypeApartment", "TypeAtrium", "TypeTerrace", "ContHigh" ) ) ) }) test_that("get_parameters", { expect_equal( get_parameters(m1), data.frame( Parameter = c( "Intercept: Low|Medium", "Intercept: Medium|High", "InflMedium", "InflHigh", "TypeApartment", "TypeAtrium", "TypeTerrace", "ContHigh" ), Estimate = c( -0.4961353438375, 0.690708290379271, 0.566393738890106, 1.28881906381232, -0.572350146429611, -0.366186566153346, -1.09101490767244, 0.360284149947385 ), stringsAsFactors = FALSE, row.names = NULL ) ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) test_that("get_predicted", { p1 <- get_predicted(m1, predict = "expectation") p2 <- get_predicted(m1, predict = "classification") p3 <- get_predicted(m1, predict = NULL, type = "probs") p4 <- get_predicted(m1, predict = NULL, type = "class") expect_s3_class(p1, "get_predicted") expect_s3_class(p2, 
"get_predicted") expect_s3_class(p3, "get_predicted") expect_s3_class(p4, "get_predicted") expect_equal(p1, p3) expect_equal(p2, p4) expect_true(inherits(p1, "data.frame")) expect_true(inherits(p2, "factor")) expect_true(inherits(p3, "data.frame")) expect_true(inherits(p4, "factor")) expect_true(all(c("Row", "Response", "Predicted") %in% colnames(p1))) expect_true(all(c("Row", "Response", "Predicted") %in% colnames(p3))) }) } insight/tests/testthat/test-blmer.R0000644000175000017500000002160314122064326017256 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("blme")) { data(sleepstudy) set.seed(123) sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } m1 <- blmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy, cov.prior = NULL ) m2 <- suppressWarnings(blmer( Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy, cov.prior = wishart )) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m2)$is_linear) expect_true(model_info(m1)$is_bayesian) }) test_that("find_predictors", { expect_equal( find_predictors(m1, effects = "all"), list(conditional = "Days", random = "Subject") ) expect_equal( find_predictors(m1, effects = "all", flatten = TRUE), c("Days", "Subject") ) expect_equal( find_predictors(m1, effects = "fixed"), list(conditional = "Days") ) expect_equal( find_predictors(m1, effects = "fixed", flatten = TRUE), "Days" ) expect_equal( find_predictors(m1, effects = "random"), list(random = "Subject") ) expect_equal( find_predictors(m1, effects = "random", flatten = TRUE), "Subject" ) expect_equal( find_predictors(m2, effects = "all"), list( conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_equal( find_predictors(m2, effects = "all", flatten = TRUE), c("Days", "mysubgrp", "mygrp", "Subject") ) expect_equal( find_predictors(m2, effects = "fixed"), list(conditional = "Days") ) expect_equal(find_predictors(m2, effects = "random"), list(random = c("mysubgrp", "mygrp", "Subject"))) expect_null(find_predictors(m2, effects = "all", component = "zi")) expect_null(find_predictors(m2, effects = "fixed", component = "zi")) expect_null(find_predictors(m2, effects = "random", component = "zi")) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "Subject")) expect_equal(find_random(m1, flatten = TRUE), "Subject") expect_equal(find_random(m2), list(random = c("mysubgrp:mygrp", "mygrp", "Subject"))) expect_equal(find_random(m2, split_nested = TRUE), list(random = c("mysubgrp", "mygrp", "Subject"))) expect_equal( find_random(m2, flatten = TRUE), c("mysubgrp:mygrp", "mygrp", "Subject") ) expect_equal( find_random(m2, split_nested = TRUE, flatten = TRUE), c("mysubgrp", "mygrp", "Subject") ) }) test_that("find_response", { expect_identical(find_response(m1), "Reaction") expect_identical(find_response(m2), "Reaction") }) test_that("get_response", { expect_equal(get_response(m1), sleepstudy$Reaction) }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), .2) expect_identical(link_inverse(m2)(.2), .2) }) test_that("get_data", { expect_equal(colnames(get_data(m1)), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "all")), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "random")), "Subject") expect_equal( 
colnames(get_data(m2)), c("Reaction", "Days", "mygrp", "mysubgrp", "Subject") ) expect_equal( colnames(get_data(m2, effects = "all")), c("Reaction", "Days", "mygrp", "mysubgrp", "Subject") ) expect_equal(colnames(get_data(m2, effects = "random")), c("mysubgrp", "mygrp", "Subject")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_length(find_formula(m2), 2) expect_equal( find_formula(m1, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = as.formula("~1 + Days | Subject") ), ignore_attr = TRUE ) expect_equal( find_formula(m2, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = list( as.formula("~1 | mysubgrp:mygrp"), as.formula("~1 | mygrp"), as.formula("~1 | Subject") ) ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_identical( find_terms(m1), list( response = "Reaction", conditional = "Days", random = c("Days", "Subject") ) ) expect_identical( find_terms(m1, flatten = TRUE), c("Reaction", "Days", "Subject") ) expect_identical( find_terms(m2), list( response = "Reaction", conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_identical( find_terms(m2, flatten = TRUE), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = "Reaction", conditional = "Days", random = "Subject" ) ) }) test_that("get_response", { expect_identical(get_response(m1), sleepstudy$Reaction) }) test_that("get_predictors", { expect_identical(colnames(get_predictors(m1)), "Days") expect_identical(colnames(get_predictors(m2)), "Days") }) test_that("get_random", { expect_identical(colnames(get_random(m1)), "Subject") expect_identical(colnames(get_random(m2)), c("mysubgrp", "mygrp", "Subject")) }) test_that("clean_names", { expect_identical(clean_names(m1), c("Reaction", "Days", "Subject")) expect_identical( clean_names(m2), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "Days"), random = list(Subject = c("(Intercept)", "Days")) ) ) expect_equal(nrow(get_parameters(m1)), 2) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "Days")) expect_equal( find_parameters(m2), list( conditional = c("(Intercept)", "Days"), random = list( `mysubgrp:mygrp` = "(Intercept)", Subject = "(Intercept)", mygrp = "(Intercept)" ) ) ) expect_equal(nrow(get_parameters(m2)), 2) expect_equal(get_parameters(m2)$Parameter, c("(Intercept)", "Days")) expect_equal( names(get_parameters(m2, effects = "random")), c("mysubgrp:mygrp", "Subject", "mygrp") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) }) test_that("get_variance", { skip_on_cran() expect_equal( get_variance(m1), list( var.fixed = 908.9534, var.random = 1698.084, var.residual = 654.94, var.distribution = 654.94, var.dispersion = 0, var.intercept = c(Subject = 612.1002), var.slope = c(Subject.Days = 35.07171), cor.slope_intercept = c(Subject = 0.06555124) ), tolerance = 1e-1 ) expect_equal(get_variance_fixed(m1), c(var.fixed = 908.9534), tolerance = 1e-1 ) expect_equal(get_variance_random(m1), c(var.random = 1698.084), tolerance = 1e-1 ) expect_equal( get_variance_residual(m1), c(var.residual = 654.94), tolerance = 1e-1 ) expect_equal( get_variance_distribution(m1), c(var.distribution = 654.94), 
tolerance = 1e-1 ) expect_equal(get_variance_dispersion(m1), c(var.dispersion = 0), tolerance = 1e-1 ) expect_equal( get_variance_intercept(m1), c(var.intercept.Subject = 612.1002), tolerance = 1e-1 ) expect_equal( get_variance_slope(m1), c(var.slope.Subject.Days = 35.07171), tolerance = 1e-1 ) expect_equal( get_correlation_slope_intercept(m1), c(cor.slope_intercept.Subject = 0.06555124), tolerance = 1e-1 ) }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list(algorithm = "REML", optimizer = "nloptwrap") ) }) test_that("find_random_slopes", { expect_equal(find_random_slopes(m1), list(random = "Days")) expect_null(find_random_slopes(m2)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") expect_identical(find_statistic(m2), "t-statistic") }) } insight/tests/testthat/test-standardize_column_order.R0000644000175000017500000000270714151371005023236 0ustar nileshnileshtest_that("get_predicted", { # easystats conventions df1 <- cbind.data.frame( CI_low = -2.873, t = 5.494, CI_high = -1.088, p = 0.00001, Parameter = -1.980, CI = 0.95, df = 29.234, Method = "Student's t-test" ) expect_equal( names(standardize_column_order(df1, style = "easystats")), c("Parameter", "CI", "CI_low", "CI_high", "Method", "t", "df", "p") ) # broom conventions df2 <- cbind.data.frame( conf.low = -2.873, statistic = 5.494, conf.high = -1.088, p.value = 0.00001, estimate = -1.980, conf.level = 0.95, df = 29.234, method = "Student's t-test" ) expect_equal( names(standardize_column_order(df2, style = "broom")), c( "estimate", "conf.level", "conf.low", "conf.high", "method", "statistic", "df", "p.value" ) ) # deliberately misspecify column names # the misspecified columns should be pushed to the end df3 <- cbind.data.frame( CI_Low = -2.873, t = 5.494, CI_High = -1.088, p = 0.00001, Parameter = -1.980, CI = 0.95, df = 29.234, Method = "Student's t-test" ) expect_equal( names(standardize_column_order(df3, style = "easystats")), c("Parameter", "CI", "Method", "t", "df", "p", "CI_Low", "CI_High") ) }) insight/tests/testthat/test-lm_robust.R0000644000175000017500000000470614122064326020170 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("estimatr")) { data(mtcars) m1 <- lm_robust(mpg ~ gear + wt + cyl, data = mtcars) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("gear", "wt", "cyl"))) expect_identical(find_predictors(m1, flatten = TRUE), c("gear", "wt", "cyl")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "mpg") }) test_that("get_response", { expect_equal(get_response(m1), mtcars$mpg) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("gear", "wt", "cyl")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 32) expect_equal(colnames(get_data(m1)), c("mpg", "gear", "wt", "cyl")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("mpg ~ gear + wt + cyl")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "mpg", conditional = c("gear", "wt", "cyl") )) expect_equal(find_terms(m1, flatten = TRUE), c("mpg", "gear", 
"wt", "cyl")) }) test_that("n_obs", { expect_equal(n_obs(m1), 32) }) test_that("link_function", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("(Intercept)", "gear", "wt", "cyl")) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "gear", "wt", "cyl") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_warning(expect_null(find_algorithm(m1))) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-model_data.R0000644000175000017500000000264514122064326020253 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("insight") && requiet("splines") && requiet("glmmTMB")) { data(iris) m1 <- lm(Sepal.Length ~ Species + ns(Petal.Width), data = iris) m2 <- lm(Sepal.Length ~ Species + ns(Petal.Width, knots = 2), data = iris) m3 <- lm(Sepal.Length ~ Species + bs(Petal.Width, degree = 3), data = iris) m4 <- lm(Sepal.Length ~ Species + bs(Petal.Width, degree = 1), data = iris) m5 <- lm(Sepal.Length ~ Species + Petal.Width, data = iris) test_that("get_data", { mf1 <- get_data(m1) mf2 <- get_data(m2) mf3 <- get_data(m3) mf4 <- get_data(m4) mf5 <- model.frame(m5) expect_equal(as.vector(mf1$Petal.Width), as.vector(mf5$Petal.Width)) expect_equal(as.vector(mf2$Petal.Width), as.vector(mf5$Petal.Width)) expect_equal(as.vector(mf3$Petal.Width), as.vector(mf5$Petal.Width)) expect_equal(as.vector(mf4$Petal.Width), as.vector(mf5$Petal.Width)) }) data("Salamanders") m <- glmmTMB( count ~ spp + cover + mined + poly(DOP, 3) + (1 | site), ziformula = ~ spp + mined, dispformula = ~DOY, data = Salamanders, family = nbinom2 ) test_that("get_data", { mf <- get_data(m) expect_equal(ncol(mf), 7) expect_equal( colnames(mf), c("count", "spp", "cover", "mined", "DOP", "DOY", "site") ) }) } insight/tests/testthat/test-get_loglikelihood.R0000644000175000017500000001163414122064326021644 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) .runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && !osx && requiet("testthat") && requiet("insight") && requiet("nonnest2")) { data(iris) data(mtcars) test_that("get_loglikelihood - lm", { x <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) ll <- loglikelihood(x, estimator = "ML") ll2 <- stats::logLik(x) expect_equal(as.numeric(ll), as.numeric(ll2)) expect_equal(attributes(ll)$df, attributes(ll2)$df) expect_equal(sum(attributes(ll)$per_obs - nonnest2::llcont(x)), 0) # REML ll <- loglikelihood(x, estimator = "REML") ll2 <- stats::logLik(x, REML = TRUE) expect_equal(as.numeric(ll), as.numeric(ll2)) # With weights x <- lm(Sepal.Length ~ Petal.Width + Species, data = iris, weights = Petal.Length) ll <- loglikelihood(x, estimator = "ML") ll2 <- stats::logLik(x) expect_equal(as.numeric(ll), as.numeric(ll2)) }) test_that("get_loglikelihood - glm", { x <- glm(vs ~ mpg * disp, data = mtcars, family = "binomial") ll <- loglikelihood(x) ll2 <- stats::logLik(x) expect_equal(as.numeric(ll), as.numeric(ll2)) expect_equal(attributes(ll)$df, attributes(ll2)$df) expect_equal(sum(attributes(ll)$per_obs - 
nonnest2::llcont(x)), 0) x <- glm(cbind(cyl, gear) ~ mpg, data = mtcars, weights = disp, family = binomial) ll <- loglikelihood(x) ll2 <- stats::logLik(x) expect_equal(as.numeric(ll), as.numeric(ll2)) expect_equal(attributes(ll)$df, attributes(ll2)$df) # Nonnest2 seems to be giving diffenrent results, # which sums doesn't add up to base R's result... so commenting off # expect_equal(sum(attributes(ll)$per_obs - nonnest2::llcont(x)), 0) }) test_that("get_loglikelihood - (g)lmer", { if (requiet("lme4")) { x <- lme4::lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris) ll <- loglikelihood(x, estimator = "ML") ll2 <- stats::logLik(x) expect_equal(as.numeric(ll), as.numeric(ll2)) expect_equal(attributes(ll)$df, attributes(ll2)$df) x <- lme4::glmer(vs ~ mpg + (1 | cyl), data = mtcars, family = "binomial") ll <- loglikelihood(x, estimator = "ML") ll2 <- stats::logLik(x) expect_equal(as.numeric(ll), as.numeric(ll2)) expect_equal(attributes(ll)$df, attributes(ll2)$df) } }) test_that("get_loglikelihood - stanreg", { .runStanTest <- Sys.getenv("RunAllinsightStanTests") == "yes" if (requiet("rstanarm") && .runStanTest) { x <- rstanarm::stan_glm(Sepal.Length ~ Petal.Width, data = iris, refresh = 0) ref <- lm(Sepal.Length ~ Petal.Width, data = iris) ll <- loglikelihood(x) ll2 <- loglikelihood(ref) expect_equal(as.numeric(ll), as.numeric(ll2), tolerance = 2) expect_equal(mean(abs(attributes(ll)$per_obs - attributes(ll2)$per_obs)), 0, tolerance = 0.1) } }) test_that("get_loglikelihood - ivreg", { if (requiet("ivreg")) { data("CigaretteDemand", package = "ivreg") x <- ivreg::ivreg(log(packs) ~ log(rprice) + log(rincome) | salestax + log(rincome), data = CigaretteDemand) ll <- loglikelihood(x) expect_equal(as.numeric(ll), 13.26255, tolerance = 1e-3) } }) test_that("get_loglikelihood - plm", { if (requiet("plm")) { data("Produc", package = "plm") x <- plm::plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state", "year") ) ll <- loglikelihood(x) expect_equal(as.numeric(ll), 1534.532, tolerance = 1e-3) } }) if (requiet("estimatr")) { test_that("get_loglikelihood - iv_robust", { data(mtcars) x <- estimatr::iv_robust(mpg ~ gear + cyl | carb + wt, data = mtcars) ll <- loglikelihood(x) expect_equal(as.numeric(ll), -84.60057, tolerance = 1e-3) }) } if (requiet("mgcv")) { test_that("get_loglikelihood - mgcv", { x <- mgcv::gam(Sepal.Length ~ s(Petal.Width), data = iris) ll <- insight::get_loglikelihood(x) ll2 <- stats::logLik(x) expect_equal(as.numeric(ll), -96.26613, tolerance = 1e-3) # TODO: I'm not sure why this differes :/ # expect_equal(as.numeric(ll), as.numeric(ll2)) x <- mgcv::gamm(Sepal.Length ~ s(Petal.Width), random = list("Species" = ~1), data = iris) # Which one to get? 
}) } if (requiet("gamm4")) { test_that("get_loglikelihood - gamm4", { x <- gamm4::gamm4(Sepal.Length ~ s(Petal.Width), data = iris) ll <- insight::get_loglikelihood(x) # It works, but it's quite diferent from the mgcv result expect_equal(as.numeric(ll), -101.1107, tolerance = 1e-3) }) } } insight/tests/testthat/test-hurdle.R0000644000175000017500000000621714122064326017444 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("pscl")) { data("bioChemists") m1 <- hurdle(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) test_that("model_info", { expect_true(model_info(m1)$is_poisson) expect_true(model_info(m1)$is_zero_inflated) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("fem", "mar", "kid5", "ment"), zero_inflated = c("kid5", "phd") ) ) expect_identical( find_predictors(m1, flatten = TRUE), c("fem", "mar", "kid5", "ment", "phd") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "art") }) test_that("get_response", { expect_equal(get_response(m1), bioChemists$art) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 915) expect_equal( colnames(get_data(m1)), c("art", "fem", "mar", "kid5", "ment", "phd") ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("art ~ fem + mar + kid5 + ment"), zero_inflated = as.formula("~kid5 + phd") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "art", conditional = c("fem", "mar", "kid5", "ment"), zero_inflated = c("kid5", "phd") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("art", "fem", "mar", "kid5", "ment", "phd") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 915) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "count_(Intercept)", "count_femWomen", "count_marMarried", "count_kid5", "count_ment" ), zero_inflated = c("zero_(Intercept)", "zero_kid5", "zero_phd") ) ) expect_equal(nrow(get_parameters(m1)), 8) expect_equal(nrow(get_parameters(m1, component = "zi")), 3) expect_equal( get_parameters(m1)$Parameter, c( "count_(Intercept)", "count_femWomen", "count_marMarried", "count_kid5", "count_ment", "zero_(Intercept)", "zero_kid5", "zero_phd" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "art", conditional = c("fem", "mar", "kid5", "ment"), zero_inflated = c("kid5", "phd") ) ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-gam.R0000644000175000017500000001755514164337075016746 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("insight") && requiet("mgcv")) { set.seed(123) void <- capture.output( dat <- mgcv::gamSim(1, n = 400, dist = "normal", scale = 2) ) m1 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) m2 <- download_model("gam_zi_1") m3 <- download_model("gam_mv_1") test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m2)$is_count) expect_true(model_info(m3)$is_multivariate) }) test_that("n_parameters", { 
expect_equal(n_parameters(m1), 5) expect_equal(n_parameters(m1, component = "conditional"), 1) }) test_that("clean_names", { expect_equal(clean_names(m1), c("y", "x0", "x1", "x2", "x3")) expect_equal(clean_names(m2), c("y", "x2", "x3", "x0", "x1")) expect_equal(clean_names(m3), c("y0", "y1", "x0", "x1", "x2", "x3")) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("x0", "x1", "x2", "x3"))) expect_identical( find_predictors(m1, flatten = TRUE), c("x0", "x1", "x2", "x3") ) expect_null(find_predictors(m1, effects = "random")) expect_identical(find_predictors(m2), list(conditional = c("x2", "x3"), zero_inflated = c("x0", "x1"))) expect_identical(find_predictors(m2, flatten = TRUE), c("x2", "x3", "x0", "x1")) expect_null(find_predictors(m2, effects = "random")) expect_identical(find_predictors(m3), list(y0 = list(conditional = c("x0", "x1")), y1 = list(conditional = c("x2", "x3")))) expect_identical(find_predictors(m3, flatten = TRUE), c("x0", "x1", "x2", "x3")) expect_null(find_predictors(m3, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "y") expect_identical(find_response(m2), "y") expect_identical(find_response(m3), c(y0 = "y0", y1 = "y1")) }) test_that("find_smooth", { expect_identical(find_smooth(m1), list(smooth_terms = c("s(x0)", "s(x1)", "s(x2)", "s(x3)"))) }) test_that("get_call", { expect_identical(deparse(get_call(m1)), "mgcv::gam(formula = y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)") }) test_that("get_response", { expect_equal(get_response(m1), dat$y) expect_equal(length(get_response(m2)), 500) expect_equal(ncol(get_response(m3)), 2) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) expect_equal(link_inverse(m2)(.2), .2, tolerance = 1e-5) expect_equal(link_inverse(m3)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 400) expect_equal(colnames(get_data(m1)), c("y", "x0", "x1", "x2", "x3")) expect_equal(nrow(get_data(m2)), 500) expect_equal(colnames(get_data(m2)), c("y", "x2", "x3", "x0", "x1")) expect_equal(nrow(get_data(m3)), 300) expect_equal(colnames(get_data(m3)), c("y0", "x0", "x1", "x2", "x3", "y1")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("y ~ s(x0) + s(x1) + s(x2) + s(x3)")), ignore_attr = TRUE ) expect_length(find_formula(m2), 2) expect_equal( find_formula(m2), list( conditional = as.formula("y ~ s(x2) + s(x3)"), zero_inflated = as.formula("~s(x0) + s(x1)") ), ignore_attr = TRUE ) expect_length(find_formula(m3), 2) expect_equal( find_formula(m3), structure(list( y0 = list(conditional = as.formula("y0 ~ s(x0) + s(x1)")), y1 = list(conditional = as.formula("y1 ~ s(x2) + s(x3)")) ), is_mv = "1" ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal(find_variables(m1), list(response = "y", conditional = c("x0", "x1", "x2", "x3"))) expect_equal(find_variables(m1, flatten = TRUE), c("y", "x0", "x1", "x2", "x3")) expect_equal(find_variables(m2), list(response = "y", conditional = c("x2", "x3"), zero_inflated = c("x0", "x1"))) expect_equal(find_variables(m2, flatten = TRUE), c("y", "x2", "x3", "x0", "x1")) expect_equal(find_variables(m3), list(response = c(y0 = "y0", y1 = "y1"), y0 = list(conditional = c("x0", "x1")), y1 = list(conditional = c("x2", "x3")))) expect_equal(find_variables(m3, flatten = TRUE), c("y0", "y1", "x0", "x1", "x2", "x3")) }) test_that("n_obs", { expect_equal(n_obs(m1), 400) 
expect_equal(n_obs(m2), 500) expect_equal(n_obs(m3), 300) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = "(Intercept)", smooth_terms = c("s(x0)", "s(x1)", "s(x2)", "s(x3)") ) ) expect_equal(nrow(get_parameters(m1)), 5) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "s(x0)", "s(x1)", "s(x2)", "s(x3)") ) expect_equal(nrow(get_parameters(m1, "smooth_terms")), 4) expect_equal( find_parameters(m2), list( conditional = c("(Intercept)", "(Intercept).1"), smooth_terms = c("s(x2)", "s(x3)", "s.1(x0)", "s.1(x1)") ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) expect_true(is_multivariate(m3)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "y", conditional = c("s(x0)", "s(x1)", "s(x2)", "s(x3)") ) ) expect_equal( find_terms(m2), list( response = "y", conditional = c("s(x2)", "s(x3)"), zero_inflated = c("s(x0)", "s(x1)") ) ) expect_equal( find_terms(m3), list( y0 = list(response = "y0", conditional = c("s(x0)", "s(x1)")), y1 = list(response = "y1", conditional = c("s(x2)", "s(x3)")) ) ) }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list(algorithm = "GCV", optimizer = "magic") ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) test_that("get_predicted", { tmp <- mgcv::gam(y ~ s(x0) + s(x1), data = head(dat, 30)) pred <- get_predicted(tmp) expect_s3_class(pred, "get_predicted") expect_equal( as.vector(pred), c( 11.99341, 5.58098, 10.89252, 7.10335, 5.94836, 6.5724, 8.5054, 5.47147, 5.9343, 8.27001, 5.71199, 9.94999, 5.69979, 6.63532, 6.00475, 5.58633, 11.54848, 6.1083, 6.6151, 5.37164, 6.86236, 7.80726, 7.38088, 5.70664, 10.60654, 7.62847, 5.8596, 6.06744, 5.81571, 10.4606 ), tolerance = 1e-3 ) x <- get_predicted(tmp, predict = NULL, type = "link") y <- get_predicted(tmp, predict = "link") z <- predict(tmp, type = "link", se.fit = TRUE) expect_equal(x, y) expect_equal(x, z$fit, ignore_attr = TRUE) expect_equal(as.data.frame(x)$SE, z$se.fit, ignore_attr = TRUE) x <- get_predicted(tmp, predict = NULL, type = "response") y <- get_predicted(tmp, predict = "expectation") z <- predict(tmp, type = "response", se.fit = TRUE) expect_equal(x, y, ignore_attr = TRUE) expect_equal(x, z$fit, ignore_attr = TRUE) expect_equal(as.data.frame(x)$SE, z$se.fit, ignore_attr = TRUE) }) } } insight/tests/testthat/test-plm.R0000644000175000017500000000675114122064326016754 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && getRversion() > "3.5") { if (requiet("testthat") && requiet("insight") && requiet("plm")) { data(Crime) m1 <- plm(lcrmrte ~ lprbarr + factor(year) | . 
- lprbarr + lmix, data = Crime, model = "random") # data set.seed(123) data("Produc", package = "plm") # model m2 <- plm::plm( formula = log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state", "year") ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("lprbarr", "year"), instruments = c("lprbarr", "lmix") ) ) expect_identical( find_predictors(m1, flatten = TRUE), c("lprbarr", "year", "lmix") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "lcrmrte") }) test_that("get_response", { expect_equal(get_response(m1), Crime$lcrmrte) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("lprbarr", "year", "lmix")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 630) expect_equal( colnames(get_data(m1)), c("lcrmrte", "lprbarr", "year", "lmix") ) expect_equal(nrow(get_data(m2)), 816) expect_equal( colnames(get_data(m2)), c("gsp", "pcap", "pc", "emp", "unemp", "state", "year") ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("lcrmrte ~ lprbarr + factor(year)"), instruments = as.formula("~-lprbarr + lmix") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "lcrmrte", conditional = c("lprbarr", "year"), instruments = c("lprbarr", "lmix") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("lcrmrte", "lprbarr", "year", "lmix") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 630) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "(Intercept)", "lprbarr", "factor(year)82", "factor(year)83", "factor(year)84", "factor(year)85", "factor(year)86", "factor(year)87" ) ) ) expect_equal(nrow(get_parameters(m1)), 8) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "t-statistic") }) } } insight/tests/testthat/test-rstanarm.R0000644000175000017500000004021114144235000017771 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" .runStanTest <- Sys.getenv("RunAllinsightStanTests") == "yes" if (.runThisTest && .runStanTest) { if (suppressWarnings(requiet("testthat") && requiet("insight") && requiet("lme4") && requiet("BayesFactor") && requiet("rstanarm"))) { # skip_on_cran() # defining models --------------------- m1 <- insight::download_model("stanreg_merMod_5") m2 <- insight::download_model("stanreg_glm_6") m3 <- insight::download_model("stanreg_glm_1") data("puzzles") m4 <- stan_glm( RT ~ color * shape, data = puzzles, prior = rstanarm::cauchy(0, c(3, 1, 2)), iter = 500, chains = 2, refresh = 0 ) m5 <- stan_glm( RT ~ color * shape, data = puzzles, prior = rstanarm::cauchy(0, c(1, 2, 3)), iter = 500, chains = 2, refresh = 0 ) m6 <- insight::download_model("stanreg_gamm4_1") m7 <- suppressWarnings(stan_lm(mpg ~ wt + qsec + am, data = mtcars, prior = R2(0.75), chains = 1, iter = 300, refresh = 0 )) m8 <- stan_lmer(Reaction ~ Days + (1 | Subject), data = 
sleepstudy, refresh = 0) m9 <- stan_aov(yield ~ block + N * P * K, data = npk, prior = R2(0.5), refresh = 0) N <- 200 x <- rnorm(N, 2, 1) z <- rnorm(N, 2, 1) mu <- binomial(link = "logit")$linkinv(1 + 0.2 * x) phi <- exp(1.5 + 0.4 * z) y <- rbeta(N, mu * phi, (1 - mu) * phi) hist(y, col = "dark grey", border = FALSE, xlim = c(0, 1)) fake_dat <- data.frame(y, x, z) m10 <- stan_betareg( y ~ x | z, data = fake_dat, link = "logit", link.phi = "log", refresh = 0, algorithm = "optimizing" # just for speed of example ) ols <- lm(mpg ~ wt + qsec + am, data = mtcars, # all row are complete so ... na.action = na.exclude ) # not necessary in this case b <- coef(ols)[-1] R <- qr.R(ols$qr)[-1, -1] SSR <- crossprod(ols$residuals)[1] not_NA <- !is.na(fitted(ols)) N <- sum(not_NA) xbar <- colMeans(mtcars[not_NA, c("wt", "qsec", "am")]) y <- mtcars$mpg[not_NA] ybar <- mean(y) s_y <- sd(y) m11 <- suppressWarnings(stan_biglm.fit(b, R, SSR, N, xbar, ybar, s_y, prior = R2(.75), # the next line is only to make the example go fast chains = 1, iter = 500, seed = 12345 )) dat <- infert[order(infert$stratum), ] # order by strata m12 <- suppressWarnings(stan_clogit(case ~ spontaneous + induced + (1 | education), strata = stratum, data = dat, subset = parity <= 2, QR = TRUE, chains = 2, iter = 500, refresh = 0 )) # for speed only if (.Platform$OS.type != "windows" || .Platform$r_arch != "i386") { m13 <- suppressWarnings(stan_jm( formulaLong = logBili ~ year + (1 | id), dataLong = pbcLong, formulaEvent = Surv(futimeYears, death) ~ sex + trt, dataEvent = pbcSurv, time_var = "year", # this next line is only to keep the example small in size! chains = 1, cores = 1, seed = 12345, iter = 1000, refresh = 0 )) # expect_snapshot(model_info(m13)) } data("Orange", package = "datasets") Orange$circumference <- Orange$circumference / 100 Orange$age <- Orange$age / 100 ## TODO probably re-enable once strange check error is resolved # m14 <- stan_nlmer( # circumference ~ SSlogis(age, Asym, xmid, scal) ~ Asym | Tree, # data = Orange, # # for speed only # chains = 1, # iter = 1000 # ) m15 <- suppressWarnings(stan_mvmer( formula = list( logBili ~ year + (1 | id), albumin ~ sex + year + (year | id) ), data = pbcLong, # this next line is only to keep the example small in size! 
chains = 1, cores = 1, seed = 12345, iter = 1000, refresh = 0 )) test_that("model_info-stanreg-glm", { expect_equal( model_info(m1), list( is_binomial = TRUE, is_bernoulli = FALSE, is_count = FALSE, is_poisson = FALSE, is_negbin = FALSE, is_beta = FALSE, is_betabinomial = FALSE, is_dirichlet = FALSE, is_exponential = FALSE, is_logit = TRUE, is_probit = FALSE, is_censored = FALSE, is_truncated = FALSE, is_survival = FALSE, is_linear = FALSE, is_tweedie = FALSE, is_zeroinf = FALSE, is_zero_inflated = FALSE, is_dispersion = FALSE, is_hurdle = FALSE, is_ordinal = FALSE, is_cumulative = FALSE, is_multinomial = FALSE, is_categorical = FALSE, is_mixed = TRUE, is_multivariate = FALSE, is_trial = TRUE, is_bayesian = TRUE, is_gam = FALSE, is_anova = FALSE, is_timeseries = FALSE, is_ttest = FALSE, is_correlation = FALSE, is_onewaytest = FALSE, is_chi2test = FALSE, is_ranktest = FALSE, is_levenetest = FALSE, is_xtab = FALSE, is_proptest = FALSE, is_binomtest = FALSE, is_meta = FALSE, link_function = "logit", family = "binomial", n_obs = 56L, model_terms = list(response = c("incidence", "size"), conditional = c("size", "period"), random = "herd") ) ) expect_equal( model_info(m2), list( is_binomial = FALSE, is_bernoulli = FALSE, is_count = FALSE, is_poisson = FALSE, is_negbin = FALSE, is_beta = FALSE, is_betabinomial = FALSE, is_dirichlet = FALSE, is_exponential = FALSE, is_logit = FALSE, is_probit = FALSE, is_censored = FALSE, is_truncated = FALSE, is_survival = FALSE, is_linear = TRUE, is_tweedie = FALSE, is_zeroinf = FALSE, is_zero_inflated = FALSE, is_dispersion = FALSE, is_hurdle = FALSE, is_ordinal = FALSE, is_cumulative = FALSE, is_multinomial = FALSE, is_categorical = FALSE, is_mixed = FALSE, is_multivariate = FALSE, is_trial = FALSE, is_bayesian = TRUE, is_gam = FALSE, is_anova = FALSE, is_timeseries = FALSE, is_ttest = FALSE, is_correlation = FALSE, is_onewaytest = FALSE, is_chi2test = FALSE, is_ranktest = FALSE, is_levenetest = FALSE, is_xtab = FALSE, is_proptest = FALSE, is_binomtest = FALSE, is_meta = FALSE, link_function = "identity", family = "gaussian", n_obs = 150L, model_terms = list(response = "Sepal.Width", conditional = c("Species", "Petal.Length")) ) ) expect_equal( model_info(m3), list( is_binomial = TRUE, is_bernoulli = TRUE, is_count = FALSE, is_poisson = FALSE, is_negbin = FALSE, is_beta = FALSE, is_betabinomial = FALSE, is_dirichlet = FALSE, is_exponential = FALSE, is_logit = TRUE, is_probit = FALSE, is_censored = FALSE, is_truncated = FALSE, is_survival = FALSE, is_linear = FALSE, is_tweedie = FALSE, is_zeroinf = FALSE, is_zero_inflated = FALSE, is_dispersion = FALSE, is_hurdle = FALSE, is_ordinal = FALSE, is_cumulative = FALSE, is_multinomial = FALSE, is_categorical = FALSE, is_mixed = FALSE, is_multivariate = FALSE, is_trial = FALSE, is_bayesian = TRUE, is_gam = FALSE, is_anova = FALSE, is_timeseries = FALSE, is_ttest = FALSE, is_correlation = FALSE, is_onewaytest = FALSE, is_chi2test = FALSE, is_ranktest = FALSE, is_levenetest = FALSE, is_xtab = FALSE, is_proptest = FALSE, is_binomtest = FALSE, is_meta = FALSE, link_function = "logit", family = "binomial", n_obs = 32L, model_terms = list(response = "vs", conditional = "wt") ) ) ## TODO add model m4 to m15 }) test_that("n_parameters", { expect_equal(n_parameters(m1), 21) expect_equal(n_parameters(m1, effects = "fixed"), 5) }) test_that("get_priors", { expect_equal( colnames(get_priors(m1)), c("Parameter", "Distribution", "Location", "Scale") ) expect_equal( colnames(get_priors(m2)), c( "Parameter", "Distribution", 
"Location", "Scale", "Adjusted_Scale" ) ) expect_equal(get_priors(m1)$Scale, c(2.5, 2.5, 2.5, 2.5, 2.5), tolerance = 1e-3) expect_equal(get_priors(m2)$Adjusted_Scale, c(1.08967, 2.30381, 2.30381, 0.61727, 0.53603, 0.41197), tolerance = 1e-3) expect_equal(get_priors(m3)$Adjusted_Scale, c(NA, 2.555042), tolerance = 1e-3) expect_equal(get_priors(m4)$Adjusted_Scale, c(6.399801, NA, NA, NA), tolerance = 1e-3) expect_equal(get_priors(m5)$Adjusted_Scale, c(6.399801, NA, NA, NA), tolerance = 1e-3) expect_equal( get_priors(m6), data.frame( Parameter = "(Intercept)", Distribution = "normal", Location = 3.057333, Scale = 2.5, Adjusted_Scale = 1.089666, stringsAsFactors = FALSE, row.names = NULL ), tolerance = 1e-3 ) }) test_that("clean_names", { expect_identical( clean_names(m1), c("incidence", "size", "period", "herd") ) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("size", "period"))) expect_identical(find_predictors(m1, flatten = TRUE), c("size", "period")) expect_identical( find_predictors(m1, effects = "all", component = "all"), list( conditional = c("size", "period"), random = "herd" ) ) expect_identical( find_predictors( m1, effects = "all", component = "all", flatten = TRUE ), c("size", "period", "herd") ) }) test_that("find_response", { expect_equal( find_response(m1, combine = TRUE), "cbind(incidence, size - incidence)" ) expect_equal( find_response(m1, combine = FALSE), c("incidence", "size") ) }) test_that("get_response", { expect_equal(nrow(get_response(m1)), 56) expect_equal(colnames(get_response(m1)), c("incidence", "size")) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "herd")) }) test_that("get_random", { expect_equal(get_random(m1), lme4::cbpp[, "herd", drop = FALSE]) }) test_that("find_terms", { expect_identical( find_terms(m1), list( response = "cbind(incidence, size - incidence)", conditional = c("size", "period"), random = "herd" ) ) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = c("incidence", "size"), conditional = c("size", "period"), random = "herd" ) ) expect_identical( find_variables(m1, effects = "fixed"), list( response = c("incidence", "size"), conditional = c("size", "period") ) ) expect_null(find_variables(m1, component = "zi")) }) test_that("n_obs", { expect_equal(n_obs(m1), 56) expect_equal(n_obs(m1, disaggregate = TRUE), 842) }) test_that("find_paramaters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "size", "period2", "period3", "period4"), random = c(sprintf("b[(Intercept) herd:%i]", 1:15), "Sigma[herd:(Intercept),(Intercept)]") ) ) expect_equal( find_parameters(m1, flatten = TRUE), c( "(Intercept)", "size", "period2", "period3", "period4", sprintf("b[(Intercept) herd:%i]", 1:15), "Sigma[herd:(Intercept),(Intercept)]" ) ) }) test_that("find_paramaters", { expect_equal( colnames(get_parameters(m1)), c("(Intercept)", "size", "period2", "period3", "period4") ) expect_equal( colnames(get_parameters(m1, effects = "all")), c( "(Intercept)", "size", "period2", "period3", "period4", sprintf("b[(Intercept) herd:%i]", 1:15), "Sigma[herd:(Intercept),(Intercept)]" ) ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-4) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 56) expect_equal( colnames(get_data(m1)), c("incidence", "size", "period", "herd") ) }) test_that("find_formula", { expect_length(find_formula(m1), 
2) expect_equal( find_formula(m1), list( conditional = as.formula("cbind(incidence, size - incidence) ~ size + period"), random = as.formula("~1 | herd") ), ignore_attr = TRUE ) }) test_that("get_variance", { expect_equal( get_variance(m1), list( var.fixed = 0.36274, var.random = 0.5988885, var.residual = 3.28987, var.distribution = 3.28987, var.dispersion = 0, var.intercept = c(herd = 0.59889) ), tolerance = 1e-3 ) expect_equal(get_variance_fixed(m1), c(var.fixed = 0.3627389), tolerance = 1e-4 ) expect_equal(get_variance_random(m1), c(var.random = 0.5988885), tolerance = 1e-4 ) expect_equal(get_variance_residual(m1), c(var.residual = 3.289868), tolerance = 1e-4 ) expect_equal(get_variance_distribution(m1), c(var.distribution = 3.289868), tolerance = 1e-4 ) expect_equal(get_variance_dispersion(m1), c(var.dispersion = 0), tolerance = 1e-4 ) }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list( algorithm = "sampling", chains = 2, iterations = 500, warmup = 250 ) ) }) test_that("clean_parameters", { expect_equal( clean_parameters(m2), structure( list( Parameter = c( "(Intercept)", "Speciesversicolor", "Speciesvirginica", "Petal.Length", "Speciesversicolor:Petal.Length", "Speciesvirginica:Petal.Length", "sigma" ), Effects = c( "fixed", "fixed", "fixed", "fixed", "fixed", "fixed", "fixed" ), Component = c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "sigma" ), Cleaned_Parameter = c( "(Intercept)", "Speciesversicolor", "Speciesvirginica", "Petal.Length", "Speciesversicolor:Petal.Length", "Speciesvirginica:Petal.Length", "sigma" ) ), class = c("clean_parameters", "data.frame"), row.names = c(NA, -7L) ), ignore_attr = TRUE ) }) test_that("find_statistic", { expect_null(find_statistic(m1)) expect_null(find_statistic(m2)) expect_null(find_statistic(m3)) expect_null(find_statistic(m4)) expect_null(find_statistic(m5)) expect_null(find_statistic(m6)) }) model <- stan_glm( disp ~ carb, data = mtcars, priors = NULL, prior_intercept = NULL, refresh = 0 ) test_that("flat_priors", { p <- get_priors(model) expect_equal(p$Distribution, c("uniform", "normal")) expect_equal(p$Location, c(NA, 0), tolerance = 1e-3) }) } } insight/tests/testthat/test-check_if_installed.R0000644000175000017500000000032514122064326021745 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { test_that("export_table", { # mimic package name if cat were to walk on a keyboard expect_error(check_if_installed("xklfueofi8eur3rnfalfb")) }) } insight/tests/testthat/test-truncreg.R0000644000175000017500000000362614122064326020013 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("truncreg") && requiet("survival")) { data("tobin", package = "survival") m1 <- truncreg(durable ~ age + quant, data = tobin, subset = durable > 0) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("age", "quant"))) expect_identical(find_predictors(m1, flatten = TRUE), c("age", "quant")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "durable") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 7) expect_equal(colnames(get_data(m1)), c("durable", "age", "quant")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = 
as.formula("durable ~ age + quant")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "durable", conditional = c("age", "quant") )) expect_equal(find_terms(m1, flatten = TRUE), c("durable", "age", "quant")) }) test_that("n_obs", { expect_equal(n_obs(m1), 7) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c( "(Intercept)", "age", "quant", "sigma" )) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "age", "quant", "sigma") ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-gls.R0000644000175000017500000000524514122064326016746 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("nlme")) { data(Ovary) m1 <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), Ovary, correlation = corAR1(form = ~ 1 | Mare) ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list(conditional = "Time", correlation = "Mare") ) expect_identical(find_predictors(m1, flatten = TRUE), c("Time", "Mare")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "follicles") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 308) expect_equal(colnames(get_data(m1)), c("Mare", "Time", "follicles")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time)"), correlation = as.formula("~1 | Mare") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "follicles", conditional = c("sin(2 * pi * Time)", "cos(2 * pi * Time)"), correlation = c("1", "Mare") ) ) expect_equal( find_terms(m1, flatten = TRUE), c( "follicles", "sin(2 * pi * Time)", "cos(2 * pi * Time)", "1", "Mare" ) ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "follicles", conditional = "Time", correlation = "Mare" ) ) expect_equal( find_variables(m1, flatten = TRUE), c("follicles", "Time", "Mare") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 308) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "sin(2 * pi * Time)", "cos(2 * pi * Time)") ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "sin(2 * pi * Time)", "cos(2 * pi * Time)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-logistf.R0000644000175000017500000000553214122064326017627 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("logistf")) { data(sex2) m1 <- logistf(case ~ age + oc + vic + vicl + vis + dia, data = sex2) test_that("model_info", { expect_true(model_info(m1)$is_binomial) expect_true(model_info(m1)$is_logit) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c( "age", "oc", "vic", "vicl", "vis", "dia" ))) expect_identical( 
find_predictors(m1, flatten = TRUE), c("age", "oc", "vic", "vicl", "vis", "dia") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "case") }) test_that("get_response", { expect_equal(get_response(m1), sex2$case) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c("age", "oc", "vic", "vicl", "vis", "dia") ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 239) expect_equal( colnames(get_data(m1)), c("case", "age", "oc", "vic", "vicl", "vis", "dia") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("case ~ age + oc + vic + vicl + vis + dia")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "case", conditional = c("age", "oc", "vic", "vicl", "vis", "dia") )) expect_equal( find_terms(m1, flatten = TRUE), c("case", "age", "oc", "vic", "vicl", "vis", "dia") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 239) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("linkinverse", { expect_false(is.null(link_inverse(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "age", "oc", "vic", "vicl", "vis", "dia") ) ) expect_equal(nrow(get_parameters(m1)), 7) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "age", "oc", "vic", "vicl", "vis", "dia") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "Penalized ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "chi-squared statistic") }) } insight/tests/testthat/test-format_table_ci.R0000644000175000017500000000052214122064326021264 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { d <- data.frame(CI = 0.97, CI_low = 1, CI_high = 3) test_that("format_table with ci-level", { ft <- insight::format_table(d) expect_equal(colnames(ft), "97% CI") d$CI <- 0.788 ft <- insight::format_table(d) expect_equal(colnames(ft), "78.8% CI") }) } insight/tests/testthat/test-get_weights.R0000644000175000017500000000125614122064326020470 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { data(mtcars) m1 <- lmer(mpg ~ am + (1 | cyl), data = mtcars) m2 <- lm(mpg ~ am, data = mtcars) test_that("get_weights", { expect_null(get_weights(m1)) expect_null(get_weights(m2)) }) set.seed(123) mtcars$w <- abs(rnorm(nrow(mtcars), sd = .5)) m1 <- lmer(mpg ~ am + (1 | cyl), data = mtcars, weights = w) m2 <- lm(mpg ~ am, data = mtcars, weights = w) test_that("get_weights", { expect_equal( get_weights(m1), mtcars$w, tolerance = 1e-2 ) expect_equal( get_weights(m2), mtcars$w, tolerance = 1e-2 ) }) } insight/tests/testthat/test-ivreg.R0000644000175000017500000000624614122064326017277 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("ivreg")) { data("CigaretteDemand") m1 <- ivreg::ivreg(log(packs) ~ log(rprice) + log(rincome) | salestax + log(rincome), data = CigaretteDemand ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("rprice", 
"rincome"), instruments = c("salestax", "rincome") ) ) expect_identical( find_predictors(m1, flatten = TRUE), c("rprice", "rincome", "salestax") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "packs") }) test_that("get_response", { expect_equal(get_response(m1), CigaretteDemand$packs) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c("rprice", "rincome", "salestax") ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 48) expect_equal( colnames(get_data(m1)), c("packs", "rprice", "rincome", "salestax") ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("log(packs) ~ log(rprice) + log(rincome)"), instruments = as.formula("~salestax + log(rincome)") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "packs", conditional = c("rprice", "rincome"), instruments = c("salestax", "rincome") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("packs", "rprice", "rincome", "salestax") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 48) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "log(rprice)", "log(rincome)") ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "log(rprice)", "log(rincome)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "log(packs)", conditional = c("log(rprice)", "log(rincome)"), instruments = c("salestax", "log(rincome)") ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "log(rprice)", "log(rincome)") ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-felm.R0000644000175000017500000000757114122064326017110 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lfe")) { x <- rnorm(1000) x2 <- rnorm(length(x)) id <- factor(sample(20, length(x), replace = TRUE)) firm <- factor(sample(13, length(x), replace = TRUE)) id.eff <- rnorm(nlevels(id)) firm.eff <- rnorm(nlevels(firm)) u <- rnorm(length(x)) y <- x + 0.5 * x2 + id.eff[id] + firm.eff[firm] + u x3 <- rnorm(length(x)) x4 <- sample(12, length(x), replace = TRUE) Q <- 0.3 * x3 + x + 0.2 * x2 + id.eff[id] + 0.3 * log(x4) - 0.3 * y + rnorm(length(x), sd = 0.3) W <- 0.7 * x3 - 2 * x + 0.1 * x2 - 0.7 * id.eff[id] + 0.8 * cos(x4) - 0.2 * y + rnorm(length(x), sd = 0.6) # add them to the outcome y <- y + Q + W dat <- data.frame(y, x, x2, x3, x4, id, firm, Q, W) m1 <- felm(y ~ x + x2 | id + firm | (Q | W ~ x3 + factor(x4)), data = dat) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("x", "x2"), instruments = c("Q", "W", "x3", "x4") ) ) expect_identical(find_predictors(m1, effects = "random"), list(random = c("id", "firm"))) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("x", "x2", "id", "firm", "Q", "W", "x3", "x4") ) }) 
test_that("find_random", { expect_identical(find_random(m1), list(random = c("id", "firm"))) }) test_that("get_random", { expect_identical(colnames(get_random(m1)), c("id", "firm")) }) test_that("find_response", { expect_identical(find_response(m1), "y") }) test_that("get_response", { expect_equal(get_response(m1), dat$y) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c("x", "x2", "Q", "W", "x3", "x4") ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 1000) expect_equal( colnames(get_data(m1)), c("y", "x", "x2", "id", "firm", "Q", "W", "x3", "x4") ) }) test_that("find_formula", { expect_length(find_formula(m1), 3) expect_equal( find_formula(m1), list( conditional = as.formula("y ~ x + x2"), random = as.formula("~id + firm"), instruments = as.formula("~(Q | W ~ x3 + factor(x4))") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "y", conditional = c("x", "x2"), random = c("id", "firm"), instruments = c("(Q", "W x3", "factor(x4))") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("y", "x", "x2", "id", "firm", "(Q", "W x3", "factor(x4))") ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "y", conditional = c("x", "x2"), random = c("id", "firm"), instruments = c("Q", "W", "x3", "x4") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("y", "x", "x2", "id", "firm", "Q", "W", "x3", "x4") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 1000) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("x", "x2", "Q(fit)", "W(fit)")) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("x", "x2", "Q(fit)", "W(fit)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-gamm4.R0000644000175000017500000000643514122064326017170 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) unloadNamespace("gam") .runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && !osx && requiet("testthat") && requiet("insight") && requiet("gamm4")) { set.seed(0) dat <- gamSim(1, n = 400, scale = 2) ## simulate 4 term additive truth dat$fac <- fac <- as.factor(sample(1:20, 400, replace = TRUE)) dat$y <- dat$y + model.matrix(~ fac - 1) %*% rnorm(20) * .5 m1 <- gamm4(y ~ s(x0) + x1 + s(x2), data = dat, random = ~ (1 | fac) ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("clean_names", { expect_equal(clean_names(m1), c("y", "x0", "x1", "x2", "fac")) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("x0", "x1", "x2"))) expect_identical(find_predictors(m1, flatten = TRUE), c("x0", "x1", "x2")) expect_identical(find_predictors(m1, effects = "random"), list(random = "fac")) }) test_that("find_response", { expect_identical(find_response(m1), "y") }) test_that("get_response", { expect_equal(get_response(m1), unname(dat$y[, 1])) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 400) expect_equal( 
colnames(get_data(m1)), c( "y", "x1", "x0", "x2", "fac", "y.0", "Xr", "Xr.0", "X.(Intercept)", "X.x1", "X.s(x0)Fx1", "X.s(x2)Fx1" ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("y ~ s(x0) + x1 + s(x2)"), random = as.formula("~1 | fac") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "y", conditional = c("s(x0)", "x1", "s(x2)"), random = "fac" )) expect_equal( find_terms(m1, flatten = TRUE), c("y", "s(x0)", "x1", "s(x2)", "fac") ) }) test_that("find_variables", { expect_equal(find_variables(m1), list( response = "y", conditional = c("x0", "x1", "x2"), random = "fac" )) expect_equal(find_variables(m1, flatten = TRUE), c("y", "x0", "x1", "x2", "fac")) }) test_that("n_obs", { expect_equal(n_obs(m1), 400) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "x1"), smooth_terms = c("s(x0)", "s(x2)") ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "x1", "s(x0)", "s(x2)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_null(find_statistic(m1)) }) } insight/tests/testthat/test-is_nullmodel.R0000644000175000017500000000115414122064326020642 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { data(mtcars) data(sleepstudy) m1 <- lm(mpg ~ 1, data = mtcars) m2 <- lm(mpg ~ gear, data = mtcars) m3 <- lmer(Reaction ~ 1 + (Days | Subject), data = sleepstudy) m4 <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) m5 <- suppressWarnings(lmer(Reaction ~ 0 + (Days | Subject), data = sleepstudy)) test_that("is_nullmodel", { expect_true(is_nullmodel(m1)) expect_false(is_nullmodel(m2)) expect_true(is_nullmodel(m3)) expect_false(is_nullmodel(m4)) expect_true(is_nullmodel(m5)) }) } insight/tests/testthat/test-find_formula-data.R0000644000175000017500000000133714122064326021533 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { data(mtcars) d <- mtcars m1 <- lm(mtcars$mpg ~ mtcars$hp * mtcars$cyl + poly(mtcars$drat, 2) / mtcars$disp) m2 <- lm(mtcars$mpg ~ d$hp * mtcars$cyl + poly(mtcars$drat, 2) / mtcars$disp) m3 <- lm(mpg ~ hp * cyl + poly(drat, 2) / disp, data = mtcars) test_that("find_formula-data1", { expect_warning(find_formula(m1)) }) test_that("find_formula-data2", { expect_error(find_formula(m2)) }) test_that("find_formula-data3", { expect_equal( find_formula(m3), structure(list(conditional = mpg ~ hp * cyl + poly(drat, 2) / disp), class = c("insight_formula", "list") ), ignore_attr = TRUE ) }) } insight/tests/testthat/test-aovlist.R0000644000175000017500000000747014122064326017644 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("stats")) { data(npk) m1 <- aov(yield ~ N * P * K + Error(block), data = npk) m2 <- aov(yield ~ N * P * K, data = npk) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m1)$is_anova) expect_true(model_info(m2)$is_linear) expect_true(model_info(m2)$is_anova) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("N", "P", "K", "block"))) expect_identical(find_predictors(m2), list(conditional = c("N", "P", "K"))) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { 
expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "yield") expect_identical(find_response(m2), "yield") }) test_that("get_response", { expect_equal(get_response(m1), npk$yield) expect_equal(get_response(m2), npk$yield) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("N", "P", "K", "block")) expect_equal(colnames(get_predictors(m2)), c("N", "P", "K")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) expect_equal(link_inverse(m2)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 24) expect_equal(nrow(get_data(m2)), 24) expect_equal(colnames(get_data(m1)), c("yield", "N", "P", "K", "block")) expect_equal(colnames(get_data(m2)), c("yield", "N", "P", "K")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("yield ~ N * P * K + Error(block)")), ignore_attr = TRUE ) expect_length(find_formula(m2), 1) expect_equal( find_formula(m2), list(conditional = as.formula("yield ~ N * P * K")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "yield", conditional = c("N", "P", "K"), error = "Error(block)" )) expect_equal( find_terms(m1, flatten = TRUE), c("yield", "N", "P", "K", "Error(block)") ) expect_equal(find_terms(m2), list( response = "yield", conditional = c("N", "P", "K") )) expect_equal(find_terms(m2, flatten = TRUE), c("yield", "N", "P", "K")) }) test_that("n_obs", { expect_equal(n_obs(m1), 24) expect_equal(n_obs(m2), 24) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("(Intercept)", "N1:P1:K1", "N1", "P1", "K1", "N1:P1", "N1:K1", "P1:K1")) ) expect_equal(ncol(get_parameters(m1)), 3) expect_equal(nrow(get_parameters(m1, effects = "all")), 8) expect_equal( get_parameters(m1, effects = "all")$Group, c("(Intercept)", "block", "Within", "Within", "Within", "Within", "Within", "Within") ) expect_equal( find_parameters(m2), list( conditional = c( "(Intercept)", "N1", "P1", "K1", "N1:P1", "N1:K1", "P1:K1", "N1:P1:K1" ) ) ) expect_equal(nrow(get_parameters(m2)), 8) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "F-statistic") expect_identical(find_statistic(m2), "F-statistic") }) } insight/tests/testthat/test-get_predicted.R0000644000175000017500000005143314144553020020761 0ustar nileshnileshrun_stan <- Sys.getenv("RunAllinsightStanTests") == "yes" pkgs <- c( "lme4", "brms", "glmmTMB", "mgcv", "gamm4", "merTools", "emmeans", "bayestestR", "mclust", "rstanarm", "rstantools", "psych" ) invisible(sapply(pkgs, requiet)) # LM and GLM -------------------------------------------------------------- # ========================================================================= test_that("get_predicted - lm", { skip_if(isFALSE(run_stan)) skip_if_not_installed("rstanarm") x <- lm(mpg ~ cyl + hp, data = mtcars) # Link vs. relation rezlink <- get_predicted(x, predict = "link") rezrela <- get_predicted(x, predict = "expectation") expect_equal(mean(abs(rezlink - rezrela)), 0, tolerance = 1e-3) expect_equal(mean(summary(rezlink)$CI_high - summary(rezrela)$CI_high), 0, tolerance = 1e-3) # Relation vs. 
Prediction rezpred <- get_predicted(x, predict = "prediction") expect_equal(mean(abs(rezlink - rezpred)), 0, tolerance = 1e-3) expect_true(all(mean(summary(rezlink)$CI_high - summary(rezpred)$CI_high) < 0)) # Confidence ref <- predict(x, se.fit = TRUE, interval = "confidence") rez <- as.data.frame(get_predicted(x, predict = "expectation")) expect_equal(nrow(rez), 32) expect_equal(max(abs(as.data.frame(ref$fit)$fit - rez$Predicted)), 0, tolerance = 1e-10) expect_equal(max(abs(ref$se.fit - rez$SE)), 0, tolerance = 1e-10) expect_equal(max(abs(as.data.frame(ref$fit)$lwr - rez$CI_low)), 0, tolerance = 1e-10) # Prediction ref <- predict(x, newdata = insight::get_data(x), se.fit = TRUE, interval = "prediction") rez <- as.data.frame(get_predicted(x, predict = "prediction")) expect_equal(nrow(rez), 32) expect_equal(max(abs(as.data.frame(ref$fit)$fit - rez$Predicted)), 0, tolerance = 1e-10) expect_equal(max(abs(as.data.frame(ref$fit)$lwr - rez$CI_low)), 0, tolerance = 1e-10) # Bootstrap set.seed(333) ref <- predict(x, newdata = insight::get_data(x), se.fit = TRUE, interval = "confidence") rez <- get_predicted(x, iterations = 600) expect_equal(length(rez), 32) expect_null(nrow(rez)) expect_equal(mean(abs(as.data.frame(ref$fit)$fit - summary(rez)$Predicted)), 0, tolerance = 0.1) expect_equal(mean(abs(as.data.frame(ref$fit)$lwr - summary(rez)$CI_low)), 0, tolerance = 0.5) # TODO: Is it possible to get "prediction" CIs via bootstrapping? # vs. Bayesian xbayes <- rstanarm::stan_glm(mpg ~ cyl + hp, data = mtcars, refresh = 0, seed = 333) rez <- as.data.frame(get_predicted(x, predict = "link")) rezbayes <- summary(get_predicted(xbayes, predict = "link")) expect_equal(mean(abs(rez$Predicted - rezbayes$Predicted)), 0, tolerance = 0.1) expect_equal(mean(abs(rez$CI_low - rezbayes$CI_low)), 0, tolerance = 0.1) rez <- as.data.frame(get_predicted(x, predict = "prediction")) rezbayes <- summary(get_predicted(xbayes, predict = "prediction")) expect_equal(mean(abs(rez$Predicted - rezbayes$Predicted)), 0, tolerance = 0.1) expect_equal(mean(abs(rez$CI_low - rezbayes$CI_low)), 0, tolerance = 0.2) }) test_that("get_predicted - glm", { skip_if(isFALSE(run_stan)) skip_if_not_installed("rstanarm") x <- glm(vs ~ wt, data = mtcars, family = "binomial") # Link vs. relation rezlink <- get_predicted(x, predict = "link") rezrela <- get_predicted(x, predict = "expectation") expect_true(min(rezlink) < 0) expect_true(min(rezrela) > 0) expect_true(min(summary(rezlink)$CI_low) < 0) expect_true(min(summary(rezrela)$CI_low) > 0) # Relation vs. Prediction rezrela <- get_predicted(x, predict = "expectation") rezpred <- get_predicted(x, predict = "prediction") expect_equal(mean(abs(rezrela - rezpred)), 0, tolerance = 1e-3) expect_true(all(mean(summary(rezrela)$CI_high - summary(rezpred)$CI_high) < 0)) # Against stats::predict ref <- predict(x, se.fit = TRUE, type = "response") rez <- as.data.frame(get_predicted(x, predict = "expectation")) expect_equal(nrow(rez), 32) expect_equal(max(abs(ref$fit - rez$Predicted)), 0, tolerance = 1e-10) expect_equal(max(abs(ref$se.fit - rez$SE)), 0, tolerance = 1e-10) ref <- as.data.frame(suppressWarnings(insight::link_inverse(x)(predict.lm(x, interval = "confidence")))) expect_equal(max(abs(ref$lwr - rez$CI_low)), 0, tolerance = 1e-2) # Bootstrap set.seed(333) ref <- suppressWarnings(predict(x, se.fit = TRUE, type = "response")) rez <- suppressWarnings(summary(get_predicted(x, iterations = 800, verbose = FALSE))) expect_equal(mean(abs(ref$fit - rez$Predicted)), 0, tolerance = 0.1) # vs. 
Bayesian xbayes <- rstanarm::stan_glm(vs ~ wt, data = mtcars, family = "binomial", refresh = 0, seed = 333) rez <- as.data.frame(get_predicted(x, predict = "link")) rezbayes <- summary(get_predicted(xbayes, predict = "link")) expect_equal(mean(abs(rez$Predicted - rezbayes$Predicted)), 0, tolerance = 0.1) expect_equal(mean(abs(rez$CI_low - rezbayes$CI_low)), 0, tolerance = 0.1) rez <- as.data.frame(get_predicted(x, predict = "prediction")) rezbayes <- summary(get_predicted(xbayes, predict = "prediction")) expect_equal(mean(abs(rez$Predicted - rezbayes$Predicted)), 0, tolerance = 0.1) # expect_equal(mean(abs(rez$CI_low - rezbayes$CI_low)), 0, tolerance = 0.3) }) test_that("get_predicted - lm (log)", { x <- lm(mpg ~ log(hp), data = mtcars) rez <- insight::get_predicted(x) expect_equal(length(rez), 32) expect_equal(max(abs(rez - stats::fitted(x))), 0) expect_equal(max(abs(rez - stats::predict(x))), 0) data <- as.data.frame(rez) expect_equal(max(abs(rez - data$Predicted)), 0) expect_equal(nrow(data), 32) }) # Mixed -------------------------------------------------------------- # ========================================================================= test_that("get_predicted - lmerMod", { skip_if_not_installed("lme4") skip_if_not_installed("merTools") skip_if_not_installed("rstanarm") skip_if(isFALSE(run_stan)) x <- lme4::lmer(mpg ~ am + (1 | cyl), data = mtcars) # Link vs. relation rezlink <- get_predicted(x, predict = "link") rezrela <- get_predicted(x, predict = "expectation") rezpred <- get_predicted(x, predict = "prediction") expect_equal(mean(abs(rezlink - rezrela)), 0, tolerance = 1e-3) expect_equal(mean(summary(rezlink)$CI_high - summary(rezrela)$CI_high), 0, tolerance = 1e-3) expect_true(all(summary(rezlink)$CI_high - summary(rezpred)$CI_high < 0)) # Bootstrap set.seed(333) rez <- as.data.frame(get_predicted(x, iterations = 5)) expect_equal(c(nrow(rez), ncol(rez)), c(32, 9)) # Compare to merTools rez_merTools <- merTools::predictInterval(x, level = 0.95, seed = 333, n.sims = 2000) expect_equal(mean(abs(as.data.frame(rezpred)$CI_low - rez_merTools$lwr)), 0, tolerance = 0.5) # Compare to emmeans (not sure what it does) # refgrid <- emmeans::ref_grid(x, at = as.list(get_data(x)), data = get_data(x)) # rez_emmeans <- as.data.frame(predict(refgrid, level = 0.95, interval = "prediction")) # This is completely off # expect_equal(mean(as.data.frame(rez)$CI_low - rez_emmeans$lower.PL), 0, tolerance = 0.5) # Compare with glmmTMB ref <- insight::get_predicted(glmmTMB::glmmTMB(mpg ~ am + (1 | cyl), data = mtcars), predict = "expectation") expect_equal(mean(abs(rezrela - ref)), 0, tolerance = 0.1) # A bit high # expect_equal(mean(abs(as.data.frame(rezrela)$SE - as.data.frame(ref)$SE)), 0, tolerance = 1e-5) # expect_equal(mean(abs(as.data.frame(rezrela)$CI_low - as.data.frame(ref)$CI_low)), 0, tolerance = 1e-5) # Compare with rstanarm xref <- suppressWarnings( rstanarm::stan_lmer(mpg ~ am + (1 | cyl), data = mtcars, refresh = 0, iter = 1000, seed = 333 ) ) rez_stan <- insight::get_predicted(xref, predict = "expectation") expect_equal(mean(abs(rezrela - rez_stan)), 0, tolerance = 0.1) # Different indeed # expect_equal(mean(as.data.frame(rezrela)$CI_low - as.data.frame(rez_stan)$CI_low), 0, tolerance = 0.5) }) test_that("get_predicted - lmerMod (log)", { skip_if_not_installed("lme4") x <- lme4::lmer(mpg ~ am + log(hp) + (1 | cyl), data = mtcars) rez <- insight::get_predicted(x) expect_equal(length(rez), 32) expect_equal(max(abs(rez - stats::fitted(x))), 0) expect_equal(max(abs(rez - 
stats::predict(x))), 0) expect_equal(nrow(as.data.frame(rez)), 32) # No random rez2 <- insight::get_predicted(x, newdata = mtcars[c("am", "hp")]) expect_true(!all(is.na(as.data.frame(rez2)))) }) test_that("get_predicted - merMod", { skip_if_not_installed("lme4") skip_if_not_installed("glmmTMB") x <- lme4::glmer(vs ~ am + (1 | cyl), data = mtcars, family = "binomial") rezlink <- get_predicted(x, predict = "link") rezrela <- get_predicted(x, predict = "expectation") expect_true(min(rezlink) < 0) expect_true(min(rezrela) > 0) expect_true(min(summary(rezlink)$CI_low) < 0) expect_true(min(summary(rezrela)$CI_low) > 0) expect_equal(max(abs(rezrela - stats::fitted(x))), 0) expect_equal(max(abs(rezrela - stats::predict(x, type = "response"))), 0) expect_equal(nrow(as.data.frame(rezlink)), 32) # Compare with glmmTMB xref <- glmmTMB::glmmTMB(vs ~ am + (1 | cyl), data = mtcars, family = "binomial") rez_ref <- insight::get_predicted(xref, predict = "expectation") expect_equal(max(abs(rezrela - rez_ref)), 0, tolerance = 1e-5) expect_equal(mean(abs(as.data.frame(rezrela)$SE - as.data.frame(rez_ref)$SE)), 0, tolerance = 0.2) }) test_that("get_predicted - glmmTMB", { skip_if_not_installed("glmmTMB") x <- glmmTMB::glmmTMB(mpg ~ am + (1 | cyl), data = mtcars) # Link vs. relation rezlink <- get_predicted(x, predict = "link") rezrela <- get_predicted(x, predict = "expectation") expect_equal(mean(abs(rezlink - rezrela)), 0, tolerance = 1e-3) expect_equal(mean(summary(rezlink)$CI_high - summary(rezrela)$CI_high), 0, tolerance = 1e-3) # Bootstrap set.seed(333) rez <- as.data.frame(get_predicted(x, iterations = 5, predict = "link")) expect_equal(c(nrow(rez), ncol(rez)), c(32, 9)) # Binomial x <- glmmTMB::glmmTMB(vs ~ am + (1 | cyl), data = mtcars, family = "binomial") rezlink <- get_predicted(x, predict = "link") rezrela <- get_predicted(x, predict = "expectation") expect_true(min(rezlink) < 0) expect_true(min(rezrela) > 0) expect_true(min(summary(rezlink)$CI_low) < 0) expect_true(min(summary(rezrela)$CI_low) > 0) expect_equal(max(abs(rezrela - stats::fitted(x))), 0) expect_equal(max(abs(rezrela - stats::predict(x, type = "response"))), 0) expect_equal(nrow(as.data.frame(rez)), 32) # No random rez <- insight::get_predicted(x, newdata = mtcars[c("am")]) expect_true(!all(is.na(as.data.frame(rez)))) x <- glmmTMB::glmmTMB(Petal.Length ~ Petal.Width + (1 | Species), data = iris) rez <- insight::get_predicted(x, data = data.frame(Petal.Width = c(0, 1, 2))) expect_equal(length(rez), 3) # vs. 
Bayesian # x <- glmmTMB::glmmTMB(mpg ~ am + (1 | cyl), data = mtcars) # rez <- summary(insight::get_predicted(x)) # xref <- rstanarm::stan_lmer(mpg ~ am + (1 | cyl), data = mtcars, refresh = 0, iter = 1000, seed = 333) # rezbayes <- summary(insight::get_predicted(xref)) # expect_equal(mean(abs(rez$Predicted - rezbayes$Predicted)), 0, tolerance = 0.1) # expect_equal(mean(abs(rez$CI_low - rezbayes$CI_low)), 0, tolerance = 0.2) }) # GAM -------------------------------------------------------------- # ========================================================================= test_that("get_predicted - mgcv::gam and gamm", { skip_if_not_installed("mgcv") x <- mgcv::gam(mpg ~ am + s(wt), data = mtcars) expect_equal(length(insight::get_predicted(x)), 32) rez <- insight::get_predicted(x, data = data.frame(am = c(0, 0, 1), wt = c(2, 3, 4))) expect_equal(length(rez), 3) # No smooth rez <- insight::get_predicted(x, newdata = data.frame(am = c(0, 0, 1))) expect_equal(length(rez), 3) rez2 <- insight::get_predicted(x, newdata = data.frame(am = c(0, 0, 1), wt = c(2, 3, 4)), include_smooth = FALSE) expect_equal(max(abs(as.numeric(rez - rez2))), 0, tolerance = 1e-4) expect_equal(length(unique(attributes(rez)$data$wt)), 1) # Bootstrap set.seed(333) rez <- summary(get_predicted(x, iterations = 50)) expect_equal(nrow(rez), 32) # Binomial x <- mgcv::gam(vs ~ am + s(wt), data = mtcars, family = "binomial") rez <- insight::get_predicted(x) expect_equal(length(rez), 32) expect_equal(max(abs(rez - stats::fitted(x))), 0) expect_equal(max(abs(rez - stats::predict(x, type = "response"))), 0) expect_equal(nrow(as.data.frame(rez)), 32) # GAMM x <- mgcv::gamm(vs ~ am + s(wt), random = list(cyl = ~1), data = mtcars, family = "binomial", verbosePQL = FALSE) rez <- insight::get_predicted(x) expect_equal(length(rez), 32) expect_equal(max(abs(rez - x$gam$fitted.values)), 0) expect_equal(max(abs(rez - stats::predict(x$gam, type = "response"))), 0) expect_equal(nrow(as.data.frame(rez)), 32) }) # Bayesian -------------------------------------------------------------- # ========================================================================= test_that("get_predicted - rstanarm", { skip_if(isFALSE(run_stan)) skip_if_not_installed("rstanarm") # LM x <- rstanarm::stan_glm(mpg ~ cyl + hp, data = mtcars, refresh = 0, seed = 333) rezlink <- summary(get_predicted(x, predict = "link")) rezrela <- summary(get_predicted(x, predict = "expectation")) expect_equal(mean(abs(rezlink$Predicted - rezrela$Predicted)), 0, tolerance = 0.1) expect_equal(mean(abs(rezlink$CI_high - rezrela$CI_high)), 0, tolerance = 0.1) rezpred <- summary(get_predicted(x, predict = "prediction")) expect_equal(mean(abs(rezlink$Predicted - rezpred$Predicted)), 0, tolerance = 0.1) expect_true(all(mean(rezlink$CI_high - rezpred$CI_high) < 0)) # GLM x <- rstanarm::stan_glm(vs ~ wt, data = mtcars, family = "binomial", refresh = 0, seed = 333) rezlink <- summary(get_predicted(x, predict = "link")) rezrela <- summary(get_predicted(x, predict = "expectation")) expect_true(min(rezlink$Predicted) < 0) expect_true(min(rezrela$Predicted) > 0) expect_true(min(rezlink$CI_high) < 0) expect_true(min(rezrela$CI_high) > 0) rezpred <- summary(get_predicted(x, predict = "prediction")) expect_equal(mean(abs(rezrela$Predicted - rezpred$Predicted)), 0, tolerance = 0.1) expect_true(all(mean(rezrela$CI_high - rezpred$CI_high) < 0)) # Mixed x <- suppressWarnings( rstanarm::stan_lmer(mpg ~ am + (1 | cyl), data = mtcars, refresh = 0, seed = 333, iter = 500 ) ) rezrela <- summary(get_predicted(x, 
predict = "expectation")) rezpred <- summary(get_predicted(x, predict = "prediction")) rezrela2 <- summary(get_predicted(x, predict = "expectation", include_random = FALSE)) rezpred2 <- summary(get_predicted(x, predict = "prediction", include_random = FALSE)) expect_true(mean(abs(rezrela$Predicted - rezrela2$Predicted)) > 0) expect_true(mean(abs(rezpred$Predicted - rezpred2$Predicted)) > 0) rezrela3 <- summary(get_predicted(x, predict = "expectation", data = mtcars["am"])) expect_equal(mean(abs(rezrela2$Predicted - rezrela3$Predicted)), 0, tolerance = 0.001) }) # FA / PCA ---------------------------------------------------------------- # ========================================================================= test_that("get_predicted - FA / PCA", { skip_if_not_installed("fungible") skip_if_not_installed("psych") # PCA x <- get_predicted(psych::principal(mtcars, 3)) expect_equal(dim(x), c(32, 3)) x <- get_predicted(psych::principal(mtcars, 3), data = mtcars[1:5, ]) expect_equal(dim(x), c(5, 3)) x <- get_predicted(prcomp(mtcars)) expect_equal(dim(x), c(32, ncol(mtcars))) x <- get_predicted(prcomp(mtcars), data = mtcars[1:5, ]) expect_equal(dim(x), c(5, ncol(mtcars))) # FA x <- get_predicted(psych::fa(mtcars, 3)) expect_equal(dim(x), c(32, 3)) x <- get_predicted(psych::fa(mtcars, 3), data = mtcars[1:5, ]) expect_equal(dim(x), c(5, 3)) expect_error(get_predicted(fungible::faMain(mtcars, numFactors = 3))) x <- get_predicted(fungible::faMain(mtcars, numFactors = 3), data = mtcars[1:5, ]) expect_equal(dim(x), c(5, 3)) }) # arguments: `predict` vs. `type` ----------------------------------------- # ========================================================================= test_that("lm: get_predicted vs barebones `predict()`", { mod <- lm(mpg ~ hp + factor(cyl), mtcars) known <- predict(mod, se.fit = TRUE, interval = "confidence") unknown1 <- as.data.frame(get_predicted(mod)) unknown2 <- as.data.frame(get_predicted(mod, predict = "expectation")) unknown3 <- suppressWarnings(as.data.frame(get_predicted(mod, predict = "response"))) expect_warning(as.data.frame(get_predicted(mod, predict = "response"))) expect_equal(unknown1$Predicted, known$fit[, "fit"], ignore_attr = TRUE) expect_equal(unknown1$SE, known$se.fit, ignore_attr = TRUE) expect_equal(unknown1$CI_low, known$fit[, "lwr"], ignore_attr = TRUE) expect_equal(unknown1$CI_high, known$fit[, "upr"], ignore_attr = TRUE) expect_equal(unknown2$Predicted, known$fit[, "fit"], ignore_attr = TRUE) expect_equal(unknown2$SE, known$se.fit, ignore_attr = TRUE) expect_equal(unknown2$CI_low, known$fit[, "lwr"], ignore_attr = TRUE) expect_equal(unknown2$CI_high, known$fit[, "upr"], ignore_attr = TRUE) expect_equal(unknown3$Predicted, known$fit[, "fit"], ignore_attr = TRUE) expect_equal(unknown3$SE, known$se.fit, ignore_attr = TRUE) expect_equal(unknown3$CI_low, known$fit[, "lwr"], ignore_attr = TRUE) expect_equal(unknown3$CI_high, known$fit[, "upr"], ignore_attr = TRUE) }) test_that("using both `predict` and `type` raises an informative error", { mod <- glm(am ~ hp + factor(cyl), family = binomial, data = mtcars) expect_warning(expect_error( get_predicted(mod, predict = "response", type = "response") )) }) test_that("`predict` and `type` are both `NULL`", { mod <- glm(am ~ hp + factor(cyl), family = binomial, data = mtcars) expect_error(get_predicted(mod, predict = NULL), regexp = "supply") }) test_that("`predict()` vs. 
`get_predicted` link equivalence", { # link mod <- glm(am ~ hp + factor(cyl), family = binomial, data = mtcars) known <- predict(mod, type = "link", interval = "confidence", se.fit = TRUE) unknown <- as.data.frame(get_predicted(mod, predict = NULL, type = "link")) expect_equal(unname(known$fit), unknown$Predicted) expect_equal(unname(known$se.fit), unknown$SE) # response mod <- glm(am ~ hp + factor(cyl), family = binomial, data = mtcars) known <- predict(mod, type = "response", se.fit = TRUE) unknown1 <- as.data.frame(get_predicted(mod, predict = "expectation")) unknown2 <- as.data.frame(get_predicted(mod, predict = NULL, type = "response")) unknown3 <- suppressWarnings(as.data.frame(get_predicted(mod, predict = "response"))) expect_warning(as.data.frame(get_predicted(mod, predict = "response"))) expect_equal(unname(known$fit), unknown1$Predicted) expect_equal(unname(known$se.fit), unknown1$SE) expect_equal(unname(known$fit), unknown2$Predicted) expect_equal(unname(known$se.fit), unknown2$SE) expect_equal(unname(known$fit), unknown3$Predicted) expect_equal(unname(known$se.fit), unknown3$SE) }) test_that("hurdle: get_predicted matches `predict()`", { skip_if_not_installed("pscl") data("bioChemists", package = "pscl") mod <- pscl::hurdle(art ~ phd + fem | ment, data = bioChemists, dist = "negbin") known <- predict(mod, type = "response") unknown <- get_predicted(mod, predict = NULL, type = "response") expect_equal(known, unknown, ignore_attr = TRUE) known <- predict(mod, type = "zero") unknown <- get_predicted(mod, predict = NULL, type = "zero") expect_equal(known, unknown, ignore_attr = TRUE) }) test_that("bugfix: used to return all zeros", { mod <- glm(am ~ hp + factor(cyl), family = binomial, data = mtcars) expect_warning(get_predicted(mod, predict = "response")) pred <- suppressWarnings(get_predicted(mod, predict = "response")) expect_false(any(pred == 0)) pred <- suppressWarnings(get_predicted(mod, predict = "original")) expect_warning(get_predicted(mod, predict = "original")) expect_false(all(pred == 0)) }) test_that("brms: `type` in ellipsis used to produce the wrong intervals", { skip_if(isFALSE(run_stan)) skip_if_not_installed("brms") library(brms) void <- capture.output( mod <- brm(am ~ hp + mpg, family = bernoulli, data = mtcars, chains = 2, iter = 1000, seed = 1024, silent = 2 ) ) x <- get_predicted(mod, predict = "link") y <- get_predicted(mod, predict = "expectation") z <- get_predicted(mod, predict = NULL, type = "response") expect_equal(round(x[1], 1), -1.4) expect_equal(round(y[1], 1), .2) expect_identical(y, z) }) insight/tests/testthat/test-offset.R0000644000175000017500000000231514122064326017442 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("pscl")) { # Generate some zero-inflated data set.seed(123) N <- 100 # Samples x <- runif(N, 0, 10) # Predictor off <- rgamma(N, 3, 2) # Offset variable yhat <- -1 + x * 0.5 + log(off) # Prediction on log scale dat <- data.frame(y = NA, x, logOff = log(off)) # Storage dataframe dat$y <- rpois(N, exp(yhat)) # Poisson process dat$y <- ifelse(rbinom(N, 1, 0.3), 0, dat$y) # Zero-inflation process # Fit zeroinfl model using 2 methods of offset input m1 <- zeroinfl(y ~ offset(logOff) + x | 1, data = dat, dist = "poisson") m2 <- zeroinfl(y ~ x | 1, data = dat, offset = logOff, dist = "poisson" ) # Fit zeroinfl model without offset data m3 <- zeroinfl(y ~ x | 1, data = dat, dist = "poisson") test_that("offset in get_data()", { expect_equal(colnames(get_data(m1)), c("y", "logOff", "x")) 
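    # note: the column order returned by get_data() differs depending on whether
    # the offset was given inside the formula (m1) or via the `offset` argument (m2, m3)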
    expect_equal(colnames(get_data(m2)), c("y", "x", "logOff"))
    expect_equal(colnames(get_data(m3)), c("y", "x"))
  })

  test_that("offset in find_offset()", {
    expect_equal(find_offset(m1), "logOff")
    expect_equal(find_offset(m2), "logOff")
    expect_null(find_offset(m3))
  })
}
insight/tests/testthat/test-namespace.R0000644000175000017500000000611514122064326020112 0ustar nileshnilesh
if (requiet("testthat") && requiet("insight") && requiet("splines")) {
  data(iris)
  m1 <- lm(Sepal.Length ~ splines::bs(Petal.Width, df = 4) + Species, data = iris)

  test_that("model_info", {
    expect_true(model_info(m1)$is_linear)
  })

  test_that("find_predictors", {
    expect_identical(find_predictors(m1), list(conditional = c("Petal.Width", "Species")))
    expect_identical(find_predictors(m1, flatten = TRUE), c("Petal.Width", "Species"))
    expect_null(find_predictors(m1, effects = "random"))
  })

  test_that("get_predictors", {
    expect_equal(get_predictors(m1), iris[, c("Petal.Width", "Species")])
  })

  test_that("find_response", {
    expect_identical(find_response(m1), "Sepal.Length")
  })

  test_that("get_response", {
    expect_identical(get_response(m1), iris$Sepal.Length)
  })

  test_that("link_inverse", {
    expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-4)
  })

  test_that("link_function", {
    expect_equal(link_function(m1)(.2), .2, tolerance = 1e-4)
  })

  test_that("get_data", {
    expect_equal(nrow(get_data(m1)), 150)
    expect_equal(colnames(get_data(m1)), c("Sepal.Length", "Species", "Petal.Width"))
  })

  test_that("find_formula", {
    expect_length(find_formula(m1), 1)
    expect_equal(
      find_formula(m1),
      list(
        conditional = as.formula("Sepal.Length ~ splines::bs(Petal.Width, df = 4) + Species")
      ),
      ignore_attr = TRUE
    )
  })

  test_that("find_variables", {
    expect_equal(
      find_variables(m1),
      list(response = "Sepal.Length", conditional = c("Petal.Width", "Species"))
    )
    expect_equal(
      find_variables(m1, flatten = TRUE),
      c("Sepal.Length", "Petal.Width", "Species")
    )
  })

  test_that("find_parameters", {
    expect_equal(
      find_parameters(m1),
      list(conditional = c(
        "(Intercept)",
        "splines::bs(Petal.Width, df = 4)1",
        "splines::bs(Petal.Width, df = 4)2",
        "splines::bs(Petal.Width, df = 4)3",
        "splines::bs(Petal.Width, df = 4)4",
        "Speciesversicolor",
        "Speciesvirginica"
      ))
    )
  })

  test_that("get_parameters", {
    expect_equal(nrow(get_parameters(m1)), 7)
    expect_equal(
      get_parameters(m1)$Parameter,
      c(
        "(Intercept)",
        "splines::bs(Petal.Width, df = 4)1",
        "splines::bs(Petal.Width, df = 4)2",
        "splines::bs(Petal.Width, df = 4)3",
        "splines::bs(Petal.Width, df = 4)4",
        "Speciesversicolor",
        "Speciesvirginica"
      )
    )
  })

  test_that("find_algorithm", {
    expect_equal(find_algorithm(m1), list(algorithm = "OLS"))
  })

  test_that("find_terms", {
    expect_equal(
      find_terms(m1),
      list(
        response = "Sepal.Length",
        conditional = c("splines", "bs(Petal.Width, df = 4)", "Species")
      )
    )
  })

  test_that("find_statistic", {
    expect_identical(find_statistic(m1), "t-statistic")
  })
}
insight/tests/testthat/test-find_response.R0000644000175000017500000000036214163571275021025 0ustar nileshnilesh
if (requiet("testthat") && requiet("insight")) {
  test_that("find_response", {
    expect_equal(find_response(y1 + y2 ~ x, combine = FALSE), c("y1", "y2"))
    expect_equal(find_response(y1 + y2 ~ x, combine = TRUE), "y1 + y2")
  })
}
insight/tests/testthat/test-formatting.R0000644000175000017500000000143414157047106020334 0ustar nileshnilesh
if (requiet("testthat") && requiet("insight")) {
  x <- c(0.0000453, 0.12, 1.2, 0.0001234)

  test_that("format_value", {
    f <- format_value(x, zap_small = FALSE)
    expect_equal(f, c("4.53e-05", "0.12", "1.20", "1.23e-04"))
    f <-
format_value(x, zap_small = TRUE) expect_equal(f, c("0.00", "0.12", "1.20", "0.00")) }) p <- c(1, 0.9, 0.05, 0.01, 0.001, 0.0009) test_that("format_p", { expect_equal(format_p(p, name = NULL), c("> .999", "0.900", "0.050", "0.010", "0.001", "< .001")) expect_equal(format_p(p, name = NULL, whitespace = FALSE), c(">.999", "0.900", "0.050", "0.010", "0.001", "<.001")) expect_equal(format_p(p, name = NULL, whitespace = FALSE, decimal_separator = ","), c(">,999", "0,900", "0,050", "0,010", "0,001", "<,001")) }) } insight/tests/testthat/test-lm.R0000644000175000017500000001632314122064326016570 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("stats")) { data(iris) data(mtcars) m1 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) m2 <- lm(log(mpg) ~ log(hp) + cyl + I(cyl^2) + poly(wt, degree = 2, raw = TRUE), data = mtcars ) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_false(model_info(m1)$is_bayesian) }) test_that("get_residuals", { expect_equal( head(get_residuals(m2)), head(stats::residuals(m2)), tolerance = 1e-3, ignore_attr = TRUE ) }) test_that("get_sigma", { expect_equal(get_sigma(m1), 0.4810113, tolerance = 1e-3, ignore_attr = TRUE) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("Petal.Width", "Species"))) expect_identical( find_predictors(m1, flatten = TRUE), c("Petal.Width", "Species") ) expect_null(find_predictors(m1, effects = "random")) expect_identical(find_predictors(m2), list(conditional = c("hp", "cyl", "wt"))) expect_identical(find_predictors(m2, flatten = TRUE), c("hp", "cyl", "wt")) expect_null(find_predictors(m2, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "Sepal.Length") expect_identical(find_response(m2), "mpg") }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), .2) expect_identical(link_inverse(m2)(.2), .2) }) test_that("loglik", { expect_equal(get_loglikelihood(m1), logLik(m1), ignore_attr = TRUE) expect_equal(get_loglikelihood(m2), logLik(m2), ignore_attr = TRUE) }) test_that("get_df", { expect_equal(get_df(m1), df.residual(m1), ignore_attr = TRUE) expect_equal(get_df(m2), df.residual(m2), ignore_attr = TRUE) expect_equal(get_df(m1, type = "model"), attr(logLik(m1), "df"), ignore_attr = TRUE) expect_equal(get_df(m2, type = "model"), attr(logLik(m2), "df"), ignore_attr = TRUE) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 150) expect_equal( colnames(get_data(m1)), c("Sepal.Length", "Petal.Width", "Species") ) expect_equal(nrow(get_data(m2)), 32) expect_equal(colnames(get_data(m2)), c("mpg", "hp", "cyl", "wt")) }) test_that("get_intercept", { expect_equal(get_intercept(m1), as.vector(stats::coef(m1)[1]), ignore_attr = TRUE) expect_equal(get_intercept(m2), as.vector(stats::coef(m2)[1]), ignore_attr = TRUE) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("Sepal.Length ~ Petal.Width + Species")), ignore_attr = TRUE ) expect_length(find_formula(m2), 1) expect_equal( find_formula(m2), list( conditional = as.formula( "log(mpg) ~ log(hp) + cyl + I(cyl^2) + poly(wt, degree = 2, raw = TRUE)" ) ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "Sepal.Length", conditional = c("Petal.Width", "Species") ) ) expect_equal( find_terms(m2), list( response = "log(mpg)", conditional = c( "log(hp)", "cyl", "I(cyl^2)", "poly(wt, degree = 2, raw = TRUE)" ) ) ) expect_equal( 
      find_terms(m1, flatten = TRUE),
      c("Sepal.Length", "Petal.Width", "Species")
    )
    expect_equal(
      find_terms(m2, flatten = TRUE),
      c("log(mpg)", "log(hp)", "cyl", "I(cyl^2)", "poly(wt, degree = 2, raw = TRUE)")
    )
  })

  test_that("find_variables", {
    expect_equal(
      find_variables(m1),
      list(response = "Sepal.Length", conditional = c("Petal.Width", "Species"))
    )
    expect_equal(
      find_variables(m2),
      list(response = "mpg", conditional = c("hp", "cyl", "wt"))
    )
    expect_equal(
      find_variables(m1, flatten = TRUE),
      c("Sepal.Length", "Petal.Width", "Species")
    )
    expect_equal(
      find_variables(m2, flatten = TRUE),
      c("mpg", "hp", "cyl", "wt")
    )
  })

  test_that("find_parameters", {
    expect_equal(
      find_parameters(m1),
      list(conditional = c("(Intercept)", "Petal.Width", "Speciesversicolor", "Speciesvirginica"))
    )
    expect_equal(nrow(get_parameters(m1)), 4)
    expect_equal(
      get_parameters(m1)$Parameter,
      c("(Intercept)", "Petal.Width", "Speciesversicolor", "Speciesvirginica")
    )
  })

  test_that("find_parameters summary.lm", {
    s <- summary(m1)
    expect_equal(
      find_parameters(s),
      list(conditional = c("(Intercept)", "Petal.Width", "Speciesversicolor", "Speciesvirginica"))
    )
  })

  test_that("linkfun", {
    expect_false(is.null(link_function(m1)))
    expect_false(is.null(link_function(m2)))
  })

  test_that("find_algorithm", {
    expect_equal(find_algorithm(m1), list(algorithm = "OLS"))
  })

  test_that("get_variance", {
    expect_warning(expect_null(get_variance(m1)))
    expect_warning(expect_null(get_variance_dispersion(m1)))
    expect_warning(expect_null(get_variance_distribution(m1)))
    expect_warning(expect_null(get_variance_fixed(m1)))
    expect_warning(expect_null(get_variance_intercept(m1)))
    expect_warning(expect_null(get_variance_random(m1)))
    expect_warning(expect_null(get_variance_residual(m1)))
  })

  test_that("is_model", {
    expect_true(is_model(m1))
  })

  test_that("all_models_equal", {
    expect_true(all_models_equal(m1, m2))
  })

  test_that("get_varcov", {
    expect_equal(diag(get_varcov(m1)), diag(vcov(m1)))
  })

  test_that("get_statistic", {
    expect_equal(get_statistic(m1)$Statistic, c(57.5427, 4.7298, -0.2615, -0.1398), tolerance = 1e-3)
  })

  test_that("find_statistic", {
    expect_equal(find_statistic(m1), "t-statistic")
  })

  data("DNase")
  DNase1 <- subset(DNase, Run == 1)
  m3 <- stats::nls(
    density ~ stats::SSlogis(log(conc), Asym, xmid, scal),
    DNase1,
    start = list(Asym = 1, xmid = 1, scal = 1)
  )

  ## Dobson (1990) Page 93: Randomized Controlled Trial
  counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12)
  outcome <- gl(3, 1, 9)
  treatment <- gl(3, 3)
  m4 <- glm(counts ~ outcome + treatment, family = poisson())

  test_that("is_model", {
    expect_true(is_model(m3))
  })

  test_that("is_model_supported", {
    expect_false(is_model_supported(m3))
  })

  test_that("all_models_equal", {
    expect_false(all_models_equal(m1, m2, m3))
    expect_false(all_models_equal(m1, m2, m4))
  })

  test_that("find_statistic", {
    expect_identical(find_statistic(m1), "t-statistic")
    expect_identical(find_statistic(m2), "t-statistic")
    expect_identical(find_statistic(m3), "t-statistic")
    expect_identical(find_statistic(m4), "z-statistic")
  })
}
insight/tests/testthat/test-rqss.R0000644000175000017500000000436114122064326017147 0ustar nileshnilesh
if (requiet("testthat") && requiet("insight") && requiet("quantreg") && requiet("tripack")) {
  data("CobarOre")
  set.seed(123)
  CobarOre$w <- rnorm(nrow(CobarOre))

  # model
  m1 <- rqss(z ~ w + qss(cbind(x, y), lambda = .08), data = CobarOre)

  test_that("model_info", {
    expect_true(model_info(m1)$is_linear)
  })

  test_that("find_predictors", {
    expect_identical(
      find_predictors(m1),
      list(conditional = c("w", "x",
"y")) ) expect_identical( find_predictors(m1, flatten = TRUE), c("w", "x", "y") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "z") }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("z ~ w + qss(cbind(x, y), lambda = 0.08)")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list(response = "z", conditional = c("w", "qss(cbind(x, y), lambda = 0.08)")) ) expect_equal( find_terms(m1, flatten = TRUE), c("z", "w", "qss(cbind(x, y), lambda = 0.08)") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 38) }) test_that("link_function", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("(Intercept)", "w"), smooth_terms = "cbind(x, y)") ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "w", "cbind(x, y)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "sfn")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-n_parameters_rank-deficiency.R0000644000175000017500000000046214122064326023750 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { set.seed(123) data(mtcars) m <- lm(formula = wt ~ am * cyl * vs, data = mtcars) test_that("n_parameters-rank_deficiency", { expect_equal(n_parameters(m), 8) expect_equal(n_parameters(m, remove_nonestimable = TRUE), m$rank) }) } insight/tests/testthat/test-ols.R0000644000175000017500000000470214122064326016753 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("rms")) { data(mtcars) m1 <- ols(mpg ~ rcs(hp, 3) * cyl + wt, data = mtcars) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("hp", "cyl", "wt"))) expect_identical(find_predictors(m1, flatten = TRUE), c("hp", "cyl", "wt")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "mpg") }) test_that("get_response", { expect_equal(get_response(m1), mtcars$mpg) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("hp", "cyl", "wt")) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 32) expect_equal(colnames(get_data(m1)), c("mpg", "cyl", "wt", "hp")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("mpg ~ rcs(hp, 3) * cyl + wt")), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal(find_variables(m1), list( response = "mpg", conditional = c("hp", "cyl", "wt") )) expect_equal( find_variables(m1, flatten = TRUE), c("mpg", "hp", "cyl", "wt") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 32) }) test_that("linkfun", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 
1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("Intercept", "hp", "hp'", "cyl", "wt", "hp * cyl", "hp' * cyl") ) ) expect_equal(nrow(get_parameters(m1)), 7) expect_equal( get_parameters(m1)$Parameter, c("Intercept", "hp", "hp'", "cyl", "wt", "hp * cyl", "hp' * cyl") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "OLS")) }) # TO DO # test_that("find_statistic", { # expect_null(find_statistic(m1)) # }) } insight/tests/testthat/test-crch.R0000644000175000017500000000552514122064326017101 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("crch")) { data("RainIbk") RainIbk$sqrtensmean <- apply(sqrt(RainIbk[, grep("^rainfc", names(RainIbk))]), 1, mean) RainIbk$sqrtenssd <- apply(sqrt(RainIbk[, grep("^rainfc", names(RainIbk))]), 1, sd) m1 <- crch(sqrt(rain) ~ sqrtensmean, data = RainIbk, dist = "gaussian") test_that("model_info", { expect_false(model_info(m1)$is_linear) expect_true(model_info(m1)$is_censored) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("sqrtensmean"))) expect_identical(find_predictors(m1, flatten = TRUE), c("sqrtensmean")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "rain") }) test_that("get_response", { expect_equal(get_response(m1), RainIbk$rain) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("sqrtensmean")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 4971) expect_equal(colnames(get_data(m1)), c("rain", "sqrtensmean")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("sqrt(rain) ~ sqrtensmean")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "sqrt(rain)", conditional = c("sqrtensmean") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("sqrt(rain)", "sqrtensmean") ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "rain", conditional = c("sqrtensmean") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("rain", "sqrtensmean") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 4971) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "sqrtensmean", "(scale)_(Intercept)") ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "sqrtensmean", "(scale)_(Intercept)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-gbm.R0000644000175000017500000000620414122064326016722 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("insight") && requiet("gbm")) { set.seed(102) # for reproducibility m1 <- gbm( mpg ~ gear + cyl + wt, data = mtcars, var.monotone = c(0, 0, 0), distribution = "gaussian", shrinkage = 0.1, interaction.depth = 1, bag.fraction = 0.5, train.fraction = 0.5, n.minobsinnode 
= 1, cv.folds = 3, keep.data = TRUE, verbose = FALSE, n.cores = 1 ) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_false(model_info(m1)$is_binomial) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("gear", "cyl", "wt"))) expect_identical( find_predictors(m1, flatten = TRUE), c("gear", "cyl", "wt") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "mpg") }) test_that("get_response", { expect_equal(get_response(m1), mtcars$mpg) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("gear", "cyl", "wt")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 32) expect_equal(colnames(get_data(m1)), c("mpg", "gear", "cyl", "wt")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("mpg ~ gear + cyl + wt")), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "mpg", conditional = c("gear", "cyl", "wt") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("mpg", "gear", "cyl", "wt") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 32) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("wt", "cyl", "gear")) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal(get_parameters(m1)$Parameter, c("wt", "cyl", "gear")) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "mpg", conditional = c("gear", "cyl", "wt") ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_warning(expect_null(find_algorithm(m1))) }) test_that("find_statistic", { expect_null(find_statistic(m1)) }) } } insight/tests/testthat/test-proportion_response.R0000644000175000017500000000127314142160003022276 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { data(mtcars) m1 <- suppressWarnings(glmer( vs / cyl ~ disp + (1 | cyl), data = mtcars, family = binomial(link = "logit") )) m2 <- suppressWarnings(glmer( I(vs / cyl) ~ disp + (1 | cyl), data = mtcars, family = binomial(link = "logit") )) test_that("get_response", { expect_equal(head(get_response(m1)), c(0, 0, 0.25, 0.16667, 0, 0.16667), tolerance = 1e-2) expect_equal(get_response(m2), mtcars[, c("vs", "cyl")]) }) test_that("find_response", { expect_equal(find_response(m1), "vs/cyl") expect_equal(find_response(m2), "I(vs/cyl)") }) } insight/tests/testthat/test-speedglm.R0000644000175000017500000000612014122064326017752 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("speedglm") && requiet("glmmTMB")) { data(Salamanders) Salamanders$cover <- abs(Salamanders$cover) m1 <- speedglm(count ~ mined + log(cover) + sample, family = poisson(), data = Salamanders ) test_that("model_info", { expect_true(model_info(m1)$is_poisson) expect_true(model_info(m1)$is_count) expect_false(model_info(m1)$is_negbin) expect_false(model_info(m1)$is_binomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("mined", "cover", "sample"))) expect_identical( find_predictors(m1, 
flatten = TRUE), c("mined", "cover", "sample") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "count") }) test_that("get_response", { expect_equal(get_response(m1), Salamanders$count) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("mined", "cover", "sample")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("linkfun", { expect_equal(link_function(m1)(.2), log(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 644) expect_equal( colnames(get_data(m1)), c("count", "mined", "cover", "sample") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("count ~ mined + log(cover) + sample")), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "count", conditional = c("mined", "cover", "sample") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("count", "mined", "cover", "sample") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 644) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "minedno", "log(cover)", "sample") ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "minedno", "log(cover)", "sample") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "count", conditional = c("mined", "log(cover)", "sample") ) ) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "eigen")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-get_variance.R0000644000175000017500000000542014122064326020603 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) .runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (!osx && .runThisTest && requiet("testthat") && requiet("insight") && requiet("lme4")) { data("sleepstudy") data("Penicillin") set.seed(12345) sleepstudy$grp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$subgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$grp == i sleepstudy$subgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) fm2 <- lmer(Reaction ~ Days + (Days || Subject), sleepstudy) fm3 <- lmer( Reaction ~ Days + (1 + Days || grp / subgrp) + (1 + Days | Subject), data = sleepstudy ) fm4 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) fm5 <- lmer( Reaction ~ Days + (1 | grp / subgrp) + (1 | Subject), data = sleepstudy ) fm6 <- lmer(diameter ~ 0 + sample + (1 | plate), data = Penicillin) v1 <- suppressWarnings(get_variance(fm1)) v2 <- suppressWarnings(get_variance(fm2)) v3 <- suppressWarnings(get_variance(fm3)) v4 <- suppressWarnings(get_variance(fm4)) v5 <- suppressWarnings(get_variance(fm5)) v6 <- suppressWarnings(get_variance(fm6)) test_that("get_variance-1", { expect_equal(v1$var.intercept, c(Subject = 612.10016), tolerance = 1e-2 ) expect_equal(v1$var.slope, c(Subject.Days = 35.07171), tolerance = 1e-2 ) }) 
test_that("get_variance-2", { expect_equal(v2$var.intercept, c(Subject = 627.56905), tolerance = 1e-2 ) expect_equal(v2$var.slope, c(Subject.Days = 35.85838), tolerance = 1e-2 ) }) test_that("get_variance-3", { expect_equal(v3$var.intercept, c(subgrp.grp.1 = 0, Subject = 662.52047, grp.1 = 0), tolerance = 1e-2 ) expect_equal(v3$var.slope, c(Subject.Days = 34.25771, subgrp.grp.Days = 7.88485, grp.Days = 0), tolerance = 1e-2 ) }) test_that("get_variance-4", { expect_equal(v4$var.intercept, c(Subject = 1378.17851), tolerance = 1e-2 ) expect_null(v4$var.slope) }) test_that("get_variance-5", { expect_equal(v5$var.intercept, c(`subgrp:grp` = 38.76069, Subject = 1377.50569, grp = 3.32031), tolerance = 1e-2 ) expect_null(v5$var.slope) }) test_that("get_variance-6", { expect_equal(v6$var.intercept, c(plate = 0.71691), tolerance = 1e-2) expect_equal(v6$var.random, 0.71691, tolerance = 1e-2) expect_null(v6$var.slope) }) } insight/tests/testthat/test-all_models_equal.R0000644000175000017500000000141214122064326021453 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { data(mtcars) data(sleepstudy) m1 <- lm(mpg ~ wt + cyl + vs, data = mtcars) m2 <- lm(mpg ~ wt + cyl, data = mtcars) m3 <- lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) m4 <- glm(formula = vs ~ wt, family = binomial(), data = mtcars) test_that("all_models_equal", { expect_true(all_models_equal(m1, m2)) expect_false(all_models_equal(m1, m2, mtcars)) expect_message(expect_false(all_models_equal(m1, m2, mtcars, verbose = TRUE))) expect_false(all_models_equal(m1, m2, m3)) expect_message(expect_false(all_models_equal(m1, m4, m2, m3, verbose = TRUE))) expect_true(is_model_supported(m1)) expect_false(is_model_supported(mtcars)) }) } insight/tests/testthat/test-glm.R0000644000175000017500000000712514122064326016737 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("glmmTMB")) { data(Salamanders) Salamanders$cover <- abs(Salamanders$cover) m1 <- glm(count ~ mined + log(cover) + sample, family = poisson, data = Salamanders ) test_that("model_info", { expect_true(model_info(m1)$is_poisson) expect_true(model_info(m1)$is_count) expect_false(model_info(m1)$is_negbin) expect_false(model_info(m1)$is_binomial) expect_false(model_info(m1)$is_linear) }) test_that("loglik", { expect_equal(get_loglikelihood(m1), logLik(m1), ignore_attr = TRUE) }) test_that("get_df", { expect_equal(get_df(m1), df.residual(m1), ignore_attr = TRUE) expect_equal(get_df(m1, type = "model"), attr(logLik(m1), "df"), ignore_attr = TRUE) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("mined", "cover", "sample"))) expect_identical( find_predictors(m1, flatten = TRUE), c("mined", "cover", "sample") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "count") }) test_that("get_response", { expect_equal(get_response(m1), Salamanders$count) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("mined", "cover", "sample")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("linkfun", { expect_equal(link_function(m1)(.2), -1.609438, tolerance = 1e-4) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 644) expect_equal( colnames(get_data(m1)), c("count", "mined", "cover", "sample") ) }) 
test_that("get_call", { expect_equal(class(get_call(m1)), "call") }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("count ~ mined + log(cover) + sample")), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "count", conditional = c("mined", "cover", "sample") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("count", "mined", "cover", "sample") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 644) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "minedno", "log(cover)", "sample") ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "minedno", "log(cover)", "sample") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "count", conditional = c("mined", "log(cover)", "sample") ) ) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) test_that("get_statistic", { expect_equal(get_statistic(m1)$Statistic, c(-10.7066515607315, 18.1533878215937, -1.68918157150882, 2.23541768590273), tolerance = 1e-4) }) } insight/tests/testthat/test-lmrob_base.R0000644000175000017500000000455014122064326020264 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("robustbase")) { data(mtcars) m1 <- lmrob(mpg ~ gear + wt + cyl, data = mtcars) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("gear", "wt", "cyl"))) expect_identical(find_predictors(m1, flatten = TRUE), c("gear", "wt", "cyl")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "mpg") }) test_that("get_response", { expect_equal(get_response(m1), mtcars$mpg) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("gear", "wt", "cyl")) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 32) expect_equal(colnames(get_data(m1)), c("mpg", "gear", "wt", "cyl")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("mpg ~ gear + wt + cyl")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "mpg", conditional = c("gear", "wt", "cyl") )) expect_equal(find_terms(m1, flatten = TRUE), c("mpg", "gear", "wt", "cyl")) }) test_that("n_obs", { expect_equal(n_obs(m1), 32) }) test_that("link_function", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("(Intercept)", "gear", "wt", "cyl")) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "gear", "wt", "cyl") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "SM")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } 
insight/tests/testthat/test-find_random.R0000644000175000017500000000333214122064326020434 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (!osx && requiet("testthat") && requiet("insight") && requiet("mgcv") && requiet("gamm4") && requiet("rstanarm")) { data <- iris data$g <- data$Species data$Xr <- data$Species test_that("find_random - mgcv::gamm", { model <- mgcv::gamm(Petal.Length ~ Petal.Width + s(Sepal.Length), random = list(Species = ~1), data = iris) expect_equal(insight::find_random(model, flatten = TRUE), "Species") model <- mgcv::gamm(Petal.Length ~ Petal.Width + s(Sepal.Length), random = list(g = ~1), data = data) expect_equal(insight::find_random(model, flatten = TRUE), "g") }) test_that("find_random - gamm4::gamm4", { model <- gamm4::gamm4(Petal.Length ~ Petal.Width + s(Sepal.Length), random = ~ (1 | Species), data = iris) expect_equal(insight::find_random(model, flatten = TRUE), "Species") model <- gamm4::gamm4(Petal.Length ~ Petal.Width + s(Sepal.Length), random = ~ (1 | Xr), data = data) expect_equal(insight::find_random(model, flatten = TRUE), "Xr") }) .runStanTest <- Sys.getenv("RunAllinsightStanTests") == "yes" if (.runStanTest) { test_that("find_random - rstanarm::gamm4", { model <- suppressWarnings(rstanarm::stan_gamm4( Petal.Length ~ Petal.Width + s(Sepal.Length), random = ~ (1 | Species), data = iris, iter = 100, chains = 1, refresh = 0 )) expect_equal(insight::find_random(model, flatten = TRUE), "Species") }) } } insight/tests/testthat/test-get_data.R0000644000175000017500000000156014122064326017725 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { data("cbpp") set.seed(123) cbpp$cont <- rnorm(nrow(cbpp)) m <- glmer(cbind(incidence, size - incidence) ~ poly(cont, 2) + (1 | herd), data = cbpp, family = binomial ) test_that("get_data", { expect_s3_class(get_data(m), "data.frame") }) d <- data.frame( time = as.factor(c(1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5)), group = c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2), sum = c(0, 5, 10, 15, 20, 0, 20, 25, 45, 50, 0, 5, 10, 15, 20, 0, 20, 25, 45, 50, 0, 5, 10, 15, 20, 0, 20, 25, 45, 50) ) m <- lm(log(sum + 1) ~ as.numeric(time) * group, data = d) test_that("get_data", { expect_equal(colnames(get_data(m)), c("sum", "time", "group")) }) } insight/tests/testthat/test-get_auxiliary.R0000644000175000017500000000162614122064326021026 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("MASS")) { data(quine) clotting <- data.frame( u = c(5, 10, 15, 20, 30, 40, 60, 80, 100), lot1 = c(118, 58, 42, 35, 27, 25, 21, 19, 18), lot2 = c(69, 35, 26, 21, 18, 16, 13, 12, 12) ) set.seed(123) m1 <- glm(lot1 ~ log(u), data = clotting, family = Gamma()) d <- data.frame( counts = c(18, 17, 15, 20, 10, 20, 25, 13, 12), outcome = gl(3, 1, 9), treatment = gl(3, 3) ) set.seed(123) m2 <- glm(counts ~ outcome + treatment, data = d, family = poisson()) m3 <- glm.nb(Days ~ Sex / (Age + Eth * Lrn), data = quine) test_that("get_dispersion", { expect_equal(get_auxiliary(m1, type = "dispersion"), summary(m1)$dispersion, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(get_auxiliary(m2, type = "dispersion"), 1) expect_equal(get_auxiliary(m3, type = "dispersion"), 1) }) } 
insight/tests/testthat/test-metaBMA.R0000644000175000017500000000156414122064326017427 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("insight") && requiet("metaBMA")) { data(towels) set.seed(123) mf <- meta_fixed(logOR, SE, study, data = towels, d = prior("norm", c(mean = 0, sd = .3), lower = 0)) test_that("get_priors-metaBMA", { priors <- get_priors(mf) expect_equal(priors$Distribution, "Normal") expect_equal(priors$Scale, 0.3, tolerance = 1e-2) }) set.seed(123) mr <- meta_random(logOR, SE, study, data = towels, d = prior("cauchy", c(location = 0, scale = 0.707)), tau = prior("invgamma", c(shape = 1, scale = 0.15)) ) test_that("get_priors-metaBMA", { priors <- get_priors(mr) expect_equal(priors$Distribution, c("Student's t", "Inverse gamma")) expect_equal(priors$Scale, c(0.707, 0.15), tolerance = 1e-2) }) } insight/tests/testthat/test-feis.R0000644000175000017500000000745314122064326017112 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (!osx && requiet("testthat") && requiet("insight") && requiet("feisr")) { data(mwp) m1 <- feis( lnw ~ marry + enrol + as.factor(yeargr) | exp + I(exp^2), data = mwp, id = "id", robust = TRUE ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list( conditional = c("marry", "enrol", "yeargr"), slopes = "exp" )) expect_identical(find_predictors(m1, effects = "random"), list(random = "id")) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("marry", "enrol", "yeargr", "exp", "id") ) }) test_that("find_random", { expect_identical(find_random(m1), list(random = "id")) }) test_that("get_random", { expect_identical(colnames(get_random(m1)), "id") }) test_that("find_response", { expect_identical(find_response(m1), "lnw") }) test_that("get_response", { expect_equal(get_response(m1), mwp$lnw) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c("marry", "enrol", "yeargr", "exp") ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 3100) expect_equal( colnames(get_data(m1)), c("lnw", "marry", "enrol", "yeargr", "exp", "id") ) }) test_that("find_formula", { expect_length(find_formula(m1), 3) expect_equal( find_formula(m1), list( conditional = as.formula("lnw ~ marry + enrol + as.factor(yeargr)"), slopes = as.formula("~exp + I(exp^2)"), random = as.formula("~id") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "lnw", conditional = c("marry", "enrol", "as.factor(yeargr)"), slopes = c("exp", "I(exp^2)"), random = "id" ) ) expect_equal( find_terms(m1, flatten = TRUE), c( "lnw", "marry", "enrol", "as.factor(yeargr)", "exp", "I(exp^2)", "id" ) ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "lnw", conditional = c("marry", "enrol", "yeargr"), slopes = "exp", random = "id" ) ) expect_equal( find_variables(m1, flatten = TRUE), c("lnw", "marry", "enrol", "yeargr", "exp", "id") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 3100) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "marry", "enrol", "as.factor(yeargr)2", 
"as.factor(yeargr)3", "as.factor(yeargr)4", "as.factor(yeargr)5" ) ) ) expect_equal(nrow(get_parameters(m1)), 6) expect_equal( get_parameters(m1)$Parameter, c( "marry", "enrol", "as.factor(yeargr)2", "as.factor(yeargr)3", "as.factor(yeargr)4", "as.factor(yeargr)5" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-multinom.R0000644000175000017500000000747614131014371020030 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("nnet") && requiet("MASS")) { data("birthwt") void <- capture.output({ m1 <- nnet::multinom(low ~ age + lwt + race + smoke, data = birthwt) }) test_that("model_info", { expect_true(model_info(m1)$is_binomial) expect_false(model_info(m1)$is_linear) }) test_that("n_parameters", { expect_equal(n_parameters(m1), 5) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("age", "lwt", "race", "smoke"))) expect_identical( find_predictors(m1, flatten = TRUE), c("age", "lwt", "race", "smoke") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "low") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 189) expect_equal( colnames(get_data(m1)), c("low", "age", "lwt", "race", "smoke") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("low ~ age + lwt + race + smoke")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "low", conditional = c("age", "lwt", "race", "smoke") )) expect_equal( find_terms(m1, flatten = TRUE), c("low", "age", "lwt", "race", "smoke") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 189) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c( "(Intercept)", "age", "lwt", "race", "smoke" )) ) expect_equal(nrow(get_parameters(m1)), 5) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "age", "lwt", "race", "smoke") ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) test_that("get_predicted", { void <- capture.output({ # binary outcome m1 <- nnet::multinom(low ~ age + lwt + race + smoke, data = birthwt) # multinomial outcome m2 <- nnet::multinom(ftv ~ age + lwt + race + smoke, data = birthwt) }) # binary outcomes produces an atomic vector x <- get_predicted(m1, predict = "classification") expect_true(is.atomic(x) && !is.null(x) && is.null(dim(x))) expect_true(all(levels(x) %in% c("0", "1"))) x <- get_predicted(m1, predict = "expectation") expect_true(is.atomic(x) && !is.null(x) && is.null(dim(x))) x <- get_predicted(m1, predict = NULL, type = "class") expect_true(is.atomic(x) && !is.null(x) && is.null(dim(x))) expect_true(all(levels(x) %in% c("0", "1"))) x <- get_predicted(m1, predict = NULL, type = "probs") expect_true(is.atomic(x) && !is.null(x) && is.null(dim(x))) # multinomial outcomes depends on predict type x <- get_predicted(m2, predict = "classification") expect_true(is.atomic(x) && !is.null(x) && is.null(dim(x))) expect_true(all(levels(x) %in% as.character(0:6))) x <- get_predicted(m2, predict = "expectation") expect_s3_class(x, "data.frame") expect_true(all(c("Row", "Response", "Predicted") %in% colnames(x))) 
x <- get_predicted(m2, predict = NULL, type = "class") expect_true(is.atomic(x) && !is.null(x) && is.null(dim(x))) expect_true(all(levels(x) %in% as.character(0:6))) x <- get_predicted(m2, predict = NULL, type = "probs") expect_s3_class(x, "data.frame") expect_true(all(c("Row", "Response", "Predicted") %in% colnames(x))) }) } insight/tests/testthat/test-get_deviance.R0000644000175000017500000000204114122064326020565 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) .runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" .runStanTest <- Sys.getenv("RunAllinsightStanTests") == "yes" if (.runThisTest && .runStanTest && !osx && requiet("testthat") && requiet("insight") && requiet("lme4") && requiet("rstanarm")) { data(mtcars) test_that("get_deviance - Bayesian lm", { m1 <- lm(mpg ~ disp, data = mtcars) m2 <- rstanarm::stan_glm(mpg ~ disp, data = mtcars, refresh = 0) expect_equal(get_deviance(m1), get_deviance(m2, verbose = FALSE), tolerance = 1e-1) }) test_that("get_deviance - Bayesian glm", { m1 <- glm(vs ~ disp, data = mtcars, family = "binomial") m2 <- rstanarm::stan_glm(vs ~ disp, data = mtcars, family = "binomial", refresh = 0) expect_equal(get_deviance(m1), get_deviance(m2, verbose = FALSE), tolerance = 1e-1) }) } insight/tests/testthat/test-clm.R0000644000175000017500000000732214144234777016747 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("ordinal")) { data(wine, package = "ordinal") m1 <- clm(rating ~ temp * contact, data = wine) test_that("model_info", { expect_true(model_info(m1)$is_ordinal) expect_false(model_info(m1)$is_multinomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("temp", "contact"))) expect_identical(find_predictors(m1, flatten = TRUE), c("temp", "contact")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "rating") }) test_that("get_response", { expect_equal(get_response(m1), wine$rating) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("temp", "contact")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 72) expect_equal(colnames(get_data(m1)), c("rating", "temp", "contact")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("rating ~ temp * contact")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "rating", conditional = c("temp", "contact") )) expect_equal( find_terms(m1, flatten = TRUE), c("rating", "temp", "contact") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 72) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "1|2", "2|3", "3|4", "4|5", "tempwarm", "contactyes", "tempwarm:contactyes" ) ) ) expect_equal(nrow(get_parameters(m1)), 7) expect_equal( get_parameters(m1)$Parameter, c( "1|2", "2|3", "3|4", "4|5", "tempwarm", "contactyes", "tempwarm:contactyes" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) 
test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) test_that("get_predicted", { nd <- wine nd$rating <- NULL x <- as.data.frame(get_predicted(m1)) y <- as.data.frame(get_predicted(m1, predict = NULL, type = "prob")) z <- predict(m1, type = "prob", newdata = nd, se.fit = TRUE) expect_true(all(c("Row", "Response", "Predicted", "SE") %in% colnames(x))) expect_equal(x, y) for (i in 1:5) { expect_equal(x$Predicted[x$Response == i], unname(z$fit[, i]), ignore_attr = FALSE) expect_equal(x$SE[x$Response == i], unname(z$se.fit[, i]), ignore_attr = FALSE) } x <- as.data.frame(get_predicted(m1, predict = "classification")) y <- as.data.frame(get_predicted(m1, predict = NULL, type = "class")) z <- predict(m1, type = "class", newdata = nd) expect_equal(x, y) expect_equal(as.character(x$Predicted), as.character(z$fit), ignore_attr = FALSE) # we use a hack to handle in-formula factors tmp <- wine tmp$rating <- as.numeric(tmp$rating) tmp <- clm(factor(rating) ~ temp * contact, data = tmp) expect_s3_class(get_predicted(tmp), "get_predicted") }) } insight/tests/testthat/test-clmm.R0000644000175000017500000001330114155570753017115 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (requiet("testthat") && requiet("insight") && requiet("ordinal")) { data(wine, package = "ordinal") data(soup) m1 <- clmm(rating ~ temp + contact + (1 | judge), data = wine) m2 <- clmm(SURENESS ~ PROD + (1 | RESP) + (1 | RESP:PROD), data = soup, link = "probit", threshold = "equidistant" ) test_that("model_info", { expect_true(model_info(m1)$is_ordinal) expect_true(model_info(m2)$is_ordinal) expect_true(model_info(m1)$is_logit) expect_true(model_info(m2)$is_probit) expect_false(model_info(m1)$is_multinomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("temp", "contact"))) expect_identical( find_predictors(m1, effects = "all"), list( conditional = c("temp", "contact"), random = "judge" ) ) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("temp", "contact", "judge") ) expect_identical(find_predictors(m2), list(conditional = "PROD")) expect_identical( find_predictors(m2, effects = "all"), list( conditional = "PROD", random = c("RESP", "PROD") ) ) expect_identical( find_predictors(m2, effects = "all", flatten = TRUE), c("PROD", "RESP") ) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "judge")) expect_equal(find_random(m2), list(random = c("RESP", "RESP:PROD"))) expect_equal(find_random(m2, split_nested = TRUE), list(random = c("RESP", "PROD"))) }) test_that("get_random", { expect_equal(get_random(m1), wine[, "judge", drop = FALSE]) expect_equal(get_random(m2), soup[, c("RESP", "PROD"), drop = FALSE]) }) test_that("find_response", { expect_identical(find_response(m1), "rating") expect_identical(find_response(m2), "SURENESS") }) test_that("get_response", { expect_equal(get_response(m1), wine$rating) expect_equal(get_response(m2), soup$SURENESS) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("temp", "contact")) expect_equal(colnames(get_predictors(m2)), "PROD") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) expect_equal(link_inverse(m2)(.2), pnorm(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 72) 
expect_equal( colnames(get_data(m1)), c("rating", "temp", "contact", "judge") ) expect_equal(nrow(get_data(m2)), 1847) expect_equal(colnames(get_data(m2)), c("SURENESS", "PROD", "RESP")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("rating ~ temp + contact"), random = as.formula("~1 | judge") ), ignore_attr = TRUE ) expect_length(find_formula(m2), 2) expect_equal( find_formula(m2), list( conditional = as.formula("SURENESS ~ PROD"), random = list(as.formula("~1 | RESP"), as.formula("~1 | RESP:PROD")) ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "rating", conditional = c("temp", "contact"), random = "judge" ) ) expect_equal( find_terms(m1, flatten = TRUE), c("rating", "temp", "contact", "judge") ) expect_equal( find_terms(m2), list( response = "SURENESS", conditional = "PROD", random = c("RESP", "PROD") ) ) expect_equal( find_terms(m2, flatten = TRUE), c("SURENESS", "PROD", "RESP") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 72) expect_equal(n_obs(m2), 1847) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("1|2", "2|3", "3|4", "4|5", "tempwarm", "contactyes") ) ) expect_equal( find_parameters(m2), list(conditional = c("threshold.1", "spacing", "PRODTest")) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) }) if (getRversion() > "3.6.3" && !isTRUE(osx)) { test_that("get_variance", { expect_equal( get_variance(m1), list( var.fixed = 3.23207765938872, var.random = 1.27946088209319, var.residual = 3.28986813369645, var.distribution = 3.28986813369645, var.dispersion = 0, var.intercept = c(judge = 1.27946088209319) ), tolerance = 1e-4 ) expect_equal( get_variance(m2), list( var.fixed = 0.132313576370902, var.random = 0.193186321588604, var.residual = 1, var.distribution = 1, var.dispersion = 0, var.intercept = c(`RESP:PROD` = 0.148265480396059, RESP = 0.0449208411925493) ), tolerance = 1e-4 ) }) } test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "z-statistic") }) } insight/tests/testthat/test-vglm.R0000644000175000017500000000560414122064326017125 0ustar nileshnileshunloadNamespace("gam") if (requiet("testthat") && requiet("insight") && requiet("VGAM")) { d.AD <- data.frame( treatment = gl(3, 3), outcome = gl(3, 1, 9), counts = c(18, 17, 15, 20, 10, 20, 25, 13, 12) ) m1 <- vglm( counts ~ outcome + treatment, family = poissonff, data = d.AD, trace = FALSE ) test_that("model_info", { expect_true(model_info(m1)$is_poisson) expect_false(model_info(m1)$is_bayesian) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("outcome", "treatment"))) expect_identical( find_predictors(m1, flatten = TRUE), c("outcome", "treatment") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "counts") }) test_that("get_response", { expect_equal(get_response(m1), d.AD$counts) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("outcome", "treatment")) }) li <- suppressWarnings(link_inverse(m1)(.2)[1, 1]) 
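  # link_inverse() warns for this vglm fit and its return value is indexed with
  # [1, 1] (apparently matrix-shaped here), so it is computed once with the
  # warning suppressed before the expectations below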
test_that("link_inverse", { expect_equal(li, exp(.2), tolerance = 1e-5) expect_warning(link_inverse(m1)(.2)) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 9) expect_equal(colnames(get_data(m1)), c("counts", "outcome", "treatment")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("counts ~ outcome + treatment")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "counts", conditional = c("outcome", "treatment") )) expect_equal( find_terms(m1, flatten = TRUE), c("counts", "outcome", "treatment") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 9) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "(Intercept)", "outcome2", "outcome3", "treatment2", "treatment3" ) ) ) expect_equal(nrow(get_parameters(m1)), 5) expect_equal( get_parameters(m1)$Parameter, c( "(Intercept)", "outcome2", "outcome3", "treatment2", "treatment3" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-response_data2.R0000644000175000017500000000655314122064326021075 0ustar nileshnileshif (suppressWarnings(requiet("testthat") && requiet("insight") && requiet("lme4"))) { data(cbpp) cbpp$trials <- cbpp$size - cbpp$incidence m1 <- glmer( cbind(incidence, trials) ~ period + (1 | herd), data = cbpp, family = binomial ) m2 <- glmer( cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial ) m3 <- glm( cbind(incidence, trials) ~ period, data = cbpp, family = binomial ) m4 <- glm( cbind(incidence, size - incidence) ~ period, data = cbpp, family = binomial ) m5 <- glmer( cbind(incidence, size - incidence) ~ (1 | herd), data = cbpp, family = binomial ) test_that("find_response", { expect_equal( find_response(m1, combine = TRUE), "cbind(incidence, trials)" ) expect_equal( find_response(m2, combine = TRUE), "cbind(incidence, size - incidence)" ) expect_equal( find_response(m3, combine = TRUE), "cbind(incidence, trials)" ) expect_equal( find_response(m4, combine = TRUE), "cbind(incidence, size - incidence)" ) expect_equal( find_response(m5, combine = TRUE), "cbind(incidence, size - incidence)" ) expect_equal( find_response(m1, combine = FALSE), c("incidence", "trials") ) expect_equal(find_response(m2, combine = FALSE), c("incidence", "size")) expect_equal( find_response(m3, combine = FALSE), c("incidence", "trials") ) expect_equal(find_response(m4, combine = FALSE), c("incidence", "size")) expect_equal(find_response(m5, combine = FALSE), c("incidence", "size")) }) test_that("get_response", { expect_equal(colnames(get_response(m1)), c("incidence", "trials")) expect_equal(colnames(get_response(m2)), c("incidence", "size")) expect_equal(colnames(get_response(m3)), c("incidence", "trials")) expect_equal(colnames(get_response(m4)), c("incidence", "size")) expect_equal(colnames(get_response(m5)), c("incidence", "size")) }) test_that("get_data", { expect_equal( colnames(get_data(m1)), c( "cbind(incidence, trials)", "period", "herd", "incidence", "trials" ) ) expect_equal( colnames(get_data(m2)), c( "cbind(incidence, size - incidence)", "period", "herd", "incidence", "size" ) ) get_data(m3) get_data(m4) expect_equal( colnames(get_data(m5)), c( "cbind(incidence, size - incidence)", "herd", "incidence", "size" ) ) }) set.seed(123) 
data(mtcars) m6 <- stats::aov( formula = mpg ~ wt + qsec + Error(disp / am), data = mtcars ) # TO DO # test_that("mod-info", { # get_data(m6) # find_response(m6) # get_response(m6) # find_formula(m6) # }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "z-statistic") expect_identical(find_statistic(m3), "z-statistic") expect_identical(find_statistic(m4), "z-statistic") expect_identical(find_statistic(m5), "z-statistic") expect_identical(find_statistic(m6), "F-statistic") }) } insight/tests/testthat/test-glmmTMB.R0000644000175000017500000006157714144235000017463 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("glmmTMB")) { # fish <- read.csv("https://stats.idre.ucla.edu/stat/data/fish.csv") # fish$nofish <- as.factor(fish$nofish) # fish$livebait <- as.factor(fish$livebait) # fish$camper <- as.factor(fish$camper) data("fish") m1 <- glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + camper + (1 | persons), data = fish, family = truncated_poisson() ) m2 <- glmmTMB( count ~ child + camper + (1 | persons), data = fish, family = poisson() ) m3 <- glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + livebait + (1 | persons), data = fish, family = poisson() ) m4 <- glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + livebait + (1 | ID), dispformula = ~xb, data = fish, family = truncated_poisson() ) m7 <- suppressWarnings(glmmTMB( count ~ child + camper + (1 + xb | persons), ziformula = ~ child + livebait + (1 + zg + nofish | ID), dispformula = ~xb, data = fish, family = truncated_poisson() )) data(Salamanders) m5 <- glmmTMB( count ~ mined + (1 | site), ziformula = ~mined, family = poisson, data = Salamanders ) m6 <- glmmTMB(count ~ 1, ziformula = ~1, family = poisson(), data = Salamanders ) test_that("find_weights", { expect_null(find_weights(m2)) }) test_that("get_weights", { expect_null(get_weights(m2)) }) test_that("get_deviance + logLik", { expect_equal(get_deviance(m2), 1697.449311, tolerance = 1e-3) expect_equal(get_loglikelihood(m2), logLik(m2), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(get_df(m2, type = "model"), 4) }) test_that("model_info", { expect_true(model_info(m1)$is_zero_inflated) expect_false(model_info(m2)$is_zero_inflated) expect_true(model_info(m3)$is_count) expect_true(model_info(m3)$is_pois) expect_false(model_info(m3)$is_negbin) expect_true(model_info(m6)$is_count) expect_false(model_info(m1)$is_linear) }) test_that("clean_names", { expect_identical(clean_names(m1), c("count", "child", "camper", "persons")) expect_identical(clean_names(m2), c("count", "child", "camper", "persons")) expect_identical( clean_names(m3), c("count", "child", "camper", "persons", "livebait") ) expect_identical( clean_names(m4), c( "count", "child", "camper", "persons", "livebait", "ID", "xb" ) ) expect_identical(clean_names(m6), c("count")) }) test_that("find_predictors", { expect_identical( find_predictors(m1, effects = "all"), list( conditional = c("child", "camper"), random = "persons", zero_inflated = c("child", "camper"), zero_inflated_random = "persons" ) ) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("child", "camper", "persons") ) expect_identical( find_predictors(m1, effects = "random"), list(random = "persons", zero_inflated_random = "persons") ) expect_identical( find_predictors(m1, effects = "random", flatten = TRUE), "persons" ) expect_identical( find_predictors(m1, effects = "random", component 
= "conditional"), list(random = "persons") ) expect_identical( find_predictors( m1, effects = "random", component = "conditional", flatten = TRUE ), "persons" ) expect_identical( find_predictors(m1), list( conditional = c("child", "camper"), zero_inflated = c("child", "camper") ) ) expect_identical(find_predictors(m1, flatten = TRUE), c("child", "camper")) expect_identical( find_predictors(m2, effects = "all"), list( conditional = c("child", "camper"), random = "persons" ) ) expect_identical( find_predictors(m2, effects = "all", flatten = TRUE), c("child", "camper", "persons") ) expect_identical( find_predictors(m2, effects = "random"), list(random = "persons") ) expect_identical( find_predictors(m2, effects = "random", flatten = TRUE), "persons" ) expect_identical(find_predictors(m2), list(conditional = c("child", "camper"))) expect_null(find_predictors(m6)) }) test_that("find_response", { expect_identical(find_response(m1), "count") expect_identical(find_response(m2), "count") expect_identical(find_response(m6), "count") }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), exp(.2)) expect_identical(link_inverse(m2)(.2), exp(.2)) }) test_that("get_data", { expect_equal( colnames(get_data(m1)), c("count", "child", "camper", "persons") ) expect_equal( colnames(get_data(m1, effects = "all")), c("count", "child", "camper", "persons") ) expect_equal(colnames(get_data(m1, effects = "random")), "persons") expect_equal( colnames(get_data(m2)), c("count", "child", "camper", "persons") ) expect_equal( colnames(get_data(m2, effects = "all")), c("count", "child", "camper", "persons") ) expect_equal(colnames(get_data(m2, effects = "random", verbose = FALSE)), "persons") get_data(m3) expect_equal(colnames(get_data(m6, verbose = FALSE)), "count") expect_null(get_data(m6, effects = "random", verbose = FALSE)) }) test_that("find_predictors", { expect_identical( find_predictors(m3, effects = "fixed", component = "conditional"), list(conditional = c("child", "camper")) ) expect_identical( find_predictors( m3, effects = "fixed", component = "conditional", flatten = TRUE ), c("child", "camper") ) expect_identical( find_predictors(m3, effects = "fixed", component = "zero_inflated"), list(zero_inflated = c("child", "livebait")) ) expect_identical( find_predictors( m3, effects = "fixed", component = "zero_inflated", flatten = TRUE ), c("child", "livebait") ) expect_identical( find_predictors(m3, effects = "all", component = "conditional"), list( conditional = c("child", "camper"), random = "persons" ) ) expect_identical( find_predictors( m3, effects = "all", component = "conditional", flatten = TRUE ), c("child", "camper", "persons") ) expect_identical( find_predictors(m3, effects = "all", component = "zero_inflated"), list( zero_inflated = c("child", "livebait"), zero_inflated_random = "persons" ) ) expect_identical( find_predictors( m3, effects = "all", component = "zero_inflated", flatten = TRUE ), c("child", "livebait", "persons") ) expect_identical( find_predictors(m3, effects = "random", component = "conditional"), list(random = "persons") ) expect_identical( find_predictors( m3, effects = "random", component = "conditional", flatten = TRUE ), "persons" ) expect_identical( find_predictors(m3, effects = "random", component = "zero_inflated"), list(zero_inflated_random = "persons") ) expect_identical( find_predictors( m3, effects = "random", component = "zero_inflated", flatten = TRUE ), "persons" ) expect_identical( find_predictors(m3, effects = "fixed", component = "all"), list( 
conditional = c("child", "camper"), zero_inflated = c("child", "livebait") ) ) expect_identical( find_predictors( m3, effects = "fixed", component = "all", flatten = TRUE ), c("child", "camper", "livebait") ) expect_identical( find_predictors(m3, effects = "all", component = "all"), list( conditional = c("child", "camper"), random = "persons", zero_inflated = c("child", "livebait"), zero_inflated_random = "persons" ) ) expect_identical( find_predictors( m3, effects = "all", component = "all", flatten = TRUE ), c("child", "camper", "persons", "livebait") ) expect_identical( find_predictors(m3, effects = "random", component = "all"), list(random = "persons", zero_inflated_random = "persons") ) expect_identical( find_predictors( m3, effects = "random", component = "all", flatten = TRUE ), "persons" ) expect_null(find_predictors( m6, effects = "random", component = "all", flatten = TRUE )) }) test_that("find_formula", { expect_length(find_formula(m4), 5) expect_equal( find_formula(m4), list( conditional = as.formula("count ~ child + camper"), random = as.formula("~1 | persons"), zero_inflated = as.formula("~child + livebait"), zero_inflated_random = as.formula("~1 | ID"), dispersion = as.formula("~xb") ), ignore_attr = TRUE ) expect_equal(find_formula(m6), list(conditional = as.formula("count ~ 1")), ignore_attr = TRUE) }) test_that("find_predictors", { expect_identical( find_predictors(m4), list( conditional = c("child", "camper"), zero_inflated = c("child", "livebait"), dispersion = "xb" ) ) expect_identical( find_predictors(m4, flatten = TRUE), c("child", "camper", "livebait", "xb") ) expect_identical( find_predictors(m4, effects = "random"), list(random = "persons", zero_inflated_random = "ID") ) expect_identical( find_predictors(m4, effects = "all", flatten = TRUE), c("child", "camper", "persons", "livebait", "ID", "xb") ) expect_identical( find_predictors(m4, effects = "all"), list( conditional = c("child", "camper"), random = "persons", zero_inflated = c("child", "livebait"), zero_inflated_random = "ID", dispersion = "xb" ) ) expect_identical( find_predictors(m4, component = "conditional", flatten = TRUE), c("child", "camper") ) expect_identical( find_predictors(m4, component = "conditional", flatten = FALSE), list(conditional = c("child", "camper")) ) expect_identical( find_predictors(m4, effects = "random", component = "conditional"), list(random = "persons") ) expect_identical( find_predictors(m4, effects = "all", component = "conditional"), list( conditional = c("child", "camper"), random = "persons" ) ) expect_identical( find_predictors(m4, component = "zero_inflated"), list(zero_inflated = c("child", "livebait")) ) expect_identical( find_predictors(m4, effects = "random", component = "zero_inflated"), list(zero_inflated_random = "ID") ) expect_identical( find_predictors( m4, effects = "all", component = "zero_inflated", flatten = TRUE ), c("child", "livebait", "ID") ) expect_identical( find_predictors(m4, component = "dispersion"), list(dispersion = "xb") ) expect_identical( find_predictors(m4, component = "dispersion", flatten = TRUE), "xb" ) expect_null(find_predictors(m4, effects = "random", component = "dispersion")) expect_identical( find_predictors(m4, effects = "all", component = "dispersion"), list(dispersion = "xb") ) expect_identical( find_predictors( m4, effects = "all", component = "dispersion", flatten = TRUE ), "xb" ) }) test_that("find_random", { expect_identical( find_random(m4), list(random = "persons", zero_inflated_random = "ID") ) 
expect_identical(find_random(m4, flatten = TRUE), c("persons", "ID")) expect_null(find_random(m6, flatten = TRUE)) }) test_that("find_respone", { expect_identical(find_response(m4), "count") expect_identical(find_response(m6), "count") }) test_that("find_terms", { expect_identical( find_terms(m4), list( response = "count", conditional = c("child", "camper"), random = "persons", zero_inflated = c("child", "livebait"), zero_inflated_random = "ID", dispersion = "xb" ) ) expect_identical( find_terms(m4, flatten = TRUE), c( "count", "child", "camper", "persons", "livebait", "ID", "xb" ) ) expect_identical(find_terms(m6), list(response = "count", conditional = "1")) expect_identical(find_terms(m6, flatten = TRUE), c("count", "1")) }) test_that("find_variables", { expect_identical( find_variables(m4), list( response = "count", conditional = c("child", "camper"), random = "persons", zero_inflated = c("child", "livebait"), zero_inflated_random = "ID", dispersion = "xb" ) ) expect_identical( find_variables(m4, flatten = TRUE), c( "count", "child", "camper", "persons", "livebait", "ID", "xb" ) ) expect_identical(find_variables(m6), list(response = "count")) expect_identical(find_variables(m6, flatten = TRUE), "count") }) test_that("get_response", { expect_identical(get_response(m4), fish$count) expect_identical(get_response(m6), Salamanders$count) }) test_that("get_predictors", { expect_identical( colnames(get_predictors(m4)), c("child", "camper", "livebait", "xb") ) expect_null(get_predictors(m6, verbose = FALSE)) }) test_that("get_random", { expect_identical(colnames(get_random(m4)), c("persons", "ID")) expect_warning(expect_null(get_random(m6))) }) test_that("get_data", { expect_identical( colnames(get_data(m4)), c( "count", "child", "camper", "livebait", "xb", "persons", "ID" ) ) expect_identical( colnames(get_data(m4, effects = "fixed")), c("count", "child", "camper", "livebait", "xb") ) expect_identical(colnames(get_data(m4, effects = "random")), c("persons", "ID")) expect_identical(colnames(get_data(m4, component = "zi")), c("count", "child", "livebait", "ID")) expect_identical(colnames(get_data( m4, component = "zi", effects = "fixed" )), c("count", "child", "livebait")) expect_identical(colnames(get_data( m4, component = "zi", effects = "random" )), "ID") expect_identical( colnames(get_data(m4, component = "cond")), c("count", "child", "camper", "persons") ) expect_identical(colnames(get_data( m4, component = "cond", effects = "fixed" )), c("count", "child", "camper")) expect_identical(colnames(get_data( m4, component = "cond", effects = "random" )), "persons") expect_identical(colnames(get_data(m4, component = "disp")), c("count", "xb")) expect_identical(colnames(get_data( m4, component = "disp", effects = "fixed" )), c("count", "xb")) expect_null(get_data(m4, component = "disp", effects = "random", verbose = FALSE)) }) test_that("find_paramaters", { expect_equal( find_parameters(m4), list( conditional = c("(Intercept)", "child", "camper1"), random = list(persons = "(Intercept)"), zero_inflated = c("(Intercept)", "child", "livebait1"), zero_inflated_random = list(ID = "(Intercept)") ) ) expect_equal( find_parameters(m4, flatten = TRUE), c("(Intercept)", "child", "camper1", "livebait1") ) expect_equal( find_parameters(m6), list( conditional = "(Intercept)", zero_inflated = "(Intercept)" ) ) expect_equal( find_parameters(m3), list( conditional = c("(Intercept)", "child", "camper1"), random = list(persons = "(Intercept)"), zero_inflated = c("(Intercept)", "child", "livebait1"), 
zero_inflated_random = list(persons = "(Intercept)") ) ) expect_equal( find_parameters(m3), list( conditional = c("(Intercept)", "child", "camper1"), random = list(persons = "(Intercept)"), zero_inflated = c("(Intercept)", "child", "livebait1"), zero_inflated_random = list(persons = "(Intercept)") ) ) expect_equal( find_parameters(m3, effects = "fixed"), list( conditional = c("(Intercept)", "child", "camper1"), zero_inflated = c("(Intercept)", "child", "livebait1") ) ) expect_equal( find_parameters(m3, effects = "random", component = "zi"), list(zero_inflated_random = list(persons = "(Intercept)")) ) expect_equal( find_parameters( m3, effects = "fixed", component = "zi", flatten = TRUE ), c("(Intercept)", "child", "livebait1") ) }) test_that("get_paramaters", { expect_equal(nrow(get_parameters(m4)), 6) expect_equal( colnames(get_parameters(m4)), c("Parameter", "Estimate", "Component") ) expect_equal( get_parameters(m4)$Parameter, c( "(Intercept)", "child", "camper1", "(Intercept)", "child", "livebait1" ) ) expect_equal( get_parameters(m4)$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( get_parameters(m6)$Parameter, c("(Intercept)", "(Intercept)") ) expect_equal( get_parameters(m2)$Parameter, c("(Intercept)", "child", "camper1") ) expect_equal( get_parameters(m2, component = "all")$Parameter, c("(Intercept)", "child", "camper1") ) expect_null(get_parameters(m2, component = "zi")) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) expect_false(is.null(link_function(m3))) expect_false(is.null(link_function(m4))) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) expect_false(is_multivariate(m3)) expect_false(is_multivariate(m4)) }) # test_that("get_variance", { # # expect_warning(expect_equal(get_variance(m5), list( # var.fixed = 0.32588694431268194762, # var.random = 0.07842738279575413307, # var.residual = 0.41218000030914692111, # var.distribution = 0.41218000030914692111, # var.dispersion = 0, # var.intercept = c(site = 0.07842738279575474369) # ), # tolerance = 1e-3)) # # expect_warning(expect_equal(get_variance_fixed(m1), c(var.fixed = 1.09712435712435052437), tolerance = 1e-3)) # expect_warning(expect_equal(get_variance_random(m1), c(var.random = 0.86712737445492238386), tolerance = 1e-3)) # expect_warning(expect_equal(get_variance_residual(m1), c(var.residual = 0.02634500773355940087 ), tolerance = 1e-3)) # expect_warning(expect_equal(get_variance_distribution(m1), c(var.distribution = 0.02634500773355940087 ), tolerance = 1e-3)) # expect_warning(expect_equal(get_variance_dispersion(m1), c(var.dispersion = 0), tolerance = 1e-3)) # }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list(algorithm = "ML", optimizer = "nlminb") ) }) test_that("find_random_slopes", { skip_on_cran() skip_on_travis() expect_null(find_random_slopes(m6)) expect_equal( find_random_slopes(m7), list( random = "xb", zero_inflated_random = c("zg", "nofish") ) ) }) test_that("clean_parameters", { expect_equal( clean_parameters(m1), structure( list( Parameter = c( "(Intercept)", "child", "camper1", "(Intercept)", "(Intercept)", "child", "camper1", "(Intercept)" ), Effects = c( "fixed", "fixed", "fixed", "random", "fixed", "fixed", "fixed", "random" ), Component = c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ), Group = c("", "", "", 
"persons", "", "", "", "persons"), Cleaned_Parameter = c( "(Intercept)", "child", "camper1", "(Intercept)", "(Intercept)", "child", "camper1", "(Intercept)" ) ), class = c("clean_parameters", "data.frame"), row.names = c(NA, -8L) ) ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "z-statistic") expect_identical(find_statistic(m3), "z-statistic") expect_identical(find_statistic(m4), "z-statistic") expect_identical(find_statistic(m5), "z-statistic") expect_identical(find_statistic(m6), "z-statistic") expect_identical(find_statistic(m7), "z-statistic") }) # dispersion model, example from ?glmmTMB sim1 <- function(nfac = 40, nt = 100, facsd = 0.1, tsd = 0.15, mu = 0, residsd = 1) { dat <- expand.grid(fac = factor(letters[1:nfac]), t = 1:nt) n <- nrow(dat) dat$REfac <- rnorm(nfac, sd = facsd)[dat$fac] dat$REt <- rnorm(nt, sd = tsd)[dat$t] dat$x <- rnorm(n, mean = mu, sd = residsd) + dat$REfac + dat$REt dat } set.seed(101) d1 <- sim1(mu = 100, residsd = 10) d2 <- sim1(mu = 200, residsd = 5) d1$sd <- "ten" d2$sd <- "five" dat <- rbind(d1, d2) m0 <- glmmTMB(x ~ sd + (1 | t), dispformula = ~sd, data = dat) test_that("get_paramaters", { expect_equal(nrow(get_parameters(m0)), 4) expect_equal( colnames(get_parameters(m0)), c("Parameter", "Estimate", "Component") ) expect_equal( get_parameters(m0)$Parameter, c( "(Intercept)", "sdten", "(Intercept)", "sdten" ) ) expect_equal( get_parameters(m0)$Estimate, c(200.03431, -99.71491, 3.20287, 1.38648), tolerance = 1e-3 ) expect_equal( get_parameters(m0)$Component, c("conditional", "conditional", "dispersion", "dispersion") ) }) test_that("get_predicted", { # response x <- get_predicted(m1, predict = "expectation", verbose = FALSE) y <- get_predicted(m1, predict = NULL, type = "response") z <- predict(m1, type = "response") expect_equal(x, y, ignore_attr = TRUE) expect_equal(x, z, ignore_attr = TRUE) expect_equal(y, z, ignore_attr = TRUE) # link x <- get_predicted(m1, predict = "link") y <- get_predicted(m1, predict = NULL, type = "link") z <- predict(m1, type = "link") expect_equal(x, y, ignore_attr = TRUE) expect_equal(y, z, ignore_attr = TRUE) expect_equal(x, z, ignore_attr = TRUE) # unsupported: zprob x <- suppressWarnings(get_predicted(m1, predict = "zprob")) y <- get_predicted(m1, predict = NULL, type = "zprob") z <- predict(m1, type = "zprob") expect_equal(x, y) expect_equal(x, z, ignore_attr = TRUE) # not official supported raise warning expect_warning(get_predicted(m1, predict = "zprob")) expect_warning(get_predicted(m1, predict = "zprob", verbose = FALSE), NA) # the second warning is raised for zero-inflation models only. 
remove when # the zprob correction is implemented expect_warning(get_predicted(m1, predict = "prediction")) expect_warning(get_predicted(m1, predict = "classification")) }) } insight/tests/testthat/test-rlm.R0000644000175000017500000000152614131014371016744 0ustar nileshnileshrequiet("MASS") test_that("model.matrix.rlm accepts `data` argument", { mod <- MASS::rlm(mpg ~ hp + factor(cyl), mtcars) mm <- get_modelmatrix(mod) expect_true(is.matrix(mm)) expect_equal(dim(mm), c(32, 4)) mm <- get_modelmatrix(mod, data = head(mtcars)) expect_true(is.matrix(mm)) expect_equal(dim(mm), c(6, 4)) }) test_that("predict.rlm", { mod <- MASS::rlm(mpg ~ hp + factor(cyl), mtcars) a <- get_predicted(mod) b <- get_predicted(mod, predict = NULL, type = "response") expect_s3_class(a, "get_predicted") expect_s3_class(b, "get_predicted") expect_equal(a, b, ignore_attr = TRUE) expect_equal(as.vector(a), as.vector(b)) expect_error(get_predicted(mod, predict = "link"), regexp = "should be one") expect_error(get_predicted(mod, predict = NULL, type = "link"), regexp = "should be one") }) insight/tests/testthat/test-rlmer.R0000644000175000017500000002254714122064326017306 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("insight") && requiet("lme4") && requiet("robustlmm")) { data(sleepstudy) set.seed(123) sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE ) } m1 <- rlmer( Reaction ~ Days + (Days | Subject), data = sleepstudy, rho.sigma.e = psi2propII(smoothPsi, k = 2.28), rho.sigma.b = chgDefaults(smoothPsi, k = 5.11, s = 10) ) m2 <- rlmer( Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy, rho.sigma.e = psi2propII(smoothPsi, k = 2.28), rho.sigma.b = chgDefaults(smoothPsi, k = 5.11, s = 10) ) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m2)$is_linear) }) test_that("find_predictors", { expect_equal( find_predictors(m1, effects = "all"), list(conditional = "Days", random = "Subject") ) expect_equal( find_predictors(m1, effects = "all", flatten = TRUE), c("Days", "Subject") ) expect_equal( find_predictors(m1, effects = "fixed"), list(conditional = "Days") ) expect_equal( find_predictors(m1, effects = "fixed", flatten = TRUE), "Days" ) expect_equal( find_predictors(m1, effects = "random"), list(random = "Subject") ) expect_equal( find_predictors(m1, effects = "random", flatten = TRUE), "Subject" ) expect_equal( find_predictors(m2, effects = "all"), list( conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_equal( find_predictors(m2, effects = "all", flatten = TRUE), c("Days", "mysubgrp", "mygrp", "Subject") ) expect_equal( find_predictors(m2, effects = "fixed"), list(conditional = "Days") ) expect_equal(find_predictors(m2, effects = "random"), list(random = c("mysubgrp", "mygrp", "Subject"))) expect_null(find_predictors(m2, effects = "all", component = "zi")) expect_null(find_predictors(m2, effects = "fixed", component = "zi")) expect_null(find_predictors(m2, effects = "random", component = "zi")) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "Subject")) expect_equal(find_random(m1, flatten = TRUE), "Subject") expect_equal(find_random(m2), list(random = c( "mysubgrp:mygrp", "mygrp", "Subject" ))) expect_equal(find_random(m2, split_nested = TRUE), 
list(random = c("mysubgrp", "mygrp", "Subject"))) expect_equal( find_random(m2, flatten = TRUE), c("mysubgrp:mygrp", "mygrp", "Subject") ) expect_equal( find_random(m2, split_nested = TRUE, flatten = TRUE), c("mysubgrp", "mygrp", "Subject") ) }) test_that("find_response", { expect_identical(find_response(m1), "Reaction") expect_identical(find_response(m2), "Reaction") }) test_that("get_response", { expect_equal(get_response(m1), sleepstudy$Reaction) }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), .2) expect_identical(link_inverse(m2)(.2), .2) }) test_that("get_data", { expect_equal(colnames(get_data(m1)), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "all")), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "random")), "Subject") expect_equal( colnames(get_data(m2)), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) expect_equal( colnames(get_data(m2, effects = "all")), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) expect_equal(colnames(get_data(m2, effects = "random")), c("mysubgrp", "mygrp", "Subject")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_length(find_formula(m2), 2) expect_equal( find_formula(m1, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = as.formula("~Days | Subject") ), ignore_attr = TRUE ) expect_equal( find_formula(m2, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = list( as.formula("~1 | mysubgrp:mygrp"), as.formula("~1 | mygrp"), as.formula("~1 | Subject") ) ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_identical( find_terms(m1), list( response = "Reaction", conditional = "Days", random = c("Days", "Subject") ) ) expect_identical( find_terms(m1, flatten = TRUE), c("Reaction", "Days", "Subject") ) expect_identical( find_terms(m2), list( response = "Reaction", conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_identical( find_terms(m2, flatten = TRUE), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = "Reaction", conditional = "Days", random = "Subject" ) ) }) test_that("get_response", { expect_identical(get_response(m1), sleepstudy$Reaction) }) test_that("get_predictors", { expect_identical(colnames(get_predictors(m1)), "Days") expect_identical(colnames(get_predictors(m2)), "Days") }) test_that("get_random", { expect_identical(colnames(get_random(m1)), "Subject") expect_identical( colnames(get_random(m2)), c("mysubgrp", "mygrp", "Subject") ) }) test_that("clean_names", { expect_identical(clean_names(m1), c("Reaction", "Days", "Subject")) expect_identical( clean_names(m2), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "Days"), random = list(Subject = c("(Intercept)", "Days")) ) ) expect_equal(nrow(get_parameters(m1)), 2) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "Days")) expect_equal( find_parameters(m2), list( conditional = c("(Intercept)", "Days"), random = list( `mysubgrp:mygrp` = "(Intercept)", Subject = "(Intercept)", mygrp = "(Intercept)" ) ) ) expect_equal(nrow(get_parameters(m2)), 2) expect_equal(get_parameters(m2)$Parameter, c("(Intercept)", "Days")) expect_equal( names(get_parameters(m2, 
effects = "random")), c("mysubgrp:mygrp", "Subject", "mygrp") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) }) test_that("get_variance", { skip_on_cran() skip_on_travis() expect_equal( get_variance(m1), list( var.fixed = 972.98333873885542288917, var.random = 1909.82627106414997797401, var.residual = 401.79840084390571064432, var.distribution = 401.79840084390571064432, var.dispersion = 0, var.intercept = c(Subject = 750.51639089692923789698), var.slope = c(Subject.Days = 41.06728604073937560770), cor.slope_intercept = c(Subject = -0.00703001666895963079) ), tolerance = 1e-4 ) expect_equal( get_variance(m2), list( var.fixed = 914.841369525452, var.random = 1406.78220090082, var.residual = 809.318117324236, var.distribution = 809.318117324236, var.dispersion = 0, var.intercept = c( `mysubgrp:mygrp` = 0, Subject = 1390.66848951126, mygrp = 16.113711389561 ) ), tolerance = 1e-4 ) }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list(algorithm = "REML", optimizer = "rlmer.fit.DAS.nondiag") ) }) test_that("find_random_slopes", { expect_equal(find_random_slopes(m1), list(random = "Days")) expect_null(find_random_slopes(m2)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") expect_identical(find_statistic(m2), "t-statistic") }) } } insight/tests/testthat/helper-requiet.R0000644000175000017500000000022014122064326020123 0ustar nileshnileshrequiet <- function(package) { suppressPackageStartupMessages( require(package, warn.conflicts = FALSE, character.only = TRUE) ) } insight/tests/testthat/test-rq.R0000644000175000017500000000516614122064326016605 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("quantreg")) { data(stackloss) m1 <- rq(stack.loss ~ Air.Flow + Water.Temp, data = stackloss, tau = .25 ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list(conditional = c("Air.Flow", "Water.Temp")) ) expect_identical( find_predictors(m1, flatten = TRUE), c("Air.Flow", "Water.Temp") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "stack.loss") }) test_that("get_response", { expect_equal(get_response(m1), stackloss$stack.loss) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("Air.Flow", "Water.Temp")) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 21) expect_equal( colnames(get_data(m1)), c("stack.loss", "Air.Flow", "Water.Temp") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("stack.loss ~ Air.Flow + Water.Temp")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "stack.loss", conditional = c("Air.Flow", "Water.Temp") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("stack.loss", "Air.Flow", "Water.Temp") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 21) }) test_that("link_function", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c( "(Intercept)", "Air.Flow", "Water.Temp" )) ) expect_equal(nrow(get_parameters(m1)), 3) 
expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "Air.Flow", "Water.Temp") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "br")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-LORgee.R0000644000175000017500000000757214122064326017303 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("multgee")) { data(arthritis) m1 <- ordLORgee( y ~ factor(time) + factor(trt) + factor(baseline), data = arthritis, id = id, LORstr = "uniform", repeated = time ) test_that("model_info", { expect_true(model_info(m1)$is_ordinal) expect_false(model_info(m1)$is_multinomial) expect_true(model_info(m1)$is_logit) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("time", "trt", "baseline"))) expect_identical( find_predictors(m1, flatten = TRUE), c("time", "trt", "baseline") ) expect_identical(find_predictors(m1, effects = "random"), list(random = "id")) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("time", "trt", "baseline", "id") ) }) test_that("find_response", { expect_identical(find_response(m1), "y") }) test_that("get_response", { expect_equal(get_response(m1), na.omit(arthritis)$y) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "id")) }) test_that("get_random", { expect_equal(get_random(m1), arthritis[, "id", drop = FALSE]) }) test_that("get_predictors", { expect_equal(get_predictors(m1), na.omit(arthritis)[, c("time", "trt", "baseline"), drop = FALSE]) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 888) expect_equal( colnames(get_data(m1)), c("y", "time", "trt", "baseline", "id") ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("y ~ factor(time) + factor(trt) + factor(baseline)"), random = as.formula("~id") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_length(find_terms(m1), 3) expect_equal( find_terms(m1), list( response = "y", conditional = c("factor(time)", "factor(trt)", "factor(baseline)"), random = "id" ) ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "y", conditional = c("time", "trt", "baseline"), random = "id" ) ) expect_equal( find_variables(m1, flatten = TRUE), c("y", "time", "trt", "baseline", "id") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 888) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "beta10", "beta20", "beta30", "beta40", "factor(time)3", "factor(time)5", "factor(trt)2", "factor(baseline)2", "factor(baseline)3", "factor(baseline)4", "factor(baseline)5" ) ) ) expect_equal(nrow(get_parameters(m1)), 11) expect_equal( get_parameters(m1)$Parameter, c( "beta10", "beta20", "beta30", "beta40", "factor(time)3", "factor(time)5", "factor(trt)2", "factor(baseline)2", "factor(baseline)3", "factor(baseline)4", "factor(baseline)5" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "Fisher's scoring ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } 
insight/tests/testthat/test-bife.R0000644000175000017500000000131714131014371017055 0ustar nileshnileshskip_if_not_installed("bife") requiet("bife") test_that("get_predicted", { dataset <- bife::psid mod <- bife(LFP ~ AGE + I(INCH / 1000) + KID1 + KID2 + KID3 | ID, data = dataset) # link x <- get_predicted(mod, predict = "link", data = dataset) y <- get_predicted(mod, predict = NULL, type = "link", data = dataset) z <- predict(mod, type = "link", X_new = dataset) expect_equal(x, y) expect_equal(as.vector(x), z) # resopnse x <- get_predicted(mod, predict = "expectation", data = dataset) y <- get_predicted(mod, predict = NULL, type = "response", data = dataset) z <- predict(mod, type = "response", X_new = dataset) expect_equal(x, y) expect_equal(as.vector(x), z) }) insight/tests/testthat/test-Gam2.R0000644000175000017500000000513514122064326016745 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("insight") && requiet("gam")) { data(kyphosis) m1 <- gam::gam( Kyphosis ~ s(Age, 4) + Number, family = binomial, data = kyphosis, trace = TRUE ) test_that("model_info", { expect_true(model_info(m1)$is_binomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("Age", "Number"))) expect_identical(find_predictors(m1, flatten = TRUE), c("Age", "Number")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "Kyphosis") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 81) expect_equal(colnames(get_data(m1)), c("Kyphosis", "Age", "Number")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("Kyphosis ~ s(Age, 4) + Number")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "Kyphosis", conditional = c("s(Age, 4)", "Number") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("Kyphosis", "s(Age, 4)", "Number") ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "Kyphosis", conditional = c("Age", "Number") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("Kyphosis", "Age", "Number") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 81) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "Number"), smooth_terms = "s(Age, 4)" ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "Number", "s(Age, 4)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "IWLS")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "F-statistic") }) } insight/tests/testthat/test-betareg.R0000644000175000017500000000660014122064326017566 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("betareg")) { data("GasolineYield") data("FoodExpenditure") m1 <- betareg(yield ~ batch + temp, data = GasolineYield) m2 <- betareg(I(food / income) ~ income + persons, data = FoodExpenditure) test_that("model_info", { expect_true(model_info(m1)$is_beta) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { 
expect_identical(find_predictors(m1), list(conditional = c("batch", "temp"))) expect_identical(find_predictors(m1, flatten = TRUE), c("batch", "temp")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "yield") expect_identical(find_response(m2), "I(food/income)") expect_identical(find_response(m2, combine = FALSE), c("food", "income")) }) test_that("get_response", { expect_equal(get_response(m1), GasolineYield$yield) expect_equal(get_response(m2), FoodExpenditure[, c("food", "income")]) }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), plogis(.2)) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 32) expect_equal(colnames(get_data(m1)), c("yield", "batch", "temp")) expect_equal(nrow(get_data(m2)), 38) expect_equal( colnames(get_data(m2)), c("I(food/income)", "income", "persons", "food", "income.1") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("yield ~ batch + temp")), ignore_attr = TRUE ) expect_equal( find_formula(m2), list(conditional = as.formula("I(food/income) ~ income + persons")), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "yield", conditional = c("batch", "temp") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("yield", "batch", "temp") ) expect_equal( find_variables(m2, flatten = TRUE), c("food", "income", "persons") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 32) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "batch1", "batch2", "batch3", "batch4", "batch5", "batch6", "batch7", "batch8", "batch9", "temp"), precision = "(phi)" ) ) expect_equal(nrow(get_parameters(m1)), 12) expect_equal( get_parameters(m1)$Parameter, c( "(Intercept)", "batch1", "batch2", "batch3", "batch4", "batch5", "batch6", "batch7", "batch8", "batch9", "temp", "(phi)" ) ) }) test_that("find_terms", { expect_equal( find_terms(m2), list( response = "I(food/income)", conditional = c("income", "persons") ) ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "z-statistic") }) } insight/tests/testthat/test-find_terms.R0000644000175000017500000000271414122064326020311 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { test_that("find_terms", { m <- lm(Sepal.Length ~ -1 + Petal.Width + Species, data = iris) expect_equal( find_terms(m), list(response = "Sepal.Length", conditional = c("Petal.Width", "Species", "-1")) ) expect_false(has_intercept(m)) }) test_that("find_terms", { m <- lm(Sepal.Length ~ 0 + Petal.Width + Species, data = iris) expect_equal( find_terms(m), list(response = "Sepal.Length", conditional = c("0", "Petal.Width", "Species")) ) expect_false(has_intercept(m)) }) test_that("find_terms", { m <- lm(Sepal.Length ~ Petal.Width + Species - 1, data = iris) expect_equal( find_terms(m), list(response = "Sepal.Length", conditional = c("Petal.Width", "Species", "-1")) ) expect_false(has_intercept(m)) }) set.seed(1984) dat <- data.frame( y = rnorm(100 * 5, sd = 1 - .20), time = rep(1:10, 10 * 5), g1 = sort(rep(1:100, 5)), g2 = sort(rep(1:10, 10 * 5)) ) dat$g0 <- paste(dat$time, dat$g1) dat$time1 <- dat$time - 8 dat$post <- 0 dat$post[dat$time >= 8] <- 1 m <- 
lmer(y ~ post + time1 + (post + time1 - 1 | g2), data = dat) test_that("find_terms", { expect_equal( find_terms(m), list(response = "y", conditional = c("post", "time1"), random = c("post", "time1", "g2")) ) expect_true(has_intercept(m)) }) } insight/tests/testthat/test-betabin.R0000644000175000017500000000721014122064326017557 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("aod")) { data(dja) m1 <- suppressWarnings(betabin(cbind(y, n - y) ~ group * trisk, ~village, data = dja)) test_that("model_info", { expect_true(model_info(m1)$is_binomial) expect_true(model_info(m1)$is_betabinomial) expect_true(model_info(m1)$is_mixed) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("group", "trisk"))) expect_identical(find_predictors(m1, flatten = TRUE), c("group", "trisk")) expect_identical(find_predictors(m1, effects = "random"), list(random = "village")) expect_identical( find_predictors(m1, effects = "all"), list( conditional = c("group", "trisk"), random = "village" ) ) }) test_that("find_random", { expect_identical(find_random(m1), list(random = "village")) }) test_that("get_random", { expect_equal(get_random(m1), dja[, "village", drop = FALSE]) }) test_that("find_response", { expect_identical(find_response(m1), "cbind(y, n - y)") expect_identical(find_response(m1, combine = FALSE), c("y", "n")) }) test_that("get_response", { expect_equal(get_response(m1), dja[, c("y", "n")]) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("group", "trisk")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("link_function", { expect_equal(link_function(m1)(.2), qlogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 75) expect_equal(colnames(get_data(m1)), c("y", "n", "group", "trisk", "village")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("cbind(y, n - y) ~ group * trisk"), random = as.formula("~village") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal(find_variables(m1), list(response = c("y", "n"), conditional = c("group", "trisk"), random = "village")) expect_equal(find_variables(m1, flatten = TRUE), c("y", "n", "group", "trisk", "village")) }) test_that("n_obs", { expect_equal(n_obs(m1), 75) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "groupTREAT", "trisk", "groupTREAT:trisk"), random = c( "phi.villageBAK", "phi.villageBAM", "phi.villageBAN", "phi.villageBIJ", "phi.villageBOU", "phi.villageBYD", "phi.villageDEM", "phi.villageDIA", "phi.villageHAM", "phi.villageLAM", "phi.villageLAY", "phi.villageMAF", "phi.villageMAH", "phi.villageMAK", "phi.villageMED", "phi.villageNAB", "phi.villageSAG", "phi.villageSAM", "phi.villageSOU" ) ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "groupTREAT", "trisk", "groupTREAT:trisk")) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "cbind(y, n - y)", conditional = c("group", "trisk"), random = "village" ) ) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } 
insight/tests/testthat/test-spatial.R0000644000175000017500000000750114122064326017613 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest || Sys.getenv("USER") == "travis") { if (requiet("testthat") && requiet("insight") && requiet("glmmTMB")) { m1 <- download_model("glmmTMB_spatial_1") test_that("find_weights", { expect_null(find_weights(m1)) }) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("clean_names", { expect_identical(clean_names(m1), c("calcium", "elevation", "region", "pos", "ID")) }) test_that("find_predictors", { expect_identical( find_predictors(m1, effects = "all"), list(conditional = c("elevation", "region"), random = c("pos", "ID")) ) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("elevation", "region", "pos", "ID") ) expect_identical( find_predictors(m1, effects = "random"), list(random = "ID") ) expect_identical(find_predictors(m1, effects = "random", flatten = TRUE), "ID") }) test_that("find_response", { expect_identical(find_response(m1), "calcium") }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), .2) }) test_that("get_data", { expect_equal( colnames(get_data(m1)), c("calcium", "elevation", "region", "pos", "ID") ) expect_equal( colnames(get_data(m1, effects = "all")), c("calcium", "elevation", "region", "pos", "ID") ) }) test_that("find_predictors", { expect_identical( find_predictors(m1, effects = "fixed", component = "conditional"), list(conditional = c("elevation", "region")) ) expect_identical( find_predictors(m1), list(conditional = c("elevation", "region")) ) expect_identical( find_predictors(m1, effects = "all"), list( conditional = c("elevation", "region"), random = c("pos", "ID") ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("calcium ~ elevation + region"), random = as.formula("~pos + 0 | ID") ), ignore_attr = TRUE ) }) test_that("find_random", { expect_identical( find_random(m1), list(random = "ID") ) }) test_that("find_terms", { expect_identical( find_terms(m1), list( response = "calcium", conditional = c("elevation", "region"), random = c("pos", "ID") ) ) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = "calcium", conditional = c("elevation", "region"), random = c("pos", "ID") ) ) }) test_that("get_predictors", { expect_identical( colnames(get_predictors(m1)), c("elevation", "region") ) }) test_that("get_random", { expect_identical(colnames(get_random(m1)), c("pos", "ID")) }) test_that("get_data", { expect_identical( colnames(get_data(m1)), c("calcium", "elevation", "region", "pos", "ID") ) }) test_that("get_paramaters", { expect_equal(nrow(get_parameters(m1)), 5) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "elevation", "region2", "region3", "(Intercept)") ) }) test_that("find_random_slopes", { skip_on_cran() skip_on_travis() expect_equal( find_random_slopes(m1), list(random = "pos") ) }) } } insight/tests/testthat/test-FE-formula.R0000644000175000017500000000464514122064326020121 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { gfe <- insight:::.get_fixed_effects test_that(".get_fixed_effects", { f <- "am ~ disp:wt + (1|gear) + wt + (1+wt|carb)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ disp:wt + wt") ) f <- "am ~ disp:wt + wt + (1|gear) + (1+wt|carb)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ disp:wt + wt") ) f <- "am ~ (1|gear) + 
(1+wt|carb) + disp:wt + wt" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ disp:wt + wt") ) f <- "am ~ 1 + (1|gear)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ 1") ) f <- "am ~ 1 + (1+wt|gear)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ 1") ) f <- "am ~ disp:wt + (1|gear) + wt + (1+wt|carb)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ disp:wt + wt") ) f <- "am ~ disp:wt + (1|gear) + wt + (1*wt|carb)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ disp:wt + wt") ) f <- "am ~ (1|gear) + (1+wt|carb) + disp:wt * wt" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ disp:wt * wt") ) f <- "am ~ disp:wt + poly(gear, 2) + wt + (1+wt|carb)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("am ~ disp:wt + poly(gear, 2) + wt") ) f <- "y ~ post + time1 + (1 | g2 / g1 / g0) + (post + time1 - 1 | g2)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("y ~ post + time1") ) f <- "count ~ mined + (1 | site) + offset(Wtemp)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("count ~ mined + offset(Wtemp)") ) f <- "count ~ mined + offset(Wtemp) + (1 | site)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("count ~ mined + offset(Wtemp)") ) f <- "time | cens(censored) ~ age * sex + disease + (1|patient)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("time | cens(censored) ~ age * sex + disease") ) f <- "success | trials(ntrials) ~ x + (1 | patient)" expect_equal( gfe(stats::as.formula(f)), stats::as.formula("success | trials(ntrials) ~ x") ) }) } insight/tests/testthat/test-clm2.R0000644000175000017500000000611614122064326017014 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("ordinal") && requiet("MASS")) { data(housing, package = "MASS") m1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) test_that("model_info", { expect_true(model_info(m1)$is_ordinal) expect_false(model_info(m1)$is_multinomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("Infl", "Type", "Cont"))) expect_identical( find_predictors(m1, flatten = TRUE), c("Infl", "Type", "Cont") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "Sat") }) test_that("get_response", { expect_equal(get_response(m1), housing$Sat) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("Infl", "Type", "Cont")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("find_weights", { expect_equal(find_weights(m1), "Freq") }) test_that("get_weights", { expect_equal(get_weights(m1), housing$Freq) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 72) expect_equal( colnames(get_data(m1)), c("Sat", "Infl", "Type", "Cont", "(weights)", "Freq") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("Sat ~ Infl + Type + Cont")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "Sat", conditional = c("Infl", "Type", "Cont") )) expect_equal( find_terms(m1, flatten = TRUE), c("Sat", "Infl", "Type", "Cont") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 1681) }) test_that("linkfun", { 
expect_false(is.null(link_function(m1))) }) test_that("linkinv", { expect_false(is.null(link_inverse(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "Low|Medium", "Medium|High", "InflMedium", "InflHigh", "TypeApartment", "TypeAtrium", "TypeTerrace", "ContHigh" ) ) ) expect_equal(nrow(get_parameters(m1)), 8) expect_equal( get_parameters(m1)$Parameter, c( "Low|Medium", "Medium|High", "InflMedium", "InflHigh", "TypeApartment", "TypeAtrium", "TypeTerrace", "ContHigh" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-geeglm.R0000644000175000017500000000551514122064326017421 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("geepack")) { data(warpbreaks) m1 <- geeglm( breaks ~ tension, id = wool, data = warpbreaks, family = poisson, corstr = "ar1" ) test_that("model_info", { expect_true(model_info(m1)$is_count) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = "tension")) expect_identical(find_predictors(m1, flatten = TRUE), "tension") expect_identical( find_predictors(m1, effects = "random"), list(random = "wool") ) expect_identical( find_predictors(m1, effects = "all", flatten = TRUE), c("tension", "wool") ) }) test_that("find_response", { expect_identical(find_response(m1), "breaks") }) test_that("get_response", { expect_equal(get_response(m1), warpbreaks$breaks) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "wool")) }) test_that("get_random", { expect_equal(get_random(m1), warpbreaks[, "wool", drop = FALSE]) }) test_that("get_predictors", { expect_equal(get_predictors(m1), warpbreaks[, "tension", drop = FALSE]) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 54) expect_equal(colnames(get_data(m1)), c("breaks", "tension", "wool")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("breaks ~ tension"), random = as.formula("~wool") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "breaks", conditional = "tension", random = "wool" ) ) expect_equal( find_terms(m1, flatten = TRUE), c("breaks", "tension", "wool") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 54) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c( "(Intercept)", "tensionM", "tensionH" )) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "tensionM", "tensionH") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "chi-squared statistic") }) } insight/tests/testthat/test-standardize_names.R0000644000175000017500000000360214122064326021647 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("stats") && requiet("parameters")) { .runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (packageVersion("parameters") >= "0.14.0") { test_that("standardize_names works", { set.seed(123) # lm object lm_mod <- lm(wt ~ mpg, mtcars) x <- 
as.data.frame(parameters::model_parameters(lm_mod)) expect_equal( names(standardize_names(x, style = "broom")), c( "term", "estimate", "std.error", "conf.level", "conf.low", "conf.high", "statistic", "df.error", "p.value" ) ) expect_equal( names(standardize_names(x, style = "easystats")), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Statistic", "df", "p" ) ) # aov object aov_mod <- aov(wt ~ mpg, mtcars) y <- as.data.frame(parameters::model_parameters(aov_mod)) expect_equal( names(standardize_names(y, style = "broom")), c("term", "sumsq", "df", "meansq", "statistic", "p.value") ) }) # t-test (this is yet to be finalized) z <- as.data.frame(parameters::model_parameters(t.test(1:10, y = c(7:20)))) ## TODO enable later # expect_equal( # names(standardize_names(z, style = "broom")), # c( # "parameter1", "parameter2", "mean.parameter1", "mean.parameter2", "estimate", # "conf.level", "conf.low", "conf.high", "statistic", "df.error", "p.value", # "method", "alternative" # ) # ) # chi-square test chi <- as.data.frame(parameters::model_parameters(chisq.test(matrix(c(12, 5, 7, 7), ncol = 2)))) # expect_equal( # names(standardize_names(chi, style = "broom")), # c("statistic", "df", "p.value", "method", "alternative") # ) } } insight/tests/testthat/test-mixed.R0000644000175000017500000002232014122064326017260 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" ## TODO enable once it's clear what the problem is... if (requiet("testthat") && requiet("insight") && requiet("lme4") && suppressPackageStartupMessages(requiet("afex")) && FALSE) { data(sleepstudy) set.seed(123) sleepstudy$mygrp <- sample(1:5, size = nrow(sleepstudy), replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } m1 <- mixed(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy ) m2 <- mixed(Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy ) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m2)$is_linear) }) test_that("find_predictors", { expect_equal( find_predictors(m1, effects = "all"), list(conditional = "Days", random = "Subject") ) expect_equal( find_predictors(m1, effects = "all", flatten = TRUE), c("Days", "Subject") ) expect_equal( find_predictors(m1, effects = "fixed"), list(conditional = "Days") ) expect_equal( find_predictors(m1, effects = "fixed", flatten = TRUE), "Days" ) expect_equal( find_predictors(m1, effects = "random"), list(random = "Subject") ) expect_equal( find_predictors(m1, effects = "random", flatten = TRUE), "Subject" ) expect_equal( find_predictors(m2, effects = "all"), list( conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_equal( find_predictors(m2, effects = "all", flatten = TRUE), c("Days", "mysubgrp", "mygrp", "Subject") ) expect_equal( find_predictors(m2, effects = "fixed"), list(conditional = "Days") ) expect_equal(find_predictors(m2, effects = "random"), list(random = c("mysubgrp", "mygrp", "Subject"))) expect_null(find_predictors(m2, effects = "all", component = "zi")) expect_null(find_predictors(m2, effects = "fixed", component = "zi")) expect_null(find_predictors(m2, effects = "random", component = "zi")) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "Subject")) expect_equal(find_random(m1, flatten = TRUE), "Subject") expect_equal(find_random(m2), list(random = c("mysubgrp:mygrp", "mygrp", 
"Subject"))) expect_equal(find_random(m2, split_nested = TRUE), list(random = c("mysubgrp", "mygrp", "Subject"))) expect_equal( find_random(m2, flatten = TRUE), c("mysubgrp:mygrp", "mygrp", "Subject") ) expect_equal( find_random(m2, split_nested = TRUE, flatten = TRUE), c("mysubgrp", "mygrp", "Subject") ) }) test_that("find_response", { expect_identical(find_response(m1), "Reaction") expect_identical(find_response(m2), "Reaction") }) test_that("get_response", { expect_equal(get_response(m1), sleepstudy$Reaction) }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), .2) expect_identical(link_inverse(m2)(.2), .2) }) test_that("get_data", { expect_equal(colnames(get_data(m1)), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "all")), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "random")), "Subject") expect_equal( colnames(get_data(m2)), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) expect_equal( colnames(get_data(m2, effects = "all")), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) expect_equal(colnames(get_data(m2, effects = "random")), c("mysubgrp", "mygrp", "Subject")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_length(find_formula(m2), 2) expect_equal( find_formula(m1, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = as.formula("~1 + Days | Subject") ), ignore_attr = TRUE ) expect_equal( find_formula(m2, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = list( as.formula("~1 | mysubgrp:mygrp"), as.formula("~1 | mygrp"), as.formula("~1 | Subject") ) ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = "Reaction", conditional = "Days", random = "Subject" ) ) expect_identical( find_variables(m1, flatten = TRUE), c("Reaction", "Days", "Subject") ) expect_identical( find_variables(m2), list( response = "Reaction", conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_identical( find_variables(m2, flatten = TRUE), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("get_response", { expect_identical(get_response(m1), sleepstudy$Reaction) }) test_that("get_predictors", { expect_identical(colnames(get_predictors(m1)), "Days") expect_identical(colnames(get_predictors(m2)), "Days") }) test_that("get_random", { expect_identical(colnames(get_random(m1)), "Subject") expect_identical(colnames(get_random(m2)), c("mysubgrp", "mygrp", "Subject")) }) test_that("clean_names", { expect_identical(clean_names(m1), c("Reaction", "Days", "Subject")) expect_identical( clean_names(m2), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "Days"), random = list(Subject = c("(Intercept)", "Days")) ) ) expect_equal(nrow(get_parameters(m1)), 2) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "Days")) expect_equal( find_parameters(m2), list( conditional = c("(Intercept)", "Days"), random = list( `mysubgrp:mygrp` = "(Intercept)", Subject = "(Intercept)", mygrp = "(Intercept)" ) ) ) expect_equal(nrow(get_parameters(m2)), 2) expect_equal(get_parameters(m2)$Parameter, c("(Intercept)", "Days")) expect_equal( names(get_parameters(m2, effects = "random")), c("mysubgrp:mygrp", "Subject", "mygrp") ) }) 
test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) }) test_that("get_variance", { expect_equal( get_variance(m1), list( var.fixed = 908.9534, var.random = 1698.084, var.residual = 654.94, var.distribution = 654.94, var.dispersion = 0, var.intercept = c(Subject = 612.1002), var.slope = c(Subject.Days = 35.07171), cor.slope_intercept = c(Subject = 0.06555124) ), tolerance = 1e-1 ) expect_equal(get_variance_fixed(m1), c(var.fixed = 908.9534), tolerance = 1e-1 ) expect_equal(get_variance_random(m1), c(var.random = 1698.084), tolerance = 1e-1 ) expect_equal( get_variance_residual(m1), c(var.residual = 654.94), tolerance = 1e-1 ) expect_equal( get_variance_distribution(m1), c(var.distribution = 654.94), tolerance = 1e-1 ) expect_equal(get_variance_dispersion(m1), c(var.dispersion = 0), tolerance = 1e-1 ) expect_equal( get_variance_intercept(m1), c(var.intercept.Subject = 612.1002), tolerance = 1e-1 ) expect_equal( get_variance_slope(m1), c(var.slope.Subject.Days = 35.07171), tolerance = 1e-1 ) expect_equal( get_correlation_slope_intercept(m1), c(cor.slope_intercept.Subject = 0.06555124), tolerance = 1e-1 ) if (.runThisTest || Sys.getenv("USER") == "travis") { expect_warning(expect_equal( get_variance(m2), list( var.fixed = 889.3301, var.residual = 941.8135, var.distribution = 941.8135, var.dispersion = 0, var.intercept = c( `mysubgrp:mygrp` = 0, Subject = 1357.4257, mygrp = 24.4064 ) ), tolerance = 1e-1, )) } }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list(algorithm = "REML", optimizer = "nloptwrap") ) }) test_that("find_random_slopes", { expect_equal(find_random_slopes(m1), list(random = "Days")) expect_null(find_random_slopes(m2)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") expect_identical(find_statistic(m2), "t-statistic") }) } insight/tests/testthat/test-GLMMadaptive.R0000644000175000017500000002223614122064326020432 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("insight") && requiet("GLMMadaptive") && requiet("lme4")) { m <- download_model("GLMMadaptive_zi_2") m2 <- download_model("GLMMadaptive_zi_1") data(cbpp) m3 <- GLMMadaptive::mixed_model( cbind(incidence, size - incidence) ~ period, random = ~ 1 | herd, data = cbpp, family = binomial ) test_that("model_info", { expect_true(model_info(m)$is_zero_inflated) expect_true(model_info(m)$is_count) expect_true(model_info(m)$is_pois) expect_false(model_info(m)$is_negbin) expect_false(model_info(m)$is_linear) }) test_that("get_deviance + logLik", { expect_equal(get_deviance(m3), 183.96674, tolerance = 1e-3) expect_equal(get_loglikelihood(m3), logLik(m3), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(get_df(m3, type = "model"), 5) }) test_that("n_parameters", { expect_equal(n_parameters(m), 6) expect_equal(n_parameters(m2), 6) expect_equal(n_parameters(m, effects = "random"), 2) expect_equal(n_parameters(m2, effects = "random"), 1) }) test_that("find_predictors", { expect_identical( find_predictors(m, effects = "fixed")$conditional, c("child", "camper") ) expect_identical( find_predictors(m, effects = "fixed")$zero_inflated, c("child", "livebait") ) expect_identical( find_predictors(m, effects = "all", flatten = TRUE), c("child", "camper", "persons", "livebait") ) expect_identical( find_predictors(m, effects = "all")$zero_inflated_random, c("persons") ) expect_identical(find_predictors(m, effects = "random")$random, "persons") expect_identical( 
find_predictors( m, effects = "fixed", component = "cond", flatten = TRUE ), c("child", "camper") ) expect_identical( find_predictors( m, effects = "all", component = "cond", flatten = TRUE ), c("child", "camper", "persons") ) expect_identical( find_predictors(m, effects = "all", component = "cond")$conditional, c("child", "camper") ) expect_identical( find_predictors( m, effects = "random", component = "cond", flatten = TRUE ), "persons" ) expect_identical( find_predictors( m, effects = "fixed", component = "zi", flatten = TRUE ), c("child", "livebait") ) expect_identical( find_predictors( m, effects = "all", component = "zi", flatten = TRUE ), c("child", "livebait", "persons") ) expect_identical( find_predictors( m, effects = "random", component = "zi", flatten = TRUE ), "persons" ) expect_null(find_predictors( m, effects = "fixed", component = "dispersion", flatten = TRUE )) expect_null(find_predictors( m, effects = "all", component = "dispersion", flatten = TRUE )) expect_null(find_predictors( m, effects = "random", component = "dispersion", flatten = TRUE )) }) test_that("find_response", { expect_identical(find_response(m), "count") }) test_that("link_inverse", { expect_identical(link_inverse(m)(.2), exp(.2)) }) test_that("clean_names", { expect_identical( clean_names(m), c("count", "child", "camper", "persons", "livebait") ) }) test_that("find_formula", { expect_length(find_formula(m), 4) expect_identical( names(find_formula(m)), c( "conditional", "random", "zero_inflated", "zero_inflated_random" ), ignore_attr = TRUE ) }) test_that("find_random", { expect_identical( find_random(m), list(random = "persons", zero_inflated_random = "persons") ) expect_identical(find_random(m, flatten = TRUE), "persons") }) test_that("find_respone", { expect_identical(find_response(m), "count") }) test_that("find_terms", { expect_identical( find_terms(m), list( response = "count", conditional = c("child", "camper"), random = "persons", zero_inflated = c("child", "livebait"), zero_inflated_random = "persons" ) ) expect_identical( find_terms(m, flatten = TRUE), c("count", "child", "camper", "persons", "livebait") ) }) test_that("get_response", { expect_identical(get_response(m3), cbpp[, c("incidence", "size")]) }) test_that("get_predictors", { expect_identical( colnames(get_predictors(m)), c("child", "camper", "livebait") ) }) test_that("get_random", { expect_identical(colnames(get_random(m)), "persons") }) test_that("get_data", { expect_identical( colnames(get_data(m)), c("count", "child", "camper", "livebait", "persons") ) expect_identical( colnames(get_data(m, effects = "fixed")), c("count", "child", "camper", "livebait") ) expect_identical(colnames(get_data(m, effects = "random")), "persons") expect_identical( colnames(get_data(m, component = "zi")), c("count", "child", "livebait", "persons") ) expect_identical(colnames(get_data( m, component = "zi", effects = "fixed" )), c("count", "child", "livebait")) expect_identical(colnames(get_data( m, component = "zi", effects = "random" )), "persons") expect_identical( colnames(get_data(m, component = "cond")), c("count", "child", "camper", "persons") ) expect_identical(colnames(get_data( m, component = "cond", effects = "fixed" )), c("count", "child", "camper")) expect_identical(colnames(get_data( m, component = "cond", effects = "random" )), "persons") expect_identical(colnames(get_data(m, component = "dispersion")), "count") expect_null(suppressWarnings(get_data(m, component = "dispersion", effects = "random"))) expect_identical( colnames(get_data(m3)), 
c("incidence", "size", "period", "herd") ) }) test_that("find_parameter", { expect_equal( find_parameters(m), list( conditional = c("(Intercept)", "child", "camper1"), random = "(Intercept)", zero_inflated = c("(Intercept)", "child", "livebait1"), zero_inflated_random = "zi_(Intercept)" ) ) expect_equal( find_parameters(m2), list( conditional = c("(Intercept)", "child", "camper1"), random = "(Intercept)", zero_inflated = c("(Intercept)", "child", "livebait1") ) ) expect_equal( find_parameters(m3), list( conditional = c("(Intercept)", "period2", "period3", "period4"), random = "(Intercept)" ) ) expect_equal(nrow(get_parameters(m)), 6) expect_equal( get_parameters(m, effects = "random"), list( random = c(-1.0715496, 1.4083630, 1.9129880, 0.2007521), zero_inflated_random = c(-0.1676294, 0.5502481, 1.2592406, 0.9336591) ), tolerance = 1e-5 ) expect_equal(nrow(get_parameters(m2)), 6) expect_equal(get_parameters(m2, effects = "random"), list(random = c( -1.3262364, -0.2048055, 1.3852572, 0.5282277 )), tolerance = 1e-5 ) expect_equal( get_parameters(m3)$Component, c( "conditional", "conditional", "conditional", "conditional" ) ) expect_error(get_parameters(m3, "zi")) }) test_that("linkfun", { expect_false(is.null(link_function(m))) expect_false(is.null(link_function(m2))) }) test_that("is_multivariate", { expect_false(is_multivariate(m)) expect_false(is_multivariate(m2)) }) test_that("find_algorithm", { expect_equal( find_algorithm(m), list(algorithm = "quasi-Newton", optimizer = "optim") ) }) } # these run successfully for devtools::test_file() locally but fail on Travis # not sure what's going on # test_that("find_statistic", { # expect_identical(find_statistic(m1), "z-statistic") # expect_identical(find_statistic(m2), "z-statistic") # expect_identical(find_statistic(m3), "z-statistic") # }) } insight/tests/testthat/test-coxme.R0000644000175000017500000001234214122064326017270 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("survival") && requiet("lme4") && requiet("nlme") && requiet("bdsmatrix") && requiet("coxme")) { set.seed(1234) lung$inst2 <- sample(1:10, size = nrow(lung), replace = TRUE) lung <- subset(lung, subset = ph.ecog %in% 0:2) lung$ph.ecog <- factor(lung$ph.ecog, labels = c("good", "ok", "limited")) m1 <- coxme(Surv(time, status) ~ ph.ecog + age + (1 | inst), lung) m2 <- coxme(Surv(time, status) ~ ph.ecog + age + (1 | inst) + (1 | inst2), lung) test_that("model_info", { expect_true(model_info(m1)$is_logit) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("ph.ecog", "age"))) expect_identical( find_predictors(m1, effects = "random"), list(random = "inst") ) expect_identical(find_predictors(m2), list(conditional = c("ph.ecog", "age"))) expect_identical(find_predictors(m2, effects = "random"), list(random = c("inst", "inst2"))) }) test_that("find_response", { expect_identical(find_response(m1), "Surv(time, status)") expect_identical(find_response(m1, combine = FALSE), c("time", "status")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) expect_equal(link_inverse(m2)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 226) expect_equal( colnames(get_data(m1)), c( "time", "status", "Surv(time, status)", "ph.ecog", "age", "inst" ) ) expect_equal( colnames(get_data(m2)), c( "time", "status", "Surv(time, status)", "ph.ecog", "age", "inst", "inst2" ) ) }) test_that("find_formula", { 
expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("Surv(time, status) ~ ph.ecog + age"), random = as.formula("~1 | inst") ), ignore_attr = TRUE ) expect_length(find_formula(m2), 2) expect_equal( find_formula(m2), list( conditional = as.formula("Surv(time, status) ~ ph.ecog + age"), random = list(as.formula("~1 | inst"), as.formula("~1 | inst2")) ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "Surv(time, status)", conditional = c("ph.ecog", "age"), random = "inst" ) ) expect_equal( find_terms(m1, flatten = TRUE), c("Surv(time, status)", "ph.ecog", "age", "inst") ) expect_equal( find_terms(m2), list( response = "Surv(time, status)", conditional = c("ph.ecog", "age"), random = c("inst", "inst2") ) ) expect_equal( find_terms(m2, flatten = TRUE), c("Surv(time, status)", "ph.ecog", "age", "inst", "inst2") ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = c("time", "status"), conditional = c("ph.ecog", "age"), random = "inst" ) ) expect_equal( find_variables(m1, flatten = TRUE), c("time", "status", "ph.ecog", "age", "inst") ) expect_equal( find_variables(m2), list( response = c("time", "status"), conditional = c("ph.ecog", "age"), random = c("inst", "inst2") ) ) expect_equal( find_variables(m2, flatten = TRUE), c("time", "status", "ph.ecog", "age", "inst", "inst2") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 225) expect_equal(n_obs(m2), 225) }) test_that("get_response", { expect_equal(colnames(get_response(m1)), c("time", "status")) expect_equal(nrow(get_response(m1)), 226) expect_equal(colnames(get_response(m1)), c("time", "status")) expect_equal(nrow(get_response(m2)), 226) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("ph.ecogok", "ph.ecoglimited", "age"), random = "inst" ) ) expect_equal( find_parameters(m2), list( conditional = c("ph.ecogok", "ph.ecoglimited", "age"), random = c("inst", "inst2") ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("ph.ecogok", "ph.ecoglimited", "age") ) expect_equal(nrow(get_parameters(m2)), 3) expect_equal( get_parameters(m2)$Parameter, c("ph.ecogok", "ph.ecoglimited", "age") ) expect_length(get_parameters(m2, effects = "random"), 2) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "z-statistic") }) } insight/tests/testthat/test-nlmer.R0000644000175000017500000000074214122064326017273 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { set.seed(123) startvec <- c(Asym = 200, xmid = 725, scal = 350) nm1 <- lme4::nlmer( formula = circumference ~ SSlogis(age, Asym, xmid, scal) ~ Asym | Tree, data = Orange, start = startvec ) test_that("model_info", { expect_true(model_info(nm1)$is_linear) }) test_that("find_statistic", { expect_identical(find_statistic(nm1), "t-statistic") }) } insight/tests/testthat/test-vgam.R0000644000175000017500000001346414122064326017115 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && Sys.getenv("USER") != "travis") { if (requiet("testthat") && requiet("insight") && requiet("VGAM")) { data("hunua") m1 <- download_model("vgam_1") m2 <- download_model("vgam_2") test_that("model_info", { 
expect_true(model_info(m1)$is_binomial) expect_true(model_info(m2)$is_binomial) expect_false(model_info(m1)$is_bayesian) expect_false(model_info(m2)$is_bayesian) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("vitluc", "altitude"))) expect_identical( find_predictors(m1, flatten = TRUE), c("vitluc", "altitude") ) expect_null(find_predictors(m1, effects = "random")) expect_identical(find_predictors(m2), list(conditional = c("vitluc", "altitude"))) expect_identical( find_predictors(m2, flatten = TRUE), c("vitluc", "altitude") ) expect_null(find_predictors(m2, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) expect_null(find_random(m2)) }) test_that("get_random", { expect_warning(get_random(m1)) expect_warning(get_random(m2)) }) test_that("find_response", { expect_identical(find_response(m1), "agaaus") expect_identical(find_response(m2), "cbind(agaaus, kniexc)") expect_identical(find_response(m2, combine = FALSE), c("agaaus", "kniexc")) }) test_that("get_response", { expect_equal(get_response(m1), hunua$agaaus) expect_equal( get_response(m2), data.frame(agaaus = hunua$agaaus, kniexc = hunua$kniexc) ) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("vitluc", "altitude")) expect_equal(colnames(get_predictors(m2)), c("vitluc", "altitude")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) expect_equal(link_inverse(m2)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 392) expect_equal(nrow(get_data(m2)), 392) expect_equal(colnames(get_data(m1)), c("agaaus", "vitluc", "altitude")) expect_equal( colnames(get_data(m2)), c("agaaus", "kniexc", "vitluc", "altitude") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("agaaus ~ vitluc + s(altitude, df = 2)")), ignore_attr = TRUE ) expect_length(find_formula(m2), 1) expect_equal( find_formula(m2), list( conditional = as.formula("cbind(agaaus, kniexc) ~ vitluc + s(altitude, df = c(2, 3))") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "agaaus", conditional = c("vitluc", "s(altitude, df = 2)") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("agaaus", "vitluc", "s(altitude, df = 2)") ) expect_equal( find_terms(m2), list( response = "cbind(agaaus, kniexc)", conditional = c("vitluc", "s(altitude, df = c(2, 3))") ) ) expect_equal( find_terms(m2, flatten = TRUE), c( "cbind(agaaus, kniexc)", "vitluc", "s(altitude, df = c(2, 3))" ) ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "agaaus", conditional = c("vitluc", "altitude") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("agaaus", "vitluc", "altitude") ) expect_equal(find_variables(m2), list( response = c("agaaus", "kniexc"), conditional = c("vitluc", "altitude") )) expect_equal( find_variables(m2, flatten = TRUE), c("agaaus", "kniexc", "vitluc", "altitude") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 392) expect_equal(n_obs(m2), 392) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "vitluc"), smooth_terms = "s(altitude, df = 2)" ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "vitluc", "s(altitude, df = 2)") ) 
expect_equal( find_parameters(m2), list( conditional = c( "(Intercept):1", "(Intercept):2", "vitluc:1", "vitluc:2" ), smooth_terms = c("s(altitude, df = c(2, 3)):1", "s(altitude, df = c(2, 3)):2") ) ) expect_equal(nrow(get_parameters(m2)), 6) expect_equal( get_parameters(m2)$Parameter, c( "(Intercept):1", "(Intercept):2", "vitluc:1", "vitluc:2", "s(altitude, df = c(2, 3)):1", "s(altitude, df = c(2, 3)):2" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "chi-squared statistic") expect_identical(find_statistic(m2), "chi-squared statistic") }) } } insight/tests/testthat/test-lmer.R0000644000175000017500000002775014122064326017125 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("insight") && requiet("lme4")) { data(sleepstudy) set.seed(123) sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } m1 <- lme4::lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy ) m2 <- lme4::lmer(Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy ) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m2)$is_linear) }) test_that("loglik", { expect_equal(get_loglikelihood(m1), logLik(m1), ignore_attr = TRUE) expect_equal(get_loglikelihood(m2), logLik(m2), ignore_attr = TRUE) }) test_that("get_df", { expect_equal(get_df(m1), df.residual(m1), ignore_attr = TRUE) expect_equal(get_df(m2), df.residual(m2), ignore_attr = TRUE) expect_equal(get_df(m1, type = "model"), attr(logLik(m1), "df"), ignore_attr = TRUE) expect_equal(get_df(m2, type = "model"), attr(logLik(m2), "df"), ignore_attr = TRUE) }) test_that("n_parameters", { expect_equal(n_parameters(m1), 2) expect_equal(n_parameters(m2), 2) expect_equal(n_parameters(m1, effects = "random"), 2) expect_equal(n_parameters(m2, effects = "random"), 3) }) test_that("find_predictors", { expect_equal( find_predictors(m1, effects = "all"), list(conditional = "Days", random = "Subject") ) expect_equal( find_predictors(m1, effects = "all", flatten = TRUE), c("Days", "Subject") ) expect_equal( find_predictors(m1, effects = "fixed"), list(conditional = "Days") ) expect_equal( find_predictors(m1, effects = "fixed", flatten = TRUE), "Days" ) expect_equal( find_predictors(m1, effects = "random"), list(random = "Subject") ) expect_equal( find_predictors(m1, effects = "random", flatten = TRUE), "Subject" ) expect_equal( find_predictors(m2, effects = "all"), list( conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_equal( find_predictors(m2, effects = "all", flatten = TRUE), c("Days", "mysubgrp", "mygrp", "Subject") ) expect_equal( find_predictors(m2, effects = "fixed"), list(conditional = "Days") ) expect_equal(find_predictors(m2, effects = "random"), list(random = c("mysubgrp", "mygrp", "Subject"))) expect_null(find_predictors(m2, effects = "all", component = "zi")) expect_null(find_predictors(m2, effects = "fixed", component = "zi")) expect_null(find_predictors(m2, effects = "random", component = "zi")) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "Subject")) expect_equal(find_random(m1, flatten = TRUE), "Subject") expect_equal(find_random(m2), list(random = c("mysubgrp:mygrp", "mygrp", 
"Subject"))) expect_equal(find_random(m2, split_nested = TRUE), list(random = c("mysubgrp", "mygrp", "Subject"))) expect_equal( find_random(m2, flatten = TRUE), c("mysubgrp:mygrp", "mygrp", "Subject") ) expect_equal( find_random(m2, split_nested = TRUE, flatten = TRUE), c("mysubgrp", "mygrp", "Subject") ) }) test_that("find_response", { expect_identical(find_response(m1), "Reaction") expect_identical(find_response(m2), "Reaction") }) test_that("get_response", { expect_equal(get_response(m1), sleepstudy$Reaction) }) test_that("link_inverse", { expect_identical(link_inverse(m1)(.2), .2) expect_identical(link_inverse(m2)(.2), .2) }) test_that("get_data", { expect_equal(colnames(get_data(m1)), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "all")), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m1, effects = "random")), "Subject") expect_equal( colnames(get_data(m2)), c("Reaction", "Days", "mygrp", "mysubgrp", "Subject") ) expect_equal( colnames(get_data(m2, effects = "all")), c("Reaction", "Days", "mygrp", "mysubgrp", "Subject") ) expect_equal(colnames(get_data(m2, effects = "random")), c("mysubgrp", "mygrp", "Subject")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_length(find_formula(m2), 2) expect_equal( find_formula(m1, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = as.formula("~1 + Days | Subject") ), ignore_attr = TRUE ) expect_equal( find_formula(m2, component = "conditional"), list( conditional = as.formula("Reaction ~ Days"), random = list( as.formula("~1 | mysubgrp:mygrp"), as.formula("~1 | mygrp"), as.formula("~1 | Subject") ) ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_identical( find_terms(m1), list( response = "Reaction", conditional = "Days", random = c("Days", "Subject") ) ) expect_identical( find_terms(m1, flatten = TRUE), c("Reaction", "Days", "Subject") ) expect_identical( find_terms(m2), list( response = "Reaction", conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_identical( find_terms(m2, flatten = TRUE), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = "Reaction", conditional = "Days", random = "Subject" ) ) expect_identical( find_variables(m1, flatten = TRUE), c("Reaction", "Days", "Subject") ) expect_identical( find_variables(m2), list( response = "Reaction", conditional = "Days", random = c("mysubgrp", "mygrp", "Subject") ) ) expect_identical( find_variables(m2, flatten = TRUE), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("get_response", { expect_identical(get_response(m1), sleepstudy$Reaction) }) test_that("get_predictors", { expect_identical(colnames(get_predictors(m1)), "Days") expect_identical(colnames(get_predictors(m2)), "Days") }) test_that("get_random", { expect_identical(colnames(get_random(m1)), "Subject") expect_identical(colnames(get_random(m2)), c("mysubgrp", "mygrp", "Subject")) }) test_that("clean_names", { expect_identical(clean_names(m1), c("Reaction", "Days", "Subject")) expect_identical( clean_names(m2), c("Reaction", "Days", "mysubgrp", "mygrp", "Subject") ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "Days"), random = list(Subject = c("(Intercept)", "Days")) ) ) expect_equal(nrow(get_parameters(m1)), 2) 
expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "Days")) expect_equal( find_parameters(m2), list( conditional = c("(Intercept)", "Days"), random = list( `mysubgrp:mygrp` = "(Intercept)", Subject = "(Intercept)", mygrp = "(Intercept)" ) ) ) expect_equal(nrow(get_parameters(m2)), 2) expect_equal(get_parameters(m2)$Parameter, c("(Intercept)", "Days")) expect_equal( names(get_parameters(m2, effects = "random")), c("mysubgrp:mygrp", "Subject", "mygrp") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_false(is_multivariate(m2)) }) test_that("get_variance", { expect_equal( get_variance(m1), list( var.fixed = 908.9534, var.random = 1698.084, var.residual = 654.94, var.distribution = 654.94, var.dispersion = 0, var.intercept = c(Subject = 612.1002), var.slope = c(Subject.Days = 35.07171), cor.slope_intercept = c(Subject = 0.06555124) ), tolerance = 1e-1 ) expect_equal(get_variance_fixed(m1), c(var.fixed = 908.9534), tolerance = 1e-1 ) expect_equal(get_variance_random(m1), c(var.random = 1698.084), tolerance = 1e-1 ) expect_equal( get_variance_residual(m1), c(var.residual = 654.94), tolerance = 1e-1 ) expect_equal( get_variance_distribution(m1), c(var.distribution = 654.94), tolerance = 1e-1 ) expect_equal(get_variance_dispersion(m1), c(var.dispersion = 0), tolerance = 1e-1 ) expect_equal( get_variance_intercept(m1), c(var.intercept.Subject = 612.1002), tolerance = 1e-1 ) expect_equal( get_variance_slope(m1), c(var.slope.Subject.Days = 35.07171), tolerance = 1e-1 ) expect_equal( get_correlation_slope_intercept(m1), c(cor.slope_intercept.Subject = 0.06555124), tolerance = 1e-1 ) if (.runThisTest) { expect_equal( suppressWarnings(get_variance(m2)), list( var.fixed = 889.3301, var.residual = 941.8135, var.distribution = 941.8135, var.dispersion = 0, var.intercept = c( `mysubgrp:mygrp` = 0, Subject = 1357.4257, mygrp = 24.4064 ) ), tolerance = 1e-1 ) } }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list(algorithm = "REML", optimizer = "nloptwrap") ) }) test_that("find_random_slopes", { expect_equal(find_random_slopes(m1), list(random = "Days")) expect_null(find_random_slopes(m2)) }) m3 <- lme4::lmer(Reaction ~ (1 + Days | Subject), data = sleepstudy ) m4 <- lme4::lmer(Reaction ~ (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy ) m5 <- lme4::lmer(Reaction ~ 1 + (1 + Days | Subject), data = sleepstudy ) m6 <- lme4::lmer(Reaction ~ 1 + (1 | mygrp / mysubgrp) + (1 | Subject), data = sleepstudy ) test_that("find_formula", { expect_equal( find_formula(m3), list( conditional = as.formula("Reaction ~ 1"), random = as.formula("~1 + Days | Subject") ), ignore_attr = TRUE ) expect_equal( find_formula(m5), list( conditional = as.formula("Reaction ~ 1"), random = as.formula("~1 + Days | Subject") ), ignore_attr = TRUE ) expect_equal( find_formula(m4), list( conditional = as.formula("Reaction ~ 1"), random = list( as.formula("~1 | mysubgrp:mygrp"), as.formula("~1 | mygrp"), as.formula("~1 | Subject") ) ), ignore_attr = TRUE ) expect_equal( find_formula(m6), list( conditional = as.formula("Reaction ~ 1"), random = list( as.formula("~1 | mysubgrp:mygrp"), as.formula("~1 | mygrp"), as.formula("~1 | Subject") ) ), ignore_attr = TRUE ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") expect_identical(find_statistic(m2), "t-statistic") }) test_that("get_call", { expect_equal(class(get_call(m1)), "call") expect_equal(class(get_call(m2)), "call") }) } 
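# Illustrative aside (an added, hedged sketch; not part of the original test file):
# the variance components verified above (var.fixed, var.random, var.residual, ...)
# are the quantities typically combined into an intraclass correlation or a
# conditional R2. A minimal sketch, assuming an lme4 fit such as m1 above; the
# object names below are illustrative only:
#   v <- insight::get_variance(m1)
#   icc_adjusted   <- v$var.random / (v$var.random + v$var.residual)
#   r2_conditional <- (v$var.fixed + v$var.random) /
#                     (v$var.fixed + v$var.random + v$var.residual)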
insight/tests/testthat/test-r3_4.R0000644000175000017500000000041214122064326016717 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("insight")) { data(mtcars) m <- glm(am ~ mpg, mtcars, family = binomial()) test_that("find_random", { expect_null(find_random(m)) }) } insight/tests/testthat/test-backticks.R0000644000175000017500000000446314122064326020120 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { data(iris) iris$`a m` <- iris$Species iris$`Sepal Width` <- iris$Sepal.Width m <- lm(`Sepal Width` ~ Petal.Length + `a m` * log(Sepal.Length), data = iris) test_that("backticks", { expect_equal( find_parameters(m), list(conditional = c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" )) ) expect_equal( get_parameters(m)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( get_statistic(m)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( clean_parameters(m)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( find_predictors(m), list(conditional = c("Petal.Length", "a m", "Sepal.Length")) ) expect_equal( colnames(get_predictors(m)), c("Petal.Length", "a m", "Sepal.Length") ) expect_equal( find_variables(m), list( response = "Sepal Width", conditional = c("Petal.Length", "a m", "Sepal.Length") ) ) expect_equal( find_terms(m), list( response = "Sepal Width", conditional = c("Petal.Length", "a m", "log(Sepal.Length)") ) ) expect_equal( rownames(get_varcov(m)), c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( clean_names(m), c("Sepal Width", "Petal.Length", "a m", "Sepal.Length") ) expect_equal(find_response(m), "Sepal Width") expect_equal(get_response(m), iris[["Sepal Width"]]) }) } insight/tests/testthat/test-survey.R0000644000175000017500000000414114122064326017510 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("survey")) { data(api) dstrat <- svydesign( id = ~1, strata = ~stype, weights = ~pw, data = apistrat, fpc = ~fpc ) m1 <- svyglm(api00 ~ ell + meals + mobility, design = dstrat) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("ell", "meals", "mobility"))) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "api00") }) test_that("get_response", { expect_equal(get_response(m1), apistrat$api00) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 200) expect_equal( colnames(get_data(m1)), c("api00", "ell", "meals", "mobility", "(weights)") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("api00 ~ ell + meals + mobility")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), 
list( response = "api00", conditional = c("ell", "meals", "mobility") )) expect_equal( find_terms(m1, flatten = TRUE), c("api00", "ell", "meals", "mobility") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 200) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c( "(Intercept)", "ell", "meals", "mobility" )) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "ell", "meals", "mobility") ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-tidymodels.R0000644000175000017500000000760714122064326020342 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("insight") && requiet("parsnip")) { data(mtcars) m <- parsnip::linear_reg() m <- parsnip::set_engine(m, "lm") m <- parsnip::set_mode(m, "regression") m <- parsnip::fit(m, mpg ~ am + vs, data = mtcars) test_that("find_formula", { expect_equal( find_formula(m), list(conditional = as.formula("mpg ~ am + vs")), ignore_attr = TRUE ) }) # test_that("model_info", { # expect_true(model_info(m1)$is_poisson) # expect_true(model_info(m1)$is_count) # expect_false(model_info(m1)$is_negbin) # expect_false(model_info(m1)$is_binomial) # expect_false(model_info(m1)$is_linear) # }) # # test_that("loglik", { # expect_equal(get_loglikelihood(m1), logLik(m1), ignore_attr = TRUE) # }) # # test_that("get_df", { # expect_equal(get_df(m1), df.residual(m1), ignore_attr = TRUE) # expect_equal(get_df(m1, type = "model"), attr(logLik(m1), "df"), ignore_attr = TRUE) # }) # # # test_that("find_predictors", { # expect_identical(find_predictors(m1), list(conditional = c("mined", "cover", "sample"))) # expect_identical( # find_predictors(m1, flatten = TRUE), # c("mined", "cover", "sample") # ) # expect_null(find_predictors(m1, effects = "random")) # }) # # test_that("find_random", { # expect_null(find_random(m1)) # }) # # test_that("get_random", { # expect_warning(get_random(m1)) # }) # # test_that("find_response", { # expect_identical(find_response(m1), "count") # }) # # test_that("get_response", { # expect_equal(get_response(m1), Salamanders$count) # }) # # test_that("get_predictors", { # expect_equal(colnames(get_predictors(m1)), c("mined", "cover", "sample")) # }) # # test_that("link_inverse", { # expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) # }) # # test_that("linkfun", { # expect_equal(link_function(m1)(.2), -1.609438, tolerance = 1e-4) # }) # # test_that("get_data", { # expect_equal(nrow(get_data(m1)), 644) # expect_equal( # colnames(get_data(m1)), # c("count", "mined", "cover", "sample") # ) # }) # # test_that("get_call", { # expect_equal(class(get_call(m1)), "call") # }) # # # # test_that("find_variables", { # expect_equal( # find_variables(m1), # list( # response = "count", # conditional = c("mined", "cover", "sample") # ) # ) # expect_equal( # find_variables(m1, flatten = TRUE), # c("count", "mined", "cover", "sample") # ) # }) # # test_that("n_obs", { # expect_equal(n_obs(m1), 644) # }) # # test_that("find_parameters", { # expect_equal( # find_parameters(m1), # list( # conditional = c("(Intercept)", "minedno", "log(cover)", "sample") # ) # ) # expect_equal(nrow(get_parameters(m1)), 4) # expect_equal( # get_parameters(m1)$Parameter, # c("(Intercept)", "minedno", "log(cover)", "sample") # ) # }) # # test_that("is_multivariate", { # 
expect_false(is_multivariate(m1)) # }) # # test_that("find_terms", { # expect_equal( # find_terms(m1), # list( # response = "count", # conditional = c("mined", "log(cover)", "sample") # ) # ) # }) # # test_that("find_algorithm", { # expect_equal(find_algorithm(m1), list(algorithm = "ML")) # }) # # test_that("find_statistic", { # expect_identical(find_statistic(m1), "z-statistic") # }) # # test_that("get_statistic", { # expect_equal(get_statistic(m1)$Statistic, c(-10.7066515607315, 18.1533878215937, -1.68918157150882, 2.23541768590273), tolerance = 1e-4) # }) } insight/tests/testthat/test-zeroinfl.R0000644000175000017500000000774614122064326020021 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("pscl")) { data("bioChemists") m1 <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) test_that("model_info", { expect_true(model_info(m1)$is_poisson) expect_true(model_info(m1)$is_zero_inflated) expect_false(model_info(m1)$is_linear) }) test_that("n_parameters", { expect_equal(n_parameters(m1), 8) expect_equal(n_parameters(m1, component = "conditional"), 5) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("fem", "mar", "kid5", "ment"), zero_inflated = c("kid5", "phd") ) ) expect_identical( find_predictors(m1, flatten = TRUE), c("fem", "mar", "kid5", "ment", "phd") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "art") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 915) expect_equal( colnames(get_data(m1)), c("art", "fem", "mar", "kid5", "ment", "phd") ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("art ~ fem + mar + kid5 + ment"), zero_inflated = as.formula("~kid5 + phd") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "art", conditional = c("fem", "mar", "kid5", "ment"), zero_inflated = c("kid5", "phd") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("art", "fem", "mar", "kid5", "ment", "phd") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 915) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "count_(Intercept)", "count_femWomen", "count_marMarried", "count_kid5", "count_ment" ), zero_inflated = c("zero_(Intercept)", "zero_kid5", "zero_phd") ) ) expect_equal(nrow(get_parameters(m1)), 8) expect_equal(nrow(get_parameters(m1, component = "zi")), 3) expect_equal( get_parameters(m1)$Parameter, c( "count_(Intercept)", "count_femWomen", "count_marMarried", "count_kid5", "count_ment", "zero_(Intercept)", "zero_kid5", "zero_phd" ) ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) test_that("get_statistic", { expect_equal( get_statistic(m1)$Statistic, c(8.26297, -3.90986, 2.07134, -3.43156, 10.05389, -2.143, 0.21384, -1.84259), tolerance = 1e-3 ) expect_equal( get_statistic(m1)$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated" ), tolerance = 1e-3 ) }) m2 <- zeroinfl(formula = art ~ . 
| 1, data = bioChemists, dist = "negbin") .runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest || Sys.getenv("USER") == "travis") { test_that("get_statistic", { expect_equal( get_statistic(m2)$Statistic, c(1.84902, -2.97806, 1.83266, -3.32478, 0.42324, 8.38088, -0.14579), tolerance = 1e-3 ) expect_equal( get_statistic(m2)$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated" ), tolerance = 1e-3 ) }) } } insight/tests/testthat/test-fixest.R0000644000175000017500000001464214131014371017457 0ustar nileshnileshskip_on_os("mac") skip_if(getRversion() < "3.6.0") skip_if_not_installed("fixest") requiet("fixest") data(trade) m1 <- femlm(Euros ~ log(dist_km) | Origin + Destination + Product, data = trade) m2 <- femlm(log1p(Euros) ~ log(dist_km) | Origin + Destination + Product, data = trade, family = "gaussian") m3 <- feglm(Euros ~ log(dist_km) | Origin + Destination + Product, data = trade, family = "poisson") test_that("model_info", { expect_true(model_info(m1)$is_count) expect_true(model_info(m2)$is_linear) expect_true(model_info(m3)$is_count) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = "dist_km", cluster = c("Origin", "Destination", "Product"))) expect_identical(find_predictors(m2), list(conditional = "dist_km", cluster = c("Origin", "Destination", "Product"))) expect_identical(find_predictors(m3), list(conditional = "dist_km", cluster = c("Origin", "Destination", "Product"))) expect_identical( find_predictors(m1, component = "all"), list(conditional = "dist_km", cluster = c("Origin", "Destination", "Product")) ) expect_identical( find_predictors(m2, component = "all"), list(conditional = "dist_km", cluster = c("Origin", "Destination", "Product")) ) expect_identical( find_predictors(m3, component = "all"), list(conditional = "dist_km", cluster = c("Origin", "Destination", "Product")) ) }) test_that("find_random", { expect_null(find_random(m1)) expect_null(find_random(m2)) expect_null(find_random(m3)) }) test_that("get_random", { expect_warning(expect_null(get_random(m1))) }) test_that("find_response", { expect_identical(find_response(m1), "Euros") expect_identical(find_response(m2), "Euros") expect_identical(find_response(m3), "Euros") }) test_that("get_response", { expect_equal(get_response(m1), trade$Euros) expect_equal(get_response(m2), trade$Euros) expect_equal(get_response(m3), trade$Euros) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("dist_km", "Origin", "Destination", "Product")) expect_equal(colnames(get_predictors(m2)), c("dist_km", "Origin", "Destination", "Product")) expect_equal(colnames(get_predictors(m3)), c("dist_km", "Origin", "Destination", "Product")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-4) expect_equal(link_inverse(m2)(.2), .2, tolerance = 1e-4) expect_equal(link_inverse(m3)(.2), exp(.2), tolerance = 1e-4) }) test_that("link_function", { expect_equal(link_function(m1)(.2), log(.2), tolerance = 1e-4) expect_equal(link_function(m2)(.2), .2, tolerance = 1e-4) expect_equal(link_function(m3)(.2), log(.2), tolerance = 1e-4) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 38325) expect_equal(colnames(get_data(m1)), c("Euros", "dist_km", "Origin", "Destination", "Product")) expect_equal(nrow(get_data(m2)), 38325) expect_equal(colnames(get_data(m2)), c("Euros", "dist_km", "Origin", "Destination", "Product")) }) test_that("find_formula", { 
expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("Euros ~ log(dist_km)"), cluster = as.formula("~Origin + Destination + Product") ), ignore_attr = TRUE ) expect_length(find_formula(m2), 2) expect_equal( find_formula(m2), list( conditional = as.formula("log1p(Euros) ~ log(dist_km)"), cluster = as.formula("~Origin + Destination + Product") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list(response = "Euros", conditional = "log(dist_km)", cluster = c("Origin", "Destination", "Product")) ) expect_equal( find_terms(m1, flatten = TRUE), c("Euros", "log(dist_km)", "Origin", "Destination", "Product") ) expect_equal( find_terms(m2), list(response = "log1p(Euros)", conditional = "log(dist_km)", cluster = c("Origin", "Destination", "Product")) ) expect_equal( find_terms(m2, flatten = TRUE), c("log1p(Euros)", "log(dist_km)", "Origin", "Destination", "Product") ) }) test_that("find_variables", { expect_equal( find_variables(m1), list(response = "Euros", conditional = "dist_km", cluster = c("Origin", "Destination", "Product")) ) expect_equal( find_variables(m1, flatten = TRUE), c("Euros", "dist_km", "Origin", "Destination", "Product") ) expect_equal( find_variables(m2), list(response = "Euros", conditional = "dist_km", cluster = c("Origin", "Destination", "Product")) ) expect_equal( find_variables(m1, flatten = TRUE), c("Euros", "dist_km", "Origin", "Destination", "Product") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 38325) expect_equal(n_obs(m2), 38325) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = "log(dist_km)") ) expect_equal( get_parameters(m1), data.frame( Parameter = "log(dist_km)", Estimate = -1.52774702640008, row.names = NULL, stringsAsFactors = FALSE ), tolerance = 1e-4 ) expect_equal( find_parameters(m2), list(conditional = "log(dist_km)") ) expect_equal( get_parameters(m2), data.frame( Parameter = "log(dist_km)", Estimate = -2.16843021944503, row.names = NULL, stringsAsFactors = FALSE ), tolerance = 1e-4 ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "t-statistic") }) test_that("get_statistic", { stat <- get_statistic(m1) expect_equal(stat$Statistic, -13.212695, tolerance = 1e-3) stat <- get_statistic(m2) expect_equal(stat$Statistic, -14.065336, tolerance = 1e-3) }) test_that("get_predicted", { pred <- get_predicted(m1) expect_s3_class(pred, "get_predicted") expect_equal(length(pred), nrow(trade)) a <- get_predicted(m1) b <- get_predicted(m1, type = "response", predict = NULL) expect_equal(a, b) a <- get_predicted(m1, predict = "link") b <- get_predicted(m1, type = "link", predict = NULL) expect_equal(a, b) # these used to raise warnings expect_warning(get_predicted(m1, ci = .4), NA) expect_warning(get_predicted(m1, predict = NULL, type = "link"), NA) }) insight/tests/testthat/test-has_intercept.R0000644000175000017500000000207614122064326021010 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { data(mtcars) data(sleepstudy) data(iris) m1 <- lm(mpg ~ 0 + gear, data = mtcars) m2 <- lm(mpg ~ gear, data = mtcars) m3 <- suppressWarnings(lmer(Reaction ~ 0 + Days + (Days | Subject), data = sleepstudy)) m4 <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) m5 <- suppressWarnings(lmer(Reaction ~ 0 + (Days | Subject), data = sleepstudy)) m6 <- lm(Sepal.Length ~ 0 + Petal.Width 
+ Species, data = iris) m7 <- lm(Sepal.Length ~ -1 + Petal.Width + Species, data = iris) m8 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) m9 <- lm(Sepal.Length ~ Petal.Width + Species + 1, data = iris) test_that("has_intercept", { expect_true(has_intercept(m2)) expect_false(has_intercept(m1)) expect_true(has_intercept(m4)) expect_false(has_intercept(m3)) expect_false(has_intercept(m5)) expect_false(has_intercept(m6)) expect_false(has_intercept(m7)) expect_true(has_intercept(m8)) expect_true(has_intercept(m9)) }) } insight/tests/testthat/test-BayesFactorBF.R0000644000175000017500000002654514134317616020607 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("insight") && requiet("stats") && requiet("BayesFactor")) { x <- correlationBF(y = iris$Sepal.Length, x = iris$Sepal.Width) test_that("get_data", { expect_true(is.data.frame(get_data(x))) }) test_that("find_formula", { expect_null(find_formula(x)) }) test_that("get_parameters", { expect_equal(nrow(get_parameters(x)), 4000) }) mi <- insight::model_info(x) test_that("model_info-BF", { expect_false(mi$is_binomial) expect_true(mi$is_linear) expect_true(mi$is_correlation) expect_false(mi$is_ttest) }) # --------------------------- set.seed(123) x <- rnorm(1000, 0, 1) y <- rnorm(1000, 0, 1) t1 <- ttestBF(x = x, mu = 60) t2 <- ttestBF(x = x, y = y) t2d <- ttestBF(x = x, y = y, paired = TRUE, mu = 60) test_that("get_data", { expect_true(is.data.frame(get_data(t1))) expect_true(is.data.frame(get_data(t2))) expect_true(is.data.frame(get_data(t2d))) }) test_that("find_formula", { expect_equal(find_formula(t1), list(conditional = y ~ 1), ignore_attr = TRUE) expect_equal(find_formula(t2), list(conditional = y ~ group), ignore_attr = TRUE) expect_equal(find_formula(t2d), list(conditional = y ~ 1), ignore_attr = TRUE) }) test_that("get_parameters", { expect_equal(nrow(get_parameters(t1)), 4000) expect_equal(nrow(get_parameters(t2)), 4000) expect_equal(nrow(get_parameters(t2d)), 4000) expect_equal(median(get_parameters(t1)[["Difference"]]), -60, tolerance = 0.05) expect_equal(median(get_parameters(t2)[["Difference"]]), 0, tolerance = 0.05) expect_equal(median(get_parameters(t2d)[["Difference"]]), -60, tolerance = 0.05) }) test_that("model_info", { expect_true(model_info(t1)$is_ttest) expect_true(model_info(t2)$is_ttest) expect_true(model_info(t2d)$is_ttest) }) test_that("get_priors", { expect_equal(nrow(get_priors(t1)), 1) expect_equal(nrow(get_priors(t2)), 1) expect_equal(nrow(get_priors(t2d)), 1) }) test_that("find_parameters", { expect_equal(nrow(get_parameters(t1)), 4000) expect_equal(nrow(get_parameters(t2)), 4000) expect_equal(nrow(get_parameters(t2d)), 4000) expect_equal(find_parameters(t1)[[1]], "Difference") expect_equal(find_parameters(t2)[[1]], "Difference") expect_equal(find_parameters(t2d)[[1]], "Difference") }) # --------------------------- if (.runThisTest) { t <- c(-.15, 2.39, 2.42, 2.43) N <- c(100, 150, 97, 99) x <- meta.ttestBF(t = t, n1 = N, rscale = 1) test_that("get_data", { expect_true(is.data.frame(get_data(x))) }) test_that("find_formula", { expect_null(find_formula(x)) }) test_that("get_parameters", { expect_equal(nrow(get_parameters(x)), 4000) }) } # --------------------------- if (.runThisTest) { data(ToothGrowth) ToothGrowth$dose <- factor(ToothGrowth$dose) levels(ToothGrowth$dose) <- c("Low", "Medium", "High") x <- anovaBF(len ~ supp * dose, data = ToothGrowth) test_that("get_data", { expect_true(is.data.frame(get_data(x))) }) 
test_that("find_formula", { expect_equal(find_formula(x), list(conditional = as.formula("len ~ supp + dose + supp:dose")), ignore_attr = TRUE ) }) test_that("get_parameters", { expect_equal(colnames(get_parameters(x)), c("mu", "supp-OJ", "supp-VC", "sig2", "g_supp")) }) test_that("clean_parameters", { cp <- clean_parameters(x) expect_equal( cp$Cleaned_Parameter, c( "supp [OJ]", "supp [VC]", "dose [Low]", "dose [Medium]", "dose [High]", "mu", "sig2", "g_supp" ) ) expect_equal( cp$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional", "extra", "extra", "extra" ) ) }) # --------------------------- data(puzzles) x <- anovaBF(RT ~ shape * color + ID, data = puzzles, whichRandom = "ID") test_that("get_data", { expect_true(is.data.frame(get_data(x))) }) test_that("find_formula", { expect_equal( find_formula(x), list( conditional = as.formula("RT ~ shape + color + shape:color"), random = as.formula("~ID") ), ignore_attr = TRUE ) }) test_that("get_parameters", { expect_equal( colnames(get_parameters(x)), c( "mu", "shape-round", "shape-square", "ID-1", "ID-2", "ID-3", "ID-4", "ID-5", "ID-6", "ID-7", "ID-8", "ID-9", "ID-10", "ID-11", "ID-12", "sig2", "g_shape", "g_ID" ) ) }) test_that("get_parameters", { expect_equal( find_parameters(x[4]), list( conditional = c( "shape-round", "shape-square", "color-color", "color-monochromatic", "shape:color-round.&.color", "shape:color-round.&.monochromatic", "shape:color-square.&.color", "shape:color-square.&.monochromatic" ), random = c( "ID-1", "ID-2", "ID-3", "ID-4", "ID-5", "ID-6", "ID-7", "ID-8", "ID-9", "ID-10", "ID-11", "ID-12" ), extra = c("mu", "sig2", "g_shape", "g_color", "g_ID", "g_shape:color") ) ) }) test_that("find_response", { expect_equal(find_response(x), "RT") }) test_that("find_random", { expect_equal(find_random(x), list(random = "ID")) }) test_that("find_variables", { expect_equal( find_variables(x), list( response = "RT", conditional = c("shape", "color"), random = "ID" ) ) }) test_that("find_terms", { expect_equal( find_terms(x), list( response = "RT", conditional = c("shape", "color"), random = "ID" ) ) }) test_that("get_priors", { expect_equal( get_priors(x), data.frame( Parameter = c( "shape-round", "shape-square", "color-color", "color-monochromatic", "ID-1", "ID-2", "ID-3", "ID-4", "ID-5", "ID-6", "ID-7", "ID-8", "ID-9", "ID-10", "ID-11", "ID-12", "mu", "sig2", "g_shape", "g_ID" ), Distribution = c( "cauchy", "cauchy", NA, NA, "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", "cauchy", NA, NA, NA, NA ), Location = c( 0, 0, NA, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, NA, NA, NA ), Scale = c( 0.5, 0.5, NA, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, NA, NA, NA ), stringsAsFactors = FALSE, row.names = NULL ), tolerance = 1e-5 ) }) # --------------------------- x <- lmBF(len ~ supp + dose, data = ToothGrowth) test_that("get_data", { expect_true(is.data.frame(get_data(x))) }) test_that("find_formula", { expect_equal(find_formula(x), list(conditional = as.formula("len ~ supp + dose")), ignore_attr = TRUE) }) test_that("get_parameters", { expect_equal( colnames(get_parameters(x)), c( "mu", "supp-OJ", "supp-VC", "dose-Low", "dose-Medium", "dose-High", "sig2", "g_supp", "g_dose" ) ) }) x2 <- lmBF(len ~ supp + dose + supp:dose, data = ToothGrowth) x <- x / x2 test_that("get_data", { expect_true(is.data.frame(get_data(x))) }) test_that("find_formula", { expect_equal(find_formula(x), list(conditional = as.formula("len ~ supp + dose")), ignore_attr = 
TRUE) }) test_that("get_parameters", { expect_equal( colnames(get_parameters(x)), c( "mu", "supp-OJ", "supp-VC", "dose-Low", "dose-Medium", "dose-High", "sig2", "g_supp", "g_dose" ) ) }) test_that("get_priors", { expect_equal( get_priors(x), data.frame( Parameter = c("supp-OJ", "supp-VC", "dose-Low", "dose-Medium", "dose-High", "mu", "sig2", "g_supp", "g_dose"), Distribution = c("cauchy", "cauchy", "cauchy", "cauchy", "cauchy", NA, NA, NA, NA), Location = c(0, 0, 0, 0, 0, NA, NA, NA, NA), Scale = c(0.5, 0.5, 0.5, 0.5, 0.5, NA, NA, NA, NA), stringsAsFactors = FALSE, row.names = NULL ), ignore_attr = TRUE, tolerance = 1e-5 ) }) test_that("find_statistic", { expect_null(find_statistic(x)) }) } corr_BF1 <- correlationBF(iris$Sepal.Length, iris$Sepal.Width, progress = FALSE) corr_BFk <- correlationBF(iris$Sepal.Length, iris$Sepal.Width, progress = FALSE, nullInterval = c(-1, 0) ) data(raceDolls) xtab_BF1 <- contingencyTableBF(raceDolls, sampleType = "indepMulti", fixedMargin = "cols", priorConcentration = 2) ttest_BF1 <- ttestBF(sleep$extra[sleep$group == 1], sleep$extra[sleep$group == 2], progress = FALSE) ttest_BFk <- ttestBF(sleep$extra[sleep$group == 1], sleep$extra[sleep$group == 2], progress = FALSE, nullInterval = c(-3, 0) ) prop_BF1 <- proportionBF(y = 15, N = 25, p = .5, progress = FALSE) prop_BFk <- proportionBF( y = 15, N = 25, p = .5, progress = FALSE, nullInterval = c(0, 0.3) ) lm_BFk <- generalTestBF(Sepal.Width ~ Sepal.Length + Species, data = iris, progress = FALSE) lm_BFd <- lm_BFk[3] / lm_BFk[2] lm_BF1 <- lm_BFk[2] test_that("BFBayesFactor index model", { expect_message(get_parameters(corr_BFk)) expect_message(get_parameters(ttest_BFk)) expect_message(get_parameters(prop_BFk)) expect_message(get_parameters(lm_BFk)) expect_message(get_parameters(lm_BFd)) expect_message(get_parameters(xtab_BF1), regexp = NA) expect_message(get_parameters(corr_BF1), regexp = NA) expect_message(get_parameters(ttest_BF1), regexp = NA) expect_message(get_parameters(prop_BF1), regexp = NA) expect_message(get_parameters(lm_BF1), regexp = NA) }) test_that("get_priors for xtable", { expect_equal( get_priors(xtab_BF1), structure(list( Parameter = "Ratio", Distribution = "independent multinomial", Location = 0, Scale = 2 ), class = "data.frame", row.names = c(NA, -1L) ), tolerance = 1e-5 ) }) test_that("get_priors for correlation", { expect_equal( get_priors(corr_BF1), structure(list( Parameter = "rho", Distribution = "beta", Location = 3, Scale = 3 ), class = "data.frame", row.names = c( NA, -1L )), tolerance = 1e-5 ) }) test_that("get_priors for t-test", { expect_equal( get_priors(ttest_BF1), structure(list( Parameter = "Difference", Distribution = "cauchy", Location = 0, Scale = 0.707106781186548 ), class = "data.frame", row.names = c(NA, -1L) ), tolerance = 1e-5 ) }) } insight/tests/testthat/test-crq.R0000644000175000017500000000555414122064326016751 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("quantreg")) { set.seed(123) n <- 200 x <- rnorm(n) y <- 5 + x + rnorm(n) c <- 4 + x + rnorm(n) d <- (y > c) dat <- data.frame(y, x, c, d) # model m1 <- crq(survival::Surv(pmax(y, c), d, type = "left") ~ x, method = "Portnoy", data = dat) test_that("model_info", { expect_false(model_info(m1)$is_linear) expect_true(model_info(m1)$is_censored) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list(conditional = "x") ) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "Surv(pmax(y, c), 
d, type = \"left\")") }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), "x") }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 200) expect_equal( colnames(get_data(m1)), c("y", "x", "c", "d") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("survival::Surv(pmax(y, c), d, type = \"left\") ~ x")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "Surv(pmax(y, c), d, type = \"left\")", conditional = "x" ) ) expect_equal( find_terms(m1, flatten = TRUE), c("Surv(pmax(y, c), d, type = \"left\")", "x") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 200) }) test_that("link_function", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("(Intercept)", "x")) ) expect_equal(nrow(get_parameters(m1)), 8) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x") ) expect_equal( get_parameters(m1)$Component, c("tau (0.2)", "tau (0.2)", "tau (0.4)", "tau (0.4)", "tau (0.6)", "tau (0.6)", "tau (0.8)", "tau (0.8)") ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) if (getRversion() >= "3.6.0") { test_that("get_statistic", { expect_equal( get_statistic(m1)$Parameter, c("(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x") ) expect_equal( get_statistic(m1)$Statistic, c(67.64633, 5.88482, 56.8453, 10.05249, 76.86565, 9.78366, 53.05556, 12.83912), tolerance = 1e-3 ) }) } } insight/tests/testthat/test-gamlss.R0000644000175000017500000000544114122064326017445 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("gamlss")) { data(abdom) void <- capture.output( m1 <- gamlss( y ~ pb(x), sigma.formula = ~ pb(x), family = BCT, data = abdom, method = mixed(1, 20) ) ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = "x", sigma = "x")) expect_identical(find_predictors(m1, flatten = TRUE), "x") expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "y") }) test_that("get_response", { expect_equal(get_response(m1), abdom$y) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), "x") }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 610) expect_equal(colnames(get_data(m1)), c("y", "x")) }) test_that("find_formula", { expect_length(find_formula(m1), 4) expect_equal( find_formula(m1), list( conditional = as.formula("y ~ pb(x)"), sigma = as.formula("~pb(x)"), nu = as.formula("~1"), tau = as.formula("~1") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "y", conditional = "x", sigma = "x" ) ) expect_equal(find_variables(m1, flatten = TRUE), c("y", "x")) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "y", conditional = "pb(x)", sigma = "pb(x)", nu = "1", tau = "1" ) ) }) test_that("n_obs", { expect_equal(n_obs(m1), 610) }) test_that("link_function", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) 
test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "pb(x)"), sigma = c("(Intercept)", "pb(x)"), nu = "(Intercept)", tau = "(Intercept)" ) ) expect_equal(nrow(get_parameters(m1)), 6) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "mixed")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-clean_names.R0000644000175000017500000000763414122064326020432 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { test_that("clean_names", { expect_equal(clean_names(""), "") expect_equal(clean_names("as.factor(test)"), "test") expect_equal(clean_names("log(test)"), "test") expect_equal(clean_names("log(test, base = exp(3))"), "test") expect_equal(clean_names("log(test,base=exp(3))"), "test") expect_equal(clean_names("log(test/10)"), "test") expect_equal(clean_names("log(test^2)"), "test") expect_equal(clean_names("log(log(test))"), "test") expect_equal(clean_names("log(log(test/10))"), "test") expect_equal(clean_names("log(log(test*2))"), "test") expect_equal(clean_names("scale(log(Days1))"), "Days1") expect_equal(clean_names("I(test^2)"), "test") expect_equal(clean_names("I(test/10)"), "test") expect_equal(clean_names("I(test ^ 2)"), "test") expect_equal(clean_names("I(test / 10)"), "test") expect_equal(clean_names("poly(test, 2)"), "test") expect_equal(clean_names("poly(test, degrees = 2)"), "test") expect_equal(clean_names("poly(test, degrees = 2, raw = TRUE)"), "test") expect_equal(clean_names("ns(test)"), "test") expect_equal(clean_names("ns(test, df = 2)"), "test") expect_equal(clean_names("bs(test)"), "test") expect_equal(clean_names("bs(test, df = 2)"), "test") expect_equal(clean_names("offset(test)"), "test") expect_equal(clean_names("offset(log(test))"), "test") expect_equal(clean_names("factor(test)"), "test") expect_equal(clean_names("as.factor(test)"), "test") expect_equal(clean_names("~ 1 | test"), "test") expect_equal(clean_names("~1|test"), "test") expect_equal(clean_names("1 | test"), "test") expect_equal(clean_names("as.factor(Sepal.Length)"), "Sepal.Length") expect_equal(clean_names("log(Sepal.Length)"), "Sepal.Length") expect_equal(clean_names("log(Sepal.Length, base = exp(3))"), "Sepal.Length") expect_equal(clean_names("log(Sepal.Length,base=exp(3))"), "Sepal.Length") expect_equal(clean_names("log(Sepal.Length/10)"), "Sepal.Length") expect_equal(clean_names("log(Sepal.Length^2)"), "Sepal.Length") expect_equal(clean_names("log(log(Sepal.Length))"), "Sepal.Length") expect_equal(clean_names("log(log(Sepal.Length/10))"), "Sepal.Length") expect_equal(clean_names("log(log(Sepal.Length*2))"), "Sepal.Length") expect_equal(clean_names("I(Sepal.Length^2)"), "Sepal.Length") expect_equal(clean_names("I(Sepal.Length/10)"), "Sepal.Length") expect_equal(clean_names("I(Sepal.Length ^ 2)"), "Sepal.Length") expect_equal(clean_names("I(Sepal.Length / 10)"), "Sepal.Length") expect_equal(clean_names("poly(Sepal.Length, 2)"), "Sepal.Length") expect_equal(clean_names("poly(Sepal.Length, degrees = 2)"), "Sepal.Length") expect_equal(clean_names("poly(Sepal.Length, degrees = 2, raw = TRUE)"), "Sepal.Length") expect_equal(clean_names("ns(Sepal.Length)"), "Sepal.Length") expect_equal(clean_names("ns(Sepal.Length, df = 2)"), "Sepal.Length") expect_equal(clean_names("bs(Sepal.Length)"), 
"Sepal.Length") expect_equal(clean_names("bs(Sepal.Length, df = 2)"), "Sepal.Length") expect_equal(clean_names("offset(Sepal.Length)"), "Sepal.Length") expect_equal(clean_names("offset(log(Sepal.Length))"), "Sepal.Length") expect_equal(clean_names("factor(Sepal.Length)"), "Sepal.Length") expect_equal(clean_names("as.factor(Sepal.Length)"), "Sepal.Length") expect_equal(clean_names("~ 1 | Sepal.Length"), "Sepal.Length") expect_equal(clean_names("~1|Sepal.Length"), "Sepal.Length") expect_equal(clean_names("1 | Sepal.Length"), "Sepal.Length") expect_equal(clean_names(c("scale(a)", "scale(b)", "scale(a):scale(b)")), c("a", "b", "a:b")) expect_equal( clean_names(c("scale(a)", "scale(b)", "scale(a):scale(b)"), include_names = TRUE), c(`scale(a)` = "a", `scale(b)` = "b", `scale(a):scale(b)` = "a:b") ) }) } insight/tests/testthat/test-lme.R0000644000175000017500000001225514122064326016735 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("nlme") && requiet("lme4")) { data("sleepstudy") data(Orthodont) m1 <- lme(Reaction ~ Days, random = ~ 1 + Days | Subject, data = sleepstudy ) m2 <- lme(distance ~ age + Sex, data = Orthodont, random = ~1) set.seed(123) sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } m3 <- lme(Reaction ~ Days, random = ~ 1 | mygrp / mysubgrp, data = sleepstudy ) test_that("nested_varCorr", { skip_on_travis() skip_on_cran() expect_equal( insight:::.get_nested_lme_varcorr(m3), list( mysubgrp = structure( 7.508310765, .Dim = c(1L, 1L), .Dimnames = list("(Intercept)", "(Intercept)") ), mygrp = structure( 0.004897827, .Dim = c(1L, 1L), .Dimnames = list("(Intercept)", "(Intercept)") ) ), tolerance = 1e-4 ) }) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = "Days")) expect_identical(find_predictors(m2), list(conditional = c("age", "Sex"))) expect_identical( find_predictors(m1, effects = "all"), list(conditional = "Days", random = "Subject") ) expect_identical(find_predictors(m2, effects = "all"), list(conditional = c("age", "Sex"))) expect_identical(find_predictors(m1, flatten = TRUE), "Days") expect_identical( find_predictors(m1, effects = "random"), list(random = "Subject") ) }) test_that("find_response", { expect_identical(find_response(m1), "Reaction") expect_identical(find_response(m2), "distance") }) test_that("get_response", { expect_equal(get_response(m1), sleepstudy$Reaction) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "Subject")) expect_null(find_random(m2)) }) test_that("get_random", { expect_equal(get_random(m1), data.frame(Subject = sleepstudy$Subject)) expect_warning(get_random(m2)) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 180) expect_equal(colnames(get_data(m1)), c("Reaction", "Days", "Subject")) expect_equal(colnames(get_data(m2)), c("distance", "age", "Sex")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("Reaction ~ Days"), random = as.formula("~1 + Days | Subject") ), ignore_attr = TRUE ) expect_length(find_formula(m2), 2) expect_equal( find_formula(m2), list( conditional = as.formula("distance ~ age + Sex"), random = as.formula("~1") ), 
ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "Reaction", conditional = "Days", random = "Subject" ) ) expect_equal( find_variables(m1, flatten = TRUE), c("Reaction", "Days", "Subject") ) expect_equal( find_variables(m2), list( response = "distance", conditional = c("age", "Sex") ) ) }) test_that("n_obs", { expect_equal(n_obs(m1), 180) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "Days"), random = c("(Intercept)", "Days") ) ) expect_equal(nrow(get_parameters(m1)), 2) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "Days")) expect_equal( find_parameters(m2), list( conditional = c("(Intercept)", "age", "SexFemale"), random = c("(Intercept)") ) ) }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list(algorithm = "REML", optimizer = "nlminb") ) }) test_that("get_variance", { skip_on_cran() expect_equal( get_variance(m1), list( var.fixed = 908.95336262308865116211, var.random = 1698.06593646939654718153, var.residual = 654.94240352794997761521, var.distribution = 654.94240352794997761521, var.dispersion = 0, var.intercept = c(Subject = 612.07951112963326067984), var.slope = c(Subject.Days = 35.07130179308116169068), cor.slope_intercept = c(Subject = 0.06600000000000000311) ), tolerance = 1e-4 ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") expect_identical(find_statistic(m2), "t-statistic") expect_identical(find_statistic(m3), "t-statistic") }) } insight/tests/testthat/test-ivreg_AER.R0000644000175000017500000000702214122064326017757 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("AER")) { data(CigarettesSW) CigarettesSW$rprice <- with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) m1 <- AER::ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("rprice", "rincome"), instruments = c("rincome", "tdiff", "tax", "cpi") ) ) expect_identical( find_predictors(m1, flatten = TRUE), c("rprice", "rincome", "tdiff", "tax", "cpi") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "packs") }) test_that("get_response", { expect_equal(get_response(m1), CigarettesSW$packs[CigarettesSW$year == "1995"]) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c("rprice", "rincome", "tdiff", "tax", "cpi") ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 48) expect_equal( colnames(get_data(m1)), c("packs", "rprice", "rincome", "tdiff", "tax", "cpi") ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("log(packs) ~ log(rprice) + log(rincome)"), instruments = as.formula("~log(rincome) + tdiff + I(tax/cpi)") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response 
= "packs", conditional = c("rprice", "rincome"), instruments = c("rincome", "tdiff", "tax", "cpi") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("packs", "rprice", "rincome", "tdiff", "tax", "cpi") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 48) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "log(rprice)", "log(rincome)") ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "log(rprice)", "log(rincome)") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "log(packs)", conditional = c("log(rprice)", "log(rincome)"), instruments = c("log(rincome)", "tdiff", "I(tax/cpi)") ) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "log(rprice)", "log(rincome)") ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-find_predictor_nested_re.R0000644000175000017500000000121114122064326023171 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { set.seed(1984) dat <- data.frame( y = rnorm(1000 * 5, sd = 1 - .20), time = rep(1:10, 100 * 5), g1 = sort(rep(1:100, 10 * 5)), g2 = sort(rep(1:10, 100 * 5)) ) dat$g0 <- paste(dat$time, dat$g1) dat$time1 <- dat$time - 8 dat$post <- 0 dat$post[dat$time >= 8] <- 1 m <- lmer(y ~ post + time1 + (1 | g2 / g1 / g0) + (post + time1 - 1 | g2), data = dat) test_that("clean_names", { expect_equal( find_predictors(m, effects = "all"), list(conditional = c("post", "time1"), random = c("g0", "g1", "g2")) ) }) } insight/tests/testthat/test-speedlm.R0000644000175000017500000001011414122064326017601 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("speedglm")) { data(iris) data(mtcars) m1 <- speedlm(Sepal.Length ~ Petal.Width + Species, data = iris) m2 <- speedlm(log(mpg) ~ log(hp) + cyl + I(cyl^2) + poly(wt, degree = 2, raw = TRUE), data = mtcars ) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("Petal.Width", "Species"))) expect_identical( find_predictors(m1, flatten = TRUE), c("Petal.Width", "Species") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "Sepal.Length") }) test_that("get_response", { expect_equal(get_response(m1), iris$Sepal.Length) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("Petal.Width", "Species")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("linkfun", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 150) expect_equal( colnames(get_data(m1)), c("Sepal.Length", "Petal.Width", "Species") ) expect_equal(colnames(get_data(m2)), c("mpg", "hp", "cyl", "wt")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("Sepal.Length ~ Petal.Width + Species")), ignore_attr = TRUE ) expect_equal( find_formula(m2), list( conditional = as.formula( "log(mpg) ~ log(hp) + cyl + I(cyl^2) + 
poly(wt, degree = 2, raw = TRUE)" ) ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "Sepal.Length", conditional = c("Petal.Width", "Species") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("Sepal.Length", "Petal.Width", "Species") ) expect_equal( find_variables(m2, flatten = TRUE), c("mpg", "hp", "cyl", "wt") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 150) expect_equal(n_obs(m2), 32) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "(Intercept)", "Petal.Width", "Speciesversicolor", "Speciesvirginica" ) ) ) expect_equal( find_parameters(m2), list( conditional = c( "(Intercept)", "log(hp)", "cyl", "I(cyl^2)", "poly(wt, degree = 2, raw = TRUE)1", "poly(wt, degree = 2, raw = TRUE)2" ) ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c( "(Intercept)", "Petal.Width", "Speciesversicolor", "Speciesvirginica" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "Sepal.Length", conditional = c("Petal.Width", "Species") ) ) expect_equal( find_terms(m2), list( response = "log(mpg)", conditional = c( "log(hp)", "cyl", "I(cyl^2)", "poly(wt, degree = 2, raw = TRUE)" ) ) ) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "eigen")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") expect_identical(find_statistic(m2), "t-statistic") }) } insight/tests/testthat/test-cgam.R0000644000175000017500000000241514122064326017064 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("cgam")) { data(cubic, package = "cgam") m <- cgam(y ~ incr.conv(x), data = cubic) mi <- insight::model_info(m) test_that("model_info", { expect_false(mi$is_binomial) expect_true(mi$is_linear) expect_false(mi$is_censored) }) test_that("n_obs", { expect_equal(n_obs(m), 50) }) test_that("find_formula", { expect_length(find_formula(m), 1) expect_equal( find_formula(m), list(conditional = as.formula("y ~ incr.conv(x)")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m), list( response = "y", conditional = "incr.conv(x)" )) expect_equal( find_terms(m, flatten = TRUE), c("y", "incr.conv(x)") ) }) test_that("get_data", { expect_equal(nrow(get_data(m)), 50) expect_equal(colnames(get_data(m)), c("y", "x")) }) test_that("get_response", { expect_equal(get_response(m), cubic$y) }) test_that("is_multivariate", { expect_false(is_multivariate(m)) }) test_that("is_model", { expect_true(is_model(m)) }) test_that("find_statistic", { expect_identical(find_statistic(m), "t-statistic") }) } insight/tests/testthat/test-htest.R0000644000175000017500000000341314122064326017303 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { x <- t.test(1:3, c(1, 1:3)) test_that("get_data.t-test", { expect_equal(get_data(x)$x, c(1, 2, 3, NA)) }) dat <<- matrix(c(794, 86, 150, 570), nrow = 2, dimnames = list( "1st Survey" = c("Approve", "Disapprove"), "2nd Survey" = c("Approve", "Disapprove") ) ) m <- mcnemar.test(dat) test_that("get_data.mcnemar", { expect_equal( get_data(m), structure(c(794, 86, 150, 570), .Dim = c(2L, 2L), .Dimnames = list( `1st Survey` = c("Approve", "Disapprove"), `2nd Survey` = c("Approve", "Disapprove") ), class = "table" ) ) }) TeaTasting <<- matrix(c(3, 1, 1, 3), nrow = 2, dimnames = list( Guess = c("Milk", "Tea"), Truth = c("Milk", "Tea") ) ) m <- 
fisher.test(TeaTasting, alternative = "greater") test_that("get_data.fisher", { expect_equal( get_data(m), structure(c(3, 1, 1, 3), .Dim = c(2L, 2L), .Dimnames = list( Guess = c("Milk", "Tea"), Truth = c("Milk", "Tea") ), class = "table" ) ) }) wb <<- aggregate(warpbreaks$breaks, by = list( w = warpbreaks$wool, t = warpbreaks$tension ), FUN = mean ) m <- friedman.test(wb$x, wb$w, wb$t) test_that("get_data.freedman", { expect_equal( get_data(m), data.frame( x = c( 44.5555555555556, 28.2222222222222, 24, 28.7777777777778, 24.5555555555556, 18.7777777777778 ), w = c(1L, 2L, 1L, 2L, 1L, 2L), t = c(1L, 1L, 2L, 2L, 3L, 3L) ), tolerance = 1e-3, ignore_attr = TRUE ) }) } insight/tests/testthat/test-survreg.R0000644000175000017500000000653314122064326017657 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("survival")) { m1 <- survreg(Surv(futime, fustat) ~ ecog.ps + rx, data = ovarian, dist = "exponential" ) m2 <- survreg(Surv(time, status) ~ ph.ecog + age + strata(sex), data = lung) test_that("model_info", { expect_false(model_info(m1)$is_linear) expect_true(model_info(m1)$is_exponential) expect_true(model_info(m2)$is_survival) expect_false(model_info(m2)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("ecog.ps", "rx"))) expect_identical(find_predictors(m1, flatten = TRUE), c("ecog.ps", "rx")) expect_null(find_predictors(m1, effects = "random")) expect_identical(find_predictors(m2), list(conditional = c("ph.ecog", "age", "sex"))) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "Surv(futime, fustat)") }) test_that("get_response", { expect_equal(get_response(m1), ovarian[, c("futime", "fustat")]) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("ecog.ps", "rx")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 26) expect_equal( colnames(get_data(m1)), c("futime", "fustat", "Surv(futime, fustat)", "ecog.ps", "rx") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("Surv(futime, fustat) ~ ecog.ps + rx")), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal(find_variables(m1), list( response = c("futime", "fustat"), conditional = c("ecog.ps", "rx") )) expect_equal( find_variables(m1, flatten = TRUE), c("futime", "fustat", "ecog.ps", "rx") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 26) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("(Intercept)", "ecog.ps", "rx")) ) expect_equal( find_parameters(m2), list(conditional = c("(Intercept)", "ph.ecog", "age", "sex=1", "sex=2")) ) }) test_that("get_parameters", { expect_equal(nrow(get_parameters(m1)), 3) expect_equal(get_parameters(m1)$Parameter, c("(Intercept)", "ecog.ps", "rx")) expect_equal(get_parameters(m1)$Estimate, c(6.96184, -0.43313, 0.5815), tolerance = 1e-3) expect_equal(nrow(get_parameters(m2)), 5) expect_equal(get_parameters(m2)$Parameter, c("(Intercept)", "ph.ecog", "age", "sex=1", "sex=2")) expect_equal(get_parameters(m2)$Estimate, c(6.73235, -0.32443, -0.00581, -0.24408, -0.42345), tolerance = 1e-3) }) test_that("get_statistic", { expect_equal(nrow(get_statistic(m1)), 3) 
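# added note (hedged, not from the original file): m1 was fit with
# dist = "exponential", so it has no free scale term and get_statistic()
# returns one row per coefficient -- (Intercept), ecog.ps, rx -- i.e. 3 rows.
# m2 uses strata(sex); its 5 rows appear to include the per-stratum terms
# "sex=1" and "sex=2" listed by find_parameters(m2) further above.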
expect_equal(nrow(get_statistic(m2)), 5) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-psm.R0000644000175000017500000000702514122064326016756 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("rms")) { n <- 400 set.seed(1) age <- rnorm(n, 50, 12) sex <- factor(sample(c("Female", "Male"), n, TRUE)) # Population hazard function: h <- .02 * exp(.06 * (age - 50) + .8 * (sex == "Female")) d.time <- -log(runif(n)) / h cens <- 15 * runif(n) death <- ifelse(d.time <= cens, 1, 0) d.time <- pmin(d.time, cens) dat <- data.frame(d.time, death, sex, age, stringsAsFactors = FALSE) m1 <- psm(Surv(d.time, death) ~ sex * pol(age, 2), dist = "lognormal", data = dat ) test_that("model_info", { expect_false(model_info(m1)$is_binomial) expect_false(model_info(m1)$is_logit) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("sex", "age"))) expect_identical(find_predictors(m1, flatten = TRUE), c("sex", "age")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "Surv(d.time, death)") expect_identical(find_response(m1, combine = FALSE), c("d.time", "death")) }) test_that("get_response", { expect_equal(get_response(m1), dat[, c("d.time", "death")]) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("sex", "age")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 400) expect_equal(colnames(get_data(m1)), c("d.time", "death", "sex", "age")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula( "Surv(d.time, death) ~ sex * pol(age, 2)" )), ignore_attr = TRUE ) }) test_that("find_terms", { expect_length(find_terms(m1), 2) expect_equal( find_terms(m1), list( response = "Surv(d.time, death)", conditional = c("sex", "pol(age, 2)") ) ) }) test_that("find_variables", { expect_equal(find_variables(m1), list( response = c("d.time", "death"), conditional = c("sex", "age") )) expect_equal( find_variables(m1, flatten = TRUE), c("d.time", "death", "sex", "age") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 400) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("linkinverse", { expect_false(is.null(link_inverse(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "(Intercept)", "sex=Male", "age", "age^2", "sex=Male * age", "sex=Male * age^2" ) ) ) expect_equal(nrow(get_parameters(m1)), 6) expect_equal( get_parameters(m1)$Parameter, c( "(Intercept)", "sex=Male", "age", "age^2", "sex=Male * age", "sex=Male * age^2" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_warning(find_algorithm(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-negbin.R0000644000175000017500000001023714122064326017420 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest && Sys.getenv("USER") != "travis") { if (requiet("testthat") && requiet("insight") && requiet("aod")) { data(dja) m1 <- suppressWarnings(aod::negbin(y 
~ group + offset(log(trisk)), random = ~village, data = dja )) test_that("model_info", { expect_true(model_info(m1)$is_negbin) expect_true(model_info(m1)$is_mixed) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("group", "trisk"))) expect_identical(find_predictors(m1, flatten = TRUE), c("group", "trisk")) expect_identical( find_predictors(m1, effects = "random"), list(random = "village") ) expect_identical( find_predictors(m1, effects = "all"), list( conditional = c("group", "trisk"), random = "village" ) ) }) test_that("find_random", { expect_identical(find_random(m1), list(random = "village")) }) test_that("get_random", { expect_equal(get_random(m1), dja[, "village", drop = FALSE]) }) test_that("find_response", { expect_identical(find_response(m1), "y") expect_identical(find_response(m1, combine = FALSE), "y") }) test_that("get_response", { expect_equal(get_response(m1), dja[, "y"]) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("group", "trisk")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("link_function", { expect_equal(link_function(m1)(.2), log(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 75) expect_equal(colnames(get_data(m1)), c("y", "group", "trisk", "village")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("y ~ group + offset(log(trisk))"), random = as.formula("~village") ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "y", conditional = c("group", "trisk"), random = "village" ) ) expect_equal( find_variables(m1, flatten = TRUE), c("y", "group", "trisk", "village") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 75) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "groupTREAT"), random = c( "phi.villageBAK", "phi.villageBAM", "phi.villageBAN", "phi.villageBIJ", "phi.villageBOU", "phi.villageBYD", "phi.villageDEM", "phi.villageDIA", "phi.villageHAM", "phi.villageLAM", "phi.villageLAY", "phi.villageMAF", "phi.villageMAH", "phi.villageMAK", "phi.villageMED", "phi.villageNAB", "phi.villageSAG", "phi.villageSAM", "phi.villageSOU" ) ) ) expect_equal(nrow(get_parameters(m1)), 2) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "groupTREAT") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "y", conditional = c("group", "offset(log(trisk))"), random = "village" ) ) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } } insight/tests/testthat/test-mlogit.R0000644000175000017500000000671614122064326017460 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("mlogit")) { data("Fishing") Fish <- mlogit.data(Fishing, varying = c(2:9), shape = "wide", choice = "mode" ) m1 <- mlogit(mode ~ price + catch, data = Fish) m2 <- mlogit(mode ~ price + catch | income, data = Fish) test_that("model_info", { expect_false(model_info(m1)$is_ordinal) expect_false(model_info(m2)$is_ordinal) expect_true(model_info(m1)$is_multinomial) expect_true(model_info(m2)$is_multinomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { 
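# added note (hedged, not from the original file): mlogit() uses a multi-part
# formula; m2 places income in the second part (mode ~ price + catch | income),
# and the expectations below assume find_predictors() folds both parts into a
# single conditional component, i.e. c("price", "catch", "income") for m2.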
expect_identical(find_predictors(m1), list(conditional = c("price", "catch"))) expect_identical(find_predictors(m1, flatten = TRUE), c("price", "catch")) expect_null(find_predictors(m1, effects = "random")) expect_identical(find_predictors(m2), list(conditional = c("price", "catch", "income"))) expect_identical( find_predictors(m2, flatten = TRUE), c("price", "catch", "income") ) expect_null(find_predictors(m2, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "mode") expect_identical(find_response(m2), "mode") }) if (getRversion() >= "3.6.0") { test_that("get_response", { expect_equal(get_response(m1), as.vector(Fish$mode)) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 4728) expect_equal(nrow(get_data(m2)), 4728) if (packageVersion("mlogit") <= "1.0-3.1") { expect_equal( colnames(get_data(m1)), c("mode", "price", "catch", "probabilities", "linpred") ) expect_equal( colnames(get_data(m2)), c( "mode", "price", "catch", "income", "probabilities", "linpred" ) ) } else { expect_equal( colnames(get_data(m1)), c("mode", "price", "catch", "idx", "probabilities", "linpred") ) expect_equal( colnames(get_data(m2)), c( "mode", "price", "catch", "income", "idx", "probabilities", "linpred" ) ) } }) } test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) expect_equal(link_inverse(m2)(.2), plogis(.2), tolerance = 1e-5) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_length(find_formula(m2), 1) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "mode", conditional = c("price", "catch") )) expect_equal(find_terms(m1, flatten = TRUE), c("mode", "price", "catch")) expect_equal(find_terms(m2), list( response = "mode", conditional = c("price", "catch", "income") )) expect_equal( find_terms(m2, flatten = TRUE), c("mode", "price", "catch", "income") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 4728) expect_equal(n_obs(m2), 4728) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_false(is.null(link_function(m2))) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") expect_identical(find_statistic(m2), "z-statistic") }) } insight/tests/testthat/test-censReg.R0000644000175000017500000000743714122064326017554 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("censReg") && requiet("AER")) { data("Affairs", package = "AER") m1 <- censReg(affairs ~ age + yearsmarried + religiousness + occupation + rating, data = Affairs ) test_that("model_info", { expect_false(model_info(m1)$is_linear) expect_true(model_info(m1)$is_censored) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list( conditional = c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) )) expect_identical( find_predictors(m1, flatten = TRUE), c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "affairs") }) test_that("get_response", { expect_equal(get_response(m1), Affairs$affairs) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { 
expect_equal(nrow(get_data(m1)), 601) expect_equal( colnames(get_data(m1)), c( "affairs", "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list( conditional = as.formula( "affairs ~ age + yearsmarried + religiousness + occupation + rating" ) ), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "affairs", conditional = c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) ) expect_equal( find_variables(m1, flatten = TRUE), c( "affairs", "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) }) test_that("n_obs", { expect_equal(n_obs(m1), 601) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "(Intercept)", "age", "yearsmarried", "religiousness", "occupation", "rating", "logSigma" ) ) ) expect_equal(nrow(get_parameters(m1)), 7) expect_equal( get_parameters(m1)$Parameter, c( "(Intercept)", "age", "yearsmarried", "religiousness", "occupation", "rating", "logSigma" ) ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "affairs", conditional = c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-get_residuals.R0000644000175000017500000000642014122064326021007 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lme4")) { data(mtcars) data(sleepstudy) data(cbpp) set.seed(123) mtcars$w <- abs(rnorm(nrow(mtcars), mean = 1, .3)) sleepstudy$w <- abs(rnorm(nrow(sleepstudy), mean = 1, .3)) cbpp$w <- abs(rnorm(nrow(cbpp), mean = 1, .3)) test_that("get_residuals - lm", { m <- lm(am ~ cyl, weights = w, data = mtcars) expect_equal( as.vector(get_residuals(m, weighted = FALSE)), as.vector(residuals(m)) ) expect_equal( as.vector(get_residuals(m, weighted = FALSE, type = "deviance")), as.vector(residuals(m, type = "deviance")) ) expect_equal( get_weights(m), weights(m) ) expect_equal( as.vector(get_residuals(m)), as.vector(residuals(m)) ) expect_equal( as.vector(get_deviance(m)), as.vector(deviance(m)) ) expect_equal( get_residuals(m, weighted = TRUE), as.vector(weighted.residuals(m)) ) }) test_that("get_residuals - glm", { m <- suppressWarnings(glm(am ~ cyl, weights = w, data = mtcars, family = binomial)) expect_equal( as.vector(get_residuals(m, weighted = FALSE, type = "response")), as.vector(residuals(m, type = "response")) ) expect_equal( as.vector(get_residuals(m, weighted = FALSE)), as.vector(residuals(m)) ) expect_equal( get_weights(m), weights(m) ) expect_equal( as.vector(get_residuals(m)), as.vector(residuals(m)) ) expect_equal( as.vector(get_deviance(m)), as.vector(deviance(m)) ) expect_equal( get_residuals(m, weighted = TRUE), as.vector(weighted.residuals(m)) ) }) test_that("get_residuals - lmer", { m <- lmer(Reaction ~ Days + (Days | Subject), weights = w, data = sleepstudy) expect_equal( as.vector(get_residuals(m, weighted = FALSE)), as.vector(residuals(m)) ) expect_equal( as.vector(get_residuals(m, weighted = FALSE, type = "deviance")), as.vector(residuals(m, type = "deviance")) ) expect_equal( get_weights(m), weights(m) ) expect_equal( as.vector(get_residuals(m)), as.vector(residuals(m)) ) expect_equal( as.vector(get_deviance(m)), 
as.vector(deviance(m, REML = FALSE)) ) expect_equal( get_residuals(m, weighted = TRUE), as.vector(weighted.residuals(m)) ) }) test_that("get_residuals - glmer", { m <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd), weights = w, data = cbpp, family = binomial, nAGQ = 0) expect_equal( as.vector(get_residuals(m, weighted = FALSE, type = "response")), as.vector(residuals(m, type = "response")) ) expect_equal( as.vector(get_residuals(m, weighted = FALSE)), as.vector(residuals(m)) ) expect_equal( get_weights(m), weights(m) ) expect_equal( as.vector(get_residuals(m)), as.vector(residuals(m)) ) expect_equal( as.vector(get_deviance(m)), 177.4023, tolerance = 1e-3 ) expect_equal( get_residuals(m, weighted = TRUE), as.vector(weighted.residuals(m)) ) }) } insight/tests/testthat/test-tobit.R0000644000175000017500000000657514122064326017311 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("AER")) { data("Affairs", package = "AER") m1 <- AER::tobit(affairs ~ age + yearsmarried + religiousness + occupation + rating, data = Affairs ) test_that("model_info", { expect_true(model_info(m1)$is_linear) expect_true(model_info(m1)$is_censored) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list( conditional = c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) )) expect_identical( find_predictors(m1, flatten = TRUE), c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "affairs") }) test_that("get_response", { expect_equal(get_response(m1), Affairs$affairs) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 601) expect_equal( colnames(get_data(m1)), c( "affairs", "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list( conditional = as.formula( "affairs ~ age + yearsmarried + religiousness + occupation + rating" ) ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "affairs", conditional = c( "age", "yearsmarried", "religiousness", "occupation", "rating" ) )) expect_equal( find_terms(m1, flatten = TRUE), c( "affairs", "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) }) test_that("n_obs", { expect_equal(n_obs(m1), 601) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c( "(Intercept)", "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) ) expect_equal(nrow(get_parameters(m1)), 6) expect_equal( get_parameters(m1)$Parameter, c( "(Intercept)", "age", "yearsmarried", "religiousness", "occupation", "rating" ) ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-iv_robust.R0000644000175000017500000000541414122064326020173 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("estimatr")) { 
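# --- added note (not part of the original test file; hedged) ---------------
# iv_robust() takes a two-part formula, outcome ~ regressors | instruments.
# The expectations below assume insight splits this into a "conditional"
# component (gear, cyl) and an "instruments" component (carb, wt), e.g.:
#   find_formula(iv_robust(mpg ~ gear + cyl | carb + wt, data = mtcars))
#   #> $conditional  mpg ~ gear + cyl
#   #> $instruments  ~carb + wt
# ---------------------------------------------------------------------------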
data(mtcars) m1 <- iv_robust(mpg ~ gear + cyl | carb + wt, data = mtcars) test_that("model_info", { expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical( find_predictors(m1), list( conditional = c("gear", "cyl"), instruments = c("carb", "wt") ) ) expect_identical( find_predictors(m1, component = "instruments"), list(instruments = c("carb", "wt")) ) expect_identical( find_predictors(m1, flatten = TRUE), c("gear", "cyl", "carb", "wt") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "mpg") }) test_that("get_response", { expect_equal(get_response(m1), mtcars$mpg) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("gear", "cyl", "carb", "wt")) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 32) expect_equal( colnames(get_data(m1)), c("mpg", "carb + wt", "gear", "cyl", "carb", "wt") ) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("mpg ~ gear + cyl"), instruments = as.formula("~carb + wt") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "mpg", conditional = c("gear", "cyl"), instruments = c("carb", "wt") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("mpg", "gear", "cyl", "carb", "wt") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 32) }) test_that("link_function", { expect_equal(link_function(m1)(.2), .2, tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), .2, tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("(Intercept)", "gear", "cyl")) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "gear", "cyl") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_warning(expect_null(find_algorithm(m1))) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "t-statistic") }) } insight/tests/testthat/test-survfit.R0000644000175000017500000000373114122064326017661 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("survival")) { m1 <- survfit(Surv(time, status) ~ sex + age + ph.ecog, data = lung) test_that("model_info", { expect_true(model_info(m1)$is_logit) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("sex", "age", "ph.ecog"))) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "Surv(time, status)") expect_identical(find_response(m1, combine = FALSE), c("time", "status")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 227) expect_equal( colnames(get_data(m1)), c("time", "status", "age", "sex", "ph.ecog") ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula( "Surv(time, status) ~ sex + age + ph.ecog" )), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal(find_variables(m1), list( response = c("time", "status"), conditional = c("sex", "age", "ph.ecog") )) expect_equal( find_variables(m1, flatten = TRUE), 
c("time", "status", "sex", "age", "ph.ecog") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 227) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "Surv(time, status)", conditional = c("sex", "age", "ph.ecog") ) ) }) test_that("find_statistic", { expect_null(find_statistic(m1)) }) } insight/tests/testthat/test-bigglm.R0000644000175000017500000000701014122064326017412 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("insight") && requiet("glmmTMB") && requiet("biglm")) { data(Salamanders) Salamanders$cover <- abs(Salamanders$cover) m1 <- bigglm(count ~ mined + log(cover) + sample, family = poisson(), data = Salamanders ) test_that("model_info", { expect_true(model_info(m1)$is_poisson) expect_true(model_info(m1)$is_count) expect_false(model_info(m1)$is_negbin) expect_false(model_info(m1)$is_binomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("mined", "cover", "sample"))) expect_identical( find_predictors(m1, flatten = TRUE), c("mined", "cover", "sample") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "count") }) test_that("get_response", { expect_equal(get_response(m1), Salamanders$count) }) test_that("get_predictors", { expect_equal( colnames(get_predictors(m1)), c("mined", "cover", "sample") ) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), exp(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 644) expect_equal( colnames(get_data(m1)), c( "site", "mined", "cover", "sample", "DOP", "Wtemp", "DOY", "spp", "count" ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("count ~ mined + log(cover) + sample")), ignore_attr = TRUE ) }) test_that("find_variables", { expect_equal( find_variables(m1), list( response = "count", conditional = c("mined", "cover", "sample") ) ) expect_equal( find_variables(m1, flatten = TRUE), c("count", "mined", "cover", "sample") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 644) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "minedno", "log(cover)", "sample") ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "minedno", "log(cover)", "sample") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "count", conditional = c("mined", "log(cover)", "sample") ) ) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } } insight/tests/testthat/test-afex_aov.R0000644000175000017500000001107514122064326017747 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("afex")) { data(obk.long, package = "afex") obk.long$treatment <- as.character(obk.long$treatment) obk.long$phase <- as.character(obk.long$phase) Mc <- 
afex::aov_car(value ~ treatment * gender + age + Error(id / (phase * hour)), factorize = FALSE, data = obk.long, include_aov = FALSE ) Mc2 <- afex::aov_car(value ~ treatment * gender + exp(age) + Error(id / (phase * hour)), factorize = FALSE, data = obk.long, include_aov = FALSE ) M <- afex::aov_car(value ~ treatment * gender + Error(id / (phase * hour)), data = obk.long, include_aov = FALSE ) B <- afex::aov_car(value ~ treatment * gender + Error(id), data = obk.long, include_aov = FALSE ) W <- afex::aov_car(value ~ Error(id / (phase * hour)), data = obk.long, include_aov = FALSE ) mods <- list(Mc, Mc2, M, B, W) test_that("afex_aov: afex", { expect_equal(unique(unlist(sapply(mods, model_name))), "afex_aov") expect_equal(unique(unlist(sapply(mods, find_algorithm))), "OLS") expect_equal(unique(unlist(sapply(mods, find_statistic))), "F-statistic") expect_null(unique(unlist(sapply(mods, find_offset)))) expect_null(unique(unlist(sapply(mods, find_random_slopes)))) expect_null(unique(unlist(sapply(mods, find_smooth)))) expect_null(unique(unlist(sapply(mods, find_weights)))) expect_null(unique(unlist(sapply(mods, get_call)))) expect_null(unique(unlist(sapply(mods, get_weights)))) expect_null(unique(unlist(suppressWarnings(sapply(mods, get_variance))))) expect_true(unique(sapply(mods, all_models_equal))) expect_true(unique(sapply(mods, has_intercept))) expect_true(unique(sapply(mods, is_model))) expect_true(unique(sapply(mods, is_model_supported))) expect_false(unique(sapply(mods, is_gam_model))) # expect_false(unique(sapply(mods, is_multivariate))) expect_false(unique(sapply(mods, is_nullmodel))) # expect_equal(get_family(Mc2), gaussian()) expect_equal(link_function(Mc2), gaussian()$linkfun) expect_equal(link_inverse(Mc2), gaussian()$linkinv) }) test_that("afex_aov: model values", { expect_equal(suppressWarnings(sapply(mods, get_auxiliary)), c(1.746, 1.768, 1.764, 1.233, 2.075), tolerance = 0.01 ) expect_equal(suppressWarnings(sapply(mods, get_df)), c(135, 135, 150, 10, 225), tolerance = 0.01 ) expect_equal(sapply(mods, get_loglikelihood), c(-411.04, -414.088, -431.688, -22.295, -517.397), tolerance = 0.01 ) expect_equal(suppressWarnings(sapply(mods, get_sigma)), c(1.746, 1.768, 1.764, 1.233, 2.075), tolerance = 0.01 ) expect_equal(sapply(mods, n_obs), c(240, 240, 240, 16, 240), tolerance = 0.01 ) expect_equal(sapply(mods, n_parameters), c(105, 105, 90, 6, 15), tolerance = 0.01 ) expect_equal(sapply(mods, is_mixed_model), c(TRUE, TRUE, TRUE, FALSE, TRUE), tolerance = 0.01 ) expect_equal(sapply(mods, get_deviance), c(411.603, 422.17, 467, 15.204, 969.125), tolerance = 0.01 ) }) test_that("afex_aov: formula and parameters", { # find_formula expect_equal( find_terms(Mc2), list( response = "value", conditional = c("phase", "hour", "treatment", "gender", "exp(age)"), error = "Error(id/(phase * hour))" ) ) expect_equal(length(find_interactions(Mc2)$conditional), 14L) expect_equal( find_variables(Mc2), list( response = "value", fixed = c("treatment", "gender", "age", "phase", "hour"), random = "id" ) ) expect_equal( find_predictors(Mc2, effects = "all"), list( fixed = c("treatment", "gender", "age", "phase", "hour"), random = "id" ) ) expect_equal( find_random(Mc2), list(random = "id") ) expect_equal(find_response(Mc2), "value") }) test_that("afex_aov: formula and parameters", { expect_equal(dim(get_data(Mc2)), c(240, 7)) expect_equal(dim(get_statistic(Mc2)), c(19, 2)) expect_equal(dim(get_modelmatrix(Mc2)), c(16, 7)) expect_equal(length(find_parameters(Mc2)), 15L) 
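# added note (hedged, not from the original file): the 15 entries appear to
# correspond to the 15 within-subject cells of the id/(phase * hour) error
# design (3 phases x 5 hours); the factor 7 in the dimension checks below
# matches the 7 between-subjects design columns seen in get_modelmatrix()
# above (16 x 7).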
expect_equal(length(get_intercept(Mc2)), 15L) expect_equal(dim(get_parameters(Mc2)), c(15 * 7, 3)) expect_equal(dim(get_varcov(Mc2)), c(15 * 7, 15 * 7)) expect_equal(length(get_predicted(Mc2)), n_obs(Mc2)) expect_equal(length(get_residuals(Mc2)), n_obs(Mc2)) }) } insight/tests/testthat/test-emmeans.R0000644000175000017500000000152314131014371017574 0ustar nileshnileshif (requiet("emmeans") && requiet("insight") && requiet("testthat")) { test_that("emmeans", { m <- glm(am ~ factor(cyl), family = binomial(), data = mtcars ) EList <- emmeans::emmeans(m, pairwise ~ cyl, type = "resp") E <- emmeans::emmeans(m, ~cyl, type = "resp") C <- emmeans::contrast(E, method = "pairwise") expect_equal(find_statistic(EList), "z-statistic") expect_equal(get_statistic(EList)$Statistic, c(1.449, -0.377, -2.346, 1.243, 2.717, 1.393), tolerance = 0.001) expect_equal(get_statistic(EList)$Statistic[1:3], get_statistic(E)$Statistic, tolerance = 0.001) expect_equal(get_statistic(EList)$Statistic[4:6], get_statistic(C)$Statistic, tolerance = 0.001) expect_equal(get_parameters(EList)$Estimate, c(0.727, 0.429, 0.143, 3.556, 16, 4.5), tolerance = 0.001) }) } insight/tests/testthat/test-gmnl.R0000644000175000017500000000407614122064326017117 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("gmnl") && requiet("mlogit") && requiet("MASS")) { data(housing, package = "MASS") dat <- mlogit.data(housing, choice = "Sat", shape = "wide") void <- capture.output( m1 <- gmnl(Sat ~ Infl + Type + Cont | 1, data = dat, model = "smnl", R = 100 ) ) test_that("model_info", { expect_false(model_info(m1)$is_ordinal) expect_true(model_info(m1)$is_multinomial) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("Infl", "Type", "Cont"))) expect_identical( find_predictors(m1, flatten = TRUE), c("Infl", "Type", "Cont") ) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "Sat") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 216) expect_equal(colnames(get_data(m1)), c("Sat", "Infl", "Type", "Cont")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "Sat", conditional = c("Infl", "Type", "Cont", "1") )) expect_equal( find_terms(m1, flatten = TRUE), c("Sat", "Infl", "Type", "Cont", "1") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 72) }) test_that("find_variables", { expect_equal(find_variables(m1), list( response = "Sat", conditional = c("Infl", "Type", "Cont") )) expect_equal( find_variables(m1, flatten = TRUE), c("Sat", "Infl", "Type", "Cont") ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-MCMCglmm.R0000644000175000017500000000611114122064326017546 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (!osx && requiet("testthat") && requiet("insight") && requiet("MCMCglmm")) { data(PlodiaPO) m1 <- MCMCglmm( PO ~ plate, random = ~FSfamily, data = PlodiaPO, verbose = FALSE, nitt = 1300, burnin = 300, thin = 1 ) test_that("model_info", { 
expect_true(model_info(m1)$is_mixed) expect_true(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = "plate")) expect_identical(find_predictors(m1, flatten = TRUE), "plate") expect_identical( find_predictors(m1, effects = "random"), list(random = "FSfamily") ) }) test_that("find_random", { expect_equal(find_random(m1), list(random = "FSfamily")) }) test_that("get_random", { expect_equal(get_random(m1), data.frame(FSfamily = PlodiaPO$FSfamily)) }) test_that("find_response", { expect_identical(find_response(m1), "PO") }) test_that("get_response", { expect_equal(get_response(m1), PlodiaPO$PO) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), "plate") }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.5), .5, tolerance = 1e-1) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 511) expect_equal(colnames(get_data(m1)), c("FSfamily", "PO", "plate")) }) test_that("find_formula", { expect_length(find_formula(m1), 2) expect_equal( find_formula(m1), list( conditional = as.formula("PO ~ plate"), random = as.formula("~FSfamily") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "PO", conditional = "plate", random = "FSfamily" ) ) expect_equal(find_terms(m1, flatten = TRUE), c("PO", "plate", "FSfamily")) }) test_that("n_obs", { expect_null(n_obs(m1)) }) test_that("linkfun", { expect_equal(link_function(m1)(.5), .5, tolerance = 1e-1) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "plate"), random = "FSfamily" ) ) expect_equal(nrow(get_parameters(m1, summary = TRUE)), 2) expect_equal(nrow(get_parameters(m1, summary = FALSE)), 1000) expect_equal(get_parameters(m1, summary = TRUE)$Parameter, c("(Intercept)", "plate")) expect_equal(colnames(get_parameters(m1, summary = FALSE)), c("(Intercept)", "plate")) expect_equal( get_parameters(m1, effects = "random", summary = TRUE)$Parameter, "FSfamily" ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_statistic", { expect_null(find_statistic(m1)) }) } insight/tests/testthat/test-coxph.R0000644000175000017500000000576114122064326017305 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("survival")) { lung <- subset(lung, subset = ph.ecog %in% 0:2) lung$sex <- factor(lung$sex, labels = c("male", "female")) lung$ph.ecog <- factor(lung$ph.ecog, labels = c("good", "ok", "limited")) m1 <- coxph(Surv(time, status) ~ sex + age + ph.ecog, data = lung) test_that("model_info", { expect_true(model_info(m1)$is_logit) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("sex", "age", "ph.ecog"))) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_response", { expect_identical(find_response(m1), "Surv(time, status)") expect_identical(find_response(m1, combine = FALSE), c("time", "status")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 226) expect_equal( colnames(get_data(m1)), c( "time", "status", "Surv(time, status)", "sex", "age", "ph.ecog" ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula( "Surv(time, status) ~ sex + age + ph.ecog" )), ignore_attr = TRUE ) }) test_that("find_variables", { 
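# In contrast to find_terms(), find_variables() splits the Surv() response
# into its underlying variables (time, status).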
expect_equal(find_variables(m1), list( response = c("time", "status"), conditional = c("sex", "age", "ph.ecog") )) expect_equal( find_variables(m1, flatten = TRUE), c("time", "status", "sex", "age", "ph.ecog") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 226) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("sexfemale", "age", "ph.ecogok", "ph.ecoglimited") ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("sexfemale", "age", "ph.ecogok", "ph.ecoglimited") ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "Surv(time, status)", conditional = c("sex", "age", "ph.ecog") ) ) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) if (requiet("JM")) { data("aids", package = "JM") m <- coxph(Surv(start, stop, event) ~ CD4, data = aids) test_that("coxph triple response", { expect_equal(colnames(get_data(m)), c("start", "stop", "event", "Surv(start, stop, event)", "CD4")) expect_equal(find_variables(m), list(response = c("start", "stop", "event"), conditional = "CD4")) }) } } insight/tests/testthat/test-rms.R0000644000175000017500000000466614122064326016770 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("rms")) { data(mtcars) m1 <- lrm(am ~ mpg + gear, data = mtcars) test_that("model_info", { expect_true(model_info(m1)$is_binomial) expect_true(model_info(m1)$is_logit) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("mpg", "gear"))) expect_identical(find_predictors(m1, flatten = TRUE), c("mpg", "gear")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "am") }) test_that("get_response", { expect_equal(get_response(m1), mtcars$am) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("mpg", "gear")) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 32) expect_equal(colnames(get_data(m1)), c("am", "mpg", "gear")) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list(conditional = as.formula("am ~ mpg + gear")), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal(find_terms(m1), list( response = "am", conditional = c("mpg", "gear") )) expect_equal(find_terms(m1, flatten = TRUE), c("am", "mpg", "gear")) }) test_that("n_obs", { expect_equal(n_obs(m1), 32) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) }) test_that("linkinverse", { expect_false(is.null(link_inverse(m1))) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list(conditional = c("Intercept", "mpg", "gear")) ) expect_equal(nrow(get_parameters(m1)), 3) expect_equal( get_parameters(m1)$Parameter, c("Intercept", "mpg", "gear") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "ML")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } 
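# Illustrative sketch (kept as comments, not executed by the test suite):
# how the accessors checked above are typically used interactively on an
# rms::lrm fit; the indicated results mirror the expectations tested here.
# m <- rms::lrm(am ~ mpg + gear, data = mtcars)
# insight::model_info(m)$is_logit          # TRUE
# insight::find_parameters(m)$conditional  # "Intercept" "mpg" "gear"
# insight::n_obs(m)                        # 32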
insight/tests/testthat/test-find_smooth.R0000644000175000017500000000400214164344622020466 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (requiet("testthat") && requiet("insight") && requiet("mgcv") && requiet("gamm4") && requiet("rstanarm") && !osx) { set.seed(2) ## simulate some data... void <- capture.output( dat <- mgcv::gamSim(1, n = 400, dist = "normal", scale = 2) ) bt <- mgcv::gam(y ~ te(x0, x1, k = 7) + s(x2) + s(x3), data = dat, method = "REML" ) test_that("find_smooth - gam", { expect_equal(find_smooth(bt), list(smooth_terms = c("te(x0, x1, k = 7)", "s(x2)", "s(x3)"))) expect_equal(find_smooth(bt, flatten = TRUE), c("te(x0, x1, k = 7)", "s(x2)", "s(x3)")) }) test_that("find_smooth - mgcv::gamm", { model <- mgcv::gamm(Petal.Length ~ Petal.Width + s(Sepal.Length), random = list(Species = ~1), data = iris) expect_equal(find_smooth(model, flatten = TRUE), "s(Sepal.Length)") }) test_that("find_smooth - gamm4", { model <- gamm4::gamm4(Petal.Length ~ Petal.Width + s(Sepal.Length), random = ~ (1 | Species), data = iris) expect_equal(find_smooth(model, flatten = TRUE), "s(Sepal.Length)") }) .runStanTest <- Sys.getenv("RunAllinsightStanTests") == "yes" if (.runStanTest) { test_that("find_smooth - stan_gamm4", { model <- suppressWarnings( rstanarm::stan_gamm4( Petal.Length ~ Petal.Width + s(Sepal.Length), random = ~ (1 | Species), data = iris, iter = 100, chains = 1, refresh = 0 ) ) expect_equal(find_smooth(model, flatten = TRUE), "s(Sepal.Length)") }) } # test_that("find_smooth - brms", { # model <- brms::brm(Petal.Length ~ Petal.Width + s(Sepal.Length) + (1|Species), data = iris, iter=100, chains=1, refresh=0) # expect_equal(find_smooth(model, flatten = TRUE), "s(Sepal.Length)") # }) } insight/tests/testthat/test-glmrob_base.R0000644000175000017500000000651214142012215020423 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("robustbase")) { data(carrots) m1 <- glmrob( cbind(success, total - success) ~ logdose + block, family = binomial, data = carrots, method = "Mqle", control = glmrobMqle.control(tcc = 1.2) ) test_that("model_info", { expect_true(model_info(m1)$is_binomial) expect_false(model_info(m1)$is_linear) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("logdose", "block"))) expect_identical(find_predictors(m1, flatten = TRUE), c("logdose", "block")) expect_null(find_predictors(m1, effects = "random")) }) test_that("find_random", { expect_null(find_random(m1)) }) test_that("get_random", { expect_warning(get_random(m1)) }) test_that("find_response", { expect_identical(find_response(m1), "cbind(success, total - success)") expect_identical(find_response(m1, combine = FALSE), c("success", "total")) }) test_that("get_response", { expect_equal(get_response(m1), carrots[, c("success", "total")]) }) test_that("get_predictors", { expect_equal(colnames(get_predictors(m1)), c("logdose", "block")) }) test_that("get_data", { expect_equal(nrow(get_data(m1)), 24) expect_equal( colnames(get_data(m1)), c( "cbind(success, total - success)", "logdose", "block", "success", "total" ) ) }) test_that("find_formula", { expect_length(find_formula(m1), 1) expect_equal( find_formula(m1), list( conditional = as.formula("cbind(success, total - success) ~ logdose + block") ), ignore_attr = TRUE ) }) test_that("find_terms", { expect_equal( find_terms(m1), list( response = "cbind(success, 
total - success)", conditional = c("logdose", "block") ) ) expect_equal( find_terms(m1, flatten = TRUE), c("cbind(success, total - success)", "logdose", "block") ) }) test_that("find_variables", { expect_equal(find_variables(m1), list( response = c("success", "total"), conditional = c("logdose", "block") )) expect_equal( find_variables(m1, flatten = TRUE), c("success", "total", "logdose", "block") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 24) expect_equal(n_obs(m1, disaggregate = TRUE), 900) }) test_that("link_function", { expect_equal(link_function(m1)(.2), qlogis(.2), tolerance = 1e-5) }) test_that("link_inverse", { expect_equal(link_inverse(m1)(.2), plogis(.2), tolerance = 1e-5) }) test_that("find_parameters", { expect_equal( find_parameters(m1), list( conditional = c("(Intercept)", "logdose", "blockB2", "blockB3") ) ) expect_equal(nrow(get_parameters(m1)), 4) expect_equal( get_parameters(m1)$Parameter, c("(Intercept)", "logdose", "blockB2", "blockB3") ) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) }) test_that("find_algorithm", { expect_equal(find_algorithm(m1), list(algorithm = "Mqle")) }) test_that("find_statistic", { expect_identical(find_statistic(m1), "z-statistic") }) } insight/tests/testthat/test-brms.R0000644000175000017500000006352414151371005017125 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllinsightTests") == "yes" if (.runThisTest) { if (suppressWarnings(requiet("testthat") && requiet("insight") && requiet("brms"))) { # Model fitting ----------------------------------------------------------- m1 <- suppressWarnings(insight::download_model("brms_mixed_6")) m2 <- insight::download_model("brms_mv_4") m3 <- insight::download_model("brms_2") m4 <- insight::download_model("brms_zi_3") m5 <- insight::download_model("brms_mv_5") m6 <- insight::download_model("brms_corr_re1") m7 <- suppressWarnings(insight::download_model("brms_mixed_8")) m8 <- insight::download_model("brms_ordinal_1") # Tests ------------------------------------------------------------------- test_that("get_predicted.brmsfit: ordinal dv", { skip_if_not_installed("bayestestR") pred1 <- get_predicted(m8) pred2 <- get_predicted(m8, ci_method = "hdi") expect_true(inherits(pred1, "get_predicted")) expect_true(inherits(pred1, "data.frame")) expect_true(all(c("Row", "Response") %in% colnames(pred1))) # ci_method changes intervals but not se or predicted pred1 <- data.frame(pred1) pred2 <- data.frame(pred2) expect_equal(pred1$Row, pred2$Row) expect_equal(pred1$Response, pred2$Response) expect_equal(pred1$Predicted, pred2$Predicted) expect_equal(pred1$SE, pred2$SE) expect_false(mean(pred1$CI_low == pred2$CI_low) > 0.1) # most CI bounds are different expect_false(mean(pred1$CI_high == pred2$CI_high) > 0.1) # most CI bounds are different # compare to manual predictions pred3 <- get_predicted(m8, centrality_function = stats::median) manual <- rstantools::posterior_epred(m8) manual <- apply(manual[, , 1], 2, median) expect_equal(pred3$Predicted[1:32], manual) manual <- rstantools::posterior_epred(m8) manual <- apply(manual[, , 1], 2, mean) expect_equal(pred1$Predicted[1:32], manual) }) test_that("find_statistic", { expect_null(find_statistic(m1)) expect_null(find_statistic(m2)) expect_null(find_statistic(m3)) expect_null(find_statistic(m4)) expect_null(find_statistic(m5)) }) test_that("n_parameters", { expect_equal(n_parameters(m1), 65) expect_equal(n_parameters(m1, effects = "fixed"), 5) }) test_that("model_info", { expect_true(model_info(m3)$is_trial) 
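# m5 is a multivariate model, so model_info() returns one list per response;
# index the first element before checking its flags.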
expect_true(model_info(m5)[[1]]$is_zero_inflated) expect_true(model_info(m5)[[1]]$is_bayesian) }) test_that("clean_names", { expect_identical( clean_names(m1), c("count", "Age", "Base", "Trt", "patient") ) expect_identical( clean_names(m2), c( "Sepal.Length", "Sepal.Width", "Petal.Length", "Species" ) ) expect_identical(clean_names(m3), c("r", "n", "treat", "c2")) expect_identical( clean_names(m4), c("count", "child", "camper", "persons") ) expect_identical( clean_names(m5), c( "count", "count2", "child", "camper", "persons", "livebait" ) ) }) test_that("find_predictors", { expect_identical(find_predictors(m1), list(conditional = c("Age", "Base", "Trt"))) expect_identical( find_predictors(m1, flatten = TRUE), c("Age", "Base", "Trt") ) expect_identical( find_predictors(m1, effects = "all", component = "all"), list( conditional = c("Age", "Base", "Trt"), random = "patient" ) ) expect_identical( find_predictors( m1, effects = "all", component = "all", flatten = TRUE ), c("Age", "Base", "Trt", "patient") ) expect_identical( find_predictors(m2), list( SepalLength = list(conditional = c( "Petal.Length", "Sepal.Width", "Species" )), SepalWidth = list(conditional = "Species") ) ) expect_identical( find_predictors(m2, flatten = TRUE), c("Petal.Length", "Sepal.Width", "Species") ) expect_identical(find_predictors(m3), list(conditional = c("treat", "c2"))) expect_identical( find_predictors(m4), list( conditional = c("child", "camper"), zero_inflated = c("child", "camper") ) ) expect_identical( find_predictors(m4, effects = "random"), list(random = "persons", zero_inflated_random = "persons") ) expect_identical(find_predictors(m4, flatten = TRUE), c("child", "camper")) expect_identical( find_predictors(m5), list( count = list( conditional = c("child", "camper"), zero_inflated = "camper" ), count2 = list( conditional = c("child", "livebait"), zero_inflated = "child" ) ) ) }) test_that("find_response", { expect_equal(find_response(m1, combine = TRUE), "count") expect_equal( find_response(m2, combine = TRUE), c(SepalLength = "Sepal.Length", SepalWidth = "Sepal.Width") ) expect_equal(find_response(m3, combine = TRUE), c("r", "n")) expect_equal(find_response(m1, combine = FALSE), "count") expect_equal( find_response(m2, combine = FALSE), c(SepalLength = "Sepal.Length", SepalWidth = "Sepal.Width") ) expect_equal(find_response(m3, combine = FALSE), c("r", "n")) expect_equal(find_response(m4, combine = FALSE), "count") expect_equal( find_response(m5, combine = TRUE), c(count = "count", count2 = "count2") ) }) test_that("get_response", { expect_length(get_response(m1), 236) expect_equal(ncol(get_response(m2)), 2) expect_equal( colnames(get_response(m2)), c("Sepal.Length", "Sepal.Width") ) expect_equal(ncol(get_response(m3)), 2) expect_equal(colnames(get_response(m3)), c("r", "n")) expect_length(get_response(m4), 250) expect_equal(colnames(get_response(m5)), c("count", "count2")) }) test_that("find_variables", { expect_identical( find_variables(m1), list( response = "count", conditional = c("Age", "Base", "Trt"), random = "patient" ) ) expect_identical( find_variables(m6), list( response = "y", conditional = "x", random = "id", sigma = "x", sigma_random = "id" ) ) expect_identical( find_variables(m1, effects = "fixed"), list( response = "count", conditional = c("Age", "Base", "Trt") ) ) expect_null(find_variables(m1, component = "zi")) expect_identical( find_variables(m2), list( response = c(SepalLength = "Sepal.Length", SepalWidth = "Sepal.Width"), SepalLength = list(conditional = c( "Petal.Length", 
"Sepal.Width", "Species" )), SepalWidth = list(conditional = "Species") ) ) expect_identical( find_variables(m2, flatten = TRUE), c( "Sepal.Length", "Sepal.Width", "Petal.Length", "Species" ) ) expect_identical(find_variables(m3), list( response = c("r", "n"), conditional = c("treat", "c2") )) expect_identical( find_variables(m4), list( response = "count", conditional = c("child", "camper"), random = "persons", zero_inflated = c("child", "camper"), zero_inflated_random = "persons" ) ) expect_identical( find_variables(m4, flatten = TRUE), c("count", "child", "camper", "persons") ) }) test_that("n_obs", { expect_equal(n_obs(m1), 236) expect_equal(n_obs(m2), 150) expect_equal(n_obs(m3), 10) expect_equal(n_obs(m4), 250) expect_equal(n_obs(m5), 250) }) test_that("find_random", { expect_equal(find_random(m5), list( count = list( random = "persons", zero_inflated_random = "persons" ), count2 = list( random = "persons", zero_inflated_random = "persons" ) )) expect_equal(find_random(m5, flatten = TRUE), "persons") expect_equal(find_random(m6, flatten = TRUE), "id") }) test_that("get_random", { zinb <- get_data(m4) expect_equal(get_random(m4), zinb[, "persons", drop = FALSE]) }) test_that("get_data", { d <- get_data(m6) expect_equal(nrow(d), 200) expect_equal(ncol(d), 3) }) test_that("find_paramaters", { expect_equal( find_parameters(m1), list( conditional = c( "b_Intercept", "b_Age", "b_Base", "b_Trt1", "b_Base:Trt1" ), random = c(sprintf("r_patient[%i,Intercept]", 1:59), "sd_patient__Intercept") ) ) expect_equal( find_parameters(m2), structure(list( SepalLength = list( conditional = c( "b_SepalLength_Intercept", "b_SepalLength_Petal.Length", "b_SepalLength_Sepal.Width", "b_SepalLength_Speciesversicolor", "b_SepalLength_Speciesvirginica" ), sigma = "sigma_SepalLength" ), SepalWidth = list( conditional = c( "b_SepalWidth_Intercept", "b_SepalWidth_Speciesversicolor", "b_SepalWidth_Speciesvirginica" ), sigma = "sigma_SepalWidth" ) ), "is_mv" = "1" ) ) expect_equal( find_parameters(m4), list( conditional = c("b_Intercept", "b_child", "b_camper"), random = c(sprintf("r_persons[%i,Intercept]", 1:4), "sd_persons__Intercept"), zero_inflated = c("b_zi_Intercept", "b_zi_child", "b_zi_camper"), zero_inflated_random = c(sprintf("r_persons__zi[%i,Intercept]", 1:4), "sd_persons__zi_Intercept") ) ) expect_equal( find_parameters(m5, effects = "all"), structure(list( count = list( conditional = c("b_count_Intercept", "b_count_child", "b_count_camper"), random = c(sprintf("r_persons__count[%i,Intercept]", 1:4), "sd_persons__count_Intercept"), zero_inflated = c("b_zi_count_Intercept", "b_zi_count_camper"), zero_inflated_random = c(sprintf("r_persons__zi_count[%i,Intercept]", 1:4), "sd_persons__zi_count_Intercept") ), count2 = list( conditional = c( "b_count2_Intercept", "b_count2_child", "b_count2_livebait" ), random = c(sprintf("r_persons__count2[%i,Intercept]", 1:4), "sd_persons__count2_Intercept"), zero_inflated = c("b_zi_count2_Intercept", "b_zi_count2_child"), zero_inflated_random = c(sprintf("r_persons__zi_count2[%i,Intercept]", 1:4), "sd_persons__zi_count2_Intercept") ) ), "is_mv" = "1" ) ) }) test_that("find_paramaters", { expect_equal( colnames(get_parameters(m4)), c( "b_Intercept", "b_child", "b_camper", "b_zi_Intercept", "b_zi_child", "b_zi_camper" ) ) expect_equal( colnames(get_parameters(m4, component = "zi")), c("b_zi_Intercept", "b_zi_child", "b_zi_camper") ) expect_equal( colnames(get_parameters(m4, effects = "all")), c( "b_Intercept", "b_child", "b_camper", "r_persons[1,Intercept]", 
"r_persons[2,Intercept]", "r_persons[3,Intercept]", "r_persons[4,Intercept]", "sd_persons__Intercept", "b_zi_Intercept", "b_zi_child", "b_zi_camper", "r_persons__zi[1,Intercept]", "r_persons__zi[2,Intercept]", "r_persons__zi[3,Intercept]", "r_persons__zi[4,Intercept]", "sd_persons__zi_Intercept" ) ) expect_equal( colnames(get_parameters(m4, effects = "random", component = "conditional")), c( "r_persons[1,Intercept]", "r_persons[2,Intercept]", "r_persons[3,Intercept]", "r_persons[4,Intercept]", "sd_persons__Intercept" ) ) expect_equal( colnames(get_parameters(m5, effects = "random", component = "conditional")), c( "r_persons__count[1,Intercept]", "r_persons__count[2,Intercept]", "r_persons__count[3,Intercept]", "r_persons__count[4,Intercept]", "sd_persons__count_Intercept", "r_persons__count2[1,Intercept]", "r_persons__count2[2,Intercept]", "r_persons__count2[3,Intercept]", "r_persons__count2[4,Intercept]", "sd_persons__count2_Intercept" ) ) expect_equal( colnames(get_parameters(m5, effects = "all", component = "all")), c( "b_count_Intercept", "b_count_child", "b_count_camper", "r_persons__count[1,Intercept]", "r_persons__count[2,Intercept]", "r_persons__count[3,Intercept]", "r_persons__count[4,Intercept]", "sd_persons__count_Intercept", "b_zi_count_Intercept", "b_zi_count_camper", "r_persons__zi_count[1,Intercept]", "r_persons__zi_count[2,Intercept]", "r_persons__zi_count[3,Intercept]", "r_persons__zi_count[4,Intercept]", "sd_persons__zi_count_Intercept", "b_count2_Intercept", "b_count2_child", "b_count2_livebait", "r_persons__count2[1,Intercept]", "r_persons__count2[2,Intercept]", "r_persons__count2[3,Intercept]", "r_persons__count2[4,Intercept]", "sd_persons__count2_Intercept", "b_zi_count2_Intercept", "b_zi_count2_child", "r_persons__zi_count2[1,Intercept]", "r_persons__zi_count2[2,Intercept]", "r_persons__zi_count2[3,Intercept]", "r_persons__zi_count2[4,Intercept]", "sd_persons__zi_count2_Intercept" ) ) }) test_that("linkfun", { expect_false(is.null(link_function(m1))) expect_length(link_function(m2), 2) expect_false(is.null(link_function(m3))) expect_false(is.null(link_function(m4))) expect_length(link_function(m5), 2) }) test_that("linkinv", { expect_false(is.null(link_inverse(m1))) expect_length(link_inverse(m2), 2) expect_false(is.null(link_inverse(m3))) expect_false(is.null(link_inverse(m4))) expect_length(link_inverse(m2), 2) }) test_that("is_multivariate", { expect_false(is_multivariate(m1)) expect_true(is_multivariate(m2)) expect_false(is_multivariate(m3)) expect_false(is_multivariate(m4)) expect_true(is_multivariate(m5)) }) test_that("find_terms", { expect_equal( find_terms(m2), list( SepalLength = list( response = "Sepal.Length", conditional = c("Petal.Length", "Sepal.Width", "Species") ), SepalWidth = list( response = "Sepal.Width", conditional = "Species" ) ) ) }) test_that("find_algorithm", { expect_equal( find_algorithm(m1), list( algorithm = "sampling", chains = 1, iterations = 500, warmup = 250 ) ) }) test_that("get_priors", { expect_equal( get_priors(m7), data.frame( Parameter = c( "b_Intercept", "b_Age", "b_Base", "b_Trt1", "b_Base:Trt1", "sd_patient__Intercept", "sd_patient__Age", "cor_patient__Intercept__Age" ), Distribution = c( "student_t", "student_t", "student_t", "student_t", "student_t", "cauchy", "cauchy", "lkj" ), Location = c(1.4, 0, 0, 0, 0, NA, NA, 1), Scale = c(2.5, 10, 10, 10, 10, NA, NA, NA), df = c(3, 5, 5, 5, 5, NA, NA, NA), stringsAsFactors = FALSE ), ignore_attr = TRUE ) expect_equal( get_priors(m3), data.frame( Parameter = c("b_Intercept", 
"b_treat1", "b_c2", "b_treat1:c2"), Distribution = c("student_t", "uniform", "uniform", "uniform"), Location = c(0, NA, NA, NA), Scale = c(2.5, NA, NA, NA), df = c(3, NA, NA, NA), stringsAsFactors = FALSE ), ignore_attr = TRUE ) }) test_that("clean_parameters", { expect_equal( clean_parameters(m4), structure( list( Parameter = c( "b_Intercept", "b_child", "b_camper", "r_persons[1,Intercept]", "r_persons[2,Intercept]", "r_persons[3,Intercept]", "r_persons[4,Intercept]", "sd_persons__Intercept", "b_zi_Intercept", "b_zi_child", "b_zi_camper", "r_persons__zi[1,Intercept]", "r_persons__zi[2,Intercept]", "r_persons__zi[3,Intercept]", "r_persons__zi[4,Intercept]", "sd_persons__zi_Intercept" ), Effects = c( "fixed", "fixed", "fixed", "random", "random", "random", "random", "random", "fixed", "fixed", "fixed", "random", "random", "random", "random", "random" ), Component = c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ), Group = c( "", "", "", "Intercept: persons", "Intercept: persons", "Intercept: persons", "Intercept: persons", "SD/Cor: persons", "", "", "", "Intercept: persons", "Intercept: persons", "Intercept: persons", "Intercept: persons", "SD/Cor: persons" ), Cleaned_Parameter = c( "(Intercept)", "child", "camper", "persons.1", "persons.2", "persons.3", "persons.4", "(Intercept)", "(Intercept)", "child", "camper", "persons.1", "persons.2", "persons.3", "persons.4", "(Intercept)" ) ), class = c("clean_parameters", "data.frame"), row.names = c( NA, -16L ) ), ignore_attr = TRUE ) expect_equal( clean_parameters(m5), structure( list( Parameter = c( "b_count_Intercept", "b_count_child", "b_count_camper", "b_count2_Intercept", "b_count2_child", "b_count2_livebait", "r_persons__count[1,Intercept]", "r_persons__count[2,Intercept]", "r_persons__count[3,Intercept]", "r_persons__count[4,Intercept]", "sd_persons__count_Intercept", "r_persons__count2[1,Intercept]", "r_persons__count2[2,Intercept]", "r_persons__count2[3,Intercept]", "r_persons__count2[4,Intercept]", "sd_persons__count2_Intercept", "b_zi_count_Intercept", "b_zi_count_camper", "b_zi_count2_Intercept", "b_zi_count2_child", "r_persons__zi_count[1,Intercept]", "r_persons__zi_count[2,Intercept]", "r_persons__zi_count[3,Intercept]", "r_persons__zi_count[4,Intercept]", "sd_persons__zi_count_Intercept", "r_persons__zi_count2[1,Intercept]", "r_persons__zi_count2[2,Intercept]", "r_persons__zi_count2[3,Intercept]", "r_persons__zi_count2[4,Intercept]", "sd_persons__zi_count2_Intercept" ), Effects = c( "fixed", "fixed", "fixed", "fixed", "fixed", "fixed", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "fixed", "fixed", "fixed", "fixed", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random" ), Component = c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ), Group = c( "", "", "", "", "", "", "Intercept: persons", "Intercept: 
persons", "Intercept: persons", "Intercept: persons", "SD/Cor: persons", "Intercept: persons2", "Intercept: persons2", "Intercept: persons2", "Intercept: persons2", "SD/Cor: persons", "", "", "", "", "Intercept: persons", "Intercept: persons", "Intercept: persons", "Intercept: persons", "SD/Cor: persons", "Intercept: persons2", "Intercept: persons2", "Intercept: persons2", "Intercept: persons2", "SD/Cor: persons" ), Response = c( "count", "count", "count", "count2", "count2", "count2", "count", "count", "count", "count", "count", "count2", "count2", "count2", "count2", "count2", "count", "count", "count2", "count2", "count", "count", "count", "count", "count", "count2", "count2", "count2", "count2", "count2" ), Cleaned_Parameter = c( "(Intercept)", "child", "camper", "(Intercept)", "child", "livebait", "persons.1", "persons.2", "persons.3", "persons.4", "count_Intercept", "persons2.1", "persons2.2", "persons2.3", "persons2.4", "count2_Intercept", "(Intercept)", "camper", "(Intercept)", "child", "persons.1", "persons.2", "persons.3", "persons.4", "zi_count_Intercept", "persons2.1", "persons2.2", "persons2.3", "persons2.4", "zi_count2_Intercept" ) ), class = c("clean_parameters", "data.frame"), row.names = c(NA, -30L) ), ignore_attr = TRUE ) }) } } insight/tests/testthat/test-lmtest.R0000644000175000017500000000113514122064326017463 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("lmtest")) { data("Mandible", package = "lmtest") m <- lm(length ~ age, data = Mandible, subset = (age <= 28)) ct1 <- coeftest(m) ct2 <- coeftest(m, df = Inf) test_that("find_statistic", { expect_equal(find_statistic(ct1), "t-statistic") expect_equal(find_statistic(ct2), "z-statistic") }) test_that("get_statistic", { expect_equal(get_statistic(ct1)$Statistic, c(-12.24446, 37.16067), tolerance = 1e-3) expect_equal(get_statistic(ct2)$Statistic, c(-12.24446, 37.16067), tolerance = 1e-3) }) } insight/tests/testthat/test-data.frame.R0000644000175000017500000000046414122064326020161 0ustar nileshnileshif (requiet("testthat") && requiet("insight")) { data(iris) test_that("find_parameters", { expect_error(find_parameters(iris)) }) test_that("find_formula", { expect_error(find_formula(iris)) }) test_that("find_statistic", { expect_error(find_statistic(iris)) }) } insight/tests/testthat.R0000644000175000017500000000201214105430731015167 0ustar nileshnileshif (require("testthat")) { library(insight) is_dev_version <- length(strsplit(packageDescription("insight")$Version, "\\.")[[1]]) > 3 if (is_dev_version) { Sys.setenv("RunAllinsightTests" = "yes") } else { Sys.setenv("RunAllinsightTests" = "no") } si <- Sys.info() osx <- tryCatch( { if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) solaris <- tryCatch( { if (!is.null(si["sysname"])) { grepl("SunOS", si["sysname"], ignore.case = TRUE) } else { FALSE } }, error = function(e) { FALSE } ) # disable / enable if needed if (.Platform$OS.type == "unix" && is_dev_version) { Sys.setenv("RunAllinsightStanTests" = "yes") } else { Sys.setenv("RunAllinsightStanTests" = "no") } if (!osx && !solaris) { test_check("insight") } } insight/tests/spelling.R0000644000175000017500000000023314006757140015155 0ustar nileshnileshif (requireNamespace("spelling", quietly = TRUE)) { spelling::spell_check_test( vignettes = TRUE, error = FALSE, skip_on_cran = TRUE ) } insight/R/0000755000175000017500000000000014164615006012255 5ustar 
nileshnileshinsight/R/get_weights.R0000644000175000017500000000622414150126533014712 0ustar nileshnilesh#' @title Get the values from model weights #' @name get_weights #' #' @description Returns weighting variable of a model. #' #' @param x A fitted model. #' @param na_rm Logical, if `TRUE`, removes possible missing values. #' @param null_as_ones Logical, if `TRUE`, will return a vector of `1` #' if no weights were specified in the model (as if the weights were all set #' to 1). #' @param ... Currently not used. #' #' @return The weighting variable, or `NULL` if no weights were specified #' or if weights were 1. If the weighting variable should also be returned #' (instead of `NULL`), when all weights are set to 1 (i.e. no weighting), #' set `null_as_ones = TRUE`. #' #' @examples #' data(mtcars) #' set.seed(123) #' mtcars$weight <- rnorm(nrow(mtcars), 1, .3) #' #' # LMs #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars, weights = weight) #' get_weights(m) #' #' get_weights(lm(mpg ~ wt, data = mtcars), null_as_ones = TRUE) #' #' # GLMs #' m <- glm(vs ~ disp + mpg, data = mtcars, weights = weight, family = quasibinomial) #' get_weights(m) #' m <- glm(cbind(cyl, gear) ~ mpg, data = mtcars, weights = weight, family = binomial) #' get_weights(m) #' @export get_weights <- function(x, ...) { UseMethod("get_weights") } #' @rdname get_weights #' @export get_weights.default <- function(x, na_rm = FALSE, null_as_ones = FALSE, ...) { w <- NULL tryCatch( { w <- stats::weights(x, ...) }, error = function(e) { NULL }, warning = function(w) { NULL } ) if (is.null(w)) { tryCatch( { w <- stats::model.frame(x)[["(weights)"]] }, error = function(e) { NULL }, warning = function(w) { NULL } ) } if (is.null(w)) { tryCatch( { w <- .recover_data_from_environment(x)[[find_weights(x)]] }, error = function(e) { NULL }, warning = function(w) { NULL } ) } # if all weights are 1, set return value to NULL if (!is.null(w) && all(w == 1L)) { w <- NULL } if (!is.null(w) && anyNA(w) && isTRUE(na_rm)) { w <- stats::na.omit(w) } if (is.null(w) && isTRUE(null_as_ones)) { w <- rep.int(1, n_obs(x)) } w } #' @export get_weights.brmsfit <- function(x, na_rm = FALSE, null_as_ones = FALSE, ...) { w <- unique(find_weights(x)) if (!is.null(w)) { if (length(w) > 1) { return(get_data(x)[w]) } else { w <- get_data(x)[[w]] } } if (!is.null(w) && all(w == 1L)) { w <- NULL } if (!is.null(w) && anyNA(w) && isTRUE(na_rm)) { w <- stats::na.omit(w) } if (is.null(w) && null_as_ones) { w <- rep.int(1, n_obs(x)) } w } #' @export get_weights.btergm <- function(x, null_as_ones = FALSE, ...) { x@weights } #' @export get_weights.list <- function(x, na_rm = FALSE, null_as_ones = FALSE, ...) { # For GAMMs if ("gam" %in% names(x)) { get_weights(x$gam, na_rm = na_rm, null_as_ones = null_as_ones, ...) } else { stop("Cannot find weights in this object. Please an open an issue!") } } insight/R/get_varcov.R0000644000175000017500000005162514144234777014562 0ustar nileshnilesh#' @title Get variance-covariance matrix from models #' #' @description Returns the variance-covariance, as retrieved by #' `stats::vcov()`, but works for more model objects that probably #' don't provide a `vcov()`-method. #' @name get_varcov #' #' @param x A model. #' @param component Should the complete variance-covariance matrix of the model #' be returned, or only for specific model components only (like count or #' zero-inflated model parts)? Applies to models with zero-inflated component, #' or models with precision (e.g. `betareg`) component. 
`component` #' may be one of `"conditional"`, `"zi"`, `"zero-inflated"`, #' `"dispersion"`, `"precision"`, or `"all"`. May be abbreviated. #' Note that the *conditional* component is also called *count* or #' *mean* component, depending on the model. #' @param effects Should the complete variance-covariance matrix of the model #' be returned, or only for specific model parameters only? Currently only #' applies to models of class `mixor`. #' @param complete Logical, if `TRUE`, for `aov`, returns the full #' variance-covariance matrix. #' @param robust Logical, if `TRUE`, returns a robust variance-covariance matrix #' using sandwich estimation. #' @param verbose Toggle warnings. #' @param ... Currently not used. #' #' @note `get_varcov()` tries to return the nearest positive definite matrix #' in case of a negative variance-covariance matrix. #' #' @return The variance-covariance matrix, as `matrix`-object. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_varcov(m) #' @export get_varcov <- function(x, ...) { UseMethod("get_varcov") } # Default models ---------------------------------------------------- #' @rdname get_varcov #' @export get_varcov.default <- function(x, verbose = TRUE, ...) { vc <- suppressWarnings(stats::vcov(x)) .process_vcov(vc, verbose) } #' @export get_varcov.maxLik <- get_varcov.default #' @export get_varcov.HLfit <- get_varcov.default # mlm --------------------------------------------- #' @export get_varcov.mlm <- function(x, ...) { if (!is.null(x$weights)) { s <- summary(x)[[1L]] .get_weighted_varcov(x, s$cov.unscaled) } else { get_varcov.default(x) } } # models with special components --------------------------------------------- #' @rdname get_varcov #' @export get_varcov.betareg <- function(x, component = c("conditional", "precision", "all"), verbose = TRUE, ...) { component <- match.arg(component) vc <- switch(component, "conditional" = stats::vcov(object = x, model = "mean"), "precision" = stats::vcov(object = x, model = "precision"), stats::vcov(object = x) ) .process_vcov(vc, verbose) } #' @rdname get_varcov #' @export get_varcov.DirichletRegModel <- function(x, component = c("conditional", "precision", "all"), verbose = TRUE, ...) { component <- match.arg(component) if (x$parametrization == "common") { vc <- stats::vcov(x) } else { if (component == "conditional") { vc <- stats::vcov(x) keep <- grepl("^(?!\\(phi\\))", rownames(vc), perl = TRUE) vc <- vc[keep, keep, drop = FALSE] } else if (component == "precision") { vc <- stats::vcov(x) keep <- grepl("^\\(phi\\)", rownames(vc), perl = TRUE) vc <- vc[keep, keep, drop = FALSE] } else { vc <- stats::vcov(x) } } .process_vcov(vc, verbose) } #' @rdname get_varcov #' @export get_varcov.clm2 <- function(x, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) n_intercepts <- length(x$xi) n_location <- length(x$beta) n_scale <- length(x$zeta) vc <- stats::vcov(x) if (.is_negativ_matrix(vc)) { vc <- .fix_negative_matrix(vc) } range <- switch(component, "all" = 1:(n_scale + n_intercepts + n_location), "conditional" = 1:(n_intercepts + n_location), "scale" = (1 + n_intercepts + n_location):(n_scale + n_intercepts + n_location) ) vc <- vc[range, range, drop = FALSE] # fix possible missings due to rank deficient model matrix vc <- .fix_rank_deficiency(vc) .remove_backticks_from_matrix_names(as.matrix(vc)) } #' @export get_varcov.clmm2 <- get_varcov.clm2 #' @export get_varcov.glmx <- function(x, component = c("all", "conditional", "extra"), ...) 
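# glmx models carry extra (e.g. shape) parameters besides the conditional
# part; when a specific component is requested, the full vcov is subset to
# the matching parameter names.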
{ component <- match.arg(component) vc <- stats::vcov(object = x) if (component != "all") { keep <- match(find_parameters(x)[[component]], rownames(vc)) vc <- vc[keep, keep, drop = FALSE] } .process_vcov(vc) } #' @export get_varcov.pgmm <- function(x, component = c("conditional", "all"), ...) { component <- match.arg(component) vc <- stats::vcov(x) if (component != "all") { keep <- match(find_parameters(x)[[component]], rownames(vc)) vc <- vc[keep, keep, drop = FALSE] } .process_vcov(vc) } #' @export get_varcov.selection <- function(x, component = c("all", "selection", "outcome", "auxiliary"), ...) { component <- match.arg(component) vc <- stats::vcov(object = x) if (component != "all") { keep <- match(find_parameters(x)[[component]], rownames(vc)) vc <- vc[keep, keep, drop = FALSE] } # we can't check for rank-deficiency here... if (.is_negativ_matrix(vc)) { vc <- .fix_negative_matrix(vc) } .remove_backticks_from_matrix_names(as.matrix(vc)) } #' @export get_varcov.mvord <- function(x, component = c("all", "conditional", "thresholds", "correlation"), ...) { component <- match.arg(component) vc <- stats::vcov(x) if (component != "all") { fp <- find_parameters(x)[[component]] if (component == "thresholds") { fp <- gsub("\\s", "\\.", fp) } keep <- match(fp, rownames(vc)) vc <- vc[keep, keep, drop = FALSE] } .process_vcov(vc) } #' @export get_varcov.mjoint <- function(x, component = c("all", "conditional", "survival"), ...) { component <- match.arg(component) vc <- stats::vcov(x) keep <- match(find_parameters(x, flatten = TRUE, component = component), rownames(vc)) vc <- vc[keep, keep, drop = FALSE] .process_vcov(vc) } #' @export get_varcov.mhurdle <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) vc <- stats::vcov(x) # rownames(vc) <- gsub("^(h1|h2|h3)\\.(.*)", "\\2", rownames(vc)) # colnames(vc) <- rownames(vc) keep <- match(find_parameters(x, flatten = TRUE, component = component), rownames(vc)) vc <- vc[keep, keep, drop = FALSE] .process_vcov(vc) } #' @rdname get_varcov #' @export get_varcov.truncreg <- function(x, component = c("conditional", "all"), ...) { component <- match.arg(component) vc <- stats::vcov(x) if (component == "conditional") { vc <- vc[1:(nrow(vc) - 1), 1:(ncol(vc) - 1), drop = FALSE] } .process_vcov(vc) } #' @rdname get_varcov #' @export get_varcov.gamlss <- function(x, component = c("conditional", "all"), ...) { component <- match.arg(component) vc <- suppressWarnings(stats::vcov(x)) if (component == "conditional") { cond_pars <- length(find_parameters(x)$conditional) vc <- as.matrix(vc)[1:cond_pars, 1:cond_pars, drop = FALSE] } .process_vcov(vc) } # Zero-Inflated models ---------------------------------------------------- #' @rdname get_varcov #' @export get_varcov.hurdle <- function(x, component = c("conditional", "zero_inflated", "zi", "all"), ...) { component <- match.arg(component) vc <- switch(component, "conditional" = stats::vcov(object = x, model = "count"), "zi" = , "zero_inflated" = stats::vcov(object = x, model = "zero"), stats::vcov(object = x) ) .process_vcov(vc) } #' @export get_varcov.zeroinfl <- get_varcov.hurdle #' @export get_varcov.zerocount <- get_varcov.hurdle #' @rdname get_varcov #' @export get_varcov.zcpglm <- function(x, component = c("conditional", "zero_inflated", "zi", "all"), ...) { component <- match.arg(component) # installed? 
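# zcpglm objects come from the {cplm} package; fail with an informative
# message if it is not available before calling cplm::vcov().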
check_if_installed("cplm") vc <- cplm::vcov(x) tweedie <- which(grepl("^tw_", rownames(vc))) zero <- which(grepl("^zero_", rownames(vc))) vc <- switch(component, "conditional" = vc[tweedie, tweedie, drop = FALSE], "zi" = , "zero_inflated" = vc[zero, zero, drop = FALSE], vc[c(tweedie, zero), c(tweedie, zero), drop = FALSE] ) .process_vcov(vc) } # Zero-Inflated mixed models ------------------------------------------------ #' @rdname get_varcov #' @export get_varcov.glmmTMB <- function(x, component = c("conditional", "zero_inflated", "zi", "dispersion", "all"), ...) { component <- match.arg(component) vc <- switch(component, "conditional" = stats::vcov(x)[["cond"]], "zi" = , "zero_inflated" = stats::vcov(x)[["zi"]], "dispersion" = stats::vcov(x)[["disp"]], stats::vcov(x, full = TRUE) ) .process_vcov(vc) } #' @rdname get_varcov #' @export get_varcov.MixMod <- function(x, effects = c("fixed", "random"), component = c("conditional", "zero_inflated", "zi", "dispersion", "auxiliary", "all"), robust = FALSE, verbose = TRUE, ...) { component <- match.arg(component) effects <- match.arg(effects) random_vc <- stats::vcov(x, parm = "var-cov", sandwich = robust) if (effects == "random") { vc <- random_vc } else { vc <- switch(component, "conditional" = stats::vcov(x, parm = "fixed-effects", sandwich = robust), "zero_inflated" = , "zi" = stats::vcov(x, parm = "all", sandwich = robust), "auxiliary" = , "dispersion" = stats::vcov(x, parm = "extra", sandwich = robust), stats::vcov(x, parm = "all", sandwich = robust) ) # drop random parameters random_parms <- stats::na.omit(match(colnames(random_vc), colnames(vc))) if (length(random_parms)) { vc <- vc[-random_parms, -random_parms, drop = FALSE] } # filter ZI if (component %in% c("zi", "zero_inflated")) { zi_parms <- grepl("^zi_", colnames(vc)) vc <- vc[zi_parms, zi_parms, drop = FALSE] } } .process_vcov(vc) } # Bayesian models ------------------------------------------------ #' @rdname get_varcov #' @export get_varcov.brmsfit <- function(x, component = "conditional", ...) { component <- match.arg(component, choices = c("all", .all_elements())) params <- find_parameters(x, effects = "fixed", component = component, flatten = TRUE) params <- gsub("^b_", "", params) vc <- stats::vcov(x)[params, params, drop = FALSE] .process_vcov(vc) } # mfx models ------------------------------------------------------- #' @rdname get_varcov #' @export get_varcov.betamfx <- function(x, component = c("conditional", "precision", "all"), ...) { component <- match.arg(component) get_varcov.betareg(x$fit, component = component, ...) } #' @export get_varcov.betaor <- get_varcov.betamfx #' @export get_varcov.logitmfx <- function(x, ...) { get_varcov(x$fit, ...) } #' @export get_varcov.poissonmfx <- get_varcov.logitmfx #' @export get_varcov.negbinmfx <- get_varcov.logitmfx #' @export get_varcov.probitmfx <- get_varcov.logitmfx #' @export get_varcov.logitor <- get_varcov.logitmfx #' @export get_varcov.poissonirr <- get_varcov.logitmfx #' @export get_varcov.negbinirr <- get_varcov.logitmfx #' @export get_varcov.model_fit <- get_varcov.logitmfx # Other models with special handling ----------------------------------------- #' @export get_varcov.merModList <- function(x, ...) { warning("Can't access variance-covariance matrix for 'merModList' objects.", call. = FALSE) return(NULL) } #' @export get_varcov.mediate <- function(x, ...) { warning("Can't access variance-covariance matrix for 'mediate' objects.", call. 
= FALSE) return(NULL) } #' @rdname get_varcov #' @export get_varcov.aov <- function(x, complete = FALSE, ...) { vc <- suppressWarnings(stats::vcov(x, complete = complete)) .process_vcov(vc) } #' @export get_varcov.ivFixed <- function(x, ...) { .process_vcov(x$vcov) } #' @export get_varcov.averaging <- function(x, ...) { if (is.null(attributes(x)$modelList)) { warning("Can't calculate covariance matrix. Please use 'fit = TRUE' in 'model.avg()'.", call. = FALSE) } else { get_varcov.default(x) } } #' @export get_varcov.robmixglm <- function(x, ...) { params <- find_parameters(x, flatten = TRUE) np <- length(params) vc <- x$fit@vcov[1:np, 1:np, drop = FALSE] dimnames(vc) <- list(params, params) .process_vcov(vc) } #' @export get_varcov.Rchoice <- function(x, ...) { vc <- stats::vcov(x) params <- find_parameters(x, flatten = TRUE) dimnames(vc) <- list(params, params) .process_vcov(vc) } #' @export get_varcov.rq <- function(x, ...) { s <- summary(x, covariance = TRUE) vc <- as.matrix(s$cov) # add row/column names preds <- find_parameters(x, flatten = TRUE) rownames(vc) <- preds colnames(vc) <- preds if (.is_negativ_matrix(vc)) { vc <- .fix_negative_matrix(vc) } .remove_backticks_from_matrix_names(as.matrix(vc)) } #' @export get_varcov.crr <- function(x, ...) { vc <- as.matrix(x$var) params <- find_parameters(x, flatten = TRUE) dimnames(vc) <- list(params, params) .process_vcov(vc) } #' @export get_varcov.crq <- function(x, ...) { sc <- summary(x, covariance = TRUE) preds <- find_parameters(x, flatten = TRUE) if (all(unlist(lapply(sc, is.list)))) { vc <- lapply(sc, function(i) { .x <- as.matrix(i$cov) # add row/column names rownames(.x) <- preds colnames(.x) <- preds if (.is_negativ_matrix(.x)) { .x <- .fix_negative_matrix(.x) } .x }) names(vc) <- sprintf("tau (%g)", unlist(lapply(sc, function(i) i$tau))) } else { vc <- as.matrix(sc$cov) if (.is_negativ_matrix(vc)) { vc <- .fix_negative_matrix(vc) } vc <- .remove_backticks_from_matrix_names(as.matrix(vc)) } vc } #' @export get_varcov.nlrq <- get_varcov.crq #' @export get_varcov.rqs <- get_varcov.crq #' @export get_varcov.flexsurvreg <- function(x, ...) { pars <- find_parameters(x, flatten = TRUE) vc <- as.matrix(stats::vcov(x))[pars, pars, drop = FALSE] .process_vcov(vc) } #' @export get_varcov.afex_aov <- function(x, ...) { get_varcov(x$lm, ...) } #' @export get_varcov.mixed <- function(x, ...) { vc <- as.matrix(stats::vcov(x$full_model)) .process_vcov(vc) } #' @export get_varcov.cpglmm <- function(x, ...) { vc <- as.matrix(x@vcov) .process_vcov(vc) } #' @export get_varcov.cpglm <- get_varcov.cpglmm #' @export get_varcov.cglm <- function(x, ...) { vc <- as.matrix(x$var) .process_vcov(vc) } #' @export get_varcov.mle2 <- function(x, ...) { vc <- as.matrix(x@vcov) .process_vcov(vc) } #' @export get_varcov.mle <- get_varcov.mle2 #' @rdname get_varcov #' @export get_varcov.mixor <- function(x, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) params <- find_parameters(x, effects = effects, flatten = TRUE) vc <- as.matrix(stats::vcov(x))[params, params, drop = FALSE] .process_vcov(vc) } #' @export get_varcov.glmm <- get_varcov.mixor #' @export get_varcov.gamm <- function(x, ...) { vc <- stats::vcov(x$gam) .process_vcov(vc) } #' @export get_varcov.lqmm <- function(x, ...) 
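# lqmm fits may contain several quantiles (tau); in that case the covariance
# is a 3-dimensional array and a named list of matrices (one per tau) is
# returned instead of a single matrix.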
{ sc <- summary(x, covariance = TRUE) np <- length(find_parameters(x, flatten = TRUE)) if (length(dim(sc$Cov)) == 3) { vc <- lapply(1:length(x$tau), function(i) { .x <- sc$Cov[, , i][1:np, 1:np] if (.is_negativ_matrix(.x)) { .x <- .fix_negative_matrix(.x) } .x }) names(vc) <- sprintf("tau (%g)", x$tau) } else { vc <- as.matrix(sc$Cov)[1:np, 1:np] if (.is_negativ_matrix(vc)) { vc <- .fix_negative_matrix(vc) } vc <- .remove_backticks_from_matrix_names(as.matrix(vc)) } vc } #' @export get_varcov.lqm <- get_varcov.lqmm #' @export get_varcov.list <- function(x, ...) { if ("gam" %in% names(x)) { vc <- stats::vcov(x$gam) .process_vcov(vc) } } #' @export get_varcov.BBmm <- function(x, ...) { vc <- x$fixed.vcov .process_vcov(vc) } #' @export get_varcov.BBreg <- function(x, ...) { vc <- x$vcov .process_vcov(vc) } #' @export get_varcov.feis <- function(x, ...) { vc <- x$vcov .process_vcov(vc) } #' @export get_varcov.glimML <- function(x, ...) { # installed? check_if_installed("aod") vc <- aod::vcov(x) .process_vcov(vc) } #' @export get_varcov.vglm <- function(x, ...) { # installed? check_if_installed("VGAM") vc <- VGAM::vcov(x) .process_vcov(vc) } #' @export get_varcov.vgam <- get_varcov.vglm #' @export get_varcov.geeglm <- function(x, ...) { vc <- summary(x)$cov.unscaled .process_vcov(vc) } #' @export get_varcov.tobit <- function(x, ...) { coef_names <- find_parameters(x, flatten = TRUE) vc <- stats::vcov(x)[coef_names, coef_names, drop = FALSE] .process_vcov(vc) } #' @export get_varcov.lmRob <- function(x, ...) { vc <- x$cov .process_vcov(vc) } #' @export get_varcov.glmRob <- get_varcov.lmRob #' @export get_varcov.coxr <- function(x, ...) { vc <- x$var .process_vcov(vc) } #' @export get_varcov.gee <- function(x, ...) { vc <- x$naive.variance .process_vcov(vc) } #' @export get_varcov.LORgee <- get_varcov.gee # helper-functions ----------------------------------------------------- .process_vcov <- function(vc, verbose = TRUE) { if (.is_negativ_matrix(vc)) { vc <- .fix_negative_matrix(vc) } # fix possible missings due to rank deficient model matrix vc <- .fix_rank_deficiency(vc, verbose) .remove_backticks_from_matrix_names(as.matrix(vc)) } .is_negativ_matrix <- function(x) { if (is.matrix(x) && (nrow(x) == ncol(x))) { rv <- tryCatch( { eigenvalues <- eigen(x, only.values = TRUE)$values eigenvalues[abs(eigenvalues) < 1e-07] <- 0 any(eigenvalues <= 0) }, error = function(e) { FALSE } ) } else { rv <- FALSE } rv } .fix_negative_matrix <- function(m) { if (requireNamespace("Matrix", quietly = TRUE)) { as.matrix(Matrix::nearPD(m)$mat) } else { m } } .fix_rank_deficiency <- function(m, verbose = TRUE) { if (anyNA(m)) { if (isTRUE(verbose)) { warning(format_message("Model matrix is rank deficient. Some variance-covariance parameters are missing."), call. 
= FALSE) } mm <- m[!is.na(m)] if (!is.matrix(mm)) { mm <- matrix(mm, nrow = sqrt(length(mm))) na_cols <- apply(m, 2, function(i) all(is.na(i))) rownames(mm) <- rownames(m)[!na_cols] colnames(mm) <- rownames(m)[!na_cols] attr(mm, "na_columns_name") <- na_cols[na_cols] attr(mm, "na_columns_index") <- which(na_cols) attr(mm, "rank_deficient") <- TRUE } m <- mm } m } .get_weighted_varcov <- function(x, cov_unscaled) { ssd <- .weighted_crossprod(stats::residuals(x), w = x$weights) df <- sum(x$weights) out <- structure(list(SSD = ssd, call = x$call, df = df), class = "SSD") kronecker(stats::estVar(out), cov_unscaled, make.dimnames = TRUE) } .weighted_crossprod <- function(x, w) { if (is.vector(x)) { x <- as.matrix(x) } if (missing(w)) { return(crossprod(x)) } if (length(w) == 1 || (is.vector(w) && stats::sd(w) < sqrt(.Machine$double.eps))) { return(w[1] * crossprod(x)) } else { if (is.vector(w)) { if (length(w) != nrow(x)) { stop("w is the wrong length") } return(crossprod(x, w * x)) } else { if (nrow(w) != ncol(w) || nrow(w) != nrow(x)) { stop("w is the wrong dimension") } return(crossprod(x, w %*% x)) } } } insight/R/link_inverse.R0000644000175000017500000003466114101711726015077 0ustar nileshnilesh#' @title Get link-inverse function from model object #' @name link_inverse #' #' @description Returns the link-inverse function from a model object. #' #' @param what For `gamlss` models, indicates for which distribution #' parameter the link (inverse) function should be returned; for #' `betareg` or `DirichletRegModel`, can be `"mean"` or #' `"precision"`. #' @inheritParams find_predictors #' @inheritParams find_formula #' #' @return A function, describing the inverse-link function from a model-object. #' For multivariate-response models, a list of functions is returned. #' #' @examples #' # example from ?stats::glm #' counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) #' outcome <- gl(3, 1, 9) #' treatment <- gl(3, 3) #' m <- glm(counts ~ outcome + treatment, family = poisson()) #' #' link_inverse(m)(.3) #' # same as #' exp(.3) #' @export link_inverse <- function(x, ...) { UseMethod("link_inverse") } # Default method --------------------------------------- #' @export link_inverse.default <- function(x, ...) { if (inherits(x, "list") && .obj_has_name(x, "gam")) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) } if (inherits(x, "Zelig-relogit")) { stats::make.link(link = "logit")$linkinv } else { tryCatch( { stats::family(x)$linkinv }, error = function(x) { NULL } ) } } # GLM families --------------------------------------------------- #' @export link_inverse.glm <- function(x, ...) { tryCatch( { stats::family(x)$linkinv }, error = function(x) { NULL } ) } #' @export link_inverse.speedglm <- link_inverse.glm #' @export link_inverse.bigglm <- link_inverse.glm # Tobit Family --------------------------------- #' @export link_inverse.tobit <- function(x, ...) { .make_tobit_family(x)$linkinv } #' @export link_inverse.crch <- link_inverse.tobit #' @export link_inverse.survreg <- link_inverse.tobit #' @export link_inverse.psm <- link_inverse.tobit #' @export link_inverse.flexsurvreg <- function(x, ...) { dist <- parse(text = .safe_deparse(x$call))[[1]]$dist .make_tobit_family(x, dist)$linkinv } # Gaussian identity links --------------------------------- #' @export link_inverse.lm <- function(x, ...) 
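# Linear models use the identity link, so the inverse link is simply the
# identity function; the identity-link classes below alias this method.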
{ stats::gaussian(link = "identity")$linkinv } #' @export link_inverse.bayesx <- link_inverse.lm #' @export link_inverse.systemfit <- link_inverse.lm #' @export link_inverse.lqmm <- link_inverse.lm #' @export link_inverse.lqm <- link_inverse.lm #' @export link_inverse.biglm <- link_inverse.lm #' @export link_inverse.aovlist <- link_inverse.lm #' @export link_inverse.ivreg <- link_inverse.lm #' @export link_inverse.ivFixed <- link_inverse.lm #' @export link_inverse.iv_robust <- link_inverse.lm #' @export link_inverse.mixed <- link_inverse.lm #' @export link_inverse.lme <- link_inverse.lm #' @export link_inverse.rq <- link_inverse.lm #' @export link_inverse.rqss <- link_inverse.lm #' @export link_inverse.crq <- link_inverse.lm #' @export link_inverse.crqs <- link_inverse.lm #' @export link_inverse.censReg <- link_inverse.lm #' @export link_inverse.plm <- link_inverse.lm #' @export link_inverse.lm_robust <- link_inverse.lm #' @export link_inverse.truncreg <- link_inverse.lm #' @export link_inverse.felm <- link_inverse.lm #' @export link_inverse.feis <- link_inverse.lm #' @export link_inverse.gls <- link_inverse.lm #' @export link_inverse.lmRob <- link_inverse.lm #' @export link_inverse.MANOVA <- link_inverse.lm #' @export link_inverse.RM <- link_inverse.lm #' @export link_inverse.lmrob <- link_inverse.lm #' @export link_inverse.complmrob <- link_inverse.lm #' @export link_inverse.speedlm <- link_inverse.lm #' @export link_inverse.afex_aov <- link_inverse.lm #' @rdname link_inverse #' @export link_inverse.betareg <- function(x, what = c("mean", "precision"), ...) { what <- match.arg(what) switch(what, "mean" = x$link$mean$linkinv, "precision" = x$link$precision$linkinv ) } #' @rdname link_inverse #' @export link_inverse.DirichletRegModel <- function(x, what = c("mean", "precision"), ...) { what <- match.arg(what) if (x$parametrization == "common") { stats::make.link("logit")$linkinv } else { switch(what, "mean" = stats::make.link("logit")$linkinv, "precision" = stats::make.link("log")$linkinv ) } } # Logit links ----------------------------------- #' @export link_inverse.gmnl <- function(x, ...) { stats::make.link("logit")$linkinv } #' @export link_inverse.mlogit <- link_inverse.gmnl #' @export link_inverse.BBreg <- link_inverse.gmnl #' @export link_inverse.BBmm <- link_inverse.gmnl #' @export link_inverse.coxph <- link_inverse.gmnl #' @export link_inverse.riskRegression <- link_inverse.gmnl #' @export link_inverse.comprisk <- link_inverse.gmnl #' @export link_inverse.coxr <- link_inverse.gmnl #' @export link_inverse.survfit <- link_inverse.gmnl #' @export link_inverse.coxme <- link_inverse.gmnl #' @export link_inverse.lrm <- link_inverse.gmnl #' @export link_inverse.orm <- link_inverse.gmnl #' @export link_inverse.cph <- link_inverse.gmnl #' @export link_inverse.logistf <- link_inverse.gmnl #' @export link_inverse.multinom <- link_inverse.gmnl # Probit link ------------------------ #' @export link_inverse.ivprobit <- function(x, ...) { stats::make.link(link = "probit")$linkinv } #' @export link_inverse.mvord <- function(x, ...) { link_name <- x$rho$link$name l <- stats::make.link(link = ifelse(link_name == "mvprobit", "probit", "logit")) l$linkinv } # Log-links --------------------------------------- #' @export link_inverse.zeroinfl <- function(x, ...) 
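# The count component of zero-inflated and hurdle models is modelled on the
# log scale, so the inverse link is exp().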
{ stats::make.link("log")$linkinv } #' @export link_inverse.hurdle <- link_inverse.zeroinfl #' @export link_inverse.zerotrunc <- link_inverse.zeroinfl # Ordinal models ----------------------------------- #' @export link_inverse.clm <- function(x, ...) { stats::make.link(.get_ordinal_link(x))$linkinv } #' @export link_inverse.clmm <- link_inverse.clm #' @export link_inverse.clm2 <- link_inverse.clm #' @export link_inverse.mixor <- link_inverse.clm # mfx models ------------------------------------------------------ #' @rdname link_inverse #' @export link_inverse.betamfx <- function(x, what = c("mean", "precision"), ...) { what <- match.arg(what) link_inverse.betareg(x$fit, what = what, ...) } #' @export link_inverse.betaor <- link_inverse.betamfx #' @export link_inverse.logitmfx <- function(x, ...) { link_inverse(x$fit, ...) } #' @export link_inverse.poissonmfx <- link_inverse.logitmfx #' @export link_inverse.probitmfx <- link_inverse.logitmfx #' @export link_inverse.negbinmfx <- link_inverse.logitmfx #' @export link_inverse.logitor <- link_inverse.logitmfx #' @export link_inverse.probitirr <- link_inverse.logitmfx #' @export link_inverse.negbinirr <- link_inverse.logitmfx # Other models ---------------------------- #' @export link_inverse.Rchoice <- function(x, ...) { stats::make.link(link = x$link)$linkinv } #' @export link_inverse.merModList <- function(x, ...) { link_inverse.default(x[[1]], ...) } #' @export link_inverse.robmixglm <- function(x, ...) { switch(tolower(x$family), gaussian = stats::make.link(link = "identity")$linkinv, binomial = stats::make.link(link = "logit")$linkinv, gamma = stats::make.link(link = "inverse")$linkinv, poisson = , truncpoisson = stats::make.link(link = "log")$linkinv, stats::make.link(link = "identity")$linkinv ) } #' @export link_inverse.cglm <- function(x, ...) { link <- parse(text = .safe_deparse(x$call))[[1]]$link method <- parse(text = .safe_deparse(x$call))[[1]]$method if (!is.null(method) && method == "clm") { link <- "identiy" } stats::make.link(link = link)$linkinv } #' @export link_inverse.cpglmm <- function(x, ...) { f <- .get_cplm_family(x) f$linkinv } #' @export link_inverse.cpglm <- link_inverse.cpglmm #' @export link_inverse.zcpglm <- link_inverse.cpglmm #' @export link_inverse.bcplm <- link_inverse.cpglmm #' @export link_inverse.fixest <- function(x, ...) { if (is.null(x$family)) { if (!is.null(x$method) && x$method == "feols") { stats::gaussian(link = "identity")$linkinv } } else if (inherits(x$family, "family")) { x$family$linkinv } else { link <- switch(x$family, "poisson" = , "negbin" = "log", "logit" = "logit", "gaussian" = "identity" ) stats::make.link(link)$linkinv } } #' @export link_inverse.feglm <- link_inverse.fixest #' @export link_inverse.glmx <- function(x, ...) { x$family$glm$linkinv } #' @export link_inverse.bife <- function(x, ...) { x$family$linkinv } #' @export link_inverse.glmmadmb <- function(x, ...) { x$ilinkfun } #' @export link_inverse.polr <- function(x, ...) { link <- x$method if (link == "logistic") link <- "logit" stats::make.link(link)$linkinv } #' @export link_inverse.svyolr <- function(x, ...) { link <- x$method if (link == "logistic") link <- "logit" stats::make.link(link)$linkinv } #' @export link_inverse.LORgee <- function(x, ...) 
{ if (grepl(pattern = "logit", x = x$link, fixed = TRUE)) { link <- "logit" } else if (grepl(pattern = "probit", x = x$link, fixed = TRUE)) { link <- "probit" } else if (grepl(pattern = "cauchit", x = x$link, fixed = TRUE)) { link <- "cauchit" } else if (grepl(pattern = "cloglog", x = x$link, fixed = TRUE)) { link <- "cloglog" } else { link <- "logit" } stats::make.link(link)$linkinv } #' @export link_inverse.glimML <- function(x, ...) { stats::make.link(x@link)$linkinv } #' @export link_inverse.glmmTMB <- function(x, ...) { ff <- stats::family(x) if ("linkinv" %in% names(ff)) { ff$linkinv } else if ("link" %in% names(ff) && is.character(ff$link)) { stats::make.link(ff$link)$linkinv } else { match.fun("exp") } } #' @export link_inverse.MCMCglmm <- function(x, ...) { switch(x$Residual$original.family, "cengaussian" = , "gaussian" = stats::gaussian(link = "identity")$linkinv, "categorical" = , "multinomial" = , "zibinomial" = , "ordinal" = stats::make.link("logit")$linkinv, "poisson" = , "cenpoisson" = , "zipoisson" = , "zapoisson" = , "ztpoisson" = , "hupoisson" = stats::make.link("log")$linkinv ) } #' @export link_inverse.glmm <- function(x, ...) { switch(tolower(x$family.glmm$family.glmm), "bernoulli.glmm" = , "binomial.glmm" = stats::make.link("logit")$linkinv, "poisson.glmm" = stats::make.link("log")$linkinv, stats::gaussian(link = "identity")$linkinv ) } #' @export link_inverse.gamm <- function(x, ...) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) NextMethod() } #' @export link_inverse.stanmvreg <- function(x, ...) { fam <- stats::family(x) lapply(fam, function(.x) .x$linkinv) } #' @export link_inverse.gbm <- function(x, ...) { switch(x$distribution$name, laplace = , tdist = , gaussian = stats::gaussian(link = "identity")$linkinv, poisson = stats::poisson(link = "log")$linkinv, huberized = , adaboost = , coxph = , bernoulli = stats::make.link("logit")$linkinv ) } #' @export link_inverse.brmsfit <- function(x, ...) { fam <- stats::family(x) if (is_multivariate(x)) { lapply(fam, .brms_link_inverse) } else { .brms_link_inverse(fam) } } #' @rdname link_inverse #' @export link_inverse.gamlss <- function(x, what = c("mu", "sigma", "nu", "tau"), ...) { what <- match.arg(what) faminfo <- get(x$family[1], asNamespace("gamlss"))() switch(what, "mu" = faminfo$mu.linkinv, "sigma" = faminfo$sigma.linkinv, "nu" = faminfo$nu.linkinv, "tau" = faminfo$tau.linkinv, faminfo$mu.linkinv ) } #' @export link_inverse.bamlss <- function(x, ...) { flink <- stats::family(x)$links[1] tryCatch( { stats::make.link(flink)$linkinv }, error = function(e) { print_colour("\nCould not find appropriate link-inverse-function.\n", "red") } ) } #' @export link_inverse.glmmPQL <- function(x, ...) { x$family$linkinv } #' @export link_inverse.MixMod <- link_inverse.glmmPQL #' @export link_inverse.cgam <- link_inverse.glmmPQL #' @export link_inverse.vgam <- function(x, ...) { x@family@linkinv } #' @export link_inverse.vglm <- link_inverse.vgam #' @export link_inverse.svy_vglm <- function(x, ...) { link_inverse(x$fit) } #' @export link_inverse.model_fit <- link_inverse.svy_vglm #' @export link_inverse.gam <- function(x, ...) { li <- tryCatch( { .gam_family(x)$linkinv }, error = function(x) { NULL } ) if (is.null(li)) { mi <- .gam_family(x) if (.obj_has_name(mi, "linfo")) { if (.obj_has_name(mi$linfo, "linkinv")) { li <- mi$linfo$linkinv } else { li <- mi$linfo[[1]]$linkinv } } } li } #' @export link_inverse.mipo <- function(x, ...) 
{ models <- eval(x$call$object) link_inverse(models$analyses[[1]]) } #' @export link_inverse.mira <- function(x, ...) { # installed? check_if_installed("mice") link_inverse(mice::pool(x), ...) } # helper -------------- .brms_link_inverse <- function(fam) { # do we have custom families? if (!is.null(fam$family) && (is.character(fam$family) && fam$family == "custom")) { il <- stats::make.link(fam$link)$linkinv } else { if ("linkinv" %in% names(fam)) { il <- fam$linkinv } else if ("link" %in% names(fam) && is.character(fam$link)) { il <- stats::make.link(fam$link)$linkinv } else { ff <- get(fam$family, asNamespace("stats")) il <- ff(fam$link)$linkinv } } il } .get_cplm_family <- function(x) { link <- parse(text = .safe_deparse(x@call))[[1]]$link if (is.null(link)) link <- "log" if (!is.numeric(link)) { stats::poisson(link = link) } else { # installed? check_if_installed("statmod") statmod::tweedie(link.power = link) } } insight/R/get_df.R0000644000175000017500000002314314137207374013640 0ustar nileshnilesh#' @title Extract degrees of freedom #' @name get_df #' #' @description Estimate or extract residual or model-based degrees of freedom #' from regression models. #' #' @param x A statistical model. #' @param type Can be `"residual"`, `"model"` or `"analytical"`. `"residual"` #' tries to extract residual degrees of freedoms. If residual degrees of freedom #' could not be extracted, returns analytical degrees of freedom, i.e. `n-k` #' (number of observations minus number of parameters). `"model"` returns #' model-based degrees of freedom, i.e. the number of (estimated) parameters. #' @param verbose Toggle warnings. #' @param ... Currently not used. #' #' @examples #' model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris) #' get_df(model) # same as df.residual(model) #' get_df(model, type = "model") # same as attr(logLik(model), "df") #' @export get_df <- function(x, ...) { UseMethod("get_df") } #' @rdname get_df #' @export get_df.default <- function(x, type = "residual", verbose = TRUE, ...) { type <- match.arg(tolower(type), choices = c("residual", "model", "analytical")) if (type == "residual") { dof <- .degrees_of_freedom_residual(x, verbose = verbose) if (is.null(dof) || all(is.infinite(dof)) || anyNA(dof)) { dof <- .degrees_of_freedom_analytical(x) } } else if (type == "analytical") { dof <- .degrees_of_freedom_analytical(x) } else { dof <- .model_df(x) } if (!is.null(dof) && length(dof) > 0 && all(dof == 0) && isTRUE(verbose)) { warning("Model has zero degrees of freedom!", call. = FALSE) } if (is.null(dof) && isTRUE(verbose)) { warning("Could not extract degrees of freedom.", call. = FALSE) } dof } #' @export get_df.model_fit <- function(x, type = "residual", verbose = TRUE, ...) { get_df(x$fit, type = type, verbose = verbose, ...) } #' @export get_df.ivFixed <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { as.vector(x$df) } } #' @export get_df.ivprobit <- get_df.ivFixed #' @export get_df.multinom <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { n_obs(x) - x$edf } } #' @export get_df.nnet <- get_df.multinom #' @export get_df.summary.lm <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { x$fstatistic[3] } } #' @export get_df.emmGrid <- function(x, ...) 
{ if (!is.null(x@misc$is_boot) && x@misc$is_boot) { return(.boot_em_df(x)) } unique(summary(x)$df) } #' @export get_df.emm_list <- function(x, ...) { if (!is.null(x[[1]]@misc$is_boot) && x[[1]]@misc$is_boot) { return(.boot_em_df(x)) } s <- summary(x) unname(unlist(lapply(s, function(i) { if (is.null(i$df)) { rep(Inf, nrow(i)) } else { i$df } }))) } #' @export get_df.coeftest <- function(x, ...) { attributes(x)$df } #' @export get_df.lqmm <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { cs <- summary(x) tryCatch( { if (!is.null(cs$rdf)) { cs$rdf } else { attr(cs$B, "R") - 1 } }, error = function(e) { NULL } ) } } #' @export get_df.lqm <- get_df.lqmm #' @export get_df.cgam <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { x$resid_df_obs } } #' @export get_df.glht <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { x$df } } #' @export get_df.BBmm <- get_df.glht #' @export get_df.BBreg <- get_df.glht #' @export get_df.rlm <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "residual") { .degrees_of_freedom_analytical(x) } else { .model_df(x) } } #' @export get_df.bfsl <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "residual") { x$df.residual } else { .model_df(x) } } #' @export get_df.plm <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "residual") { x$df.residual } else { .model_df(x) } } #' @export get_df.selection <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { s <- summary(x) s$param$df } } #' @export get_df.logitor <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { get_df.default(x$fit, ...) } } #' @export get_df.poissonirr <- get_df.logitor #' @export get_df.negbinirr <- get_df.logitor #' @export get_df.poissonmfx <- get_df.logitor #' @export get_df.logitmfx <- get_df.logitor #' @export get_df.negbinmfx <- get_df.logitor #' @export get_df.probitmfx <- get_df.logitor #' @export get_df.betaor <- get_df.logitor #' @export get_df.betamfx <- get_df.logitor #' @export get_df.merModList <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { s <- suppressWarnings(summary(x)) s$fe$df } } #' @export get_df.mira <- function(x, type = "residual", ...) { # installed? check_if_installed("mice") type <- match.arg(tolower(type), choices = c("residual", "model")) get_df(mice::pool(x), type, ...) } #' @export get_df.mipo <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { as.vector(summary(x)$df) } } #' @export get_df.vgam <- function(x, type = "residual", ...) 
{ type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { params <- get_parameters(x) out <- stats::setNames(rep(NA, nrow(params)), params$Parameter) out[names(x@nl.df)] <- x@nl.df out } } #' @export get_df.rqs <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) if (type == "model") { .model_df(x) } else { tryCatch( { s <- suppressWarnings(summary(x, covariance = TRUE)) cs <- lapply(s, function(i) i$rdf) unique(unlist(cs)) }, error = function(e) { NULL } ) } } #' @export get_df.systemfit <- function(x, type = "residual", ...) { type <- match.arg(tolower(type), choices = c("residual", "model")) df <- c() s <- summary(x)$eq params <- find_parameters(x) f <- find_formula(x) system_names <- names(f) for (i in 1:length(system_names)) { dfs <- rep(s[[i]]$df[2], length(params[[i]])) df_names <- rep(names(params[i]), length(params[[i]])) df <- c(df, stats::setNames(dfs, df_names)) } df } # Analytical approach ------------------------------ #' @keywords internal .degrees_of_freedom_analytical <- function(model) { nparam <- n_parameters(model) n <- n_obs(model) if (is.null(n)) { return(Inf) } return(n - nparam) } # Model approach (Residual df) ------------------------------ #' @keywords internal .degrees_of_freedom_residual <- function(model, verbose = TRUE) { if (.is_bayesian_model(model) && !inherits(model, c("bayesx", "blmerMod", "bglmerMod"))) { if (requireNamespace("bayestestR", quietly = TRUE)) { model <- bayestestR::bayesian_as_frequentist(model) } else { if (isTRUE(verbose)) { warning("Can't extract degrees of freedom from Bayesian model.", call. = FALSE) } return(NULL) } } # 1st try dof <- try(stats::df.residual(model), silent = TRUE) # 2nd try if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) { junk <- utils::capture.output(dof = try(summary(model)$df[2], silent = TRUE)) } # 3rd try, nlme if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) { dof <- try(unname(model$fixDF$X), silent = TRUE) } # last try if (inherits(dof, "try-error")) { dof <- NULL } dof } # Model approach (model-based / logLik df) ------------------------------ .model_df <- function(x) { dof <- tryCatch( { attr(stats::logLik(x), "df") }, error = function(e) { NULL } ) if (is.null(dof) || all(is.infinite(dof)) || all(is.na(dof))) { if (!is.null(x$rank)) { dof <- x$rank + 1 } else { n <- n_parameters(x) extra <- 0 mi <- model_info(x, verbose = FALSE) if (mi$is_linear || mi$is_negbin) { extra <- extra + 1 } dof <- n + extra } } dof } .boot_em_df <- function(model) { est <- get_parameters(model, summary = FALSE) rep(NA, ncol(est)) } insight/R/format_rope.R0000644000175000017500000000120213757657664014737 0ustar nileshnilesh#' Percentage in ROPE formatting #' #' @param rope_percentage Value or vector of percentages in ROPE. #' @inheritParams format_p #' @inheritParams format_ci #' #' @return A formatted string. #' #' @examples #' format_rope(c(0.02, 0.12, 0.357, 0)) #' format_rope(c(0.02, 0.12, 0.357, 0), name = NULL) #' @export format_rope <- function(rope_percentage, name = "in ROPE", digits = 2) { text <- ifelse(rope_percentage == 0, "0%", ifelse(rope_percentage == 1, "100%", format_value(rope_percentage, digits = digits, as_percent = TRUE) ) ) if (!is.null(name)) { text <- paste(text, name) } text } insight/R/format_p.R0000644000175000017500000001045214157046524014216 0ustar nileshnilesh#' p-values formatting #' #' Format p-values. #' #' @param p value or vector of p-values. 
#' @param stars Add significance stars (e.g., p < .001***). #' @param stars_only Return only significance stars. #' @param whitespace Logical, if `TRUE` (default), preserves whitespaces. Else, #' all whitespace characters are removed from the returned string. #' @param name Name prefixing the text. Can be `NULL`. #' @param decimal_separator Character, if not `NULL`, will be used as #' decimal separator. #' @param digits Number of significant digits. May also be `"scientific"` #' to return exact p-values in scientific notation, or `"apa"` to use #' an APA 7th edition-style for p-values (equivalent to `digits = 3`). #' If `"scientific"`, control the number of digits by adding the value as #' a suffix, e.g.m `digits = "scientific4"` to have scientific notation #' with 4 decimal places. #' @param ... Arguments from other methods. #' @inheritParams format_value #' #' @return A formatted string. #' @examples #' format_p(c(.02, .065, 0, .23)) #' format_p(c(.02, .065, 0, .23), name = NULL) #' format_p(c(.02, .065, 0, .23), stars_only = TRUE) #' #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' p <- coef(summary(model))[, 4] #' format_p(p, digits = "apa") #' format_p(p, digits = "scientific") #' format_p(p, digits = "scientific2") #' @export format_p <- function(p, stars = FALSE, stars_only = FALSE, whitespace = TRUE, name = "p", missing = "", decimal_separator = NULL, digits = 3, ...) { # only convert p if it's a valid numeric, or at least coercible to # valid numeric values... if (!is.numeric(p)) { if (.is_numeric_character(p)) { p <- .factor_to_numeric(p) } else { return(p) } } if (identical(stars, "only")) { stars <- TRUE stars_only <- TRUE } if (digits == "apa") { digits <- 3 } if (is.character(digits) && grepl("^scientific", digits)) { digits <- tryCatch( { as.numeric(gsub("scientific", "", digits, fixed = TRUE)) }, error = function(e) { NA } ) if (is.na(digits)) { digits <- 5 } text <- ifelse(is.na(p), NA, ifelse(p < 0.001, sprintf("= %.*e***", digits, p), ifelse(p < 0.01, sprintf("= %.*e**", digits, p), ifelse(p < 0.05, sprintf("= %.*e*", digits, p), ifelse(p > 0.999, sprintf("= %.*e", digits, p), sprintf("= %.*e", digits, p) ) ) ) ) ) } else if (digits <= 3) { text <- ifelse(is.na(p), NA, ifelse(p < 0.001, "< .001***", ifelse(p < 0.01, paste0("= ", format_value(p, digits), "**"), ifelse(p < 0.05, paste0("= ", format_value(p, digits), "*"), ifelse(p > 0.999, "> .999", paste0("= ", format_value(p, digits)) ) ) ) ) ) } else { text <- ifelse(is.na(p), NA, ifelse(p < 0.001, paste0("= ", format_value(p, digits), "***"), ifelse(p < 0.01, paste0("= ", format_value(p, digits), "**"), ifelse(p < 0.05, paste0("= ", format_value(p, digits), "*"), paste0("= ", format_value(p, digits)) ) ) ) ) } .add_prefix_and_remove_stars(text, stars, stars_only, name, missing, whitespace, decimal_separator) } #' @keywords internal .add_prefix_and_remove_stars <- function(text, stars, stars_only, name, missing = "", whitespace = TRUE, decimal_separator = NULL) { missing_index <- is.na(text) if (is.null(name)) { text <- gsub("= ", "", text) } else { text <- paste(name, text) } if (stars_only == TRUE) { text <- gsub("[^\\*]", "", text) } else if (stars == FALSE) { text <- gsub("\\*", "", text) } # replace missing with related string text[missing_index] <- missing # remove whitespace around < and > if (isFALSE(whitespace)) { text <- gsub(" ", "", text, fixed = TRUE) } # replace decimal separator if (!is.null(decimal_separator)) { text <- gsub(".", decimal_separator, text, fixed = TRUE) } text } 
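# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original package source): the
# calls below only exercise arguments documented for `format_p()` above
# (`stars`, `whitespace`, `decimal_separator`) and assume the insight package
# is installed and attached.
library(insight)

p_values <- c(0.0004, 0.02, 0.065, 0.23)

format_p(p_values)                           # default output, no significance stars
format_p(p_values, stars = TRUE)             # keep significance stars
format_p(p_values, whitespace = FALSE)       # remove whitespace, e.g. "p<.001"
format_p(p_values, decimal_separator = ",")  # replace "." with "," in the output
# ---------------------------------------------------------------------------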
insight/R/print_color.R0000644000175000017500000000351014077615665014747 0ustar nileshnilesh#' @title Coloured console output #' @name print_color #' #' @description Convenient function that allows coloured output in the console. #' Mainly implemented to reduce package dependencies. #' #' @param text The text to print. #' @param color,colour Character vector, indicating the colour for printing. #' May be one of `"red"`, `"yellow"`, `"green"`, `"blue"`, #' `"violet"`, `"cyan"` or `"grey"`. Formatting is also possible #' with `"bold"` or `"italic"`. #' #' @details This function prints `text` directly to the console using #' `cat()`, so no string is returned. `color_text()`, however, #' returns only the formatted string, without using `cat()`. #' `color_theme()` either returns `"dark"` when RStudio is used #' with dark color scheme, `"light"` when it's used with light theme, #' and `NULL` if the theme could not be detected. #' #' @return Nothing. #' #' @examples #' print_color("I'm blue dabedi dabedei", "blue") #' @export print_color <- function(text, color) { cat(.colour(colour = color, x = text)) } #' @rdname print_color #' @export print_colour <- function(text, colour) { print_color(color = colour, text = text) } #' @rdname print_color #' @export color_text <- function(text, color) { .colour(colour = color, x = text) } #' @rdname print_color #' @export colour_text <- function(text, colour) { .colour(colour = colour, x = text) } #' @rdname print_color #' @export color_theme <- function() { if (requireNamespace("rstudioapi", quietly = TRUE)) { if (!rstudioapi::isAvailable()) { return(NULL) } if (!rstudioapi::hasFun("getThemeInfo")) { return(NULL) } theme <- rstudioapi::getThemeInfo() if (isTRUE(theme$dark)) { return("dark") } else { return("light") } } return(NULL) } insight/R/get_loglikelihood.R0000644000175000017500000002123214137207374016071 0ustar nileshnilesh#' Log-Likelihood #' #' A robust function to compute the log-likelihood of a model, as well as #' individual log-likelihoods (for each observation) whenever possible. Can be #' used as a replacement for `stats::logLik()` out of the box, as the #' returned object is of the same class (and it gives the same results by #' default). #' #' @param estimator Corresponds to the different estimators for the standard #' deviation of the errors. If `estimator="ML"` (default), the scaling is #' done by n (the biased ML estimator), which is then equivalent to using #' `stats::logLik()`. If `estimator="OLS"`, it returns the unbiased #' OLS estimator. #' @param REML Only for linear models. This argument is present for #' compatibility with `stats::logLik()`. Setting it to `TRUE` will #' overwrite the `estimator` argument and is thus equivalent to setting #' `estimator="REML"`. It will give the same results as #' `stats::logLik(..., REML=TRUE)`. Note that individual log-likelihoods #' are not available under REML. #' @param ... Passed down to `logLik()`, if possible. #' @inheritParams get_residuals #' #' @return An object of class `"logLik"`, also containing the #' log-likelihoods for each observation as a `per_observation` attribute #' (`attributes(get_loglikelihood(x))$per_observation`) when possible. #' The code was partly inspired from the \CRANpkg{nonnest2} package. 
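#'
#' As a minimal sketch (an illustration, not original package documentation),
#' the per-observation values mentioned above can be inspected through the
#' attributes of the returned object; the exact attribute name is best checked
#' with `names(attributes(...))` rather than assumed:
#' \preformatted{
#' m <- lm(Sepal.Length ~ Petal.Width + Species, data = iris)
#' ll <- get_loglikelihood(m)
#' names(attributes(ll))
#' }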
#' #' @examples #' x <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) #' #' get_loglikelihood(x, estimator = "ML") # Equivalent to stats::logLik(x) #' get_loglikelihood(x, estimator = "REML") # Equivalent to stats::logLik(x, REML=TRUE) #' get_loglikelihood(x, estimator = "OLS") #' @export get_loglikelihood <- function(x, ...) { UseMethod("get_loglikelihood") } #' @rdname get_loglikelihood #' @export loglikelihood <- get_loglikelihood #' @export get_loglikelihood.default <- function(x, ...) { .loglikelihood_prep_output(x, lls = NA, ...) } #' @export get_loglikelihood.model_fit <- function(x, ...) { get_loglikelihood(x$fit, ...) } #' @export get_loglikelihood.afex_aov <- function(x, ...) { get_loglikelihood(x$lm, ...) } # Methods WITH individual LLs --------------------------------------------- # TODO: Complete for other families with https://github.com/cran/nonnest2/blob/master/R/llcont.R # https://stats.stackexchange.com/questions/322038/input-format-for-response-in-binomial-glm-in-r .get_loglikelihood_lm <- function(x, estimator = "ML", REML = FALSE, verbose = TRUE, ...) { # Replace arg if compatibility base R is activated if (REML) estimator <- "REML" # Get weights w <- get_weights(x, null_as_ones = TRUE) # Get LogLikelihood estimator <- tolower(estimator) # REML (directly returned) # TODO: Find a way of reversing this formula to pull the sums out and get individual lls if (estimator == "reml") { if (!"qr" %in% names(x)) { stop("REML estimation not available for this model.") } N <- get_df(x, type = "residual") # n_obs - p val <- 0.5 * (sum(log(w)) - N * (log(2 * pi) + 1 - log(N) + log(sum(w * get_residuals(x, verbose = verbose)^2)))) p <- n_parameters(x, remove_nonestimable = TRUE) ll <- val - sum(log(abs(diag(x$qr$qr)[1:p]))) return(.loglikelihood_prep_output(x, ll)) } # Get S2 s <- as.numeric(get_sigma(x)) if (estimator == "ols") { s2 <- s^2 } else if (estimator == "ml") { s2 <- (s * sqrt(get_df(x, type = "residual") / n_obs(x)))^2 } else { stop("'estimator' should be one of 'ML', 'REML' or 'OLS'.") } # Get individual log-likelihoods lls <- 0.5 * (log(w) - (log(2 * pi) + log(s2) + (w * get_residuals(x, verbose = verbose)^2) / s2)) .loglikelihood_prep_output(x, lls) } .get_loglikelihood_glm <- function(x, verbose = TRUE, ...) { fam <- stats::family(x)$family resp <- get_response(x, verbose = verbose) w <- get_weights(x, null_as_ones = TRUE) dev <- stats::deviance(x) disp <- dev / sum(w) predicted <- get_predicted(x, verbose = verbose) # Make adjustment for binomial models with matrix as input if (fam == "binomial") { resp <- .factor_to_numeric(resp, lowest = 0) if (!is.null(ncol(resp))) { n <- apply(resp, 1, sum) resp <- ifelse(n == 0, 0, resp[, 1] / n) } else { n <- rep.int(1, length(resp)) } n <- if (any(n > 1)) n else w w <- ifelse(n > 0, (w / n), 0) } # Calculate Log Likelihoods depending on the family lls <- switch(fam, binomial = { stats::dbinom(round(n * resp), round(n), predicted, log = TRUE) * w }, quasibinomial = { NA }, poisson = { stats::dpois(resp, predicted, log = TRUE) * w }, quasipoisson = { NA }, gaussian = { nobs <- length(resp) -((log(dev / nobs * 2 * pi) + 1) - log(w)) / 2 }, inverse.gaussian = { -((log(disp * 2 * pi) + 1) + 3 * log(resp)) / 2 }, Gamma = { stats::dgamma(resp, shape = 1 / disp, scale = predicted * disp, log = TRUE) * w } ) .loglikelihood_prep_output(x, lls) } #' @rdname get_loglikelihood #' @export get_loglikelihood.lm <- function(x, estimator = "ML", REML = FALSE, verbose = TRUE, ...) 
{ if (inherits(x, "list") && .obj_has_name(x, "gam")) { x <- x$gam } info <- model_info(x, verbose = FALSE) if (info$is_linear) { ll <- .get_loglikelihood_lm(x, estimator = estimator, REML = REML, verbose = verbose, ... ) } else { ll <- .get_loglikelihood_glm(x, verbose = verbose, ...) } ll } #' @export get_loglikelihood.ivreg <- get_loglikelihood.lm #' @export get_loglikelihood.glm <- get_loglikelihood.lm #' @export get_loglikelihood.glmer <- get_loglikelihood.lm #' @export get_loglikelihood.gam <- get_loglikelihood.lm #' @export get_loglikelihood.gamm <- get_loglikelihood.lm #' @export get_loglikelihood.list <- get_loglikelihood.lm #' @export get_loglikelihood.stanreg <- function(x, centrality = stats::median, ...) { # installed? check_if_installed("rstanarm") # Get posterior distribution of logliks mat <- rstanarm::log_lik(x) # Point estimate using the function passed as the centrality argument lls <- sapply(as.data.frame(mat), centrality) .loglikelihood_prep_output(x, lls) } # Methods WITHOUT individual LLs --------------------------------------------- #' @export get_loglikelihood.iv_robust <- function(x, ...) { res <- get_residuals(x) p <- x$rank w <- x$weights N <- length(res) if (is.null(w)) { w <- rep.int(1, N) } else { excl <- w == 0 if (any(excl)) { res <- res[!excl] N <- length(res) w <- w[!excl] } } val <- 0.5 * (sum(log(w)) - N * (log(2 * pi) + 1 - log(N) + log(sum(w * res^2)))) attr(val, "nall") <- N attr(val, "nobs") <- N attr(val, "df") <- p + 1 class(val) <- "logLik" val } #' @export get_loglikelihood.svycoxph <- function(x, ...) { .loglikelihood_prep_output(x, lls = x$ll[2], df = x$degf.resid) } #' @export get_loglikelihood.crr <- function(x, ...) { x$loglik } #' @export get_loglikelihood.plm <- function(x, ...) { res <- get_residuals(x) w <- get_weights(x, null_as_ones = TRUE) N <- n_obs(x) ll <- 0.5 * (sum(log(w)) - N * (log(2 * pi) + 1 - log(N) + log(sum(w * res^2)))) .loglikelihood_prep_output(x, lls = ll, df = get_df(x, type = "model")) } #' @export get_loglikelihood.cpglm <- get_loglikelihood.plm # Helpers ----------------------------------------------------------------- .loglikelihood_prep_output <- function(x, lls = NA, df = NULL, ...) { # Prepare output if (all(is.na(lls))) { out <- stats::logLik(x, ...) attr(out, "per_obs") <- NA } else if (length(lls) == 1) { out <- lls } else { out <- sum(lls) attr(out, "per_obs") <- lls # This is useful for some models comparison tests } # Some attributes present in stats::logLik (not sure what nall does) attr(out, "nall") <- attr(out, "nobs") <- n_obs(x) # See https://stats.stackexchange.com/questions/393016/what-does-the-degree-of-freedom-df-mean-in-the-results-of-log-likelihood-logl if (is.null(df)) df <- get_df(x, type = "model") attr(out, "df") <- df # Make of same class as returned by stats::logLik(x) class(out) <- c("logLik", class(x)) out } insight/R/standardize_column_order.R0000644000175000017500000001256114151371702017463 0ustar nileshnilesh#' Standardize column order #' #' Standardizes order of columns for dataframes and other objects from #' *easystats* and *broom* ecosystem packages. #' #' @return A data frame, with standardized column order. 
#' #' @inheritParams standardize_names #' #' @examples #' # easystats conventions #' df1 <- cbind.data.frame( #' CI_low = -2.873, #' t = 5.494, #' CI_high = -1.088, #' p = 0.00001, #' Parameter = -1.980, #' CI = 0.95, #' df = 29.234, #' Method = "Student's t-test" #' ) #' #' standardize_column_order(df1, style = "easystats") #' #' # broom conventions #' df2 <- cbind.data.frame( #' conf.low = -2.873, #' statistic = 5.494, #' conf.high = -1.088, #' p.value = 0.00001, #' estimate = -1.980, #' conf.level = 0.95, #' df = 29.234, #' method = "Student's t-test" #' ) #' #' standardize_column_order(df2, style = "broom") #' @export standardize_column_order <- function(data, ...) { UseMethod("standardize_column_order") } #' @export standardize_column_order.default <- function(data, ...) { print_color(sprintf("Objects of class '%s' are currently not supported.\n", class(data)[1]), "red") invisible(data) } #' @rdname standardize_column_order #' @export standardize_column_order.parameters_model <- function(data, style = c("easystats", "broom"), ...) { style <- match.arg(style) # easystats -------------------------------- if (style == "easystats") { col_order <- c( # estimate "Parameter1", "Parameter2", "Parameter", "Mean_Parameter1", "Mean_Parameter2", "Mean_Group1", "Mean_Group2", "Coefficient", "r", "rho", "tau", "Estimate", "Median", "Mean", "MAP", "MAD", "Dxy", "Difference", "Psihat", "Trimmed_Mean", "R2", "Mu", # type of estimate "Group", "Component", "Response", "Effects", "Weight", # uncertainty "SE", "Std. Error", "SD", "Deviance_error", "CI", "CI_low", "CI_high", "Difference_CI_low", "Difference_CI_high", "CI_Method", "CI_Distribution", "CI_Iterations", "Sum_Squares", "Mean_Square", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS", # prior details "Prior_Distribution", "Prior_Location", "Prior_Scale", # test details "Method", "method", # statistic "t", "t value", "z", "z value", "F", "F value", "Chi2", "Chi2 value", "chisq", "Chisq", "chi-sq", "t / F", "z / Chisq", "z / Chi2", "W", "S", # degrees of freedom "df", "df_error", "df_residual", # p-value "p", "BF", "log_BF", # other details "Alternative", "n_Obs", # effectsize details "Effectsize", "d", "Cohens_d", "d_CI_low", "d_CI_high", "g", "Hedges_g", "g_CI_low", "g_CI_high", "Eta2", "Eta2_CI_low", "Eta2_CI_high", "Omega2", "Omega2_CI_low", "Omega2_CI_high", "Epsilon2", "Epsilon2_CI_low", "Epsilon2_CI_high", "Cramers_v", "Cramers_v_adjusted", "Cramers_CI_low", "Cramers_CI_high", "phi", "phi_adjusted", "phi_CI_low", "phi_CI_high", "r_rank_biserial", "rank_biserial_CI_low", "rank_biserial_CI_high", "rank_epsilon_squared", "rank_epsilon_squared_CI_low", "rank_epsilon_squared_CI_high", "Kendalls_W", "Kendalls_W_CI_low", "Kendalls_W_CI_high" ) } # broom ------------------------------------ if (style == "broom") { col_order <- c( # estimate "estimate", "mean.group1", "mean.group2", # type of estimate "group", "component", "response", "effects", "weight", # uncertainty "std.error", "std.dev", "conf.level", "conf.low", "conf.high", "conf.method", "conf.distribution", "conf.iterations", "sum.squares", "mean.square", "pd", "rope.percentage", "rhat", "ess", # prior details "prior.distribution", "prior.location", "prior.scale", # test details "method", # statistic "statistic", # degrees of freedom "df", "df.error", "df.residual", # p-value "p.value", "bayes.factor", "log(bayes.factor)", # other details "alternative", "n.obs", # effectsize details "effectsize", "d", "cohens.d", "d.conf.low", "d.conf.high", "g", "Hedges.g", 
"g.conf.low", "g.conf.high", "eta2", "eta2.conf.low", "eta2.conf.high", "omega2", "omega2.conf.low", "omega2.conf.high", "epsilon2", "epsilon2.conf.low", "epsilon2.conf.high", "cramers.v", "cramers.v.adjusted", "cramers.conf.low", "cramers.conf.high", "phi", "phi.adjusted", "phi.conf.low", "phi.conf.high", "r.rank.biserial", "rank.biserial.conf.low", "rank.biserial.conf.high", "rank.epsilon.squared", "rank.epsilon.squared.conf.low", "rank.epsilon.squared.conf.high", "kendalls.w", "kendalls.w.conf.low", "kendalls.w.conf.high" ) } data[union(intersect(col_order, names(data)), names(data))] } #' @export standardize_column_order.effectsize_table <- standardize_column_order.parameters_model #' @export standardize_column_order.data.frame <- standardize_column_order.parameters_model #' @export standardize_column_order.parameters_distribution <- standardize_column_order.parameters_model insight/R/has_intercept.R0000644000175000017500000000232514077731203015233 0ustar nileshnilesh#' @title Checks if model has an intercept #' @name has_intercept #' #' @description Checks if model has an intercept. #' #' @param x A model object. #' @param verbose Toggle warnings. #' #' @return `TRUE` if `x` has an intercept, `FALSE` otherwise. #' #' @examples #' model <- lm(mpg ~ 0 + gear, data = mtcars) #' has_intercept(model) #' #' model <- lm(mpg ~ gear, data = mtcars) #' has_intercept(model) #' #' if (require("lme4")) { #' model <- lmer(Reaction ~ 0 + Days + (Days | Subject), data = sleepstudy) #' has_intercept(model) #' #' model <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) #' has_intercept(model) #' } #' @export has_intercept <- function(x, verbose = TRUE) { f <- find_formula(x) if (is_multivariate(x)) { lapply(f, .check_for_intercept, verbose = verbose) } else { .check_for_intercept(f, verbose) } } .check_for_intercept <- function(f, verbose = TRUE) { if (!is.null(f$conditional)) { f_terms <- stats::terms(f$conditional) intercept <- as.vector(attr(f_terms, "intercept")) return(intercept == 1) } else if (verbose) { warning(format_message("Cannot extract terms from model formula."), call. = FALSE) } } insight/R/get_data.R0000644000175000017500000010220614163322634014152 0ustar nileshnilesh#' @title Get the data that was used to fit the model #' @name get_data #' #' @description This functions tries to get the data that was used to fit the #' model and returns it as data frame. #' #' @param effects Should model data for fixed effects, random effects #' or both be returned? Only applies to mixed models. #' @param verbose Toggle messages and warnings. #' #' @inheritParams find_predictors #' @inheritParams find_formula #' #' @return The data that was used to fit the model. #' #' @note Unlike `model.frame()`, which may contain transformed variables #' (e.g. if `poly()` or `scale()` was used inside the formula to #' specify the model), `get_data()` aims at returning the "original", #' untransformed data (if possible). Consequently, column names are changed #' accordingly, i.e. `"log(x)"` will become `"x"` etc. for all data #' columns with transformed values. #' #' @examples #' if (require("lme4")) { #' data(cbpp, package = "lme4") #' cbpp$trials <- cbpp$size - cbpp$incidence #' m <- glm(cbind(incidence, trials) ~ period, data = cbpp, family = binomial) #' head(get_data(m)) #' } #' @export get_data <- function(x, ...) { UseMethod("get_data") } # default method ------------------------------------------------------ #' @export get_data.default <- function(x, verbose = TRUE, ...) 
{ if (inherits(x, "list") && .obj_has_name(x, "gam")) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) } mf <- tryCatch( { if (inherits(x, "Zelig-relogit")) { .get_zelig_relogit_frame(x) } else { stats::model.frame(x) } }, error = function(x) { NULL } ) if (is.null(mf)) { mf <- tryCatch( { dat <- .recover_data_from_environment(x) vars <- find_variables(x, flatten = TRUE, verbose = FALSE) dat[, intersect(vars, colnames(dat)), drop = FALSE] }, error = function(x) { NULL } ) } .prepare_get_data(x, mf, verbose = verbose) } #' @export get_data.data.frame <- function(x, ...) { x } #' @export get_data.summary.lm <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { .recover_data_from_environment(x)[, all.vars(x$terms), drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, mf, verbose = verbose) } #' @export get_data.model_fit <- function(x, verbose = TRUE, ...) { get_data(x$fit, verbose = verbose, ...) } # classical and survival models ----------------------------------------------- #' @export get_data.mjoint <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { dat <- x$data[[1]] data_columns <- intersect( colnames(dat), unique(c( find_response(x, combine = FALSE, component = "all"), find_variables(x, flatten = TRUE, verbose = FALSE) )) ) dat[, data_columns, drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @rdname get_data #' @export get_data.gee <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- tryCatch( { dat <- .recover_data_from_environment(x) vars <- switch(effects, all = find_variables(x, flatten = TRUE, verbose = FALSE), fixed = find_variables(x, effects = "fixed", flatten = TRUE, verbose = FALSE), random = find_random(x, flatten = TRUE) ) dat[, intersect(vars, colnames(dat)), drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), effects = effects, verbose = verbose) } #' @rdname get_data #' @export get_data.rqss <- function(x, component = c("all", "conditional", "smooth_terms"), verbose = TRUE, ...) { component <- match.arg(component) mf <- tryCatch( { dat <- .recover_data_from_environment(x) vars <- find_variables( x, effects = "all", component = component, flatten = TRUE, verbose = FALSE ) dat[, intersect(vars, colnames(dat)), drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @export get_data.gls <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { dat <- .recover_data_from_environment(x) data_columns <- intersect( colnames(dat), find_variables(x, flatten = TRUE, verbose = FALSE) ) dat[, data_columns, drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @export get_data.survfit <- get_data.gls #' @export get_data.aareg <- get_data.gls #' @export get_data.complmrob <- get_data.gls #' @export get_data.nlrq <- get_data.gls #' @export get_data.robmixglm <- get_data.gls #' @export get_data.selection <- get_data.gls # if ("lm" %in% names(x)) { # suppressWarnings(get_data(x$lm, verbose = verbose)) # } else if (!is.null(x$twoStep$lm)) { # suppressWarnings(get_data(x$twoStep$lm, verbose = verbose)) # } else { # NULL # } #' @export get_data.lqmm <- function(x, verbose = TRUE, ...) 
{ mf <- tryCatch( { x$mfArgs$data }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @export get_data.nls <- get_data.gls #' @export get_data.gnls <- get_data.gls # zero-inflated models ------------------------------------------------------- #' @rdname get_data #' @export get_data.hurdle <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = TRUE, ...) { component <- match.arg(component) .return_zeroinf_data(x, component, verbose = verbose) } #' @export get_data.zeroinfl <- get_data.hurdle #' @export get_data.zerotrunc <- get_data.hurdle #' @rdname get_data #' @export get_data.zcpglm <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), verbose = TRUE, ...) { component <- match.arg(component) mf <- stats::model.frame(x) mf_zero <- mf$zero mf_tweedie <- mf$tweedie # zcpglm saves variables twice, once in the model frame for zero-inflated # model and once for the tweedie-model. we now need to remove duplicates cn <- setdiff(colnames(mf$zero), colnames(mf$tweedie)) if (length(cn)) { mf_zero <- mf_zero[cn] } else { mf_zero <- NULL } mf <- switch(component, "all" = do.call(cbind, .compact_list(list(mf_tweedie, mf_zero))), "conditional" = mf_tweedie, "zi" = , "zero_inflated" = mf_zero ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } # mixed models ------------------------------------------------------------- #' @rdname get_data #' @export get_data.glmmTMB <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) model.terms <- find_variables( x, effects = "all", component = "all", flatten = FALSE, verbose = FALSE ) mf <- tryCatch( { stats::model.frame(x) }, error = function(x) { NULL } ) mf <- .prepare_get_data(x, mf, effects, verbose = verbose) # add variables from other model components mf <- .add_zeroinf_data(x, mf, model.terms$dispersion) mf <- .add_zeroinf_data(x, mf, model.terms$zero_inflated) mf <- .add_zeroinf_data(x, mf, model.terms$zero_inflated_random) .return_combined_data(x, mf, effects, component, model.terms, verbose = verbose) } #' @rdname get_data #' @export get_data.merMod <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- tryCatch( { switch(effects, fixed = stats::model.frame(x, fixed.only = TRUE), all = stats::model.frame(x, fixed.only = FALSE), random = stats::model.frame(x, fixed.only = FALSE)[, find_random(x, split_nested = TRUE, flatten = TRUE), drop = FALSE] ) }, error = function(x) { NULL } ) .prepare_get_data(x, mf, effects, verbose = verbose) } #' @export get_data.merModList <- function(x, effects = c("all", "fixed", "random"), ...) { warning("Can't access data for 'merModList' objects.", call. = FALSE) return(NULL) } #' @export get_data.MANOVA <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- tryCatch( { switch(effects, fixed = .remove_column(x$input$data, x$input$subject), all = x$input$data, random = x$input$data[, x$input$subject, drop = FALSE] ) }, error = function(x) { NULL } ) .prepare_get_data(x, mf, effects, verbose = verbose) } #' @export get_data.RM <- get_data.MANOVA #' @export get_data.cpglmm <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) 
{ effects <- match.arg(effects) dat <- stats::model.frame(x) mf <- tryCatch( { switch(effects, fixed = dat[, find_predictors(x, effects = "fixed", flatten = TRUE, verbose = FALSE), drop = FALSE], all = dat, random = dat[, find_random(x, split_nested = TRUE, flatten = TRUE), drop = FALSE] ) }, error = function(x) { NULL } ) .prepare_get_data(x, mf, effects, verbose = verbose) } #' @export get_data.HLfit <- get_data.cpglmm #' @export get_data.glmm <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) dat <- get_data.default(x, verbose = verbose) mf <- tryCatch( { switch(effects, fixed = dat[, find_predictors(x, effects = "fixed", flatten = TRUE, verbose = FALSE ), drop = FALSE], all = dat, random = dat[, find_random(x, split_nested = TRUE, flatten = TRUE), drop = FALSE] ) }, error = function(x) { NULL } ) } #' @export get_data.mixor <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- tryCatch( { switch(effects, fixed = stats::model.frame(x), all = cbind(stats::model.frame(x), x$id), random = data.frame(x$id) ) }, error = function(x) { NULL } ) fix_cn <- which(colnames(mf) %in% c("x.id", "x$id")) colnames(mf)[fix_cn] <- .safe_deparse(x$call$id) .prepare_get_data(x, mf, effects, verbose = verbose) } #' @rdname get_data #' @export get_data.glmmadmb <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) fixed_data <- x$frame random_data <- .recover_data_from_environment(x)[, find_random(x, split_nested = TRUE, flatten = TRUE), drop = FALSE] mf <- tryCatch( { switch(effects, fixed = fixed_data, all = cbind(fixed_data, random_data), random = random_data ) }, error = function(x) { NULL } ) .prepare_get_data(x, mf, effects, verbose = verbose) } #' @rdname get_data #' @export get_data.rlmerMod <- function(x, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) .get_data_from_modelframe(x, stats::model.frame(x), effects) } #' @rdname get_data #' @export get_data.clmm <- get_data.rlmerMod #' @rdname get_data #' @export get_data.mixed <- function(x, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) .get_data_from_modelframe(x, x$data, effects) } #' @export #' @rdname get_data #' @param shape Return long or wide data? Only applicable in repeated measures #' designs. get_data.afex_aov <- function(x, shape = c("long", "wide"), ...) { if (!length(attr(x, "within"))) { shape <- "long" } else { shape <- match.arg(shape) } x$data[[shape]] } #' @export get_data.sem <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- tryCatch( { dat <- .recover_data_from_environment(x) vars <- switch(effects, all = find_variables(x, flatten = TRUE, verbose = FALSE), fixed = find_variables(x, effects = "fixed", flatten = TRUE, verbose = FALSE), random = find_random(x, flatten = TRUE) ) dat[, intersect(vars, colnames(dat)), drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), effects, verbose = verbose) } #' @rdname get_data #' @export get_data.lme <- function(x, effects = c("all", "fixed", "random"), ...) 
{ effects <- match.arg(effects) dat <- tryCatch( { x$data }, error = function(x) { NULL } ) stats::na.omit(.get_data_from_modelframe(x, dat, effects)) } #' @rdname get_data #' @export get_data.MixMod <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) tryCatch( { fitfram <- stats::model.frame(x, type = "fixed") fitfram_re <- stats::model.frame(x, type = "random") fitfram_zi <- stats::model.frame(x, type = "zi_fixed") fitfram_zi_re <- stats::model.frame(x, type = "zi_random") if (!.is_empty_object(fitfram_re)) { for (i in 1:length(fitfram_re)) { fitfram <- .merge_dataframes(fitfram_re[[i]], fitfram, replace = TRUE) } } if (!.is_empty_object(fitfram_zi)) { fitfram <- .merge_dataframes(fitfram_zi, fitfram, replace = TRUE) } if (!.is_empty_object(fitfram_zi_re)) { for (i in 1:length(fitfram_zi_re)) { fitfram <- .merge_dataframes(fitfram_zi_re[[i]], fitfram, replace = TRUE) } } fitfram$grp__id <- unlist(x$id) colnames(fitfram)[ncol(fitfram)] <- x$id_name[1] # test... fitfram <- .prepare_get_data(x, fitfram, effects, verbose = verbose) model.terms <- find_variables( x, effects = "all", component = "all", flatten = FALSE, verbose = FALSE ) .return_combined_data(x, mf = fitfram, effects, component, model.terms, verbose = verbose) }, error = function(x) { NULL } ) } #' @export get_data.BBmm <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- tryCatch( { dat <- .recover_data_from_environment(x)[, find_variables(x, flatten = TRUE), drop = FALSE] switch(effects, all = dat[, find_variables(x, flatten = TRUE), drop = FALSE], fixed = dat[, find_variables(x, effects = "fixed", flatten = TRUE), drop = FALSE], random = dat[, find_random(x, flatten = TRUE), drop = FALSE] ) }, error = function(x) { x$X } ) .prepare_get_data(x, stats::na.omit(mf), effects, verbose = verbose) } #' @export get_data.glimML <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) dat <- x@data mf <- switch(effects, all = dat[, find_variables(x, flatten = TRUE), drop = FALSE], fixed = dat[, find_variables(x, effects = "fixed", flatten = TRUE), drop = FALSE], random = dat[, find_random(x, flatten = TRUE), drop = FALSE] ) .prepare_get_data(x, stats::na.omit(mf), effects, verbose = verbose) } # sem models ------------------------------------- #' @export get_data.lavaan <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { .get_S4_data_from_env(x) }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @export get_data.blavaan <- get_data.lavaan # additive models (gam) ------------------------------------- #' @export get_data.vgam <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { get(x@misc$dataname, envir = parent.frame())[, find_variables(x, flatten = TRUE), drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, mf, verbose = verbose) } #' @export get_data.gamm <- function(x, verbose = TRUE, ...) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) mf <- stats::model.frame(x) .prepare_get_data(x, mf, verbose = verbose) } #' @export get_data.gamlss <- function(x, verbose = TRUE, ...) 
{ mf <- tryCatch( { elements <- c("mu", "sigma", "nu", "tau") mf_list <- .compact_list(lapply(elements, function(e) { if (paste0(e, ".x") %in% names(x)) { stats::model.frame(x, what = e) } else { NULL } })) mf_data <- mf_list[[1]] if (length(mf_list) > 1) { for (i in 2:length(mf_list)) { cn <- setdiff(colnames(mf_list[[i]]), colnames(mf_data)) if (length(cn)) mf_data <- cbind(mf_data, mf_list[[i]][, cn, drop = FALSE]) } } mf_data }, error = function(x) { NULL } ) .prepare_get_data(x, mf, effects = "all", verbose = verbose) } # fixed effects and panel regression -------------------------------------- #' @export get_data.felm <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) .get_data_from_modelframe(x, stats::model.frame(x), effects, verbose = verbose) } #' @export get_data.feis <- function(x, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) mf <- tryCatch( { .recover_data_from_environment(x) }, error = function(x) { stats::model.frame(x) } ) .get_data_from_modelframe(x, mf, effects) } #' @export get_data.fixest <- function(x, ...) { mf <- .recover_data_from_environment(x) .get_data_from_modelframe(x, mf, effects = "all") } #' @export get_data.feglm <- function(x, ...) { mf <- as.data.frame(x$data) .get_data_from_modelframe(x, mf, effects = "all") } #' @export get_data.pgmm <- function(x, verbose = TRUE, ...) { model_terms <- find_variables(x, effects = "all", component = "all", flatten = TRUE) mf <- tryCatch( { .recover_data_from_environment(x)[, model_terms, drop = FALSE] }, error = function(x) { NULL } ) .prepare_get_data(x, mf, verbose = verbose) } #' @export get_data.plm <- function(x, verbose = TRUE, ...) { mf <- stats::model.frame(x) model_terms <- find_variables(x, effects = "all", component = "all", flatten = TRUE) cn <- colnames(mf) mf <- as.data.frame(lapply(mf, as.vector)) colnames(mf) <- clean_names(cn) # find index variables index <- eval(parse(text = .safe_deparse(x$call))[[1]]$index) # try to get index variables from orignal data if (!is.null(index)) { original_data <- .recover_data_from_environment(x) keep <- intersect(index, colnames(original_data)) if (length(keep)) { mf <- cbind(mf, original_data[, keep, drop = FALSE]) model_terms <- c(model_terms, keep) } } .prepare_get_data(x, mf[, model_terms, drop = FALSE], verbose = verbose) } #' @export get_data.wbm <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- stats::model.frame(x) # dat <- as.data.frame(x@orig_data) if (effects == "random") { return(stats::na.omit(mf[, unique(find_random(x, split_nested = TRUE, flatten = TRUE)), drop = FALSE])) } resp.col <- which(colnames(mf) == find_response(x)) mf <- mf[, c(resp.col, (1:ncol(mf))[-resp.col])] .prepare_get_data(x, stats::na.omit(mf), effects, verbose = verbose) } #' @export get_data.wbgee <- get_data.wbm #' @export get_data.ivreg <- function(x, verbose = TRUE, ...) { mf <- stats::model.frame(x) cn <- clean_names(colnames(mf)) ft <- find_variables(x, flatten = TRUE) remain <- setdiff(ft, cn) if (.is_empty_object(remain)) { final_mf <- mf } else { final_mf <- tryCatch( { dat <- .recover_data_from_environment(x) cbind(mf, dat[, remain, drop = FALSE]) }, error = function(x) { NULL } ) } .prepare_get_data(x, stats::na.omit(final_mf), verbose = verbose) } #' @export get_data.iv_robust <- get_data.ivreg #' @export get_data.ivprobit <- function(x, verbose = TRUE, ...) 
{ .prepare_get_data(x, stats::na.omit(as.data.frame(x$mr1)), verbose = verbose) } #' @export get_data.bife <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- as.data.frame(x$data) if (effects == "random") { return(stats::na.omit(mf[, unique(find_random(x, split_nested = TRUE, flatten = TRUE)), drop = FALSE])) } else if (effects == "fixed") { mf <- mf[, setdiff(colnames(mf), unique(find_random(x, split_nested = TRUE, flatten = TRUE))), drop = FALSE] } .prepare_get_data(x, stats::na.omit(mf), effects, verbose = verbose) } # Bayesian regression --------------------------------------------------- #' @rdname get_data #' @export get_data.brmsfit <- function(x, effects = "all", component = "all", verbose = TRUE, ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) component <- match.arg(component, choices = c("all", .all_elements())) model.terms <- find_variables(x, effects = "all", component = "all", flatten = FALSE) mf <- stats::model.frame(x) if (.is_multi_membership(x)) { model.terms <- lapply(model.terms, .clean_brms_mm) rs <- setdiff(unname(unlist(find_random_slopes(x))), unname(unlist(model.terms))) if (!.is_empty_object(rs)) model.terms$random <- c(rs, model.terms$random) } .return_combined_data( x, .prepare_get_data(x, mf, effects = effects, verbose = verbose), effects, component, model.terms, is_mv = is_multivariate(x), verbose = verbose ) } #' @rdname get_data #' @export get_data.stanreg <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) model.terms <- find_variables(x, effects = "all", component = "all", flatten = FALSE ) mf <- stats::model.frame(x) .return_combined_data( x, .prepare_get_data(x, mf, effects = effects, verbose = verbose), effects, component = "all", model.terms, is_mv = is_multivariate(x), verbose = verbose ) } #' @export get_data.BFBayesFactor <- function(x, ...) { x@data } #' @rdname get_data #' @export get_data.MCMCglmm <- function(x, effects = c("all", "fixed", "random"), verbose = TRUE, ...) { effects <- match.arg(effects) mf <- tryCatch( { env_dataframes <- names(which(unlist(eapply(.GlobalEnv, is.data.frame)))) pv <- find_predictors(x, effects = effects, component = "all", flatten = TRUE) matchframe <- unlist(lapply(env_dataframes, function(.x) { dat <- get(.x) all(pv %in% colnames(dat)) })) mf <- env_dataframes[matchframe][1] if (!is.na(mf)) { dat <- get(mf) switch(effects, fixed = dat[, setdiff(colnames(dat), find_random(x, flatten = TRUE)), drop = FALSE], all = dat, random = dat[, find_random(x, flatten = TRUE), drop = FALSE] ) } else { NULL } }, error = function(x) { NULL } ) .prepare_get_data(x, mf, effects = effects, verbose = verbose) } #' @export get_data.stanmvreg <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { out <- data.frame() for (i in stats::model.frame(x)) { out <- .merge_dataframes(out, i) } out }, error = function(x) { NULL } ) .prepare_get_data(x, mf, verbose = verbose) } # mfx models ------------------------------------------------------ #' @export get_data.betamfx <- function(x, ...) { get_data(x$fit, ...) 
} #' @export get_data.betaor <- get_data.betamfx #' @export get_data.logitor <- get_data.betamfx #' @export get_data.poissonirr <- get_data.betamfx #' @export get_data.negbinirr <- get_data.betamfx #' @export get_data.logitmfx <- get_data.betamfx #' @export get_data.poissonmfx <- get_data.betamfx #' @export get_data.probitmfx <- get_data.betamfx #' @export get_data.negbinmfx <- get_data.betamfx # other models ------------------------------------------------------ #' @export get_data.svy_vglm <- function(x, verbose = TRUE, ...) { mf <- x$design$variables[find_variables(x, flatten = TRUE)] .prepare_get_data(x, mf, verbose = verbose) } #' @export get_data.mediate <- function(x, ...) { d1 <- get_data(x$model.m) d2 <- get_data(x$model.y) merge(d1, d2, sort = FALSE, all = TRUE) } #' @export get_data.mle2 <- function(x, ...) { as.data.frame(do.call(cbind, x@data)) } #' @export get_data.mle <- get_data.mle2 #' @export get_data.glht <- function(x, ...) { get_data(x$model, ...) } #' @export get_data.averaging <- function(x, ...) { ml <- attributes(x)$modelList if (is.null(ml)) { warning("Can't retrieve data. Please use 'fit = TRUE' in 'model.avg()'.", call. = FALSE) return(NULL) } mf <- tryCatch( { Reduce(function(x, y) merge(x, y, all = TRUE, sort = FALSE), lapply(ml, stats::model.frame)) }, error = function(x) { NULL } ) if (is.null(mf)) { mf <- tryCatch( { .recover_data_from_environment(x)[, find_variables(x, flatten = TRUE), drop = FALSE] }, error = function(x) { NULL } ) } .prepare_get_data(x, mf) } #' @export get_data.Arima <- function(x, ...) { # first try, parent frame dat <- tryCatch( { eval(x$call$x, envir = parent.frame()) }, error = function(e) { NULL } ) if (is.null(dat)) { # second try, global env dat <- tryCatch( { eval(x$call$x, envir = globalenv()) }, error = function(e) { NULL } ) } dat } #' @export get_data.BGGM <- function(x, ...) { x$Y } #' @export get_data.mcmc.list <- function(x, ...) { NULL } #' @export get_data.DirichletRegModel <- function(x, verbose = TRUE, ...) { mf <- x$data resp <- sapply(x$data, inherits, "DirichletRegData") .prepare_get_data(x, mf[!resp], verbose = verbose) } #' @export get_data.vglm <- function(x, ...) { mf <- tryCatch( { if (!length(x@model)) { env <- environment(x@terms$terms) if (is.null(env)) env <- parent.frame() fcall <- x@call fcall$method <- "model.frame" fcall$smart <- FALSE eval(fcall, env, parent.frame()) } else { x@model } }, error = function(x) { NULL } ) .prepare_get_data(x, mf) } #' @export get_data.biglm <- function(x, ...) { mf <- stats::model.frame(x) .prepare_get_data(x, mf) } #' @export get_data.bigglm <- get_data.biglm #' @export get_data.LORgee <- function(x, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) mf <- tryCatch( { dat <- .recover_data_from_environment(x)[, find_variables(x, flatten = TRUE), drop = FALSE] switch(effects, all = dat[, find_variables(x, flatten = TRUE), drop = FALSE], fixed = dat[, find_variables(x, effects = "fixed", flatten = TRUE), drop = FALSE], random = dat[, find_random(x, flatten = TRUE), drop = FALSE] ) }, error = function(x) { stats::model.frame(x) } ) .prepare_get_data(x, stats::na.omit(mf), effects = effects) } #' @export get_data.gmnl <- function(x, ...) { mf <- tryCatch( { x$mf }, error = function(x) { NULL } ) .prepare_get_data(x, mf) } #' @export get_data.gbm <- function(x, ...) 
{ mf <- tryCatch( { get(.safe_deparse(x$call$data), envir = parent.frame())[, find_variables(x, flatten = TRUE), drop = FALSE] }, error = function(x) { stats::model.frame(x) } ) .get_data_from_modelframe(x, mf, effects = "all") } #' @export get_data.tobit <- function(x, verbose = TRUE, ...) { dat <- .recover_data_from_environment(x) ft <- find_variables(x, flatten = TRUE, verbose = FALSE) remain <- intersect(ft, colnames(dat)) .prepare_get_data(x, stats::na.omit(dat[, remain, drop = FALSE]), verbose = verbose) } #' @export get_data.clmm2 <- function(x, ...) { mf <- tryCatch( { data_complete <- x$location data_scale <- x$scale if (!is.null(data_scale)) { remain <- setdiff(colnames(data_scale), colnames(data_complete)) if (length(remain)) data_complete <- cbind(data_complete, data_scale[, remain, drop = FALSE]) } data_complete <- cbind(data_complete, x$grFac) colnames(data_complete)[ncol(data_complete)] <- unlist(.find_random_effects(x, f = find_formula(x, verbose = FALSE), split_nested = TRUE)) data_complete }, error = function(x) { NULL } ) .prepare_get_data(x, mf) } #' @export get_data.clm2 <- function(x, ...) { mf <- tryCatch( { data_complete <- x$location data_scale <- x$scale if (!is.null(data_scale)) { remain <- setdiff(colnames(data_scale), colnames(data_complete)) if (length(remain)) data_complete <- cbind(data_complete, data_scale[, remain, drop = FALSE]) } data_complete }, error = function(x) { NULL } ) .prepare_get_data(x, mf) } #' @export get_data.bracl <- function(x, verbose = TRUE, ...) { mf <- stats::model.frame(x) suppressWarnings(.prepare_get_data(x, mf, verbose = verbose)) } #' @export get_data.mlogit <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { as.data.frame(stats::model.frame(x)) }, error = function(x) { NULL } ) .prepare_get_data(x, mf, verbose = verbose) } #' @export get_data.rma <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { .recover_data_from_environment(x) }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @export get_data.metaplus <- get_data.rma #' @export get_data.meta_random <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { x$data$data }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @export get_data.meta_bma <- function(x, verbose = TRUE, ...) { mf <- tryCatch( { x$meta$fixed$data$data }, error = function(x) { NULL } ) .prepare_get_data(x, stats::na.omit(mf), verbose = verbose) } #' @export get_data.meta_fixed <- get_data.meta_random #' @export get_data.ivFixed <- get_data.rma #' @export get_data.bfsl <- function(x, ...) { as.data.frame(x$data[c("x", "y", "sd_x", "sd_y")]) } #' @export get_data.mipo <- function(x, ...) { tryCatch( { models <- eval(x$call$object) get_data(models$analyses[[1]], ...) }, error = function(e) { NULL } ) } #' @export get_data.htest <- function(x, ...) { out <- NULL if (!is.null(x$data.name)) { out <- .retrieve_htest_data(x) } out } insight/R/print_parameters.R0000644000175000017500000002706214077615665016004 0ustar nileshnilesh#' @title Prepare summary statistics of model parameters for printing #' @name print_parameters #' #' @description This function takes a data frame, typically a data frame with #' information on summaries of model parameters like [bayestestR::describe_posterior()], #' [bayestestR::hdi()] or [parameters::model_parameters()], #' as input and splits this information into several parts, depending on the #' model. See details below. 
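#'
#' As a rough sketch of the typical workflow (hypothetical code, assuming the
#' *parameters* package is installed): any data frame that contains a
#' `Parameter` column plus additional summary statistics can be passed along
#' with the model, e.g.
#'
#' ```r
#' model <- lm(mpg ~ wt + cyl, data = mtcars)
#' # illustrative only: any data frame with a "Parameter" column works here
#' param_stats <- as.data.frame(parameters::model_parameters(model))
#' print_parameters(model, param_stats)
#' ```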
#' #' @param x A fitted model, or a data frame returned by [clean_parameters()]. #' @param ... One or more objects (data frames), which contain information about #' the model parameters and related statistics (like confidence intervals, HDI, #' ROPE, ...). #' @param split_by `split_by` should be a character vector with one or #' more of the following elements: `"Effects"`, `"Component"`, #' `"Response"` and `"Group"`. These are the column names returned #' by [clean_parameters()], which is used to extract the information #' from which the group or component model parameters belong. If `NULL`, the #' merged data frame is returned. Else, the data frame is split into a list, #' split by the values from those columns defined in `split_by`. #' @param format Name of output-format, as string. If `NULL` (or `"text"`), #' assumed use for output is basic printing. If `"markdown"`, markdown-format #' is assumed. This only affects the style of title- and table-caption attributes, #' which are used in [export_table()]. #' @param parameter_column String, name of the column that contains the #' parameter names. Usually, for data frames returned by functions the #' easystats-packages, this will be `"Parameter"`. #' @param keep_parameter_column Logical, if `TRUE`, the data frames in the #' returned list have both a `"Cleaned_Parameter"` and `"Parameter"` #' column. If `FALSE`, the (unformatted) `"Parameter"` is removed, #' and the column with cleaned parameter names (`"Cleaned_Parameter"`) is #' renamed into `"Parameter"`. #' @param remove_empty_column Logical, if `TRUE`, columns with completely #' empty character values will be removed. #' @param titles,subtitles By default, the names of the model components (like #' fixed or random effects, count or zero-inflated model part) are added as #' attributes `"table_title"` and `"table_subtitle"` to each list #' element returned by `print_parameters()`. These attributes are then #' extracted and used as table (sub) titles in [export_table()]. #' Use `titles` and `subtitles` to override the default attribute #' values for `"table_title"` and `"table_subtitle"`. `titles` #' and `subtitles` may be any length from 1 to same length as returned #' list elements. If `titles` and `subtitles` are shorter than #' existing elements, only the first default attributes are overwritten. #' #' @return A data frame or a list of data frames (if `split_by` is not `NULL`). #' If a list is returned, the element names reflect the model components where the #' extracted information in the data frames belong to, e.g. ``random.zero_inflated.Intercept: persons``. #' This is the data frame that contains the parameters for the random effects from #' group-level "persons" from the zero-inflated model component. #' #' @details This function prepares data frames that contain information #' about model parameters for clear printing. #' \cr \cr #' First, `x` is required, which should either be a model object or a #' prepared data frame as returned by [clean_parameters()]. If #' `x` is a model, `clean_parameters()` is called on that model #' object to get information with which model components the parameters #' are associated. #' \cr \cr #' Then, `...` take one or more data frames that also contain information #' about parameters from the same model, but also have additional information #' provided by other methods. 
For instance, a data frame in `...` might #' be the result of, for instance, [bayestestR::describe_posterior()], #' or [parameters::model_parameters()], where we have a) a #' `Parameter` column and b) columns with other parameter values (like #' CI, HDI, test statistic, etc.). #' \cr \cr #' Now we have a data frame with model parameters and information about the #' association to the different model components, a data frame with model #' parameters, and some summary statistics. `print_parameters()` #' then merges these data frames, so the parameters or statistics of interest #' are also associated with the different model components. The data #' frame is split into a list, so for a clear printing. Users can loop over this #' list and print each component for a better overview. Further, parameter #' names are "cleaned", if necessary, also for a cleaner print. See also 'Examples'. #' #' @examples #' \dontrun{ #' library(bayestestR) #' model <- download_model("brms_zi_2") #' x <- hdi(model, effects = "all", component = "all") #' #' # hdi() returns a data frame; here we use only the #' # information on parameter names and HDI values #' tmp <- as.data.frame(x)[, 1:4] #' tmp #' #' # Based on the "split_by" argument, we get a list of data frames that #' # is split into several parts that reflect the model components. #' print_parameters(model, tmp) #' #' # This is the standard print()-method for "bayestestR::hdi"-objects. #' # For printing methods, it is easy to print complex summary statistics #' # in a clean way to the console by splitting the information into #' # different model components. #' x #' } #' @export print_parameters <- function(x, ..., split_by = c("Effects", "Component", "Group", "Response"), format = "text", parameter_column = "Parameter", keep_parameter_column = TRUE, remove_empty_column = FALSE, titles = NULL, subtitles = NULL) { obj <- list(...) # save attributes of original object att <- do.call(c, .compact_list(lapply(obj, function(i) { a <- attributes(i) a$names <- a$class <- a$row.names <- NULL a }))) att <- att[!duplicated(names(att))] # get cleaned parameters cp <- if (!inherits(x, "clean_parameters")) { clean_parameters(x) } else { x } # merge all objects together obj <- Reduce( function(x, y) { # check for valid column name if (parameter_column != "Parameter" && parameter_column %in% colnames(y) && !"Parameter" %in% colnames(y)) { colnames(y)[colnames(y) == parameter_column] <- "Parameter" } merge_by <- unique(c("Parameter", intersect(colnames(y), intersect(c("Effects", "Component", "Group", "Response"), colnames(x))))) merge(x, y, all.x = FALSE, by = merge_by, sort = FALSE) }, c(list(cp), obj) ) # return merged data frame if no splitting requested if (.is_empty_object(split_by)) { return(obj) } # determine where to split data frames split_by <- split_by[split_by %in% colnames(obj)] f <- lapply(split_by, function(i) { if (i %in% colnames(obj)) obj[[i]] }) names(f) <- split_by # split into groups, remove empty elements out <- split(obj, f) out <- .compact_list(lapply(out, function(i) { if (nrow(i) > 0) i })) # remove trailing dots names(out) <- list_names <- gsub("(.*)\\.$", "\\1", names(out)) has_zeroinf <- any(grepl("zero_inflated", names(out), fixed = TRUE)) # create title attributes, and remove unnecessary columns from output out <- lapply(names(out), function(i) { # init title variables title1 <- title2 <- "" # get data frame element <- out[[i]] # split name at ".", so we have all components the data frame refers to (i.e. 
# fixed/random, conditional/zero-inflated, group-lvl or random slope etc.) # as character vector parts <- unlist(strsplit(i, ".", fixed = TRUE)) # iterate all parts of the component names, to create title attribute for (j in 1:length(parts)) { # Rename "fixed", "random" etc. into proper titles. Here we have the # "Main title" of a subcomponent (like "Random effects") if (parts[j] %in% c("fixed", "random") || (has_zeroinf && parts[j] %in% c("conditional", "zero_inflated"))) { tmp <- switch(parts[j], "fixed" = "Fixed effects", "random" = "Random effects", "dispersion" = "Dispersion", "conditional" = "(conditional)", "zero_inflated" = "(zero-inflated)" ) title1 <- paste0(title1, " ", tmp) } else if (!parts[j] %in% c("conditional", "zero_inflated")) { # here we have the "subtitles" of a subcomponent # (like "Intercept: Group-Level 1") tmp <- switch(parts[j], "simplex" = "(monotonic effects)", parts[j] ) title2 <- paste0(title2, " ", tmp) } } .effects <- unique(element$Effects) .component <- unique(element$Component) .group <- unique(element$Group) # we don't need "Effects" and "Component" column any more, and probably # also no longer the "Group" column columns_to_remove <- c("Effects", "Component", "Cleaned_Parameter") if (.n_unique(.group) == 1) { columns_to_remove <- c(columns_to_remove, "Group") } else { .group <- NULL } keep <- setdiff(colnames(element), columns_to_remove) element <- element[, c("Cleaned_Parameter", keep)] # if we had a pretty_names attributes in the original object, # match parameters of pretty names here, and add this attributes # to each element here... if ("pretty_names" %in% names(att)) { attr(element, "pretty_names") <- stats::setNames(att$pretty_names[element$Parameter], element$Cleaned_Parameter) } # keep or remove old parameter column? if (!isTRUE(keep_parameter_column)) { element$Parameter <- NULL colnames(element)[colnames(element) == "Cleaned_Parameter"] <- "Parameter" } # remove empty columns if (isTRUE(remove_empty_column)) { for (j in colnames(element)) { if (all(is.na(element[[j]])) || (is.character(element[[j]]) && all(element[[j]] == ""))) { element[[j]] <- NULL } } } # for sub-table titles if (is.null(format) || format == "text") { title_prefix <- "# " } else { title_prefix <- "" } title1 <- .capitalize(title1) title2 <- .capitalize(title2) # add attributes attr(element, "main_title") <- .trim(title1) attr(element, "sub_title") <- .trim(title2) if (is.null(format) || format == "text") { attr(element, "table_caption") <- c(paste0(title_prefix, .trim(title1)), "blue") attr(element, "table_subtitle") <- c(.trim(title2), "blue") } else { attr(element, "table_caption") <- .trim(title1) attr(element, "table_subtitle") <- .trim(title2) } attr(element, "Effects") <- .effects attr(element, "Component") <- .component attr(element, "Group") <- .group element }) # override titles? if (!is.null(titles) && length(titles) <= length(out)) { for (i in 1:length(titles)) { attr(out[[i]], "table_caption") <- c(titles[i], "blue") } } if (!is.null(subtitles) && length(subtitles) <= length(out)) { for (i in 1:length(subtitles)) { attr(out[[i]], "table_subtitle") <- c(subtitles[i], "blue") } } att$pretty_names <- NULL attr(out, "additional_attributes") <- att names(out) <- list_names out } insight/R/get_parameters_censored.R0000644000175000017500000000651214036353003017262 0ustar nileshnilesh# Survival and censored models --------------------------------------------- #' @export get_parameters.flexsurvreg <- function(x, ...) 
{ cf <- stats::coef(x) params <- data.frame( Parameter = names(cf), Estimate = unname(cf), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.aareg <- function(x, ...) { sc <- summary(x) params <- data.frame( Parameter = rownames(sc$table), Estimate = unname(sc$table[, 2]), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.crr <- function(x, ...) { sc <- x$coef params <- data.frame( Parameter = names(sc), Estimate = unname(sc), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.lmodel2 <- function(x, ...) { res <- x$regression.results out <- as.data.frame(cbind(Method = rep(res$Method, 2), utils::stack(res, select = 2:3))) colnames(out) <- c("Component", "Estimate", "Parameter") out[c("Parameter", "Estimate", "Component")] } #' @export get_parameters.rqs <- function(x, ...) { sc <- suppressWarnings(summary(x)) if (all(unlist(lapply(sc, is.list)))) { list_sc <- lapply(sc, function(i) { .x <- as.data.frame(stats::coef(i)) .x$Parameter <- rownames(.x) .x$tau <- i$tau .x }) out <- do.call(rbind, list_sc) params <- data.frame( Parameter = out$Parameter, Estimate = out$coefficients, Component = sprintf("tau (%g)", out$tau), stringsAsFactors = FALSE, row.names = NULL ) } else { get_parameters.default(x, ...) } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.crq <- function(x, ...) { sc <- summary(x) if (all(unlist(lapply(sc, is.list)))) { list_sc <- lapply(sc, function(i) { .x <- as.data.frame(i) .x$Parameter <- rownames(.x) .x }) out <- do.call(rbind, list_sc) params <- data.frame( Parameter = out$Parameter, Estimate = out$coefficients.Value, Component = sprintf("tau (%g)", out$tau), stringsAsFactors = FALSE, row.names = NULL ) } else { params <- data.frame( Parameter = names(sc$coefficients[, 1]), Estimate = unname(sc$coefficients[, 1]), stringsAsFactors = FALSE, row.names = NULL ) } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.crqs <- get_parameters.crq #' @export get_parameters.lqmm <- function(x, ...) { cs <- stats::coef(x) if (is.matrix(cs)) { params <- .gather(as.data.frame(cs), names_to = "Component", values_to = "Estimate") params$Component <- sprintf("tau (%s)", params$Component) params$Parameter <- rep(rownames(cs), length.out = nrow(params)) params <- params[c("Parameter", "Estimate", "Component")] row.names(params) <- NULL } else { params <- data.frame( Parameter = names(cs), Estimate = unname(cs), stringsAsFactors = FALSE, row.names = NULL ) } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.lqm <- get_parameters.lqmm insight/R/is_model.R0000644000175000017500000001312314141732514014172 0ustar nileshnilesh#' @title Checks if an object is a regression model or statistical test object #' @name is_model #' #' @description Small helper that checks if a model is a regression model or #' a statistical object. `is_regression_model()` is stricter and only #' returns `TRUE` for regression models, but not for, e.g., `htest` #' objects. #' #' @param x An object. #' #' @return A logical, `TRUE` if `x` is a (supported) model object. #' #' @details This function returns `TRUE` if `x` is a model object. 
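#' Technically, the check compares the class attribute of `x` against an
#' internal list of known (supported) model classes.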
#' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' #' is_model(m) #' is_model(mtcars) #' #' test <- t.test(1:10, y = c(7:20)) #' is_model(test) #' is_regression_model(test) #' @export is_model <- function(x) { inherits(.get_class_list(x), .get_model_classes()) } # Is regression model ----------------------------------------------------- #' @rdname is_model #' @export is_regression_model <- function(x) { inherits(.get_class_list(x), .get_model_classes(regression_only = TRUE)) } # Helpers ----------------------------------------------------------------- .get_class_list <- function(x) { if (length(class(x)) > 1 || class(x) != "list") { return(x) } if (all(c("mer", "gam") %in% names(x))) { class(x) <- c("gamm4", "list") } x } .get_model_classes <- function(regression_only = FALSE) { out <- c( "_ranger", # a -------------------- "aareg", "afex_aov", "AKP", "ancova", "anova", "Anova.mlm", "aov", "aovlist", "Arima", "averaging", # b -------------------- "bamlss", "bamlss.frame", "bayesGAM", "bayesmeta", "bayesx", "bayesQR", "BBmm", "BBreg", "bcplm", "betamfx", "betaor", "betareg", "bfsl", "BFBayesFactor", "BGGM", "bglmerMod", "bife", "bifeAPEs", "biglm", "bigglm", "blrm", "blavaan", "blmerMod", "boot_test_mediation", "bracl", "brglm", "brglmFit", "brmsfit", "brmultinom", "bsem", "btergm", "buildmer", # c -------------------- "cch", "censReg", "cgam", "cgamm", "cglm", "clm", "clm2", "clmm", "clmm2", "clogit", "coeftest", "complmrob", "comprisk", "confusionMatrix", "coxme", "coxph", "coxph.penal", "coxr", "cpglm", "cpglmm", "crch", "crq", "crqs", "crr", "dglm", # d -------------------- "dep.effect", "DirichletRegModel", "drc", # e -------------------- "eglm", "elm", "emmGrid", "emm_list", "epi.2by2", "ergm", # f -------------------- "feglm", "feis", "felm", "fitdistr", "fixest", "flexmix", "flexsurvreg", # g -------------------- "gam", "Gam", "GAMBoost", "gamlr", "gamlss", "gamm", "gamm4", "garch", "gbm", "gee", "geeglm", "gjrm", "glht", "glimML", "Glm", "glm", "glmaag", "glmbb", "glmboostLSS", "glmc", "glmdm", "glmdisc", "glmerMod", "glmlep", "glmm", "glmmadmb", "glmmEP", "glmmFit", "glmmfields", "glmmLasso", "glmmPQL", "glmmTMB", "glmnet", "glmrob", "glmRob", "glmx", "gls", "gmnl", "gmm", "gnls", "gsm", # h -------------------- "heavyLme", "HLfit", "htest", "hurdle", # i -------------------- "ivFixed", "iv_robust", "ivreg", "ivprobit", # j -------------------- "joint", # k -------------------- "kmeans", # l -------------------- "lavaan", "lm", "lm_robust", "lme", "lmrob", "lmRob", "loggammacenslmrob", "logistf", "LogitBoost", "loo", "LORgee", "lmodel2", "lmerMod", "lmerModLmerTest", "logitmfx", "logitor", "lqm", "lqmm", "lrm", # m -------------------- "maov", "manova", "MANOVA", "margins", "maxLik", "mboostLSS", "mclogit", "mcp1", "mcp2", "mmclogit", "mcmc", "mcmc.list", "MCMCglmm", "mediate", "merMod", "merModList", "meta_bma", "meta_fixed", "meta_random", "meta_ordered", "metaplus", "mhurdle", "mipo", "mira", "mixed", "mixor", "MixMod", "mjoint", "mle", "mle2", "mlergm", "mlm", "mlma", "mlogit", "model_fit", "multinom", "mvmeta", "mvord", "mvr", # n -------------------- "negbin", "negbinmfx", "negbinirr", "nlreg", "nlrq", "nls", "nparLD", # o -------------------- "objectiveML", "ols", "osrt", "orcutt", # p -------------------- "pairwise.htest", "pb1", "pb2", "pgmm", "plm", "plmm", "PMCMR", "poissonmfx", "poissonirr", "polr", "pseudoglm", "psm", "probitmfx", # q -------------------- "qr", "QRNLMM", "QRLMM", # r -------------------- "rankFD", "Rchoice", "rdrobust", 
"ridgelm", "riskRegression", "rjags", "rlm", "rlme", "rlmerMod", "RM", "rma", "rmanovab", "rma.uni", "rms", "robmixglm", "robtab", "rq", "rqs", "rqss", # s -------------------- "Sarlm", "scam", "selection", "sem", "SemiParBIV", "slm", "speedlm", "speedglm", "splmm", "spml", "stanmvreg", "stanreg", "summary.lm", "survfit", "survreg", "survPresmooth", "svyglm", "svy_vglm", "svyolr", "systemfit", # t -------------------- "t1way", "t2way", "t3way", "test_mediation", "tobit", "trendPMCMR", "trimcibt", "truncreg", # v -------------------- "varest", "vgam", "vglm", # w -------------------- "wbm", "wblm", "wbgee", "wmcpAKP", # y -------------------- "yuen", "yuend", # z -------------------- "zcpglm", "zeroinfl", "zerotrunc" ) if (isTRUE(regression_only)) { out <- setdiff(out, c( "emmGrid", "emm_list", "htest", "pairwise.htest", "summary.lm" )) } out } .get_gam_classes <- function() { out <- c( "bamlss", "bamlss.frame", "brmsfit", "cgam", "cgamm", "gam", "Gam", "GAMBoost", "gamlr", "gamlss", "gamm", "gamm4", "stanmvreg", "stanreg" ) out } insight/R/standardize_names.R0000644000175000017500000001613214151371005016070 0ustar nileshnilesh#' Standardize column names #' #' Standardize column names from data frames, in particular objects returned #' from [parameters::model_parameters()], so column #' names are consistent and the same for any model object. #' #' @param data A data frame. In particular, objects from *easystats* #' package functions like #' [parameters::model_parameters()] or #' [effectsize::effectsize()] are accepted, but also #' data frames returned by `broom::tidy()` are valid objects. #' @param style Standardization can either be based on the naming conventions #' from the [easystats-project](https://easystats.github.io/easystats/), #' or on \pkg{broom}'s naming scheme. #' @param ignore_estimate Logical, if `TRUE`, column names like #' `"mean"` or `"median"` will *not* be converted to #' `"Coefficient"` resp. `"estimate"`. #' @param ... Currently not used. #' #' @return A data frame, with standardized column names. #' #' @details This method is in particular useful for package developers or users #' who use, e.g., #' [parameters::model_parameters()] in their own #' code or functions to retrieve model parameters for further processing. As #' `model_parameters()` returns a data frame with varying column names #' (depending on the input), accessing the required information is probably #' not quite straightforward. In such cases, `standardize_names()` can be #' used to get consistent, i.e. always the same column names, no matter what #' kind of model was used in `model_parameters()`. #' \cr \cr #' For `style = "broom"`, column names are renamed to match \pkg{broom}'s #' naming scheme, i.e. `Parameter` is renamed to `term`, #' `Coefficient` becomes `estimate` and so on. #' \cr \cr #' For `style = "easystats"`, when `data` is an object from #' `broom::tidy()`, column names are converted from "broom"-style into #' "easystats"-style. #' #' @examples #' if (require("parameters")) { #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' mp <- model_parameters(model) #' #' as.data.frame(mp) #' standardize_names(mp) #' standardize_names(mp, style = "broom") #' } #' @export standardize_names <- function(data, ...) { UseMethod("standardize_names") } #' @export standardize_names.default <- function(data, ...) 
{ print_color(sprintf("Objects of class '%s' are currently not supported.\n", class(data)[1]), "red") } #' @rdname standardize_names #' @export standardize_names.parameters_model <- function(data, style = c("easystats", "broom"), ignore_estimate = FALSE, ...) { style <- match.arg(style) .standardize_names(data, style, ignore_estimate = ignore_estimate, ...) } #' @export standardize_names.effectsize_table <- standardize_names.parameters_model #' @export standardize_names.data.frame <- standardize_names.parameters_model #' @export standardize_names.parameters_distribution <- standardize_names.parameters_model # helper ----- .standardize_names <- function(data, style, ignore_estimate = FALSE, ...) { cn <- colnames(data) if (style == "easystats") { cn <- .names_to_easystats(cn, ignore_estimate) } else { cn <- .names_to_broom(cn, ignore_estimate) } colnames(data) <- cn as.data.frame(data) } .names_to_easystats <- function(cn, ignore_estimate) { cn[cn %in% c("t", "z", "F", "Chi2", "chisq", "Chisq", "chi-sq", "t / F", "z / Chisq", "z / Chi2", "W")] <- "Statistic" if (isFALSE(ignore_estimate)) { cn[cn %in% c("Median", "Mean", "MAP", "Dxy", "rho", "r", "tau", "Difference")] <- "Coefficient" } cn[cn %in% c("df_residual", "df.residual", "Resid..Df", "df.error", "df_error")] <- "df" # convert broom-style to easystats # styler: off cn[cn == "term"] <- "Parameter" cn[cn == "estimate"] <- "Coefficient" cn[cn == "std.error"] <- "SE" cn[cn == "std.dev"] <- "SD" cn[cn == "p.value"] <- "p" cn[cn == "bayes.factor"] <- "BF" cn[cn == "component"] <- "Component" cn[cn == "effect"] <- "Effects" cn[cn == "response"] <- "Response" cn[cn == "statistic"] <- "Statistic" cn[cn == "conf.low"] <- "CI_low" cn[cn == "conf.high"] <- "CI_high" cn[cn == "conf.level"] <- "CI" cn[cn == "n.obs"] <- "n_Obs" # anova cn[cn == "sumsq"] <- "Sum_Squares" cn[cn == "meansq"] <- "Mean_Square" cn[cn == "Resid..Dev"] <- "Deviance_error" # convert classic summary cn[cn == "Estimate"] <- "Coefficient" cn[cn == "Std. 
Error"] <- "SE" cn[cn == "t value"] <- "Statistic" cn[cn == "z value"] <- "Statistic" cn[cn == "Pr(>|t|)"] <- "p" cn[cn == "Pr(>|z|)"] <- "p" # styler: on cn } .names_to_broom <- function(cn, ignore_estimate) { # TO DO: currently `htest` object output naming differs from `broom` # needs further discussion # styler: off # easy replacements cn[cn == "Parameter"] <- "term" cn[cn == "SE"] <- "std.error" cn[cn == "SD"] <- "std.dev" cn[cn == "p"] <- "p.value" cn[cn == "BF"] <- "bayes.factor" cn[cn == "log_BF"] <- "log(bayes.factor)" cn[cn == "Component"] <- "component" cn[cn == "Effects"] <- "effect" cn[cn == "Response"] <- "response" cn[cn == "CI"] <- "conf.level" cn[cn == "df_error"] <- "df.error" cn[cn == "df_residual"] <- "df.residual" cn[cn == "n_Obs"] <- "n.obs" # anova cn[cn == "Sum_Squares"] <- "sumsq" cn[cn == "Mean_Square"] <- "meansq" cn[cn == "Deviance_error"] <- "Resid..Dev" # styler: on # name of coefficient column for (Bayesian) models if (isFALSE(ignore_estimate)) { cn[cn %in% c("Coefficient", "Std_Coefficient", "Median", "Mean", "MAP", "Psihat", "Trimmed_Mean")] <- "estimate" } # name of coefficient column htest cn[cn %in% c("Dxy", "rho", "r", "tau", "R2")] <- "estimate" if (("Difference" %in% cn) && !("estimate" %in% cn)) { cn[cn == "Difference"] <- "estimate" } cn[cn %in% c("W", "S", "t", "z", "F", "Chi2", "chisq", "chi-sq", "Chisq", "t / F", "z / Chisq", "z / Chi2")] <- "statistic" # fancy regex replacements cn <- gsub("^CI_low", "conf.low", cn) cn <- gsub("^CI_high", "conf.high", cn) cn <- gsub("(.*)CI_low$", "\\1conf.low", cn) cn <- gsub("(.*)CI_high$", "\\1conf.high", cn) # from package effectisze if (requireNamespace("effectsize", quietly = TRUE)) { effectsize_names <- effectsize::is_effectsize_name(cn) if (any(effectsize_names)) { cn[effectsize_names] <- "estimate" } } # convert classic summary # styler: off cn[cn == "Std. Error"] <- "std.error" cn[cn == "t value"] <- "statistic" cn[cn == "z value"] <- "statistic" cn[cn == "Pr(>|t|)"] <- "p.value" cn[cn == "Pr(>|z|)"] <- "p.value" # styler: on # lowercase for everything cn <- gsub(tolower(cn), pattern = "_", replacement = ".", fixed = TRUE) cn } insight/R/is_gam_model.R0000644000175000017500000000173414077615665015042 0ustar nileshnilesh#' @title Checks if a model is a generalized additive model #' @name is_gam_model #' #' @description Small helper that checks if a model is a generalized additive #' model. #' #' @param x A model object. #' #' @return A logical, `TRUE` if `x` is a generalized additive model #' *and* has smooth-terms #' #' @note This function only returns `TRUE` when the model inherits from a #' typical GAM model class *and* when smooth terms are present in the model #' formula. If model has no smooth terms or is not from a typical gam class, #' `FALSE` is returned. #' #' @examples #' if (require("mgcv")) { #' data(iris) #' model1 <- lm(Petal.Length ~ Petal.Width + Sepal.Length, data = iris) #' model2 <- gam(Petal.Length ~ Petal.Width + s(Sepal.Length), data = iris) #' is_gam_model(model1) #' is_gam_model(model2) #' } #' @export is_gam_model <- function(x) { !is.null(find_smooth(x, flatten = TRUE)) && inherits(.get_class_list(x), .get_gam_classes()) } insight/R/find_variables.R0000644000175000017500000000546214120175277015362 0ustar nileshnilesh#' @title Find names of all variables #' @name find_variables #' #' @description Returns a list with the names of all variables, including #' response value and random effects. 
#' #' @inheritParams find_predictors #' @inheritParams find_formula #' #' @note The difference to [find_terms()] is that #' `find_variables()` returns each variable name only once, while #' `find_terms()` may return a variable multiple times in case of #' transformations or when arithmetic expressions were used in the formula. #' #' @return A list with (depending on the model) following elements (character #' vectors): #' * `response`, the name of the response variable #' * `conditional`, the names of the predictor variables from the *conditional* model (as opposed to the zero-inflated part of a model) #' * `cluster`, the names of cluster or grouping variables #' * `dispersion`, the name of the dispersion terms #' * `instruments`, the names of instrumental variables #' * `random`, the names of the random effects (grouping factors) #' * `zero_inflated`, the names of the predictor variables from the *zero-inflated* part of the model #' * `zero_inflated_random`, the names of the random effects (grouping factors) #' #' @examples #' if (require("lme4")) { #' data(cbpp) #' data(sleepstudy) #' # some data preparation... #' cbpp$trials <- cbpp$size - cbpp$incidence #' sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) #' sleepstudy$mysubgrp <- NA #' for (i in 1:5) { #' filter_group <- sleepstudy$mygrp == i #' sleepstudy$mysubgrp[filter_group] <- #' sample(1:30, size = sum(filter_group), replace = TRUE) #' } #' #' m1 <- glmer( #' cbind(incidence, size - incidence) ~ period + (1 | herd), #' data = cbpp, #' family = binomial #' ) #' find_variables(m1) #' #' m2 <- lmer( #' Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), #' data = sleepstudy #' ) #' find_variables(m2) #' find_variables(m2, flatten = TRUE) #' } #' @export find_variables <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "instruments", "smooth_terms"), flatten = FALSE, verbose = TRUE) { effects <- match.arg(effects) component <- match.arg(component) if (component %in% c("all", "conditional")) { resp <- find_response(x, combine = FALSE) } else { resp <- NULL } pr <- find_predictors( x, effects = effects, component = component, flatten = flatten, verbose = verbose ) if (flatten) { unique(c(resp, pr)) } else if (is.null(resp)) { pr } else { c(list(response = resp), pr) } } insight/R/get_parameters_mfx.R0000644000175000017500000000511714061456671016267 0ustar nileshnilesh#' @title Get model parameters from marginal effects models #' @name get_parameters.betamfx #' #' @description Returns the coefficients from a model. #' #' @param ... Currently not used. #' #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return A data frame with three columns: the parameter names, the related #' point estimates and the component. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_parameters(m) #' @export get_parameters.betamfx <- function(x, component = c("all", "conditional", "precision", "marginal"), ...) { component <- match.arg(component) params <- get_parameters.betareg(x$fit, component = "all", ...) 
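  # marginal effects are stored in the 'mfxest' matrix of the mfx object; they
  # are added as an own "marginal" component on top of the conditional and
  # precision parameters extracted above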
mfx <- x$mfxest params <- rbind( data.frame( Parameter = gsub("^\\(phi\\)_", "", rownames(mfx)), Estimate = as.vector(mfx[, 1]), Component = "marginal", stringsAsFactors = FALSE ), params ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.betaor <- function(x, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) get_parameters.betareg(x$fit, component = component, ...) } #' @rdname get_parameters.betamfx #' @export get_parameters.logitmfx <- function(x, component = c("all", "conditional", "marginal"), ...) { params <- get_parameters.default(x$fit, ...) params$Component <- "conditional" mfx <- x$mfxest params <- rbind( data.frame( Parameter = rownames(mfx), Estimate = as.vector(mfx[, 1]), Component = "marginal", stringsAsFactors = FALSE ), params ) component <- match.arg(component) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.poissonmfx <- get_parameters.logitmfx #' @export get_parameters.negbinmfx <- get_parameters.logitmfx #' @export get_parameters.probitmfx <- get_parameters.logitmfx #' @export get_parameters.logitor <- function(x, ...) { get_parameters.default(x$fit, ...) } #' @export get_parameters.poissonirr <- get_parameters.logitor #' @export get_parameters.negbinirr <- get_parameters.logitor insight/R/find_weights.R0000644000175000017500000000451114077615664015070 0ustar nileshnilesh#' @title Find names of model weights #' @name find_weights #' #' @description Returns the name of the variable that describes the weights of a #' model. #' #' @param x A fitted model. #' @param ... Currently not used. #' #' @return The name of the weighting variable as character vector, or `NULL` #' if no weights were specified. #' #' @examples #' data(mtcars) #' mtcars$weight <- rnorm(nrow(mtcars), 1, .3) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars, weights = weight) #' find_weights(m) #' @export find_weights <- function(x, ...) { UseMethod("find_weights") } #' @export find_weights.default <- function(x, ...) { tryCatch( { call_string <- .safe_deparse(x$call) if (!is.null(call_string)) { w <- .safe_deparse(parse(text = call_string)[[1]]$weights) # edge case, users use "eval(parse())" to parse weight variables if (grepl("^eval\\(parse\\(", w)) { w <- eval(parse(text = .trim(gsub("eval\\(parse\\((.*)=(.*)\\)\\)", "\\2", w)))) } if (.is_empty_object(w) || w == "NULL") w <- NULL } else { w <- NULL } w }, error = function(e) { NULL } ) } #' @export find_weights.brmsfit <- function(x, ...) { f <- find_formula(x, verbose = FALSE) if (is_multivariate(f)) { resp <- unlist(lapply(f, function(i) .safe_deparse(i$conditional[[2L]]))) } else { resp <- .safe_deparse(f$conditional[[2L]]) } resp <- .compact_character(unname(sapply(resp, function(i) { if (grepl("(.*)\\|(\\s+)weights\\((.*)\\)", i)) { i } else { "" } }))) w <- .trim(sub("(.*)\\|(\\s+)weights\\((.*)\\)", "\\3", resp)) if (.is_empty_object(w)) w <- NULL w } #' @export find_weights.model_fit <- function(x, ...) { find_weights(x$fit, ...) } #' @export find_weights.merMod <- function(x, ...) 
{ tryCatch( { w <- .safe_deparse(parse(text = .safe_deparse(x@call))[[1]]$weights) # edge case, users use "eval(parse())" to parse weight variables if (grepl("^eval\\(parse\\(", w)) { w <- eval(parse(text = .trim(gsub("eval\\(parse\\((.*)=(.*)\\)\\)", "\\2", w)))) } if (.is_empty_object(w) || w == "NULL") w <- NULL w }, error = function(e) { NULL } ) } insight/R/get_parameters_emmeans.R0000644000175000017500000000715714120050102017077 0ustar nileshnilesh#' @title Get model parameters from estimated marginal means objects #' @name get_parameters.emmGrid #' #' @description Returns the coefficients from a model. #' #' @param merge_parameters Logical, if `TRUE` and `x` has multiple #' columns for parameter names (like `emmGrid` objects may have), these #' are merged into a single parameter column, with parameters names and values #' as values. #' @param ... Currently not used. #' #' @inheritParams find_parameters #' @inheritParams find_predictors #' @inheritParams get_parameters.BGGM #' #' @note Note that `emmGrid` or `emm_list` objects returned by #' functions from \pkg{emmeans} have a different structure compared to #' usual regression models. Hence, the `Parameter` column does not #' always contain names of *variables*, but may rather contain #' *values*, e.g. for contrasts. See an example for pairwise #' comparisons below. #' #' @return A data frame with two columns: the parameter names and the related #' point estimates. #' #' @examples #' data(mtcars) #' model <- lm(mpg ~ wt * factor(cyl), data = mtcars) #' if (require("emmeans", quietly = TRUE)) { #' emm <- emmeans(model, "cyl") #' get_parameters(emm) #' #' emm <- emmeans(model, pairwise ~ cyl) #' get_parameters(emm) #' } #' @export get_parameters.emmGrid <- function(x, summary = FALSE, merge_parameters = FALSE, ...) { # check if we have a Bayesian model here if (!.is_baysian_emmeans(x) || isTRUE(summary)) { s <- summary(x) estimate_pos <- which(colnames(s) == attr(s, "estName")) params <- s[, seq_len(estimate_pos - 1), drop = FALSE] if (isTRUE(merge_parameters) && ncol(params) > 1) { r <- apply(params, 1, function(i) paste0(colnames(params), " [", i, "]")) out <- data.frame( Parameter = unname(sapply(as.data.frame(r), paste, collapse = ", ")), Estimate = s[[estimate_pos]], stringsAsFactors = FALSE, row.names = NULL ) } else { out <- data.frame( params, Estimate = s[[estimate_pos]], stringsAsFactors = FALSE, row.names = NULL ) if (isTRUE(merge_parameters)) { colnames(out)[1] <- "Parameter" } } .remove_backticks_from_parameter_names(out) } else { .clean_emmeans_draws(x) } } #' @rdname get_parameters.emmGrid #' @export get_parameters.emm_list <- function(x, summary = FALSE, ...) { if (!.is_baysian_emmeans(x) || isTRUE(summary)) { do.call(rbind, lapply(names(x), function(i) { out <- get_parameters(x[[i]], summary = summary) if (ncol(out) > 2) { est <- out$Estimate out$Estimate <- NULL r <- apply(out, 1, function(i) paste0(colnames(out), " [", i, "]")) out <- data.frame( Parameter = unname(sapply(as.data.frame(r), paste, collapse = ", ")), Estimate = unname(est), stringsAsFactors = FALSE ) } out$Component <- i colnames(out)[1] <- "Parameter" out })) } else { do.call(cbind, lapply(names(x), function(i) { .clean_emmeans_draws(x[[i]]) })) } } # helper -------------------- .clean_emmeans_draws <- function(x, ...) { # installed? check_if_installed("emmeans") if (!is.null(attributes(x)$misc$predict.type) && attributes(x)$misc$predict.type != "none") { x <- emmeans::regrid(x, transform = attributes(x)$misc$predict.type, ...) 
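    # re-gridding ensures that the posterior draws extracted below are on the
    # prediction scale that was requested for the emmGrid object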
} draws <- emmeans::as.mcmc.emmGrid( x, names = FALSE, sep.chains = FALSE, NE.include = TRUE, ... ) data.frame(draws, check.names = FALSE) } insight/R/format_bf.R0000644000175000017500000000503714077615664014361 0ustar nileshnilesh#' Bayes Factor formatting #' #' @param bf Bayes Factor. #' @param protect_ratio Should values smaller than 1 be represented as ratios? #' @param na_reference How to format missing values (`NA`). #' @param exact Should very large or very small values be reported with a #' scientific format (e.g., 4.24e5), or as truncated values (as "> 1000" and #' "< 1/1000"). #' @inheritParams format_p #' #' @return A formatted string. #' #' @examples #' format_bf(bfs <- c(0.000045, 0.033, NA, 1557, 3.54)) #' format_bf(bfs, exact = TRUE, name = NULL) #' format_bf(bfs, stars = TRUE) #' format_bf(bfs, protect_ratio = TRUE) #' format_bf(bfs, protect_ratio = TRUE, exact = TRUE) #' format_bf(bfs, na_reference = 1) #' @export format_bf <- function(bf, stars = FALSE, stars_only = FALSE, name = "BF", protect_ratio = FALSE, na_reference = NA, exact = FALSE) { if (!is.na(na_reference)) { bf[is.na(bf)] <- na_reference } else { bf[bad_bf <- is.na(bf)] <- 1 } bf_orig <- bf if (protect_ratio) { is_small <- bf < 1 bf[is_small] <- 1 / bf[is_small] } else { is_small <- logical(length(bf)) } digits <- ifelse(is.na(bf), 0, ifelse(bf < 1, 3, 2)) text <- paste0( "= ", ifelse(is_small, "1/", ""), format_value(bf, digits = digits) ) ## Very big/small values is_extreme <- bf_orig > 1000 | bf_orig < 1 / 1000 if (any(is_extreme)) { if (exact) { text[is_extreme] <- ifelse(bf_orig[is_extreme] > 1000, sprintf("= %.2e", bf_orig[is_extreme]), text[is_extreme] ) text[is_extreme] <- ifelse(bf_orig[is_extreme] < 1 / 1000, ifelse(is_small[is_extreme], sprintf("= 1/%.2e", bf[is_extreme]), sprintf("= %.2e", bf_orig[is_extreme]) ), text[is_extreme] ) } else { text[is_extreme] <- ifelse(bf_orig[is_extreme] > 1000, "> 1000", text[is_extreme] ) text[is_extreme] <- ifelse(bf_orig[is_extreme] < 1 / 1000, ifelse(is_small[is_extreme], "< 1/1000", "< 0.001"), text[is_extreme] ) } } ## Add stars text <- ifelse(bf_orig > 30, paste0(text, "***"), ifelse(bf_orig > 10, paste0(text, "**"), ifelse(bf_orig > 3, paste0(text, "*"), text) ) ) out <- .add_prefix_and_remove_stars(text, stars, stars_only, name) if (is.na(na_reference)) out[bad_bf] <- "" out } insight/R/get_sigma.R0000644000175000017500000001521014163102430014325 0ustar nileshnilesh#' @title Get residual standard deviation from models #' #' @description Returns `sigma`, which corresponds the estimated standard #' deviation of the residuals. This function extends the `sigma()` base R #' generic for models that don't have implemented it. It also computes the #' confidence interval (CI), which is stored as an attribute. #' #' Sigma is a key-component of regression models, and part of the so-called #' auxiliary parameters that are estimated. Indeed, linear models for instance #' assume that the residuals comes from a normal distribution with mean 0 and #' standard deviation `sigma`. See the details section below for more #' information about its interpretation and calculation. #' #' @name get_sigma #' #' @param x A model. #' @param ci Scalar, the CI level. The default (`NULL`) returns no CI. #' @inheritParams find_parameters #' #' @return The residual standard deviation (sigma), or `NULL` if this #' information could not be accessed. 
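#' For simple linear models, this value corresponds to the residual standard
#' error reported by `summary()`.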
#' #' @details #' \subsection{Interpretation of Sigma}{ #' The residual standard deviation, #' \ifelse{html}{\out{σ}}{\eqn{\sigma}}, indicates that the predicted #' outcome will be within +/- \ifelse{html}{\out{σ}}{\eqn{\sigma}} units #' of the linear predictor for approximately `68%` of the data points #' (\cite{Gelman, Hill & Vehtari 2020, p.84}). In other words, the residual #' standard deviation indicates the accuracy for a model to predict scores, #' thus it can be thought of as \dQuote{a measure of the average distance each #' observation falls from its prediction from the model} (\cite{Gelman, Hill & #' Vehtari 2020, p.168}). \ifelse{html}{\out{σ}}{\eqn{\sigma}} can be #' considered as a measure of the unexplained variation in the data, or of the #' precision of inferences about regression coefficients. } #' #' \subsection{Calculation of Sigma}{ #' By default, `get_sigma()` tries to extract sigma by calling #' `stats::sigma()`. If the model-object has no `sigma()` method, #' the next step is calculating sigma as square-root of the model-deviance #' divided by the residual degrees of freedom. Finally, if even this approach #' fails, and `x` is a mixed model, the residual standard deviation is #' accessed using the square-root from `get_variance_residual()`. #' } #' #' @references Gelman, A., Hill, J., & Vehtari, A. (2020). Regression and Other #' Stories. Cambridge University Press. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_sigma(m) #' @export get_sigma <- function(x, ci = NULL, verbose = TRUE) { s <- .get_sigma(x, verbose = verbose) # Confidence interval for sigma ci <- tryCatch( { .get_sigma_ci(x, ci = ci) }, error = function(e) { NULL } ) if (!is.null(ci)) { attributes(s) <- c(attributes(s), ci) } s } # Retrieve sigma ---------------------------------------------------------- .get_sigma <- function(x, ...) { UseMethod(".get_sigma") } # special handling --------------- .get_sigma.model_fit <- function(x, verbose = TRUE, ...) { .get_sigma(x$fit, verbose = verbose) } .get_sigma.lrm <- function(x, verbose = TRUE, ...) { s <- stats::sigma(x) s[length(s)] } .get_sigma.merModList <- function(x, verbose = TRUE, ...) { s <- suppressWarnings(summary(x)) s$residError } .get_sigma.summary.lm <- function(x, verbose = TRUE, ...) { x$sigma } .get_sigma.selection <- function(x, verbose = TRUE, ...) { unname(stats::coef(x)["sigma"]) } .get_sigma.cpglmm <- function(x, verbose = TRUE, ...) { tryCatch( { stats::deviance(x)[["sigmaML"]] }, error = function(e) { NULL } ) } .get_sigma.brmsfit <- function(x, verbose = TRUE, ...) { s <- tryCatch( { dat <- as.data.frame(x) sigma_column <- grep("sigma", colnames(dat), fixed = TRUE) if (length(sigma_column) == 1) { mean(dat[[sigma_column]][1]) } else if (length(sigma_column)) { # if more than one sigma column, # there isn't a traditional sigma for the model return(NULL) } else { NULL } }, error = function(e) { NULL } ) # compute sigma manually --------------- if (.is_empty_object(s)) { # default sigma --------------- s <- tryCatch( { stats::sigma(x) }, error = function(e) { NULL } ) } if (.is_empty_object(s)) { info <- model_info(x, verbose = FALSE) if (!is.null(info) && info$is_mixed) { s <- tryCatch( { sqrt(get_variance_residual(x, verbose = FALSE)) }, error = function(e) { NULL } ) } } if (.is_empty_object(s)) { return(NULL) } class(s) <- c("insight_aux", class(s)) s } # default handling --------------- .get_sigma.default <- function(x, verbose = TRUE, ...) 
{ if (inherits(x, c("mipo", "mira", "riskRegression"))) { return(NULL) } # default sigma --------------- s <- tryCatch( { stats::sigma(x) }, error = function(e) { NULL } ) # compute sigma manually --------------- if (.is_empty_object(s)) { info <- model_info(x, verbose = FALSE) if (!is.null(info) && info$is_dispersion) { return(NULL) } } if (.is_empty_object(s)) { info <- model_info(x, verbose = FALSE) if (!is.null(info) && info$is_mixed) { s <- tryCatch( { sqrt(get_variance_residual(x, verbose = FALSE)) }, error = function(e) { NULL } ) } } if (.is_empty_object(s)) { s <- tryCatch( { sqrt(get_deviance(x, verbose = verbose) / get_df(x, type = "residual", verbose = verbose)) }, error = function(e) { NULL } ) } if (.is_empty_object(s)) { return(NULL) } class(s) <- c("insight_aux", class(s)) s } # Methods ----------------------------------------------------------------- .get_sigma_ci <- function(x, ci = 0.95, ...) { # TODO: What does it work for Bayesian models? if (is.null(ci) || is.na(ci)) { return(NULL) } alpha <- 1 - ci dev <- get_deviance(x) n <- n_obs(x) low <- dev / stats::qchisq(1 - alpha / 2, n) high <- dev / stats::qchisq(alpha / 2, n) list(CI_low = sqrt(low), CI_high = sqrt(high)) } #' @export as.numeric.insight_aux <- function(x, ...) { if (is.null(x) || is.na(x) || is.infinite(x)) { return(NULL) } else { mean(x, na.rm = TRUE) } } insight/R/find_predictors.R0000644000175000017500000001755414135301056015565 0ustar nileshnilesh#' @title Find names of model predictors #' @name find_predictors #' #' @description Returns the names of the predictor variables for the #' different parts of a model (like fixed or random effects, zero-inflated #' component, ...). Unlike [find_parameters()], the names from #' `find_predictors()` match the original variable names from the data #' that was used to fit the model. #' #' @param x A fitted model. #' @param effects Should variables for fixed effects, random effects #' or both be returned? Only applies to mixed models. May be abbreviated. #' @param component Should all predictor variables, predictor variables for the #' conditional model, the zero-inflated part of the model, the dispersion #' term or the instrumental variables be returned? Applies to models #' with zero-inflated and/or dispersion formula, or to models with instrumental #' variable (so called fixed-effects regressions). May be abbreviated. Note that the #' *conditional* component is also called *count* or *mean* #' component, depending on the model. #' @param flatten Logical, if `TRUE`, the values are returned #' as character vector, not as list. Duplicated values are removed. #' @param verbose Toggle warnings. #' @param ... Currently not used. #' #' @return A list of character vectors that represent the name(s) of the #' predictor variables. 
Depending on the combination of the arguments #' `effects` and `component`, the returned list has following #' elements: #' \itemize{ #' \item `conditional`, the "fixed effects" terms from the model #' \item `random`, the "random effects" terms from the model #' \item `zero_inflated`, the "fixed effects" terms from the zero-inflation component of the model #' \item `zero_inflated_random`, the "random effects" terms from the zero-inflation component of the model #' \item `dispersion`, the dispersion terms #' \item `instruments`, for fixed-effects regressions like `ivreg`, `felm` or `plm`, the instrumental variables #' \item `correlation`, for models with correlation-component like `gls`, the variables used to describe the correlation structure #' } #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_predictors(m) #' @export find_predictors <- function(x, ...) { UseMethod("find_predictors") } #' @rdname find_predictors #' @export find_predictors.default <- function(x, effects = c("fixed", "random", "all"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "instruments", "correlation", "smooth_terms"), flatten = FALSE, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) f <- find_formula(x, verbose = verbose) is_mv <- is_multivariate(f) elements <- .get_elements(effects, component) # filter formulas, depending on requested effects and components if (is_mv) { f <- lapply(f, function(.x) .prepare_predictors(x, .x, elements)) } else { f <- .prepare_predictors(x, f, elements) } # random effects are returned as list, so we need to unlist here if (is_mv) { l <- lapply(f, function(.i) .return_vars(.i, x)) } else { l <- .return_vars(f, x) } if (.is_empty_object(l) || .is_empty_object(.compact_list(l))) { return(NULL) } # some models, like spatial models, have random slopes that are not defined # as fixed effect predictor. In such cases, we have to add the random slope term # manually, so other functions like "get_data()" work as expected... if (.obj_has_name(l, "random") && effects == "all") { random_slope <- unname(unlist(find_random_slopes(x))) all_predictors <- unlist(unique(l)) rs_not_in_pred <- unique(setdiff(random_slope, all_predictors)) if (length(rs_not_in_pred)) l$random <- c(rs_not_in_pred, l$random) } if (flatten) { unique(unlist(l)) } else { l } } #' @export find_predictors.selection <- function(x, flatten = FALSE, verbose = TRUE, ...) { elements <- .get_elements("all", "all") f <- lapply(find_formula(x, verbose = verbose), function(i) { .prepare_predictors(x, i, elements = elements) }) l <- lapply(f, function(.i) .return_vars(.i, x)) if (.is_empty_object(l) || .is_empty_object(.compact_list(l))) { return(NULL) } if (flatten) { unique(unlist(l)) } else { l } } #' @export find_predictors.bfsl <- function(x, flatten = FALSE, verbose = TRUE, ...) { l <- list(conditional = "x") if (flatten) { unique(unlist(l)) } else { l } } #' @export find_predictors.afex_aov <- function(x, effects = c("fixed", "random", "all"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "instruments", "correlation", "smooth_terms"), flatten = FALSE, verbose = TRUE, ...) 
{ effects <- match.arg(effects) if (effects == "all") effects <- c("fixed", "random") l <- list( fixed = c(names(attr(x, "between")), names(attr(x, "within"))), random = attr(x, "id") )[effects] if (flatten) { unique(unlist(l)) } else { l } } .return_vars <- function(f, x) { l <- lapply(names(f), function(i) { if (i %in% c("random", "zero_inflated_random")) { unique(paste(unlist(f[[i]]))) } else if (is.numeric(f[[i]])) { f[[i]] } else { if (is.list(f[[i]])) { # this is for multivariate response models, where # we have a list of formulas lapply(f[[i]], function(j) unique(all.vars(j))) } else { unique(all.vars(f[[i]])) } } }) empty_elements <- sapply(l, .is_empty_object) l <- .compact_list(l) # here we handle special cases for non-linear model in brms if (inherits(x, "brmsfit")) { nf <- stats::formula(x) if (!is.null(attr(nf$formula, "nl", exact = TRUE)) && .obj_has_name(nf, "pforms")) { nl_parms <- names(nf$pforms) l <- lapply(l, .remove_values, nl_parms) } } # remove constants l <- lapply(l, .remove_values, c(".", "pi", "1", "0")) l <- lapply(l, .remove_values, c(0, 1)) l <- lapply(l, function(i) gsub("`", "", i, fixed = TRUE)) names(l) <- names(f)[!empty_elements] l } .prepare_predictors <- function(x, f, elements) { f <- f[names(f) %in% elements] # from conditional model, remove response if (.obj_has_name(f, "conditional")) { f[["conditional"]] <- f[["conditional"]][[3]] } # from conditional model, remove response if (.obj_has_name(f, "survival")) { f[["survival"]] <- f[["survival"]][[3]] } # from conditional model, remove response if (inherits(x, "selection")) { if (.obj_has_name(f, "selection")) { f[["selection"]] <- f[["selection"]][[3]] } if (.obj_has_name(f, "outcome")) { f[["outcome"]] <- f[["outcome"]][[3]] } } # if we have random effects, just return grouping variable, not random slopes if (.obj_has_name(f, "random")) { f[["random"]] <- .get_group_factor(x, f[["random"]]) } # same for zi-random effects if (.obj_has_name(f, "zero_inflated_random")) { f[["zero_inflated_random"]] <- .get_group_factor(x, f[["zero_inflated_random"]]) } # same for sigma-random effects if (.obj_has_name(f, "sigma_random")) { f[["sigma_random"]] <- .get_group_factor(x, f[["sigma_random"]]) } # same for beta-random effects if (.obj_has_name(f, "beta_random")) { f[["beta_random"]] <- .get_group_factor(x, f[["beta_random"]]) } f } insight/R/compute_variances.R0000644000175000017500000007371514163327307016127 0ustar nileshnilesh.compute_variances <- function(x, component, name_fun = NULL, name_full = NULL, verbose = TRUE, tolerance = 1e-5, model_component = "conditional") { ## Original code taken from GitGub-Repo of package glmmTMB ## Author: Ben Bolker, who used an cleaned-up/adapted ## version of Jon Lefcheck's code from SEMfit ## Major revisions and adaption to more complex models and other packages ## by Daniel Lüdecke faminfo <- model_info(x, verbose = FALSE) if (faminfo$family %in% c("truncated_nbinom1")) { if (verbose) { warning(format_message(sprintf("Truncated negative binomial families are currently not supported by `%s`.", name_fun)), call. = FALSE) } return(NA) } # get necessary model information, like fixed and random effects, # variance-covariance matrix etc. 
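  # 'vals' is a list with the fixed effects coefficients (beta), the fixed
  # effects model matrix (X), the random effects variance-covariance matrices
  # (vc) and the conditional modes of the random effects (re)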
vals <- .get_variance_information( x, faminfo = faminfo, name_fun = name_fun, verbose = verbose, model_component = model_component ) # Test for non-zero random effects ((near) singularity) no_random_variance <- FALSE if (.is_singular(x, vals, tolerance = tolerance) && !(component %in% c("slope", "intercept"))) { if (verbose) { warning(format_message( sprintf("Can't compute %s. Some variance components equal zero. Your model may suffer from singularity (see '?lme4::isSingular' and '?performance::check_singularity').", name_full), "Solution: Respecify random structure! You may also decrease the 'tolerance' level to enforce the calculation of random effect variances." ), call. = FALSE) } no_random_variance <- TRUE } # initialize return values, if not all components are requested var.fixed <- NULL var.random <- NULL var.residual <- NULL var.distribution <- NULL var.dispersion <- NULL var.intercept <- NULL var.slope <- NULL cor.slope_intercept <- NULL cor.slopes <- NULL # Get variance of fixed effects: multiply coefs by design matrix if (component %in% c("fixed", "all")) { var.fixed <- .compute_variance_fixed(vals) } # Are random slopes present as fixed effects? Warn. if (!.random_slopes_in_fixed(x) && verbose) { warning(format_message( sprintf("Random slopes not present as fixed effects. This artificially inflates the conditional %s.", name_full), "Solution: Respecify fixed structure!" ), call. = FALSE) } # Separate observation variance from variance of random effects nr <- sapply(vals$re, nrow) not.obs.terms <- names(nr[nr != n_obs(x)]) obs.terms <- names(nr[nr == n_obs(x)]) # Variance of random effects if (component %in% c("random", "all") && isFALSE(no_random_variance)) { var.random <- .compute_variance_random(not.obs.terms, x = x, vals = vals) } # Residual variance, which is defined as the variance due to # additive dispersion and the distribution-specific variance (Johnson et al. 2014) if (component %in% c("residual", "distribution", "all")) { var.distribution <- .compute_variance_distribution( x = x, var.cor = vals$vc, faminfo, name = name_full, verbose = verbose ) } if (component %in% c("residual", "dispersion", "all")) { var.dispersion <- .compute_variance_dispersion( x = x, vals = vals, faminfo = faminfo, obs.terms = obs.terms ) } if (component %in% c("residual", "all")) { var.residual <- var.distribution + var.dispersion } if (isTRUE(faminfo$is_mixed) || inherits(x, c("wblm", "wbgee"))) { if (component %in% c("intercept", "all")) { var.intercept <- .between_subject_variance(vals, x) } if (component %in% c("slope", "all")) { var.slope <- .random_slope_variance(vals, x) } if (component %in% c("rho01", "all")) { cor.slope_intercept <- .random_slope_intercept_corr(vals, x) } if (component %in% c("rho00", "all")) { cor.slopes <- .random_slopes_corr(vals, x) } } else { var.intercept <- NULL var.slope <- NULL cor.slope_intercept <- NULL cor.slopes <- NULL } # if we only need residual variance, we can delete those # values again... 
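# For illustration: these components are the building blocks of the marginal
# and conditional R2 of Nakagawa et al. (2013, 2017), roughly
#   R2_marginal    = var.fixed / (var.fixed + var.random + var.residual)
#   R2_conditional = (var.fixed + var.random) / (var.fixed + var.random + var.residual)
# (a sketch of the general idea, not a literal excerpt from those packages).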
if (component == "residual") { var.distribution <- NULL var.dispersion <- NULL } .compact_list(list( "var.fixed" = var.fixed, "var.random" = var.random, "var.residual" = var.residual, "var.distribution" = var.distribution, "var.dispersion" = var.dispersion, "var.intercept" = var.intercept, "var.slope" = var.slope, "cor.slope_intercept" = cor.slope_intercept, "cor.slopes" = cor.slopes )) } # store essential information on coefficients, model matrix and so on # as list, since we need these information throughout the functions to # calculate the variance components... # # basically, this function should return a list that has the same # structure for any mixed models like this code for lme4: # beta = lme4::fixef(x), # X = lme4::getME(x, "X"), # vc = lme4::VarCorr(x), # re = lme4::ranef(x) # .get_variance_information <- function(x, faminfo, name_fun = "get_variances", verbose = TRUE, model_component = "conditional") { # installed? check_if_installed("lme4", reason = "to compute variances for mixed models") if (inherits(x, "lme")) { check_if_installed("nlme", reason = "to compute variances for mixed models") } if (inherits(x, "rstanarm")) { check_if_installed("rstanarm", reason = "to compute variances for mixed models") } # stanreg # --------------------------- if (inherits(x, "stanreg")) { vals <- list( beta = lme4::fixef(x), X = rstanarm::get_x(x), vc = lme4::VarCorr(x), re = lme4::ranef(x) ) # GLMMapdative # --------------------------- } else if (inherits(x, "MixMod")) { vc1 <- vc2 <- NULL re_names <- find_random(x) vc_cond <- !grepl("^zi_", colnames(x$D)) if (any(vc_cond)) { vc1 <- x$D[vc_cond, vc_cond, drop = FALSE] attr(vc1, "stddev") <- sqrt(diag(vc1)) attr(vc1, "correlation") <- stats::cov2cor(x$D[vc_cond, vc_cond, drop = FALSE]) } vc_zi <- grepl("^zi_", colnames(x$D)) if (any(vc_zi)) { colnames(x$D) <- gsub("^zi_(.*)", "\\1", colnames(x$D)) rownames(x$D) <- colnames(x$D) vc2 <- x$D[vc_zi, vc_zi, drop = FALSE] attr(vc2, "stddev") <- sqrt(diag(vc2)) attr(vc2, "correlation") <- stats::cov2cor(x$D[vc_zi, vc_zi, drop = FALSE]) } vc1 <- list(vc1) names(vc1) <- re_names[[1]] attr(vc1, "sc") <- sqrt(get_deviance(x, verbose = FALSE) / get_df(x, type = "residual", verbose = FALSE)) if (!is.null(vc2)) { vc2 <- list(vc2) names(vc2) <- re_names[[2]] attr(vc2, "sc") <- sqrt(get_deviance(x, verbose = FALSE) / get_df(x, type = "residual", verbose = FALSE)) } vcorr <- .compact_list(list(vc1, vc2)) names(vcorr) <- c("cond", "zi")[1:length(vcorr)] vals <- list( beta = lme4::fixef(x), X = get_modelmatrix(x), vc = vcorr, re = list(lme4::ranef(x)) ) names(vals$re) <- x$id_name # joineRML # --------------------------- } else if (inherits(x, "mjoint")) { re_names <- find_random(x, flatten = TRUE) vcorr <- summary(x)$D attr(vcorr, "stddev") <- sqrt(diag(vcorr)) attr(vcorr, "correlation") <- stats::cov2cor(vcorr) vcorr <- list(vcorr) names(vcorr) <- re_names[1] attr(vcorr, "sc") <- x$coef$sigma2[[1]] vals <- list( beta = lme4::fixef(x), X = matrix(1, nrow = n_obs(x), dimnames = list(NULL, "(Intercept)_1")), vc = vcorr, re = list(lme4::ranef(x)) ) names(vals$re) <- re_names[1:length(vals$re)] # nlme # --------------------------- } else if (inherits(x, "lme")) { re_names <- find_random(x, split_nested = TRUE, flatten = TRUE) comp_x <- get_modelmatrix(x) rownames(comp_x) <- 1:nrow(comp_x) if (.is_nested_lme(x)) { vals_vc <- .get_nested_lme_varcorr(x) vals_re <- lme4::ranef(x) } else { vals_vc <- list(nlme::getVarCov(x)) vals_re <- list(lme4::ranef(x)) } vals <- list( beta = lme4::fixef(x), X = comp_x, vc = 
vals_vc, re = vals_re ) names(vals$re) <- re_names names(vals$vc) <- re_names # ordinal # --------------------------- } else if (inherits(x, "clmm")) { if (requireNamespace("ordinal", quietly = TRUE)) { mm <- get_modelmatrix(x) vals <- list( beta = c("(Intercept)" = 1, stats::coef(x)[intersect(names(stats::coef(x)), colnames(mm))]), X = mm, vc = ordinal::VarCorr(x), re = ordinal::ranef(x) ) } # glmmadmb # --------------------------- } else if (inherits(x, "glmmadmb")) { vals <- list( beta = lme4::fixef(x), X = get_modelmatrix(x), vc = lme4::VarCorr(x), re = lme4::ranef(x) ) # brms # --------------------------- } else if (inherits(x, "brmsfit")) { comp_x <- get_modelmatrix(x) rownames(comp_x) <- 1:nrow(comp_x) vc <- lapply(names(lme4::VarCorr(x)), function(i) { element <- lme4::VarCorr(x)[[i]] if (i != "residual__") { if (!is.null(element$cov)) { out <- as.matrix(drop(element$cov[, 1, ])) colnames(out) <- rownames(out) <- gsub("Intercept", "(Intercept)", rownames(element$cov), fixed = TRUE) } else { out <- as.matrix(drop(element$sd[, 1])^2) colnames(out) <- rownames(out) <- gsub("Intercept", "(Intercept)", rownames(element$sd), fixed = TRUE) } attr(out, "sttdev") <- element$sd[, 1] } else { out <- NULL } out }) vc <- .compact_list(vc) names(vc) <- setdiff(names(lme4::VarCorr(x)), "residual__") attr(vc, "sc") <- lme4::VarCorr(x)$residual__$sd[1, 1] vals <- list( beta = lme4::fixef(x)[, 1], X = comp_x, vc = vc, re = lapply(lme4::ranef(x), function(re) { reval <- as.data.frame(drop(re[, 1, ])) colnames(reval) <- gsub("Intercept", "(Intercept)", dimnames(re)[[3]], fixed = TRUE) reval }) ) names(vals$beta) <- gsub("Intercept", "(Intercept)", names(vals$beta), fixed = TRUE) # cpglmm # --------------------------- } else if (inherits(x, "cpglmm")) { # installed? check_if_installed("cplm") vals <- list( beta = cplm::fixef(x), X = cplm::model.matrix(x), vc = cplm::VarCorr(x), re = cplm::ranef(x) ) # lme4 / glmmTMB # --------------------------- } else { vals <- list( beta = lme4::fixef(x), X = lme4::getME(x, "X"), vc = lme4::VarCorr(x), re = lme4::ranef(x) ) } # for glmmTMB, tell user that dispersion model is ignored if (inherits(x, c("glmmTMB", "MixMod"))) { if (is.null(model_component) || model_component == "conditional") { vals <- lapply(vals, .collapse_cond) } else { vals <- lapply(vals, .collapse_zi) } } if (!is.null(find_formula(x)[["dispersion"]]) && verbose) { warning(sprintf("%s ignores effects of dispersion model.", name_fun), call. = FALSE) } vals } # helper-function, telling user if family / distribution # is supported or not .badlink <- function(link, family, verbose = TRUE) { if (verbose) { warning(format_message(sprintf("Model link '%s' is not yet supported for the %s distribution.", link, family)), call. = FALSE) } return(NA) } # glmmTMB returns a list of model information, one for conditional # and one for zero-inflated part, so here we "unlist" it, returning # only the conditional part. 
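# A minimal sketch of what the two helpers below do (illustrative values only):
#   x <- list(cond = c("(Intercept)" = 0.5), zi = c("(Intercept)" = -1.2))
#   .collapse_cond(x)  # returns only the "cond" element
#   .collapse_zi(x)    # returns only the "zi" element
# Any object without such an element is returned unchanged.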
.collapse_cond <- function(x) { if (is.list(x) && "cond" %in% names(x)) { x[["cond"]] } else { x } } .collapse_zi <- function(x) { if (is.list(x) && "zi" %in% names(x)) { x[["zi"]] } else { x } } # fixed effects variance ---- # --------------------------- .compute_variance_fixed <- function(vals) { with(vals, stats::var(as.vector(beta %*% t(X)))) } # variance associated with a random-effects term (Johnson 2014) ---- # ------------------------------------------------------------------ .compute_variance_random <- function(terms, x, vals) { if (is.null(terms)) { return(NULL) } .sigma_sum <- function(Sigma) { rn <- rownames(Sigma) # fix for models w/o intercept if (!any(grepl("^\\(Intercept\\)", colnames(vals$X)))) { vals$X <- cbind("(Intercept)" = 1, vals$X) } if (!is.null(rn)) { valid <- rownames(Sigma) %in% colnames(vals$X) if (!all(valid)) { rn <- rn[valid] Sigma <- Sigma[valid, valid, drop = FALSE] } } Z <- vals$X[, rn, drop = FALSE] Z.m <- Z %*% Sigma sum(diag(crossprod(Z.m, Z))) / n_obs(x) } # if (inherits(x, "MixMod")) { # .sigma_sum(vals$vc) # } else { # sum(sapply(vals$vc[terms], .sigma_sum)) # } sum(sapply(vals$vc[terms], .sigma_sum)) } # distribution-specific variance (Nakagawa et al. 2017) ---- # ---------------------------------------------------------- .compute_variance_distribution <- function(x, var.cor, faminfo, name, verbose = TRUE) { if (inherits(x, "lme")) { sig <- x$sigma } else { sig <- attr(var.cor, "sc") } if (is.null(sig)) sig <- 1 # Distribution-specific variance depends on the model-family # and the related link-function if (faminfo$is_linear && !faminfo$is_tweedie) { # linear / Gaussian ---- # ---------------------- dist.variance <- sig^2 } else { if (faminfo$is_betabinomial) { # beta-binomial ---- # ------------------ dist.variance <- switch(faminfo$link_function, logit = , probit = , cloglog = , clogloglink = .variance_distributional(x, faminfo, sig, name = name, verbose = verbose), .badlink(faminfo$link_function, faminfo$family, verbose = verbose) ) } else if (faminfo$is_binomial) { # binomial / bernoulli ---- # -------------------------- dist.variance <- switch(faminfo$link_function, logit = pi^2 / 3, probit = 1, cloglog = , clogloglink = pi^2 / 6, .badlink(faminfo$link_function, faminfo$family, verbose = verbose) ) } else if (faminfo$is_count) { # count ---- # ----------- dist.variance <- switch(faminfo$link_function, log = .variance_distributional(x, faminfo, sig, name = name, verbose = verbose), sqrt = 0.25, .badlink(faminfo$link_function, faminfo$family, verbose = verbose) ) } else if (faminfo$family %in% c("Gamma", "gamma")) { # Gamma ---- # ----------- ## TODO needs some more checking - should now be in line with other packages dist.variance <- switch(faminfo$link_function, inverse = , identity = , log = stats::family(x)$variance(sig), # log = .variance_distributional(x, faminfo, sig, name = name, verbose = verbose), .badlink(faminfo$link_function, faminfo$family, verbose = verbose) ) } else if (faminfo$family == "beta") { # Beta ---- # ---------- dist.variance <- switch(faminfo$link_function, logit = .variance_distributional(x, faminfo, sig, name = name, verbose = verbose), .badlink(faminfo$link_function, faminfo$family, verbose = verbose) ) } else if (faminfo$is_tweedie) { # Tweedie ---- # ------------- dist.variance <- switch(faminfo$link_function, log = .variance_distributional(x, faminfo, sig, name = name, verbose = verbose), .badlink(faminfo$link_function, faminfo$family, verbose = verbose) ) } else { dist.variance <- sig } } dist.variance } # 
dispersion-specific variance ---- # --------------------------------- .compute_variance_dispersion <- function(x, vals, faminfo, obs.terms) { if (faminfo$is_linear) { 0 } else { if (length(obs.terms) == 0) { 0 } else { .compute_variance_random(obs.terms, x = x, vals = vals) } } } # This is the core-function to calculate the distribution-specific variance # Nakagawa et al. 2017 propose three different methods, here we only rely # on the lognormal-approximation. # .variance_distributional <- function(x, faminfo, sig, name, verbose = TRUE) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package `lme4` needs to be installed to compute variances for mixed models.", call. = FALSE) } # lognormal-approximation of distributional variance, # see Nakagawa et al. 2017 # in general want log(1+var(x)/mu^2) .null_model <- null_model(x, verbose = verbose) # check if null-model could be computed if (!is.null(.null_model)) { if (inherits(.null_model, "cpglmm")) { # installed? check_if_installed("cplm") null_fixef <- unname(cplm::fixef(.null_model)) } else { null_fixef <- unname(.collapse_cond(lme4::fixef(.null_model))) } mu <- exp(null_fixef) } else { mu <- NA } if (is.na(mu)) { if (verbose) { warning(format_message("Can't calculate model's distribution-specific variance. Results are not reliable."), call. = FALSE) } return(0) } else if (mu < 6) { if (verbose) { warning(format_message(sprintf("mu of %0.1f is too close to zero, estimate of %s may be unreliable.", mu, name)), call. = FALSE) } } cvsquared <- tryCatch( { vv <- switch(faminfo$family, # (zero-inflated) poisson ---- # ---------------------------- `zero-inflated poisson` = , poisson = .variance_family_poisson(x, mu, faminfo), # hurdle-poisson ---- # ------------------- `hurdle poisson` = , truncated_poisson = stats::family(x)$variance(sig), # Gamma, exponential ---- # ----------------------- Gamma = stats::family(x)$variance(sig), # (zero-inflated) negative binomial ---- # -------------------------------------- `zero-inflated negative binomial` = , `negative binomial` = , genpois = , nbinom1 = , nbinom2 = .variance_family_nbinom(x, mu, sig, faminfo), truncated_nbinom2 = stats::family(x)$variance(mu, sig), # other distributions ---- # ------------------------ tweedie = .variance_family_tweedie(x, mu, sig), beta = .variance_family_beta(x, mu, sig), # betabinomial = stats::family(x)$variance(mu, sig), # betabinomial = .variance_family_betabinom(x, mu, sig), # default variance for non-captured distributions ---- # ---------------------------------------------------- .variance_family_default(x, mu, verbose) ) if (vv < 0 && isTRUE(verbose)) { warning(format_message("Model's distribution-specific variance is negative. Results are not reliable."), call. = FALSE) } vv / mu^2 }, error = function(x) { if (verbose) { warning(format_message("Can't calculate model's distribution-specific variance. Results are not reliable."), call. 
= FALSE) } 0 } ) log1p(cvsquared) } # Get distributional variance for poisson-family # ---------------------------------------------- .variance_family_poisson <- function(x, mu, faminfo) { if (faminfo$is_zero_inflated) { .variance_zip(x, faminfo, family_var = mu) } else { if (inherits(x, "MixMod")) { return(mu) } else if (inherits(x, "cpglmm")) { .get_cplm_family(x)$variance(mu) } else { stats::family(x)$variance(mu) } } } # Get distributional variance for beta-family # ---------------------------------------------- .variance_family_beta <- function(x, mu, phi) { if (inherits(x, "MixMod")) { stats::family(x)$variance(mu) } else { mu * (1 - mu) / (1 + phi) } } # Get distributional variance for beta-family # ---------------------------------------------- .variance_family_betabinom <- function(x, mu, phi) { if (inherits(x, "MixMod")) { stats::family(x)$variance(mu) } else { n <- n_obs(x) mu * (1 - mu) * (n * (phi + n) / (1 + phi)) } } # Get distributional variance for tweedie-family # ---------------------------------------------- .variance_family_tweedie <- function(x, mu, phi) { p <- unname(stats::plogis(x$fit$par["thetaf"]) + 1) phi * mu^p } # Get distributional variance for nbinom-family # ---------------------------------------------- .variance_family_nbinom <- function(x, mu, sig, faminfo) { if (faminfo$is_zero_inflated) { if (missing(sig)) sig <- 0 .variance_zinb(x, sig, faminfo, family_var = mu * (1 + sig)) } else { if (inherits(x, "MixMod")) { if (missing(sig)) { return(rep(1e-16, length(mu))) } mu * (1 + sig) } else { stats::family(x)$variance(mu, sig) } } } # For zero-inflated negative-binomial models, # the distributional variance is based on Zuur et al. 2012 # ---------------------------------------------- .variance_zinb <- function(model, sig, faminfo, family_var) { if (inherits(model, "glmmTMB")) { v <- stats::family(model)$variance # zi probability p <- stats::predict(model, type = "zprob") # mean of conditional distribution mu <- stats::predict(model, type = "conditional") # sigma betad <- model$fit$par["betad"] k <- switch(faminfo$family, gaussian = exp(0.5 * betad), Gamma = exp(-0.5 * betad), exp(betad) ) pvar <- (1 - p) * v(mu, k) + mu^2 * (p^2 + p) } else if (inherits(model, "MixMod")) { v <- family_var p <- stats::plogis(stats::predict(model, type_pred = "link", type = "zero_part")) mu <- suppressWarnings(stats::predict(model, type_pred = "link", type = "mean_subject")) k <- sig pvar <- (1 - p) * v(mu, k) + mu^2 * (p^2 + p) } else { pvar <- family_var } mean(pvar) # pearson residuals # pred <- predict(model, type = "response") ## (1 - p) * mu # pred <- stats::predict(model, type_pred = "response", type = "mean_subject") # (get_response(model) - pred) / sqrt(pvar) } # For zero-inflated poisson models, the # distributional variance is based on Zuur et al. 
2012 # ---------------------------------------------- .variance_zip <- function(model, faminfo, family_var) { if (inherits(model, "glmmTMB")) { p <- stats::predict(model, type = "zprob") mu <- stats::predict(model, type = "conditional") pvar <- (1 - p) * (mu + p * mu^2) } else if (inherits(model, "MixMod")) { p <- stats::plogis(stats::predict(model, type_pred = "link", type = "zero_part")) mu <- suppressWarnings(stats::predict(model, type = "mean_subject")) pvar <- (1 - p) * (mu + p * mu^2) } else { pvar <- family_var } mean(pvar) } # Get distribution-specific variance for general and # undefined families / link-functions # ---------------------------------------------- .variance_family_default <- function(x, mu, verbose) { # installed? check_if_installed("lme4") tryCatch( { if (inherits(x, "merMod")) { mu * (1 + mu / lme4::getME(x, "glmer.nb.theta")) } else if (inherits(x, "MixMod")) { stats::family(x)$variance(mu) } else if (inherits(x, "glmmTMB")) { if (is.null(x$theta)) { theta <- lme4::getME(x, "theta") } else { theta <- x$theta } mu * (1 + mu / theta) } else { mu * (1 + mu / x$theta) } }, error = function(x) { if (verbose) { warning("Can't calculate model's distribution-specific variance. Results are not reliable.", call. = FALSE) } 0 } ) } # return existence of random slopes ---- # ---------------------------------------------- .random_slopes_in_fixed <- function(model) { rs <- find_random_slopes(model) fe <- find_predictors(model, effects = "fixed", component = "all") # if model has no random slopes, there are no random slopes that # are *not* present as fixed effects if (is.null(rs)) { return(TRUE) } # NULL models have no predictors, so no fixed effect as random slope if (is.null(fe)) { return(FALSE) } # make sure we have identical subcomponents between random and # fixed effects fe <- .compact_list(fe[c("conditional", "zero_inflated")]) if (length(rs) > length(fe)) rs <- rs[1:length(fe)] if (length(fe) > length(rs)) fe <- fe[1:length(rs)] all(mapply(function(r, f) all(r %in% f), rs, fe, SIMPLIFY = TRUE)) } # random intercept-variances, i.e. 
# between-subject-variance (tau 00) ---- # ---------------------------------------------- .between_subject_variance <- function(vals, x) { vars <- lapply(vals$vc, function(i) i[1]) # check for uncorrelated random slopes-intercept non_intercepts <- which(sapply(vals$vc, function(i) !grepl("^\\(Intercept\\)", dimnames(i)[[1]][1]))) if (length(non_intercepts)) { vars <- vars[-non_intercepts] } sapply(vars, function(i) i) } # random slope-variances (tau 11) ---- # ---------------------------------------------- .random_slope_variance <- function(vals, x) { if (inherits(x, "lme")) { unlist(lapply(vals$vc, function(x) diag(x)[-1])) } else { out <- unlist(lapply(vals$vc, function(x) diag(x)[-1])) # check for uncorrelated random slopes-intercept non_intercepts <- which(sapply(vals$vc, function(i) !grepl("^\\(Intercept\\)", dimnames(i)[[1]][1]))) if (length(non_intercepts)) { dn <- unlist(lapply(vals$vc, function(i) dimnames(i)[1])[non_intercepts]) rndslopes <- unlist(lapply(vals$vc, function(i) i[1])[non_intercepts]) names(rndslopes) <- gsub("(.*)\\.\\d+$", "\\1", names(rndslopes)) out <- c(out, stats::setNames(rndslopes, paste0(names(rndslopes), ".", dn))) } out } } # slope-intercept-correlations (rho 01) ---- # ---------------------------------------------- .random_slope_intercept_corr <- function(vals, x) { if (inherits(x, "lme")) { rho01 <- unlist(sapply(vals$vc, function(i) attr(i, "cor_slope_intercept"))) if (is.null(rho01)) { vc <- lme4::VarCorr(x) if ("Corr" %in% colnames(vc)) { re_name <- find_random(x, split_nested = FALSE, flatten = TRUE) rho01 <- as.vector(suppressWarnings(stats::na.omit(as.numeric(vc[, "Corr"])))) if (length(re_name) == length(rho01)) { names(rho01) <- re_name } } } rho01 } else { corrs <- lapply(vals$vc, attr, "correlation") rho01 <- sapply(corrs, function(i) { if (!is.null(i)) { i[-1, 1] } else { NULL } }) unlist(rho01) } } # slope-slope-correlations (rho 01) ---- # ---------------------------------------------- .random_slopes_corr <- function(vals, x) { corrs <- lapply(vals$vc, attr, "correlation") rnd_slopes <- unlist(find_random_slopes(x)) if (length(rnd_slopes) < 2) { return(NULL) } rho01 <- tryCatch( { lapply(corrs, function(d) { d[upper.tri(d, diag = TRUE)] <- NA d <- as.data.frame(d) d <- .reshape_longer(d, colnames_to = "Parameter1", rows_to = "Parameter2") d <- d[stats::complete.cases(d), ] d <- d[!d$Parameter1 %in% c("Intercept", "(Intercept)"), ] d$Parameter <- paste0(d$Parameter1, "-", d$Parameter2) d$Parameter1 <- d$Parameter2 <- NULL stats::setNames(d$Value, d$Parameter) }) }, error = function(e) { NULL } ) # rho01 <- tryCatch( # { # sapply(corrs, function(i) { # if (!is.null(i)) { # slope_pairs <- utils::combn(x = rnd_slopes, m = 2, simplify = FALSE) # lapply(slope_pairs, function(j) { # stats::setNames(i[j[1], j[2]], paste0("..", paste0(j, collapse = "-"))) # }) # } else { # NULL # } # }) # }, # error = function(e) { # NULL # } # ) unlist(rho01) } # helper -------------------------- .reshape_longer <- function(data, colnames_to = "Name", rows_to = NULL) { cols <- names(data) values_to <- "Value" # save attribute of each variable variable_attr <- lapply(data, attributes) # Create Index column as needed by reshape data[["_Row"]] <- .to_numeric(row.names(data)) # Reshape long <- stats::reshape(data, varying = cols, idvar = "_Row", v.names = values_to, timevar = colnames_to, direction = "long" ) # Sort the dataframe (to match pivot_longer's output) long <- long[order(long[["_Row"]], long[[colnames_to]]), ] # Remove or rename the row index if 
(is.null(rows_to)) { long[["_Row"]] <- NULL } else { names(long)[names(long) == "_Row"] <- rows_to } # Re-insert col names as levels long[[colnames_to]] <- cols[long[[colnames_to]]] # Reset row names row.names(long) <- NULL # Remove reshape attributes attributes(long)$reshapeLong <- NULL # add back attributes where possible for (i in colnames(long)) { attributes(long[[i]]) <- variable_attr[[i]] } long } .to_numeric <- function(x) { tryCatch(as.numeric(as.character(x)), error = function(e) x, warning = function(w) x) } insight/R/get_call.R0000644000175000017500000000262214037012611014144 0ustar nileshnilesh#' @title Get the model's function call #' @name get_call #' #' @description Returns the model's function call when available. #' #' @inheritParams find_random #' #' @return A function call. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_call(m) #' #' if (require("lme4")) { #' m <- lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris) #' get_call(m) #' } #' @export get_call <- function(x) { UseMethod("get_call") } #' @export get_call.default <- function(x) { cl <- tryCatch( { x$call }, error = function(x) { NULL } ) if (is.null(cl)) { cl <- tryCatch( { x@call }, error = function(x) { NULL } ) } # For GAMM4 if (is.null(cl) && "gam" %in% names(x)) { cl <- tryCatch( { x$gam$formula # Where's the call here? }, error = function(x) { NULL } ) } cl } #' @export get_call.lm <- function(x) { x$call } #' @export get_call.glm <- get_call.lm #' @export get_call.mvord <- function(x) { x$rho$mc } #' @export get_call.model_fit <- function(x) { get_call(x$fit) } #' @export get_call.lmerMod <- function(x) { x@call } #' @export get_call.merMod <- get_call.lmerMod #' @export get_call.stanreg <- function(x) { x$call } insight/R/n_obs.R0000644000175000017500000002433414142011655013502 0ustar nileshnilesh#' @title Get number of observations from a model #' @name n_obs #' #' @description This method returns the number of observation that were used #' to fit the model, as numeric value. #' #' @param weighted For survey designs, returns the weighted sample size. #' @param disaggregate For binomial models with aggregated data, `n_obs()` #' returns the number of data rows by default. If `disaggregate = TRUE`, #' the total number of trials is returned instead (determined by summing the #' results of `weights()` for aggregated data, which will be either the #' weights input for proportion success response or the row sums of the #' response matrix if matrix response, see 'Examples'). #' @inheritParams find_predictors #' @inheritParams get_response #' @inheritParams find_formula #' #' @return The number of observations used to fit the model, or `NULL` if #' this information is not available. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' n_obs(m) #' #' if (require("lme4")) { #' data(cbpp, package = "lme4") #' m <- glm( #' cbind(incidence, size - incidence) ~ period, #' data = cbpp, #' family = binomial(link = "logit") #' ) #' n_obs(m) #' n_obs(m, disaggregate = TRUE) #' } #' @export n_obs <- function(x, ...) { UseMethod("n_obs") } #' @export n_obs.default <- function(x, ...) { is_binomial <- tryCatch( { fam <- stats::family(x) fam$family == "binomial" }, error = function(e) { FALSE } ) if (isTRUE(is_binomial)) { return(n_obs.glm(x, ...)) } tryCatch( { stats::nobs(x) }, error = function(x) { NULL } ) } #' @rdname n_obs #' @export n_obs.glm <- function(x, disaggregate = FALSE, ...) 
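# Illustrative sketch (hypothetical data; values are what the code below is
# expected to return for an aggregated binomial model):
#   dat <- data.frame(successes = c(2, 5, 9), trials = c(10, 20, 30))
#   m <- glm(cbind(successes, trials - successes) ~ 1, data = dat, family = binomial)
#   n_obs(m)                       # 3, the number of data rows
#   n_obs(m, disaggregate = TRUE)  # 60, the total number of trials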
{ is_binomial <- tryCatch( { fam <- stats::family(x) fam$family == "binomial" }, error = function(e) { FALSE } ) .nobs <- stats::nobs(x) if (isTRUE(is_binomial) && isTRUE(disaggregate)) { resp <- deparse(stats::formula(x)[[2]]) resp_data <- get_response(x, verbose = FALSE) # response is a matrix of numbers of trials and successes if (grepl("^cbind\\(", resp)) { trials <- trimws(sub("cbind\\((.*),(.*)\\)", "\\2", resp)) if (grepl("-", trials, fixed = TRUE)) { .nobs <- sum(resp_data[[2]]) } else { .nobs <- sum(resp_data) } # response is a fraction } else if (!is.data.frame(resp_data) && .is.fraction(resp_data)) { .nobs <- sum(get_weights(x)) } } .nobs } #' @export n_obs.censReg <- n_obs.default #' @rdname n_obs #' @export n_obs.svyolr <- function(x, weighted = FALSE, ...) { if (weighted) { stats::nobs(x) } else { nrow(stats::model.frame(x)) } } #' @export n_obs.svy_vglm <- function(x, ...) { n_obs(x$fit) } #' @export n_obs.model_fit <- n_obs.svy_vglm #' @export n_obs.gam <- function(x, ...) { if (!is.null(dim(x$y))) { dim(x$y)[1] } else { length(x$y) } } #' @export n_obs.gamm <- function(x, ...) { if (.obj_has_name(x, "gam")) { n_obs(x$gam, ...) } else { stop("Cannot find n_obs for this object. Please an open an issue!") } } #' @export n_obs.list <- n_obs.gamm #' @export n_obs.lavaan <- function(x, ...) { x@SampleStats@ntotal } #' @export n_obs.selection <- function(x, type = c("all", "observed", "censored"), ...) { type <- match.arg(type) s <- summary(x) switch(type, "all" = s$param$nObs, "observed" = s$param$N1, s$param$N0 ) } #' @export n_obs.mjoint <- function(x, ...) { nrow(x$data[[1]]) } #' @export n_obs.joint <- function(x, ...) { nrow(x$data$longitudinal) } #' @export n_obs.merModList <- function(x, ...) { stats::nobs(x[[1]]) } #' @export n_obs.summary.lm <- function(x, ...) { length(x$residuals) } #' @export n_obs.mediate <- function(x, ...) { x$nobs } #' @export n_obs.garch <- function(x, ...) { x$n.used } #' @export n_obs.bayesx <- function(x, ...) { length(x$response) } #' @export n_obs.flexsurvreg <- function(x, ...) { x$N } #' @export n_obs.SemiParBIV <- function(x, ...) { x$n } #' @export n_obs.ivprobit <- function(x, ...) { nrow(x$mr1) } #' @export n_obs.mvord <- function(x, ...) { x$rho$n } #' @export n_obs.bamlss <- function(x, ...) { nrow(x$model.frame) } #' @export n_obs.coeftest <- function(x, ...) { attributes(x)$nobs } #' @export n_obs.lmRob <- function(x, ...) { length(x$fitted.values) } #' @export n_obs.lqmm <- function(x, ...) { x$nobs } #' @export n_obs.lqm <- n_obs.lqmm #' @export n_obs.sem <- function(x, ...) { if (!.is_semLme(x)) { return(NULL) } length(x$original.y) } #' @export n_obs.LORgee <- function(x, ...) { x$nobs } #' @export n_obs.crr <- function(x, ...) { x$n } #' @export n_obs.mcmc <- function(x, ...) { nrow(as.data.frame(x)) } #' @export n_obs.biglm <- function(x, ...) { x$n } #' @export n_obs.bigglm <- n_obs.biglm #' @export n_obs.eglm <- n_obs.biglm #' @export n_obs.rqss <- n_obs.biglm #' @export n_obs.hurdle <- n_obs.biglm #' @export n_obs.zerotrunc <- n_obs.biglm #' @export n_obs.zeroinfl <- n_obs.biglm #' @export n_obs.cgam <- function(x, ...) { nrow(get_data(x)) } #' @export n_obs.cglm <- n_obs.cgam #' @export n_obs.gbm <- function(x, ...) { length(x$fit) } #' @export #' @rdname n_obs #' @inheritParams get_data n_obs.afex_aov <- function(x, shape = c("long", "wide"), ...) { shape <- match.arg(shape) nrow(get_data(x, shape = shape)) } #' @export n_obs.glimML <- function(x, ...) { nrow(x@data) } #' @export n_obs.mle2 <- function(x, ...) 
{ n <- tryCatch( { x@nobs }, error = function(e) { NULL } ) if (is.null(n)) { n <- tryCatch( { nrow(get_data(x)) }, error = function(e) { NULL } ) } n } #' @export n_obs.mle <- n_obs.mle2 #' @export n_obs.glmRob <- function(x, ...) { length(x$fitted.values) } #' @export n_obs.gmnl <- function(x, ...) { x$logLik$nobs } #' @export n_obs.multinom <- function(x, ...) { nrow(x$fitted.values) } #' @export n_obs.cpglmm <- function(x, ...) { nrow(x@frame) } #' @export n_obs.lmodel2 <- function(x, ...) { nrow(get_data(x)) } #' @export n_obs.cpglm <- function(x, ...) { nrow(x$model.frame) } #' @export n_obs.zcpglm <- n_obs.cpglm #' @export n_obs.bcplm <- n_obs.cpglm #' @export n_obs.rq <- function(x, ...) { length(x$fitted.values) } #' @export n_obs.ivFixed <- n_obs.rq #' @export n_obs.BBreg <- function(x, ...) { x$nObs } #' @export n_obs.BBmm <- n_obs.BBreg #' @export n_obs.crq <- function(x, ...) { n <- nrow(x$residuals) if (.is_empty_object(n)) { n <- nrow(x$fitted.values) } n } #' @export n_obs.crqs <- n_obs.crq #' @export n_obs.comprisk <- function(x, ...) { x$n } #' @export n_obs.riskRegression <- function(x, ...) { nrow(x$response) } #' @export n_obs.MANOVA <- function(x, ...) { nrow(x$input$data) } #' @export n_obs.RM <- n_obs.MANOVA #' @export n_obs.nlrq <- function(x, ...) { length(stats::fitted(x)) } #' @export n_obs.survfit <- function(x, ...) { length(x$n.event) } #' @export n_obs.mhurdle <- function(x, ...) { nrow(x$model) } #' @export n_obs.survreg <- function(x, ...) { length(x$linear.predictors) } #' @export n_obs.aareg <- function(x, ...) { max(x$n) } #' @export n_obs.coxph <- n_obs.aareg #' @export n_obs.coxme <- n_obs.aareg #' @export n_obs.coxr <- function(x, ...) { nrow(x$y) } #' @export n_obs.felm <- function(x, ...) { x$N } #' @export n_obs.feis <- function(x, ...) { length(x$fitted.values) } #' @export n_obs.averaging <- function(x, ...) { attr(x, "nobs") } #' @export n_obs.fixest <- function(x, ...) { x$nobs } #' @export n_obs.feglm <- function(x, ...) { x$nobs[["nobs"]] } #' @export n_obs.bife <- n_obs.feglm #' @export n_obs.complmrob <- n_obs.cgam #' @export n_obs.aovlist <- function(x, ...) { nrow(stats::model.frame(x)) } #' @rdname n_obs #' @export n_obs.stanmvreg <- function(x, select = NULL, ...) { n <- min(x$n_yobs) if (!is.null(select)) { if (select %in% names(x$n_yobs)) { n <- x$n_yobs[select] } else { print_color(sprintf("Could not find response '%s'. Model's response variables are named %s.\n", select, paste(names(x$n_yobs), collapse = ", ")), "red") cat("Returning smallest number of observations now.\n") n <- min(x$n_yobs) } } n } #' @export n_obs.blrm <- function(x, ...) { x$N } #' @export n_obs.mlogit <- function(x, ...) { nrow(x$model) } #' @export n_obs.Glm <- n_obs.mlogit #' @export n_obs.maxLik <- n_obs.mlogit #' @export n_obs.wbm <- function(x, ...) { nrow(x@frame) } #' @export n_obs.wbgee <- function(x, ...) { stats::nobs(x) } #' @export n_obs.Rchoice <- function(x, ...) { nrow(x$mf) } # mfx models -------------------------------------- #' @export n_obs.betamfx <- function(x, ...) { stats::nobs(x$fit) } #' @export n_obs.betaor <- n_obs.betamfx #' @export n_obs.logitmfx <- n_obs.betamfx #' @export n_obs.poissonmfx <- n_obs.betamfx #' @export n_obs.probitmfx <- n_obs.betamfx #' @export n_obs.negbinmfx <- n_obs.betamfx #' @export n_obs.negbinirr <- n_obs.betamfx #' @export n_obs.poissonirr <- n_obs.betamfx #' @export n_obs.logitor <- n_obs.betamfx # special models ----------- #' @export n_obs.mipo <- function(x, ...) 
{ x$glanced$nobs } #' @export n_obs.mira <- function(x, ...) { if (!requireNamespace("mice", quietly = TRUE)) { stop("Package `mice` required. Please install it.", call. = FALSE) } n_obs(mice::pool(x), ...) } #' @export n_obs.emm_list <- function(x, ...) { NULL } insight/R/get_response.R0000644000175000017500000000453714135300715015102 0ustar nileshnilesh#' @title Get the values from the response variable #' @name get_response #' #' @description Returns the values the response variable(s) from a model object. #' If the model is a multivariate response model, a data frame with values #' from all response variables is returned. #' #' @param select Optional name(s) of response variables for which to extract values. #' Can be used in case of regression models with multiple response variables. #' @inheritParams find_predictors #' #' @return The values of the response variable, as vector, or a data frame if #' `x` has more than one defined response variable. #' #' @examples #' if (require("lme4")) { #' data(cbpp) #' cbpp$trials <- cbpp$size - cbpp$incidence #' #' m <- glm(cbind(incidence, trials) ~ period, data = cbpp, family = binomial) #' head(get_response(m)) #' get_response(m, select = "incidence") #' } #' #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_response(m) #' @export get_response <- function(x, select = NULL, verbose = TRUE) { rn <- find_response(x, combine = FALSE) combined_rn <- find_response(x, combine = TRUE) if (is.null(rn)) { return(NULL) } # check if response is a proportion for a binomial glm proportion_response <- combined_rn[!grepl("I\\((.*)\\)", combined_rn)] binom_fam <- tryCatch( { stats::family(x)$family == "binomial" }, error = function(x) { FALSE } ) glm_proportion <- any(grepl("/", proportion_response, fixed = TRUE)) && binom_fam # data used to fit the model model_data <- get_data(x, verbose = verbose) # exceptions if (inherits(x, "DirichletRegModel")) { rv <- x$Y class(rv) <- "matrix" data.frame(rv) } else if (inherits(x, "bfsl")) { model_data[["y"]] } else if (length(rn) > 1 && all(rn %in% colnames(model_data)) && !glm_proportion) { rv <- model_data[, rn, drop = FALSE] colnames(rv) <- rn # if user only wants specific response value, return this only if (!is.null(select) && all(select %in% colnames(rv))) { rv <- rv[, select, drop = TRUE] } rv } else { rv <- model_data[[combined_rn]] if (!is.factor(rv) && !is.numeric(rv) && !is.character(rv) && !is.logical(rv) && !is.integer(rv)) { as.vector(rv) } else { rv } } } insight/R/get_parameters_gam.R0000644000175000017500000001242414040053577016233 0ustar nileshnilesh#' @title Get model parameters from generalized additive models #' @name get_parameters.gamm #' #' @description Returns the coefficients from a model. #' #' @param ... Currently not used. #' #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return For models with smooth terms or zero-inflation component, a data #' frame with three columns: the parameter names, the related point estimates #' and the component. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_parameters(m) #' @export get_parameters.gamm <- function(x, component = c("all", "conditional", "smooth_terms", "location"), ...) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) get_parameters.gam(x, component, ...) } #' @export get_parameters.Gam <- function(x, component = c("all", "conditional", "smooth_terms", "location"), ...) 
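# Rough sketch of the three-column output described above (assuming mgcv is
# installed; data are simulated, so estimates will vary):
#   set.seed(1)
#   dat <- mgcv::gamSim(1, n = 200)
#   m <- mgcv::gam(y ~ x3 + s(x1) + s(x2), data = dat)
#   get_parameters(m)
# returns a data frame with columns Parameter, Estimate and Component, where
# smooth terms (here s(x1), s(x2)) get Component "smooth_terms" and their
# Estimate is the EDF taken from summary(m)$s.table.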
{ component <- match.arg(component) pars <- stats::coef(x) .return_smooth_parms( conditional = pars[.grep_non_smoothers(names(pars))], smooth_terms = pars[.grep_smoothers(names(pars))], component = component ) } #' @rdname get_parameters.gamm #' @export get_parameters.gam <- function(x, component = c("all", "conditional", "smooth_terms", "location"), ...) { component <- match.arg(component) pars <- stats::coef(x) st <- summary(x)$s.table smooth_terms <- st[, 1] names(smooth_terms) <- row.names(st) .return_smooth_parms( conditional = pars[.grep_non_smoothers(names(pars))], smooth_terms = smooth_terms, component = component ) } #' @export get_parameters.scam <- get_parameters.gam #' @export get_parameters.vgam <- function(x, component = c("all", "conditional", "smooth_terms", "location"), ...) { component <- match.arg(component) pars <- stats::coef(x) .return_smooth_parms( conditional = pars[.grep_non_smoothers(names(pars))], smooth_terms = pars[.grep_smoothers(names(pars))], component = component ) } #' @export get_parameters.gamlss <- function(x, ...) { pars <- lapply(x$parameters, function(i) { stats::na.omit(stats::coef(x, what = i)) }) names(pars) <- x$parameters if ("mu" %in% names(pars)) names(pars)[1] <- "conditional" do.call(rbind, lapply(names(pars), function(i) { params <- data.frame( Parameter = names(pars[[i]]), Estimate = pars[[i]], Component = i, stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) })) # data.frame( # Parameter = c(names(pars$conditional), names(pars$sigma), names(pars$nu), names(pars$tau)), # Estimate = c(unname(pars$conditional), unname(pars$sigma), unname(pars$nu), unname(pars$tau)), # Component = c( # rep("conditional", length(pars$conditional)), # rep("sigma", length(pars$sigma)), # rep("nu", length(pars$nu)), # rep("tau", length(pars$tau)) # ), # stringsAsFactors = FALSE, # row.names = NULL # ) } #' @rdname get_parameters.gamm #' @export get_parameters.rqss <- function(x, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) sc <- summary(x) smooth_terms <- sc$qsstab[, 1] names(smooth_terms) <- rownames(sc$qsstab) .return_smooth_parms( conditional = stats::setNames(sc$coef[, 1], rownames(sc$coef)), smooth_terms = smooth_terms, component = component ) } #' @export get_parameters.cgam <- function(x, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) sc <- summary(x) estimates <- sc$coefficients smooth_terms <- sc$coefficients2 if (!is.null(smooth_terms)) smooth_terms <- stats::setNames(smooth_terms[, 1], rownames(smooth_terms)) .return_smooth_parms( conditional = stats::setNames(estimates[, 1], rownames(estimates)), smooth_terms = smooth_terms, component = component ) } #' @export get_parameters.SemiParBIV <- function(x, ...) 
{ s <- summary(x) s <- .compact_list(s[grepl("^tableP", names(s))]) params <- do.call(rbind, lapply(1:length(s), function(i) { out <- as.data.frame(s[[i]]) out$Parameter <- rownames(out) out$Component <- paste0("Equation", i) out })) colnames(params)[1] <- "Estimate" rownames(params) <- NULL .remove_backticks_from_parameter_names(params[c("Parameter", "Estimate", "Component")]) } # helper ------------------- .return_smooth_parms <- function(conditional, smooth_terms, component) { cond <- data.frame( Parameter = names(conditional), Estimate = conditional, Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) if (!is.null(smooth_terms)) { smooth <- data.frame( Parameter = names(smooth_terms), Estimate = smooth_terms, Component = "smooth_terms", stringsAsFactors = FALSE, row.names = NULL ) } else { smooth <- NULL } pars <- switch(component, all = , location = rbind(cond, smooth), conditional = cond, smooth_terms = smooth ) if (!component %in% c("all", "location")) { pars <- .remove_column(pars, "Component") } .remove_backticks_from_parameter_names(pars) } insight/R/datawizard.R0000644000175000017500000002775414164637161014557 0ustar nileshnilesh#' Restore the type of columns according to a reference data frame #' #' @param data A data frame. #' @param reference A reference data frame from which to find the correct #' column types. #' @param ... Additional arguments passed on to methods. #' #' @return #' #' A dataframe with columns whose types have been restored based on the #' reference dataframe. #' #' @examples #' data <- data.frame( #' Sepal.Length = c("1", "3", "2"), #' Species = c("setosa", "versicolor", "setosa"), #' New = c("1", "3", "4") #' ) #' #' fixed <- data_restoretype(data, reference = iris) #' summary(fixed) #' @export data_restoretype <- function(data, reference = NULL, ...) { for (col in names(data)) { # No reference data (regular fixing) ---------------- if (is.null(reference)) { if (is.character(data[[col]])) { data[[col]] <- .to_numeric(data[[col]]) } } else { if (is.factor(reference[[col]]) && !is.factor(data[[col]])) { # Restore factor levels data[[col]] <- factor(data[[col]], levels = levels(reference[[col]])) } if (is.numeric(reference[[col]]) && !is.numeric(data[[col]])) { data[[col]] <- .to_numeric(as.character(data[[col]])) } if (is.character(reference[[col]]) && !is.character(data[[col]])) { data[[col]] <- as.character(data[[col]]) } } } data } #' Convert to Numeric (if possible) #' #' Tries to convert vector to numeric if possible (if no warnings or errors). #' Otherwise, leaves it as is. #' #' @param x A vector to be converted. #' #' @examples #' to_numeric(c("1", "2")) #' to_numeric(c("1", "2", "A")) #' @return Numeric vector (if possible) #' @export to_numeric <- function(x) { tryCatch(as.numeric(as.character(x)), error = function(e) x, warning = function(w) x ) } #' Find row indices of a data frame matching a specific condition #' #' Find row indices of a data frame that match a specific condition. #' #' @param x A data frame. #' @param to A data frame matching the specified conditions. #' @param ... Other arguments passed to or from other functions. #' #' @return #' #' A dataframe containing rows that match the specified configuration. #' #' @examples #' matching_rows <- data_match(mtcars, data.frame(vs = 0, am = 1)) #' mtcars[matching_rows, ] #' #' matching_rows <- data_match(mtcars, data.frame(vs = 0, am = c(0, 1))) #' mtcars[matching_rows, ] #' @export data_match <- function(x, to, ...) 
{ # Input checks if (!is.data.frame(to)) to <- as.data.frame(to) # Find matching rows idx <- 1:nrow(x) for (col in names(to)) { if (col %in% names(x)) { idx <- idx[x[[col]][idx] %in% to[[col]]] } } to_numeric(row.names(x)[idx]) } #' Relocate (reorder) columns of a data frame #' #' @param data A data frame to pivot. #' @param cols A character vector indicating the names of the columns to move. #' @param before,after Destination of columns. Supplying neither will move #' columns to the left-hand side; specifying both is an error. #' @param safe If `TRUE`, will disregard non-existing columns. #' @param ... Additional arguments passed on to methods. #' #' @examples #' # Reorder columns #' head(data_relocate(iris, cols = "Species", before = "Sepal.Length")) #' head(data_relocate(iris, cols = "Species", before = "Sepal.Width")) #' head(data_relocate(iris, cols = "Sepal.Width", after = "Species")) #' head(data_relocate(iris, cols = c("Species", "Petal.Length"), after = "Sepal.Width")) #' @return A data frame with reordered columns. #' #' @export data_relocate <- function(data, cols, before = NULL, after = NULL, safe = TRUE, ...) { # Sanitize if (!is.null(before) && !is.null(after)) { stop("You must supply only one of `before` or `after`.") } if (safe) { cols <- cols[cols %in% names(data)] } # save attributes att <- attributes(data) # Move columns to the right hand side data <- data[c(setdiff(names(data), cols), cols)] # Get columns and their original position data_cols <- names(data) position <- which(data_cols %in% cols) # Find new positions if (!is.null(before)) { before <- before[before %in% data_cols][1] # Take first that exists (if vector is supplied) if (length(before) != 1) { stop("The column passed to 'before' wasn't found. Possibly mispelled.") } where <- min(match(before, data_cols)) position <- c(setdiff(position, where), where) } else if (!is.null(after)) { after <- after[after %in% data_cols][1] # Take first that exists (if vector is supplied) if (length(after) != 1) { stop("The column passed to 'after' wasn't found. Possibly mispelled.") } where <- max(match(after, data_cols)) position <- c(where, setdiff(position, where)) } else { where <- 1 position <- union(position, where) } # Set left and right side lhs <- setdiff(seq(1, where - 1), position) rhs <- setdiff(seq(where + 1, ncol(data)), position) position <- unique(c(lhs, position, rhs)) position <- position[position <= length(data_cols)] out <- data[position] attributes(out) <- utils::modifyList(att, attributes(out)) out } #' Reshape (pivot) data from wide to long #' #' This function "lengthens" data, increasing the number of rows and decreasing #' the number of columns. This is a dependency-free base-R equivalent of #' `tidyr::pivot_longer()`. #' #' @param data A data frame to pivot. #' @param cols A vector of column names or indices to pivot into longer format. #' @param colnames_to The name of the new column that will contain the column #' names. #' @param values_to The name of the new column that will contain the values of #' the pivoted variables. #' @param rows_to The name of the column that will contain the row-number from #' the original data. If `NULL`, will be removed. #' @param colnames_from The name of the column that contains the levels to be #' used as future columns. #' @param values_from The name of the column that contains the values of the put #' in the columns. #' @param rows_from The name of the column that identifies the rows. If #' `NULL`, will use all the unique rows. #' @param ... 
Additional arguments passed on to methods. #' @param names_to,names_from Same as `colnames_to`, is there for #' compatibility with `tidyr::pivot_longer()`. #' @param sep The indicating a separating character in the variable names in the #' wide format. #' #' #' @examples #' wide_data <- data.frame(replicate(5, rnorm(10))) #' #' # From wide to long #' # ------------------ #' # Default behaviour (equivalent to tidyr::pivot_longer(wide_data, cols = 1:5)) #' data_to_long(wide_data) #' #' # Customizing the names #' data_to_long(wide_data, #' cols = c(1, 2), #' colnames_to = "Column", #' values_to = "Numbers", #' rows_to = "Row" #' ) #' #' # From long to wide #' # ----------------- #' long_data <- data_to_long(wide_data, rows_to = "Row_ID") # Save row number #' data_to_wide(long_data, #' colnames_from = "Name", #' values_from = "Value", #' rows_from = "Row_ID" #' ) #' #' # Full example #' # ------------------ #' if (require("psych")) { #' data <- psych::bfi # Wide format with one row per participant's personality test #' #' # Pivot long format #' long <- data_to_long(data, #' cols = "\\d", # Select all columns that contain a digit #' colnames_to = "Item", #' values_to = "Score", #' rows_to = "Participant" #' ) #' #' # Separate facet and question number #' long$Facet <- gsub("\\d", "", long$Item) #' long$Item <- gsub("[A-Z]", "", long$Item) #' long$Item <- paste0("I", long$Item) #' #' wide <- data_to_wide(long, #' colnames_from = "Item", #' values_from = "Score" #' ) #' head(wide) #' } #' @return data.frame #' @export data_to_long <- function(data, cols = "all", colnames_to = "Name", values_to = "Value", rows_to = NULL, ..., names_to = colnames_to) { if (inherits(data, "tbl_df")) { tbl_input <- TRUE data <- as.data.frame(data) } else { tbl_input <- FALSE } # Select columns ---------------- if (is.character(cols) && length(cols) == 1) { # If only one name if (cols == "all") { # If all, take all cols <- names(data) } else { # Surely, a regex cols <- grep(cols, names(data), value = TRUE) } } # If numeric, surely the index of the cols if (is.numeric(cols)) { cols <- names(data)[cols] } # Sanity checks ---------------- # Make sure all cols are in data if (!all(cols %in% names(data))) { stop("Some variables as selected by 'cols' are not present in the data.") } # Compatibility with tidyr if (names_to != colnames_to) colnames_to <- names_to # save attribute of each variable variable_attr <- lapply(data, attributes) # Reshaping --------------------- # Create Index column as needed by reshape data[["_Row"]] <- to_numeric(row.names(data)) # Reshape long <- stats::reshape(data, varying = cols, idvar = "_Row", v.names = values_to, timevar = colnames_to, direction = "long" ) # Cleaning -------------------------- # Sort the dataframe (to match pivot_longer's output) long <- long[order(long[["_Row"]], long[[colnames_to]]), ] # Remove or rename the row index if (is.null(rows_to)) { long[["_Row"]] <- NULL } else { names(long)[names(long) == "_Row"] <- rows_to } # Re-insert col names as levels long[[colnames_to]] <- cols[long[[colnames_to]]] # Reset row names row.names(long) <- NULL # Remove reshape attributes attributes(long)$reshapeLong <- NULL # add back attributes where possible for (i in colnames(long)) { attributes(long[[i]]) <- variable_attr[[i]] } if (isTRUE(tbl_input)) { class(long) <- c("tbl_df", "tbl", "data.frame") } long } #' @rdname data_to_long #' @export data_to_wide <- function(data, values_from = "Value", colnames_from = "Name", rows_from = NULL, sep = "_", ..., names_from = colnames_from) { if 
(inherits(data, "tbl_df")) { tbl_input <- TRUE data <- as.data.frame(data) } else { tbl_input <- FALSE } # Compatibility with tidyr if (names_from != colnames_from) colnames_from <- names_from # save attribute of each variable variable_attr <- lapply(data, attributes) # If no other row identifier, create one if (is.null(rows_from)) { if (all(names(data) %in% c(values_from, colnames_from))) { data[["_Rows"]] <- row.names(data) } data[["_Rows"]] <- apply(data[, !names(data) %in% c(values_from, colnames_from), drop = FALSE], 1, paste, collapse = "_") rows_from <- "_Rows" } # Reshape wide <- stats::reshape(data, v.names = values_from, idvar = rows_from, timevar = colnames_from, sep = sep, direction = "wide" ) # Clean if ("_Rows" %in% names(wide)) wide[["_Rows"]] <- NULL row.names(wide) <- NULL # Reset row names # Remove reshape attributes attributes(wide)$reshapeWide <- NULL # add back attributes where possible for (i in colnames(wide)) { attributes(wide[[i]]) <- variable_attr[[i]] } if (isTRUE(tbl_input)) { class(wide) <- c("tbl_df", "tbl", "data.frame") } wide } insight/R/format_string.R0000644000175000017500000000330714077615664015276 0ustar nileshnilesh#' String Values Formatting #' #' @param x String value. #' @param length Numeric, maximum length of the returned string. If not #' `NULL`, will shorten the string to a maximum `length`, however, #' it will not truncate inside words. I.e. if the string length happens to be #' inside a word, this word is removed from the returned string, so the #' returned string has a *maximum* length of `length`, but might be #' shorter. #' @param abbreviate String that will be used as suffix, if `x` was #' shortened. #' @param ... Arguments passed to or from other methods. #' #' @return A formatted string. #' #' @examples #' s <- "This can be considered as very long string!" #' # string is shorter than max.length, so returned as is #' format_string(s, 60) #' #' # string is shortened to as many words that result in #' # a string of maximum 20 chars #' format_string(s, 20) #' @export format_string <- function(x, ...) { UseMethod("format_string") } #' @export format_string.default <- function(x, ...) { x } #' @export format_string.data.frame <- function(x, length = NULL, abbreviate = "...", ...) { as.data.frame(sapply( x, format_string, length = length, abbreviate = abbreviate, simplify = FALSE )) } #' @rdname format_string #' @export format_string.character <- function(x, length = NULL, abbreviate = "...", ...) { if (!is.null(length)) { pattern <- paste("(.{1,", length, "})(\\s|$)", sep = "") tmp <- paste0(substr(x, 0, unlist(regexec(abbreviate, sub(pattern, replacement = paste0("\\1", abbreviate), x), fixed = TRUE)) - 1), abbreviate) too.long <- nchar(x) > length x[too.long] <- tmp[too.long] } x } insight/R/find_interactions.R0000644000175000017500000000473514077615664016130 0ustar nileshnilesh#' @title Find interaction terms from models #' @name find_interactions #' #' @description Returns all lowest to highest order interaction terms from a model. #' #' @inheritParams find_predictors #' #' @return A list of character vectors that represent the interaction terms. 
#' Depending on `component`, the returned list has following #' elements (or `NULL`, if model has no interaction term): #' \itemize{ #' \item `conditional`, interaction terms that belong to the "fixed #' effects" terms from the model #' #' \item `zero_inflated`, interaction terms that belong to the "fixed #' effects" terms from the zero-inflation component of the model #' #' \item `instruments`, for fixed-effects regressions like `ivreg`, #' `felm` or `plm`, interaction terms that belong to the #' instrumental variables #' } #' #' @examples #' data(mtcars) #' #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_interactions(m) #' #' m <- lm(mpg ~ wt * cyl + vs * hp * gear + carb, data = mtcars) #' find_interactions(m) #' @export find_interactions <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "instruments"), flatten = FALSE) { component <- match.arg(component) .find_interactions(x, effects = "fixed", component, flatten, main_effects = FALSE ) } .find_interactions <- function(x, effects = "fixed", component, flatten, main_effects = FALSE) { f <- find_formula(x) is_mv <- is_multivariate(f) elements <- .get_elements(effects = effects, component = component) if (is_mv) { l <- lapply(f, function(.x) .compact_list(lapply(.x[elements], function(i) .get_interaction_terms(i, main_effects)))) } else { l <- .compact_list(lapply(f[elements], function(i) .get_interaction_terms(i, main_effects))) } if (.is_empty_object(l)) { return(NULL) } if (flatten) { unique(unlist(l)) } else { l } } .get_interaction_terms <- function(f, main_effects = FALSE) { if (is.null(f)) { return(NULL) } terms <- labels(stats::terms(f)) if (main_effects) { terms } else { interaction_terms <- grepl(":", terms, fixed = TRUE) if (any(interaction_terms)) { terms[interaction_terms] } else { NULL } } } insight/R/get_auxiliary.R0000644000175000017500000001144314137207374015256 0ustar nileshnilesh#' @title Get auxiliary parameters from models #' #' @description Returns the requested auxiliary parameters from models, like #' dispersion, sigma, or beta... #' #' @name get_auxiliary #' #' @param x A model. #' @param type The name of the auxiliary parameter that should be retrieved. #' `"sigma"` is available for most models, `"dispersion"` for models #' of class `glm`, `glmerMod` or `glmmTMB` as well as `brmsfit`. #' `"beta"` and other parameters are currently only returned for `brmsfit` #' models. See 'Details'. #' @param verbose Toggle warnings. #' @param ... Currently not used. #' @inheritParams get_parameters.BGGM #' #' @return The requested auxiliary parameter, or `NULL` if this information #' could not be accessed. #' #' @details Currently, only sigma and the dispersion parameter are returned, and #' only for a limited set of models. #' \subsection{Sigma Parameter}{ #' See [get_sigma()]. #' } #' \subsection{Dispersion Parameter}{ #' There are many different definitions of "dispersion", depending on the context. #' `get_auxiliary()` returns the dispersion parameters that usually can #' be considered as variance-to-mean ratio for generalized (linear) mixed #' models. Exceptions are models of class `glmmTMB`, where the dispersion #' equals \ifelse{html}{\out{σ2}}{\eqn{\sigma^2}}. #' In detail, the computation of the dispersion parameter for generalized linear #' models is the ratio of the sum of the squared working-residuals and the #' residual degrees of freedom. 
For mixed models of class `glmer`, the #' dispersion parameter is also called \ifelse{html}{\out{φ}}{\eqn{\phi}} #' and is the ratio of the sum of the squared Pearson-residuals and the residual #' degrees of freedom. For models of class `glmmTMB`, dispersion is #' \ifelse{html}{\out{σ2}}{\eqn{\sigma^2}}. #' } #' \subsection{\pkg{brms} models}{ #' For models of class `brmsfit`, there are different options for the #' `type` argument. See a list of supported auxiliary parameters here: #' [find_parameters.BGGM()]. #' } #' #' @examples #' # from ?glm #' clotting <- data.frame( #' u = c(5, 10, 15, 20, 30, 40, 60, 80, 100), #' lot1 = c(118, 58, 42, 35, 27, 25, 21, 19, 18), #' lot2 = c(69, 35, 26, 21, 18, 16, 13, 12, 12) #' ) #' model <- glm(lot1 ~ log(u), data = clotting, family = Gamma()) #' get_auxiliary(model, type = "dispersion") # same as summary(model)$dispersion #' @export get_auxiliary <- function(x, type = "sigma", summary = TRUE, centrality = "mean", verbose = TRUE, ...) { type <- match.arg(type, choices = .aux_elements()) if (inherits(x, "brmsfit")) { return(.get_generic_aux(x, type, summary = summary, centrality = centrality)) } else if (type == "sigma") { return(as.numeric(get_sigma(x))) } else if (type == "dispersion") { return(get_dispersion(x)) } else { return(NULL) } } # dispersion parameter ----------------------- get_dispersion <- function(x, ...) { UseMethod("get_dispersion") } get_dispersion.model_fit <- function(x, ...) { get_dispersion(x$fit, ...) } get_dispersion.glm <- function(x, verbose = TRUE, ...) { info <- model_info(x, verbose = verbose) disp <- NULL if (info$is_poisson || info$is_binomial || info$is_negbin) { disp <- 1 } else { working_weights <- get_weights(x, type = "working") working_res <- as.vector(get_residuals(x, type = "working"))^2 * working_weights disp <- sum(working_res[working_weights > 0]) / get_df(x, type = "residual") } disp } get_dispersion.glmerMod <- function(x, verbose = TRUE, ...) { info <- model_info(x, verbose = verbose) disp <- NULL if (info$is_poisson || info$is_binomial || info$is_negbin) { disp <- 1 } else { # see http://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#fitting-models-with-overdispersion # phi is the dispersion factor, and phi is usually "sigma^2" # (https://stat.ethz.ch/pipermail/r-sig-mixed-models/2017q4/026168.html) # or the following ratio: res_df <- get_df(x, type = "residual") p_res <- get_residuals(x, type = "pearson") disp <- sum(p_res^2) / res_df } disp } get_dispersion.glmmTMB <- function(x, verbose = TRUE, ...) { info <- model_info(x, verbose = verbose) disp <- NULL if (info$is_poisson || info$is_binomial || info$is_negbin) { disp <- 1 } else { disp <- as.numeric(get_sigma(x))^2 } disp } get_dispersion.brmsfit <- get_dispersion.glmmTMB # special ------------------ .get_generic_aux <- function(x, param, summary = TRUE, centrality = "mean", ...) { aux <- NULL if (inherits(x, "brmsfit")) { aux <- as.data.frame(x)[[param]] if (summary) { aux <- .summary_of_posteriors(aux, centrality = centrality) } } aux } insight/R/get_parameters_mixed.R0000644000175000017500000003464114077615665016616 0ustar nileshnilesh#' @title Get model parameters from mixed models #' @name get_parameters.glmm #' #' @description Returns the coefficients from a model. #' #' @param ... Currently not used. #' #' @inheritParams find_parameters.glmmTMB #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return If `effects = "fixed"`, a data frame with two columns: the #' parameter names and the related point estimates. 
If `effects = #' "random"`, a list of data frames with the random effects (as returned by #' `ranef()`), unless the random effects have the same simplified #' structure as fixed effects (e.g. for models from \pkg{MCMCglmm}). #' #' @details In most cases when models either return different "effects" (fixed, #' random) or "components" (conditional, zero-inflated, ...), the arguments #' `effects` and `component` can be used. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_parameters(m) #' @export get_parameters.glmm <- function(x, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) params <- data.frame( Parameter = names(c(x$beta, x$nu)), Estimate = unname(c(x$beta, x$nu)), Effects = c(rep("fixed", times = length(x$beta)), rep("random", times = length(x$nu))), stringsAsFactors = FALSE, row.names = NULL ) if (effects != "all") { params <- params[params$Effects == effects, , drop = FALSE] params$Effects <- NULL } .remove_backticks_from_parameter_names(params) } #' @rdname get_parameters.glmm #' @export get_parameters.coxme <- function(x, effects = c("fixed", "random"), ...) { # installed? check_if_installed("lme4") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = lme4::fixef(x)) } else { l <- .compact_list(list( conditional = lme4::fixef(x), random = lme4::ranef(x) )) } fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), stringsAsFactors = FALSE ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @export get_parameters.wbm <- function(x, effects = c("fixed", "random"), ...) { effects <- match.arg(effects) if (effects == "fixed") { s <- summary(x) terms <- c( rownames(s$within_table), rownames(s$between_table), rownames(s$ints_table) ) wt <- s$within_table bt <- s$between_table it <- s$ints_table if (!is.null(wt)) { wt <- data.frame(params = wt, component = "within", stringsAsFactors = FALSE) } if (!is.null(bt)) { bt <- data.frame(params = bt, component = "between", stringsAsFactors = FALSE) } if (!is.null(it)) { it <- data.frame(params = it, component = "interactions", stringsAsFactors = FALSE) } params <- rbind(wt, bt, it) out <- data.frame( Parameter = terms, Estimate = params[[1]], Component = params[["component"]], stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } else { # installed? check_if_installed("lme4") lme4::ranef(x) } } #' @export get_parameters.wbgee <- function(x, ...) { get_parameters.wbm(x, effects = "fixed") } #' @export get_parameters.nlmerMod <- function(x, effects = c("fixed", "random"), ...) { # installed? 
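  # For nonlinear mixed models (nlmer), the fixed-effects vector mixes the
  # "conditional" terms with the nonlinear parameters. The latter are
  # identified via the start vector recovered from the model's calling
  # environment (.get_startvector_from_env()) and are reported in a
  # separate "nonlinear" component below.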
check_if_installed("lme4") effects <- match.arg(effects) startvectors <- .get_startvector_from_env(x) fx <- lme4::fixef(x) if (effects == "fixed") { l <- .compact_list(list( conditional = fx[setdiff(names(fx), startvectors)], nonlinear = fx[startvectors] )) } else { l <- .compact_list(list( conditional = fx[setdiff(names(fx), startvectors)], nonlinear = fx[startvectors], random = lapply(lme4::ranef(x), colnames) )) } fixed <- data.frame( Parameter = c( names(l$conditional), names(l$nonlinear) ), Estimate = c(unname(l$conditional), unname(l$nonlinear)), Component = c( rep("fixed", length(l$conditional)), rep("nonlinear", length(l$nonlinear)) ), stringsAsFactors = FALSE ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @rdname get_parameters.glmm #' @export get_parameters.merMod <- function(x, effects = c("fixed", "random"), ...) { # installed? check_if_installed("lme4") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = lme4::fixef(x)) } else { l <- .compact_list(list( conditional = lme4::fixef(x), random = lme4::ranef(x) )) } fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), stringsAsFactors = FALSE ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @export get_parameters.rlmerMod <- get_parameters.merMod #' @export get_parameters.glmmadmb <- get_parameters.merMod #' @export get_parameters.lme <- get_parameters.merMod #' @export get_parameters.merModList <- function(x, ...) { s <- suppressWarnings(summary(x)) fixed <- data.frame( Parameter = s$fe$term, Estimate = s$fe$estimate, stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(fixed) } #' @export get_parameters.HLfit <- function(x, effects = c("fixed", "random"), ...) { # installed? check_if_installed("lme4") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = lme4::fixef(x)) } else { utils::capture.output(s <- summary(x)) l <- .compact_list(list( conditional = lme4::fixef(x), random = lme4::ranef(x) )) } fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), stringsAsFactors = FALSE ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @export get_parameters.sem <- function(x, effects = c("fixed", "random"), ...) { if (!.is_semLme(x)) { return(NULL) } effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = x$coef) } else { l <- .compact_list(list( conditional = x$coef, random = x$ranef )) } fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), stringsAsFactors = FALSE ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @export get_parameters.cpglmm <- function(x, effects = c("fixed", "random"), ...) { # installed? check_if_installed("cplm") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = cplm::fixef(x)) } else { l <- .compact_list(list( conditional = cplm::fixef(x), random = cplm::ranef(x) )) } fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), stringsAsFactors = FALSE ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @export get_parameters.mixed <- function(x, effects = c("fixed", "random"), ...) { # installed? 
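  # Objects of class `mixed` (e.g. from `afex::mixed()`) wrap the underlying
  # merMod fit in `$full_model`; fixed and random effects are therefore
  # extracted from that slot via lme4.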
check_if_installed("lme4") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = lme4::fixef(x$full_model)) } else { l <- .compact_list(list( conditional = lme4::fixef(x$full_model), random = lme4::ranef(x$full_model) )) } fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), stringsAsFactors = FALSE ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @export get_parameters.MixMod <- function(x, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), ...) { # installed? check_if_installed("lme4") effects <- match.arg(effects) component <- match.arg(component) has_zeroinf <- !is.null(find_formula(x, verbose = FALSE)[["zero_inflated"]]) if (component %in% c("zi", "zero_inflated") && !has_zeroinf) { stop("Model has no zero-inflation component.", call. = FALSE) } re.names <- dimnames(lme4::ranef(x))[[2]] re <- lme4::ranef(x) if (has_zeroinf) { z_inflated <- lme4::fixef(x, sub_model = "zero_part") z_inflated_random <- re[grepl("^zi_", re.names, perl = TRUE)] } else { z_inflated <- NULL z_inflated_random <- NULL component <- "conditional" } l <- .compact_list(list( conditional = lme4::fixef(x, sub_model = "main"), random = re[grepl("^(?!zi_)", re.names, perl = TRUE)], zero_inflated = z_inflated, zero_inflated_random = z_inflated_random )) fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), Component = "conditional", stringsAsFactors = FALSE ) if (has_zeroinf) { fixedzi <- data.frame( Parameter = names(l$zero_inflated), Estimate = unname(l$zero_inflated), Component = "zero_inflated", stringsAsFactors = FALSE ) } else { fixedzi <- NULL } if (effects == "fixed") { params <- switch(component, all = rbind(fixed, fixedzi), conditional = fixed, zi = , zero_inflated = fixedzi ) .remove_backticks_from_parameter_names(params) } else if (effects == "random") { switch(component, all = .compact_list(list(random = l$random, zero_inflated_random = l$zero_inflated_random)), conditional = list(random = l$random), zi = , zero_inflated = list(zero_inflated_random = l$zero_inflated_random) ) } } #' @rdname get_parameters.glmm #' @export get_parameters.glmmTMB <- function(x, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), ...) { # installed? 
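  # glmmTMB stores its fixed effects as a list with "cond" (conditional),
  # "zi" (zero-inflation) and "disp" (dispersion) elements; these are mapped
  # to the "Component" column of the returned data frame below.
  # Illustrative usage (sketch, assuming a fitted zero-inflated model `m`):
  #   get_parameters(m, component = "zero_inflated")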
check_if_installed("lme4") effects <- match.arg(effects) component <- match.arg(component) if (effects == "fixed") { l <- .compact_list(list( conditional = lme4::fixef(x)$cond, zero_inflated = lme4::fixef(x)$zi, dispersion = lme4::fixef(x)$disp )) } else { l <- .compact_list(list( conditional = lme4::fixef(x)$cond, random = lme4::ranef(x)$cond, zero_inflated = lme4::fixef(x)$zi, zero_inflated_random = lme4::ranef(x)$zi, dispersion = lme4::fixef(x)$disp )) } # ---- fixed effects (conditional model) fixed <- data.frame( Parameter = names(l$conditional), Estimate = unname(l$conditional), Component = "conditional", stringsAsFactors = FALSE ) # ---- fixed effects (zero_inflated model) if (.obj_has_name(l, "zero_inflated")) { fixedzi <- data.frame( Parameter = names(l$zero_inflated), Estimate = unname(l$zero_inflated), Component = "zero_inflated", stringsAsFactors = FALSE ) } else { fixedzi <- NULL } # ---- fixed effects (dispersion model) if (.obj_has_name(l, "dispersion")) { fixeddisp <- data.frame( Parameter = names(l$dispersion), Estimate = unname(l$dispersion), Component = "dispersion", stringsAsFactors = FALSE ) } else { fixeddisp <- NULL } # ---- build result if (effects == "fixed") { out <- switch(component, all = rbind(fixed, fixedzi, fixeddisp), conditional = fixed, zi = , zero_inflated = fixedzi, dispersion = fixeddisp ) .remove_backticks_from_parameter_names(out) } else if (effects == "random") { switch(component, all = .compact_list(list(random = l$random, zero_inflated_random = l$zero_inflated_random)), conditional = l$random, zi = , zero_inflated = l$zero_inflated_random ) } } #' @export get_parameters.mixor <- function(x, effects = c("all", "fixed", "random"), ...) { coefs <- stats::coef(x) effects <- match.arg(effects) params <- find_parameters(x, effects = "fixed", flatten = TRUE) fixed <- data.frame( Parameter = params, Estimate = unname(coefs[params]), Effects = "fixed", stringsAsFactors = FALSE ) if (effects != "fixed") { params <- find_parameters(x, effects = "random", flatten = TRUE) random <- data.frame( Parameter = params, Estimate = unname(coefs[params]), Effects = "random", stringsAsFactors = FALSE ) } else { random <- NULL } switch(effects, "all" = rbind(fixed, random), "fixed" = fixed, "random" = random ) } #' @export get_parameters.BBmm <- function(x, effects = c("fixed", "random"), ...) { effects <- match.arg(effects) l <- .compact_list(list( conditional = x$fixed.coef, random = x$random.coef )) fixed <- data.frame( Parameter = names(l$conditional), Estimate = l$conditional, stringsAsFactors = FALSE, row.names = NULL ) if (effects == "fixed") { .remove_backticks_from_parameter_names(fixed) } else { l$random } } #' @rdname get_parameters.glmm #' @export get_parameters.glimML <- function(x, effects = c("fixed", "random", "all"), ...) 
{ effects <- match.arg(effects) l <- .compact_list(list( conditional = x@fixed.param, random = x@random.param )) fixed <- data.frame( Parameter = names(l$conditional), Estimate = l$conditional, stringsAsFactors = FALSE, row.names = NULL ) fixed <- .remove_backticks_from_parameter_names(fixed) random <- data.frame( Parameter = names(l$random), Estimate = l$random, stringsAsFactors = FALSE, row.names = NULL ) random <- .remove_backticks_from_parameter_names(random) all <- rbind( cbind(fixed, data.frame(Effects = "fixed", stringsAsFactors = FALSE)), cbind(random, data.frame(Effects = "random", stringsAsFactors = FALSE)) ) if (effects == "fixed") { fixed } else if (effects == "random") { random } else { all } } insight/R/find_parameters_mixed.R0000644000175000017500000003106114077615664016747 0ustar nileshnilesh#' @title Find names of model parameters from mixed models #' @name find_parameters.glmmTMB #' #' @description Returns the names of model parameters, like they typically #' appear in the `summary()` output. #' #' @param component Which type of parameters to return, such as parameters for #' the conditional model, the zero-inflated part of the model or the #' dispersion term? Applies to models with zero-inflated and/or dispersion #' formula. Note that the *conditional* component is also called #' *count* or *mean* component, depending on the model. There are #' three convenient shortcuts: `component = "all"` returns all possible #' parameters. If `component = "location"`, location parameters such as #' `conditional` or `zero_inflated` are returned (everything that #' are fixed or random effects - depending on the `effects` argument - #' but no auxiliary parameters). For `component = "distributional"` (or #' `"auxiliary"`), components like `sigma` or `dispersion` (and #' other auxiliary parameters) are returned. #' @param ... Currently not used. #' @inheritParams find_parameters #' @inheritParams find_parameters.betamfx #' @inheritParams find_parameters.BGGM #' @inheritParams find_predictors #' #' @return A list of parameter names. The returned list may have following #' elements: #' \itemize{ #' \item `conditional`, the "fixed effects" part from the model. #' \item `random`, the "random effects" part from the model. #' \item `zero_inflated`, the "fixed effects" part from the #' zero-inflation component of the model. #' \item `zero_inflated_random`, the "random effects" part from the #' zero-inflation component of the model. #' \item `dispersion`, the dispersion parameters (auxiliary parameter) #' } #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_parameters(m) #' @export find_parameters.glmmTMB <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), flatten = FALSE, ...) { effects <- match.arg(effects) component <- match.arg(component) # installed check_if_installed("lme4") # we extract random effects only when really necessary, to save # computational time. 
In particular model with large sample and # many random effects groups may take some time to return random effects if (effects == "fixed") { l <- .compact_list(list( conditional = names(lme4::fixef(x)$cond), zero_inflated = names(lme4::fixef(x)$zi), dispersion = names(lme4::fixef(x)$disp) )) } else { l <- .compact_list(list( conditional = names(lme4::fixef(x)$cond), random = lapply(lme4::ranef(x)$cond, colnames), zero_inflated = names(lme4::fixef(x)$zi), zero_inflated_random = lapply(lme4::ranef(x)$zi, colnames), dispersion = names(lme4::fixef(x)$disp) )) } .filter_parameters(l, effects = effects, component = component, flatten = flatten ) } #' @export find_parameters.MixMod <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), flatten = FALSE, ...) { # installed check_if_installed("lme4") re.names <- dimnames(lme4::ranef(x))[[2]] has_zeroinf <- !is.null(find_formula(x)[["zero_inflated"]]) if (has_zeroinf) { z_inflated <- names(lme4::fixef(x, sub_model = "zero_part")) z_inflated_random <- re.names[grepl("^zi_", re.names, perl = TRUE)] } else { z_inflated <- NULL z_inflated_random <- NULL } l <- .compact_list(list( conditional = names(lme4::fixef(x, sub_model = "main")), random = re.names[grepl("^(?!zi_)", re.names, perl = TRUE)], zero_inflated = z_inflated, zero_inflated_random = z_inflated_random )) l <- lapply(l, .remove_backticks_from_string) effects <- match.arg(effects) component <- match.arg(component) elements <- .get_elements(effects = effects, component = component) l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.nlmerMod <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { # installed check_if_installed("lme4") effects <- match.arg(effects) startvectors <- .get_startvector_from_env(x) if (effects == "fixed") { l <- .compact_list(list( conditional = setdiff(names(lme4::fixef(x)), startvectors), nonlinear = startvectors )) } else { l <- .compact_list(list( conditional = setdiff(names(lme4::fixef(x)), startvectors), nonlinear = startvectors, random = lapply(lme4::ranef(x), colnames) )) } .filter_parameters(l, effects = effects, flatten = flatten) } #' @rdname find_parameters.glmmTMB #' @export find_parameters.merMod <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { effects <- match.arg(effects) # installed check_if_installed("lme4") # we extract random effects only when really necessary, to save # computational time. In particular model with large sample and # many random effects groups may take some time to return random effects if (effects == "fixed") { l <- list(conditional = names(lme4::fixef(x))) } else { l <- .compact_list(list( conditional = names(lme4::fixef(x)), random = lapply(lme4::ranef(x), colnames) )) } .filter_parameters(l, effects = effects, flatten = flatten) } #' @export find_parameters.rlmerMod <- find_parameters.merMod #' @export find_parameters.glmmadmb <- find_parameters.merMod #' @export find_parameters.merModList <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { effects <- match.arg(effects) find_parameters(x[[1]], effects = effects, flatten = flatten, ...) } #' @export find_parameters.HLfit <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { effects <- match.arg(effects) # installed check_if_installed("lme4") # we extract random effects only when really necessary, to save # computational time. 
In particular model with large sample and # many random effects groups may take some time to return random effects if (effects == "fixed") { l <- list(conditional = names(lme4::fixef(x))) } else { utils::capture.output(s <- summary(x)) l <- .compact_list(list( conditional = names(lme4::fixef(x)), random = s$lambda_table$Term )) } .filter_parameters(l, effects = effects, flatten = flatten) } #' @export find_parameters.sem <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { if (!.is_semLme(x)) { return(NULL) } effects <- match.arg(effects) l <- .compact_list(list( conditional = names(x$coef), random = colnames(x$ranef) )) .filter_parameters(l, effects = effects, flatten = flatten) } #' @export find_parameters.cpglmm <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { # installed check_if_installed("cplm") effects <- match.arg(effects) # we extract random effects only when really necessary, to save # computational time. In particular model with large sample and # many random effects groups may take some time to return random effects if (effects == "fixed") { l <- list(conditional = names(cplm::fixef(x))) } else { l <- .compact_list(list( conditional = names(cplm::fixef(x)), random = lapply(cplm::ranef(x), colnames) )) } .filter_parameters(l, effects = effects, flatten = flatten) } #' @export find_parameters.coxme <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { # installed? check_if_installed("lme4") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = names(lme4::fixef(x))) } else { l <- .compact_list(list( conditional = names(lme4::fixef(x)), random = names(lme4::ranef(x)) )) } .filter_parameters(l, effects = effects, flatten = flatten, recursive = FALSE ) } #' @export find_parameters.mixed <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { # installed check_if_installed("lme4") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = names(lme4::fixef(x$full_model))) } else { l <- .compact_list(list( conditional = names(lme4::fixef(x$full_model)), random = lapply(lme4::ranef(x$full_model), colnames) )) } .filter_parameters(l, effects = effects, flatten = flatten) } #' @export find_parameters.lme <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { # installed? check_if_installed("lme4") effects <- match.arg(effects) if (effects == "fixed") { l <- list(conditional = names(lme4::fixef(x))) } else { re <- lme4::ranef(x) if (is.data.frame(re)) { rn <- colnames(re) } else { rn <- lapply(re, colnames) } l <- .compact_list(list( conditional = names(lme4::fixef(x)), random = rn )) } .filter_parameters(l, effects = effects, flatten = flatten) } #' @export find_parameters.glmm <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { effects <- match.arg(effects) s <- summary(x) fe_params <- rownames(s$coefmat) re_params <- rownames(s$nucoefmat) l <- .compact_list(list( conditional = fe_params, random = re_params )) .filter_parameters(l, effects = effects, flatten = flatten) } #' @export find_parameters.BBmm <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { l <- .compact_list(list( conditional = names(x$fixed.coef), random = x$namesRand )) effects <- match.arg(effects) .filter_parameters(l, effects = effects, flatten = flatten, recursive = FALSE ) } #' @export find_parameters.glimML <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) 
{ l <- .compact_list(list( conditional = names(x@fixed.param), random = names(x@random.param) )) effects <- match.arg(effects) .filter_parameters(l, effects = effects, flatten = flatten, recursive = FALSE ) } #' @export find_parameters.mixor <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { effects <- match.arg(effects) coefs <- x$Model random_start <- grep("(\\(Intercept\\) \\(Intercept\\)|Random\\.\\(Intercept\\))", rownames(coefs)) thresholds <- grep("Threshold\\d", rownames(coefs)) l <- list( conditional = rownames(coefs)[c(1, thresholds, 2:(random_start - 1))], random = rownames(coefs)[random_start:(thresholds[1] - 1)] ) .filter_parameters(l, effects = effects, flatten = flatten) } insight/R/download_model.R0000644000175000017500000000277314077615664015415 0ustar nileshnilesh#' @title Download circus models #' @name download_model #' #' @description Downloads pre-compiled models from the *circus*-repository. #' The *circus*-repository contains a variety of fitted models to help #' the systematic testing of other packages #' #' @param name Model name. #' @param url String with the URL from where to download the model data. #' Optional, and should only be used in case the repository-URL is #' changing. By default, models are downloaded from #' `https://raw.github.com/easystats/circus/master/data/`. #' #' @return A model from the *circus*-repository. #' #' @details The code that generated the model is available at the #' . #' #' @references #' #' @export download_model <- function(name, url = NULL) { .download_data_github(name, url) } # Download rda files from github .download_data_github <- function(name, url) { if (!requireNamespace("httr", quietly = TRUE)) { stop("Package `httr` required to download models from the circus-repo.", call. = FALSE) } if (is.null(url)) { url <- "https://raw.github.com/easystats/circus/master/data/" } url <- paste0(url, name, ".rda") temp_file <- tempfile() on.exit(unlink(temp_file)) request <- httr::GET(url) httr::stop_for_status(request) writeBin(httr::content(request, type = "raw"), temp_file) x <- load(temp_file) model <- get(x) rm(x) model } insight/R/utils_model_info.R0000644000175000017500000003150114144234777015745 0ustar nileshnilesh.make_family <- function(x, fitfam = "gaussian", zero.inf = FALSE, hurdle = FALSE, logit.link = FALSE, multi.var = FALSE, link.fun = "identity", dispersion = FALSE, verbose = TRUE, ...) { dots <- list(...) 
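  # .make_family() derives a list of logical flags (is_binomial, is_poisson,
  # is_zero_inflated, ...) plus family and link information from the family
  # string and the model object; this list underlies the output of
  # `model_info()`. If only the family/link pair is requested, return early.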
if (isTRUE(dots$return_family_only)) { return(list(family = fitfam, link_function = link.fun)) } # create logical for family # binomial family -------- binom_fam <- fitfam %in% c("bernoulli", "binomial", "quasibinomial", "binomialff") | grepl("\\Qbinomial\\E", fitfam, ignore.case = TRUE) # poisson family -------- poisson_fam <- fitfam %in% c("poisson", "quasipoisson", "genpois", "ziplss") | grepl("\\Qpoisson\\E", fitfam, ignore.case = TRUE) # negative binomial family -------- neg_bin_fam <- grepl("\\Qnegative binomial\\E", fitfam, ignore.case = TRUE) | grepl("\\Qnbinom\\E", fitfam, ignore.case = TRUE) | grepl("\\Qnegbin\\E", fitfam, ignore.case = TRUE) | grepl("\\Qnzbinom\\E", fitfam, ignore.case = TRUE) | grepl("\\Qgenpois\\E", fitfam, ignore.case = TRUE) | grepl("\\Qnegbinomial\\E", fitfam, ignore.case = TRUE) | grepl("\\Qneg_binomial\\E", fitfam, ignore.case = TRUE) | fitfam %in% c("ztnbinom", "nbinom") # bernoulli family -------- is_bernoulli <- FALSE if (binom_fam && inherits(x, "glm") && !neg_bin_fam && !poisson_fam) { resp <- get_response(x, verbose = FALSE) if (is.data.frame(resp) && ncol(resp) == 1) { resp <- as.vector(resp[[1]]) } if (!is.data.frame(resp) && all(.is.int(.factor_to_numeric(resp[[1]])))) { is_bernoulli <- TRUE } } else if (fitfam %in% "bernoulli") { is_bernoulli <- TRUE } # beta family -------- beta_fam <- inherits(x, c("betareg", "betamfx")) | fitfam %in% c( "beta", "Beta", "betabinomial", "Beta Inflated", "Zero Inflated Beta", "Beta Inflated zero", "Beta Inflated one" ) # special families (beta-binomial, dirichlet) -------- betabin_fam <- inherits(x, "BBreg") | fitfam %in% "betabinomial" dirichlet_fam <- inherits(x, "DirichletRegModel") | fitfam %in% "dirichlet" ## TODO beta-binomial = binomial? if (betabin_fam) binom_fam <- TRUE # exponential family -------- exponential_fam <- fitfam %in% c("Gamma", "gamma", "weibull") # zero-inflated or hurdle component -------- zero.inf <- zero.inf | fitfam == "ziplss" | grepl("\\Qzero_inflated\\E", fitfam, ignore.case = TRUE) | grepl("\\Qzero-inflated\\E", fitfam, ignore.case = TRUE) | grepl("\\Qneg_binomial\\E", fitfam, ignore.case = TRUE) | grepl("\\Qhurdle\\E", fitfam, ignore.case = TRUE) | grepl("^(zt|zi|za|hu)", fitfam, perl = TRUE) | grepl("^truncated", fitfam, perl = TRUE) # only hurdle component -------- hurdle <- hurdle | grepl("\\Qhurdle\\E", fitfam, ignore.case = TRUE) | grepl("^hu", fitfam, perl = TRUE) | grepl("^truncated", fitfam, perl = TRUE) | fitfam == "ztnbinom" | fitfam %in% c("truncpoiss", "truncnbinom", "truncnbinom1", "truncpoisson") # ordinal family -------- is.ordinal <- inherits(x, c("svyolr", "polr", "clm", "clm2", "clmm", "mixor", "LORgee", "mvord")) | fitfam %in% c("cumulative", "ordinal") # multinomial family -------- is.multinomial <- inherits(x, c("gmnl", "mlogit", "DirichletRegModel", "multinom", "brmultinom")) | fitfam %in% c("cratio", "sratio", "acat", "multinom", "multinomial", "multinomial2", "dirichlet") # categorical family -------- is.categorical <- fitfam == "categorical" # special handling of rms -------------- if (inherits(x, c("lrm", "blrm"))) { resp <- get_response(x, verbose = FALSE) if (.n_unique(resp) == 2) { binom_fam <- TRUE } else { is.ordinal <- TRUE } } # Bayesian model -------- is.bayes <- .is_bayesian_model(x) # survival model -------- is.survival <- inherits( x, c( "aareg", "survreg", "survfit", "survPresmooth", "flexsurvreg", "coxph", "coxme", "coxr", "riskRegression", "comprisk" ) ) # check if we have binomial models with trials instead of binary outcome # and check if 
we have truncated or censored brms-regression # censored or truncated response -------- is.trial <- FALSE is.censored <- inherits(x, c("tobit", "crch", "censReg", "crq", "crqs")) | (inherits(x, "sem") && inherits(x, "lme")) is.truncated <- FALSE if (inherits(x, "brmsfit") && is.null(stats::formula(x)$responses)) { rv <- tryCatch( { .safe_deparse(stats::formula(x)$formula[[2L]]) }, error = function(x) { NULL } ) if (!is.null(rv)) { is.trial <- .trim(sub("(.*)\\|(.*)\\(([^,)]*).*", "\\2", rv)) %in% c("trials", "resp_trials") is.censored <- grepl("(.*)\\|(.*)cens\\(", rv) is.truncated <- grepl("(.*)\\|(.*)trunc\\(", rv) } } if (binom_fam && !inherits(x, "brmsfit")) { is.trial <- tryCatch( { rv <- .safe_deparse(stats::formula(x)[[2L]]) grepl("cbind\\((.*)\\)", rv) }, error = function(x) { FALSE } ) } # save model terms -------- if (isTRUE(dots$no_terms)) { model_terms <- NULL } else { if (inherits(x, "mcmc")) { model_terms <- find_parameters(x) } else { model_terms <- tryCatch( { find_variables( x, effects = "all", component = "all", flatten = FALSE, verbose = FALSE ) }, error = function(x) { NULL } ) } } # significance tests -------- is_ttest <- FALSE is_correlation <- FALSE is_oneway <- FALSE is_proptest <- FALSE is_binomtest <- FALSE is_chi2test <- FALSE is_ranktest <- FALSE is_xtab <- FALSE is_levenetest <- FALSE if (inherits(x, "htest")) { if (grepl("kruskal-wallis", tolower(x$method), fixed = TRUE) || grepl("design-based kruskalwallis", tolower(x$method), fixed = TRUE) || grepl("design-based median", tolower(x$method), fixed = TRUE) || grepl("design-based vanderwaerden", tolower(x$method), fixed = TRUE) || grepl("wilcoxon", tolower(x$method), fixed = TRUE) || grepl("friedman", tolower(x$method), fixed = TRUE)) { is_ranktest <- TRUE } else if (grepl("t-test", x$method)) { is_ttest <- TRUE } else if (grepl("^One-way", x$method)) { is_oneway <- TRUE } else if (x$method == "Exact binomial test") { binom_fam <- TRUE is_binomtest <- TRUE fitfam <- "binomial" } else if (grepl("\\d+-sample(.*)proportions(.*)", x$method)) { binom_fam <- TRUE is_proptest <- TRUE fitfam <- "binomial" } else if (any(grepl("chi-squared", c(tolower(x$method), tolower(attributes(x$statistic)$names)), fixed = TRUE)) || grepl("Fisher's Exact Test", x$method, fixed = TRUE) || grepl("pearson's x^2", tolower(x$method), fixed = TRUE)) { is_chi2test <- TRUE is_xtab <- TRUE fitfam <- "categorical" } else { is_correlation <- TRUE if (grepl("Spearman's rank", x$method, fixed = TRUE)) { is_ranktest <- TRUE } } } else if (inherits(x, "BGGM")) { is_correlation <- TRUE } # exceptions: car::leveneTest if (inherits(x, "anova") && !is.null(attributes(x)$heading) && grepl("Levene's Test", attributes(x)$heading, fixed = TRUE)) { is_levenetest <- TRUE } # Bayesfactors terms -------- is_meta <- FALSE if (inherits(x, "BFBayesFactor")) { is_ttest <- FALSE is_correlation <- FALSE is_oneway <- FALSE is_proptest <- FALSE is_xtab <- FALSE obj_type <- .classify_BFBayesFactor(x) if (obj_type == "correlation") { is_correlation <- TRUE } else if (obj_type %in% c("ttest1", "ttest2")) { is_ttest <- TRUE } else if (obj_type == "meta") { is_meta <- TRUE } else if (obj_type == "proptest") { binom_fam <- TRUE is_proptest <- TRUE fitfam <- "binomial" } else if (obj_type == "xtable") { is_xtab <- TRUE fitfam <- "categorical" } } # meta analysis -------- if (!is_meta) { is_meta <- inherits(x, c("rma", "metaplus", "meta_random", "meta_fixed", "meta_bma")) } if (inherits(x, "brmsfit") && !is_multivariate(x)) { is_meta <- grepl("(.*)\\|(.*)se\\((.*)\\)", 
.safe_deparse(find_formula(x, verbose = FALSE)$conditional[[2]])) } # gaussian family -------- linear_model <- TRUE if (binom_fam | exponential_fam | poisson_fam | neg_bin_fam | logit.link | dirichlet_fam | is.ordinal | zero.inf | is.censored | is.survival | is_binomtest | is.categorical | hurdle | is.multinomial | is_chi2test | is_proptest | is_xtab) { linear_model <- FALSE } else if (!(fitfam %in% c("Student's-t", "t Family", "gaussian", "Gaussian")) && !grepl("(\\st)$", fitfam)) { linear_model <- FALSE } if (!linear_model && is.survival && fitfam == "gaussian") { linear_model <- TRUE } # tweedie family -------- tweedie_fam <- grepl("^(tweedie|Tweedie)", fitfam) | grepl("^(tweedie|Tweedie)", link.fun) tweedie_model <- (linear_model && tweedie_fam) || inherits(x, c("bcplm", "cpglm", "cpglmm", "zcpglm")) # return... list( is_binomial = binom_fam & !neg_bin_fam, is_bernoulli = is_bernoulli, is_count = poisson_fam | neg_bin_fam, is_poisson = poisson_fam, is_negbin = neg_bin_fam, is_beta = beta_fam, is_betabinomial = betabin_fam, is_dirichlet = dirichlet_fam, is_exponential = exponential_fam, is_logit = logit.link, is_probit = isTRUE(link.fun == "probit"), is_censored = is.censored | is.survival, is_truncated = inherits(x, "truncreg") | is.truncated, is_survival = is.survival, is_linear = linear_model, is_tweedie = tweedie_model, is_zeroinf = zero.inf, is_zero_inflated = zero.inf, is_dispersion = dispersion, is_hurdle = hurdle, is_ordinal = is.ordinal, is_cumulative = is.ordinal, is_multinomial = is.multinomial | is.categorical, is_categorical = is.categorical, is_mixed = !is_levenetest && is_mixed_model(x), is_multivariate = multi.var, is_trial = is.trial, is_bayesian = is.bayes, is_gam = is_gam_model(x), is_anova = inherits(x, c("aov", "aovlist", "MANOVA", "RM")), is_timeseries = inherits(x, c("Arima")), is_ttest = is_ttest, is_correlation = is_correlation, is_onewaytest = is_oneway, is_chi2test = is_chi2test, is_ranktest = is_ranktest, is_levenetest = is_levenetest, is_xtab = is_xtab, is_proptest = is_proptest, is_binomtest = is_binomtest, is_meta = is_meta, link_function = link.fun, family = fitfam, n_obs = n_obs(x), model_terms = model_terms ) } .get_ordinal_link <- function(x) { switch(x$link, logistic = "logit", cloglog = "log", x$link ) } .make_tobit_family <- function(x, dist = NULL) { if (is.null(dist)) { if (inherits(x, "flexsurvreg")) { dist <- parse(text = .safe_deparse(x$call))[[1]]$dist } else { dist <- x$dist } } f <- switch(dist, gaussian = stats::gaussian("identity"), logistic = stats::binomial("logit"), llogis = , loglogistic = stats::binomial("log"), lnorm = , lognormal = stats::gaussian("log"), gompertz = stats::Gamma("log"), gamma = , gengamma = , gengamma.orig = stats::Gamma(), exponential = , exp = , weibull = stats::Gamma("log"), stats::gaussian("identity") ) if (dist == "weibull") f$family <- "weibull" f } .classify_BFBayesFactor <- function(x) { # installed? 
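  # BFBayesFactor objects carry no explicit "model type", so we infer it from
  # the class of the denominator model (e.g. BFcorrelation -> correlation,
  # BFoneSample -> one-sample t-test), which allows model_info() to set the
  # appropriate flags.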
check_if_installed("BayesFactor") if (any(class(x@denominator) %in% c("BFcorrelation"))) { "correlation" } else if (any(class(x@denominator) %in% c("BFoneSample"))) { "ttest1" } else if (any(class(x@denominator) %in% c("BFindepSample"))) { "ttest2" } else if (any(class(x@denominator) %in% c("BFmetat"))) { "meta" } else if (any(class(x@denominator) %in% c("BFlinearModel"))) { "linear" } else if (any(class(x@denominator) %in% c("BFcontingencyTable"))) { "xtable" } else if (any(class(x@denominator) %in% c("BFproportion"))) { "proptest" } else { class(x@denominator) } } .is_semLme <- function(x) { all(inherits(x, c("sem", "lme"))) } insight/R/get_modelmatrix.R0000644000175000017500000000563714132524312015571 0ustar nileshnilesh#' Model Matrix #' #' Creates a design matrix from the description. Any character variables are coerced to factors. #' #' @param x An object. #' @param ... Passed down to other methods (mainly `model.matrix()`). #' #' @examples #' data(mtcars) #' #' model <- lm(am ~ vs, data = mtcars) #' get_modelmatrix(model) #' @export get_modelmatrix <- function(x, ...) { UseMethod("get_modelmatrix") } #' @export get_modelmatrix.default <- function(x, ...) { stats::model.matrix(object = x, ...) } #' @export get_modelmatrix.merMod <- function(x, ...) { dots <- list(...) if ("data" %in% names(dots)) { model_terms <- stats::terms(x) mm <- stats::model.matrix(model_terms, ...) } else { mm <- stats::model.matrix(object = x, ...) } mm } #' @export get_modelmatrix.lme <- function(x, ...) { # we check the dots for a "data" argument. To make model.matrix work # for certain objects, we need to specify the data-argument explicitly, # however, if the user provides a data-argument, this should be used instead. .data_in_dots(..., object = x, default_data = get_data(x)) } #' @export get_modelmatrix.gls <- get_modelmatrix.lme #' @export get_modelmatrix.clmm <- function(x, ...) { # former implementation in "get_variance()" # f <- find_formula(x)$conditional # stats::model.matrix(object = f, data = x$model, ...) .data_in_dots(..., object = x, default_data = x$model) } #' @export get_modelmatrix.brmsfit <- function(x, ...) { formula_rhs <- .safe_deparse(find_formula(x)$conditional[[3]]) formula_rhs <- stats::as.formula(paste0("~", formula_rhs)) .data_in_dots(..., object = formula_rhs, default_data = get_data(x)) } #' @export get_modelmatrix.rlm <- function(x, ...) { dots <- list(...) # `rlm` objects can inherit to model.matrix.lm, but that function does # not accept the `data` argument for `rlm` objects if (is.null(dots$data)) { mf <- stats::model.frame(x, xleve = x$xlevels, ... ) } else { mf <- stats::model.frame(x, xleve = x$xlevels, data = dots$data, ... ) } mm <- stats::model.matrix.default(x, data = mf, contrasts.arg = x$contrasts ) return(mm) } #' @export get_modelmatrix.cpglmm <- function(x, ...) { # installed? check_if_installed("cplm") cplm::model.matrix(x, ...) } #' @export get_modelmatrix.afex_aov <- function(x, ...) { stats::model.matrix(object = x$lm, ...) 
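  # (afex_aov objects expose the underlying `lm` fit as `$lm`, hence the
  # delegation above.)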
} # helper ---------------- .data_in_dots <- function(..., object = NULL, default_data = NULL) { dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x) data_arg <- if ("data" %in% names(dot.arguments)) { eval(dot.arguments[["data"]]) } else { default_data } remaining_dots <- setdiff(names(dot.arguments), "data") do.call(stats::model.matrix, c(list(object = object, data = data_arg), remaining_dots)) } insight/R/utils_get_data.R0000644000175000017500000006223414163324003015370 0ustar nileshnilesh# Function that does the most work for preparing and transforming the data, # to ensure we have a "clean" data frame from the data that was used to fit # the model. This also means that, unless necessary for further processing, # variables transformed during model fitting are not included in this data frame # .prepare_get_data <- function(x, mf, effects = "fixed", verbose = TRUE) { # check if we have any data yet if (.is_empty_object(mf)) { if (isTRUE(verbose)) { warning("Could not get model data.", call. = FALSE) } return(NULL) } # we may store model weights here later mw <- NULL # offset variables ---------------------------------------------------------- # do we have an offset, not specified in the formula? offcol <- grep("^(\\(offset\\)|offset\\((.*)\\))", colnames(mf)) if (length(offcol) && .obj_has_name(x, "call") && .obj_has_name(x$call, "offset")) { colnames(mf)[offcol] <- clean_names(.safe_deparse(x$call$offset)) } # backtransform variables, such as log, sqrt etc ---------------------------- mf <- .backtransform(mf) # clean 1-dimensional matrices --------------------------------------------- # in particular, transformation like "scale()" may produce a 1D-matrix, # where we want a vector instead mf[] <- lapply(mf, function(.x) { if (is.matrix(.x) && dim(.x)[2] == 1 && !inherits(.x, c("ns", "bs", "poly", "mSpline"))) { as.vector(.x) } else { .x } }) # detect matrix columns ---------------------------------------------------- # check if we have any matrix columns, e.g. from splines mc <- sapply(mf, is.matrix) # save original response value and the respective single variable names of # the response for later. we don't want to change the response value, # if it's a matrix bound with "cbind()" rn <- find_response(x, combine = TRUE) rn_not_combined <- find_response(x, combine = FALSE) # make sure rn is not NULL, but empty string if (is.null(rn)) rn <- "" if (is.null(rn_not_combined)) rn_not_combined <- "" trials.data <- NULL # restore original variables used in matrix-response columns ---------------- if (mc[1] && rn == colnames(mf)[1]) { mc[1] <- FALSE if (inherits(x, c("coxph", "flexsurvreg", "coxme", "survreg", "survfit", "crq", "psm", "coxr"))) { n_of_responses <- ncol(mf[[1]]) mf <- cbind(as.data.frame(as.matrix(mf[[1]])), mf) colnames(mf)[1:n_of_responses] <- rn_not_combined } else { tryCatch( { trials.data <- as.data.frame(mf[[1]]) colnames(trials.data) <- rn_not_combined # if columns were bound via subtraction, e.g. # "cbind(succes, total - success)", we need to sum up success and # total for the original total-column. 
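          # Illustrative sketch (hypothetical values): with success = c(2, 5) and
          # total = c(10, 10), the model frame stores cbind(success, total - success),
          # i.e. the columns (2, 5) and (8, 5); adding both columns restores `total`.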
pattern <- sprintf("%s(\\s*)-(\\s*)%s", rn_not_combined[2], rn_not_combined[1]) if (any(grepl(pattern = pattern, x = rn))) { trials.data[[2]] <- trials.data[[1]] + trials.data[[2]] } }, error = function(x) { NULL } ) } } # process matrix-variables (restore original data from matrix variables) ---- # if we have any matrix columns, we remove them from original # model frame and convert them to regular data frames, give # proper column names and bind them back to the original model frame if (any(mc)) { # try to get model data from environment md <- tryCatch( { eval(stats::getCall(x)$data, environment(stats::formula(x))) }, error = function(x) { NULL } ) # if data not found in environment, # reduce matrix variables into regular vectors if (is.null(md)) { # we select the non-matrix variables and convert matrix-variables into # regular data frames, then binding them together mf_matrix <- mf[, which(mc), drop = FALSE] mf_nonmatrix <- mf[, -which(mc), drop = FALSE] # fix for rms::rcs() functions if (any(class(mf_matrix[[1]]) == "rms")) { class(mf_matrix[[1]]) <- "matrix" } # matrix to data frame, bind to model frame mf_list <- lapply(mf_matrix, as.data.frame, stringsAsFactors = FALSE) mf_matrix <- do.call(cbind, mf_list) mf <- cbind(mf_nonmatrix, mf_matrix) } else { # fix NA in column names if (any(is.na(colnames(md)))) { colnames(md) <- make.names(colnames(md)) } # get "matrix" terms and "normal" predictors, # but exclude response variable(s) mf_matrix <- mf[, -which(mc), drop = FALSE] spline.term <- clean_names(names(which(mc))) other.terms <- clean_names(colnames(mf_matrix))[-1] # now we have all variable names that we need # from the original data set needed.vars <- c(other.terms, spline.term) # if response is a matrix vector (e.g. multivariate response), # we need to include all response names as well, because else # rows may not match due to additional missings in the response variables if (is.matrix(mf[[1]])) { needed.vars <- c(dimnames(mf[[1]])[[2]], needed.vars) } else { needed.vars <- c(colnames(mf)[1], needed.vars) } # check model weights if ("(weights)" %in% needed.vars && !.obj_has_name(md, "(weights)")) { needed.vars <- needed.vars[-which(needed.vars == "(weights)")] mw <- mf[["(weights)"]] fw <- find_weights(x) if (!is.null(fw) && fw %in% colnames(md)) { needed.vars <- c(needed.vars, fw) } } if (inherits(x, c("coxph", "coxme", "coxr")) || any(grepl("^Surv\\(", spline.term))) { # no further processing for survival models mf <- md } else { # get cleaned variable names for those variables # that we still need from the original model frame needed.vars <- .compact_character(unique(clean_names(needed.vars))) mf <- md[, needed.vars, drop = FALSE] # we need this hack to save variable and value label attributes, if any value_labels <- lapply(mf, function(.l) attr(.l, "labels", exact = TRUE)) variable_labels <- lapply(mf, function(.l) attr(.l, "label", exact = TRUE)) # removing NAs drops all label-attributes mf <- stats::na.omit(mf) # then set back attributes mf <- as.data.frame(mapply(function(.d, .l) { attr(.d, "labels") <- .l .d }, mf, value_labels, SIMPLIFY = FALSE), stringsAsFactors = FALSE) mf <- as.data.frame(mapply(function(.d, .l) { attr(.d, "label") <- .l .d }, mf, variable_labels, SIMPLIFY = FALSE), stringsAsFactors = FALSE) } # add back model weights, if any if (!is.null(mw)) mf$`(weights)` <- mw } # check if we really have all formula terms in our model frame now pv <- tryCatch( { find_predictors(x, effects = effects, flatten = TRUE, verbose = verbose) }, error = function(x) { 
NULL } ) # still some undetected matrix-variables? if (!is.null(pv) && !all(pv %in% colnames(mf)) && isTRUE(verbose)) { warning(format_message("Some model terms could not be found in model data. You probably need to load the data into the environment."), call. = FALSE) } } # monotonic predictors ------------------------------------------------------ # check if we have monotonic variables, included in formula # with "mo()"? If yes, remove from model frame mos_eisly <- grepl(pattern = "^mo\\(([^,)]*).*", x = colnames(mf)) if (any(mos_eisly)) { mf <- mf[!mos_eisly] } # restore original data for factors ----------------------------------------- # are there any factor variables that have been coerced "on-the-fly", # using "factor()" or "as.factor()"? if so, get names and convert back # to numeric later factors <- colnames(mf)[grepl("^(as\\.factor|factor)\\((.*)\\)", colnames(mf))] # clean variable names cvn <- .remove_pattern_from_names(colnames(mf), ignore_lag = TRUE) # as-is variables I() ------------------------------------------------------- # keep "as is" variable for response variables in data frame if (colnames(mf)[1] == rn[1] && grepl("^I\\(", rn[1])) { md <- tryCatch( { tmp <- .recover_data_from_environment(x)[, unique(c(rn_not_combined, cvn)), drop = FALSE] tmp[, rn_not_combined, drop = FALSE] }, error = function(x) { NULL } ) if (!is.null(md)) { mf <- cbind(mf, md) cvn <- .remove_pattern_from_names(colnames(mf), ignore_lag = TRUE) cvn[1] <- rn[1] } } # fix duplicated colnames --------------------------------------------------- # do we have duplicated names? dupes <- which(duplicated(cvn)) if (!.is_empty_string(dupes)) cvn[dupes] <- sprintf("%s.%s", cvn[dupes], 1:length(dupes)) colnames(mf) <- cvn # add weighting variable ---------------------------------------------------- weighting_var <- find_weights(x) if (!is.null(weighting_var) && !weighting_var %in% colnames(mf) && length(weighting_var) == 1) { mf <- tryCatch( { tmp <- suppressWarnings(cbind(mf, get_weights(x))) colnames(tmp)[ncol(tmp)] <- weighting_var tmp }, error = function(e) { mf } ) } # add back possible trials-data --------------------------------------------- if (!is.null(trials.data)) { new.cols <- setdiff(colnames(trials.data), colnames(mf)) if (!.is_empty_string(new.cols)) mf <- cbind(mf, trials.data[, new.cols, drop = FALSE]) } # remove "trial response" # see https://github.com/easystats/modelbased/issues/164 # if (rn == colnames(mf)[1] && is.matrix(mf[[1]])) { # mf[[1]] <- NULL # } .add_remaining_missing_variables(x, mf, effects, component = "all", factors = factors) } # add remainng variables with special pattern ------------------------------- .add_remaining_missing_variables <- function(model, mf, effects, component, factors = NULL) { # check if data argument was used model_call <- get_call(model) if (!is.null(model_call)) { data_arg <- parse(text = .safe_deparse(model_call))[[1]]$data } else { data_arg <- NULL } # do we have variable names like "mtcars$mpg"? if (is.null(data_arg) && all(grepl("(.*)\\$(.*)", colnames(mf)))) { colnames(mf) <- gsub("(.*)\\$(.*)", "\\2", colnames(mf)) } predictors <- find_predictors( model, effects = effects, component = component, flatten = TRUE, verbose = FALSE ) missing_vars <- setdiff(predictors, colnames(mf)) # check if missing variables can be recovered from the environment, # and if so, add to model frame. 
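  # Recovery strategy: look up the data object referenced in the model call
  # in the calling/global environment (.recover_data_from_environment()).
  # The missing columns are only merged back when the recovered data (after
  # removing incomplete rows) has the same number of rows as the model frame,
  # so that rows remain aligned.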
if (!is.null(missing_vars) && length(missing_vars) > 0) { env_data <- .recover_data_from_environment(model) if (!is.null(env_data) && all(missing_vars %in% colnames(env_data))) { shared_columns <- intersect(colnames(env_data), c(missing_vars, colnames(mf))) env_data <- stats::na.omit(env_data[shared_columns]) if (nrow(env_data) == nrow(mf) && !any(missing_vars %in% colnames(mf))) { mf <- cbind(mf, env_data[missing_vars]) } } } # add attributes for those that were factors if (length(factors)) { factors <- gsub("^(as\\.factor|factor)\\((.*)\\)", "\\2", factors) for (i in factors) { if (.is_numeric_character(mf[[i]])) { mf[[i]] <- .to_numeric(mf[[i]]) attr(mf[[i]], "factor") <- TRUE } } attr(mf, "factors") <- factors } mf } # combine data from different model components ------------------------------- # This helper functions ensures that data from different model components # are included in the returned data frame # .return_combined_data <- function(x, mf, effects, component, model.terms, is_mv = FALSE, verbose = TRUE) { response <- unlist(model.terms$response) # save factors attribute factors <- attr(mf, "factors", exact = TRUE) if (is_mv) { fixed.component.data <- switch(component, all = c( sapply(model.terms[-1], function(i) i$conditional), sapply(model.terms[-1], function(i) i$zero_inflated), sapply(model.terms[-1], function(i) i$dispersion) ), conditional = sapply(model.terms[-1], function(i) i$conditional), zi = , zero_inflated = sapply(model.terms[-1], function(i) i$zero_inflated), dispersion = sapply(model.terms[-1], function(i) i$dispersion) ) random.component.data <- switch(component, all = c( sapply(model.terms[-1], function(i) i$random), sapply(model.terms[-1], function(i) i$zero_inflated_random) ), conditional = sapply(model.terms[-1], function(i) i$random), zi = , zero_inflated = sapply(model.terms[-1], function(i) i$zero_inflated_random) ) fixed.component.data <- unlist(fixed.component.data) random.component.data <- unlist(random.component.data) } else { all_elements <- intersect(names(model.terms), .get_elements("fixed", "all")) fixed.component.data <- switch(component, all = unlist(model.terms[all_elements]), conditional = model.terms$conditional, zi = , zero_inflated = model.terms$zero_inflated, dispersion = model.terms$dispersion ) random.component.data <- switch(component, all = c(model.terms$random, model.terms$zero_inflated_random), conditional = model.terms$random, zi = , zero_inflated = model.terms$zero_inflated_random ) } # this is to remove the "1" from intercept-ony-models if (!.is_empty_object(fixed.component.data)) { fixed.component.data <- .remove_values(fixed.component.data, c("1", "0")) fixed.component.data <- .remove_values(fixed.component.data, c(1, 0)) } if (!.is_empty_object(random.component.data)) { random.component.data <- .remove_values(random.component.data, c("1", "0")) random.component.data <- .remove_values(random.component.data, c(1, 0)) } weights <- find_weights(x) # if (!is.null(weights) && "(weights)" %in% colnames(mf)) { # weights <- c(weights, "(weights)") # } vars <- switch(effects, all = unique(c(response, fixed.component.data, random.component.data, weights)), fixed = unique(c(response, fixed.component.data, weights)), random = unique(random.component.data) ) # add offset vars <- c(vars, find_offset(x)) still_missing <- setdiff(vars, colnames(mf)) vars <- intersect(vars, colnames(mf)) dat <- mf[, vars, drop = FALSE] if (.is_empty_object(dat)) { if (isTRUE(verbose)) { warning(format_message(sprintf("Data frame is empty, probably component 
'%s' does not exist in the %s-part of the model?", component, effects)), call. = FALSE) } return(NULL) } if (length(still_missing) && isTRUE(verbose)) { warning(format_message(sprintf("Following potential variables could not be found in the data: %s", paste0(still_missing, collapse = " ,"))), call. = FALSE) } if ("(offset)" %in% colnames(mf) && !("(offset)" %in% colnames(dat))) { dat <- cbind(dat, mf[["(offset"]]) } attr(dat, "factors") <- factors dat } # find zi-data ----------------------------------- # this function tries to get the data from variables from the zero-inflated # component and adds them to the model frame. Useful if the zi-component # has other variables than the count component. # .add_zeroinf_data <- function(x, mf, tn) { tryCatch( { env_data <- eval(x$call$data, envir = parent.frame())[, tn, drop = FALSE] if (.obj_has_name(x$call, "subset")) { env_data <- subset(env_data, subset = eval(x$call$subset)) } .merge_dataframes(env_data, mf, replace = TRUE) }, error = function(x) { mf } ) } # special model handling ----------------------------------- .get_zelig_relogit_frame <- function(x) { vars <- find_variables(x, flatten = TRUE, verbose = FALSE) x$data[, vars, drop = FALSE] } # combine data from count and zi-component ----------------------------------- .return_zeroinf_data <- function(x, component, verbose = TRUE) { model.terms <- find_variables(x, effects = "all", component = "all", flatten = FALSE, verbose = FALSE) model.terms$offset <- find_offset(x) mf <- tryCatch( { stats::model.frame(x) }, error = function(x) { NULL } ) mf <- .prepare_get_data(x, mf, verbose = verbose) # add variables from other model components mf <- .add_zeroinf_data(x, mf, model.terms$zero_inflated) fixed.data <- switch(component, all = c(model.terms$conditional, model.terms$zero_inflated, model.terms$offset), conditional = c(model.terms$conditional, model.terms$offset), zi = , zero_inflated = model.terms$zero_inflated ) mf[, unique(c(model.terms$response, fixed.data, find_weights(x))), drop = FALSE] } # "clean" model frame and get data ----------------------------------- # here we have a model frame with many variables, so just extract the important ones... # .get_data_from_modelframe <- function(x, dat, effects, verbose = TRUE) { if (.is_empty_object(dat)) { warning("Could not get model data.", call. 
= FALSE) return(NULL) } cn <- clean_names(colnames(dat)) ft <- switch(effects, fixed = find_variables(x, effects = "fixed", flatten = TRUE), all = find_variables(x, flatten = TRUE), random = find_random(x, split_nested = TRUE, flatten = TRUE) ) remain <- intersect(c(ft, find_weights(x)), cn) mf <- tryCatch( { dat[, remain, drop = FALSE] }, error = function(x) { dat } ) .prepare_get_data(x, mf, effects, verbose = verbose) } # find data from the environment ----------------------------------- # return data from a data frame that is in the environment, # and subset the data, if necessary .recover_data_from_environment <- function(x) { model_call <- get_call(x) # first try, parent frame dat <- tryCatch( { eval(model_call$data, envir = parent.frame()) }, error = function(e) { NULL } ) if (is.null(dat)) { # second try, global env dat <- tryCatch( { eval(model_call$data, envir = globalenv()) }, error = function(e) { NULL } ) } if (!is.null(dat) && .obj_has_name(model_call, "subset")) { dat <- subset(dat, subset = eval(model_call$subset)) } dat } # find data from the environment, for models with S4 -------------------------- # return data from a data frame that is in the environment, # and subset the data, if necessary .get_S4_data_from_env <- function(x) { # first try, parent frame dat <- tryCatch( { eval(x@call$data, envir = parent.frame()) }, error = function(e) { NULL } ) if (is.null(dat)) { # second try, global env dat <- tryCatch( { eval(x@call$data, envir = globalenv()) }, error = function(e) { NULL } ) } if (!is.null(dat) && .obj_has_name(x@call, "subset")) { dat <- subset(dat, subset = eval(x@call$subset)) } dat } # find start vector of nlmer-models from the environment ----------------------------------- # return data from a data frame that is in the environment, # and subset the data, if necessary .get_startvector_from_env <- function(x) { tryCatch( { sv <- eval(parse(text = .safe_deparse(x@call))[[1]]$start) if (is.list(sv)) sv <- sv[["nlpars"]] names(sv) }, error = function(e) { NULL } ) } # backtransform variables ------------------------------- .backtransform <- function(mf) { tryCatch( { patterns <- c( "scale\\(log", "exp\\(scale", "log\\(log", "log", "log1p", "log10", "log2", "sqrt", "exp", "expm1", "scale" ) for (i in patterns) { mf <- .backtransform_helper(mf, i) } mf }, error = function(e) { mf } ) } .backtransform_helper <- function(mf, type) { cn <- .get_transformed_names(colnames(mf), type) if (!.is_empty_string(cn)) { for (i in cn) { if (type == "scale\\(log") { mf[[i]] <- exp(.unscale(mf[[i]])) } else if (type == "exp\\(scale") { mf[[i]] <- .unscale(log(mf[[i]])) } else if (type == "log\\(log") { mf[[i]] <- exp(exp(mf[[i]])) } else if (type == "log") { mf[[i]] <- exp(mf[[i]]) } else if (type == "log1p") { mf[[i]] <- expm1(mf[[i]]) } else if (type == "log10") { mf[[i]] <- 10^(mf[[i]]) } else if (type == "log2") { mf[[i]] <- 2^(mf[[i]]) } else if (type == "sqrt") { mf[[i]] <- (mf[[i]])^2 } else if (type == "exp") { mf[[i]] <- log(mf[[i]]) } else if (type == "expm1") { mf[[i]] <- log1p(mf[[i]]) } else if (type == "scale") { mf[[i]] <- .unscale(mf[[i]]) } colnames(mf)[colnames(mf) == i] <- .get_transformed_terms(i, type) } } mf } .unscale <- function(x) { m <- attr(x, "scaled:center") s <- attr(x, "scaled:scale") if (is.null(m)) m <- 0 if (is.null(s)) s <- 1 s * x + m } # find transformed terms, to convert back as raw data -------------------------------- # Find transformed terms inside model formula, and return "clean" term names .get_transformed_terms <- function(model, type 
= "all") { if (is.character(model)) { x <- model } else { x <- find_terms(model, flatten = TRUE) } pattern <- sprintf("%s\\(([^,\\+)]*).*", type) .trim(gsub(pattern, "\\1", x[grepl(pattern, x)])) } # get column names of transformed terms .get_transformed_names <- function(x, type = "all") { pattern <- sprintf("%s\\(([^,)]*).*", type) x[grepl(pattern, x)] } .retrieve_htest_data <- function(x) { out <- tryCatch( { # special handling of survey-objects if (grepl("^svy", x$data.name)) { if (grepl("pearson's x^2", tolower(x$method), fixed = TRUE)) { d <- x$observed } else { d <- NULL } } else { # split by "and" and "by". E.g., for t.test(1:3, c(1,1:3)), we have # x$data.name = "1:3 and c(1, 1:3)" data_name <- trimws(unlist(strsplit(x$data.name, "(and|by)"))) # now we may have exceptions, e.g. for friedman.test(wb$x, wb$w, wb$t) # x$data.name is "wb$x, wb$w and wb$t" and we now have "wb$x, wb$w" and # "wb$t", so we need to split at comma as well. However, the above t-test # example returns "1:3" and "c(1, 1:3)", so we only must split at comma # when it is not inside parentheses. data_comma <- unlist(strsplit(data_name, "(\\([^)]*\\))")) # any comma not inside parentheses? if (any(grepl(",", data_comma, fixed = TRUE))) { data_name <- trimws(unlist(strsplit(data_comma, ", ", fixed = TRUE))) } # exeception: list for kruskal-wallis if (grepl("Kruskal-Wallis", x$method, fixed = TRUE) && grepl("^list\\(", data_name)) { l <- eval(.str2lang(x$data.name)) names(l) <- paste0("x", 1:length(l)) return(l) } data_call <- lapply(data_name, .str2lang) columns <- lapply(data_call, eval) # preserve table data for McNemar if (!grepl(" (and|by) ", x$data.name) && (grepl("^McNemar", x$method) || (length(columns) == 1 && is.matrix(columns[[1]])))) { return(as.table(columns[[1]])) # check if data is a list for kruskal-wallis } else if (grepl("^Kruskal-Wallis", x$method) && length(columns) == 1 && is.list(columns[[1]])) { l <- columns[[1]] names(l) <- paste0("x", 1:length(l)) return(l) } else { max_len <- max(sapply(columns, length)) for (i in 1:length(columns)) { if (length(columns[[i]]) < max_len) { columns[[i]] <- c(columns[[i]], rep(NA, max_len - length(columns[[i]]))) } } d <- as.data.frame(columns) } if (all(grepl("(.*)\\$(.*)", data_name)) && length(data_name) == length(colnames(d))) { colnames(d) <- gsub("(.*)\\$(.*)", "\\2", data_name) } else if (ncol(d) > 2) { colnames(d) <- paste0("x", 1:ncol(d)) } else if (ncol(d) == 2) { colnames(d) <- c("x", "y") } else { colnames(d) <- "x" } } d }, error = function(e) { NULL } ) # 2nd try if (is.null(out)) { for (parent_level in 1:5) { out <- tryCatch( { data_name <- trimws(unlist(strsplit(x$data.name, "(and|,|by)"))) as.table(get(data_name, envir = parent.frame(n = parent_level))) }, error = function(e) { NULL } ) if (!is.null(out)) break } } out } insight/R/backports.R0000644000175000017500000000050414001034656014363 0ustar nileshnilesh.str2lang <- function(s) { stopifnot(length(s) == 1L) ex <- parse(text = s, keep.source = FALSE) stopifnot(length(ex) == 1L) ex[[1L]] } isTRUE <- function(x) { is.logical(x) && length(x) == 1L && !is.na(x) && x } isFALSE <- function(x) { is.logical(x) && length(x) == 1L && !is.na(x) && !x } insight/R/get_predicted.R0000644000175000017500000012254414151371005015204 0ustar nileshnilesh#' Model Predictions (robust) #' #' The `get_predicted()` function is a robust, flexible and user-friendly alternative to base R [predict()] function. 
Additional features and advantages include availability of uncertainty intervals (CI), bootstrapping, a more intuitive API and the support of more models than base R's `predict` function. However, although the interface is simplified, it is still very important to read the documentation of the arguments. This is because making "predictions" (a loose term for a variety of things) is a non-trivial process, with lots of caveats and complications. Read the `Details` section for more information. #' #' @param x A statistical model (can also be a data.frame, in which case the #' second argument has to be a model). #' @param data An optional data frame in which to look for variables with which #' to predict. If omitted, the data used to fit the model is used. #' @param predict string or `NULL` #' * `"link"` returns predictions on the model's link-scale (for logistic models, that means the log-odds scale) with a confidence interval (CI). #' * `"expectation"` (default) also returns confidence intervals, but this time the output is on the response scale (for logistic models, that means probabilities). #' * `"prediction"` also gives an output on the response scale, but this time associated with a prediction interval (PI), which is larger than a confidence interval (though it mostly makes sense for linear models). #' * `"classification"` only differs from `"prediction"` for binomial models where it additionally transforms the predictions into the original response's type (for instance, to a factor). #' * Other strings are passed directly to the `type` argument of the `predict()` method supplied by the modelling package. #' * When `predict = NULL`, alternative arguments such as `type` will be captured by the `...` ellipsis and passed directly to the `predict()` method supplied by the modelling package. #' * Notes: You can see the 4 options for predictions as lying on a gradient from "close to the model" to "close to the response data": "link", "expectation", "prediction", "classification". The `predict` argument modulates two things: the scale of the output and the type of uncertainty interval. Read more about this in the **Details** section below. #' @param iterations For Bayesian models, this corresponds to the number of #' posterior draws. If `NULL`, will return all the draws (one for each #' iteration of the model). For frequentist models, if not `NULL`, will #' generate bootstrapped draws, from which bootstrapped CIs will be computed. #' Iterations can be accessed by running `as.data.frame()` on the output. #' @param include_random If `TRUE` (default), include all random effects in #' the prediction. If `FALSE`, don't take them into account. Can also be #' a formula to specify which random effects to condition on when predicting #' (passed to the `re.form` argument). If `include_random = TRUE` #' and `newdata` is provided, make sure to include the random effect #' variables in `newdata` as well. #' @param include_smooth For Generalized Additive Models (GAMs). If `FALSE`, #' will fix the value of the smooth to its average, so that the predictions #' do not depend on it. #' @param ... Other arguments to be passed, for instance, to #' [get_predicted_ci()]. #' @inheritParams get_df #' #' @seealso [get_predicted_ci()] #' #' @return The fitted values (i.e. predictions for the response). For Bayesian #' or bootstrapped models (when `iterations != NULL`), iterations (as #' columns and observations are rows) can be accessed via `as.data.frame`.
#' #' @details #' In `insight::get_predicted()`, the `predict` argument jointly #' modulates two separate concepts, the **scale** and the **uncertainty interval**. #' #' \subsection{Confidence Interval (CI) vs. Prediction Interval (PI)}{ #' \itemize{ #' \item **Linear models** - `lm()`: For linear models, Prediction #' intervals (`predict="prediction"`) show the range that likely #' contains the value of a new observation (in what range it is likely to #' fall), whereas confidence intervals (`predict="expectation"` or #' `predict="link"`) reflect the uncertainty around the estimated #' parameters (and give the range of uncertainty of the regression line). In #' general, Prediction Intervals (PIs) account for both the uncertainty in the #' model's parameters and the random variation of the individual values. #' Thus, prediction intervals are always wider than confidence intervals. #' Moreover, prediction intervals will not necessarily become narrower as the #' sample size increases (as they do not reflect only the quality of the fit, #' but also the variability within the data). #' \item **Generalized Linear models** - `glm()`: For binomial models, #' prediction intervals are somewhat useless (for instance, for a binomial #' (Bernoulli) model for which the dependent variable is a vector of 1s and #' 0s, the prediction interval is... `[0, 1]`). #' }} #' #' #' \subsection{Link scale vs. Response scale}{ #' When users set the `predict` argument to `"expectation"`, the predictions #' are returned on the response scale, which is arguably the most convenient #' way to understand and visualize relationships of interest. When users set #' the `predict` argument to `"link"`, predictions are returned on the link #' scale, and no transformation is applied. For instance, for a logistic #' regression model, the response scale corresponds to the predicted #' probabilities, whereas the link-scale makes predictions of log-odds #' (probabilities on the logit scale). Note that when users select #' `predict="classification"` in binomial models, the `get_predicted()` #' function will first calculate predictions as if the user had selected #' `predict="expectation"`. Then, it will round the responses in order to #' return the most likely outcome. #' } #' #' @examples #' data(mtcars) #' x <- lm(mpg ~ cyl + hp, data = mtcars) #' #' predictions <- get_predicted(x) #' predictions #' #' # Options and methods --------------------- #' get_predicted(x, predict = "prediction") #' #' # Get CI #' as.data.frame(predictions) #' #' # Bootstrapped #' as.data.frame(get_predicted(x, iterations = 4)) #' summary(get_predicted(x, iterations = 4)) # Same as as.data.frame(..., keep_iterations = F) #' #' # Different prediction types ------------------------ #' data(iris) #' data <- droplevels(iris[1:100, ]) #' #' # Fit a logistic model #' x <- glm(Species ~ Sepal.Length, data = data, family = "binomial") #' #' # Expectation (default): response scale + CI #' pred <- get_predicted(x, predict = "expectation") #' head(as.data.frame(pred)) #' #' # Prediction: response scale + PI #' pred <- get_predicted(x, predict = "prediction") #' head(as.data.frame(pred)) #' #' # Link: link scale + CI #' pred <- get_predicted(x, predict = "link") #' head(as.data.frame(pred)) #' #' # Classification: classification "type" + PI #' pred <- get_predicted(x, predict = "classification") #' head(as.data.frame(pred)) #' @export get_predicted <- function(x, ...)
{ UseMethod("get_predicted") } # default methods --------------------------- #' @export get_predicted.default <- function(x, data = NULL, verbose = TRUE, ...) { # many predict.CLASS methods do not work when `newdata` is explicitly specified, even if it is NULL if (is.null(data)) { args <- c(list(x), list(...)) } else { args <- c(list(x, "newdata" = data), list(...)) } out <- tryCatch(do.call("predict", args), error = function(e) NULL) if (is.null(out)) { out <- tryCatch(do.call("fitted", args), error = function(e) NULL) } if (!is.null(out)) { out <- .get_predicted_out(out, args = list("data" = data)) } out } #' @export get_predicted.data.frame <- function(x, data = NULL, verbose = TRUE, ...) { # This makes it pipe friendly; data %>% get_predicted(model) if (is.null(data)) { stop("Please provide a model to base the estimations on.") } else { get_predicted(data, x, verbose = verbose, ...) } } # LM and GLM -------------------------------------------------------------- # ========================================================================= #' @rdname get_predicted #' @export get_predicted.lm <- function(x, data = NULL, predict = "expectation", iterations = NULL, verbose = TRUE, ...) { args <- .get_predicted_args(x, data = data, predict = predict, verbose = verbose, ...) predict_function <- function(x, data, ...) { stats::predict(x, newdata = data, interval = "none", type = args$type, ...) } if (is.null(iterations)) { predictions <- predict_function(x, data = args$data) } else { predictions <- .get_predicted_boot( x, data = args$data, predict_function = predict_function, iterations = iterations, verbose = verbose, ... ) } ci_data <- get_predicted_ci(x, predictions, data = args$data, ci_type = args$ci_type, ... ) out <- .get_predicted_transform(x, predictions, args, ci_data) .get_predicted_out(out$predictions, args = args, ci_data = out$ci_data) } #' @export get_predicted.glm <- get_predicted.lm # rms ------------------------------------------------------------------- # ======================================================================= # the rms::lrm function produces an object of class c("lrm", "rms", glm"). The # `get_predicted.glm` function breaks when trying to calculate standard errors, # so we use the default method. #' @export get_predicted.lrm <- get_predicted.default # fixest ---------------------------------------------------------------- # ======================================================================= #' @export get_predicted.fixest <- function(x, predict = "expectation", data = NULL, ...) { # Development is ongoing for standard errors. They are too complicated for us # to compute, so we need to wait on the `fixest` developer: # https://github.com/lrberge/fixest/issues/22 dots <- list(...) 
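# Illustrative sketch (not part of the package code; `m` is a hypothetical fitted
# fixest model): this method simply translates `predict` into fixest's own `type`
# argument, so the following two calls should yield the same predicted values:
#   get_predicted(m, predict = "expectation")
#   predict(m, type = "response")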
# supported prediction types if (!is.null(predict)) { predict <- match.arg(predict, choices = c("expectation", "link")) type_arg <- ifelse(predict == "expectation", "response", "link") } else { if (!"type" %in% names(dots)) { stop("Please specify the `predict` argument.") } else { type_arg <- match.arg(dots$type, choices = c("response", "link")) } } # predict.fixest supports: object, newdata, type, na.rm args <- list() args[["type"]] <- type_arg args[["object"]] <- x if ("na.rm" %in% names(dots)) { args[["na.rm"]] <- dots[["na.rm"]] } # newdata=NULL raises error if (!is.null(data)) { args[["newdata"]] <- data } out <- do.call("predict", args) .get_predicted_out(out) } # ordinal --------------------------------------------------------------- # ======================================================================= #' @export get_predicted.clm <- function(x, predict = "expectation", data = NULL, ...) { # When (a) `newdata` is not null and (b) the response variable does *not* # appear in `newdata`, predict.clm() returns matrices with predictions for # each levels of the response. When either of those conditions fail, # `predict.clm()` returns vectors with only predictions for the actually # observed reponse level in each row. dots <- list(...) # prediction types if (!is.null(predict)) { valid <- c("expectation", "classification") predict <- match.arg(predict, choices = valid) type_arg <- c("prob", "class")[match(predict, valid)] } else { if (!"type" %in% names(dots)) { stop("Please specify the `predict` argument.") } else { type_arg <- match.arg(dots$type, choices = c("prob", "class")) } } # hack to get predictions for all response levels if (is.null(data)) { data <- get_data(x) } resp <- find_response(x) data <- data[, setdiff(colnames(data), resp), drop = FALSE] vars <- as.character(attr(x$terms, "variables"))[-1] vars[attr(x$terms, "response")] <- resp s <- paste0("list(", paste(vars, collapse = ", "), ")") new_call <- parse(text = s, keep.source = FALSE)[[1L]] attr(x$terms, "variables") <- new_call # compute predictions args <- list( object = x, newdata = data, type = type_arg, se.fit = (type_arg == "prob") ) pred <- do.call("predict", args) out <- .get_predicted_out(pred$fit) # standard error matrix to long format if (type_arg == "prob") { se <- pred$se.fit se <- as.data.frame(se) se$Row <- 1:nrow(se) se <- stats::reshape(se, direction = "long", varying = setdiff(colnames(se), "Row"), times = setdiff(colnames(se), "Row"), v.names = "SE", timevar = "Response", idvar = "Row" ) row.names(se) <- NULL attr(out, "ci_data") <- se } return(out) } # pscl: hurdle zeroinfl ------------------------------------------------- # ======================================================================= #' @export get_predicted.hurdle <- function(x, data = NULL, predict = "expectation", ci = 0.95, iterations = NULL, verbose = TRUE, ...) { # pscl models return the fitted values immediately and ignores the `type` # argument when `data` is NULL if (is.null(data)) { data <- get_data(x) } dots <- list(...) # Sanitize input args <- .get_predicted_args( x, data = data, predict = predict, ci = ci, verbose = verbose, ... ) if (!is.null(predict) && predict != "expectation") { warning(format_message("Currently, only `predict='expectation'` is supported."), call. = FALSE) predict <- "expectation" } # predict.glmmTMB has many `type` values which do not map on to our standard # `predict` argument. We don't know how to transform those. 
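# Illustrative sketch (hypothetical zero-inflated pscl model `m` and data `d`;
# mirrors the `predict = "expectation"` computation further below): the expected
# response combines the count component with the probability of not being an
# excess zero.
#   mu   <- predict(m, newdata = d, type = "count")  # conditional (count) mean
#   p_zi <- predict(m, newdata = d, type = "zero")   # zero-component probability
#   mu * (1 - p_zi)                                  # expected response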
if (is.null(predict) && "type" %in% names(dots)) { args$type <- dots$type } else { args$type <- "count" } # Prediction function predict_function <- function(x, data, ...) { stats::predict( x, newdata = data, type = args$type, ... ) } # Get prediction predictions <- as.vector(predict_function(x, data = args$data)) # "expectation" for zero-inflated? we need a special handling # for predictions and CIs here. if (identical(predict, "expectation")) { zi_predictions <- stats::predict( x, newdata = args$data, type = "zero", ... ) predictions <- predictions * (1 - as.vector(zi_predictions)) ci_data <- .simulate_zi_predictions(model = x, newdata = data, predictions = predictions, nsim = iterations, ci = ci) } else { # Get CI ci_data <- get_predicted_ci(x, predictions = predictions, data = args$data, ci = ci, ci_type = args$ci_type) } out <- list(predictions = predictions, ci_data = ci_data) .get_predicted_out(out$predictions, args = args, ci_data = out$ci_data) } #' @export get_predicted.zeroinfl <- get_predicted.hurdle # Mixed Models (lme4, glmmTMB) ------------------------------------------ # ======================================================================= #' @export get_predicted.lmerMod <- function(x, data = NULL, predict = "expectation", ci = 0.95, include_random = TRUE, iterations = NULL, verbose = TRUE, ...) { # Sanitize input args <- .get_predicted_args( x, data = data, predict = predict, ci = ci, include_random = include_random, verbose = verbose, ... ) # Make prediction only using random if only random if (all(names(args$data) %in% find_random(x, flatten = TRUE))) { random.only <- TRUE } else { random.only <- FALSE } # Prediction function predict_function <- function(x, ...) { stats::predict( x, newdata = args$data, type = args$type, re.form = args$re.form, random.only = random.only, ... ) } if (is.null(iterations)) { predictions <- predict_function(x) } else { predictions <- .get_predicted_boot( x, data = args$data, predict_function = predict_function, iterations = iterations, verbose = verbose, ... ) } ci_data <- get_predicted_ci(x, predictions, data = args$data, ci = ci, ci_type = args$ci_type, ...) out <- .get_predicted_transform(x, predictions, args, ci_data) .get_predicted_out(out$predictions, args = args, ci_data = out$ci_data) } #' @export get_predicted.merMod <- get_predicted.lmerMod #' @export get_predicted.glmmTMB <- function(x, data = NULL, predict = "expectation", ci = 0.95, include_random = TRUE, iterations = NULL, verbose = TRUE, ...) { dots <- list(...) # Sanity checks if (!is.null(predict) && predict %in% c("prediction", "predicted", "classification")) { predict <- "expectation" if (verbose) { warning( format_message( '"prediction" and "classification" are currently not supported by the', '`predict` argument for glmmTMB models. Changing to `predict="expectation"`.' ), call. = FALSE ) } } # TODO: prediction intervals # https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#predictions-andor-confidence-or-prediction-intervals-on-predictions # Sanitize input args <- .get_predicted_args( x, data = data, predict = predict, ci = ci, include_random = include_random, verbose = verbose, ... ) # predict.glmmTMB has many `type` values which do not map on to our standard # `predict` argument. We don't know how to transform those. 
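# Illustrative sketch (hypothetical zero-inflated glmmTMB fit `m` and data `d`;
# mirrors the zero-inflation handling further below): the expectation multiplies
# the conditional mean by the probability of not being an excess zero.
#   mu   <- predict(m, newdata = d, type = "conditional")  # conditional mean
#   p_zi <- predict(m, newdata = d, type = "zprob")        # zero-inflation probability
#   mu * (1 - p_zi)                                        # expected response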
if (is.null(predict) && "type" %in% names(dots)) { args$transform <- FALSE args$type <- dots$type } else if (!predict %in% c("expectation", "link")) { args$transform <- FALSE args$type <- predict } # Prediction function predict_function <- function(x, data, ...) { stats::predict( x, newdata = data, type = args$type, re.form = args$re.form, unconditional = FALSE, ... ) } # Get prediction rez <- predict_function(x, data = args$data, se.fit = TRUE) if (is.null(iterations) || identical(predict, "expectation")) { predictions <- as.numeric(rez$fit) } else { predictions <- .get_predicted_boot( x, data = args$data, predict_function = predict_function, iterations = iterations, verbose = verbose, ... ) } # "expectation" for zero-inflated? we need a special handling # for predictions and CIs here. if (identical(predict, "expectation") && args$info$is_zero_inflated) { zi_predictions <- stats::predict( x, newdata = data, type = "zprob", re.form = args$re.form, unconditional = FALSE, ... ) predictions <- link_inverse(x)(predictions) * (1 - as.vector(zi_predictions)) ci_data <- .simulate_zi_predictions(model = x, newdata = data, predictions = predictions, nsim = iterations, ci = ci) out <- list(predictions = predictions, ci_data = ci_data) } else { # Get CI ci_data <- .get_predicted_se_to_ci(x, predictions = predictions, se = rez$se.fit, ci = ci) out <- .get_predicted_transform(x, predictions, args, ci_data) } .get_predicted_out(out$predictions, args = args, ci_data = out$ci_data) } # bife ------------------------------------------------------------------ # ======================================================================= #' @export get_predicted.bife <- function(x, predict = "expectation", data = NULL, verbose = TRUE, ...) { args <- .get_predicted_args(x, data = data, predict = predict, verbose = TRUE, ... ) out <- tryCatch(stats::predict(x, type = args$scale, X_new = args$data), error = function(e) NULL) if (!is.null(out)) { out <- .get_predicted_out(out, args = list("data" = data)) } out } # nnet::multinom -------------------------------------------------------- # ======================================================================= #' @export get_predicted.multinom <- function(x, predict = "expectation", data = NULL, ...) { dots <- list(...) # `type` argument can be: probs | class if (!is.null(predict)) { type_arg <- match.arg(predict, choices = c("classification", "expectation")) type_arg <- c("class", "probs")[c("classification", "expectation") == type_arg] } else if ("type" %in% names(dots)) { type_arg <- match.arg(dots$type, choices = c("class", "probs")) } else { stop('The `predict` argument must be either "expectation" or "classification".') } args <- c(list(x, "data" = data), list(...)) # predict.multinom doesn't work when `newdata` is explicitly set to NULL (weird) if (is.null(data)) { out <- stats::predict(x, type = type_arg) } else { out <- stats::predict(x, newdata = data, type = type_arg) } .get_predicted_out(out, args = args) } # MASS ------------------------------------------------------------------ # ======================================================================= #' @export get_predicted.rlm <- function(x, predict = "expectation", ...) { # only one prediction type supported if (!is.null(predict)) { predict <- match.arg(predict, choices = "expectation") get_predicted.lm(x, predict = predict, ...) } else { dots <- list(...) 
if (!"type" %in% names(dots)) { stop("Please specify the `predict` argument.") } dots[["type"]] <- match.arg(dots$type, choices = "response") dots[["x"]] <- x dots <- c(dots, list("predict" = NULL)) do.call("get_predicted.lm", dots) } } # MASS::polr accepts only "class" or "probs" types, and "expectation" # corresponds to "probs". These are the same as nnet::multinom. # Make sure this is below get_predicted.multinom in the file. #' @export get_predicted.polr <- get_predicted.multinom # GAM ------------------------------------------------------------------- # ======================================================================= #' @export get_predicted.gam <- function(x, data = NULL, predict = "expectation", ci = 0.95, include_random = TRUE, include_smooth = TRUE, iterations = NULL, verbose = TRUE, ...) { # allow users to set `predict=NULL` and specify `type` directly if (!is.null(predict)) { predict <- match.arg(predict, choices = c("expectation", "expected", "link", "prediction", "predicted", "classification")) # Sanity checks if (predict %in% c("prediction", "predicted")) { if (verbose) { warning( format_message( "`predict='prediction'` is currently not available for GAM models.", "Changing to `predict='expectation'`." ), call. = FALSE ) } predict <- "expectation" } # TODO: check this for prediction intervals: # https://fromthebottomoftheheap.net/2016/12/15/simultaneous-interval-revisited/ # https://github.com/gavinsimpson/gratia/blob/master/R/confint-methods.R # https://github.com/gavinsimpson/gratia/blob/master/R/posterior-samples.R } # Sanitize input args <- .get_predicted_args( x, data = data, predict = predict, ci = ci, include_random = include_random, include_smooth = include_smooth, verbose = verbose, ... ) if (inherits(x, c("gamm", "list"))) x <- x$gam # Prediction function predict_function <- function(x, data, ...) { stats::predict( x, newdata = data, type = args$type, re.form = args$re.form, unconditional = FALSE, ... ) } # Get prediction rez <- predict_function(x, data = args$data, se.fit = TRUE) if (is.null(iterations)) { predictions <- rez$fit } else { predictions <- .get_predicted_boot( x, data = args$data, predict_function = predict_function, iterations = iterations, verbose = verbose, ... ) } # Get CI ci_data <- .get_predicted_se_to_ci(x, predictions = predictions, se = rez$se.fit, ci = ci) out <- .get_predicted_transform(x, predictions, args, ci_data) .get_predicted_out(out$predictions, args = args, ci_data = out$ci_data) } #' @export get_predicted.gamm <- get_predicted.gam #' @export get_predicted.list <- get_predicted.gam # gamm4 # Bayesian -------------------------------------------------------------- # ======================================================================= #' @rdname get_predicted #' @export get_predicted.stanreg <- function(x, data = NULL, predict = "expectation", iterations = NULL, include_random = TRUE, include_smooth = TRUE, verbose = TRUE, ...) { check_if_installed("rstantools", minimum_version = "2.1.0") args <- .get_predicted_args( x, data = data, predict = predict, include_random = include_random, include_smooth = include_smooth, verbose = verbose, ... ) # when the `type` argument is passed through ellipsis, we need to manually set # the `args$predict` value, because this is what determines which `rstantools` # function we will use to draw from the posterior predictions. dots <- list(...) 
if (is.null(predict) && "type" %in% names(dots)) { if (dots$type == "link") { args$predict <- "link" } else if (dots$type == "response") { args$predict <- "expectation" } } # Get draws if (args$predict %in% c("link")) { draws <- rstantools::posterior_linpred( x, newdata = args$data, re.form = args$re.form, nsamples = iterations, draws = iterations, ... ) } else if (args$predict %in% c("expectation")) { draws <- rstantools::posterior_epred( x, newdata = args$data, re.form = args$re.form, nsamples = iterations, draws = iterations, ... ) } else { draws <- rstantools::posterior_predict( x, newdata = args$data, re.form = args$re.form, draws = iterations, nsamples = iterations, ... ) } # Get predictions (summarize) predictions <- .get_predicted_centrality_from_draws(x, iter = draws, ...) # Output ci_data <- get_predicted_ci( x, predictions = predictions, data = args$data, ci_type = args$ci_type, ... ) .get_predicted_out(predictions, args = args, ci_data = ci_data) } #' @export get_predicted.brmsfit <- get_predicted.stanreg # Other models ---------------------------------------------------------- # ======================================================================= #' @export get_predicted.crr <- function(x, verbose = TRUE, ...) { out <- as.data.frame(unclass(stats::predict(x, ...))) class(out) <- c("get_predicted", class(out)) out } # FA / PCA ------------------------------------------------------------- # ====================================================================== #' @export get_predicted.principal <- function(x, data = NULL, ...) { if (is.null(data)) { out <- as.data.frame(x$scores) } else { out <- as.data.frame(stats::predict(x, data, ...)) } class(out) <- c("get_predicted", class(out)) out } #' @export get_predicted.fa <- get_predicted.principal #' @export get_predicted.prcomp <- function(x, data = NULL, ...) { if (is.null(data)) { out <- as.data.frame(x$x) } else { out <- as.data.frame(stats::predict(x, data, ...)) } class(out) <- c("get_predicted", class(out)) out } #' @export get_predicted.faMain <- function(x, data = NULL, ...) { check_if_installed("fungible") if (is.null(data)) { stop("A dataframe (either the original or a new one) must be provided (`get_predicted(fa_results, data = df)`).") } else { out <- as.data.frame(fungible::faScores(X = data, faMainObject = x)$fscores) } class(out) <- c("get_predicted", class(out)) out } # ==================================================================== # Utils -------------------------------------------------------------- # ==================================================================== .format_reform <- function(include_random = TRUE) { # Format re.form if (is.null(include_random) || is.na(include_random)) { re.form <- include_random } else if (include_random == TRUE) { re.form <- NULL } else if (include_random == FALSE) { re.form <- NA } else { re.form <- include_random } re.form } # ------------------------------------------------------------------------- .get_predicted_args <- function(x, data = NULL, predict = "expectation", include_random = TRUE, include_smooth = TRUE, ci = 0.95, newdata = NULL, verbose = TRUE, ...) { if (length(predict) > 1) { predict <- predict[1] if (isTRUE(verbose)) { msg <- format_message(sprintf("More than one option provided in `predict`. Using first option '%s' now.", predict[1])) warning(msg, call.
= FALSE) } } # Get info info <- model_info(x, verbose = FALSE) # Data if (!is.null(newdata) && is.null(data)) data <- newdata if (is.null(data)) data <- get_data(x, verbose = verbose) # CI if (is.null(ci)) ci <- 0 # check `predict` user-input supported <- c("expectation", "expected", "link", "prediction", "predicted", "classification") if (isTRUE(verbose) && !is.null(predict) && !predict %in% supported) { msg <- format_message(sprintf('"%s" is not officially supported by the `get_predicted()` function as a value for the `predict` argument. It will not be processed or validated, and will be passed directly to the `predict()` method supplied by the modeling package. Users are encouraged to check the validity and scale of the results. Set `verbose=FALSE` to silence this warning, or use one of the supported values for the `predict` argument: %s.', predict, paste(sprintf('"%s"', setdiff(supported, c("expected", "predicted"))), collapse = ", "))) warning(msg, call. = FALSE) } # check aliases if (!is.null(predict)) { if (predict == "expected") { predict <- "expectation" } if (predict == "predicted") { predict <- "prediction" } } # Arbitrate conflicts between the `predict` and `type` from the ellipsis. We # create a new variable called `predict_arg` to resolve conflicts. This avoids # modifying the values of `type` and `predict` on the fly, which allows us to # keep track of the original user input. dots <- list(...) if (is.null(dots$type)) { predict_arg <- predict if (is.null(predict)) { stop(format_message("Please supply a value for the `predict` argument.")) } # Type (that's for the initial call to stats::predict) if (info$is_linear) { type_arg <- "response" } else { type_arg <- "link" } } else { if (is.null(predict)) { type_arg <- predict_arg <- dots$type } else { stop(format_message('The `predict` and `type` arguments cannot be used simultaneously. The preferred argument for the `get_predicted()` function is `predict`. 
If you need to pass a `type` argument directly to the `predict()` method associated with your model type, you must set `predict` to `NULL` explicitly: `get_predicted(model, predict=NULL, type="response")`')) } } # sanity: `predict` argument (backward compatibility -- we already warned above) if (predict_arg == "relation") { predict_arg <- "expectation" } # Prediction and CI type if (predict_arg == "link") { ci_type <- "confidence" scale <- "link" } else if (predict_arg == "expectation") { ci_type <- "confidence" scale <- "response" } else if (predict_arg %in% c("prediction", "classification")) { ci_type <- "prediction" scale <- "response" } else if (!is.null(dots$type)) { ci_type <- "confidence" scale <- dots$type } else { ci_type <- "confidence" scale <- predict_arg } # Transform if (info$is_linear == FALSE && scale == "response") { transform <- TRUE type_arg <- "link" # set from response to link, because we back-transform } else { transform <- FALSE } # Smooth smooths <- clean_names(find_smooth(x, flatten = TRUE)) if (!is.null(smooths)) { for (smooth in smooths) { # Fix smooth to average value if (!smooth %in% names(data) || include_smooth == FALSE) { include_smooth <- FALSE data[[smooth]] <- mean(get_data(x)[[smooth]], na.rm = TRUE) } } } # Random # In case include_random is TRUE, but there's actually no random factors in data if (include_random && !is.null(data) && !is.null(x) && !all(find_random(x, flatten = TRUE) %in% names(data))) { include_random <- FALSE } # Add (or set) random variables to "NA" if (include_random == FALSE) { if (inherits(x, c("stanreg", "brmsfit"))) { # rstantools predictions doens't allow for NaNs in newdata data[find_variables(x, effects = "random", verbose = FALSE)$random] <- NULL } else { data[find_variables(x, effects = "random", verbose = FALSE)$random] <- NA } } re.form <- .format_reform(include_random) # Return all args list( data = data, include_random = include_random, re.form = re.form, include_smooth = include_smooth, ci_type = ci_type, ci = ci, type = type_arg, predict = predict_arg, scale = scale, transform = transform, info = info ) } # ------------------------------------------------------------------------- .get_predict_transform_response <- function(predictions, response) { predictions <- round(predictions) if (is.factor(response)) { predictions[predictions == 0] <- levels(response)[1] predictions[predictions == 1] <- levels(response)[2] predictions <- as.factor(predictions) levels(predictions) <- levels(response) } else { resp <- unique(response) predictions <- resp[match(predictions, resp)] } predictions } .get_predicted_transform <- function(x, predictions, args = NULL, ci_data = NULL, ...) 
{ # Transform to response scale if (isTRUE(args$transform)) { if (!is.null(ci_data)) { # Transform CI se_col <- names(ci_data) == "SE" # fix for R 3.4 row.names(ci_data) <- NULL ci_data[!se_col] <- lapply(ci_data[!se_col], link_inverse(x)) # Transform SE (https://github.com/SurajGupta/r-source/blob/master/src/library/stats/R/predict.glm.R#L60) # Delta method; SE * deriv( inverse_link(x) wrt lin_pred(x) ) mu_eta <- abs(get_family(x)$mu.eta(predictions)) ci_data[se_col] <- ci_data[se_col] * mu_eta } # Transform predictions predictions <- link_inverse(x)(predictions) # Transform iterations if ("iterations" %in% names(attributes(predictions))) { attr(predictions, "iterations") <- as.data.frame(sapply(attributes(predictions)$iterations, link_inverse(x))) } # Transform to response "type" if (args$predict == "classification" && model_info(x, verbose = FALSE)$is_binomial) { response <- get_response(x) ci_data[!se_col] <- lapply(ci_data[!se_col], .get_predict_transform_response, response = response) predictions <- .get_predict_transform_response(predictions, response = response) if ("iterations" %in% names(attributes(predictions))) { attr(predictions, "iterations") <- as.data.frame(sapply(attributes(predictions)$iterations, .get_predict_transform_response, response = response)) } } } list(predictions = predictions, ci_data = ci_data) } # ------------------------------------------------------------------------- .get_predicted_out <- function(predictions, args = NULL, ci_data = NULL, ...) { if (!is.null(ci_data)) { attr(predictions, "ci_data") <- ci_data } if (!is.null(args)) { attr(predictions, "data") <- args$data attr(predictions, "ci") <- args$ci attr(predictions, "predict") <- args$predict } # multidimensional or "grouped" predictions (e.g., nnet::multinom with `predict(type="probs")`) if (is.matrix(predictions) && ncol(predictions) > 1) { predictions <- as.data.frame(predictions) predictions$Row <- 1:nrow(predictions) predictions <- stats::reshape(predictions, direction = "long", varying = setdiff(colnames(predictions), "Row"), times = setdiff(colnames(predictions), "Row"), v.names = "Predicted", timevar = "Response", idvar = "Row" ) row.names(predictions) <- NULL } class(predictions) <- c("get_predicted", class(predictions)) predictions } # Bootstrap ============================================================== .get_predicted_boot <- function(x, data = NULL, predict_function = NULL, iterations = 500, verbose = TRUE, ...) { if (is.null(data)) data <- get_data(x, verbose = verbose) # TODO: how to make it work with the seed argument?? # Using bootMer if (inherits(x, "merMod")) { # installed check_if_installed("lme4") draws <- lme4::bootMer(x, predict_function, nsim = iterations, use.u = TRUE, ...) # Using boot } else { check_if_installed("boot") boot_fun <- function(data, indices, predict_data, ...) { model <- stats::update(x, data = data[indices, , drop = FALSE]) if (verbose) { predict_function(model, data = predict_data, ...) } else { suppressWarnings(predict_function(model, data = predict_data, ...)) } } draws <- boot::boot(data = get_data(x), boot_fun, R = iterations, predict_data = data, ...) } # Format draws draws <- as.data.frame(t(draws$t)) names(draws) <- paste0("iter_", 1:ncol(draws)) .get_predicted_centrality_from_draws(x, draws, ...) } # ------------------------------------------------------------------------- .get_predicted_centrality_from_draws <- function(x, iter, centrality_function = base::mean, ...) 
{ # outcome: ordinal/multinomial/multivariate produce a 3D array of predictions, # which we stack in "long" format if (length(dim(iter)) == 3) { # 3rd dimension of the array is the response level. This stacks the draws into: # Rows * Response ~ Draws iter_stacked <- apply(iter, 1, c) predictions <- data.frame( # rows repeated for each response level Row = rep(1:ncol(iter), times = dim(iter)[3]), # response levels repeated for each row Response = rep(dimnames(iter)[[3]], each = dim(iter)[2]), Predicted = apply(iter_stacked, 1, centrality_function), stringsAsFactors = FALSE ) iter <- as.data.frame(iter_stacked) names(iter) <- paste0("iter_", names(iter)) # outcome with a single level } else { # .get_predicted_boot already gives us the correct observation ~ draws format if (is.null(colnames(iter)) || !all(grepl("^iter", colnames(iter)))) { iter <- as.data.frame(t(iter)) names(iter) <- gsub("^V(\\d+)$", "iter_\\1", names(iter)) } predictions <- apply(iter, 1, centrality_function) } attr(predictions, "iterations") <- iter predictions } insight/R/find_random.R0000644000175000017500000000604614077615664014703 0ustar nileshnilesh#' @title Find names of random effects #' @name find_random #' #' @description Return the name of the grouping factors from mixed effects models. #' #' @param x A fitted mixed model. #' @param split_nested Logical, if `TRUE`, terms from nested random #' effects will be returned as separated elements, not as single string #' with colon. See 'Examples'. #' #' @inheritParams find_predictors #' @inheritParams find_variables #' #' @return A list of character vectors that represent the name(s) of the #' random effects (grouping factors). Depending on the model, the #' returned list has following elements: #' \itemize{ #' \item `random`, the "random effects" terms from the conditional part of model #' \item `zero_inflated_random`, the "random effects" terms from the #' zero-inflation component of the model #' } #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) #' sleepstudy$mysubgrp <- NA #' for (i in 1:5) { #' filter_group <- sleepstudy$mygrp == i #' sleepstudy$mysubgrp[filter_group] <- #' sample(1:30, size = sum(filter_group), replace = TRUE) #' } #' #' m <- lmer( #' Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), #' data = sleepstudy #' ) #' #' find_random(m) #' find_random(m, split_nested = TRUE) #' } #' @export find_random <- function(x, split_nested = FALSE, flatten = FALSE) { UseMethod("find_random") } #' @export find_random.default <- function(x, split_nested = FALSE, flatten = FALSE) { f <- find_formula(x, verbose = FALSE) if (is_multivariate(x)) { rn <- names(find_response(x)) l <- lapply(rn, function(i) .find_random_effects(x, f[[i]], split_nested)) names(l) <- rn l <- .compact_list(l) } else { l <- .find_random_effects(x, f, split_nested) } if (.is_empty_object(l)) { return(NULL) } if (flatten) { unique(unlist(l)) } else { l } } #' @export find_random.afex_aov <- function(x, split_nested = FALSE, flatten = FALSE) { if (flatten) { attr(x, "id") } else { list(random = attr(x, "id")) } } .find_random_effects <- function(x, f, split_nested) { if (!.obj_has_name(f, "random") && !.obj_has_name(f, "zero_inflated_random")) { return(NULL) } if (.obj_has_name(f, "random")) { if (is.list(f$random)) { r1 <- unique(unlist(lapply(f$random, function(.x) .get_model_random(.x, split_nested, x)))) } else { r1 <- unique(unlist(.get_model_random(f$random, split_nested, x))) } } else { r1 <- NULL } if 
(.obj_has_name(f, "zero_inflated_random")) { if (is.list(f$zero_inflated_random)) { r2 <- unique(unlist(lapply(f$zero_inflated_random, function(.x) .get_model_random(.x, split_nested, x)))) } else { r2 <- unique(.get_model_random(f$zero_inflated_random, split_nested, x)) } } else { r2 <- NULL } .compact_list(list(random = r1, zero_inflated_random = r2)) } insight/R/find_parameters_emmeans.R0000644000175000017500000000333014077615664017264 0ustar nileshnilesh#' @title Find model parameters from estimated marginal means objects #' @name find_parameters.emmGrid #' #' @description Returns the parameter names from a model. #' #' @inheritParams find_parameters #' @inheritParams find_predictors #' @inheritParams get_parameters.BGGM #' @inheritParams get_parameters.emmGrid #' #' @return A list of parameter names. For simple models, only one list-element, #' `conditional`, is returned. #' #' @examples #' data(mtcars) #' model <- lm(mpg ~ wt * factor(cyl), data = mtcars) #' if (require("emmeans", quietly = TRUE)) { #' emm <- emmeans(model, c("wt", "cyl")) #' find_parameters(emm) #' } #' @export find_parameters.emmGrid <- function(x, flatten = FALSE, merge_parameters = FALSE, ...) { out <- params <- get_parameters(x, summary = TRUE, merge_parameters = merge_parameters) if ("Component" %in% colnames(params)) { params$Component <- factor(params$Component, levels = unique(params$Component)) } if (!.is_baysian_emmeans(x)) { if ("Component" %in% colnames(params)) { out <- lapply(split(params, params$Component), function(i) i[[1]]) } else { out <- stats::setNames(list(params[[1]]), unique(.classify_emmeans(x))) } } else { col_names <- colnames(get_parameters(x, summary = FALSE, merge_parameters = merge_parameters)) if ("Component" %in% colnames(params)) { params$Parameter <- col_names out <- lapply(split(params, params$Component), function(i) i[[1]]) } else { out <- stats::setNames(list(col_names), unique(.classify_emmeans(x))) } } if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.emm_list <- find_parameters.emmGrid insight/R/find_parameter_zi.R0000644000175000017500000000550214077615664016101 0ustar nileshnilesh#' @title Find names of model parameters from zero-inflated models #' @name find_parameters.zeroinfl #' #' @description Returns the names of model parameters, like they typically #' appear in the `summary()` output. #' #' @param ... Currently not used. #' @inheritParams find_parameters #' @inheritParams find_parameters.betamfx #' @inheritParams find_predictors #' #' @return A list of parameter names. The returned list may have following #' elements: #' \itemize{ #' \item `conditional`, the "fixed effects" part from the model. #' \item `zero_inflated`, the "fixed effects" part from the #' zero-inflation component of the model. #' } #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_parameters(m) #' @export find_parameters.zeroinfl <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), flatten = FALSE, ...) 
{ cf <- names(stats::coef(x)) component <- match.arg(component) l <- .compact_list(list( conditional = cf[grepl("^count_", cf, perl = TRUE)], zero_inflated = cf[grepl("^zero_", cf, perl = TRUE)] )) .filter_parameters( l, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } #' @export find_parameters.hurdle <- find_parameters.zeroinfl #' @export find_parameters.zerotrunc <- find_parameters.zeroinfl #' @export find_parameters.zcpglm <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), flatten = FALSE, ...) { cf <- stats::coef(x) component <- match.arg(component) l <- .compact_list(list( conditional = names(cf$tweedie), zero_inflated = names(cf$zero) )) .filter_parameters( l, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } #' @rdname find_parameters.zeroinfl #' @export find_parameters.mhurdle <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), flatten = FALSE, ...) { component <- match.arg(component) cf <- stats::coef(x) cond_pars <- which(grepl("^h2\\.", names(cf))) zi_pars <- which(grepl("^h1\\.", names(cf))) ip_pars <- which(grepl("^h3\\.", names(cf))) aux_pars <- (1:length(names(cf)))[-c(cond_pars, zi_pars, ip_pars)] # names(cf) <- gsub("^(h1|h2|h3)\\.(.*)", "\\2", names(cf)) l <- .compact_list(list( conditional = names(cf)[cond_pars], zero_inflated = names(cf)[zi_pars], infrequent_purchase = names(cf)[ip_pars], auxiliary = names(cf)[aux_pars] )) .filter_parameters( l, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } insight/R/format_message.R0000644000175000017500000000401214044454343015373 0ustar nileshnilesh#' @title Format messages and warnings #' @name format_message #' #' @description Inserts line breaks into a longer message or warning string. #' Line length is adjusted to maximum length of the console, if the width #' can be accessed. By default, new lines are indented by two whitespace. #' #' @param string A string. #' @param ... Further strings that will be concatenated as indented new lines. #' @param line_length Numeric, the maximum length of a line. #' #' @return A formatted string. #' @examples #' msg <- format_message("Much too long string for just one line, I guess!", #' line_length = 15 #' ) #' message(msg) #' #' msg <- format_message("Much too long string for just one line, I guess!", #' "First new line", #' "Second new line", #' "(both indented)", #' line_length = 30 #' ) #' message(msg) #' @export format_message <- function(string, ..., line_length = options()$width) { if (is.null(line_length) || is.infinite(line_length) || line_length < 1) { line_length <- 70 } string <- .wrap_message_line(string, line_length) further_lines <- list(...) 
if (length(further_lines)) { further_lines <- lapply(further_lines, function(i) { .wrap_message_line(string = i, line_length = line_length, indention = " ") }) string <- paste0(c(string, unlist(further_lines)), collapse = "\n") } string } .wrap_message_line <- function(string, line_length, indention = NULL) { line_separator <- "\\1\n " lsub <- 0 pattern <- paste("(.{1,", line_length, "})(\\s|$)", sep = "") if (line_length > 0 && nchar(string) > line_length) { string <- gsub(pattern, line_separator, string) l <- nchar(string) lc <- substr(string, l - lsub, l) if (lc == "\n") { string <- substr(string, 0, l - (lsub + 1)) } } # remove trailing newline if (grepl("\\n $", string)) { string <- gsub("\\n $", "", string) } if (!is.null(indention)) { string <- paste0(indention, string) } string } insight/R/find_offset.R0000644000175000017500000000244113751534230014667 0ustar nileshnilesh#' @title Find possible offset terms in a model #' @name find_offset #' #' @description Returns a character vector with the name(s) of offset terms. #' #' @inheritParams find_predictors #' #' @return A character vector with the name(s) of offset terms. #' #' @examples #' # Generate some zero-inflated data #' set.seed(123) #' N <- 100 # Samples #' x <- runif(N, 0, 10) # Predictor #' off <- rgamma(N, 3, 2) # Offset variable #' yhat <- -1 + x * 0.5 + log(off) # Prediction on log scale #' dat <- data.frame(y = NA, x, logOff = log(off)) #' dat$y <- rpois(N, exp(yhat)) # Poisson process #' dat$y <- ifelse(rbinom(N, 1, 0.3), 0, dat$y) # Zero-inflation process #' #' if (require("pscl")) { #' m1 <- zeroinfl(y ~ offset(logOff) + x | 1, data = dat, dist = "poisson") #' find_offset(m1) #' #' m2 <- zeroinfl(y ~ x | 1, data = dat, offset = logOff, dist = "poisson") #' find_offset(m2) #' } #' @export find_offset <- function(x) { terms <- find_terms(x, flatten = TRUE) offset <- NULL offcol <- grep("^offset\\((.*)\\)", terms) if (length(offcol)) { offset <- clean_names(terms[offcol]) } if (is.null(offset) && .obj_has_name(x, "call") && .obj_has_name(x$call, "offset")) { offset <- clean_names(.safe_deparse(x$call$offset)) } offset } insight/R/get_nested_lme_varcorr.R0000644000175000017500000000317014046165422017116 0ustar nileshnilesh# Caution! this is somewhat experimental... # It retrieves the variance-covariance matrix of random effects # from nested lme-models. .get_nested_lme_varcorr <- function(x) { # installed? 
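# Illustrative usage (hypothetical nested lme fit and data; not run):
#   m <- nlme::lme(y ~ x, random = ~ 1 | school/class, data = dat)
#   .get_nested_lme_varcorr(m)  # named list of random-effect VC matrices, one per grouping level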
check_if_installed("lme4") vcor <- lme4::VarCorr(x) class(vcor) <- "matrix" re_index <- (which(rownames(vcor) == "(Intercept)") - 1)[-1] vc_list <- split(data.frame(vcor, stringsAsFactors = FALSE), findInterval(1:nrow(vcor), re_index)) vc_rownames <- split(rownames(vcor), findInterval(1:nrow(vcor), re_index)) re_pars <- unique(unlist(find_parameters(x)["random"])) re_names <- find_random(x, split_nested = TRUE, flatten = TRUE) names(vc_list) <- re_names mapply( function(x, y) { if ("Corr" %in% colnames(x)) { g_cor <- suppressWarnings(stats::na.omit(as.numeric(x[, "Corr"]))) } else { g_cor <- NULL } row.names(x) <- as.vector(y) vl <- rownames(x) %in% re_pars x <- suppressWarnings(apply(x[vl, vl, drop = FALSE], MARGIN = c(1, 2), FUN = as.numeric)) m1 <- matrix(, nrow = nrow(x), ncol = ncol(x)) m1[1:nrow(m1), 1:ncol(m1)] <- as.vector(x[, 1]) rownames(m1) <- rownames(x) colnames(m1) <- rownames(x) if (!is.null(g_cor)) { m1_cov <- sqrt(prod(diag(m1))) * g_cor for (j in 1:ncol(m1)) { m1[j, nrow(m1) - j + 1] <- m1_cov[1] } } attr(m1, "cor_slope_intercept") <- g_cor m1 }, vc_list, vc_rownames, SIMPLIFY = FALSE ) } .is_nested_lme <- function(x) { sapply(find_random(x), function(i) any(grepl(":", i, fixed = TRUE))) } insight/R/get_family.R0000644000175000017500000000325614137207374014533 0ustar nileshnilesh#' A robust alternative to stats::family #' #' A robust and resilient alternative to `stats::family`. To avoid issues #' with models like `gamm4`. #' #' @param x A statistical model. #' @param ... Further arguments passed to methods. #' #' @examples #' data(mtcars) #' x <- glm(vs ~ wt, data = mtcars, family = "binomial") #' get_family(x) #' #' if (require("mgcv")) { #' x <- mgcv::gamm( #' vs ~ am + s(wt), #' random = list(cyl = ~1), #' data = mtcars, #' family = "binomial" #' ) #' get_family(x) #' } #' @export get_family <- function(x, ...) { UseMethod("get_family") } #' @export get_family.default <- function(x, ...) { fam <- tryCatch( { stats::family(x, ...) }, error = function(e) { NULL } ) if (is.null(fam)) { fam <- tryCatch( { .get_family(x, ...) }, error = function(e) { NULL } ) } fam } #' @export get_family.list <- function(x, ...) { if ("gam" %in% names(x)) { .get_family(x) } else { stop("Could not retrieve family from this list. Check the input.") } } #' @export get_family.model_fit <- function(x, ...) { get_family(x$fit, ...) } .get_family <- function(x, ...) { info <- model_info(x, verbose = FALSE) if (info$family == "binomial") { fam <- stats::binomial(link = info$link_function) } else if (info$is_linear) { fam <- stats::gaussian(link = "identity") } else if (info$is_poisson) { fam <- stats::poisson(link = info$link_function) } else { stop("Could not retrieve family from this object. Open an issue on the insight's GitHub.") } fam } insight/R/get_parameters_zi.R0000644000175000017500000001120714046165422016106 0ustar nileshnilesh#' @title Get model parameters from zero-inflated and hurdle models #' @name get_parameters.zeroinfl #' #' @description Returns the coefficients from a model. #' #' @param ... Currently not used. #' #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return For models with smooth terms or zero-inflation component, a data #' frame with three columns: the parameter names, the related point estimates #' and the component. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_parameters(m) #' @export get_parameters.zeroinfl <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) 
{ component <- match.arg(component) .return_zeroinf_parms(x, component) } #' @export get_parameters.hurdle <- get_parameters.zeroinfl #' @export get_parameters.zerotrunc <- get_parameters.default #' @rdname get_parameters.zeroinfl #' @export get_parameters.zcpglm <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) cf <- stats::coef(x) cond <- data.frame( Parameter = names(cf$tweedie), Estimate = unname(cf$tweedie), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) zi <- data.frame( Parameter = names(cf$zero), Estimate = unname(cf$zero), Component = "zero_inflated", stringsAsFactors = FALSE, row.names = NULL ) pars <- switch(component, all = rbind(cond, zi), conditional = cond, zi = , zero_inflated = zi ) if (component != "all") { pars <- .remove_column(pars, "Component") } .remove_backticks_from_parameter_names(pars) } # helper ------------------- .return_zeroinf_parms <- function(x, component) { cf <- stats::coef(x) conditional <- grepl("^count_", names(cf), perl = TRUE) zero_inflated <- grepl("^zero_", names(cf), perl = TRUE) cond <- data.frame( Parameter = names(cf)[conditional], Estimate = unname(cf)[conditional], Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) zi <- data.frame( Parameter = names(cf)[zero_inflated], Estimate = unname(cf)[zero_inflated], Component = "zero_inflated", stringsAsFactors = FALSE, row.names = NULL ) pars <- switch(component, all = rbind(cond, zi), conditional = cond, zi = , zero_inflated = zi ) if (component != "all") { pars <- .remove_column(pars, "Component") } .remove_backticks_from_parameter_names(pars) } #' @rdname get_parameters.zeroinfl #' @export get_parameters.mhurdle <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) cf <- stats::coef(x) cond_pars <- which(grepl("^h2\\.", names(cf))) zi_pars <- which(grepl("^h1\\.", names(cf))) ip_pars <- which(grepl("^h3\\.", names(cf))) aux_pars <- (1:length(names(cf)))[-c(cond_pars, zi_pars, ip_pars)] if (length(cond_pars)) { cond_dat <- data.frame( Parameter = names(cf)[cond_pars], Estimate = unname(cf[cond_pars]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) } else { cond_dat <- NULL } if (length(zi_pars)) { zi_dat <- data.frame( Parameter = names(cf)[zi_pars], Estimate = unname(cf[zi_pars]), Component = "zero_inflated", stringsAsFactors = FALSE, row.names = NULL ) } else { zi_dat <- NULL } if (length(ip_pars)) { ip_dat <- data.frame( Parameter = names(cf)[ip_pars], Estimate = unname(cf[ip_pars]), Component = "infrequent_purchase", stringsAsFactors = FALSE, row.names = NULL ) } else { ip_dat <- NULL } if (length(aux_pars)) { aux_dat <- data.frame( Parameter = names(cf)[aux_pars], Estimate = unname(cf[aux_pars]), Component = "auxiliary", stringsAsFactors = FALSE, row.names = NULL ) } else { aux_dat <- NULL } pars <- rbind(cond_dat, zi_dat, ip_dat, aux_dat) pars <- .filter_component(pars, component) if (component != "all") { pars <- .remove_column(pars, "Component") } # pars$Parameter <- gsub("^(h1|h2|h3)\\.(.*)", "\\2", pars$Parameter) .remove_backticks_from_parameter_names(pars) } insight/R/n_parameters.R0000644000175000017500000001622414077615665015103 0ustar nileshnilesh#' Count number of parameters in a model #' #' Returns the number of parameters (coefficients) of a model. #' #' @param x A statistical model. 
#' @param effects Should number of parameters for fixed effects, random effects #' or both be returned? Only applies to mixed models. May be abbreviated. #' @param component Should total number of parameters, number parameters for the #' conditional model, the zero-inflated part of the model, the dispersion #' term or the instrumental variables be returned? Applies to models #' with zero-inflated and/or dispersion formula, or to models with instrumental #' variable (so called fixed-effects regressions). May be abbreviated. #' @param remove_nonestimable Logical, if `TRUE`, removes (i.e. does not #' count) non-estimable parameters (which may occur for models with #' rank-deficient model matrix). #' @param ... Arguments passed to or from other methods. #' #' @return The number of parameters in the model. #' #' @note This function returns the number of parameters for the fixed effects #' by default, as returned by `find_parameters(x, effects = "fixed")`. #' It does not include *all* estimated model parameters, i.e. auxiliary #' parameters like sigma or dispersion are not counted. To get the number of #' *all estimated* parameters, use `get_df(x, type = "model")`. #' #' @examples #' data(iris) #' model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) #' n_parameters(model) #' @export n_parameters <- function(x, ...) { UseMethod("n_parameters") } # Default models ------------------------------------- #' @rdname n_parameters #' @export n_parameters.default <- function(x, remove_nonestimable = FALSE, ...) { .n_parameters_effects(x, effects = "fixed", remove_nonestimable = remove_nonestimable, ...) } # helper .process_estimable <- function(params, remove_nonestimable) { if (isTRUE(remove_nonestimable)) { params <- params[!is.na(params$Estimate), ] } nrow(params) } # Models with random effects ------------------------------------- #' @rdname n_parameters #' @export n_parameters.merMod <- function(x, effects = c("fixed", "random"), remove_nonestimable = FALSE, ...) { effects <- match.arg(effects) .n_parameters_effects(x, effects = effects, remove_nonestimable = remove_nonestimable, ...) } #' @export n_parameters.BBmm <- n_parameters.merMod #' @export n_parameters.glimML <- n_parameters.merMod #' @export n_parameters.cpglmm <- n_parameters.merMod #' @export n_parameters.rlmerMod <- n_parameters.merMod #' @export n_parameters.mixed <- n_parameters.merMod #' @export n_parameters.coxme <- n_parameters.merMod #' @export n_parameters.lme <- n_parameters.merMod #' @export n_parameters.MCMCglmm <- n_parameters.merMod #' @export n_parameters.sim.merMod <- n_parameters.merMod #' @export n_parameters.wbm <- n_parameters.merMod # Models with random effects and other components ---------------------------- #' @export n_parameters.MixMod <- function(x, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), remove_nonestimable = FALSE, ...) { effects <- match.arg(effects) component <- match.arg(component) if (effects == "random" || isFALSE(remove_nonestimable)) { length(unlist(find_parameters(x, effects = effects, component = component, flatten = FALSE, verbose = FALSE, ...))) } else { params <- get_parameters(x, effects = effects, component = component, ...) 
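    # `params` is a data frame with (at least) a `Parameter` and an `Estimate`
    # column; non-estimable coefficients have `NA` estimates and are dropped by
    # `.process_estimable()` before the remaining rows are counted.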
.process_estimable(params, remove_nonestimable) } } #' @rdname n_parameters #' @export n_parameters.glmmTMB <- n_parameters.MixMod # Models with (zero-inflation) components ---------------------------- #' @rdname n_parameters #' @export n_parameters.zeroinfl <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), remove_nonestimable = FALSE, ...) { component <- match.arg(component) .n_parameters_component(x, component, remove_nonestimable, ...) } #' @export n_parameters.hurdle <- n_parameters.zeroinfl #' @export n_parameters.zerotrunc <- n_parameters.default # GAMs ---------------------------- #' @rdname n_parameters #' @export n_parameters.gam <- function(x, component = c("all", "conditional", "smooth_terms"), remove_nonestimable = FALSE, ...) { component <- match.arg(component) .n_parameters_component(x, component, remove_nonestimable, ...) } #' @export n_parameters.Gam <- n_parameters.gam #' @export n_parameters.vgam <- n_parameters.gam # Bayesian Models ---------------------------- #' @rdname n_parameters #' @export n_parameters.brmsfit <- function(x, effects = "all", component = "all", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) component <- match.arg(component, choices = c("all", .all_elements())) length(unlist(find_parameters(x, effects = effects, component = component, flatten = FALSE, verbose = FALSE, ...))) } #' @export n_parameters.stanreg <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "smooth_terms"), ...) { effects <- match.arg(effects) component <- match.arg(component) length(unlist(find_parameters(x, effects = effects, component = component, flatten = FALSE, verbose = FALSE, ...))) } #' @export n_parameters.stanmvreg <- n_parameters.stanreg # Other models ------------------------------------- #' @export n_parameters.lavaan <- function(x, ...) { # TODO # installed? # check_if_installed("lavaan") # lavaan::fitmeasures(x)[["npar"]] } #' @export n_parameters.blavaan <- n_parameters.lavaan #' @export n_parameters.multinom <- function(x, ...) { nrow(get_parameters(x)) } #' @export n_parameters.bayesx <- function(x, ...) { length(unlist(find_parameters(x, component = "conditional", flatten = FALSE, verbose = FALSE, ...))) } # helper --------------------- .n_parameters_component <- function(x, component, remove_nonestimable, ...) { if (isTRUE(remove_nonestimable)) { params <- get_parameters(x, component = component, ...) .process_estimable(params, remove_nonestimable) } else { length(unlist(find_parameters(x, component = component, flatten = FALSE, verbose = FALSE, ...))) } } .n_parameters_effects <- function(x, effects, remove_nonestimable, ...) { if (effects == "random" || isFALSE(remove_nonestimable)) { length(unlist(find_parameters(x, effects = effects, flatten = FALSE, verbose = FALSE, ...))) } else { params <- get_parameters(x, effects = effects, ...) .process_estimable(params, remove_nonestimable) } } insight/R/is_model_supported.R0000644000175000017500000001007414135301750016276 0ustar nileshnilesh#' @title Checks if an object is a regression model object supported in #' \pkg{insight} package. #' @name is_model_supported #' #' @description Small helper that checks if a model is a *supported* #' (regression) model object. `supported_models()` prints a list #' of currently supported model classes. #' #' @inheritParams is_model #' #' @return A logical, `TRUE` if `x` is a (supported) model object. 
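#' For `supported_models()`, a sorted character vector with the class names of
#' all supported models.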
#' #' @details This function returns `TRUE` if `x` is a model object #' that works with the package's functions. A list of supported models can #' also be found here: . #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' #' is_model_supported(m) #' is_model_supported(mtcars) #' @export is_model_supported <- function(x) { inherits(x, .supported_models_list()) } #' @rdname is_model_supported #' @export supported_models <- function() { sort(.supported_models_list()) } .supported_models_list <- function() { c( # a ---------------------------- "aareg", "afex_aov", "aov", "aovlist", "AKP", "Anova.mlm", "Arima", "averaging", # b ---------------------------- "bamlss", "bamlss.frame", "bayesx", "bayesQR", "BBmm", "BBreg", "bcplm", "betamfx", "betaor", "betareg", "bfsl", "BFBayesFactor", "BGGM", "bife", "bifeAPEs", "biglm", "bigglm", "blavaan", "blrm", "bracl", "brglm", "brmsfit", "brmultinom", "btergm", # c ---------------------------- "censReg", "cgam", "cgamm", "cglm", "clm", "clm2", "clmm", "clmm2", "clogit", "coeftest", "complmrob", "confusionMatrix", "coxme", "coxph", "coxph.penal", "coxr", "cpglm", "cpglmm", "crch", "crq", "crqs", "crr", # d ---------------------------- "dep.effect", "DirichletRegModel", "drc", # e ---------------------------- "eglm", "elm", "epi.2by2", "ergm", # f ---------------------------- "feis", "felm", "feglm", "fitdistr", "fixest", "flexsurvreg", # g ---------------------------- "gam", "Gam", "gamlss", "gamm", "gamm4", "garch", "gbm", "gee", "geeglm", "glht", "glimML", "glmm", "glm", "Glm", "glmmadmb", "glmmPQL", "glmmTMB", "glmrob", "glmRob", "glmx", "gls", "gmnl", # h ---------------------------- "HLfit", "htest", "hurdle", # i ---------------------------- "ivFixed", "iv_robust", "ivreg", "ivprobit", # l ---------------------------- "lavaan", "lm", "lm_robust", "lme", "lmrob", "lmRob", "lmerMod", "lmerModLmerTest", "lmodel2", "logitmfx", "logitor", "logistf", "LORgee", "lqm", "lqmm", "lrm", # m ---------------------------- "manova", "MANOVA", "margins", "maxLik", "mcmc", "MCMCglmm", "mcp12", "mcp1", "mcp2", "med1way", "mediate", "metaplus", "merMod", "merModList", "mipo", "mira", "mixed", "mixor", "MixMod", "mhurdle", "mjoint", "mle", "mle2", "mlm", "mclogit", "mcmc.list", "meta_bma", "meta_fixed", "meta_random", "mlogit", "mmlogit", "model_fit", "multinom", "mvord", # n ---------------------------- "negbinmfx", "negbinirr", # o ---------------------------- "ols", "onesampb", "orm", # p ---------------------------- "PMCMR", "poissonmfx", "poissonirr", "pgmm", "plm", "polr", "psm", "probitmfx", # r ---------------------------- "Rchoice", "ridgelm", "riskRegression", "rjags", "rlm", "rlmerMod", "robtab", "RM", "rma", "rma.uni", "robmixglm", "rq", "rqs", "rqss", # s ---------------------------- "Sarlm", "scam", "selection", "sem", "semLm", "semLme", "SemiParBIV", "slm", "speedlm", "speedglm", "stanfit", "stanmvreg", "stanreg", "summary.lm", "survfit", "survreg", "svy_vglm", "svyglm", "svyolr", # t ---------------------------- "t1way", "tobit", "trimcibt", "truncreg", # v ---------------------------- "vgam", "vglm", # w ---------------------------- "wbm", "wblm", "wbgee", "wmcpAKP", # y ---------------------------- "yuen", "yuend", # z ---------------------------- "zcpglm", "zeroinfl", "zerotrunc" ) } insight/R/find_response.R0000644000175000017500000001464614163571567015265 0ustar nileshnilesh#' @title Find name of the response variable #' @name find_response #' #' @description Returns the name(s) of the response variable(s) from a model 
object. #' #' @param x A fitted model. #' @param combine Logical, if `TRUE` and the response is a matrix-column, #' the name of the response matches the notation in formula, and would for #' instance also contain patterns like `"cbind(...)"`. Else, the original #' variable names from the matrix-column are returned. See 'Examples'. #' @param ... Currently not used. #' #' @return The name(s) of the response variable(s) from `x` as character #' vector, or `NULL` if response variable could not be found. #' #' @examples #' if (require("lme4")) { #' data(cbpp) #' cbpp$trials <- cbpp$size - cbpp$incidence #' m <- glm(cbind(incidence, trials) ~ period, data = cbpp, family = binomial) #' #' find_response(m, combine = TRUE) #' find_response(m, combine = FALSE) #' } #' @export find_response <- function(x, combine = TRUE, ...) { UseMethod("find_response") } #' @export find_response.default <- function(x, combine = TRUE, ...) { f <- find_formula(x, verbose = FALSE) if (is.null(f)) { return(NULL) } # this is for multivariate response models, # where we have a list of formulas if (is_multivariate(f)) { resp <- unlist(lapply(f, function(i) .safe_deparse(i$conditional[[2L]]))) } else { resp <- .safe_deparse(f$conditional[[2L]]) } check_cbind(resp, combine, model = x) } #' @export find_response.model_fit <- function(x, combine = TRUE, ...) { find_response(x$fit, combine = combine, ...) } #' @export find_response.bfsl <- function(x, combine = TRUE, ...) { resp <- find_response.default(x, combine = combine) if (is.null(resp)) { resp <- "y" } resp } #' @export find_response.selection <- function(x, combine = TRUE, ...) { f <- find_formula(x, verbose = FALSE) resp <- c( .safe_deparse(f$conditional$selection[[2L]]), .safe_deparse(f$conditional$outcome[[2L]]) ) check_cbind(resp, combine, model = x) } #' @export find_response.mediate <- function(x, combine = TRUE, ...) { f <- find_formula(x, verbose = FALSE) if (is.null(f)) { return(NULL) } resp <- c(.safe_deparse(f$mediator$conditional[[2L]]), .safe_deparse(f$outcome$conditional[[2L]])) check_cbind(resp, combine, model = x) } #' @export find_response.mjoint <- function(x, combine = TRUE, component = c("conditional", "survival", "all"), ...) { component <- match.arg(component) f <- find_formula(x, verbose = FALSE) if (is.null(f)) { return(NULL) } conditional <- unlist(lapply(f[grepl("^conditional", names(f))], function(i) .safe_deparse(i[[2L]]))) survial <- .safe_deparse(f$survival[[2L]]) resp <- switch(component, "conditional" = conditional, "survial" = survial, "all" = c(conditional, survial) ) unlist(lapply(resp, check_cbind, combine = combine, model = x)) } #' @export find_response.joint <- function(x, combine = TRUE, component = c("conditional", "survival", "all"), ...) { component <- match.arg(component) f <- find_formula(x, verbose = FALSE) if (is.null(f)) { return(NULL) } conditional <- .safe_deparse(f$conditional[[2L]]) survial <- .safe_deparse(f$survival[[2L]]) resp <- switch(component, "conditional" = conditional, "survial" = survial, "all" = c(conditional, survial) ) unlist(lapply(resp, check_cbind, combine = combine, model = x)) } # utils --------------------- # should not be called for brms-models! 
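# check_cbind() post-processes the deparsed response term(s). Depending on
# `combine`, a matrix-response such as "cbind(incidence, trials)" is either
# kept as-is or split into its column names. A minimal sketch of the expected
# behaviour (illustrative only, using the cbpp model `m` from the examples in
# the documentation above):
#
#   find_response(m, combine = TRUE)   # "cbind(incidence, trials)"
#   find_response(m, combine = FALSE)  # "incidence" "trials"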
check_cbind <- function(resp, combine, model) { if (!combine && inherits(model, "DirichletRegModel")) { resp <- model$varnames } else if (!combine && any(grepl("cbind\\((.*)\\)", resp))) { resp <- .extract_combined_response(resp, "cbind") } else if (!combine && any(grepl("Surv\\((.*)\\)", resp))) { resp <- .extract_combined_response(resp, "Surv") } else if (!combine && any(grepl("Hist\\((.*)\\)", resp))) { resp <- .extract_combined_response(resp, "Hist") } else if (!combine && any(grepl("Event\\((.*)\\)", resp))) { resp <- .extract_combined_response(resp, "Event") } else if (!combine && any(grepl("Curv\\((.*)\\)", resp))) { resp <- .extract_combined_response(resp, "Curv") } else if (!combine && any(grepl("MMO\\((.*)\\)", resp))) { resp <- .extract_combined_response(resp, "MMO") } else if (!combine && any(grepl("MMO2\\((.*)\\)", resp))) { resp <- .extract_combined_response(resp, "MMO2") } else if (!combine && any(grepl("/", resp, fixed = TRUE))) { resp <- strsplit(resp, split = "/", fixed = TRUE) resp <- gsub("(I|\\(|\\))", "", .trim(unlist(resp))) } else if (any(.string_contains("|", resp))) { # check for brms Additional Response Information r1 <- .trim(sub("(.*)\\|(.*)", "\\1", resp)) r2 <- .trim(sub("(.*)\\|(.*)\\(([^,)]*).*", "\\3", resp)) # check for "resp_thres" pattern r_resp <- .trim(unlist(strsplit(resp, "|", fixed = TRUE))[2]) if (grepl("^resp_thres", r_resp)) { r3 <- .trim(sub("=", "", sub("(.*)\\(([^=)]*)(.*)\\)", "\\3", r_resp))) names(r3) <- r3 numeric_values <- suppressWarnings(as.numeric(r2)) r2 <- r2[is.na(numeric_values)] if (length(r2)) { r2 <- c(r2, r3) } else { r2 <- r3 } } resp <- c(r1, r2) } else if (!combine && any(grepl("+", resp, fixed = TRUE))) { resp <- strsplit(resp, split = "+", fixed = TRUE) resp <- gsub("(I|\\(|\\))", "", .trim(unlist(resp))) } # exception if (inherits(model, "clogit") && grepl("^rep\\(", resp[1]) && length(resp) == 3) { resp <- c(paste0(resp[1], resp[2]), resp[3]) } .remove_pattern_from_names(resp, ignore_asis = TRUE) } .extract_combined_response <- function(resp, pattern) { if (pattern %in% c("MMO", "MMO2") && !grepl(paste0("^", pattern, "\\((.*),(.*)\\)"), resp)) { resp <- gsub(paste0("^", pattern, "\\((.*)\\)"), "\\1", resp) } else { resp <- sub(sprintf("%s\\(([^,].*)([\\)].*)", pattern), "\\1", resp) resp <- strsplit(resp, split = ",", fixed = TRUE) resp <- .trim(unlist(resp)) } if (any(.string_contains("-", resp[2]))) { resp[2] <- .trim(sub("(.*)(\\-)(.*)", "\\1", resp[2])) } resp } insight/R/find_terms.R0000644000175000017500000001420514135301654014533 0ustar nileshnilesh#' @title Find all model terms #' @name find_terms #' #' @description Returns a list with the names of all terms, including response #' value and random effects, "as is". This means, on-the-fly tranformations #' or arithmetic expressions like `log()`, `I()`, `as.factor()` etc. are #' preserved. 
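#'
#' For instance, for a model with formula `log(y) ~ x + I(x^2)`, the response
#' is returned as `"log(y)"` and the conditional terms as `"x"` and `"I(x^2)"`
#' (illustrative sketch; see also the mixed-model example below).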
#' #' @inheritParams find_formula #' @inheritParams find_predictors #' #' @return A list with (depending on the model) following elements (character #' vectors): #' \itemize{ #' \item `response`, the name of the response variable #' \item `conditional`, the names of the predictor variables from the *conditional* model (as opposed to the zero-inflated part of a model) #' \item `random`, the names of the random effects (grouping factors) #' \item `zero_inflated`, the names of the predictor variables from the *zero-inflated* part of the model #' \item `zero_inflated_random`, the names of the random effects (grouping factors) #' \item `dispersion`, the name of the dispersion terms #' \item `instruments`, the names of instrumental variables #' } #' Returns `NULL` if no terms could be found (for instance, due to #' problems in accessing the formula). #' #' @note The difference to [find_variables()] is that `find_terms()` #' may return a variable multiple times in case of multiple transformations #' (see examples below), while `find_variables()` returns each variable #' name only once. #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' m <- lmer( #' log(Reaction) ~ Days + I(Days^2) + (1 + Days + exp(Days) | Subject), #' data = sleepstudy #' ) #' #' find_terms(m) #' } #' @export find_terms <- function(x, flatten = FALSE, verbose = TRUE, ...) { UseMethod("find_terms") } #' @export find_terms.default <- function(x, flatten = FALSE, verbose = TRUE, ...) { f <- find_formula(x, verbose = verbose) if (is.null(f)) { return(NULL) } resp <- find_response(x, verbose = FALSE) if (is_multivariate(f) || isTRUE(attributes(f)$two_stage)) { l <- lapply(f, .get_variables_list, resp = resp) } else { l <- .get_variables_list(f, resp) } if (flatten) { unique(unlist(l)) } else { l } } .find_terms <- function(f, response) { out <- lapply(f, function(i) { if (is.list(i)) { .find_terms(i, response = NULL) } else { f_terms <- unname(attr(stats::terms(i), "term.labels")) sub("(.*)::(.*)", "\\2", f_terms) } }) .compact_list(c(list(response = response), out)) } #' @export find_terms.aovlist <- function(x, flatten = FALSE, verbose = TRUE, ...) { resp <- find_response(x, verbose = FALSE) f <- find_formula(x, verbose = verbose)[[1]] l <- .get_variables_list_aovlist(f, resp) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_terms.afex_aov <- function(x, flatten = FALSE, verbose = TRUE, ...) { resp <- find_response(x, verbose = FALSE) if (length(attr(x, "within")) == 0L) { l <- find_terms(x$lm, flatten = FALSE, verbose = TRUE, ...) l$response <- resp } else { f <- find_formula(x, verbose = verbose)[[1]] l <- .get_variables_list_aovlist(f, resp) } if (flatten) { unique(unlist(l)) } else { l } } #' @export find_terms.bfsl <- function(x, flatten = FALSE, verbose = TRUE, ...) 
{ resp <- find_response(x, verbose = FALSE) f <- find_formula(x, verbose = verbose) if (!is.null(f)) { fx <- f[[1]][[3]] } else { fx <- "x" } l <- list(conditional = c(resp, fx)) if (flatten) { unique(unlist(l)) } else { l } } # helper ----------------------- .get_variables_list <- function(f, resp = NULL) { # exception for formula w/o response if (is.null(resp) || !.is_empty_object(resp)) { f$response <- sub("(.*)::(.*)", "\\2", .safe_deparse(f$conditional[[2L]])) f$conditional <- .safe_deparse(f$conditional[[3L]]) } else { f$conditional <- .safe_deparse(f$conditional[[2L]]) } f <- lapply(f, function(.x) { if (is.list(.x)) { .x <- sapply(.x, .formula_to_string) } else { if (!is.character(.x)) .x <- .safe_deparse(.x) } .x }) # protect "-1" f$conditional <- gsub("(-1|- 1)(?![^(]*\\))", "#1", f$conditional, perl = TRUE) f <- lapply(f, function(.x) { pattern <- "[*+:|\\-\\/](?![^(]*\\))" # was: "[\\*\\+:\\-\\|/](?![^(]*\\))" f_parts <- gsub("~", "", .trim(unlist(strsplit(split = pattern, x = .x, perl = TRUE)))) # if user has used namespace in formula-functions, these are returned # as empty elements. remove those here if (any(nchar(f_parts) == 0)) { f_parts <- f_parts[-which(nchar(f_parts) == 0)] } .remove_backticks_from_string(unique(f_parts)) }) # remove "1" and "0" from variables in random effects if (.obj_has_name(f, "random")) { pos <- which(f$random %in% c("1", "0")) if (length(pos)) f$random <- f$random[-pos] } if (.obj_has_name(f, "zero_inflated_random")) { pos <- which(f$zero_inflated_random %in% c("1", "0")) if (length(pos)) f$zero_inflated_random <- f$zero_inflated_random[-pos] } # restore -1 need_split <- grepl("#1$", f$conditional) if (any(need_split)) { f$conditional <- c( f$conditional[!need_split], .trim(unlist(strsplit(f$conditional[need_split], " ", fixed = TRUE))) ) } f$conditional <- gsub("#1", "-1", f$conditional, fixed = TRUE) # reorder, so response is first .compact_list(f[c(length(f), 1:(length(f) - 1))]) } .get_variables_list_aovlist <- function(f, resp = NULL) { i <- sapply(f[[3]], function(x) { x <- as.character(x) x[1] == "Error" && length(x) > 1 }) error <- utils::capture.output(print(f[[3]][i][[1]])) f[[3]][i] <- NULL f[[3]] <- f[[3]][[2]] f[[3]] <- as.name(paste0(attr(stats::terms.formula(f), "term.labels"), collapse = "+")) l <- .get_variables_list(f, resp) names(l) <- c("response", "conditional") l$error <- error l } .formula_to_string <- function(f) { if (!is.character(f)) f <- .safe_deparse(f) f } insight/R/get_parameters_htest.R0000644000175000017500000000667314061456671016634 0ustar nileshnilesh#' @title Get model parameters from htest-objects #' @name get_parameters.htest #' #' @description Returns the parameters from a hypothesis test. #' #' @param ... Currently not used. #' @inheritParams find_parameters #' #' @return A data frame with two columns: the parameter names and the related #' point estimates. #' #' @examples #' get_parameters(t.test(1:10, y = c(7:20))) #' @export get_parameters.htest <- function(x, ...) 
{ m_info <- model_info(x) if (m_info$is_correlation) { out <- .extract_htest_correlation(x) } else if (m_info$is_levenetest) { out <- .extract_htest_levenetest(x) } else if (m_info$is_ttest) { out <- .extract_htest_ttest(x) } else if (m_info$is_ranktest) { out <- .extract_htest_ranktest(x) } else if (m_info$is_onewaytest) { out <- .extract_htest_oneway(x) } else if (m_info$is_chi2test) { out <- .extract_htest_chi2(x) } else if (m_info$is_proptest) { out <- .extract_htest_prop(x) } else if (m_info$is_binomtest) { out <- .extract_htest_binom(x) } else { stop("'get_parameters()' not implemented for such hypothesis tests yet.") } row.names(out) <- NULL out } # extract htest correlation ---------------------- .extract_htest_correlation <- function(model) { out <- data.frame( Parameter = model$data.name, stringsAsFactors = FALSE ) if (model$method == "Pearson's Chi-squared test") { out$Estimate <- model$statistic } else { out$Estimate <- model$estimate } out } # extract htest ranktest ---------------------- .extract_htest_ranktest <- function(model) { out <- data.frame( Parameter = model$data.name, stringsAsFactors = FALSE ) if (grepl("Wilcoxon", model$method, fixed = TRUE)) { out$Estimate <- model$statistic } else if (grepl("Kruskal-Wallis", model$method, fixed = TRUE)) { out$Estimate <- model$statistic } out } # extract htest leveneTest ---------------------- .extract_htest_levenetest <- function(model) { data.frame( Parameter = "Parameter", Estimate = model$`F value`[1], stringsAsFactors = FALSE ) } # extract htest ttest ---------------------- .extract_htest_ttest <- function(model, standardized_d = NULL, hedges_g = NULL) { out <- data.frame( Parameter = model$data.name, stringsAsFactors = FALSE ) if (length(model$estimate) == 1) { out$Estimate <- model$estimate } else { out$Estimate <- model$estimate[1] - model$estimate[2] } out } # extract htest oneway ---------------------- .extract_htest_oneway <- function(model) { NULL } # extract htest chi2 ---------------------- .extract_htest_chi2 <- function(model) { out <- data.frame( Parameter = model$data.name, stringsAsFactors = FALSE ) if (!is.null(model$estimate) && identical(names(model$estimate), "odds ratio")) { out$Estimate <- model$estimate } else { out$Estimate <- model$statistic } out } # extract htest prop ---------------------- .extract_htest_prop <- function(model) { out <- data.frame( Parameter = "probability", Estimate = model$estimate, stringsAsFactors = FALSE ) } # extract htest binom ---------------------- .extract_htest_binom <- function(model) { out <- data.frame( Parameter = "probability", Estimate = model$estimate, stringsAsFactors = FALSE ) out } insight/R/get_random.R0000644000175000017500000000231214077615665014533 0ustar nileshnilesh#' @title Get the data from random effects #' @name get_random #' #' @description Returns the data from all random effects terms. #' #' @inheritParams find_random #' #' @return The data from all random effects terms, as data frame. Or `NULL` #' if model has no random effects. #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' # prepare some data... 
#' sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) #' sleepstudy$mysubgrp <- NA #' for (i in 1:5) { #' filter_group <- sleepstudy$mygrp == i #' sleepstudy$mysubgrp[filter_group] <- #' sample(1:30, size = sum(filter_group), replace = TRUE) #' } #' #' m <- lmer( #' Reaction ~ Days + (1 | mygrp / mysubgrp) + (1 | Subject), #' data = sleepstudy #' ) #' #' head(get_random(m)) #' } #' @export get_random <- function(x) { UseMethod("get_random") } #' @export get_random.default <- function(x) { if (.is_empty_object(find_random(x))) { warning("No random effects found in model.") return(NULL) } get_data(x, effects = "random") } #' @export get_random.afex_aov <- function(x) { get_data(x, shape = "long")[find_random(x, flatten = TRUE)] } insight/R/print.easystats_check.R0000644000175000017500000000106513646060343016714 0ustar nileshnilesh#' @export print.easystats_check <- function(x, ...) { # check attributes title <- attr(x, "title") text <- attr(x, "text") color <- attr(x, "color") # no attributes found? check list elements then... if (is.null(title) && is.null(text) && is.null(color)) { if ("title" %in% names(x)) title <- x$title if ("text" %in% names(x)) text <- x$text if ("color" %in% names(x)) color <- x$color } if (!is.null(title)) { print_color(paste0("# ", title, "\n\n"), "blue") } print_color(text, color) invisible(x) } insight/R/clean_parameters.R0000644000175000017500000005163414142156546015723 0ustar nileshnilesh#' @title Get clean names of model parameters #' @name clean_parameters #' #' @description This function "cleans" names of model parameters by removing #' patterns like `"r_"` or `"b[]"` (mostly applicable to Stan models) #' and adding columns with information to which group or component parameters #' belong (i.e. fixed or random, count or zero-inflated...) #' \cr \cr #' The main purpose of this function is to easily filter and select model parameters, #' in particular of - but not limited to - posterior samples from Stan models, #' depending on certain characteristics. This might be useful when only selective #' results should be reported or results from all parameters should be filtered #' to return only certain results (see [print_parameters()]). #' #' @param x A fitted model. #' @inheritParams find_parameters #' #' @return A data frame with "cleaned" parameter names and information on #' effects, component and group where parameters belong to. To be consistent #' across different models, the returned data frame always has at least four #' columns `Parameter`, `Effects`, `Component` and #' `Cleaned_Parameter`. See 'Details'. #' #' @details The `Effects` column indicate if a parameter is a *fixed* #' or *random* effect. The `Component` can either be *conditional* #' or *zero_inflated*. For models with random effects, the `Group` #' column indicates the grouping factor of the random effects. For multivariate #' response models from \pkg{brms} or \pkg{rstanarm}, an additional *Response* #' column is included, to indicate which parameters belong to which response #' formula. Furthermore, *Cleaned_Parameter* column is returned that #' contains "human readable" parameter names (which are mostly identical to #' `Parameter`, except for for models from \pkg{brms} or \pkg{rstanarm}, #' or for specific terms like smooth- or spline-terms). #' #' @examples #' \dontrun{ #' library(brms) #' model <- download_model("brms_zi_2") #' clean_parameters(model) #' } #' @export clean_parameters <- function(x, ...) 
{ UseMethod("clean_parameters") } #' @export clean_parameters.default <- function(x, group = "", ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE ) l <- lapply(names(pars), function(i) { eff <- if (grepl("random", i, fixed = TRUE)) { "random" } else { "fixed" } com <- if (grepl("zero_inflated", i, fixed = TRUE)) { "zero_inflated" } else if (grepl("dispersion", i, fixed = TRUE)) { "dispersion" } else if (grepl("nonlinear", i, fixed = TRUE)) { "nonlinear" } else if (grepl("instruments", i, fixed = TRUE)) { "instruments" } else if (grepl("extra", i, fixed = TRUE)) { "extra" } else if (grepl("scale", i, fixed = TRUE)) { "scale" } else if (grepl("marginal", i, fixed = TRUE)) { "marginal" } else if (grepl("intercept", i, fixed = TRUE)) { "intercept" } else if (grepl("correlation", i, fixed = TRUE)) { "correlation" } else { "conditional" } fun <- if (grepl("smooth", i, fixed = TRUE)) { "smooth" } else { "" } if (eff == "random") { rand_eff <- lapply(names(pars[[i]]), function(j) { data.frame( Parameter = pars[[i]][[j]], Effects = eff, Component = com, Group = j, Function = fun, Cleaned_Parameter = clean_names(pars[[i]][[j]]), stringsAsFactors = FALSE, row.names = NULL ) }) do.call(rbind, rand_eff) } else { data.frame( Parameter = pars[[i]], Effects = eff, Component = com, Group = group, Function = fun, Cleaned_Parameter = clean_names(pars[[i]]), stringsAsFactors = FALSE, row.names = NULL ) } }) out <- .remove_backticks_from_parameter_names(do.call(rbind, l)) out <- .remove_empty_columns_from_pars(out) .fix_random_effect_smooth(x, out) } #' @export clean_parameters.emmGrid <- function(x, ...) { pars <- find_parameters(x, flatten = FALSE) l <- lapply(names(pars), function(i) { data.frame( Parameter = pars[[i]], Component = i, Cleaned_Parameter = .clean_names(x = pars[[i]], is_emmeans = TRUE), stringsAsFactors = FALSE, row.names = NULL ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, l)) out <- .remove_empty_columns_from_pars(out) out } #' @export clean_parameters.emm_list <- clean_parameters.emmGrid #' @export clean_parameters.BFBayesFactor <- function(x, ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE ) l <- lapply(names(pars), function(i) { eff <- if (grepl("random", i, fixed = TRUE)) { "random" } else { "fixed" } com <- if (grepl("extra", i, fixed = TRUE)) { "extra" } else { "conditional" } data.frame( Parameter = pars[[i]], Effects = eff, Component = com, Cleaned_Parameter = pars[[i]], stringsAsFactors = FALSE, row.names = NULL ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, l)) out <- .remove_empty_columns_from_pars(.clean_bfbayesfactor_params(out)) .fix_random_effect_smooth(x, out) } #' @export clean_parameters.wbm <- function(x, ...) 
{ pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE ) l <- lapply(names(pars), function(i) { com <- if (grepl("random", i, fixed = TRUE)) { "interactions" } else if (grepl("instruments", i, fixed = TRUE)) { "instruments" } else { "conditional" } fun <- if (grepl("smooth", i, fixed = TRUE)) { "smooth" } else { "" } data.frame( Parameter = pars[[i]], Effects = "fixed", Component = com, Group = "", Function = fun, Cleaned_Parameter = clean_names(pars[[i]]), stringsAsFactors = FALSE, row.names = NULL ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, l)) out <- .remove_empty_columns_from_pars(out) .fix_random_effect_smooth(x, out) } #' @export clean_parameters.wbgee <- clean_parameters.wbm #' @export clean_parameters.merModList <- function(x, ...) { clean_parameters.default(x[[1]], ...) } #' @export clean_parameters.model_fit <- function(x, ...) { clean_parameters(x$fit, ...) } #' @export clean_parameters.glmm <- function(x, ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE ) l <- lapply(names(pars), function(i) { data.frame( Parameter = pars[[i]], Effects = i, Component = "", Group = "", Function = "", Cleaned_Parameter = clean_names(pars[[i]]), stringsAsFactors = FALSE, row.names = NULL ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, l)) .remove_empty_columns_from_pars(out) } #' @export clean_parameters.MCMCglmm <- clean_parameters.glmm #' @export clean_parameters.lavaan <- function(x, ...) { params <- get_parameters(x) data.frame( Parameter = params$Parameter, Component = params$Component, Group = "", Function = "", Cleaned_Parameter = params$Parameter, stringsAsFactors = FALSE, row.names = NULL ) } #' @export clean_parameters.blavaan <- function(x, ...) { params <- get_parameters(x, summary = TRUE) params$Estimate <- NULL params$Group <- "" params$Function <- "" params$Cleaned_Parameter <- params$Parameter params } #' @export clean_parameters.brmsfit <- function(x, ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE ) is_mv <- is_multivariate(pars) if (is_mv) { l <- do.call( rbind, lapply(names(pars), function(i) .get_stan_params(pars[[i]], response = i)) ) } else { l <- .get_stan_params(pars) } out <- do.call(rbind, l) out <- .remove_empty_columns_from_pars(.clean_brms_params(out, is_mv)) .fix_random_effect_smooth(x, out) } #' @export clean_parameters.stanreg <- function(x, ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE ) l <- .get_stan_params(pars) out <- do.call(rbind, l) out <- .remove_empty_columns_from_pars(.clean_stanreg_params(out)) .fix_random_effect_smooth(x, out) } #' @export clean_parameters.bamlss <- function(x, ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE) l <- .get_stan_params(pars) out <- do.call(rbind, l) out <- .remove_empty_columns_from_pars(.clean_bamlss_params(out)) class(out) <- c("clean_parameters", class(out)) out } #' @export clean_parameters.stanmvreg <- function(x, ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE) l <- do.call( rbind, lapply(names(pars), function(i) .get_stan_params(pars[[i]], response = i)) ) out <- do.call(rbind, l) out <- .remove_empty_columns_from_pars(.clean_stanreg_params(out)) .fix_random_effect_smooth(x, out) } #' @export clean_parameters.stanfit <- clean_parameters.stanreg #' @export clean_parameters.aovlist <- function(x, ...) 
{ pars <- get_parameters(x) clean_parameters.default(x, group = pars$Group) } #' @export clean_parameters.afex_aov <- function(x, ...) { if (!is.null(x$aov)) { clean_parameters(x$aov, ...) } else { clean_parameters(x$lm, ...) } } #' @export clean_parameters.mlm <- function(x, ...) { pars <- find_parameters(x, effects = "all", component = "all", flatten = FALSE) l <- lapply(names(pars), function(i) { eff <- "fixed" com <- "conditional" data.frame( Parameter = pars[[i]]$conditional, Effects = eff, Component = com, Response = i, Cleaned_Parameter = clean_names(pars[[i]]$conditional), stringsAsFactors = FALSE, row.names = NULL ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, l)) out <- .remove_empty_columns_from_pars(out) .fix_random_effect_smooth(x, out) } # helper ------------------------- .get_stan_params <- function(pars, response = NA) { lapply(names(pars), function(i) { eff <- if (grepl("random", i, fixed = TRUE)) { "random" } else { "fixed" } com <- if (grepl("zero_inflated", i, fixed = TRUE)) { "zero_inflated" } else if (grepl("sigma", i, fixed = TRUE)) { "sigma" } else if (grepl("priors", i, fixed = TRUE)) { "priors" } else if (i %in% c("car", "sdcar")) { "car" } else if (grepl("smooth_terms", i, fixed = TRUE)) { "smooth_terms" } else if (grepl("alpha", i, fixed = TRUE)) { "distributional" } else if (grepl("beta", i, fixed = TRUE)) { "distributional" } else if (grepl("mix", i, fixed = TRUE)) { "distributional" } else if (grepl("shiftprop", i, fixed = TRUE)) { "distributional" } else if (grepl("shape", i, fixed = TRUE)) { "distributional" } else if (grepl("auxiliary", i, fixed = TRUE)) { "distributional" } else if (grepl("dispersion", i, fixed = TRUE)) { "dispersion" } else { "conditional" } fun <- if (grepl("smooth", i, fixed = TRUE)) { "smooth" } else { "" } data.frame( Parameter = pars[[i]], Effects = eff, Component = com, Group = "", Response = response, Function = fun, stringsAsFactors = FALSE, row.names = NULL ) }) } .clean_brms_params <- function(out, is_mv) { out$Cleaned_Parameter <- out$Parameter # for multivariate response models, remove responses from parameter names if (is_mv) { resp <- unique(out$Response) resp_pattern <- sprintf("_%s_(.*)", resp) for (i in resp_pattern) { out$Cleaned_Parameter <- gsub(pattern = i, "_\\1", out$Cleaned_Parameter, perl = TRUE) } resp_pattern <- sprintf("__%s(.*)", resp) for (i in resp_pattern) { out$Cleaned_Parameter <- gsub(pattern = i, "\\1", out$Cleaned_Parameter, perl = TRUE) } resp_pattern <- sprintf("__zi_%s(.*)", resp) for (i in resp_pattern) { out$Cleaned_Parameter <- gsub(pattern = i, "\\1", out$Cleaned_Parameter, perl = TRUE) } resp_pattern <- sprintf("(sigma)(_%s)", resp) for (i in resp_pattern) { out$Cleaned_Parameter <- gsub(pattern = i, "\\1", out$Cleaned_Parameter, perl = TRUE) } } smooth_function <- grepl(pattern = "(bs_|bs_zi_)", out$Cleaned_Parameter) if (any(smooth_function)) { out$Function[smooth_function] <- "smooth" } # clean fixed effects, conditional and zero-inflated out$Cleaned_Parameter <- gsub(pattern = "^b_(?!zi_)(.*)\\.(\\d)\\.$", "\\1[\\2]", out$Cleaned_Parameter, perl = TRUE) out$Cleaned_Parameter <- gsub(pattern = "^b_zi_(.*)\\.(\\d)\\.$", "\\1[\\2]", out$Cleaned_Parameter, perl = TRUE) out$Cleaned_Parameter <- gsub(pattern = "^(b_|bs_|bsp_|bcs_)(?!zi_)(.*)", "\\2", out$Cleaned_Parameter, perl = TRUE) out$Cleaned_Parameter <- gsub(pattern = "^(b_zi_|bs_zi_|bsp_zi_|bcs_zi_)(.*)", "\\2", out$Cleaned_Parameter, perl = TRUE) # correlation and sd cor_sd <- grepl("(sd_|cor_)(.*)", 
out$Cleaned_Parameter) if (any(cor_sd)) { out$Cleaned_Parameter[cor_sd] <- gsub("^(sd_|cor_)(.*?)__(.*)", "\\3", out$Parameter[cor_sd], perl = TRUE) out$Group[cor_sd] <- paste("SD/Cor:", gsub("^(sd_|cor_)(.*?)__(.*)", "\\2", out$Parameter[cor_sd], perl = TRUE)) # replace "__" by "~" cor_only <- grepl("^cor_", out$Parameter[cor_sd]) if (any(cor_only)) { out$Cleaned_Parameter[which(cor_sd)[cor_only]] <- sub("__", " ~ ", out$Cleaned_Parameter[which(cor_sd)[cor_only]]) } } # extract group-names from random effects and clean random effects rand_eff <- grepl("^r_(.*)\\[(.*)\\]", out$Cleaned_Parameter) if (any(rand_eff)) { r_pars <- gsub("^r_(.*)\\[(.*),(.*)\\]", "\\1.\\2", out$Cleaned_Parameter[rand_eff]) r_grps <- gsub("^r_(.*)\\[(.*),(.*)\\]", "\\3: \\1", out$Cleaned_Parameter[rand_eff]) r_pars <- gsub("__zi", "", r_pars) r_grps <- gsub("__zi", "", r_grps) out$Cleaned_Parameter[rand_eff] <- r_pars out$Group[rand_eff] <- r_grps } # clean remaining parameters priors <- grepl("^prior_", out$Cleaned_Parameter) if (length(priors)) { out$Cleaned_Parameter <- gsub("^prior_", "", out$Cleaned_Parameter) out$Component[priors] <- "priors" } simplex <- grepl("^simo_", out$Cleaned_Parameter) if (length(simplex)) { out$Cleaned_Parameter[simplex] <- gsub("^(simo_|simo_mo)(.*)\\[(\\d)\\]$", "\\2[\\3]", out$Cleaned_Parameter[simplex]) out$Component[simplex] <- "simplex" } smooth <- grepl("^sds_", out$Cleaned_Parameter) if (length(smooth)) { out$Cleaned_Parameter <- gsub("^sds_", "", out$Cleaned_Parameter) out$Component[smooth] <- "smooth_sd" out$Function[smooth] <- "smooth" } # fix intercept names intercepts <- which(out$Cleaned_Parameter %in% c("Intercept", "zi_Intercept")) if (!.is_empty_object(intercepts)) { out$Cleaned_Parameter[intercepts] <- "(Intercept)" } interaction_terms <- which(grepl("\\.", out$Cleaned_Parameter)) if (length(interaction_terms)) { for (i in interaction_terms) { i_terms <- strsplit(out$Cleaned_Parameter[i], "\\.") find_i_terms <- sapply(i_terms, function(j) j %in% out$Cleaned_Parameter) if (all(find_i_terms)) { out$Cleaned_Parameter[i] <- gsub("\\.", ":", out$Cleaned_Parameter[i]) } } } out } .clean_stanreg_params <- function(out) { out$Cleaned_Parameter <- out$Parameter # extract group-names from random effects and clean random effects rand_intercepts <- grepl("^b\\[\\(Intercept\\)", out$Cleaned_Parameter) if (any(rand_intercepts)) { re_grp_level <- gsub("b\\[(.*) (.*):(.*)\\]", "\\2", out$Cleaned_Parameter[rand_intercepts]) out$Cleaned_Parameter[rand_intercepts] <- gsub( "b\\[\\(Intercept\\) (.*)\\]", "\\1", out$Cleaned_Parameter[rand_intercepts] ) out$Group[rand_intercepts] <- sprintf("Intercept: %s", re_grp_level) } # correlation and sd cor_sd <- grepl("^Sigma\\[(.*)", out$Cleaned_Parameter) if (any(cor_sd)) { parm1 <- gsub("^Sigma\\[(.*):(.*),(.*)\\]", "\\2", out$Parameter[cor_sd], perl = TRUE) parm2 <- gsub("^Sigma\\[(.*):(.*),(.*)\\]", "\\3", out$Parameter[cor_sd], perl = TRUE) out$Cleaned_Parameter[which(cor_sd)] <- parm1 rand_cor <- parm1 != parm2 if (any(rand_cor)) { out$Cleaned_Parameter[which(cor_sd)[rand_cor]] <- paste0(parm1[rand_cor], " ~ ", parm2[rand_cor]) } out$Group[cor_sd] <- paste("SD/Cor:", gsub("^Sigma\\[(.*):(.*),(.*)\\]", "\\1", out$Parameter[cor_sd], perl = TRUE)) } # extract group-names from random effects and clean random effects rand_effects <- grepl("^b\\[", out$Cleaned_Parameter) if (any(rand_effects)) { re_grp_level <- gsub("b\\[(.*) (.*):(.*)\\]", "\\2", out$Cleaned_Parameter[rand_effects]) r_grps <- gsub("b\\[(.*) (.*)\\]", "\\1", 
out$Cleaned_Parameter[rand_effects]) r_pars <- gsub("b\\[(.*) (.*)\\]", "\\2", out$Cleaned_Parameter[rand_effects]) out$Group[rand_effects] <- sprintf("%s: %s", r_grps, re_grp_level) out$Cleaned_Parameter[rand_effects] <- r_pars } # clean remaining parameters smooth <- grepl("^smooth_sd\\[", out$Cleaned_Parameter) if (length(smooth)) { out$Cleaned_Parameter <- gsub("^smooth_sd\\[(.*)\\]", "\\1", out$Cleaned_Parameter) out$Component[smooth] <- "smooth_sd" out$Function[smooth] <- "smooth" } out } .clean_bfbayesfactor_params <- function(out) { pars <- do.call(rbind, strsplit(out$Parameter, "-", TRUE)) if (ncol(pars) == 1) { return(out) } out$Cleaned_Parameter <- tryCatch( { apply(pars, 1, function(i) { if (i[1] == i[2]) { i[2] <- "" } else if (i[1] != i[2] && !grepl(":", i[1], fixed = TRUE)) { i[1] <- paste0(i[1], " [", i[2], "]") i[2] <- "" } else if (grepl(":", i[1], fixed = TRUE)) { f <- unlist(strsplit(i[1], ":", fixed = TRUE)) l <- unlist(strsplit(i[2], ".&.", fixed = TRUE)) matches <- stats::na.omit(match(f, l)) l[matches] <- "" l[-matches] <- paste0("[", l[-matches], "]") i[1] <- paste0(f, l, collapse = " * ") } as.vector(i[1]) }) }, error = function(e) { out$Cleaned_Parameter } ) out } .clean_bamlss_params <- function(out) { out$Cleaned_Parameter <- out$Parameter out$Cleaned_Parameter <- gsub("^(mu\\.p\\.|pi\\.p\\.)(.*)", "\\2", out$Cleaned_Parameter) out$Cleaned_Parameter <- gsub("^(mu\\.s\\.|pi\\.s\\.)(.*)", "s\\(\\2\\)", out$Cleaned_Parameter) out$Cleaned_Parameter <- gsub("^sigma\\.p\\.(.*)", "sigma \\(\\1\\)", out$Cleaned_Parameter) out$Cleaned_Parameter <- gsub("..", ".", out$Cleaned_Parameter, fixed = TRUE) out$Cleaned_Parameter <- gsub(".)", ")", out$Cleaned_Parameter, fixed = TRUE) out$Cleaned_Parameter <- gsub("(.", "(", out$Cleaned_Parameter, fixed = TRUE) out$Cleaned_Parameter <- gsub(".Intercept.", "Intercept", out$Cleaned_Parameter, fixed = TRUE) out$Cleaned_Parameter <- gsub("(\\.$)", "", out$Cleaned_Parameter) out } .remove_empty_columns_from_pars <- function(x) { if (.obj_has_name(x, "Response") && all(is.na(x$Response))) { pos <- which(colnames(x) == "Response") x <- x[, -pos] } if (.obj_has_name(x, "Group") && .is_empty_string(x$Group)) { pos <- which(colnames(x) == "Group") x <- x[, -pos] } if (.obj_has_name(x, "Function") && .is_empty_string(x$Function)) { pos <- which(colnames(x) == "Function") x <- x[, -pos] } x } # Fix random effects assignment for smooth terms # # This function checks whether smooth terms were used as random effects, # (i.e. s(term, by="re")) and if so, the value in the "effects" column will # be set to "random". # .fix_random_effect_smooth <- function(x, out) { if ("Function" %in% colnames(out) && "smooth" %in% out$Function) { vars <- find_terms(x)$conditional vars <- gsub(" ", "", vars, fixed = TRUE) random_smooth_terms <- grepl("^s\\((.*)(bs=\"re\"+)\\)", x = vars) if (any(random_smooth_terms)) { random_term <- paste0( "s(", gsub("^s\\(([^,]*)(.*)(bs=\"re\"+)\\)", "\\1", vars[random_smooth_terms]), ")" ) out$Effects[which(out$Parameter == random_term)] <- "random" } } class(out) <- c("clean_parameters", class(out)) out } insight/R/find_algorithm.R0000644000175000017500000001472714101466051015374 0ustar nileshnilesh#' @title Find sampling algorithm and optimizers #' @name find_algorithm #' #' @description Returns information on the sampling or estimation algorithm #' as well as optimization functions, or for Bayesian model information on #' chains, iterations and warmup-samples. 
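#'
#' As a minimal illustration: for a model fitted with `lm()`, the result is
#' simply `list(algorithm = "OLS")`, while for a mixed model fitted with
#' `lmer()` it also indicates whether `"REML"` or `"ML"` was used and which
#' optimizer was applied (see the examples below).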
#' #' @inheritParams find_parameters #' #' @return A list with elements depending on the model. #' \cr #' For frequentist models: #' \itemize{ #' \item `algorithm`, for instance `"OLS"` or `"ML"` #' \item `optimizer`, name of optimizing function, only applies to #' specific models (like `gam`) #' } #' For frequentist mixed models: #' \itemize{ #' \item `algorithm`, for instance `"REML"` or `"ML"` #' \item `optimizer`, name of optimizing function #' } #' For Bayesian models: #' \itemize{ #' \item `algorithm`, the algorithm #' \item `chains`, number of chains #' \item `iterations`, number of iterations per chain #' \item `warmup`, number of warmups per chain #' } #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' m <- lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) #' find_algorithm(m) #' } #' \dontrun{ #' library(rstanarm) #' m <- stan_lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) #' find_algorithm(m) #' } #' @export find_algorithm <- function(x, ...) { UseMethod("find_algorithm") } #' @export find_algorithm.default <- function(x, ...) { warning(sprintf("Objects of class `%s` are not supported.", class(x)[1])) NULL } #' @export find_algorithm.Gam <- function(x, ...) { list("algorithm" = "IWLS") } #' @export find_algorithm.lmRob <- function(x, ...) { list("algorithm" = x$robust.control$final.alg) } #' @export find_algorithm.lmrob <- function(x, ...) { list("algorithm" = x$control$method) } #' @export find_algorithm.glmrob <- function(x, ...) { list("algorithm" = x$method) } #' @export find_algorithm.logistf <- function(x, ...) { list("algorithm" = x$method) } #' @export find_algorithm.bigglm <- function(x, ...) { list("algorithm" = "ML") } #' @export find_algorithm.BBreg <- function(x, ...) { list("algorithm" = "ML") } #' @export find_algorithm.Arima <- function(x, ...) { list("algorithm" = "ML") } #' @export find_algorithm.glimML <- function(x, ...) { list("algorithm" = "ML") } #' @export find_algorithm.BBmm <- function(x, ...) { method <- parse(text = .safe_deparse(x$call))[[1]]$method if (is.null(method)) method <- "BB-NR" list(algorithm = "extended likelihood", optimizer = method) } #' @export find_algorithm.biglm <- function(x, ...) { list("algorithm" = "OLS") } #' @export find_algorithm.gamlss <- function(x, ...) { list("algorithm" = as.character(x$method)[1]) } #' @export find_algorithm.gam <- function(x, ...) { list( "algorithm" = x$method, "optimizer" = x$optimizer ) } #' @export find_algorithm.scam <- find_algorithm.gam #' @export find_algorithm.lm <- function(x, ...) { list("algorithm" = "OLS") } #' @export find_algorithm.systemfit <- function(x, ...) { list("algorithm" = x$method) } #' @export find_algorithm.afex_aov <- function(x, ...) { list("algorithm" = "OLS") } #' @export find_algorithm.speedlm <- function(x, ...) { list("algorithm" = x$method) } #' @export find_algorithm.blavaan <- function(x, ...) { # installed? check_if_installed("blavaan") list( "chains" = blavaan::blavInspect(x, "n.chains"), "sample" = x@external$sample, "warmup" = x@external$burnin ) } #' @export find_algorithm.speedglm <- function(x, ...) { list("algorithm" = x$method) } #' @export find_algorithm.rq <- function(x, ...) { list("algorithm" = x$method) } #' @export find_algorithm.bayesx <- function(x, ...) { list( "algorithm" = x$method, "iterations" = x$iterations, "warmup" = x$burnin ) } #' @export find_algorithm.crq <- function(x, ...) { list("algorithm" = x$method) } #' @export find_algorithm.rqss <- function(x, ...) 
{ list("algorithm" = x$method) } #' @export find_algorithm.glm <- function(x, ...) { list("algorithm" = "ML") } #' @export find_algorithm.LORgee <- function(x, ...) { list("algorithm" = "Fisher's scoring ML") } #' @export find_algorithm.merMod <- function(x, ...) { algorithm <- ifelse(as.logical(x@devcomp$dims[["REML"]]), "REML", "ML") list( "algorithm" = algorithm, "optimizer" = as.character(x@optinfo$optimizer) ) } #' @export find_algorithm.rlmerMod <- find_algorithm.merMod #' @export find_algorithm.merModList <- function(x, ...) { find_algorithm(x[[1]], ...) } #' @export find_algorithm.mixed <- function(x, ...) { x <- x$full_model algorithm <- ifelse(as.logical(x@devcomp$dims[["REML"]]), "REML", "ML") list( "algorithm" = algorithm, "optimizer" = as.character(x@optinfo$optimizer) ) } #' @export find_algorithm.lme <- function(x, ...) { optimizer <- "nlminb" if (!is.null(x$call$control) && "optim" %in% as.character(x$call$control)) { optimizer <- "optim" } list( "algorithm" = x$method, "optimizer" = optimizer ) } #' @export find_algorithm.MixMod <- function(x, ...) { list( ## TODO fix me "algorithm" = "quasi-Newton", "optimizer" = x$control$optimizer ) } #' @export find_algorithm.glmmTMB <- function(x, ...) { algorithm <- ifelse(x$modelInfo$REML, "REML", "ML") list( "algorithm" = algorithm, "optimizer" = "nlminb" ) } #' @export find_algorithm.stanreg <- function(x, ...) { info <- x$stanfit@sim list( "algorithm" = x$algorithm, "chains" = info$chains, "iterations" = info$iter, "warmup" = info$warmup ) } #' @export find_algorithm.stanfit <- function(x, ...) { info <- x@sim algorithm <- unlist(x@stan_args) list( "algorithm" = as.vector(algorithm["algorithm"]), "chains" = info$chains, "iterations" = info$iter, "warmup" = info$warmup ) } #' @export find_algorithm.bayesQR <- function(x, ...) { list( "algorithm" = x[[1]]$method, "iterations" = nrow(x[[1]]$betadraw) ) } #' @export find_algorithm.brmsfit <- function(x, ...) { info <- x$fit@sim list( "algorithm" = x$algorithm, "chains" = info$chains, "iterations" = info$iter, "warmup" = info$warmup ) } insight/R/display.R0000644000175000017500000000312514077615664014063 0ustar nileshnilesh#' @title Generic export of data frames into formatted tables #' @name display #' #' @description `display()` is a generic function to export data frames #' into various table formats (like plain text, markdown, ...). `print_md()` #' usually is a convenient wrapper for `display(format = "markdown")`. #' Similar, `print_html()` is a shortcut for `display(format = "html")`. #' See the documentation for the specific objects' classes. #' #' @param object,x A data frame. #' @param format String, indicating the output format. Can be `"markdown"` or `"html"`. #' @param ... Arguments passed to other methods. #' #' @return Depending on `format`, either an object of class `gt_tbl` #' or a character vector of class `knitr_kable`. #' #' @examples #' display(iris[1:5, ]) #' @export display <- function(object, ...) { UseMethod("display") } #' @rdname display #' @export print_md <- function(x, ...) { UseMethod("print_md") } #' @rdname display #' @export print_html <- function(x, ...) { UseMethod("print_html") } # data.frame -------------------------------------------------------------- #' @rdname display #' @export display.data.frame <- function(object, format = "markdown", ...) { if (identical(format, "html")) { print_html(x = object, ...) } else { print_md(x = object, ...) } } #' @rdname display #' @export print_md.data.frame <- function(x, ...) 
{ export_table(x, format = "markdown", ...) } #' @rdname display #' @export print_html.data.frame <- function(x, ...) { export_table(x, format = "html", ...) } insight/R/get_parameters.R0000644000175000017500000004563114137207374015420 0ustar nileshnilesh#' @title Get model parameters #' @name get_parameters #' #' @description Returns the coefficients (or posterior samples for Bayesian #' models) from a model. See the documentation for your object's class: #' \itemize{ #' \item{[Bayesian models][get_parameters.BGGM] (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, ...)} #' \item{[Estimated marginal means][get_parameters.emmGrid] (\pkg{emmeans})} #' \item{[Generalized additive models][get_parameters.gamm] (\pkg{mgcv}, \pkg{VGAM}, ...)} #' \item{[Marginal effects models][get_parameters.betamfx] (\pkg{mfx})} #' \item{[Mixed models][get_parameters.glmm] (\pkg{lme4}, \pkg{glmmTMB}, \pkg{GLMMadaptive}, ...)} #' \item{[Zero-inflated and hurdle models][get_parameters.zeroinfl] (\pkg{pscl}, ...)} #' \item{[Models with special components][get_parameters.betareg] (\pkg{betareg}, \pkg{MuMIn}, ...)} #' \item{[Hypothesis tests][get_parameters.htest] (`htest`)} #' } #' #' @param verbose Toggle messages and warnings. #' @param ... Currently not used. #' #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return \itemize{ #' \item for non-Bayesian models, a data frame with two columns: the parameter names and the related point estimates. #' \item for Anova (`aov()`) with error term, a list of parameters for the conditional and the random effects parameters #' } #' #' @details In most cases when models either return different "effects" (fixed, #' random) or "components" (conditional, zero-inflated, ...), the arguments #' `effects` and `component` can be used. #' \cr \cr #' `get_parameters()` is comparable to `coef()`, however, the coefficients #' are returned as data frame (with columns for names and point estimates of #' coefficients). For Bayesian models, the posterior samples of parameters are #' returned. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_parameters(m) #' @export get_parameters <- function(x, ...) { UseMethod("get_parameters") } # Default models --------------------------------------------- #' @rdname get_parameters #' @export get_parameters.default <- function(x, verbose = TRUE, ...) { if (inherits(x, "list") && .obj_has_name(x, "gam")) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) return(get_parameters.gam(x, ...)) } tryCatch( { cf <- stats::coef(x) params <- names(cf) if (is.null(params)) { params <- paste(1:length(cf)) } params <- data.frame( Parameter = params, Estimate = unname(cf), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) }, error = function(x) { if (isTRUE(verbose)) { warning(sprintf("Parameters can't be retrieved for objects of class '%s'.", class(x)[1]), call. = FALSE) } return(NULL) } ) } #' @export get_parameters.summary.lm <- function(x, ...) { cf <- stats::coef(x) params <- data.frame( Parameter = names(cf[, 1]), Estimate = unname(cf[, 1]), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.data.frame <- function(x, ...) 
{ stop("A data frame is no valid object for this function") } # Special models --------------------------------------------- #' @export get_parameters.rms <- get_parameters.default #' @export get_parameters.tobit <- get_parameters.default #' @export get_parameters.model_fit <- function(x, ...) { get_parameters(x$fit, ...) } #' @export get_parameters.bfsl <- function(x, ...) { cf <- stats::coef(x) params <- data.frame( Parameter = rownames(cf), Estimate = unname(cf[, "Estimate"]), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.selection <- function(x, component = c("all", "selection", "outcome", "auxiliary"), ...) { component <- match.arg(component) s <- summary(x) rn <- row.names(s$estimate) estimates <- as.data.frame(s$estimate, row.names = FALSE) params <- data.frame( Parameter = rn, Estimate = estimates[[1]], Component = "auxiliary", stringsAsFactors = FALSE, row.names = NULL ) params$Component[s$param$index$betaS] <- "selection" params$Component[s$param$index$betaO] <- "outcome" if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.epi.2by2 <- function(x, ...) { coef_names <- grepl("^([^NNT]*)(\\.strata\\.wald)", names(x$massoc.detail), perl = TRUE) cf <- x$massoc.detail[coef_names] names(cf) <- gsub(".strata.wald", "", names(cf), fixed = TRUE) params <- data.frame( Parameter = names(cf), Estimate = unname(unlist(lapply(cf, function(i) i["est"]))), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.Rchoice <- function(x, ...) { cf <- stats::coef(x) params <- data.frame( Parameter = find_parameters(x, flatten = TRUE), Estimate = as.vector(cf), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.btergm <- function(x, ...) { cf <- x@coef params <- data.frame( Parameter = names(cf), Estimate = as.vector(cf), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.mediate <- function(x, ...) { info <- model_info(x$model.y, verbose = FALSE) if (info$is_linear && !x$INT) { out <- data.frame( Parameter = c("ACME", "ADE", "Total Effect", "Prop. Mediated"), Estimate = c(x$d1, x$z0, x$tau.coef, x$n0), stringsAsFactors = FALSE ) } else { out <- data.frame( Parameter = c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. Mediated (average)" ), Estimate = c(x$d0, x$d1, x$z0, x$z1, x$tau.coef, x$n0, x$n1, x$d.avg, x$z.avg, x$n.avg), stringsAsFactors = FALSE ) } .remove_backticks_from_parameter_names(out) } #' @export get_parameters.ridgelm <- function(x, ...) { out <- data.frame( Parameter = names(x$coef), Estimate = as.vector(x$coef), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.ivFixed <- function(x, ...) { out <- data.frame( Parameter = rownames(x$coefficients), Estimate = as.vector(x$coefficients), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.ivprobit <- function(x, ...) { out <- data.frame( Parameter = x$names, Estimate = as.vector(x$coefficients), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.survreg <- function(x, ...) 
{ s <- summary(x) out <- data.frame( Parameter = rownames(s$table), Estimate = as.vector(s$table[, 1]), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.riskRegression <- function(x, ...) { junk <- utils::capture.output(cs <- stats::coef(x)) out <- data.frame( Parameter = as.vector(cs[, 1]), Estimate = as.numeric(cs[, 2]), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.mipo <- function(x, ...) { out <- data.frame( Parameter = as.vector(summary(x)$term), Estimate = as.vector(summary(x)$estimate), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.mira <- function(x, ...) { # installed? check_if_installed("mice") get_parameters(mice::pool(x), ...) } #' @export get_parameters.margins <- function(x, ...) { s <- summary(x) param <- as.vector(s$factor) estimate_pos <- which(colnames(s) == "AME") if (estimate_pos > 2) { out <- s[1:(estimate_pos - 1)] r <- apply(out, 1, function(i) paste0(colnames(out), " [", i, "]")) param <- unname(sapply(as.data.frame(r), paste, collapse = ", ")) } out <- data.frame( Parameter = param, Estimate = as.vector(summary(x)$AME), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.glht <- function(x, ...) { s <- summary(x) alt <- switch(x$alternative, two.sided = "==", less = ">=", greater = "<=" ) out <- data.frame( Parameter = paste(names(s$test$coefficients), alt, x$rhs), Estimate = unname(s$test$coefficients), stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.mle2 <- function(x, ...) { # installed? check_if_installed("bbmle") s <- bbmle::summary(x) params <- data.frame( Parameter = names(s@coef[, 1]), Estimate = unname(s@coef[, 1]), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.mle <- get_parameters.mle2 #' @export get_parameters.lrm <- function(x, ...) { tryCatch( { cf <- stats::coef(x) params <- data.frame( Parameter = names(cf), Estimate = unname(cf), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) }, error = function(x) { NULL } ) } #' @export get_parameters.orm <- get_parameters.lrm #' @export get_parameters.multinom <- function(x, ...) { params <- stats::coef(x) if (is.matrix(params)) { out <- data.frame() for (i in 1:nrow(params)) { out <- rbind(out, data.frame( Parameter = colnames(params), Estimate = unname(params[i, ]), Response = rownames(params)[i], stringsAsFactors = FALSE, row.names = NULL )) } } else { out <- data.frame( Parameter = names(params), Estimate = unname(params), stringsAsFactors = FALSE, row.names = NULL ) } .remove_backticks_from_parameter_names(out) } #' @export get_parameters.brmultinom <- get_parameters.multinom #' @export get_parameters.mlm <- function(x, ...) { cs <- stats::coef(summary(x)) out <- lapply(names(cs), function(i) { params <- data.frame( Parameter = rownames(cs[[i]]), Estimate = cs[[i]][, 1], Response = gsub("^Response (.*)", "\\1", i), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) }) do.call(rbind, out) } #' @export get_parameters.gbm <- function(x, ...) { s <- summary(x, plotit = FALSE) params <- data.frame( Parameter = as.character(s$var), Estimate = s$rel.inf, stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.BBreg <- function(x, ...) 
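# Illustrative sketch (added; not part of the original source) ----
# For multinomial models, coef() returns a matrix (one row per outcome level);
# the method above stacks it into long format with a "Response" column.
# Assumes the 'nnet' package is available:
#
#   m <- nnet::multinom(Species ~ Sepal.Width, data = iris, trace = FALSE)
#   get_parameters(m)   # columns: Parameter, Estimate, Response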
{ pars <- summary(x)$coefficients params <- data.frame( Parameter = rownames(pars), Estimate = pars[, "Estimate"], stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.rma <- function(x, ...) { tryCatch( { cf <- stats::coef(x) params <- data.frame( Parameter = names(cf), Estimate = unname(cf), stringsAsFactors = FALSE, row.names = NULL ) params$Parameter[grepl("intrcpt", params$Parameter)] <- "(Intercept)" .remove_backticks_from_parameter_names(params) }, error = function(x) { NULL } ) } #' @export get_parameters.meta_random <- function(x, ...) { tryCatch( { cf <- x$estimates params <- data.frame( Parameter = rownames(cf), Estimate = unname(cf[, 1]), stringsAsFactors = FALSE, row.names = NULL ) params$Parameter[grepl("d", params$Parameter)] <- "(Intercept)" .remove_backticks_from_parameter_names(params) }, error = function(x) { NULL } ) } #' @export get_parameters.meta_fixed <- get_parameters.meta_random #' @export get_parameters.meta_bma <- get_parameters.meta_random #' @export get_parameters.metaplus <- function(x, ...) { params <- data.frame( Parameter = rownames(x$results), Estimate = unname(x$results[, 1]), stringsAsFactors = FALSE, row.names = NULL ) params$Parameter[grepl("muhat", params$Parameter)] <- "(Intercept)" .remove_backticks_from_parameter_names(params) } # SEM models --------------------------------------------- #' @export get_parameters.blavaan <- function(x, summary = FALSE, centrality = "mean", ...) { # installed? check_if_installed("lavaan") check_if_installed("blavaan") draws <- blavaan::blavInspect(x, "draws") posteriors <- as.data.frame(as.matrix(draws)) param_tab <- lavaan::parameterEstimates(x) params <- paste0(param_tab$lhs, param_tab$op, param_tab$rhs) coef_labels <- names(lavaan::coef(x)) if ("group" %in% colnames(param_tab) && .n_unique(param_tab$group) > 1) { params <- paste0(params, " (group ", param_tab$group, ")") groups <- grepl("(.*)\\.g(.*)", coef_labels) coef_labels[!groups] <- paste0(coef_labels[!groups], " (group 1)") coef_labels[groups] <- gsub("(.*)\\.g(.*)", "\\1 \\(group \\2\\)", coef_labels[groups]) } are_labels <- !coef_labels %in% params if (any(are_labels)) { unique_labels <- unique(coef_labels[are_labels]) for (ll in seq_along(unique_labels)) { coef_labels[coef_labels == unique_labels[ll]] <- params[param_tab$label == unique_labels[ll]] } } colnames(posteriors) <- coef_labels if (isTRUE(summary)) { posteriors <- .summary_of_posteriors(posteriors, centrality = centrality) posteriors$Component <- NA posteriors$Component[grepl("=~", posteriors$Parameter, fixed = TRUE)] <- "latent" posteriors$Component[grepl("~~", posteriors$Parameter, fixed = TRUE)] <- "residual" posteriors$Component[grepl("~1", posteriors$Parameter, fixed = TRUE)] <- "intercept" posteriors$Component[is.na(posteriors$Component)] <- "regression" } posteriors } #' @export get_parameters.lavaan <- function(x, ...) { # installed? 
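# Illustrative sketch (added; not part of the original source) ----
# For meta-analysis objects, the overall-effect term ("intrcpt", "d", "muhat",
# ...) is relabelled "(Intercept)". A hedged example, assuming 'metafor' and
# its classic BCG data set are available:
#
#   dat <- metafor::escalc(measure = "RR", ai = tpos, bi = tneg,
#                          ci = cpos, di = cneg, data = metafor::dat.bcg)
#   m <- metafor::rma(yi, vi, data = dat)
#   get_parameters(m)   # Parameter "(Intercept)" instead of "intrcpt"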
check_if_installed("lavaan") params <- lavaan::parameterEstimates(x) params$parameter <- paste0(params$lhs, params$op, params$rhs) params$comp <- NA params$comp[params$op == "~"] <- "regression" params$comp[params$op == "=~"] <- "latent" params$comp[params$op == "~~"] <- "residual" params$comp[params$op == "~1"] <- "intercept" params <- data.frame( Parameter = params$parameter, Estimate = params$est, Component = params$comp, stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(params) } # Ordinal models --------------------------------------------- #' @export get_parameters.polr <- function(x, ...) { pars <- c(sprintf("Intercept: %s", names(x$zeta)), names(x$coefficients)) params <- data.frame( Parameter = pars, Estimate = c(unname(x$zeta), unname(x$coefficients)), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.bracl <- function(x, ...) { pars <- stats::coef(x) params <- data.frame( Parameter = names(pars), Estimate = unname(pars), Response = gsub("(.*):(.*)", "\\1", names(pars)), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } # Standard models -------------------------------------------------- #' @export get_parameters.aov <- function(x, ...) { cf <- stats::coef(x) params <- data.frame( Parameter = names(cf), Estimate = unname(cf), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @export get_parameters.aovlist <- function(x, ...) { cs <- stats::coef(x) out <- do.call(rbind, lapply(names(cs), function(i) { params <- data.frame( Parameter = names(cs[[i]]), Estimate = unname(cs[[i]]), Group = i, stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(params) })) rownames(out) <- NULL out } #' @export get_parameters.manova <- function(x, ...) { params <- stats::na.omit(stats::coef(x)) out <- .gather(as.data.frame(params), names_to = "Response", values_to = "Estimate") out$Parameter <- rownames(out) out <- out[c("Parameter", "Estimate", "Response")] rownames(out) <- NULL pattern <- paste0("(", paste0(paste0(".", unique(out$Response)), collapse = "|"), ")$") out$Parameter <- gsub(pattern, "", out$Parameter) .remove_backticks_from_parameter_names(out) } #' @export get_parameters.maov <- get_parameters.manova #' @export get_parameters.afex_aov <- function(x, ...) { if (!is.null(x$aov)) { get_parameters(x$aov, ...) } else { get_parameters(x$lm, ...) } } #' @export get_parameters.pgmm <- function(x, component = c("conditional", "all"), ...) 
{ component <- match.arg(component) cs <- stats::coef(summary(x, time.dummies = TRUE, robust = FALSE)) params <- data.frame( Parameter = rownames(cs), Estimate = unname(cs[, 1]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) params$Component[params$Parameter %in% x$args$namest] <- "time_dummies" if (component == "conditional") { params <- params[params$Component == "conditional", ] params <- .remove_column(params, "Component") } .remove_backticks_from_parameter_names(params) } # utility functions --------------------------------- .get_armsim_fixef_parms <- function(x) { sn <- methods::slotNames(x) as.data.frame(methods::slot(x, sn[1])) } .get_armsim_ranef_parms <- function(x) { dat <- NULL if (methods::.hasSlot(x, "ranef")) { re <- x@ranef dat <- data.frame() for (i in 1:length(re)) { dn <- dimnames(re[[i]])[[2]] cn <- dimnames(re[[i]])[[3]] l <- lapply(1:length(dn), function(j) { d <- as.data.frame(re[[i]][, j, ]) colnames(d) <- sprintf("%s.%s", cn, dn[j]) d }) if (ncol(dat) == 0) { dat <- do.call(cbind, l) } else { dat <- cbind(dat, do.call(cbind, l)) } } } dat } insight/R/fish.R0000644000175000017500000000023613647263111013333 0ustar nileshnilesh#' @docType data #' @title Sample data set #' @name fish #' @keywords data #' #' @description A sample data set, used in tests and some examples. NULL insight/R/get_residuals.R0000644000175000017500000001352014077615665015251 0ustar nileshnilesh#' @title Extract model residuals #' #' @description Returns the residuals from regression models. #' #' @name get_residuals #' #' @param x A model. #' @param weighted Logical, if `TRUE`, returns weighted residuals. #' @param verbose Toggle warnings and messages. #' @param ... Passed down to `residuals()`, if possible. #' #' @return The residuals, or `NULL` if this information could not be #' accessed. #' #' @note This function returns the default type of residuals, i.e. for the #' response from linear models, the deviance residuals for models of class #' `glm` etc. To access different types, pass down the `type` #' argument (see 'Examples'). #' \cr \cr #' This function is a robust alternative to `residuals()`, as it works for #' some special model objects that otherwise do not respond properly to calling #' `residuals()`. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_residuals(m) #' #' m <- glm(vs ~ wt + cyl + mpg, data = mtcars, family = binomial()) #' get_residuals(m) # type = "deviance" by default #' get_residuals(m, type = "response") #' @export get_residuals <- function(x, ...) { UseMethod("get_residuals") } #' @rdname get_residuals #' @export get_residuals.default <- function(x, weighted = FALSE, verbose = TRUE, ...) { # setup, check if user requested specific type of residuals # later, we can only catch response residuals, in such cases, give warning # when type is not "response"... dot_args <- list(...) no_response_resid <- !is.null(dot_args[["type"]]) && dot_args[["type"]] != "response" res_type <- dot_args[["type"]] yield_warning <- FALSE if (isTRUE(weighted)) { return(.weighted_residuals(x, verbose)) } res <- tryCatch( { stats::residuals(x, ...) 
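# Illustrative usage sketch (added; not part of the original source) ----
# The default residual type depends on the model class; other types are simply
# forwarded to residuals(), and weighted = TRUE returns weighted (deviance)
# residuals. Base R only:
#
#   m <- glm(vs ~ wt + mpg, data = mtcars, family = binomial())
#   get_residuals(m)                     # deviance residuals (default for glm)
#   get_residuals(m, type = "pearson")   # forwarded to residuals()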
}, error = function(e) { NULL } ) if (is.null(res)) { res <- tryCatch( { x$residuals }, error = function(e) { NULL } ) } # For gamm4 objects if (is.null(res)) { res <- tryCatch( { x$gam$residuals }, error = function(e) { NULL } ) } if (is.null(res)) { res <- tryCatch( { yield_warning <- no_response_resid && verbose pred <- stats::predict(x, type = "response") observed <- .factor_to_numeric(get_response(x, verbose = FALSE)) observed - pred }, error = function(e) { NULL } ) } if (is.null(res)) { res <- tryCatch( { yield_warning <- no_response_resid && verbose pred <- stats::fitted(x) observed <- .factor_to_numeric(get_response(x, verbose = FALSE)) observed - pred }, error = function(e) { NULL } ) } if (is.null(res) || all(is.na(res))) { if (verbose) warning("Can't extract residuals from model.") res <- NULL } else if (yield_warning) { warning(format_message(paste0("Can't extract '", res_type, "' residuals. Returning response residuals.")), call. = FALSE) } res } #' @export get_residuals.vgam <- function(x, weighted = FALSE, verbose = TRUE, ...) { if (isTRUE(weighted)) { return(.weighted_residuals(x, verbose)) } x@residuals } #' @export get_residuals.vglm <- get_residuals.vgam #' @export get_residuals.model_fit <- function(x, weighted = FALSE, verbose = TRUE, ...) { get_residuals(x$fit, weighted = weighted, verbose = verbose, ...) } #' @export get_residuals.coxph <- function(x, weighted = FALSE, verbose = TRUE, ...) { if (isTRUE(weighted)) { return(.weighted_residuals(x, verbose)) } stats::residuals(x, ...) } #' @export get_residuals.crr <- function(x, weighted = FALSE, verbose = TRUE, ...) { if (isTRUE(weighted) && isTRUE(verbose)) { warning("Weighted residuals are not supported for 'crr' models.", call. = FALSE) } x$res } #' @export get_residuals.slm <- function(x, weighted = FALSE, verbose = TRUE, ...) { if (isTRUE(weighted)) { return(.weighted_residuals(x, verbose)) } res <- tryCatch( { junk <- utils::capture.output(pred <- stats::predict(x, type = "response")) observed <- .factor_to_numeric(get_response(x)) observed - pred }, error = function(e) { NULL } ) if (is.null(res) || all(is.na(res))) { if (verbose) warning("Can't extract residuals from model.") res <- NULL } res } .weighted_residuals <- function(x, verbose = TRUE) { w <- get_weights(x, null_as_ones = TRUE) tryCatch( { res_resp <- as.vector(get_residuals( x, weighted = FALSE, type = "response", verbose = FALSE )) res_dev <- as.vector(get_residuals( x, weighted = FALSE, type = "deviance", verbose = FALSE )) if (!is.null(w) && !is.null(res_dev) && !all(w == 1)) { if (!is.null(res_resp) && identical(res_resp, res_dev)) { res_dev <- res_dev * w^0.5 } res_dev <- res_dev[!is.na(w) & w != 0] } else if (verbose) { if (is.null(w)) { warning(format_message("Can't calculate weighted residuals from model. Model doesn't seem to have weights."), call. = FALSE) } else if (is.null(res_dev)) { warning(format_message("Can't calculate weighted residuals from model. Could not extract deviance-residuals."), call. = FALSE) } } res_dev }, error = function(e) { if (verbose) { warning("Can't calculate weighted residuals from model.", call. = FALSE) } NULL } ) } #' @export get_residuals.afex_aov <- function(x, weighted = FALSE, verbose = TRUE, ...) { suppressMessages(stats::residuals(x, ...)) } insight/R/get_statistic.R0000644000175000017500000015067614135277361015273 0ustar nileshnilesh#' @title Get statistic associated with estimates #' @description Returns the statistic (*t*, `z`, ...) for model #' estimates. 
In most cases, this is the related column from #' `coef(summary())`. #' @name get_statistic #' #' @param x A model. #' @param column_index For model objects that have no defined #' `get_statistic()` method yet, the default method is called. This #' method tries to extract the statistic column from `coef(summary())`, #' where the index of the column that is being pulled is `column_index`. #' Defaults to 3, which is the default statistic column for most models' #' summary-output. #' @param component Should all parameters, parameters for the conditional model, #' or for the zero-inflated part of the model be returned? Applies to models #' with zero-inflated component. `component` may be one of #' `"conditional"`, `"zi"`, `"zero-inflated"` or `"all"` #' (default). For models with smooth terms, `component = "smooth_terms"` #' is also possible. May be abbreviated. Note that the *conditional* #' component is also called *count* or *mean* component, depending #' on the model. #' @param robust Logical, if `TRUE`, test statistic based on robust #' standard errors is returned. #' @param adjust Character value naming the method used to adjust p-values or #' confidence intervals. See `?emmeans::summary.emmGrid` for details. #' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`). #' Currently only applies to objects of class `emmGrid`. #' @param ... Currently not used. #' @inheritParams get_parameters #' @inheritParams get_parameters.emmGrid #' #' @return A data frame with the model's parameter names and the related test #' statistic. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_statistic(m) #' @export get_statistic <- function(x, ...) { UseMethod("get_statistic") } # Default models ---------------------------------------------------------- #' @rdname get_statistic #' @export get_statistic.default <- function(x, column_index = 3, verbose = TRUE, ...) { cs <- stats::coef(summary(x)) if (column_index > ncol(cs)) { if (isTRUE(verbose)) { warning("Could not access test statistic of model parameters.", call. = FALSE) } return(NULL) } # edge cases: check for NULL params <- rownames(cs) if (is.null(params)) { params <- paste(1:nrow(cs)) } out <- data.frame( Parameter = params, Statistic = as.vector(cs[, column_index]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.summary.lm <- function(x, ...) { cs <- stats::coef(x) out <- data.frame( Parameter = rownames(cs), Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.mlm <- function(x, ...) { cs <- stats::coef(summary(x)) out <- lapply(names(cs), function(i) { params <- cs[[i]] data.frame( Parameter = rownames(params), Statistic = as.vector(params[, 3]), Response = gsub("^Response (.*)", "\\1", i), stringsAsFactors = FALSE, row.names = NULL ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, out)) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.lme <- function(x, ...) { get_statistic.default(x, column_index = 4) } #' @export get_statistic.lmerModLmerTest <- get_statistic.lme #' @export get_statistic.merModList <- function(x, ...) 
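# Illustrative usage sketch (added; not part of the original source) ----
# The default method pulls the statistic column from coef(summary(x));
# column_index can be adjusted for classes whose summary stores the statistic
# in a different column. Base R only:
#
#   m <- lm(mpg ~ wt + cyl, data = mtcars)
#   get_statistic(m)                    # t values, column 3 of coef(summary(m))
#   get_statistic(m, column_index = 3)  # equivalent, made explicit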
{ s <- suppressWarnings(summary(x)) out <- data.frame( Parameter = s$fe$term, Statistic = s$fe$statistic, stringsAsFactors = FALSE ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.afex_aov <- function(x, ...) { out <- data.frame( Parameter = rownames(x$anova_table), Statistic = x$anova_table$"F", stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.plm <- get_statistic.default #' @export get_statistic.maxLik <- get_statistic.default #' @export get_statistic.glmmadmb <- get_statistic.default #' @export get_statistic.lm_robust <- get_statistic.default #' @export get_statistic.geeglm <- get_statistic.default #' @export get_statistic.truncreg <- get_statistic.default #' @export get_statistic.tobit <- get_statistic.default #' @export get_statistic.censReg <- get_statistic.default #' @export get_statistic.negbin <- get_statistic.default #' @export get_statistic.feis <- get_statistic.default # Models with zero-inflation component -------------------------------------- #' @export get_statistic.mhurdle <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) s <- summary(x) params <- get_parameters(x, component = "all") stats <- data.frame( Parameter = rownames(s$coefficients), Statistic = as.vector(s$coefficients[, 3]), Component = NA, stringsAsFactors = FALSE ) cond_pars <- which(grepl("^h2\\.", rownames(s$coefficients))) zi_pars <- which(grepl("^h1\\.", rownames(s$coefficients))) ip_pars <- which(grepl("^h3\\.", rownames(s$coefficients))) aux_pars <- (1:length(rownames(s$coefficients)))[-c(cond_pars, zi_pars, ip_pars)] stats$Component[cond_pars] <- "conditional" stats$Component[zi_pars] <- "zero_inflated" stats$Component[ip_pars] <- "infrequent_purchase" stats$Component[aux_pars] <- "auxiliary" params <- merge(params, stats, sort = FALSE) params <- .filter_component(params, component)[intersect(c("Parameter", "Statistic", "Component"), colnames(params))] params <- .remove_backticks_from_parameter_names(params) attr(params, "statistic") <- find_statistic(x) params } #' @rdname get_statistic #' @export get_statistic.glmmTMB <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), ...) { component <- match.arg(component) cs <- .compact_list(stats::coef(summary(x))) out <- lapply(names(cs), function(i) { data.frame( Parameter = find_parameters(x, effects = "fixed", component = i, flatten = TRUE), Statistic = as.vector(cs[[i]][, 3]), Component = i, stringsAsFactors = FALSE, row.names = NULL ) }) stat <- do.call(rbind, out) stat$Component <- .rename_values(stat$Component, "cond", "conditional") stat$Component <- .rename_values(stat$Component, "zi", "zero_inflated") stat$Component <- .rename_values(stat$Component, "disp", "dispersion") stat <- .filter_component(stat, component) stat <- .remove_backticks_from_parameter_names(stat) attr(stat, "statistic") <- find_statistic(x) stat } #' @export get_statistic.zeroinfl <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) 
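# Illustrative sketch (added; not part of the original source) ----
# For models with a zero-inflation part, the "Component" column separates the
# count and zero-inflation statistics, and `component` filters them. A hedged
# example, assuming 'glmmTMB' and its Salamanders data are available:
#
#   m <- glmmTMB::glmmTMB(count ~ mined + (1 | site), ziformula = ~ mined,
#                         family = poisson(), data = glmmTMB::Salamanders)
#   get_statistic(m, component = "zero_inflated")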
{ component <- match.arg(component) cs <- .compact_list(stats::coef(summary(x))) out <- lapply(names(cs), function(i) { comp <- ifelse(i == "count", "conditional", "zi") stats <- cs[[i]] # remove log(theta) theta <- grepl("Log(theta)", rownames(stats), fixed = TRUE) if (any(theta)) { stats <- stats[!theta, ] } data.frame( Parameter = find_parameters(x, effects = "fixed", component = comp, flatten = TRUE ), Statistic = as.vector(stats[, 3]), Component = comp, stringsAsFactors = FALSE, row.names = NULL ) }) stat <- do.call(rbind, out) stat$Component <- .rename_values(stat$Component, "cond", "conditional") stat$Component <- .rename_values(stat$Component, "zi", "zero_inflated") stat <- .filter_component(stat, component) stat <- .remove_backticks_from_parameter_names(stat) attr(stat, "statistic") <- find_statistic(x) stat } #' @export get_statistic.hurdle <- get_statistic.zeroinfl #' @export get_statistic.zerocount <- get_statistic.zeroinfl #' @export get_statistic.MixMod <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) s <- summary(x) cs <- list(s$coef_table, s$coef_table_zi) names(cs) <- c("conditional", "zero_inflated") cs <- .compact_list(cs) out <- lapply(names(cs), function(i) { data.frame( Parameter = find_parameters(x, effects = "fixed", component = i, flatten = TRUE ), Statistic = as.vector(cs[[i]][, 3]), Component = i, stringsAsFactors = FALSE, row.names = NULL ) }) stat <- .filter_component(do.call(rbind, out), component) stat <- .remove_backticks_from_parameter_names(stat) attr(stat, "statistic") <- find_statistic(x) stat } # gam models -------------------------------------------------------------- #' @export get_statistic.Gam <- function(x, ...) { p.aov <- stats::na.omit(summary(x)$parametric.anova) out <- data.frame( Parameter = rownames(p.aov), Statistic = as.vector(p.aov[, 4]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.gam <- function(x, ...) { cs <- summary(x)$p.table cs.smooth <- summary(x)$s.table out <- data.frame( Parameter = c(rownames(cs), rownames(cs.smooth)), Statistic = c(as.vector(cs[, 3]), as.vector(cs.smooth[, 3])), Component = c(rep("conditional", nrow(cs)), rep("smooth_terms", nrow(cs.smooth))), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.scam <- get_statistic.gam #' @export get_statistic.SemiParBIV <- function(x, ...) { s <- summary(x) s <- .compact_list(s[grepl("^tableP", names(s))]) params <- do.call(rbind, lapply(1:length(s), function(i) { out <- as.data.frame(s[[i]]) out$Parameter <- rownames(out) out$Component <- paste0("Equation", i) out })) colnames(params)[3] <- "Statistic" rownames(params) <- NULL out <- .remove_backticks_from_parameter_names(params[c("Parameter", "Statistic", "Component")]) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.gamm <- function(x, ...) { x <- x$gam class(x) <- c("gam", "lm", "glm") get_statistic.gam(x, ...) } #' @export get_statistic.list <- function(x, ...) { if ("gam" %in% names(x)) { x <- x$gam class(x) <- c("gam", "lm", "glm") get_statistic.gam(x, ...) } } #' @export get_statistic.gamlss <- function(x, ...) 
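# Illustrative sketch (added; not part of the original source) ----
# For GAMs, parametric terms keep their usual t/z statistics while smooth terms
# get F statistics; the "Component" column tells them apart. Assumes 'mgcv':
#
#   m <- mgcv::gam(mpg ~ s(wt) + cyl, data = mtcars)
#   get_statistic(m)   # Component: "conditional" vs. "smooth_terms"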
{ parms <- get_parameters(x) utils::capture.output(cs <- summary(x)) out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(cs[, 3]), Component = parms$Component, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.vglm <- function(x, ...) { if (!requireNamespace("VGAM", quietly = TRUE)) { stop("Package 'VGAM' needed for this function to work. Please install it.") } cs <- VGAM::coef(VGAM::summary(x)) out <- data.frame( Parameter = rownames(cs), Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.vgam <- function(x, ...) { params <- get_parameters(x) out <- data.frame( Parameter = names(x@nl.chisq), Statistic = x@nl.chisq, stringsAsFactors = FALSE, row.names = NULL ) out <- merge(params, out, all.x = TRUE) out <- out[order(out$Parameter, params$Parameter), ] out <- .remove_backticks_from_parameter_names(out[c("Parameter", "Statistic", "Component")]) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.cgam <- function(x, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) sc <- summary(x) stat <- as.vector(sc$coefficients[, 3]) if (!is.null(sc$coefficients2)) stat <- c(stat, rep(NA, nrow(sc$coefficients2))) params <- get_parameters(x, component = "all") out <- data.frame( Parameter = params$Parameter, Statistic = stat, Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } # Survival models ------------------------------------------ #' @export get_statistic.coxph <- function(x, ...) { get_statistic.default(x, column_index = 4) } #' @export get_statistic.svy_vglm <- function(x, verbose = TRUE, ...) { cs <- summary(x)$coeftable out <- data.frame( Parameter = find_parameters(x, flatten = TRUE), Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.coxr <- function(x, ...) { parms <- get_parameters(x) vc <- get_varcov(x) se <- sqrt(diag(vc)) out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.crr <- get_statistic.coxr #' @export get_statistic.coxme <- function(x, ...) { beta <- x$coefficients out <- NULL if (length(beta) > 0) { out <- data.frame( Parameter = names(beta), Statistic = as.vector(beta / sqrt(diag(stats::vcov(x)))), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) } out } #' @export get_statistic.riskRegression <- function(x, ...) { junk <- utils::capture.output(cs <- stats::coef(x)) out <- data.frame( Parameter = as.vector(cs[, 1]), Statistic = as.numeric(cs[, "z"]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.survreg <- function(x, ...) 
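# Illustrative sketch (added; not part of the original source) ----
# For Cox models, the z statistic sits in column 4 of coef(summary()), which is
# why the coxph method above calls the default with column_index = 4. Assumes
# the 'survival' package:
#
#   m <- survival::coxph(survival::Surv(time, status) ~ age + sex,
#                        data = survival::lung)
#   get_statistic(m)   # Wald z statistics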
{ parms <- get_parameters(x) s <- summary(x) out <- data.frame( Parameter = parms$Parameter, Statistic = s$table[, 3], stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.BBmm <- function(x, ...) { parms <- get_parameters(x) s <- summary(x) out <- data.frame( Parameter = parms$Parameter, Statistic = s$fixed.coefficients[, 3], stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.flexsurvreg <- function(x, ...) { parms <- get_parameters(x) se <- x$res[, "se"] out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.aareg <- function(x, ...) { sc <- summary(x) parms <- get_parameters(x) out <- data.frame( Parameter = parms$Parameter, Statistic = unname(sc$test.statistic), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } # Ordinal models -------------------------------------------------- #' @rdname get_statistic #' @export get_statistic.clm2 <- function(x, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) stats <- stats::coef(summary(x)) n_intercepts <- length(x$xi) n_location <- length(x$beta) n_scale <- length(x$zeta) out <- data.frame( Parameter = rownames(stats), Statistic = unname(stats[, "z value"]), Component = c(rep("conditional", times = n_intercepts + n_location), rep("scale", times = n_scale)), stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.clmm2 <- get_statistic.clm2 #' @export get_statistic.mvord <- function(x, component = c("all", "conditional", "thresholds", "correlation"), ...) { component <- match.arg(component) junk <- utils::capture.output(s <- summary(x)) # intercepts thresholds thresholds <- as.data.frame(s$thresholds) thresholds$Parameter <- rownames(thresholds) thresholds$Response <- gsub("(.*)\\s(.*)", "\\1", thresholds$Parameter) # coefficients coefficients <- as.data.frame(s$coefficients) coefficients$Parameter <- rownames(coefficients) coefficients$Response <- gsub("(.*)\\s(.*)", "\\2", coefficients$Parameter) if (!all(coefficients$Response %in% thresholds$Response)) { resp <- unique(thresholds$Response) for (i in coefficients$Response) { coefficients$Response[coefficients$Response == i] <- resp[grepl(paste0(i, "$"), resp)] } } params <- data.frame( Parameter = c(thresholds$Parameter, coefficients$Parameter), Statistic = c(unname(thresholds[, "z value"]), unname(coefficients[, "z value"])), Component = c(rep("thresholds", nrow(thresholds)), rep("conditional", nrow(coefficients))), Response = c(thresholds$Response, coefficients$Response), stringsAsFactors = FALSE, row.names = NULL ) params_error <- data.frame( Parameter = rownames(s$error.structure), Statistic = unname(s$error.structure[, "z value"]), Component = "correlation", Response = NA, stringsAsFactors = FALSE, row.names = NULL ) params <- rbind(params, params_error) if (.n_unique(params$Response) == 1) { params$Response <- NULL } if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } attr(params, "statistic") <- find_statistic(x) .remove_backticks_from_parameter_names(params) } #' @export get_statistic.glmm <- function(x, effects = c("all", "fixed", "random"), ...) 
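# Illustrative sketch (added; not part of the original source) ----
# For ordinal location-scale models, statistics of the scale part can be
# selected via `component`. A hedged example, assuming the 'ordinal' package
# and its wine data:
#
#   m <- ordinal::clm2(rating ~ temp, scale = ~ contact, data = ordinal::wine)
#   get_statistic(m, component = "scale")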
{ effects <- match.arg(effects) s <- summary(x) out <- get_parameters(x, effects = "all") out$Statistic <- c(s$coefmat[, 3], s$nucoefmat[, 3]) out <- out[, c("Parameter", "Statistic", "Effects")] if (effects != "all") { out <- out[out$Effects == effects, , drop = FALSE] out$Effects <- NULL } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.mixor <- function(x, effects = c("all", "fixed", "random"), ...) { stats <- x$Model[, "z value"] effects <- match.arg(effects) parms <- get_parameters(x, effects = effects) out <- data.frame( Parameter = parms$Parameter, Statistic = stats[parms$Parameter], Effects = parms$Effects, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.multinom <- function(x, ...) { parms <- get_parameters(x) stderr <- summary(x)$standard.errors if (is.matrix(stderr)) { se <- c() for (i in 1:nrow(stderr)) { se <- c(se, as.vector(stderr[i, ])) } } else { se <- as.vector(stderr) } out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, stringsAsFactors = FALSE, row.names = NULL ) if ("Response" %in% colnames(parms)) { out$Response <- parms$Response } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.brmultinom <- get_statistic.multinom #' @export get_statistic.bracl <- function(x, ...) { parms <- get_parameters(x) out <- data.frame( Parameter = parms$Parameter, Statistic = stats::coef(summary(x))[, "z value"], Response = parms$Response, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.mlogit <- function(x, ...) { if (requireNamespace("mlogit", quietly = TRUE)) { cs <- stats::coef(summary(x)) out <- data.frame( Parameter = rownames(cs), Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } else { NULL } } # mfx models ------------------------------------------------------- #' @rdname get_statistic #' @export get_statistic.betamfx <- function(x, component = c("all", "conditional", "precision", "marginal"), ...) { component <- match.arg(component) parms <- get_parameters(x, component = "all", ...) cs <- do.call(rbind, stats::coef(summary(x$fit))) stat <- c(as.vector(x$mfxest[, 3]), as.vector(cs[, 3])) out <- data.frame( Parameter = parms$Parameter, Statistic = stat, Component = parms$Component, stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.betaor <- function(x, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) parms <- get_parameters(x, component = "all", ...) cs <- do.call(rbind, stats::coef(summary(x$fit))) out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(cs[, 3]), Component = parms$Component, stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @rdname get_statistic #' @export get_statistic.logitmfx <- function(x, component = c("all", "conditional", "marginal"), ...) { parms <- get_parameters(x, component = "all", ...) 
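# Illustrative sketch (added; not part of the original source) ----
# For multinomial models the statistic is computed as estimate / standard error
# (a Wald z), returned in long format with a "Response" column. Assumes 'nnet':
#
#   m <- nnet::multinom(Species ~ Sepal.Width, data = iris, trace = FALSE)
#   get_statistic(m)   # columns: Parameter, Statistic, Response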
cs <- stats::coef(summary(x$fit)) stat <- c(as.vector(x$mfxest[, 3]), as.vector(cs[, 3])) out <- data.frame( Parameter = parms$Parameter, Statistic = stat, Component = parms$Component, stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.poissonmfx <- get_statistic.logitmfx #' @export get_statistic.negbinmfx <- get_statistic.logitmfx #' @export get_statistic.probitmfx <- get_statistic.logitmfx #' @export get_statistic.logitor <- function(x, ...) { get_statistic.default(x$fit) } #' @export get_statistic.poissonirr <- get_statistic.logitor #' @export get_statistic.negbinirr <- get_statistic.logitor # Other models ------------------------------------------------------- #' @export get_statistic.pgmm <- function(x, component = c("conditional", "all"), verbose = TRUE, ...) { component <- match.arg(component) cs <- stats::coef(summary(x, time.dummies = TRUE, robust = FALSE)) out <- data.frame( Parameter = row.names(cs), Statistic = as.vector(cs[, 3]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) out$Component[out$Parameter %in% x$args$namest] <- "time_dummies" if (component == "conditional") { out <- out[out$Component == "conditional", ] out <- .remove_column(out, "Component") } out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.selection <- function(x, component = c("all", "selection", "outcome", "auxiliary"), ...) { component <- match.arg(component) s <- summary(x) rn <- row.names(s$estimate) estimates <- as.data.frame(s$estimate, row.names = FALSE) params <- data.frame( Parameter = rn, Statistic = estimates[[3]], Component = "auxiliary", stringsAsFactors = FALSE, row.names = NULL ) params$Component[s$param$index$betaS] <- "selection" params$Component[s$param$index$betaO] <- "outcome" if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } params <- .remove_backticks_from_parameter_names(params) attr(params, "statistic") <- find_statistic(x) params } #' @export get_statistic.lavaan <- function(x, ...) { # installed? check_if_installed("lavaan") params <- lavaan::parameterEstimates(x) params$parameter <- paste0(params$lhs, params$op, params$rhs) params$comp <- NA params$comp[params$op == "~"] <- "regression" params$comp[params$op == "=~"] <- "latent" params$comp[params$op == "~~"] <- "residual" params$comp[params$op == "~1"] <- "intercept" params <- data.frame( Parameter = params$parameter, Statistic = params$z, Component = params$comp, stringsAsFactors = FALSE ) params <- .remove_backticks_from_parameter_names(params) attr(params, "statistic") <- find_statistic(x) params } #' @export get_statistic.model_fit <- function(x, ...) { get_statistic(x$fit, ...) } #' @export get_statistic.Sarlm <- function(x, ...) { s <- summary(x) # add rho, if present if (!is.null(s$rho)) { rho <- as.numeric(s$rho) / as.numeric(s$rho.se) } else { rho <- NULL } stat <- data.frame( Parameter = find_parameters(x, flatten = TRUE), Statistic = c(rho, as.vector(s$Coef[, 3])), stringsAsFactors = FALSE, row.names = NULL ) stat <- .remove_backticks_from_parameter_names(stat) attr(stat, "statistic") <- find_statistic(x) stat } #' @rdname get_statistic #' @export get_statistic.mjoint <- function(x, component = c("all", "conditional", "survival"), ...) 
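# Illustrative sketch (added; not part of the original source) ----
# For 'mfx' models, statistics are returned both for the marginal effects and
# for the underlying fit, distinguished by the "Component" column. A hedged
# example, assuming the 'mfx' package:
#
#   m <- mfx::logitmfx(am ~ hp + wt, data = mtcars)
#   get_statistic(m)                          # marginal + conditional parts
#   get_statistic(m, component = "marginal")  # marginal effects only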
{ component <- match.arg(component) s <- summary(x) params <- rbind( data.frame( Parameter = rownames(s$coefs.long), Statistic = unname(s$coefs.long[, 3]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ), data.frame( Parameter = rownames(s$coefs.surv), Statistic = unname(s$coefs.surv[, 3]), Component = "survival", stringsAsFactors = FALSE, row.names = NULL ) ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } attr(params, "statistic") <- find_statistic(x) params } #' @export get_statistic.Rchoice <- function(x, verbose = TRUE, ...) { cs <- summary(x)$CoefTable out <- data.frame( Parameter = find_parameters(x, flatten = TRUE), Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.garch <- function(x, verbose = TRUE, ...) { cs <- summary(x)$coef out <- data.frame( Parameter = find_parameters(x, flatten = TRUE), Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.ergm <- function(x, verbose = TRUE, ...) { get_statistic.default(x = x, column_index = 4, verbose = verbose, ...) } #' @export get_statistic.btergm <- function(x, verbose = TRUE, ...) { params <- x@coef bootstraps <- x@boot$t # standard error sdev <- sapply(1:ncol(bootstraps), function(i) { cur <- (bootstraps[, i] - params[i])^2 sqrt(sum(cur) / length(cur)) }) stat <- (0 - colMeans(bootstraps)) / sdev out <- data.frame( Parameter = names(stat), Statistic = stat, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.ridgelm <- function(x, ...) { NULL } #' @export get_statistic.lmodel2 <- function(x, ...) { NULL } #' @export get_statistic.ivFixed <- get_statistic.coxr #' @export get_statistic.ivprobit <- function(x, ...) { out <- data.frame( Parameter = x$names, Statistic = as.vector(x$tval), stringsAsFactors = FALSE ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.HLfit <- function(x, ...) { utils::capture.output(s <- summary(x)) out <- data.frame( Parameter = rownames(s$beta_table), Statistic = as.vector(s$beta_table[, "t-value"]), stringsAsFactors = FALSE ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.margins <- function(x, ...) { out <- data.frame( Parameter = get_parameters(x)$Parameter, Statistic = as.vector(summary(x)$z), stringsAsFactors = FALSE ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.lqmm <- function(x, ...) { cs <- summary(x, ...) params <- get_parameters(x) if (is.list(cs$tTable)) { stats <- do.call(rbind, cs$tTable) params$Statistic <- params$Estimate / stats[, 2] params <- params[c("Parameter", "Statistic", "Component")] } else { params$Statistic <- params$Estimate / cs$tTable[, 2] params <- params[c("Parameter", "Statistic")] } out <- .remove_backticks_from_parameter_names(params) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.lqm <- get_statistic.lqmm #' @export get_statistic.mipo <- function(x, ...) 
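# Illustrative sketch (added; not part of the original source) ----
# For objects from the 'margins' package, the statistic refers to the average
# marginal effects rather than the raw coefficients. A hedged example, assuming
# the 'margins' package:
#
#   m <- glm(am ~ hp + wt, data = mtcars, family = binomial())
#   mar <- margins::margins(m)
#   get_statistic(mar)   # z statistics of the average marginal effects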
{ params <- data.frame( Parameter = as.vector(summary(x)$term), Statistic = as.vector(summary(x)$statistic), stringsAsFactors = FALSE ) out <- .remove_backticks_from_parameter_names(params) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.mira <- function(x, ...) { get_statistic(x$analyses[[1]], ...) } #' @export get_statistic.mle2 <- function(x, ...) { if (!requireNamespace("bbmle", quietly = TRUE)) { stop("Package `bbmle` needs to be installed to extract test statistic.", call. = FALSE) } s <- bbmle::summary(x) params <- data.frame( Parameter = names(s@coef[, 3]), Statistic = unname(s@coef[, 3]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(params) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.mle <- get_statistic.mle2 #' @export get_statistic.glht <- function(x, ...) { s <- summary(x) alt <- switch(x$alternative, two.sided = "==", less = ">=", greater = "<=" ) out <- data.frame( Parameter = paste(names(s$test$coefficients), alt, x$rhs), Statistic = unname(s$test$tstat), stringsAsFactors = FALSE ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @rdname get_statistic #' @export get_statistic.emmGrid <- function(x, ci = .95, adjust = "none", merge_parameters = FALSE, ...) { s <- summary(x, level = ci, adjust = adjust, infer = TRUE) stat <- s[["t.ratio"]] # 2nd try if (.is_empty_object(stat)) { stat <- s[["z.ratio"]] } # quit if (.is_empty_object(stat)) { return(NULL) } estimate_pos <- which(colnames(s) == attr(s, "estName")) if (isTRUE(merge_parameters)) { params <- get_parameters(x, merge_parameters = TRUE)["Parameter"] } else { params <- s[, seq_len(estimate_pos - 1), drop = FALSE] } out <- data.frame( params, Statistic = as.vector(stat), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.emm_list <- function(x, ci = .95, adjust = "none", ...) { params <- get_parameters(x) s <- summary(x, level = ci, adjust = adjust, infer = TRUE) stat <- lapply(s, "[[", "t.ratio") # 2nd try if (.is_empty_object(stat)) { stat <- lapply(s, "[[", "z.ratio") } # quit if (.is_empty_object(stat)) { return(NULL) } stat <- unlist(stat) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(stat), Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.robmixglm <- function(x, ...) { cs <- stats::coef(summary(x)) out <- data.frame( Parameter = rownames(cs), Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) out <- out[!is.na(out$Statistic), ] out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.averaging <- function(x, component = c("conditional", "full"), ...) { component <- match.arg(component) params <- get_parameters(x, component = component) if (component == "full") { s <- summary(x)$coefmat.full } else { s <- summary(x)$coefmat.subset } out <- data.frame( Parameter = params$Parameter, Statistic = s[, 4], stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.bayesx <- function(x, ...) 
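# Illustrative sketch (added; not part of the original source) ----
# For emmeans objects, the `ci` and `adjust` arguments are forwarded to
# summary(), and the t.ratio (or z.ratio) column is returned. Assumes the
# 'emmeans' package:
#
#   m <- lm(breaks ~ tension, data = warpbreaks)
#   em <- emmeans::emmeans(m, "tension")
#   get_statistic(em, ci = 0.95, adjust = "none")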
{ out <- data.frame( Parameter = find_parameters(x, component = "conditional", flatten = TRUE), Statistic = x$fixed.effects[, 3], stringsAsFactors = FALSE ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.Arima <- function(x, ...) { params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(params$Estimate / sqrt(diag(get_varcov(x)))), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.wbm <- function(x, ...) { s <- summary(x) statistic_column <- if ("t val." %in% c( colnames(s$within_table), colnames(s$between_table), colnames(s$ints_table) )) { "t val." } else { "z val." } stat <- c( s$within_table[, statistic_column], s$between_table[, statistic_column], s$ints_table[, statistic_column] ) params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(stat), Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.wbgee <- get_statistic.wbm #' @export get_statistic.cpglmm <- function(x, ...) { # installed? check_if_installed("cplm") stats <- cplm::summary(x)$coefs params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(stats[, "t value"]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.sem <- function(x, ...) { if (!.is_semLme(x)) { return(NULL) } params <- get_parameters(x, effects = "fixed") if (is.null(x$se)) { warning(format_message("Model has no standard errors. Please fit model again with bootstrapped standard errors."), call. = FALSE) return(NULL) } out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(x$coef / x$se), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.cpglm <- function(x, ...) { # installed? check_if_installed("cplm") junk <- utils::capture.output(stats <- cplm::summary(x)$coefficients) params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(stats[, "t value"]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.zcpglm <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) { # installed? check_if_installed("cplm") component <- match.arg(component) junk <- utils::capture.output(stats <- cplm::summary(x)$coefficients) params <- get_parameters(x) tweedie <- data.frame( Parameter = params$Parameter[params$Component == "conditional"], Statistic = as.vector(stats$tweedie[, "z value"]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) zero <- data.frame( Parameter = params$Parameter[params$Component == "zero_inflated"], Statistic = as.vector(stats$zero[, "z value"]), Component = "zero_inflated", stringsAsFactors = FALSE, row.names = NULL ) out <- .filter_component(rbind(tweedie, zero), component) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.manova <- function(x, ...) 
{ stats <- as.data.frame(summary(x)$stats) out <- data.frame( Parameter = rownames(stats), Statistic = as.vector(stats[["approx F"]]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.maov <- function(x, ...) { s <- summary(x) out <- do.call(rbind, lapply(names(s), function(i) { stats <- s[[i]] missing <- is.na(stats[["F value"]]) data.frame( Parameter = rownames(stats)[!missing], Statistic = as.vector(stats[["F value"]][!missing]), Response = gsub("\\s*Response ", "", i), stringsAsFactors = FALSE, row.names = NULL ) })) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.MANOVA <- function(x, ...) { stats <- as.data.frame(x$WTS) out <- data.frame( Parameter = rownames(stats), Statistic = as.vector(stats[[1]]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.RM <- get_statistic.MANOVA #' @export get_statistic.rq <- function(x, ...) { stat <- tryCatch( { cs <- stats::coef(summary(x)) cs[, "t value"] }, error = function(e) { cs <- stats::coef(summary(x, covariance = TRUE)) cs[, "t value"] } ) params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = stat, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.rqs <- function(x, ...) { stat <- tryCatch( { s <- suppressWarnings(summary(x, covariance = TRUE)) cs <- do.call(rbind, lapply(s, stats::coef)) cs[, "t value"] }, error = function(e) { NULL } ) params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = stat, Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.crq <- function(x, ...) { sc <- summary(x) params <- get_parameters(x) if (all(unlist(lapply(sc, is.list)))) { list_sc <- lapply(sc, function(i) { .x <- as.data.frame(i) .x$Parameter <- rownames(.x) .x }) out <- do.call(rbind, list_sc) out <- data.frame( Parameter = params$Parameter, Statistic = out$coefficients.T.Value, Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) } else { out <- data.frame( Parameter = params$Parameter, Statistic = unname(sc$coefficients[, 5]), stringsAsFactors = FALSE, row.names = NULL ) } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.crqs <- get_statistic.crq #' @export get_statistic.nlrq <- get_statistic.rq #' @export get_statistic.rqss <- function(x, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) cs <- summary(x) stat <- c(as.vector(cs$coef[, "t value"]), as.vector(cs$qsstab[, "F value"])) params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = unname(stat), Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.systemfit <- function(x, ...) 
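# Illustrative sketch (added; not part of the original source) ----
# For quantile regression, t values are taken from coef(summary(x)), falling
# back to a covariance-based summary when needed. Assumes the 'quantreg'
# package:
#
#   m <- quantreg::rq(mpg ~ wt, data = mtcars, tau = 0.5)
#   get_statistic(m)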
{ cf <- stats::coef(summary(x)) f <- find_formula(x) system_names <- names(f) parameter_names <- row.names(cf) out <- lapply(system_names, function(i) { pattern <- paste0("^", i, "_(.*)") params <- grepl(pattern, parameter_names) data.frame( Parameter = gsub(pattern, "\\1", parameter_names[params]), Statistic = as.vector(cf[params, 3]), Component = i, stringsAsFactors = FALSE ) }) out <- do.call(rbind, out) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.bigglm <- function(x, ...) { parms <- get_parameters(x) cs <- summary(x)$mat se <- as.vector(cs[, 4]) out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.biglm <- function(x, ...) { parms <- get_parameters(x) cs <- summary(x)$mat se <- as.vector(cs[, 4]) out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.LORgee <- function(x, ...) { out <- get_statistic.default(x) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.crch <- function(x, ...) { cs <- do.call(rbind, stats::coef(summary(x), model = "full")) params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.fixest <- function(x, ...) { cs <- summary(x)$coeftable params <- get_parameters(x) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(cs[, 3]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.glmx <- function(x, component = c("all", "conditional", "extra"), ...) { component <- match.arg(component) cf <- stats::coef(summary(x)) parms <- get_parameters(x) out <- rbind( data.frame( Parameter = parms$Parameter[parms$Component == "conditional"], Statistic = unname(cf$glm[, 3]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ), data.frame( Parameter = parms$Parameter[parms$Component == "extra"], Statistic = cf$extra[, 3], Component = "extra", stringsAsFactors = FALSE, row.names = NULL ) ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @rdname get_statistic #' @export get_statistic.gee <- function(x, robust = FALSE, ...) { parms <- get_parameters(x) cs <- stats::coef(summary(x)) if (isTRUE(robust)) { stats <- as.vector(cs[, "Robust z"]) } else { stats <- as.vector(cs[, "Naive z"]) } out <- data.frame( Parameter = parms$Parameter, Statistic = stats, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.complmrob <- function(x, ...) { parms <- get_parameters(x) stat <- summary(x)$stats out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(stat[, "t value"]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.logistf <- function(x, ...) 
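# Illustrative sketch (added; not part of the original source) ----
# For gee models, `robust = TRUE` switches from the naive to the robust
# (sandwich) z statistic. A hedged example, assuming the 'gee' package:
#
#   m <- gee::gee(breaks ~ tension, id = wool, data = warpbreaks)
#   get_statistic(m)                 # naive z
#   get_statistic(m, robust = TRUE)  # robust z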
{ parms <- get_parameters(x) utils::capture.output(s <- summary(x)) out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(stats::qchisq(1 - s$prob, df = 1)), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.epi.2by2 <- function(x, ...) { stat <- x$massoc.detail$chi2.strata.uncor out <- data.frame( Parameter = "Chi2", Statistic = stat$test.statistic, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.svyglm.nb <- function(x, ...) { if (!isNamespaceLoaded("survey")) { requireNamespace("survey", quietly = TRUE) } parms <- get_parameters(x) se <- sqrt(diag(stats::vcov(x, stderr = "robust"))) out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.svyglm.zip <- get_statistic.svyglm.nb #' @export get_statistic.svyglm <- function(x, ...) { parms <- get_parameters(x) vc <- get_varcov(x) se <- sqrt(diag(vc)) out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.svyolr <- get_statistic.svyglm #' @rdname get_statistic #' @export get_statistic.betareg <- function(x, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) parms <- get_parameters(x) cs <- do.call(rbind, stats::coef(summary(x))) se <- as.vector(cs[, 2]) out <- data.frame( Parameter = parms$Parameter, Statistic = parms$Estimate / se, Component = parms$Component, stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @rdname get_statistic #' @export get_statistic.DirichletRegModel <- function(x, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) parms <- get_parameters(x) junk <- utils::capture.output(cs <- summary(x)$coef.mat) out <- data.frame( Parameter = parms$Parameter, Statistic = unname(cs[, "z value"]), Response = parms$Response, stringsAsFactors = FALSE, row.names = NULL ) if (!is.null(parms$Component)) { out$Component <- parms$Component } else { component <- "all" } if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.glimML <- function(x, ...) { # installed? check_if_installed("aod") parms <- get_parameters(x) s <- methods::slot(aod::summary(x), "Coef") out <- data.frame( Parameter = parms$Parameter, Statistic = s[, 3], stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.lrm <- function(x, ...) { parms <- get_parameters(x) stat <- stats::coef(x) / sqrt(diag(stats::vcov(x))) out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(stat), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.ols <- get_statistic.lrm #' @export get_statistic.rms <- get_statistic.lrm #' @export get_statistic.psm <- get_statistic.lrm #' @export get_statistic.orm <- function(x, ...) 
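# Illustrative sketch (added; not part of the original source) ----
# For beta regression, statistics of the precision (phi) submodel can be
# selected via `component`. A hedged example, assuming the 'betareg' package
# and its GasolineYield data:
#
#   m <- betareg::betareg(yield ~ batch + temp | temp,
#                         data = betareg::GasolineYield)
#   get_statistic(m, component = "precision")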
{ parms <- get_parameters(x) vc <- stats::vcov(x) parms <- parms[parms$Parameter %in% dimnames(vc)[[1]], ] stat <- parms$Estimate / sqrt(diag(vc)) out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(stat), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.rma <- function(x, ...) { parms <- get_parameters(x) stat <- x$zval out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(stat), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.metaplus <- function(x, ...) { params <- get_parameters(x) ci_low <- as.vector(x$results[, "95% ci.lb"]) ci_high <- as.vector(x$results[, "95% ci.ub"]) cis <- apply(cbind(ci_low, ci_high), MARGIN = 1, diff) se <- cis / (2 * stats::qnorm(.975)) out <- data.frame( Parameter = params$Parameter, Statistic = as.vector(params$Estimate / se), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.bife <- function(x, ...) { parms <- get_parameters(x) cs <- summary(x) out <- data.frame( Parameter = parms$Parameter, Statistic = as.vector(cs$cm[, 3]), stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.mediate <- function(x, ...) { NULL } #' @export get_statistic.coeftest <- function(x, ...) { out <- data.frame( Parameter = row.names(x), Statistic = x[, 3], stringsAsFactors = FALSE, row.names = NULL ) attr(out, "statistic") <- find_statistic(x) out } #' @export get_statistic.bfsl <- function(x, ...) { cs <- stats::coef(x) out <- data.frame( Parameter = rownames(cs), Statistic = as.vector(cs[, "Estimate"] / cs[, "Std. Error"]), stringsAsFactors = FALSE, row.names = NULL ) out <- .remove_backticks_from_parameter_names(out) attr(out, "statistic") <- find_statistic(x) out } insight/R/find_parameters_bayesian.R0000644000175000017500000004735114144234776017441 0ustar nileshnilesh#' @title Find names of model parameters from Bayesian models #' @name find_parameters.BGGM #' #' @description Returns the names of model parameters, like they typically #' appear in the `summary()` output. For Bayesian models, the parameter #' names equal the column names of the posterior samples after coercion #' from `as.data.frame()`. #' #' @param parameters Regular expression pattern that describes the parameters that #' should be returned. #' @param effects Should parameters for fixed effects, random effects #' or both be returned? Only applies to mixed models. May be abbreviated. #' @param ... Currently not used. #' @inheritParams find_parameters #' @inheritParams find_parameters.betamfx #' @inheritParams find_predictors #' #' @return A list of parameter names. For simple models, only one list-element, #' `conditional`, is returned. For more complex models, the returned #' list may have following elements: #' \itemize{ #' \item `conditional`, the "fixed effects" part from the model #' \item `random`, the "random effects" part from the model #' \item `zero_inflated`, the "fixed effects" part from the #' zero-inflation component of the model #' \item `zero_inflated_random`, the "random effects" part from the #' zero-inflation component of the model #' \item `smooth_terms`, the smooth parameters #' } #' #' Furthermore, some models, especially from \pkg{brms}, can also return #' auxiliary parameters. 
These may be one of the following: #' \itemize{ #' \item `sigma`, the residual standard deviation (auxiliary parameter) #' \item `dispersion`, the dispersion parameters (auxiliary parameter) #' \item `beta`, the beta parameter (auxiliary parameter) #' \item `simplex`, simplex parameters of monotonic effects (\pkg{brms} only) #' \item `mix`, mixture parameters (\pkg{brms} only) #' \item `shiftprop`, shifted proportion parameters (\pkg{brms} only) #' } #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_parameters(m) #' @export find_parameters.BGGM <- function(x, component = c("correlation", "conditional", "intercept", "all"), flatten = FALSE, ...) { component <- match.arg(component) l <- switch(component, "correlation" = list(correlation = colnames(get_parameters(x, component = "correlation"))), "conditional" = list(conditional = colnames(get_parameters(x, component = "conditional"))), "intercept" = list(intercept = colnames(x$Y)), "all" = list( intercept = colnames(x$Y), correlation = colnames(get_parameters(x, component = "correlation")), conditional = colnames(get_parameters(x, component = "conditional")) ) ) l <- .compact_list(l) if (flatten) { unique(unlist(l)) } else { l } } #' @rdname find_parameters.BGGM #' @export find_parameters.BFBayesFactor <- function(x, effects = c("all", "fixed", "random"), component = c("all", "extra"), flatten = FALSE, ...) { conditional <- NULL random <- NULL extra <- NULL effects <- match.arg(effects) component <- match.arg(component) if (.classify_BFBayesFactor(x) == "correlation") { conditional <- "rho" } else if (.classify_BFBayesFactor(x) %in% c("ttest1", "ttest2")) { conditional <- "Difference" } else if (.classify_BFBayesFactor(x) == "meta") { conditional <- "Effect" } else if (.classify_BFBayesFactor(x) == "proptest") { conditional <- "p" } else if (.classify_BFBayesFactor(x) == "linear") { posteriors <- as.data.frame(suppressMessages( BayesFactor::posterior(x, iterations = 20, progress = FALSE, index = 1, ...) )) params <- colnames(posteriors) vars <- find_variables(x, effects = "all", verbose = FALSE) interactions <- find_interactions(x) dat <- get_data(x, verbose = FALSE) if ("conditional" %in% names(vars)) { conditional <- unlist(lapply(vars$conditional, function(i) { if (is.factor(dat[[i]])) { sprintf("%s-%s", i, levels(dat[[i]])) } else { sprintf("%s-%s", i, i) } })) } # add interaction terms to conditional if ("conditional" %in% names(interactions)) { for (i in interactions$conditional) { conditional <- c(conditional, params[grepl(paste0("^\\Q", i, "\\E"), params)]) } } if ("random" %in% names(vars)) { random <- unlist(lapply(vars$random, function(i) { if (is.factor(dat[[i]])) { sprintf("%s-%s", i, levels(dat[[i]])) } else { sprintf("%s-%s", i, i) } })) } extra <- setdiff(params, c(conditional, random)) } elements <- .get_elements(effects, component = component) l <- lapply(.compact_list(list(conditional = conditional, random = random, extra = extra)), .remove_backticks_from_string) l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } #' @rdname find_parameters.BGGM #' @export find_parameters.MCMCglmm <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, ...) { sc <- summary(x) effects <- match.arg(effects) l <- .compact_list(list( conditional = rownames(sc$solutions), random = rownames(sc$Gcovariances) )) .filter_parameters(l, effects = effects, flatten = flatten, recursive = FALSE ) } #' @export find_parameters.mcmc.list <- function(x, flatten = FALSE, ...) 
{ l <- list(conditional = colnames(x[[1]])) if (flatten) { unique(unlist(l)) } else { l } } #' @rdname find_parameters.BGGM #' @export find_parameters.bamlss <- function(x, flatten = FALSE, component = c("all", "conditional", "location", "distributional", "auxiliary"), parameters = NULL, ...) { component <- match.arg(component) cn <- colnames(as.data.frame(unclass(x$samples))) ignore <- grepl("(\\.alpha|logLik|\\.accepted|\\.edf)$", cn) cond <- cn[grepl("^(mu\\.p\\.|pi\\.p\\.)", cn) & !ignore] sigma <- cn[grepl("^sigma\\.p\\.", cn) & !ignore] smooth_terms <- cn[grepl("^mu\\.s\\.(.*)(\\.tau\\d+|\\.edf)$", cn)] alpha <- cn[grepl("\\.alpha$", cn)] elements <- .get_elements(effects = "all", component = component) l <- .compact_list(list( conditional = cond, smooth_terms = smooth_terms, sigma = sigma, alpha = alpha )[elements]) l <- .filter_pars(l, parameters) if (flatten) { unique(unlist(l)) } else { l } } #' @rdname find_parameters.BGGM #' @export find_parameters.brmsfit <- function(x, effects = "all", component = "all", flatten = FALSE, parameters = NULL, ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) component <- match.arg(component, choices = c("all", .all_elements())) fe <- colnames(as.data.frame(x)) # remove redundant columns. These seem to be new since brms 2.16? pattern <- c("^[A-z]_\\d\\.\\d\\.(.*)") fe <- fe[!grepl(pattern, fe, perl = TRUE)] is_mv <- NULL # remove "Intercept" fe <- fe[!grepl("^Intercept", fe)] cond <- fe[grepl("^(b_|bs_|bsp_|bcs_)(?!zi_)(.*)", fe, perl = TRUE)] zi <- fe[grepl("^(b_zi_|bs_zi_|bsp_zi_|bcs_zi_)", fe, perl = TRUE)] rand <- fe[grepl("(?!.*__(zi|sigma|beta))(?=.*^r_)", fe, perl = TRUE) & !grepl("^prior_", fe, perl = TRUE)] randzi <- fe[grepl("^r_(.*__zi)", fe, perl = TRUE)] rand_sd <- fe[grepl("(?!.*_zi)(?=.*^sd_)", fe, perl = TRUE)] randzi_sd <- fe[grepl("^sd_(.*_zi)", fe, perl = TRUE)] rand_cor <- fe[grepl("(?!.*_zi)(?=.*^cor_)", fe, perl = TRUE)] randzi_cor <- fe[grepl("^cor_(.*_zi)", fe, perl = TRUE)] simo <- fe[grepl("^simo_", fe, perl = TRUE)] car_struc <- fe[fe %in% c("car", "sdcar")] smooth_terms <- fe[grepl("^sds_", fe, perl = TRUE)] priors <- fe[grepl("^prior_", fe, perl = TRUE)] sigma <- fe[grepl("^sigma_", fe, perl = TRUE) | grepl("sigma", fe, fixed = TRUE)] randsigma <- fe[grepl("^r_(.*__sigma)", fe, perl = TRUE)] beta <- fe[grepl("beta", fe, fixed = TRUE)] randbeta <- fe[grepl("^r_(.*__beta)", fe, perl = TRUE)] mix <- fe[grepl("mix", fe, fixed = TRUE)] shiftprop <- fe[grepl("shiftprop", fe, fixed = TRUE)] dispersion <- fe[grepl("dispersion", fe, fixed = TRUE)] auxiliary <- fe[grepl("(shape|phi|precision|_ndt_)", fe)] # if auxiliary is modelled directly, we need to remove duplicates here # e.g. "b_sigma..." is in "cond" and in "sigma" now, we just need it in "cond". 
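  # Illustration of the de-duplication below (hypothetical draw names, not
  # taken from a fitted object): with a distributional formula such as
  # brms::bf(y ~ x, sigma ~ x), the posterior draws include
  # "b_sigma_Intercept" and "b_sigma_x". These match both the conditional
  # pattern (prefix "b_") and the sigma pattern, e.g.
  #   cond  = c("b_Intercept", "b_x", "b_sigma_Intercept", "b_sigma_x")
  #   sigma = c("b_sigma_Intercept", "b_sigma_x")
  # so setdiff(sigma, c(cond, ...)) leaves `sigma` empty in this case and the
  # "b_sigma_*" terms are returned only once, under `conditional`; the same
  # logic is applied to `beta` and `auxiliary` below.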
sigma <- setdiff(sigma, c(cond, rand, rand_sd, rand_cor, randsigma, car_struc, "prior_sigma")) beta <- setdiff(beta, c(cond, rand, rand_sd, randbeta, rand_cor, car_struc)) auxiliary <- setdiff(auxiliary, c(cond, rand, rand_sd, rand_cor, car_struc)) l <- .compact_list(list( conditional = cond, random = c(rand, rand_sd, rand_cor, car_struc), zero_inflated = zi, zero_inflated_random = c(randzi, randzi_sd, randzi_cor), simplex = simo, smooth_terms = smooth_terms, sigma = sigma, sigma_random = randsigma, beta = beta, beta_random = randbeta, dispersion = dispersion, mix = mix, shiftprop = shiftprop, auxiliary = auxiliary, priors = priors )) elements <- .get_elements(effects = effects, component = component) elements <- c(elements, "priors") if (is_multivariate(x)) { rn <- names(find_response(x)) l <- lapply(rn, function(i) { if (.obj_has_name(l, "conditional")) { conditional <- l$conditional[grepl(sprintf("^(b_|bs_|bsp_|bcs_)\\Q%s\\E_", i), l$conditional)] } else { conditional <- NULL } if (.obj_has_name(l, "random")) { random <- l$random[grepl(sprintf("__\\Q%s\\E\\[", i), l$random) | grepl(sprintf("^sd_(.*)\\Q%s\\E\\_", i), l$random) | grepl("^cor_", l$random) | l$random %in% c("car", "sdcar")] } else { random <- NULL } if (.obj_has_name(l, "zero_inflated")) { zero_inflated <- l$zero_inflated[grepl(sprintf("^(b_zi_|bs_zi_|bsp_zi_|bcs_zi_)\\Q%s\\E_", i), l$zero_inflated)] } else { zero_inflated <- NULL } if (.obj_has_name(l, "zero_inflated_random")) { zero_inflated_random <- l$zero_inflated_random[grepl(sprintf("__zi_\\Q%s\\E\\[", i), l$zero_inflated_random) | grepl(sprintf("^sd_(.*)\\Q%s\\E\\_", i), l$zero_inflated_random) | grepl("^cor_", l$zero_inflated_random)] } else { zero_inflated_random <- NULL } if (.obj_has_name(l, "simplex")) { simplex <- l$simplex } else { simplex <- NULL } if (.obj_has_name(l, "sigma")) { sigma <- l$sigma[grepl(sprintf("^sigma_\\Q%s\\E$", i), l$sigma)] } else { sigma <- NULL } if (.obj_has_name(l, "beta")) { beta <- l$beta[grepl(sprintf("^beta_\\Q%s\\E$", i), l$sigma)] } else { beta <- NULL } if (.obj_has_name(l, "dispersion")) { dispersion <- l$dispersion[grepl(sprintf("^dispersion_\\Q%s\\E$", i), l$dispersion)] } else { dispersion <- NULL } if (.obj_has_name(l, "mix")) { mix <- l$mix[grepl(sprintf("^mix_\\Q%s\\E$", i), l$mix)] } else { mix <- NULL } if (.obj_has_name(l, "shape") || .obj_has_name(l, "precision")) { aux <- l$aux[grepl(sprintf("^(shape|precision)_\\Q%s\\E$", i), l$aux)] } else { aux <- NULL } if (.obj_has_name(l, "smooth_terms")) { smooth_terms <- l$smooth_terms } else { smooth_terms <- NULL } if (.obj_has_name(l, "priors")) { priors <- l$priors } else { priors <- NULL } pars <- .compact_list(list( conditional = conditional, random = random, zero_inflated = zero_inflated, zero_inflated_random = zero_inflated_random, simplex = simplex, smooth_terms = smooth_terms, sigma = sigma, beta = beta, dispersion = dispersion, mix = mix, priors = priors, auxiliary = aux )) .compact_list(pars[elements]) }) names(l) <- rn is_mv <- "1" } else { l <- .compact_list(l[elements]) } l <- .filter_pars(l, parameters, !is.null(is_mv) && is_mv == "1") attr(l, "is_mv") <- is_mv if (flatten) { unique(unlist(l)) } else { l } } #' @rdname find_parameters.BGGM #' @export find_parameters.bayesx <- function(x, component = c("all", "conditional", "smooth_terms"), flatten = FALSE, parameters = NULL, ...) 
{ cond <- rownames(stats::coef(x)) smooth_terms <- rownames(x$smooth.hyp) l <- .compact_list(list( conditional = cond, smooth_terms = smooth_terms )) l <- .filter_pars(l, parameters) component <- match.arg(component) elements <- .get_elements(effects = "all", component) l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } #' @rdname find_parameters.BGGM #' @export find_parameters.stanreg <- function(x, effects = c("all", "fixed", "random"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), flatten = FALSE, parameters = NULL, ...) { fe <- colnames(as.data.frame(x)) cond <- fe[grepl("^(?!(b\\[|sigma|Sigma))", fe, perl = TRUE) & .grep_non_smoothers(fe)] rand <- fe[grepl("^b\\[", fe, perl = TRUE)] rand_sd <- fe[grepl("^Sigma\\[", fe, perl = TRUE)] smooth_terms <- fe[grepl("^smooth_sd", fe, perl = TRUE)] sigma <- fe[grepl("sigma", fe, fixed = TRUE)] auxiliary <- fe[grepl("(shape|phi|precision)", fe)] # remove auxiliary from conditional cond <- setdiff(cond, auxiliary) l <- .compact_list(list( conditional = cond, random = c(rand, rand_sd), smooth_terms = smooth_terms, sigma = sigma, auxiliary = auxiliary )) l <- .filter_pars(l, parameters) effects <- match.arg(effects) component <- match.arg(component) elements <- .get_elements(effects, component) l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.bcplm <- function(x, flatten = FALSE, parameters = NULL, ...) { l <- .filter_pars(list(conditional = dimnames(x$sims.list[[1]])[[2]]), parameters) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.stanmvreg <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "sigma"), flatten = FALSE, parameters = NULL, ...) { fe <- colnames(as.data.frame(x)) rn <- names(find_response(x)) cond <- fe[grepl("^(?!(b\\[|sigma|Sigma))", fe, perl = TRUE) & .grep_non_smoothers(fe) & !grepl("\\|sigma$", fe, perl = TRUE)] rand <- fe[grepl("^b\\[", fe, perl = TRUE)] sigma <- fe[grepl("\\|sigma$", fe, perl = TRUE) & .grep_non_smoothers(fe)] l <- .compact_list(list( conditional = cond, random = rand, sigma = sigma )) if (.obj_has_name(l, "conditional")) { x1 <- sub("(.*)(\\|)(.*)", "\\1", l$conditional) x2 <- sub("(.*)(\\|)(.*)", "\\3", l$conditional) l.cond <- lapply(rn, function(i) { list(conditional = x2[which(x1 == i)]) }) names(l.cond) <- rn } else { l.cond <- NULL } if (.obj_has_name(l, "random")) { x1 <- sub("b\\[(.*)(\\|)(.*)", "\\1", l$random) x2 <- sub("(b\\[).*(.*)(\\|)(.*)", "\\1\\4", l$random) l.random <- lapply(rn, function(i) { list(random = x2[which(x1 == i)]) }) names(l.random) <- rn } else { l.random <- NULL } if (.obj_has_name(l, "sigma")) { l.sigma <- lapply(rn, function(i) { list(sigma = "sigma") }) names(l.sigma) <- rn } else { l.sigma <- NULL } l <- mapply(c, l.cond, l.random, l.sigma, SIMPLIFY = FALSE) l <- .filter_pars(l, parameters, is_mv = TRUE) effects <- match.arg(effects) component <- match.arg(component) elements <- .get_elements(effects, component) l <- lapply(l, function(i) .compact_list(i[elements])) attr(l, "is_mv") <- "1" if (flatten) { unique(unlist(l)) } else { l } } # Simulation models ----------------------------- #' @rdname find_parameters.BGGM #' @export find_parameters.sim.merMod <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, parameters = NULL, ...) 
{ fe <- colnames(.get_armsim_fixef_parms(x)) re <- colnames(.get_armsim_ranef_parms(x)) l <- .compact_list(list( conditional = fe, random = re )) l <- .filter_pars(l, parameters) effects <- match.arg(effects) elements <- .get_elements(effects, component = "all") l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.sim <- function(x, flatten = FALSE, parameters = NULL, ...) { l <- .filter_pars( list(conditional = colnames(.get_armsim_fixef_parms(x))), parameters ) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.mcmc <- function(x, flatten = FALSE, parameters = NULL, ...) { l <- .filter_pars(list(conditional = colnames(x)), parameters) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.bayesQR <- function(x, flatten = FALSE, parameters = NULL, ...) { l <- .filter_pars(list(conditional = x[[1]]$names), parameters) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.stanfit <- function(x, effects = c("all", "fixed", "random"), flatten = FALSE, parameters = NULL, ...) { fe <- colnames(as.data.frame(x)) cond <- fe[grepl("^(?!(b\\[|sigma|Sigma|lp__))", fe, perl = TRUE) & .grep_non_smoothers(fe)] rand <- fe[grepl("^b\\[", fe, perl = TRUE)] l <- .compact_list(list( conditional = cond, random = rand )) l <- .filter_pars(l, parameters) effects <- match.arg(effects) elements <- .get_elements(effects, component = "all") l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } insight/R/get_predictors.R0000644000175000017500000000174314040475164015424 0ustar nileshnilesh#' @title Get the data from model predictors #' @name get_predictors #' #' @description Returns the data from all predictor variables (fixed effects). #' #' @param verbose Toggle messages and warnings. #' @inheritParams find_predictors #' #' @return The data from all predictor variables, as data frame. #' #' @examples #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' head(get_predictors(m)) #' @export get_predictors <- function(x, verbose = TRUE) { vars <- if (inherits(x, "wbm")) { unlist(.compact_list(find_terms(x, flatten = FALSE)[c("conditional", "instruments")])) } else { find_predictors(x, effects = "fixed", component = "all", flatten = TRUE) } dat <- get_data(x) dat <- dat[, intersect(vars, colnames(dat)), drop = FALSE] if (.is_empty_object(dat)) { if (isTRUE(verbose)) { warning(format_message("Data frame is empty, probably you have an intercept-only model?"), call. = FALSE) } return(NULL) } dat } insight/R/find_parameters_gam.R0000644000175000017500000001172714077615664016414 0ustar nileshnilesh#' @title Find names of model parameters from generalized additive models #' @name find_parameters.gamlss #' #' @description Returns the names of model parameters, like they typically #' appear in the `summary()` output. #' #' @param ... Currently not used. #' @inheritParams find_parameters #' @inheritParams find_parameters.betamfx #' @inheritParams find_predictors #' #' @return A list of parameter names. The returned list may have following #' elements: #' \itemize{ #' \item `conditional`, the "fixed effects" part from the model. #' \item `smooth_terms`, the smooth parameters. #' } #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_parameters(m) #' @export find_parameters.gamlss <- function(x, flatten = FALSE, ...) 
{ pars <- lapply(x$parameters, function(i) { .remove_backticks_from_string(names(stats::na.omit(stats::coef(x, what = i)))) }) names(pars) <- x$parameters if ("mu" %in% names(pars)) names(pars)[1] <- "conditional" pars <- .compact_list(pars) if (flatten) { unique(unlist(pars)) } else { pars } } #' @rdname find_parameters.gamlss #' @export find_parameters.gam <- function(x, component = c("all", "conditional", "smooth_terms", "location"), flatten = FALSE, ...) { pars <- list(conditional = names(stats::coef(x))) pars$conditional <- .remove_backticks_from_string(pars$conditional) st <- summary(x)$s.table pars$conditional <- pars$conditional[.grep_non_smoothers(pars$conditional)] pars$smooth_terms <- row.names(st) pars <- .compact_list(pars) component <- match.arg(component) elements <- .get_elements(effects = "all", component = component) pars <- .compact_list(pars[elements]) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.scam <- find_parameters.gam #' @export find_parameters.Gam <- function(x, component = c("all", "conditional", "smooth_terms", "location"), flatten = FALSE, ...) { pars <- names(stats::coef(x)) component <- match.arg(component) l <- .compact_list(list( conditional = pars[.grep_non_smoothers(pars)], smooth_terms = pars[.grep_smoothers(pars)] )) .filter_parameters( l, effects = "all", component = component, flatten = flatten, recursive = TRUE ) } #' @export find_parameters.vgam <- find_parameters.Gam #' @export find_parameters.gamm <- function(x, component = c("all", "conditional", "smooth_terms", "location"), flatten = FALSE, ...) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) component <- match.arg(component) l <- find_parameters.gam(x, component = component) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.cgam <- function(x, component = c("all", "conditional", "smooth_terms", "location"), flatten = FALSE, ...) { component <- match.arg(component) sc <- summary(x) estimates <- sc$coefficients smooth_terms <- sc$coefficients2 l <- .compact_list(list( conditional = rownames(estimates), smooth_terms = rownames(smooth_terms) )) l <- lapply(l, .remove_backticks_from_string) component <- match.arg(component) elements <- .get_elements(effects = "all", component = component) l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.SemiParBIV <- function(x, flatten = FALSE, ...) { pars <- get_parameters(x) # make sure we preserve order for split() pars$Component <- factor(pars$Component, levels = unique(pars$Component)) l <- lapply(split(pars, pars$Component), function(i) { as.vector(i$Parameter) }) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.selection <- find_parameters.SemiParBIV #' @export find_parameters.rqss <- function(x, component = c("all", "conditional", "smooth_terms", "location"), flatten = FALSE, ...) 
{ sc <- summary(x) pars <- list( conditional = rownames(sc$coef), smooth_terms = rownames(sc$qsstab) ) pars$conditional <- .remove_backticks_from_string(pars$conditional) pars$smooth_terms <- .remove_backticks_from_string(pars$smooth_terms) component <- match.arg(component) elements <- .get_elements(effects = "all", component) pars <- .compact_list(pars[elements]) if (flatten) { unique(unlist(pars)) } else { pars } } insight/R/get_parameters_others.R0000644000175000017500000002013614101710621016756 0ustar nileshnilesh#' @title Get model parameters from models with special components #' @name get_parameters.betareg #' #' @description Returns the coefficients from a model. #' #' @param ... Currently not used. #' #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return A data frame with three columns: the parameter names, the related #' point estimates and the component. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_parameters(m) #' @export get_parameters.betareg <- function(x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), ...) { component <- match.arg(component) cf <- stats::coef(x) params <- data.frame( Parameter = gsub("^\\(phi\\)_", "", names(cf)), Estimate = unname(cf), Component = c(rep("conditional", length(x$coefficients$mean)), rep("precision", length(x$coefficients$precision))), stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @rdname get_parameters.betareg #' @export get_parameters.DirichletRegModel <- function(x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), ...) { component <- match.arg(component) cf <- stats::coef(x) if (x$parametrization == "common") { component <- "all" n_comp <- lapply(cf, length) pattern <- paste0("(", paste(x$varnames, collapse = "|"), ")\\.(.*)") p_names <- gsub(pattern, "\\2", names(unlist(cf))) params <- data.frame( Parameter = p_names, Estimate = unname(unlist(cf)), Response = rep(names(n_comp), sapply(n_comp, function(i) i)), stringsAsFactors = FALSE, row.names = NULL ) } else { out1 <- .gather(data.frame(do.call(cbind, cf$beta)), names_to = "Response", values_to = "Estimate") out2 <- .gather(data.frame(do.call(cbind, cf$gamma)), names_to = "Component", values_to = "Estimate") out1$Component <- "conditional" out2$Component <- "precision" out2$Response <- NA params <- merge(out1, out2, all = TRUE, sort = FALSE) params$Parameter <- gsub("(.*)\\.(.*)\\.(.*)", "\\3", names(unlist(cf))) params <- params[c("Parameter", "Estimate", "Component", "Response")] } if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @rdname get_parameters.betareg #' @export get_parameters.averaging <- function(x, component = c("conditional", "full"), ...) { component <- match.arg(component) cf <- stats::coef(x, full = component == "full") params <- data.frame( Parameter = names(cf), Estimate = unname(cf), stringsAsFactors = FALSE, row.names = NULL ) .remove_backticks_from_parameter_names(params) } #' @rdname get_parameters.betareg #' @export get_parameters.glmx <- function(x, component = c("all", "conditional", "extra", "location", "distributional", "auxiliary"), ...) 
{ component <- match.arg(component) cf <- stats::coef(summary(x)) params <- rbind( data.frame( Parameter = names(cf$glm[, 1]), Estimate = unname(cf$glm[, 1]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ), data.frame( Parameter = rownames(cf$extra), Estimate = cf$extra[, 1], Component = "extra", stringsAsFactors = FALSE, row.names = NULL ) ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @rdname get_parameters.betareg #' @export get_parameters.clm2 <- function(x, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) cf <- stats::coef(summary(x)) n_intercepts <- length(x$xi) n_location <- length(x$beta) n_scale <- length(x$zeta) params <- data.frame( Parameter = rownames(cf), Estimate = unname(cf[, "Estimate"]), Component = c(rep("conditional", times = n_intercepts + n_location), rep("scale", times = n_scale)), stringsAsFactors = FALSE, row.names = NULL ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.clmm2 <- get_parameters.clm2 #' @rdname get_parameters.betareg #' @export get_parameters.mvord <- function(x, component = c("all", "conditional", "thresholds", "correlation"), ...) { component <- match.arg(component) junk <- utils::capture.output(s <- summary(x)) # intercepts thresholds thresholds <- as.data.frame(s$thresholds) thresholds$Parameter <- rownames(thresholds) thresholds$Response <- gsub("(.*)\\s(.*)", "\\1", thresholds$Parameter) # coefficients coefficients <- as.data.frame(s$coefficients) coefficients$Parameter <- rownames(coefficients) coefficients$Response <- gsub("(.*)\\s(.*)", "\\2", coefficients$Parameter) if (!all(coefficients$Response %in% thresholds$Response)) { resp <- unique(thresholds$Response) for (i in coefficients$Response) { coefficients$Response[coefficients$Response == i] <- resp[grepl(paste0(i, "$"), resp)] } } params <- data.frame( Parameter = c(thresholds$Parameter, coefficients$Parameter), Estimate = c(unname(thresholds[, "Estimate"]), unname(coefficients[, "Estimate"])), Component = c(rep("thresholds", nrow(thresholds)), rep("conditional", nrow(coefficients))), Response = c(thresholds$Response, coefficients$Response), stringsAsFactors = FALSE, row.names = NULL ) params_error <- data.frame( Parameter = rownames(s$error.structure), Estimate = unname(s$error.structure[, "Estimate"]), Component = "correlation", Response = NA, stringsAsFactors = FALSE, row.names = NULL ) params <- rbind(params, params_error) if (.n_unique(params$Response) == 1) { params$Response <- NULL } if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @rdname get_parameters.betareg #' @export get_parameters.mjoint <- function(x, component = c("all", "conditional", "survival"), ...) 
{ component <- match.arg(component) s <- summary(x) params <- rbind( data.frame( Parameter = rownames(s$coefs.long), Estimate = unname(s$coefs.long[, 1]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ), data.frame( Parameter = rownames(s$coefs.surv), Estimate = unname(s$coefs.surv[, 1]), Component = "survival", stringsAsFactors = FALSE, row.names = NULL ) ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @export get_parameters.systemfit <- function(x, ...) { cf <- stats::coef(summary(x)) f <- find_formula(x) system_names <- names(f) parameter_names <- row.names(cf) out <- lapply(system_names, function(i) { pattern <- paste0("^", i, "_(.*)") params <- grepl(pattern, parameter_names) data.frame( Parameter = gsub(pattern, "\\1", parameter_names[params]), Estimate = as.vector(cf[params, "Estimate"]), Component = i, stringsAsFactors = FALSE ) }) do.call(rbind, out) } insight/R/get_predicted_ci_zeroinflated.R0000644000175000017500000001607614144234777020447 0ustar nileshnilesh.simulate_zi_predictions <- function(model, newdata, predictions, nsim = NULL, ci = .95) { # Since the zero inflation and the conditional model are working in "opposite # directions", confidence intervals can not be derived directly from the # "predict()"-function. Thus, confidence intervals are based on quantiles # of simulated draws from a multivariate normal distribution # (see also _Brooks et al. 2017, pp.391-392_ for details). if (is.null(newdata)) { newdata <- get_data(model) } if (is.null(nsim)) { nsim <- 1000 } if (inherits(model, "glmmTMB")) { out <- .simulate_predictions_glmmTMB(model, newdata, nsim) } else if (inherits(model, "MixMod")) { out <- .simulate_predictions_MixMod(model, newdata, nsim) } else { out <- .simulate_predictions_zeroinfl(model, newdata, nsim) } if (is.null(out)) { return(NULL) } sims <- link_inverse(model)(out$cond) * (1 - stats::plogis(out$zi)) ci <- (1 + ci) / 2 simulated <- data.frame( SE = apply(sims, 1, stats::sd), CI_low = apply(sims, 1, stats::quantile, probs = 1 - ci), CI_high = apply(sims, 1, stats::quantile, probs = ci), stringsAsFactors = FALSE ) # We need to fix a bit here. We have the simulated standard errors and CI's - # but can use the "correct" predictions from "predict(type = "reponse")". # in order to make CI and predictions match, we take the simulated CI-range # and use the original predicted values as "center" for those CI-ranges. 
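  # Small worked example with made-up numbers: if a simulated interval is
  # [0.8, 1.6] (half-range 0.4) and the corresponding predicted value is 1.1,
  # the re-centered interval becomes [1.1 - 0.4, 1.1 + 0.4] = [0.7, 1.5].
  # The width of the simulated interval is preserved; only its center is
  # shifted to the predicted value (with an additional correction below if
  # the shifted lower bound would drop below zero).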
ci.range <- (simulated$CI_high - simulated$CI_low) / 2 # fix lower bound ("center" lower bound related to predicted values) ci.low <- predictions - ci.range # fix negative CI neg.ci <- ci.low < 0 if (any(neg.ci)) { ci.range[neg.ci] <- ci.range[neg.ci] - abs(ci.low[neg.ci]) - 1e-05 simulated$SE[neg.ci] <- simulated$SE[neg.ci] - ((abs(ci.low[neg.ci]) + 1e-05) / stats::qnorm(ci)) } simulated$CI_low <- predictions - ci.range simulated$CI_high <- predictions + ci.range simulated[c("SE", "CI_low", "CI_high")] } # glmmTMB ------------------- .simulate_predictions_glmmTMB <- function(model, newdata, nsim) { check_if_installed("lme4") check_if_installed("MASS") tryCatch( { condformula <- lme4::nobars(stats::formula(model)[-2]) ziformula <- lme4::nobars(stats::formula(model$modelInfo$allForm$ziformula)) matrix.conditional <- stats::model.matrix(condformula, newdata) beta.conditional <- lme4::fixef(model)$cond matrix.zero_inflated <- stats::model.matrix(ziformula, newdata) beta.zero_inflated <- lme4::fixef(model)$zi .get_simulation_from_zi(model, nsim, beta.conditional, beta.zero_inflated, matrix.conditional, matrix.zero_inflated) }, error = function(x) { NULL } ) } # GLMMAdaptive MixMod ------------------- .simulate_predictions_MixMod <- function(model, newdata, nsim) { check_if_installed("lme4") check_if_installed("MASS") tryCatch( { condformula <- stats::formula(model, type = "fixed") ziformula <- stats::formula(model, type = "zi_fixed") matrix.conditional <- stats::model.matrix(condformula, newdata) beta.conditional <- lme4::fixef(model, sub_model = "main") matrix.zero_inflated <- stats::model.matrix(ziformula, newdata) beta.zero_inflated <- lme4::fixef(model, sub_model = "zero_part") .get_simulation_from_zi(model, nsim, beta.conditional, beta.zero_inflated, matrix.conditional, matrix.zero_inflated) }, error = function(x) { NULL } ) } # pscl::zeroinfl ---------------------- .simulate_predictions_zeroinfl <- function(model, newdata, nsim = 1000) { # check for at least to factor levels, in order to build contrasts single_factor_levels <- sapply(newdata, function(i) is.factor(i) && nlevels(i) == 1) if (any(single_factor_levels)) { warning(format_message("Some factors in the data have only one level. Cannot compute model matrix for standard errors and confidence intervals."), call. 
= FALSE) return(NULL) } tryCatch( { condformula <- stats::as.formula(paste0("~", .safe_deparse(stats::formula(model)[[3]][[2]]))) ziformula <- stats::as.formula(paste0("~", .safe_deparse(stats::formula(model)[[3]][[3]]))) matrix.conditional <- stats::model.matrix(condformula, model = "count", data = newdata) beta.conditional <- stats::coef(model, model = "count") matrix.zero_inflated <- stats::model.matrix(ziformula, model = "zero", data = newdata) beta.zero_inflated <- stats::coef(model, model = "zero") .get_simulation_from_zi(model, nsim, beta.conditional, beta.zero_inflated, matrix.conditional, matrix.zero_inflated) }, error = function(x) { NULL } ) } # gam -------------------- .get_zeroinfl_gam_predictions <- function(model, newdata, nsim = 1000) { tryCatch( { mm <- stats::model.matrix(model, data = newdata) linpred <- attr(mm, "lpi", exact = TRUE) cond <- linpred[[1]] zi <- linpred[[2]] matrix.conditional <- mm[, cond] matrix.zero_inflated <- mm[, zi] beta.conditional <- stats::coef(model)[cond] beta.zero_inflated <- stats::coef(model)[zi] varcov.cond <- stats::vcov(model)[cond, cond] varcov.zi <- stats::vcov(model)[zi, zi] psim.cond <- MASS::mvrnorm(nsim, mu = beta.conditional, Sigma = varcov.cond) pred.cond <- matrix.conditional %*% t(psim.cond) psim.zi <- MASS::mvrnorm(nsim, mu = beta.zero_inflated, Sigma = varcov.zi) pred.zi <- matrix.zero_inflated %*% t(psim.zi) list(cond = pred.cond, zi = pred.zi) }, error = function(x) { NULL } ) } # helper ----------------- .get_simulation_from_zi <- function(model, nsim, beta.conditional, beta.zero_inflated, matrix.conditional, matrix.zero_inflated) { # if formula has a polynomial term, and this term is one that is held # constant, model.matrix() with "newdata" will throw an error - so we # re-build the newdata-argument by including all values for poly-terms, if # these are hold constant. # fixes <- .rows_to_keep(model, newdata, condformula, ziformula, terms, value_adjustment, condition) # # if (!is.null(fixes)) { # keep <- fixes$keep # newdata <- fixes$newdata # } else { # keep <- NULL # } cond.varcov <- get_varcov(model, component = "conditional") zi.varcov <- get_varcov(model, component = "zero_inflated") pred.condpar.psim <- MASS::mvrnorm(n = nsim, mu = beta.conditional, Sigma = cond.varcov) pred.cond.psim <- matrix.conditional %*% t(pred.condpar.psim) pred.zipar.psim <- MASS::mvrnorm(n = nsim, mu = beta.zero_inflated, Sigma = zi.varcov) pred.zi.psim <- matrix.zero_inflated %*% t(pred.zipar.psim) # if (!.is_empty(keep)) { # pred.cond.psim <- pred.cond.psim[keep, , drop = FALSE] # pred.zi.psim <- pred.zi.psim[keep, , drop = FALSE] # } list(cond = pred.cond.psim, zi = pred.zi.psim) } insight/R/find_random_slopes.R0000644000175000017500000000337214077615664016267 0ustar nileshnilesh#' @title Find names of random slopes #' @name find_random_slopes #' #' @description Return the name of the random slopes from mixed effects models. #' #' @param x A fitted mixed model. #' #' @return A list of character vectors with the name(s) of the random slopes, or #' `NULL` if model has no random slopes. 
Depending on the model, the #' returned list has following elements: #' \itemize{ #' \item `random`, the random slopes from the conditional part of model #' \item `zero_inflated_random`, the random slopes from the #' zero-inflation component of the model #' } #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' m <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' find_random_slopes(m) #' } #' @export find_random_slopes <- function(x) { random_slopes <- vector(mode = "list") forms <- find_formula(x, verbose = FALSE) random_slopes$random <- .extract_random_slopes(forms$random) random_slopes$zero_inflated_random <- .extract_random_slopes(forms$zero_inflated_random) random_slopes <- .compact_list(random_slopes) if (.is_empty_object(random_slopes)) { NULL } else { random_slopes } } .extract_random_slopes <- function(fr) { if (is.null(fr)) { return(NULL) } if (!is.list(fr)) fr <- list(fr) random_slope <- lapply(fr, function(forms) { if (grepl("(.*)\\|(.*)\\|(.*)", .safe_deparse(forms))) { pattern <- "(.*)\\|(.*)\\|(.*)" } else { pattern <- "(.*)\\|(.*)" } pattern <- gsub(pattern, "\\1", .safe_deparse(forms)) re <- all.vars(forms) re[sapply(re, function(x) { grepl(x, pattern, fixed = TRUE) })] }) unique(unlist(.compact_list(random_slope))) } insight/R/format_pd.R0000644000175000017500000000146213677310470014363 0ustar nileshnilesh#' Probability of direction (pd) formatting #' #' @param pd Probability of direction (pd). #' @inheritParams format_p #' #' @return A formatted string. #' @examples #' format_pd(0.12) #' format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), name = NULL) #' format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), stars = TRUE) #' @export format_pd <- function(pd, stars = FALSE, stars_only = FALSE, name = "pd") { text <- ifelse(pd >= 1, "= 100%***", ifelse(pd > 0.999, paste0("= ", format_value(pd * 100), "%***"), ifelse(pd > 0.99, paste0("= ", format_value(pd * 100), "%**"), ifelse(pd > 0.97, paste0("= ", format_value(pd * 100), "%*"), paste0("= ", format_value(pd * 100), "%") ) ) ) ) .add_prefix_and_remove_stars(text, stars, stars_only, name) } insight/R/get_predicted_ci.R0000644000175000017500000004104214155570474015667 0ustar nileshnilesh#' Confidence and Prediction Interval for Model Predictions #' #' Returns the Confidence (or Prediction) Interval (CI) associated with #' predictions made by a model. #' #' @inheritParams get_predicted #' @param predictions A vector of predicted values (as obtained by #' `stats::fitted()`, `stats::predict()` or #' [get_predicted()]). #' @param ci The interval level (default `0.95`, i.e., `95%` CI). #' @param ci_type Can be `"prediction"` or `"confidence"`. Prediction #' intervals show the range that likely contains the value of a new #' observation (in what range it would fall), whereas confidence intervals #' reflect the uncertainty around the estimated parameters (and gives the #' range of the link; for instance of the regression line in a linear #' regressions). Prediction intervals account for both the uncertainty in the #' model's parameters, plus the random variation of the individual values. #' Thus, prediction intervals are always wider than confidence intervals. #' Moreover, prediction intervals will not necessarily become narrower as the #' sample size increases (as they do not reflect only the quality of the fit). 
#' This applies mostly for "simple" linear models (like `lm`), as for #' other models (e.g., `glm`), prediction intervals are somewhat useless #' (for instance, for a binomial model for which the dependent variable is a #' vector of 1s and 0s, the prediction interval is... `[0, 1]`). #' @param vcov_estimation Either a matrix, or a string, indicating the suffix #' of the `vcov*()`-function from the \pkg{sandwich} or \pkg{clubSandwich} #' package, e.g. `vcov_estimation = "CL"` (which calls #' [sandwich::vcovCL()] to compute clustered covariance matrix #' estimators), or `vcov_estimation = "HC"` (which calls #' [sandwich::vcovHC()] to compute heteroskedasticity-consistent covariance #' matrix estimators). #' @param vcov_type Character vector, specifying the estimation type for the #' robust covariance matrix estimation (see #' [sandwich::vcovHC()] or `clubSandwich::vcovCR()` #' for details). Only applies if `vcov_estimation` is a string, and not a matrix. #' @param vcov_args List of named vectors, used as additional arguments that are #' passed down to the \pkg{sandwich}-function specified in #' `vcov_estimation`. Only applies if `vcov_estimation` is a string, and not #' a matrix. #' @param dispersion_method,ci_method These arguments are only used in #' the context of bootstrapped and Bayesian models. Possible values are #' `dispersion_method = c("sd", "mad")` and #' `ci_method = c("quantile", "hdi", "eti")`. For the latter, the #' \pkg{bayestestR} package is required. #' @param ... Not used for now. #' #' #' @return The Confidence (or Prediction) Interval (CI). #' #' #' @examples #' data(mtcars) #' #' # Linear model #' # ------------ #' x <- lm(mpg ~ cyl + hp, data = mtcars) #' predictions <- predict(x) #' ci_vals <- get_predicted_ci(x, predictions, ci_type = "prediction") #' head(ci_vals) #' ci_vals <- get_predicted_ci(x, predictions, ci_type = "confidence") #' head(ci_vals) #' ci_vals <- get_predicted_ci(x, predictions, ci = c(0.8, 0.9, 0.95)) #' head(ci_vals) #' #' # Bootstrapped #' # ------------ #' predictions <- get_predicted(x, iterations = 500) #' get_predicted_ci(x, predictions) #' #' if (require("datawizard")) { #' ci_vals <- get_predicted_ci(x, predictions, ci = c(0.80, 0.95)) #' head(ci_vals) #' datawizard::reshape_ci(ci_vals) #' #' ci_vals <- get_predicted_ci(x, #' predictions, #' dispersion_method = "MAD", #' ci_method = "HDI" #' ) #' head(ci_vals) #' } #' #' #' # Logistic model #' # -------------- #' x <- glm(vs ~ wt, data = mtcars, family = "binomial") #' predictions <- predict(x, type = "link") #' ci_vals <- get_predicted_ci(x, predictions, ci_type = "prediction") #' head(ci_vals) #' ci_vals <- get_predicted_ci(x, predictions, ci_type = "confidence") #' head(ci_vals) #' @export get_predicted_ci <- function(x, predictions = NULL, ...) { UseMethod("get_predicted_ci") } # General method ---------------------------------------------------------- #' @rdname get_predicted_ci #' @export get_predicted_ci.default <- function(x, predictions = NULL, data = NULL, ci = 0.95, ci_type = "confidence", vcov_estimation = NULL, vcov_type = NULL, vcov_args = NULL, dispersion_method = "sd", ci_method = "quantile", ...) 
{ # If draws are present (bootstrapped or Bayesian) if ("iterations" %in% names(attributes(predictions))) { iter <- attributes(predictions)$iteration se <- .get_predicted_se_from_iter(iter = iter, dispersion_method) out <- .get_predicted_ci_from_iter(iter = iter, ci = ci, ci_method) out <- cbind(se, out) # outcome is multinomial/ordinal/cumulative if (inherits(predictions, "data.frame") && "Response" %in% colnames(predictions) && "Row" %in% colnames(predictions)) { out <- cbind(predictions[, c("Row", "Response")], out) } return(out) } # Analytical solution # 1. Find appropriate interval function if (ci_type == "confidence" || get_family(x)$family %in% c("gaussian") || (!is.null(vcov_estimation) && is.matrix(vcov_estimation))) { # gaussian or CI se <- get_predicted_se( x, predictions, data = data, ci_type = ci_type, vcov_estimation = vcov_estimation, vcov_type = vcov_type, vcov_args = vcov_args ) ci_function <- .get_predicted_se_to_ci } else { se <- rep(NA, length(predictions)) ci_function <- .get_predicted_pi_glm } # 2. Run it once or multiple times if multiple CI levels are requested if (is.null(ci)) { out <- data.frame(SE = se) } else if (length(ci) == 1) { out <- ci_function(x, predictions, ci = ci, se = se) } else { out <- data.frame(SE = se) for (ci_val in ci) { temp <- ci_function(x, predictions, ci = ci_val, se = se) temp$SE <- NULL names(temp) <- paste0(names(temp), "_", ci_val) out <- cbind(out, temp) } } out } # Specific definitions ---------------------------------------------------- #' @export get_predicted_ci.mlm <- function(x, ...) { stop("TBD") } # Get Variance-covariance Matrix --------------------------------------------------- .get_predicted_ci_vcov <- function(x, vcov_estimation = NULL, vcov_type = NULL, vcov_args = NULL) { # (robust) variance-covariance matrix if (!is.null(vcov_estimation) && !is.matrix(vcov_estimation)) { # check for existing vcov-prefix if (!grepl("^vcov", vcov_estimation)) { vcov_estimation <- paste0("vcov", vcov_estimation) } # set default for clubSandwich if (vcov_estimation == "vcovCR" && is.null(vcov_type)) { vcov_type <- "CR0" } if (!is.null(vcov_type) && vcov_type %in% c("CR0", "CR1", "CR1p", "CR1S", "CR2", "CR3")) { # installed? check_if_installed("clubSandwich") robust_package <- "clubSandwich" vcov_estimation <- "vcovCR" } else { # installed? check_if_installed("sandwich") robust_package <- "sandwich" } # compute robust standard errors based on vcov if (robust_package == "sandwich") { vcov_estimation <- get(vcov_estimation, asNamespace("sandwich")) vcovmat <- as.matrix(do.call(vcov_estimation, c(list(x = x, type = vcov_type), vcov_args))) } else { vcov_estimation <- clubSandwich::vcovCR vcovmat <- as.matrix(do.call(vcov_estimation, c(list(obj = x, type = vcov_type), vcov_args))) } } else if (!is.matrix(vcov_estimation)) { # get variance-covariance-matrix, depending on model type vcovmat <- get_varcov(x, component = "conditional") } else { vcovmat <- vcov_estimation } vcovmat } # Get Model matrix ------------------------------------------------------------ .get_predicted_ci_modelmatrix <- function(x, data = NULL, vcovmat = NULL, ...) { resp <- find_response(x) if (is.null(vcovmat)) vcovmat <- .get_predicted_ci_vcov(x, ...) if (is.null(data)) { mm <- get_modelmatrix(x) } else { if (!all(resp %in% data)) data[[resp]] <- 0 # fake response # else, model.matrix below fails, e.g. for log-terms attr(data, "terms") <- NULL # In these models we need to drop offset from model_terms. 
To do this, we # must construct the mm by calling `get_modelmatrix` on modified model # terms. When we do not need to drop offset terms, we call get_modelmatrix # on the model itself. The latter strategy is safer in cases where `data` # does not include all the levels of a factor variable. if (inherits(x, c("zeroinfl", "hurdle", "zerotrunc"))) { # model terms, required for model matrix model_terms <- tryCatch( { stats::terms(x) }, error = function(e) { find_formula(x)$conditional } ) all_terms <- find_terms(x)$conditional off_terms <- grepl("^offset\\((.*)\\)", all_terms) if (any(off_terms)) { all_terms <- all_terms[!off_terms] # TODO: preserve interactions vcov_names <- dimnames(vcovmat)[[1]][grepl(":", dimnames(vcovmat)[[1]], fixed = TRUE)] if (length(vcov_names)) { vcov_names <- gsub(":", "*", vcov_names, fixed = TRUE) all_terms <- unique(c(all_terms, vcov_names)) } off_terms <- grepl("^offset\\((.*)\\)", all_terms) model_terms <- stats::reformulate(all_terms[!off_terms], response = find_response(x)) } # check for at least to factor levels, in order to build contrasts single_factor_levels <- sapply(data, function(i) is.factor(i) && nlevels(i) == 1) if (any(single_factor_levels)) { warning(format_message("Some factors in the data have only one level. Cannot compute model matrix for standard errors and confidence intervals."), call. = FALSE) return(NULL) } mm <- get_modelmatrix(model_terms, data = data) } else { mm <- get_modelmatrix(x, data = data) } } # fix rank deficiency if (ncol(vcovmat) < ncol(mm)) { mm <- mm[, intersect(colnames(mm), colnames(vcovmat))] } mm } # Get SE ------------------------------------------------------------------ get_predicted_se <- function(x, predictions = NULL, data = NULL, ci_type = "confidence", vcov_estimation = NULL, vcov_type = NULL, vcov_args = NULL) { # Matrix-multiply X by the parameter vector B to get the predictions, then # extract the variance-covariance matrix V of the parameters and compute XVX' # to get the variance-covariance matrix of the predictions. The square-root of # the diagonal of this matrix represent the standard errors of the predictions, # which are then multiplied by 1.96 for the confidence intervals. vcovmat <- .get_predicted_ci_vcov( x, vcov_estimation = vcov_estimation, vcov_type = vcov_type, vcov_args = vcov_args ) mm <- .get_predicted_ci_modelmatrix(x, data = data, vcovmat = vcovmat) # return NULL for fail if (is.null(mm)) { return(NULL) } # compute vcov for predictions # Next line equivalent to: diag(M V M') var_diag <- colSums(t(mm %*% vcovmat) * t(mm)) # add sigma to standard errors, i.e. confidence or prediction intervals ci_type <- match.arg(ci_type, c("confidence", "prediction")) if (ci_type == "prediction") { if (is_mixed_model(x)) { se <- sqrt(var_diag + get_variance_residual(x)) } else { se <- sqrt(var_diag + get_sigma(x)^2) } } else { se <- sqrt(var_diag) } se } ## Convert to CI ----------- .get_predicted_se_to_ci <- function(x, predictions = NULL, se = NULL, ci = 0.95, ...) 
{ # TODO: Prediction interval for binomial: https://fromthebottomoftheheap.net/2017/05/01/glm-prediction-intervals-i/ # TODO: Prediction interval for poisson: https://fromthebottomoftheheap.net/2017/05/01/glm-prediction-intervals-ii/ # Sanity checks if (is.null(predictions)) { return(data.frame(SE = se)) } if (is.null(ci)) { return(data.frame(CI_low = predictions, CI_high = predictions)) } # Same as predicted dof <- get_df(x, type = "residual") # Return NA if (is.null(se)) { se <- ci_low <- ci_high <- rep(NA, length(predictions)) # Get CI # TODO: Does this cover all the model families? } else { if (is.null(dof) || is.infinite(dof) || find_statistic(x) == "z-statistic") { crit_val <- stats::qnorm(p = (1 + ci) / 2) } else { crit_val <- stats::qt(p = (1 + ci) / 2, df = dof) } if (length(predictions) != length(se)) { # multiple length? if (length(predictions) %% length(se) == 0) { # for multiple length, SE and predictions may match, could be intended? # could there be any cases where we have twice or x times the length of # predictions as standard errors? warning(format_message("Predictions and standard errors are not of the same length. Please check if you need the 'data' argument."), call. = FALSE) } else { stop(format_message("Predictions and standard errors are not of the same length. Please specify the 'data' argument."), call. = FALSE) } } ci_low <- predictions - (se * crit_val) ci_high <- predictions + (se * crit_val) } data.frame(SE = se, CI_low = ci_low, CI_high = ci_high) } # Get PI ------------------------------------------------------------------ .get_predicted_pi_glm <- function(x, predictions, ci = 0.95, ...) { info <- model_info(x) linkfun <- link_function(x) linkinv <- link_inverse(x) alpha <- 1 - ci prob <- c(alpha / 2, 1 - alpha / 2) if (info$is_binomial) { p <- linkinv(predictions) ci_low <- stats::qbinom(prob[1], size = 1, prob = p) ci_high <- stats::qbinom(prob[2], size = 1, prob = p) } else if (info$is_poisson) { rate <- linkinv(predictions) ci_low <- stats::qpois(prob[1], lambda = rate) ci_high <- stats::qpois(prob[2], lambda = rate) } data.frame( CI_low = linkfun(ci_low), CI_high = linkfun(ci_high) ) } # Interval helpers -------------------------------------------------------- .get_predicted_se_from_iter <- function(iter, dispersion_method = "SD") { data <- as.data.frame(t(iter)) # Reshape # Dispersion if (is.character(dispersion_method)) { dispersion_method <- match.arg(tolower(dispersion_method), c("sd", "mad")) if (dispersion_method == "sd") { se <- apply(data, 2, stats::sd) } else if (dispersion_method == "mad") { se <- apply(data, 2, stats::mad) } else { stop("`dispersion_method` argument not recognized.") } } else { se <- apply(data, 2, dispersion_method) } data.frame(SE = se, row.names = 1:length(se)) } .get_predicted_ci_from_iter <- function(iter, ci = 0.95, ci_method = "quantile") { # Interval ci_method <- match.arg(tolower(ci_method), c("quantile", "hdi", "eti")) if (ci_method == "quantile") { out <- data.frame(Parameter = 1:nrow(iter)) for (i in ci) { temp <- data.frame( CI_low = apply(iter, 1, stats::quantile, probs = (1 - i) / 2, na.rm = TRUE), CI_high = apply(iter, 1, stats::quantile, probs = (1 + i) / 2, na.rm = TRUE) ) names(temp) <- paste0(c("CI_low_", "CI_high_"), i) out <- cbind(out, temp) } if (length(ci) == 1) names(out) <- c("Parameter", "CI_low", "CI_high") } else { # installed? 
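    # Illustrative behaviour of this branch (example values, not computed
    # here): with ci = c(0.89, 0.95) and ci_method = "hdi", bayestestR::ci()
    # returns a long data frame with one row per draw-column and CI level;
    # when more than one level is requested, datawizard::reshape_ci() spreads
    # these into wide columns such as CI_low_0.89 / CI_high_0.89 /
    # CI_low_0.95 / CI_high_0.95, matching the column layout produced by the
    # "quantile" branch above.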
check_if_installed(c("bayestestR", "datawizard")) out <- as.data.frame(bayestestR::ci(as.data.frame(t(iter)), ci = ci, method = ci_method)) if (length(ci) > 1) out <- datawizard::reshape_ci(out) } out$Parameter <- out$CI <- NULL row.names(out) <- NULL out } insight/R/color_if.R0000644000175000017500000000710014077615664014207 0ustar nileshnilesh#' @title Color-formatting for data columns based on condition #' @name color_if #' #' @description Convenient function that formats columns in data frames #' with color codes, where the color is chosen based on certain conditions. #' Columns are then printed in color in the console. #' #' @param x A data frame #' @param columns Character vector with column names of `x` that should be formatted. #' @param predicate A function that takes `columns` and `value` as input #' and which should return `TRUE` or `FALSE`, based on if the condition #' (in comparison with `value`) is met. #' @param value The comparator. May be used in conjunction with `predicate` #' to quickly set up a function which compares elements in `colums` to `value`. #' May be ignored when `predicate` is a function that internally computes other #' comparisons. See 'Examples'. #' @param color_if,colour_if Character vector, indicating the color code used to #' format values in `x` that meet the condition of `predicate` and `value`. #' May be one of `"red"`, `"yellow"`, `"green"`, `"blue"`, #' `"violet"`, `"cyan"` or `"grey"`. Formatting is also possible #' with `"bold"` or `"italic"`. #' @param color_else,colour_else See `color_if`, but only for conditions #' that are *not* met. #' @param digits Digits for rounded values. #' #' @details The predicate-function simply works like this: #' `which(predicate(x[, columns], value))` #' #' @return The . #' #' @examples #' # all values in Sepal.Length larger than 5 in green, all remaining in red #' x <- color_if(iris[1:10, ], columns = "Sepal.Length", predicate = `>`, value = 5) #' x #' cat(x$Sepal.Length) #' #' # all levels "setosa" in Species in green, all remaining in red #' x <- color_if(iris, columns = "Species", predicate = `==`, value = "setosa") #' cat(x$Species) #' #' # own function, argument "value" not needed here #' p <- function(x, y) { #' x >= 4.9 & x <= 5.1 #' } #' # all values in Sepal.Length between 4.9 and 5.1 in green, all remaining in red #' x <- color_if(iris[1:10, ], columns = "Sepal.Length", predicate = p) #' cat(x$Sepal.Length) #' @export color_if <- function(x, columns, predicate = `>`, value = 0, color_if = "green", color_else = "red", digits = 2) { xnew <- x if (columns %in% names(x)) { x_if <- which(predicate(x[, columns], value)) x_else <- which(!predicate(x[, columns], value)) values <- x[, columns] xnew[, columns] <- format( if (is.numeric(values)) { round(values, digits = digits) } else { values }, width = nchar(columns), nsmall = digits, justify = "right" ) # remove NA xnew[, columns][trimws(xnew[, columns]) == "NA"] <- "" if (!is.null(color_if) && length(x_if)) { xnew[, columns][x_if] <- .colour(color_if, xnew[, columns][x_if]) } if (!is.null(color_else) && length(x_else)) { xnew[, columns][x_else] <- .colour(color_else, xnew[, columns][x_else]) } } xnew } #' Detect coloured cells #' @keywords internal .colour_detect <- function(x) { ansi_regex <- paste0( "(?:(?:\\x{001b}\\[)|\\x{009b})", "(?:(?:[0-9]{1,3})?(?:(?:;[0-9]{0,3})*)?[A-M|f-m])", "|\\x{001b}[A-M]" ) grepl(ansi_regex, x, perl = TRUE) } #' @rdname color_if #' @export colour_if <- function(x, columns, predicate = `>`, value = 0, colour_if = "green", 
colour_else = "red", digits = 2) { color_if(x = x, columns = columns, predicate = predicate, value = value, color_if = colour_if, color_else = colour_else, digits = digits) } insight/R/find_transformation.R0000644000175000017500000000436714144234776016471 0ustar nileshnilesh#' @title Find possible transformation of response variables #' @name find_transformation #' #' @description This functions checks whether any transformation, such as log- #' or exp-transforming, was applied to the response variable (dependent #' variable) in a regression formula. Currently, following patterns are #' detected: `log`, `log1p`, `exp`, `expm1`, `sqrt`, `log(x+)` and #' `log-log`. #' #' @param x A regression model. #' @return A string, with the name of the function of the applied transformation. #' Returns `"identity"` for no transformation, and e.g. `"log(x+3)"` when #' a specific values was added to the response variables before #' log-transforming. #' #' @examples #' # identity, no transformation #' model <- lm(Sepal.Length ~ Species, data = iris) #' find_transformation(model) #' #' # log-transformation #' model <- lm(log(Sepal.Length) ~ Species, data = iris) #' find_transformation(model) #' #' # log+2 #' model <- lm(log(Sepal.Length + 2) ~ Species, data = iris) #' find_transformation(model) #' @export find_transformation <- function(x) { rv <- find_terms(x)[["response"]] transform_fun <- "identity" # log-transformation if (any(grepl("log\\((.*)\\)", rv))) { # do we have log-log models? if (grepl("log\\(log\\((.*)\\)\\)", rv)) { transform_fun <- "log-log" } else { plus_minus <- eval(parse(text = gsub("log\\(([^,\\+)]*)(.*)\\)", "\\2", rv))) if (is.null(plus_minus)) { transform_fun <- "log" } else { transform_fun <- paste0("log(x+", plus_minus, ")") } } } # log1p-transformation if (any(grepl("log1p\\((.*)\\)", rv))) { transform_fun <- "log1p" } # expm1-transformation if (any(grepl("expm1\\((.*)\\)", rv))) { transform_fun <- "expm1" } # exp-transformation if (any(grepl("exp\\((.*)\\)", rv))) { transform_fun <- "exp" } # sqrt-transformation if (any(grepl("sqrt\\((.*)\\)", rv))) { plus_minus <- eval(parse(text = gsub("sqrt\\(([^,\\+)]*)(.*)\\)", "\\2", rv))) if (is.null(plus_minus)) { transform_fun <- "sqrt" } else { transform_fun <- paste0("sqrt(x+", plus_minus, ")") } } transform_fun } insight/R/check_if_installed.R0000644000175000017500000000703014144234776016203 0ustar nileshnilesh#' Checking if needed package is installed #' @param package A character vector naming the package(s), whose installation #' needs to be checked in any of the libraries. #' @param reason A phrase describing why the package is needed. The default is a #' generic description. #' @param stop Logical that decides whether the function should stop if the #' needed package is not installed. #' @param quietly Logical, if `TRUE`, invisibly returns either `TRUE` if all #' packages are installed, `FALSE` otherwise, and does not stop or throw a #' warning. If `quietly = TRUE`, argument `stop` is ignored. Use this argument #' to internally check for package dependencies without stopping or warnings. #' @param minimum_version String, representing the minimum package version that #' is required. If `NULL`, no check for minimum version is done. Note #' that `minimum_version` only works when `package` is of length 1. #' @param ... Currently ignored #' #' @return If `stop = TRUE`, and `package` is not yet installed, the #' function stops and throws an error. 
Else, a named logical vector is #' returned, indicating which of the packages are installed, and which not. #' #' @examples #' \dontrun{ #' check_if_installed("inexistent_package") #' check_if_installed("insight") #' check_if_installed("insight", minimum_version = "99.8.7") #' #' x <- check_if_installed(c("inexistent", "also_not_here"), stop = FALSE) #' x #' } #' @export check_if_installed <- function(package, reason = "for this function to work", stop = TRUE, minimum_version = NULL, quietly = FALSE, ...) { # does it need to be displayed? is_installed <- sapply(package, requireNamespace, quietly = TRUE) message <- NULL if (!all(is_installed)) { # only keep not-installed packages package <- package[!is_installed] # prepare the message if (length(package) > 1) { message <- format_message( paste0("Packages ", paste(sprintf("'%s'", package), collapse = " and "), " are required ", reason, "."), paste0("Please install them by running install.packages(", paste(sprintf("\"%s\"", package), collapse = ", "), ").") ) } else { message <- format_message( paste0("Package '", package, "' is required ", reason, "."), paste0("Please install it by running install.packages('", package, "').") ) } } else if (!is.null(minimum_version) && utils::packageVersion(package) < package_version(minimum_version)) { # prepare the message message <- format_message( paste0("Package '", package, "' is installed, but package version '", minimum_version, "' is required ", reason, "."), paste0("Please update the package by running install.packages('", package, "').") ) } if (!quietly && !is.null(message)) { if (stop) stop(message, call. = FALSE) else warning(message, call. = FALSE) } class(is_installed) <- c("check_if_installed", class(is_installed)) invisible(is_installed) } #' @export print.check_if_installed <- function(x, ...) { if (any(x)) { cat("Following packages are installed:\n") print_color(paste0("- ", names(x)[x], collapse = "\n"), "green") } if (any(!x)) { if (any(x)) { cat("\n\n") } cat("Following packages are not installed:\n") print_color(paste0("- ", names(x)[!x], collapse = "\n"), "red") } } insight/R/format_ci.R0000644000175000017500000001227514077615664014367 0ustar nileshnilesh#' Confidence/Credible Interval (CI) Formatting #' #' @param CI_low Lower CI bound. #' @param CI_high Upper CI bound. #' @param ci CI level in percentage. #' @param brackets Either a logical, and if `TRUE` (default), values are #' encompassed in square brackets. If `FALSE` or `NULL`, no brackets #' are used. Else, a character vector of length two, indicating the opening #' and closing brackets. #' @param width Minimum width of the returned string. If not `NULL` and #' `width` is larger than the string's length, leading whitespaces are #' added to the string. If `width="auto"`, width will be set to the #' length of the longest string. #' @param width_low,width_high Like `width`, but only applies to the lower #' or higher confidence interval value. This can be used when the values for #' the lower and upper CI are of very different length. #' @inheritParams format_value #' #' @return A formatted string. 
#' @examples #' format_ci(1.20, 3.57, ci = 0.90) #' format_ci(1.20, 3.57, ci = NULL) #' format_ci(1.20, 3.57, ci = NULL, brackets = FALSE) #' format_ci(1.20, 3.57, ci = NULL, brackets = c("(", ")")) #' format_ci(c(1.205645, 23.4), c(3.57, -1.35), ci = 0.90) #' format_ci(c(1.20, NA, NA), c(3.57, -1.35, NA), ci = 0.90) #' #' # automatic alignment of width, useful for printing multiple CIs in columns #' x <- format_ci(c(1.205, 23.4, 100.43), c(3.57, -13.35, 9.4)) #' cat(x, sep = "\n") #' #' x <- format_ci(c(1.205, 23.4, 100.43), c(3.57, -13.35, 9.4), width = "auto") #' cat(x, sep = "\n") #' @export format_ci <- function(CI_low, CI_high, ci = 0.95, digits = 2, brackets = TRUE, width = NULL, width_low = width, width_high = width, missing = "", zap_small = FALSE) { # check proper defaults if (isTRUE(brackets)) { ci_brackets <- c("[", "]") } else if (is.null(brackets) || isFALSE(brackets)) { ci_brackets <- c("", "") } else { ci_brackets <- brackets } if (!is.null(width) && width == "auto") { # set default numeric value for digits sig_digits <- digits # check if we have special handling, like "scientific" or "signif" # and then convert to numeric if (is.character(digits)) { if (grepl("^scientific", digits)) { if (digits == "scientific") digits <- "scientific3" sig_digits <- as.numeric(gsub("scientific", "", digits, fixed = TRUE)) + 3 } else { if (digits == "signif") digits <- "signif2" sig_digits <- as.numeric(gsub("signif", "", digits, fixed = TRUE)) } } # round CI-values for standard rounding, or scientific if (is.numeric(CI_low) && is.numeric(CI_high)) { if (is.numeric(digits) || (is.character(digits) && grepl("^scientific", digits))) { CI_low <- round(CI_low, sig_digits) CI_high <- round(CI_high, sig_digits) } else { CI_low <- signif(CI_low, digits = sig_digits) CI_high <- signif(CI_high, digits = sig_digits) } } if (all(is.na(CI_low))) { width_low <- 1 } else { width_low <- max(unlist(lapply(stats::na.omit(CI_low), function(.i) { if (.i > 1e+5) { 6 + digits } else { nchar(as.character(.i)) } }))) } if (all(is.na(CI_high))) { width_high <- 1 } else { width_high <- max(unlist(lapply(stats::na.omit(CI_high), function(.i) { if (.i > 1e+5) { 6 + digits } else { nchar(as.character(.i)) } }))) } } if (is.na(missing)) missing <- NA_character_ if (!is.null(ci)) { ifelse(is.na(CI_low) & is.na(CI_high), missing, paste0( ci * 100, "% CI ", .format_ci( CI_low, CI_high, digits = digits, ci_brackets = ci_brackets, width_low = width_low, width_high = width_high, missing = missing, zap_small = zap_small ) ) ) } else { ifelse( is.na(CI_low) & is.na(CI_high), missing, .format_ci( CI_low, CI_high, digits = digits, ci_brackets = ci_brackets, width_low = width_low, width_high = width_high, missing = missing, zap_small = zap_small ) ) } } #' @keywords internal .format_ci <- function(CI_low, CI_high, digits = 2, ci_brackets = c("[", "]"), width_low = NULL, width_high = NULL, missing = "NA", zap_small = FALSE) { paste0( ci_brackets[1], format_value( CI_low, digits = digits, missing = missing, width = width_low, zap_small = zap_small ), ", ", format_value( CI_high, digits = digits, missing = missing, width = width_high, zap_small = zap_small ), ci_brackets[2] ) } insight/R/get_transformation.R0000644000175000017500000000403414144234777016320 0ustar nileshnilesh#' @title Return function of transformed response variables #' @name get_transformation #' #' @description This functions checks whether any transformation, such as log- #' or exp-transforming, was applied to the response variable (dependent #' variable) in a 
regression formula, and returns the related function that #' was used for transformation. #' #' @param x A regression model. #' @return A list of two functions: `$transformation`, the function that was #' used to transform the response variable; `$inverse`, the inverse-function #' of `$transformation` (can be used for "back-transformation"). If no #' transformation was applied, both list-elements `$transformation` and #' `$inverse` just return `function(x) x`. #' #' @examples #' # identity, no transformation #' model <- lm(Sepal.Length ~ Species, data = iris) #' get_transformation(model) #' #' # log-transformation #' model <- lm(log(Sepal.Length) ~ Species, data = iris) #' get_transformation(model) #' #' # log-function #' get_transformation(model)$transformation(.3) #' log(.3) #' #' # inverse function is exp() #' get_transformation(model)$inverse(.3) #' exp(.3) #' @export get_transformation <- function(x) { transform_fun <- find_transformation(x) if (transform_fun == "identity") { out <- list(transformation = function(x) x, inverse = function(x) x) } else if (transform_fun == "log") { out <- list(transformation = log, inverse = exp) } else if (transform_fun %in% c("log1p", "log(x+1)")) { out <- list(transformation = log1p, inverse = expm1) } else if (transform_fun == "exp") { out <- list(transformation = exp, inverse = log) } else if (transform_fun == "sqrt") { out <- list(transformation = sqrt, inverse = function(x) x^2) } else if (transform_fun == "expm1") { out <- list(transformation = expm1, inverse = log1p) } else if (transform_fun == "log-log") { out <- list( transformation = function(x) log(log(x)), inverse = function(x) exp(exp(x)) ) } out } insight/R/is_nested_models.R0000644000175000017500000000255014077615665015740 0ustar nileshnilesh#' @title Checks whether a list of models are nested models #' @name is_nested_models #' #' @description Checks whether a list of models are nested models, strictly #' following the order they were passed to the function. #' #' @param ... Multiple regression model objects. #' #' @return `TRUE` if models are nested, `FALSE` otherwise. If models #' are nested, also returns two attributes that indicate whether nesting of #' models is in decreasing or increasing order. #' #' @examples #' m1 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) #' m2 <- lm(Sepal.Length ~ Species, data = iris) #' m3 <- lm(Sepal.Length ~ Petal.Width, data = iris) #' m4 <- lm(Sepal.Length ~ 1, data = iris) #' #' is_nested_models(m1, m2, m4) #' is_nested_models(m4, m2, m1) #' is_nested_models(m1, m2, m3) #' @export is_nested_models <- function(...) { objects <- list(...) object_names <- match.call(expand.dots = FALSE)$`...` if (!all(sapply(objects, is_regression_model))) { stop("All models must be valid regression model objects.", call. = FALSE) } names(objects) <- object_names info <- ellipsis_info.ListRegressions(objects) out <- isTRUE(attributes(info)$is_nested) attr(out, "is_nested_increasing") <- attributes(info)$is_nested_increasing attr(out, "is_nested_decreasing") <- attributes(info)$is_nested_decreasing out } insight/R/find_parameters_other.R0000644000175000017500000001143714135302155016746 0ustar nileshnilesh#' @title Find model parameters from models with special components #' @name find_parameters.averaging #' #' @description Returns the names of model parameters, like they typically #' appear in the `summary()` output. #' #' @param ... Currently not used. 
#' @inheritParams find_parameters #' @inheritParams find_parameters.betamfx #' @inheritParams find_predictors #' #' @return A list of parameter names. The returned list may have following #' elements: #' \itemize{ #' \item `conditional`, the "fixed effects" part from the model. #' \item `full`, parameters from the full model. #' } #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_parameters(m) #' @export find_parameters.averaging <- function(x, component = c("conditional", "full"), flatten = FALSE, ...) { component <- match.arg(component) cf <- stats::coef(x, full = component == "full") out <- list(conditional = .remove_backticks_from_string(names(cf))) if (flatten) { unique(unlist(out)) } else { out } } #' @rdname find_parameters.averaging #' @export find_parameters.betareg <- function(x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), flatten = FALSE, ...) { component <- match.arg(component) pars <- list( conditional = names(x$coefficients$mean), precision = names(x$coefficients$precision) ) pars$conditional <- .remove_backticks_from_string(pars$conditional) .filter_parameters( pars, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } #' @rdname find_parameters.averaging #' @export find_parameters.DirichletRegModel <- function(x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), flatten = FALSE, ...) { component <- match.arg(component) if (x$parametrization == "common") { pars <- list(conditional = names(unlist(stats::coef(x)))) } else { pars <- .compact_list(list( conditional = names(unlist(stats::coef(x)[["beta"]])), precision = names(unlist(stats::coef(x)[["gamma"]])) )) pars$precision <- .remove_backticks_from_string(pars$precision) } pars$conditional <- .remove_backticks_from_string(pars$conditional) .filter_parameters( pars, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } #' @rdname find_parameters.averaging #' @export find_parameters.mjoint <- function(x, component = c("all", "conditional", "survival"), flatten = FALSE, ...) { component <- match.arg(component) s <- summary(x) out <- list( conditional = .remove_backticks_from_string(rownames(s$coefs.long)), survival = .remove_backticks_from_string(rownames(s$coefs.surv)) ) .filter_parameters( out, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } #' @rdname find_parameters.averaging #' @export find_parameters.glmx <- function(x, component = c("all", "conditional", "extra"), flatten = FALSE, ...) { cf <- stats::coef(summary(x)) out <- list( conditional = .remove_backticks_from_string(names(cf$glm[, 1])), extra = .remove_backticks_from_string(rownames(cf$extra)) ) .filter_parameters( out, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } #' @export find_parameters.model_fit <- function(x, flatten = FALSE, ...) { find_parameters(x$fit, flatten = flatten, ...) } #' @export find_parameters.systemfit <- function(x, flatten = FALSE, ...) { cf <- stats::coef(x) f <- find_formula(x) system_names <- names(f) out <- lapply(system_names, function(i) { pattern <- paste0("^", i, "_(.*)") params <- grepl(pattern, names(cf)) gsub(pattern, "\\1", names(cf)[params]) }) names(out) <- system_names if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.bfsl <- function(x, flatten = FALSE, ...) 
{ cf <- stats::coef(x) out <- list(conditional = rownames(cf)) if (flatten) { unique(unlist(out)) } else { out } } insight/R/get_deviance.R0000644000175000017500000000525714077615664015043 0ustar nileshnilesh#' Model Deviance #' #' Returns model deviance (see `stats::deviance()`). #' #' @param ... Not used. #' @inheritParams get_residuals #' #' @return The model deviance. #' #' @details For GLMMs of class `glmerMod`, `glmmTMB` or `MixMod`, #' the *absolute unconditional* deviance is returned (see 'Details' in #' `?lme4::`merMod-class``), i.e. minus twice the log-likelihood. To get #' the *relative conditional* deviance (relative to a saturated model, #' conditioned on the conditional modes of random effects), use `deviance()`. #' The value returned `get_deviance()` usually equals the deviance-value #' from the `summary()`. #' #' @examples #' data(mtcars) #' x <- lm(mpg ~ cyl, data = mtcars) #' get_deviance(x) #' @export get_deviance <- function(x, ...) { UseMethod("get_deviance") } #' @rdname get_deviance #' @export get_deviance.default <- function(x, verbose = TRUE, ...) { dev <- tryCatch( { stats::deviance(x, ...) }, error = function(e) { NULL } ) if (is.null(dev)) { dev <- tryCatch( { x$deviance }, error = function(e) { NULL } ) } if (is.null(dev)) { dev <- tryCatch( { sum(get_residuals(x, weighted = TRUE, verbose = verbose)^2, na.rm = TRUE) }, error = function(e) { NULL } ) } dev } #' @export get_deviance.stanreg <- function(x, verbose = TRUE, ...) { info <- model_info(x) if (info$is_linear) { res <- get_residuals(x, weighted = TRUE, verbose = verbose) dev <- sum(res^2, na.rm = TRUE) } else if (info$is_binomial) { w <- get_weights(x, null_as_ones = TRUE, verbose = verbose) n <- n_obs(x) y <- get_response(x, verbose = FALSE) mu <- stats::fitted(x) dev_resids_fun <- x$family$dev.resids dev <- sum(dev_resids_fun(y, mu, w)) } else { stop("Could not compute deviance for this type of model") } # Not sure if it generalizes to other models though since deviance.glm # extracts it via x@deviance dev } #' @export get_deviance.lmerMod <- function(x, ...) { stats::deviance(x, REML = FALSE, ...) } #' @export get_deviance.lrm <- function(x, ...) { d <- stats::deviance(x, ...) d[length(d)] } #' @export get_deviance.glmmTMB <- function(x, ...) { tryCatch( { -2 * as.numeric(get_loglikelihood(x, ...)) }, error = function(e) { NULL } ) } #' @export get_deviance.glmerMod <- get_deviance.glmmTMB #' @export get_deviance.MixMod <- get_deviance.glmmTMB #' @export get_deviance.model_fit <- function(x, ...) { get_deviance(x$fit, ...) } insight/R/format_number.R0000644000175000017500000000536314077615664015264 0ustar nileshnilesh#' Convert number to words #' #' @note #' The code has been adapted from here https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r #' #' @param x Number. #' @param textual Return words. If `FALSE`, will run [format_value()]. #' @param ... Arguments to be passed to [format_value()] if `textual` is `FALSE`. #' #' #' @return A formatted string. #' @examples #' format_number(2) #' format_number(45) #' format_number(324.68765) #' @export format_number <- function(x, textual = TRUE, ...) { if (textual) { .format_number(x) } else { format_value(x, ...) 
} } #' @keywords internal .format_number <- function(x) { # https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r x <- round(x) # Disable scientific notation opts <- options(scipen = 100) on.exit(options(opts)) if (length(x) > 1) { return(.trim_ws_and(sapply(x, .format_character_number))) } .format_character_number(x) } ## Function by John Fox found here: ## http://tolstoy.newcastle.edu.au/R/help/05/04/2715.html ## Tweaks by AJH to add commas and "and" .format_character_number <- function(x, ones, tees) { ones <- c("", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine") names(ones) <- 0:9 teens <- c( "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", " seventeen", "eighteen", "nineteen" ) names(teens) <- 0:9 tens <- c( "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety" ) names(tens) <- 2:9 suffixes <- c("thousand", "million", "billion", "trillion") digits <- rev(strsplit(as.character(x), "")[[1]]) nDigits <- length(digits) if (nDigits == 1) { as.vector(ones[digits]) } else if (nDigits == 2) { if (x <= 19) { as.vector(teens[digits[1]]) } else { .trim_ws_and(paste(tens[digits[2]], Recall(as.numeric(digits[1])))) } } else if (nDigits == 3) { .trim_ws_and(paste(ones[digits[3]], "hundred and", Recall(.make_number(digits[2:1])))) } else { nSuffix <- ((nDigits + 2) %/% 3) - 1 if (nSuffix > length(suffixes)) { stop(paste(x, "is too large!")) } .trim_ws_and(paste( Recall(.make_number(digits[nDigits:(3 * nSuffix + 1)])), suffixes[nSuffix], ",", Recall(.make_number(digits[(3 * nSuffix):1])) )) } } .make_number <- function(...) { as.numeric(paste(..., collapse = "")) } .trim_ws_and <- function(text) { # Tidy leading/trailing whitespace, space before comma text <- gsub("^\ ", "", gsub("\ *$", "", gsub("\ ,", ",", text))) # Clear any trailing " and" text <- gsub(" and$", "", text) # Clear any trailing comma gsub("\ *,$", "", text) } insight/R/find_smooth.R0000644000175000017500000000156714164336250014723 0ustar nileshnilesh#' @title Find smooth terms from a model object #' @name find_smooth #' #' @description Return the names of smooth terms from a model object. #' #' @param x A (gam) model. #' @inheritParams find_predictors #' #' @return A character vector with the name(s) of the smooth terms. #' #' @examples #' if (require("mgcv")) { #' data(iris) #' model <- gam(Petal.Length ~ Petal.Width + s(Sepal.Length), data = iris) #' find_smooth(model) #' } #' @export find_smooth <- function(x, flatten = FALSE) { all_terms <- find_terms(x, flatten = TRUE, verbose = FALSE) patterns <- "^(s|ti|te|t2|gam::s|VGAM::s|mgcv::s|mgcv::ti|mgcv::te|mgcv::t2|brms::s|brms::t2)\\(" l <- .compact_list(list(smooth_terms = all_terms[grepl(patterns, all_terms)])) if (.is_empty_object(l)) { return(NULL) } if (flatten) { unique(unlist(l)) } else { l } } insight/R/link_function.R0000644000175000017500000003434114101711707015243 0ustar nileshnilesh#' @title Get link-function from model object #' @name link_function #' #' @description Returns the link-function from a model object. #' #' @inheritParams find_predictors #' @inheritParams find_formula #' @inheritParams link_inverse #' #' @return A function, describing the link-function from a model-object. #' For multivariate-response models, a list of functions is returned. 
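#'
#' As a quick, purely illustrative orientation: for a binomial GLM, the returned
#' function is the logit, so `link_function(m)(.3)` gives the same value as
#' `qlogis(.3)` - analogous to the Poisson example below, where the link is `log()`.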
#' #' @examples #' # example from ?stats::glm #' counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) #' outcome <- gl(3, 1, 9) #' treatment <- gl(3, 3) #' m <- glm(counts ~ outcome + treatment, family = poisson()) #' #' link_function(m)(.3) #' # same as #' log(.3) #' @export link_function <- function(x, ...) { UseMethod("link_function") } # Default method --------------------------- #' @export link_function.default <- function(x, ...) { if (inherits(x, "list") && .obj_has_name(x, "gam")) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) } tryCatch( { # get model family ff <- .gam_family(x) # return link function, if exists if ("linkfun" %in% names(ff)) { return(ff$linkfun) } # else, create link function from link-string if ("link" %in% names(ff)) { return(match.fun(ff$link)) } NULL }, error = function(x) { NULL } ) } # Gaussian family ------------------------------------------ #' @export link_function.lm <- function(x, ...) { stats::gaussian(link = "identity")$linkfun } #' @export link_function.lme <- link_function.lm #' @export link_function.systemfit <- link_function.lm #' @export link_function.lqmm <- link_function.lm #' @export link_function.lqm <- link_function.lm #' @export link_function.bayesx <- link_function.lm #' @export link_function.mixed <- link_function.lm #' @export link_function.truncreg <- link_function.lm #' @export link_function.censReg <- link_function.lm #' @export link_function.gls <- link_function.lm #' @export link_function.rq <- link_function.lm #' @export link_function.rqss <- link_function.lm #' @export link_function.crq <- link_function.lm #' @export link_function.crqs <- link_function.lm #' @export link_function.lmRob <- link_function.lm #' @export link_function.complmRob <- link_function.lm #' @export link_function.speedlm <- link_function.lm #' @export link_function.biglm <- link_function.lm #' @export link_function.lmrob <- link_function.lm #' @export link_function.lm_robust <- link_function.lm #' @export link_function.iv_robust <- link_function.lm #' @export link_function.aovlist <- link_function.lm #' @export link_function.felm <- link_function.lm #' @export link_function.feis <- link_function.lm #' @export link_function.ivreg <- link_function.lm #' @export link_function.ivFixed <- link_function.lm #' @export link_function.plm <- link_function.lm #' @export link_function.MANOVA <- link_function.lm #' @export link_function.RM <- link_function.lm #' @export link_function.afex_aov <- link_function.lm # General family --------------------------------- #' @export link_function.glm <- link_function.default #' @export link_function.speedglm <- link_function.default #' @export link_function.bigglm <- link_function.default #' @export link_function.brglm <- link_function.default #' @export link_function.cgam <- link_function.default # Logit link ------------------------ #' @export link_function.multinom <- function(x, ...) 
{ stats::make.link(link = "logit")$linkfun } #' @export link_function.BBreg <- link_function.multinom #' @export link_function.BBmm <- link_function.multinom #' @export link_function.gmnl <- link_function.multinom #' @export link_function.logistf <- link_function.multinom #' @export link_function.lrm <- link_function.multinom #' @export link_function.orm <- link_function.multinom #' @export link_function.cph <- link_function.multinom #' @export link_function.mlogit <- link_function.multinom #' @export link_function.coxph <- link_function.multinom #' @export link_function.coxr <- link_function.multinom #' @export link_function.survfit <- link_function.multinom #' @export link_function.coxme <- link_function.multinom #' @export link_function.riskRegression <- link_function.multinom #' @export link_function.comprisk <- link_function.multinom # Probit link ------------------------ #' @export link_function.ivprobit <- function(x, ...) { stats::make.link(link = "probit")$linkfun } # Log links ------------------------ #' @export link_function.zeroinfl <- function(x, ...) { stats::make.link("log")$linkfun } #' @export link_function.hurdle <- link_function.zeroinfl #' @export link_function.zerotrunc <- link_function.zeroinfl # Tobit links --------------------------------- #' @export link_function.tobit <- function(x, ...) { .make_tobit_family(x)$linkfun } #' @export link_function.crch <- link_function.tobit #' @export link_function.survreg <- link_function.tobit #' @export link_function.psm <- link_function.tobit #' @export link_function.flexsurvreg <- function(x, ...) { dist <- parse(text = .safe_deparse(x$call))[[1]]$dist .make_tobit_family(x, dist)$linkfun } # Ordinal and cumulative links -------------------------- #' @export link_function.mvord <- function(x, ...) { link_name <- x$rho$link$name l <- stats::make.link(link = ifelse(link_name == "mvprobit", "probit", "logit")) l$linkfun } #' @export link_function.clm <- function(x, ...) { stats::make.link(link = .get_ordinal_link(x))$linkfun } #' @export link_function.clm2 <- link_function.clm #' @export link_function.clmm <- link_function.clm #' @export link_function.mixor <- link_function.clm # mfx models ------------------------------------------------------ #' @rdname link_function #' @export link_function.betamfx <- function(x, what = c("mean", "precision"), ...) { what <- match.arg(what) link_function.betareg(x$fit, what = what, ...) } #' @export link_function.betaor <- link_function.betamfx #' @export link_function.logitmfx <- function(x, ...) { link_function(x$fit, ...) } #' @export link_function.poissonmfx <- link_function.logitmfx #' @export link_function.negbinmfx <- link_function.logitmfx #' @export link_function.probitmfx <- link_function.logitmfx #' @export link_function.negbinirr <- link_function.logitmfx #' @export link_function.poissonirr <- link_function.logitmfx #' @export link_function.logitor <- link_function.logitmfx #' @export link_function.model_fit <- link_function.logitmfx # Other models ----------------------------- #' @export link_function.Rchoice <- function(x, ...) { stats::make.link(link = x$link)$linkfun } #' @export link_function.merModList <- function(x, ...) { link_function.default(x[[1]], ...) } #' @export link_function.mipo <- function(x, ...) { models <- eval(x$call$object) link_function(models$analyses[[1]]) } #' @export link_function.mira <- function(x, ...) { # installed? check_if_installed("mice") link_function(mice::pool(x), ...) } #' @export link_function.robmixglm <- function(x, ...) 
{ switch(tolower(x$family), gaussian = stats::make.link(link = "identity")$linkfun, binomial = stats::make.link(link = "logit")$linkfun, gamma = stats::make.link(link = "inverse")$linkfun, poisson = , truncpoisson = stats::make.link(link = "log")$linkfun, stats::make.link(link = "identity")$linkfun ) } #' @export link_function.MCMCglmm <- function(x, ...) { switch(x$Residual$original.family, "cengaussian" = , "gaussian" = stats::gaussian(link = "identity")$linkfun, "categorical" = , "multinomial" = , "zibinomial" = , "ordinal" = stats::make.link("logit")$linkfun, "poisson" = , "cenpoisson" = , "zipoisson" = , "zapoisson" = , "ztpoisson" = , "hupoisson" = stats::make.link("log")$linkfun ) } #' @export link_function.cglm <- function(x, ...) { link <- parse(text = .safe_deparse(x$call))[[1]]$link method <- parse(text = .safe_deparse(x$call))[[1]]$method if (!is.null(method) && method == "clm") { link <- "identity" } stats::make.link(link = link)$linkfun } #' @export link_function.fixest <- function(x, ...) { if (is.null(x$family)) { if (!is.null(x$method) && x$method == "feols") { stats::gaussian(link = "identity")$linkfun } } else if (inherits(x$family, "family")) { x$family$linkfun } else { link <- switch(x$family, "poisson" = , "negbin" = "log", "logit" = "logit", "gaussian" = "identity" ) stats::make.link(link)$linkfun } } #' @export link_function.feglm <- link_function.fixest #' @export link_function.glmx <- function(x, ...) { x$family$glm$linkfun } #' @export link_function.bife <- function(x, ...) { x$family$linkfun } #' @export link_function.cpglmm <- function(x, ...) { f <- .get_cplm_family(x) f$linkfun } #' @export link_function.cpglm <- link_function.cpglmm #' @export link_function.zcpglm <- link_function.cpglmm #' @export link_function.bcplm <- link_function.cpglmm #' @export link_function.gam <- function(x, ...) { lf <- tryCatch( { # get model family ff <- .gam_family(x) # return link function, if exists if ("linkfun" %in% names(ff)) { return(ff$linkfun) } # else, create link function from link-string if ("link" %in% names(ff)) { return(match.fun(ff$link)) } NULL }, error = function(x) { NULL } ) if (is.null(lf)) { mi <- .gam_family(x) if (.obj_has_name(mi, "linfo")) { if (.obj_has_name(mi$linfo, "linkfun")) { lf <- mi$linfo$linkfun } else { lf <- mi$linfo[[1]]$linkfun } } } lf } #' @export link_function.glimML <- function(x, ...) { stats::make.link(link = x@link)$linkfun } #' @export link_function.glmmadmb <- function(x, ...) { x$linkfun } #' @export link_function.glmm <- function(x, ...) { switch(tolower(x$family.glmm$family.glmm), "bernoulli.glmm" = , "binomial.glmm" = stats::make.link("logit")$linkfun, "poisson.glmm" = stats::make.link("log")$linkfun, stats::gaussian(link = "identity")$linkfun ) } #' @rdname link_function #' @export link_function.gamlss <- function(x, what = c("mu", "sigma", "nu", "tau"), ...) { what <- match.arg(what) faminfo <- get(x$family[1], asNamespace("gamlss"))() switch(what, "mu" = faminfo$mu.linkfun, "sigma" = faminfo$sigma.linkfun, "nu" = faminfo$nu.linkfun, "tau" = faminfo$tau.linkfun, faminfo$mu.linkfun ) } #' @export link_function.gamm <- function(x, ...) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) NextMethod() } #' @export link_function.bamlss <- function(x, ...) { flink <- stats::family(x)$links[1] tryCatch( { stats::make.link(flink)$linkfun }, error = function(e) { print_colour("\nCould not find appropriate link-function.\n", "red") } ) } #' @export link_function.LORgee <- function(x, ...)
{ if (grepl(pattern = "logit", x = x$link, fixed = TRUE)) { link <- "logit" } else if (grepl(pattern = "probit", x = x$link, fixed = TRUE)) { link <- "probit" } else if (grepl(pattern = "cauchit", x = x$link, fixed = TRUE)) { link <- "cauchit" } else if (grepl(pattern = "cloglog", x = x$link, fixed = TRUE)) { link <- "cloglog" } else { link <- "logit" } stats::make.link(link)$linkfun } #' @export link_function.vgam <- function(x, ...) { x@family@linkfun } #' @export link_function.vglm <- function(x, ...) { x@family@linkfun } #' @export link_function.svy_vglm <- function(x, ...) { link_function(x$fit) } #' @export link_function.polr <- function(x, ...) { link <- switch(x$method, logistic = "logit", probit = "probit", "log" ) stats::make.link(link)$linkfun } #' @export link_function.svyolr <- function(x, ...) { link <- switch(x$method, logistic = "logit", probit = "probit", "log" ) stats::make.link(link)$linkfun } #' @rdname link_function #' @export link_function.betareg <- function(x, what = c("mean", "precision"), ...) { what <- match.arg(what) switch(what, "mean" = x$link$mean$linkfun, "precision" = x$link$precision$linkfun ) } #' @rdname link_function #' @export link_function.DirichletRegModel <- function(x, what = c("mean", "precision"), ...) { what <- match.arg(what) if (x$parametrization == "common") { stats::make.link("logit")$linkfun } else { switch(what, "mean" = stats::make.link("logit")$linkfun, "precision" = stats::make.link("log")$linkfun ) } } #' @export link_function.gbm <- function(x, ...) { switch(x$distribution$name, laplace = , tdist = , gaussian = stats::gaussian(link = "identity")$linkfun, poisson = stats::poisson(link = "log")$linkfun, huberized = , adaboost = , coxph = , bernoulli = stats::make.link("logit")$linkfun ) } #' @export link_function.stanmvreg <- function(x, ...) { fam <- stats::family(x) lapply(fam, function(.x) .x$linkfun) } #' @export link_function.brmsfit <- function(x, ...) { fam <- stats::family(x) if (is_multivariate(x)) { lapply(fam, .brms_link_fun) } else { .brms_link_fun(fam) } } # helper ----------------------- .brms_link_fun <- function(fam) { # do we have custom families? 
if (!is.null(fam$family) && (is.character(fam$family) && fam$family == "custom")) { il <- stats::make.link(fam$link)$linkfun } else { if ("linkfun" %in% names(fam)) { il <- fam$linkfun } else if ("link" %in% names(fam) && is.character(fam$link)) { il <- stats::make.link(fam$link)$linkfun } else { ff <- get(fam$family, asNamespace("stats")) il <- ff(fam$link)$linkfun } } il } insight/R/colour_tools.R0000644000175000017500000000605414022633424015125 0ustar nileshnilesh.rstudio_with_ansi_support <- function() { if (Sys.getenv("RSTUDIO", "") == "") { return(FALSE) } if ((cols <- Sys.getenv("RSTUDIO_CONSOLE_COLOR", "")) != "" && !is.na(as.numeric(cols))) { return(TRUE) } requireNamespace("rstudioapi", quietly = TRUE) && rstudioapi::isAvailable() && rstudioapi::hasFun("getConsoleHasColor") } .supports_color <- function() { enabled <- getOption("crayon.enabled") if (!is.null(enabled)) { return(isTRUE(enabled)) } if (.rstudio_with_ansi_support() && sink.number() == 0) { return(TRUE) } if (!isatty(stdout())) { return(FALSE) } if (Sys.info()["sysname"] == "windows") { if (Sys.getenv("ConEmuANSI") == "ON") { return(TRUE) } if (Sys.getenv("CMDER_ROOT") != "") { return(TRUE) } return(FALSE) } if ("COLORTERM" %in% names(Sys.getenv())) { return(TRUE) } if (Sys.getenv("TERM") == "dumb") { return(FALSE) } grepl("^screen|^xterm|^vt100|color|ansi|cygwin|linux", Sys.getenv("TERM"), ignore.case = TRUE, perl = TRUE ) } .blue <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[34m", x[!is.na(x)], "\033[39m") } x } .bold <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[1m", x[!is.na(x)], "\033[22m") } x } .italic <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[3m", x[!is.na(x)], "\033[23m") } x } .red <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[31m", x[!is.na(x)], "\033[39m") } x } .green <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[32m", x[!is.na(x)], "\033[39m") } x } .black <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[30m", x[!is.na(x)], "\033[39m") } x } .white <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[37m", x[!is.na(x)], "\033[39m") } x } .yellow <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[33m", x[!is.na(x)], "\033[39m") } x } .violet <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[35m", x[!is.na(x)], "\033[39m") } x } .cyan <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[36m", x[!is.na(x)], "\033[39m") } x } .grey <- function(x) { if (.supports_color()) { x[!is.na(x)] <- paste0("\033[90m", x[!is.na(x)], "\033[39m") } x } .colour <- function(colour = "red", x) { switch(colour, red = .red(x), yellow = .yellow(x), green = .green(x), blue = .blue(x), black = .black(x), white = .white(x), violet = .violet(x), cyan = .cyan(x), grey = .grey(x), bold = .bold(x), italic = .italic(x), warning(paste0("`color` ", colour, " not yet supported.")) ) } .is_valid_colour <- function(colour) { colour %in% c("red", "yellow", "green", "blue", "violet", "cyan", "grey", "bold", "italic") } insight/R/is_mixed_model.R0000644000175000017500000000150514077615665015400 0ustar nileshnilesh#' @title Checks if a model is a mixed effects model #' @name is_mixed_model #' #' @description Small helper that checks if a model is a mixed effects model, #' i.e. if it the model has random effects. #' #' @param x A model object. #' #' @return A logical, `TRUE` if `x` is a mixed model. 
#' #' @examples #' data(mtcars) #' model <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' is_mixed_model(model) #' #' if (require("lme4")) { #' data(sleepstudy) #' model <- lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) #' is_mixed_model(model) #' } #' @export is_mixed_model <- function(x) { UseMethod("is_mixed_model") } #' @export is_mixed_model.default <- function(x) { !is.null(find_random(x)) } #' @export is_mixed_model.afex_aov <- function(x) { as.logical(length(attr(x, "within"))) } insight/R/null_model.R0000644000175000017500000000427314077615665014556 0ustar nileshnilesh#' @title Compute intercept-only model for regression models #' @name null_model #' #' @description This function computes the null-model (i.e. `(y ~ 1)`) of #' a model. For mixed models, the null-model takes random effects into account. #' #' @param model A (mixed effects) model. #' @param verbose Toggle off warnings. #' @param ... Arguments passed to or from other methods. #' #' @return The null-model of `x` #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' m <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' summary(m) #' summary(null_model(m)) #' } #' @export null_model <- function(model, verbose = TRUE, ...) { if (is_mixed_model(model)) { .null_model_mixed(model, verbose) } else if (inherits(model, "clm2")) { stats::update(model, location = ~1, scale = ~1) } else if (inherits(model, "multinom")) { stats::update(model, ~1, trace = FALSE) } else { stats::update(model, ~1, ...) } } .null_model_mixed <- function(model, verbose = TRUE) { if (inherits(model, "MixMod")) { nullform <- stats::as.formula(paste(find_response(model), "~ 1")) null.model <- stats::update(model, fixed = nullform) } else if (inherits(model, "cpglmm")) { nullform <- find_formula(model, verbose = FALSE)[["random"]] null.model <- stats::update(model, nullform) } else { f <- stats::formula(model) resp <- find_response(model) re.terms <- paste0("(", sapply(.findbars(f), .safe_deparse), ")") nullform <- stats::reformulate(re.terms, response = resp) null.model <- tryCatch( { stats::update(model, nullform) }, error = function(e) { msg <- e$message if (verbose) { if (grepl("(^object)(.*)(not found$)", msg)) { print_color("Can't calculate null-model. Probably the data that was used to fit the model cannot be found.\n", "red") } else if (grepl("^could not find function", msg)) { print_color("Can't calculate null-model. Probably you need to load the package that was used to fit the model.\n", "red") } } return(NULL) } ) } null.model } insight/R/format_table.R0000644000175000017500000005375114144234777015064 0ustar nileshnilesh#' @title Parameter table formatting #' @name format_table #' #' @description This functions takes a data frame with model parameters as input #' and formats certain columns into a more readable layout (like collapsing #' separate columns for lower and upper confidence interval values). Furthermore, #' column names are formatted as well. Note that `format_table()` #' converts all columns into character vectors! #' #' @param x A data frame of model's parameters, as returned by various functions #' of the **easystats**-packages. May also be a result from #' `broom::tidy()`. #' @param pretty_names Return "pretty" (i.e. more human readable) parameter #' names. #' @param digits,ci_digits,p_digits,rope_digits Number of digits for rounding or #' significant figures. May also be `"signif"` to return significant #' figures or `"scientific"` to return scientific notation. 
Control the #' number of digits by adding the value as suffix, e.g. `digits = "scientific4"` #' to have scientific notation with 4 decimal places, or `digits = "signif5"` #' for 5 significant figures (see also [signif()]). #' @param ci_width Minimum width of the returned string for confidence #' intervals. If not `NULL` and width is larger than the string's length, #' leading whitespaces are added to the string. If `width="auto"`, width #' will be set to the length of the longest string. #' @param ci_brackets Logical, if `TRUE` (default), CI-values are #' encompassed in square brackets (else in parentheses). #' @param preserve_attributes Logical, if `TRUE`, preserves all attributes #' from the input data frame. #' @param ... Arguments passed to or from other methods. #' @inheritParams format_p #' @inheritParams format_value #' @inheritParams get_data #' #' @seealso Vignettes [Formatting, printing and exporting tables](https://easystats.github.io/insight/articles/display.html) #' and [Formatting model parameters](https://easystats.github.io/parameters/articles/model_parameters_formatting.html). #' #' @examples #' format_table(head(iris), digits = 1) #' #' if (require("parameters")) { #' x <- model_parameters(lm(Sepal.Length ~ Species * Sepal.Width, data = iris)) #' as.data.frame(format_table(x)) #' as.data.frame(format_table(x, p_digits = "scientific")) #' } #' \donttest{ #' if (require("rstanarm", warn.conflicts = FALSE) && #' require("parameters", , warn.conflicts = FALSE)) { #' model <- stan_glm(Sepal.Length ~ Species, data = iris, refresh = 0, seed = 123) #' x <- model_parameters(model, ci = c(0.69, 0.89, 0.95)) #' as.data.frame(format_table(x)) #' } #' } #' @return A data frame. Note that `format_table()` converts all columns #' into character vectors! #' @export format_table <- function(x, pretty_names = TRUE, stars = FALSE, digits = 2, ci_width = "auto", ci_brackets = TRUE, ci_digits = 2, p_digits = 3, rope_digits = 2, zap_small = FALSE, preserve_attributes = FALSE, verbose = TRUE, ...) 
{ # sanity check if (is.null(x) || (is.data.frame(x) && nrow(x) == 0)) { if (isTRUE(verbose)) { message("Can't format table, data frame is empty.") } return(NULL) } # check if user supplied digits attributes if (missing(digits)) digits <- .additional_arguments(x, "digits", 2) if (missing(ci_digits)) ci_digits <- .additional_arguments(x, "ci_digits", 2) if (missing(p_digits)) p_digits <- .additional_arguments(x, "p_digits", 3) if (missing(rope_digits)) rope_digits <- .additional_arguments(x, "rope_digits", 2) att <- attributes(x) x <- as.data.frame(x, stringsAsFactors = FALSE) # Format parameters names ---- if (pretty_names && !is.null(att$pretty_names)) { # remove strings with NA names att$pretty_names <- att$pretty_names[!is.na(names(att$pretty_names))] if (length(att$pretty_names) != length(x$Parameter)) { match_pretty_names <- stats::na.omit(match(names(att$pretty_names), x$Parameter)) if (length(match_pretty_names)) { x$Parameter[match_pretty_names] <- att$pretty_names[x$Parameter[match_pretty_names]] } } else { match_pretty_names <- att$pretty_names[x$Parameter] if (!anyNA(match_pretty_names)) { x$Parameter <- att$pretty_names[x$Parameter] } else { match_pretty_names <- stats::na.omit(match(names(att$pretty_names), x$Parameter)) if (length(match_pretty_names)) { x$Parameter[match_pretty_names] <- att$pretty_names[x$Parameter[match_pretty_names]] } } } } # Format specific columns ---- if ("n_Obs" %in% names(x)) x$n_Obs <- format_value(x$n_Obs, protect_integers = TRUE) if ("n_Missing" %in% names(x)) x$n_Missing <- format_value(x$n_Missing, protect_integers = TRUE) # Format df columns ---- x <- .format_df_columns(x) # Format special anova columns ---- x <- .format_aov_columns(x) # Format frequentist stats ---- x <- .format_freq_stats(x) # P values ---- x <- .format_p_values(x, stars = stars, p_digits = p_digits) # Main CI and Prediction Intervals ---- x <- .format_main_ci_columns(x, att, ci_digits, ci_width, ci_brackets, zap_small) x <- .format_main_ci_columns(x, att, ci_digits, ci_width, ci_brackets, zap_small, ci_name = "PI") x <- .format_broom_ci_columns(x, ci_digits, ci_width, ci_brackets, zap_small) # Other CIs ---- out <- .format_other_ci_columns(x, att, ci_digits, ci_width, ci_brackets, zap_small) x <- out$x other_ci_colname <- out$other_ci_colname # Misc / Effect Sizes names(x)[names(x) == "Cohens_d"] <- "Cohen's d" names(x)[names(x) == "Cramers_v"] <- "Cramer's V" names(x)[names(x) == "phi_adjusted"] <- "phi (adj.)" names(x)[names(x) == "Cramers_v_adjusted"] <- "Cramer's V (adj.)" # Standardized ---- x <- .format_std_columns(x, other_ci_colname, digits, zap_small) # Partial ---- x[names(x)[grepl("_partial$", names(x))]] <- format_value(x[names(x)[grepl("_partial$", names(x))]], zap_small = zap_small) names(x)[grepl("_partial$", names(x))] <- paste0(gsub("_partial$", "", names(x)[grepl("_partial$", names(x))]), " (partial)") # metafor ---- if ("Weight" %in% names(x)) x$Weight <- format_value(x$Weight, protect_integers = TRUE) # Bayesian --- x <- .format_bayes_columns( x, stars, rope_digits = rope_digits, zap_small = zap_small, ci_width = ci_width, ci_brackets = ci_brackets ) # rename performance columns x <- .format_performance_columns(x) # Format remaining columns other_cols <- names(x)[sapply(x, is.numeric)] x[other_cols[other_cols %in% names(x)]] <- format_value(x[other_cols[other_cols %in% names(x)]], digits = digits, zap_small = zap_small) # SEM links if (all(c("To", "Operator", "From") %in% names(x))) { x$Link <- paste(x$To, x$Operator, x$From) col_position <- 
which(names(x) == "To") x <- x[c(names(x)[0:(col_position - 1)], "Link", names(x)[col_position:(length(names(x)) - 1)])] # Replace at initial position x$To <- x$Operator <- x$From <- NULL } x[] <- lapply(x, as.character) # restore attributes if (isTRUE(preserve_attributes)) { attributes(x) <- utils::modifyList(att, attributes(x)) } x } #' @rdname format_table #' @export parameters_table <- format_table # sub-routines --------------- .format_p_values <- function(x, stars, p_digits) { if ("p" %in% names(x)) { x$p <- format_p(x$p, stars = stars, name = NULL, missing = "", digits = p_digits) x$p <- format(x$p, justify = "left") } if ("p.value" %in% names(x)) { x$p.value <- format_p(x$p.value, stars = stars, name = NULL, missing = "", digits = p_digits) x$p.value <- format(x$p.value, justify = "left") } for (stats in c("p_CochransQ", "p_Omnibus", "p_Chi2", "p_Baseline", "p_RMSEA", "p_ROPE", "p_MAP", "Wu_Hausman_p", "Sargan_p", "p_Omega2", "p_LR")) { if (stats %in% names(x)) { x[[stats]] <- format_p(x[[stats]], stars = stars, name = NULL, missing = "", digits = p_digits) x[[stats]] <- format(x[[stats]], justify = "left") p_name <- gsub("(.*)_p$", "\\1", gsub("^p_(.*)", "\\1", stats)) names(x)[names(x) == stats] <- paste0("p (", p_name, ")") } } x } .format_df_columns <- function(x) { # generic df if ("df" %in% names(x)) x$df <- format_value(x$df, protect_integers = TRUE) # residual df if ("df_residual" %in% names(x)) x$df_residual <- format_value(x$df_residual, protect_integers = TRUE) names(x)[names(x) == "df_residual"] <- "df" # df for errors if ("df_error" %in% names(x)) { x$df_error <- format_value(x$df_error, protect_integers = TRUE) if (!("df" %in% names(x))) { names(x)[names(x) == "df_error"] <- "df" } else { names(x)[names(x) == "df_error"] <- "df (error)" } } # denominator and numerator df if ("df_num" %in% names(x)) { x$df_num <- format_value(x$df_num, protect_integers = TRUE) names(x)[names(x) == "df_num"] <- "df (num.)" } if ("df_denom" %in% names(x)) { x$df_denom <- format_value(x$df_denom, protect_integers = TRUE) names(x)[names(x) == "df_denom"] <- "df (denom.)" } x } .format_aov_columns <- function(x) { if ("Deviance_error" %in% names(x)) { x$Deviance_error <- format_value(x$Deviance_error, protect_integers = TRUE) names(x)[names(x) == "Deviance_error"] <- "Deviance (error)" } if ("Power" %in% names(x)) { x$Power <- format_value(x$Power, as_percent = TRUE, digits = 1) } x } .format_freq_stats <- function(x) { for (stats in c("t", "Chi2")) { if (stats %in% names(x) && "df" %in% names(x)) { if (is.character(x$df)) { x$df[x$df == ""] <- NA_character_ } df <- stats::na.omit(unique(x$df)) if (length(df) == 1 && !all(is.infinite(df))) { names(x)[names(x) == stats] <- paste0(stats, "(", df, ")") x$df <- NULL } } else if (stats %in% names(x) && "df_error" %in% names(x)) { if (is.character(x$df_error)) { x$df_error[x$df_error == ""] <- NA_character_ } df <- stats::na.omit(unique(x$df_error)) if (length(df) == 1 && !all(is.infinite(df))) { names(x)[names(x) == stats] <- paste0(stats, "(", df, ")") x$df_error <- NULL } } } for (stats in c("Baseline", "Chi2")) { df_col <- paste0(stats, "_df") if (stats %in% names(x) && df_col %in% names(x)) { df <- stats::na.omit(unique(x[[df_col]])) if (length(df) == 1 && !all(is.infinite(df))) { names(x)[names(x) == stats] <- paste0(stats, "(", df, ")") x[[df_col]] <- NULL } } } if ("Success" %in% names(x)) x$Success <- format_value(x$Success, protect_integers = TRUE) if ("Trials" %in% names(x)) x$Trials <- format_value(x$Trials, protect_integers = TRUE) x 
} .format_main_ci_columns <- function(x, att, ci_digits, ci_width = "auto", ci_brackets = TRUE, zap_small, ci_name = "CI") { # Main CI ci_low <- names(x)[grep(paste0("^", ci_name, "_low"), names(x))] ci_high <- names(x)[grep(paste0("^", ci_name, "_high"), names(x))] ci_value <- att[["ci"]] # CI or SI? ci_method <- att[["ci_method"]] if (!is.null(ci_method) && all(tolower(ci_method) == "si")) { # return when we have no CI columns if (length(ci_low) == 0 || length(ci_high) == 0) { return(x) } # Support Intervals if (is.null(ci_value) && !is.null(x$CI)) { ci_value <- unique(x$CI)[1] } if (is.null(ci_value)) { ci_colname <- "SI" } else { ci_colname <- sprintf("BF = %.5g SI", ci_value) } x[[ci_low[1]]] <- format_ci(x[[ci_low[1]]], x[[ci_high[1]]], ci = NULL, digits = ci_digits, width = ci_width, brackets = ci_brackets, zap_small = zap_small) ci_position <- which(names(x) == ci_low[1]) colnames(x)[ci_position] <- ci_colname # remove upper CI column ci_position <- which(names(x) == ci_high[1]) x[[ci_position]] <- NULL x$CI <- NULL } else { # regular CIs if (length(ci_low) >= 1 && length(ci_low) == length(ci_high)) { if (!is.null(ci_value)) { if (length(unique(stats::na.omit(ci_value))) > 1) { ci_colname <- sprintf("%g%% %s", unique(stats::na.omit(ci_value)) * 100, ci_name) } else { ci_colname <- sprintf("%g%% %s", unique(stats::na.omit(ci_value))[1] * 100, ci_name) } x$CI <- NULL } else if (!is.null(x$CI)) { ci_colname <- sprintf("%g%% %s", unique(stats::na.omit(x$CI))[1] * 100, ci_name) x$CI <- NULL } else { # all these edge cases... for some objects in "parameters::model_parameters()", # when we have multiple ci-levels, column names can be "CI_low_0.8" or # "CI_low_0.95" etc. - this is handled here, if we have no ci-attribute if (grepl(paste0(ci_name, "_low_(\\d)\\.(\\d)"), ci_low) && grepl(paste0(ci_name, "_high_(\\d)\\.(\\d)"), ci_high)) { ci_levels <- as.numeric(gsub(paste0(ci_name, "_low_(\\d)\\.(\\d)"), "\\1.\\2", ci_low)) ci_colname <- sprintf("%g%% %s", unique(stats::na.omit(ci_levels)) * 100, ci_name) x$CI <- NULL } else { ci_colname <- ci_name } } # Get characters to align the CI for (i in 1:length(ci_colname)) { x[[ci_low[i]]] <- format_ci(x[[ci_low[i]]], x[[ci_high[i]]], ci = NULL, digits = ci_digits, width = ci_width, brackets = ci_brackets, zap_small = zap_small) # rename lower CI into final CI column ci_position <- which(names(x) == ci_low[i]) colnames(x)[ci_position] <- ci_colname[i] # remove upper CI column ci_position <- which(names(x) == ci_high[i]) x[[ci_position]] <- NULL } } } x } .format_other_ci_columns <- function(x, att, ci_digits, ci_width = "auto", ci_brackets = TRUE, zap_small) { other_ci_low <- names(x)[grep("_CI_low$", names(x))] other_ci_high <- names(x)[grep("_CI_high$", names(x))] if (length(other_ci_low) >= 1 && length(other_ci_low) == length(other_ci_high)) { other <- unlist(strsplit(other_ci_low, "_CI_low$")) # CI percentage if (length(other) == 1 && !is.null(att[[paste0("ci_", other)]])) { other_ci_colname <- sprintf("%s %g%% CI", other, unique(stats::na.omit(att[[paste0("ci_", other)]])) * 100) } else if (!is.null(att[["ci"]])) { other_ci_colname <- sprintf("%s %g%% CI", other, unique(stats::na.omit(att[["ci"]])) * 100) } else if (length(other == 1) && paste0(other, "_CI") %in% colnames(x)) { other_ci_colname <- sprintf("%s %g%% CI", other, unique(stats::na.omit(x[[paste0(other, "_CI")]])) * 100) } else { other_ci_colname <- paste(other, " CI") } # Get characters to align the CI for (i in 1:length(other_ci_colname)) { x[[other_ci_low[i]]] <- 
format_ci(x[[other_ci_low[i]]], x[[other_ci_high[i]]], ci = NULL, digits = ci_digits, width = ci_width, brackets = ci_brackets, zap_small = zap_small) # rename lower CI into final CI column other_ci_position <- which(names(x) == other_ci_low[i]) colnames(x)[other_ci_position] <- other_ci_colname[i] # remove upper CI column other_ci_position <- which(names(x) == other_ci_high[i]) x[[other_ci_position]] <- NULL } # remove columns with CI level for (i in other) { x[[paste0(i, "_CI")]] <- NULL } } else { other_ci_colname <- c() } list(x = x, other_ci_colname = other_ci_colname) } .format_broom_ci_columns <- function(x, ci_digits, ci_width = "auto", ci_brackets = TRUE, zap_small) { if (!any(grepl("conf.low", names(x), fixed = TRUE))) { return(x) } if (!any(grepl("conf.high", names(x), fixed = TRUE))) { return(x) } tryCatch( { ci_low <- names(x)[which(names(x) == "conf.low")] ci_high <- names(x)[which(names(x) == "conf.high")] x$conf.int <- format_ci(x[[ci_low]], x[[ci_high]], ci = NULL, digits = ci_digits, width = ci_width, brackets = ci_brackets, zap_small = zap_small) x$conf.low <- NULL x$conf.high <- NULL x }, error = function(e) { x } ) } .format_rope_columns <- function(x, ci_width = "auto", ci_brackets = TRUE, zap_small) { if (all(c("ROPE_low", "ROPE_high") %in% names(x))) { x$ROPE_low <- format_ci(x$ROPE_low, x$ROPE_high, ci = NULL, width = ci_width, brackets = ci_brackets, zap_small = zap_small) x$ROPE_high <- NULL names(x)[names(x) == "ROPE_low"] <- "ROPE" x$ROPE_CI <- NULL } x } .format_std_columns <- function(x, other_ci_colname, digits, zap_small) { std_cols <- names(x)[grepl("Std_", names(x))] if (length(std_cols) == 0) { return(x) } std_cis <- NULL if (!is.null(other_ci_colname)) { std_cis <- std_cols[std_cols %in% other_ci_colname] std_cols <- std_cols[!std_cols %in% other_ci_colname] } x[std_cols] <- format_value(x[std_cols], digits = digits, zap_small = zap_small) names(x)[names(x) == std_cols] <- .replace_words(std_cols, "Std_Coefficient", "Std. Coef.") names(x)[names(x) == std_cols] <- .replace_words(std_cols, "Std_Median", "Std. Median") names(x)[names(x) == std_cols] <- .replace_words(std_cols, "Std_Mean", "Std. Mean") names(x)[names(x) == std_cols] <- .replace_words(std_cols, "Std_MAP", "Std. MAP") if (!is.null(std_cis) && length(std_cis)) { # std_cis_replacement <- .replace_words(std_cis, "^Std_", "Std. ") std_cis_replacement <- gsub("^Std_Coefficient(.*)", "Std. 
Coef.\\1", std_cis) names(x)[names(x) == std_cis] <- std_cis_replacement } x } .format_bayes_columns <- function(x, stars, rope_digits = 2, zap_small, ci_width = "auto", ci_brackets = TRUE) { # Indices if ("BF" %in% names(x)) x$BF <- format_bf(x$BF, name = NULL, stars = stars) if ("log_BF" %in% names(x)) { x$BF <- format_bf(exp(x$log_BF), name = NULL, stars = stars) x$log_BF <- NULL } if ("pd" %in% names(x)) x$pd <- format_pd(x$pd, name = NULL, stars = stars) if ("Rhat" %in% names(x)) x$Rhat <- format_value(x$Rhat, digits = 3) if ("ESS" %in% names(x)) x$ESS <- round(x$ESS) if ("ROPE_Equivalence" %in% names(x)) names(x)[names(x) == "ROPE_Equivalence"] <- "Equivalence (ROPE)" if ("ROPE_Percentage" %in% names(x)) { x$ROPE_Percentage <- format_rope(x$ROPE_Percentage, name = NULL, digits = rope_digits) names(x)[names(x) == "ROPE_Percentage"] <- "% in ROPE" } x <- .format_rope_columns(x, ci_width, ci_brackets, zap_small) # Priors if ("Prior_Location" %in% names(x)) x$Prior_Location <- format_value(x$Prior_Location, protect_integers = TRUE) if ("Prior_Scale" %in% names(x)) x$Prior_Scale <- format_value(x$Prior_Scale, protect_integers = TRUE) if ("Prior_Distribution" %in% names(x)) x$Prior_Distribution <- ifelse(is.na(x$Prior_Distribution), "", x$Prior_Distribution) if ("Prior_df" %in% names(x)) x$Prior_df <- format_value(x$Prior_df, protect_integers = TRUE) if (all(c("Prior_Distribution", "Prior_df") %in% names(x))) { missing_df <- is.na(x$Prior_df) | x$Prior_df == "" x$Prior_Distribution[!missing_df] <- paste0(x$Prior_Distribution[!missing_df], " (df=", x$Prior_df[!missing_df], ")") } if (all(c("Prior_Distribution", "Prior_Location", "Prior_Scale") %in% names(x))) { x$Prior <- paste0( .capitalize(x$Prior_Distribution), " (", x$Prior_Location, " +- ", x$Prior_Scale, ")" ) x$Prior <- ifelse(x$Prior == " ( +- )", "", x$Prior) # Remove empty x$Prior <- trimws(gsub("( +- )", "", x$Prior, fixed = TRUE)) col_position <- which(names(x) == "Prior_Distribution") x <- x[c(names(x)[0:(col_position - 1)], "Prior", names(x)[col_position:(length(names(x)) - 1)])] # Replace at initial position x$Prior_Distribution <- x$Prior_Location <- x$Prior_Scale <- x$Prior_df <- NULL } x } .format_performance_columns <- function(x) { if ("R2_adjusted" %in% names(x)) names(x)[names(x) == "R2_adjusted"] <- "R2 (adj.)" if ("R2_conditional" %in% names(x)) names(x)[names(x) == "R2_conditional"] <- "R2 (cond.)" if ("R2_marginal" %in% names(x)) names(x)[names(x) == "R2_marginal"] <- "R2 (marg.)" if ("R2_Tjur" %in% names(x)) names(x)[names(x) == "R2_Tjur"] <- "Tjur's R2" if ("R2_Nagelkerke" %in% names(x)) names(x)[names(x) == "R2_Nagelkerke"] <- "Nagelkerke's R2" if ("Performance_Score" %in% names(x)) names(x)[names(x) == "Performance_Score"] <- "Performance-Score" if ("Wu_Hausman" %in% names(x)) names(x)[names(x) == "Wu_Hausman"] <- "Wu & Hausman" if ("p(Wu_Hausman)" %in% names(x)) names(x)[names(x) == "p(Wu_Hausman)"] <- "p(Wu & Hausman)" if ("AIC_wt" %in% names(x)) names(x)[names(x) == "AIC_wt"] <- "AIC weights" if ("BIC_wt" %in% names(x)) names(x)[names(x) == "BIC_wt"] <- "BIC weights" if ("AICc_wt" %in% names(x)) names(x)[names(x) == "AICc_wt"] <- "AICc weights" if ("WAIC_wt" %in% names(x)) names(x)[names(x) == "WAIC_wt"] <- "WAIC weights" if ("LOOIC_wt" %in% names(x)) names(x)[names(x) == "LOOIC_wt"] <- "LOOIC weights" x } # helper --------------------- .replace_words <- function(x, target, replacement) { for (i in 1:length(x)) { if (grepl(target, x[i], fixed = TRUE)) { x[i] <- gsub(target, replacement, x[i]) } } x } 
.additional_arguments <- function(x, value, default) { args <- attributes(x)$additional_arguments if (length(args) > 0 && value %in% names(args)) { out <- args[[value]] } else { out <- attributes(x)[[value]] } if (is.null(out)) { out <- default } out } insight/R/find_parameters_mfx.R0000644000175000017500000001024414077615664016433 0ustar nileshnilesh#' @title Find names of model parameters from marginal effects models #' @name find_parameters.betamfx #' #' @description Returns the names of model parameters, like they typically #' appear in the `summary()` output. #' #' @param component Which type of parameters to return, such as parameters for the #' conditional model, the zero-inflated part of the model, the dispersion #' term, the instrumental variables or marginal effects be returned? Applies #' to models with zero-inflated and/or dispersion formula, or to models with #' instrumental variables (so called fixed-effects regressions), or models #' with marginal effects from \pkg{mfx}. May be abbreviated. Note that the #' *conditional* component is also called *count* or *mean* #' component, depending on the model. There are three convenient shortcuts: #' `component = "all"` returns all possible parameters. #' If `component = "location"`, location parameters such as `conditional`, #' `zero_inflated`, `smooth_terms`, or `instruments` are returned #' (everything that are fixed or random effects - depending on the `effects` #' argument - but no auxiliary parameters). For `component = "distributional"` #' (or `"auxiliary"`), components like `sigma`, `dispersion`, #' `beta` or `precision` (and other auxiliary parameters) are returned. #' @param ... Currently not used. #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return A list of parameter names. The returned list may have following #' elements: #' \itemize{ #' \item `conditional`, the "fixed effects" part from the model. #' \item `marginal`, the marginal effects. #' \item `precision`, the precision parameter. #' } #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_parameters(m) #' @export find_parameters.betamfx <- function(x, component = c("all", "conditional", "precision", "marginal", "location", "distributional", "auxiliary"), flatten = FALSE, ...) { pars <- list( marginal = .remove_backticks_from_string(rownames(x$mfxest)), conditional = .remove_backticks_from_string(names(x$fit$coefficients$mean)), precision = .remove_backticks_from_string(names(x$fit$coefficients$precision)) ) component <- match.arg(component) elements <- .get_elements(effects = "all", component = component) pars <- .compact_list(pars[elements]) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.betaor <- function(x, component = c("all", "conditional", "precision", "location", "distributional", "auxiliary"), flatten = FALSE, ...) { pars <- list( conditional = .remove_backticks_from_string(names(x$fit$coefficients$mean)), precision = .remove_backticks_from_string(names(x$fit$coefficients$precision)) ) component <- match.arg(component) elements <- .get_elements(effects = "all", component = component) pars <- .compact_list(pars[elements]) if (flatten) { unique(unlist(pars)) } else { pars } } #' @rdname find_parameters.betamfx #' @export find_parameters.logitmfx <- function(x, component = c("all", "conditional", "marginal", "location"), flatten = FALSE, ...) 
{ p <- .remove_backticks_from_string(names(stats::coef(x$fit))) pars <- list(marginal = .remove_backticks_from_string(rownames(x$mfxest)), conditional = p) component <- match.arg(component) elements <- .get_elements(effects = "all", component = component) pars <- .compact_list(pars[elements]) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.poissonmfx <- find_parameters.logitmfx #' @export find_parameters.negbinmfx <- find_parameters.logitmfx #' @export find_parameters.probitmfx <- find_parameters.logitmfx #' @export find_parameters.logitor <- function(x, flatten = FALSE, ...) { pars <- list(conditional = .remove_backticks_from_string(names(stats::coef(x$fit)))) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.poissonirr <- find_parameters.logitor #' @export find_parameters.negbinirr <- find_parameters.logitor insight/R/clean_names.R0000644000175000017500000001551014134317373014652 0ustar nileshnilesh#' @title Get clean names of model terms #' @name clean_names #' #' @description This function "cleans" names of model terms (or a character #' vector with such names) by removing patterns like `log()` or #' `as.factor()` etc. #' #' @param x A fitted model, or a character vector. #' @param include_names Logical, if `TRUE`, returns a named vector where #' names are the original values of `x`. #' @param ... Currently not used. #' #' @return The "cleaned" variable names as character vector, i.e. pattern #' like `s()` for splines or `log()` are removed from #' the model terms. #' #' @note Typically, this method is intended to work on character vectors, #' in order to remove patterns that obscure the variable names. For #' convenience reasons it is also possible to call `clean_names()` #' also on a model object. If `x` is a regression model, this #' function is (almost) equal to calling `find_variables()`. The #' main difference is that `clean_names()` always returns a character #' vector, while `find_variables()` returns a list of character #' vectors, unless `flatten = TRUE`. See 'Examples'. #' #' @examples #' # example from ?stats::glm #' counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) #' outcome <- as.numeric(gl(3, 1, 9)) #' treatment <- gl(3, 3) #' m <- glm(counts ~ log(outcome) + as.factor(treatment), family = poisson()) #' clean_names(m) #' #' # difference "clean_names()" and "find_variables()" #' if (require("lme4")) { #' m <- glmer( #' cbind(incidence, size - incidence) ~ period + (1 | herd), #' data = cbpp, #' family = binomial #' ) #' #' clean_names(m) #' find_variables(m) #' find_variables(m, flatten = TRUE) #' } #' @export clean_names <- function(x, ...) { UseMethod("clean_names") } #' @export clean_names.default <- function(x, ...) { if (is.null(x)) { return(x) } cleaned <- unname(find_variables(x, flatten = TRUE, verbose = FALSE)) .remove_values(cleaned, c("1", "0")) } #' @rdname clean_names #' @export clean_names.character <- function(x, include_names = FALSE, ...) { .clean_names(x = x, include_names = include_names, ...) } # helper ----------------- .clean_names <- function(x, include_names = FALSE, is_emmeans = FALSE, ...) 
{ if (is.null(x)) { return(x) } out <- sapply(x, function(.x) { # in case we have ranges, like [2:5], remove those first, so it's not # treated as "interaction" .x <- sub("\\[(\\d+):(\\d+)\\]", "", .x) if (grepl(":", .x, fixed = TRUE) && !grepl("::", .x, fixed = TRUE)) { paste(sapply( strsplit(.x, ":", fixed = TRUE), .remove_pattern_from_names, is_emmeans = is_emmeans ), collapse = ":" ) } else { .remove_pattern_from_names(.x, is_emmeans = is_emmeans) } }) if (isTRUE(include_names)) { out } else { unname(out) } } # utils --------------------- .remove_pattern_from_names <- function(x, ignore_asis = FALSE, ignore_lag = FALSE, is_emmeans = FALSE) { # return if x is empty if (.is_empty_string(x)) { return("") } # for gam-smoothers/loess, remove s()- and lo()-function in column name # for survival, remove strata(), and so on... pattern <- c( "as.factor", "as.numeric", "factor", "frailty", "offset", "log1p", "log10", "log2", "log-log", "scale-log", "log", "lag", "diff", "lspline", "pspline", "scale-poly", "poly", "catg", "asis", "matrx", "pol", "strata", "strat", "scale", "scored", "interaction", "sqrt", "sin", "cos", "tan", "acos", "asin", "atan", "atan2", "exp", "lsp", "rcs", "pb", "lo", "bs", "ns", "mSpline", "t2", "te", "ti", "tt", # need to be fixed first "mmc", "mm", "mi", "mo", "gp", "s", "I" ) # sometimes needed for panelr models, where we need to preserve "lag()" if (ignore_lag) { lag_pattern <- which(pattern == "lag") if (length(lag_pattern)) pattern <- pattern[-lag_pattern] } # do we have a "log()" pattern here? if yes, get capture region # which matches the "cleaned" variable name cleaned <- sapply(1:length(x), function(i) { # check if we have special patterns like 100 * log(xy), and remove it if (isFALSE(is_emmeans) && grepl("^([0-9]+)", x[i])) { x[i] <- gsub("^([0-9]+)[^(\\.|[:alnum:])]+(.*)", "\\2", x[i]) } for (j in 1:length(pattern)) { # remove possible namespace x[i] <- sub("(.*)::(.*)", "\\2", x[i]) if (pattern[j] == "offset") { x[i] <- .trim(unique(sub("^offset\\(([^-+ )]*).*", "\\1", x[i]))) } else if (pattern[j] == "I") { if (!ignore_asis) x[i] <- .trim(unique(sub("I\\(((\\w|\\.)*).*", "\\1", x[i]))) } else if (pattern[j] == "asis") { if (!ignore_asis) x[i] <- .trim(unique(sub("asis\\(((\\w|\\.)*).*", "\\1", x[i]))) } else if (pattern[j] == "log-log") { x[i] <- .trim(unique(sub("^log\\(log\\(((\\w|\\.)*).*", "\\1", x[i]))) } else if (pattern[j] == "scale-log") { x[i] <- .trim(unique(sub("^scale\\(log\\(((\\w|\\.)*).*", "\\1", x[i]))) x[i] <- .trim(unique(sub("^scale\\(log1p\\(((\\w|\\.)*).*", "\\1", x[i]))) x[i] <- .trim(unique(sub("^scale\\(log2\\(((\\w|\\.)*).*", "\\1", x[i]))) x[i] <- .trim(unique(sub("^scale\\(log10\\(((\\w|\\.)*).*", "\\1", x[i]))) } else if (pattern[j] == "scale-poly") { x[i] <- .trim(unique(sub("^scale\\(poly\\(((\\w|\\.)*).*", "\\1", x[i]))) } else if (pattern[j] %in% c("mmc", "mm")) { ## TODO multimembership-models need to be fixed p <- paste0("^", pattern[j], "\\((.*)\\).*") g <- .trim(sub(p, "\\1", x[i])) x[i] <- .trim(unlist(strsplit(g, ","))) } else { # p <- paste0("^", pattern[j], "\\(([^,/)]*).*") # this one should be more generic... p <- paste0("^", pattern[j], "\\(((\\w|\\.)*).*") x[i] <- unique(sub(p, "\\1", x[i])) } } # for coxme-models, remove random-effect things... 
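# e.g., a term such as "age | hospital" is reduced to "hospital" by the
# substitution below; terms without a "|" do not match the pattern and pass
# through unchanged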
.trim(sub("^(.*)\\|(.*)", "\\2", x[i])) }) # remove for random intercept only models .remove_values(cleaned, c("1", "0")) } ## TODO multimembership-models may also have weights, this does not work yet .clean_brms_mm <- function(x) { # only clean for mm() / mmc() functions, else return x if (!grepl("^(mmc|mm)\\(", x)) { return(x) } # extract terms from mm() / mmc() functions, i.e. get # multimembership-terms unname(.compact_character(unlist(sapply(c("mmc", "mm"), function(j) { if (grepl(paste0("^", j, "\\("), x = x)) { p <- paste0("^", j, "\\((.*)\\).*") g <- .trim(sub(p, "\\1", x)) .trim(unlist(strsplit(g, ","))) } else { "" } }, simplify = FALSE)))) } insight/R/get_variances.R0000644000175000017500000002634114100215451015206 0ustar nileshnilesh#' @title Get variance components from random effects models #' @name get_variance #' #' @description This function extracts the different variance components of a #' mixed model and returns the result as list. Functions like #' `get_variance_residual(x)` or `get_variance_fixed(x)` are shortcuts #' for `get_variance(x, component = "residual")` etc. #' #' @param x A mixed effects model. #' @param component Character value, indicating the variance component that should #' be returned. By default, all variance components are returned. The #' distribution-specific (`"distribution"`) and residual (`"residual"`) #' variance are the most computational intensive components, and hence may #' take a few seconds to calculate. #' @param verbose Toggle off warnings. #' @param tolerance Tolerance for singularity check of random effects, to decide #' whether to compute random effect variances or not. Indicates up to which #' value the convergence result is accepted. The larger tolerance is, the #' stricter the test will be. See [performance::check_singularity()]. #' @param ... Currently not used. #' #' @return A list with following elements: #' \itemize{ #' \item `var.fixed`, variance attributable to the fixed effects #' \item `var.random`, (mean) variance of random effects #' \item `var.residual`, residual variance (sum of dispersion and distribution) #' \item `var.distribution`, distribution-specific variance #' \item `var.dispersion`, variance due to additive dispersion #' \item `var.intercept`, the random-intercept-variance, or between-subject-variance (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}) #' \item `var.slope`, the random-slope-variance (\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) #' \item `cor.slope_intercept`, the random-slope-intercept-correlation (\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) #' \item `cor.slopes`, the correlation between random slopes (\ifelse{html}{\out{ρ00}}{\eqn{\rho_{00}}}) #' } #' #' @details This function returns different variance components from mixed models, #' which are needed, for instance, to calculate r-squared measures or the #' intraclass-correlation coefficient (ICC). #' \subsection{Fixed effects variance}{ #' The fixed effects variance, \ifelse{html}{\out{σ2f}}{\eqn{\sigma^2_f}}, #' is the variance of the matrix-multiplication \ifelse{html}{\out{β∗X}}{\eqn{\beta*X}} #' (parameter vector by model matrix). #' } #' \subsection{Random effects variance}{ #' The random effect variance, \ifelse{html}{\out{σ2i}}{\eqn{\sigma^2_i}}, #' represents the *mean* random effect variance of the model. Since #' this variance reflect the "average" random effects variance for mixed #' models, it is also appropriate for models with more complex random #' effects structures, like random slopes or nested random effects. 
#' Details can be found in \cite{Johnson 2014}, in particular equation 10. #' For simple random-intercept models, the random effects variance equals #' the random-intercept variance. #' } #' \subsection{Distribution-specific variance}{ #' The distribution-specific variance, #' \ifelse{html}{\out{σ2d}}{\eqn{\sigma^2_d}}, #' depends on the model family. For Gaussian models, it is #' \ifelse{html}{\out{σ2}}{\eqn{\sigma^2}} (i.e. #' `sigma(model)^2`). For models with binary outcome, it is #' \eqn{\pi^2 / 3} for logit-link, `1` for probit-link, and \eqn{\pi^2 / 6} #' for cloglog-links. Models from Gamma-families use \eqn{\mu^2} (as obtained #' from `family$variance()`). For all other models, the distribution-specific #' variance is based on lognormal approximation, \eqn{log(1 + var(x) / \mu^2)} #' (see \cite{Nakagawa et al. 2017}). The expected variance of a zero-inflated #' model is computed according to \cite{Zuur et al. 2012, p277}. #' } #' \subsection{Variance for the additive overdispersion term}{ #' The variance for the additive overdispersion term, #' \ifelse{html}{\out{σ2e}}{\eqn{\sigma^2_e}}, #' represents \dQuote{the excess variation relative to what is expected #' from a certain distribution} (Nakagawa et al. 2017). In (most? many?) #' cases, this will be `0`. #' } #' \subsection{Residual variance}{ #' The residual variance, \ifelse{html}{\out{σ2ε}}{\eqn{\sigma^2_\epsilon}}, #' is simply \ifelse{html}{\out{σ2d + σ2e}}{\eqn{\sigma^2_d + \sigma^2_e}}. #' } #' \subsection{Random intercept variance}{ #' The random intercept variance, or *between-subject* variance #' (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}), #' is obtained from `VarCorr()`. It indicates how much groups #' or subjects differ from each other, while the residual variance #' \ifelse{html}{\out{σ2ε}}{\eqn{\sigma^2_\epsilon}} #' indicates the *within-subject variance*. #' } #' \subsection{Random slope variance}{ #' The random slope variance (\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) #' is obtained from `VarCorr()`. This measure is only available #' for mixed models with random slopes. #' } #' \subsection{Random slope-intercept correlation}{ #' The random slope-intercept correlation #' (\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) #' is obtained from `VarCorr()`. This measure is only available #' for mixed models with random intercepts and slopes. #' } #' #' @note This function supports models of class `merMod` (including models #' from \pkg{blme}), `clmm`, `cpglmm`, `glmmadmb`, `glmmTMB`, #' `MixMod`, `lme`, `mixed`, `rlmerMod`, `stanreg`, #' `brmsfit` or `wbm`. Support for objects of class `MixMod` #' (\pkg{GLMMadaptive}), `lme` (\pkg{nlme}) or `brmsfit` (\pkg{brms}) #' is experimental and may not work for all models. #' #' @references \itemize{ #' \item Johnson, P. C. D. (2014). Extension of Nakagawa & Schielzeth’s R2 GLMM to random slopes models. Methods in Ecology and Evolution, 5(9), 944–946. \doi{10.1111/2041-210X.12225} #' \item Nakagawa, S., Johnson, P. C. D., & Schielzeth, H. (2017). The coefficient of determination R2 and intra-class correlation coefficient from generalized linear mixed-effects models revisited and expanded. Journal of The Royal Society Interface, 14(134), 20170213. \doi{10.1098/rsif.2017.0213} #' \item Zuur, A. F., Savel'ev, A. A., & Ieno, E. N. (2012). Zero inflated models and generalized linear mixed models with R. Newburgh, United Kingdom: Highland Statistics. 
#' } #' #' @examples #' \dontrun{ #' library(lme4) #' data(sleepstudy) #' m <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' #' get_variance(m) #' get_variance_fixed(m) #' get_variance_residual(m) #' } #' @export get_variance <- function(x, component = c("all", "fixed", "random", "residual", "distribution", "dispersion", "intercept", "slope", "rho01", "rho00"), verbose = TRUE, ...) { UseMethod("get_variance") } #' @export get_variance.default <- function(x, component = c("all", "fixed", "random", "residual", "distribution", "dispersion", "intercept", "slope", "rho01", "rho00"), verbose = TRUE, ...) { if (isTRUE(verbose)) { warning(sprintf("Objects of class `%s` are not supported.", class(x)[1])) } NULL } #' @export get_variance.merMod <- function(x, component = c("all", "fixed", "random", "residual", "distribution", "dispersion", "intercept", "slope", "rho01", "rho00"), verbose = TRUE, tolerance = 1e-5, ...) { component <- match.arg(component) tryCatch( { .compute_variances(x, component = component, name_fun = "get_variance", name_full = "random effect variances", verbose = verbose, tolerance = tolerance) }, error = function(e) { NULL } ) } #' @export get_variance.rlmerMod <- get_variance.merMod #' @export get_variance.mjoint <- get_variance.merMod #' @export get_variance.cpglmm <- get_variance.merMod #' @export get_variance.glmmadmb <- get_variance.merMod #' @export get_variance.stanreg <- get_variance.merMod #' @export get_variance.clmm <- get_variance.merMod #' @export get_variance.wbm <- get_variance.merMod #' @export get_variance.wblm <- get_variance.merMod #' @export get_variance.lme <- get_variance.merMod #' @export get_variance.brmsfit <- get_variance.merMod #' @export get_variance.glmmTMB <- function(x, component = c("all", "fixed", "random", "residual", "distribution", "dispersion", "intercept", "slope", "rho01", "rho00"), verbose = TRUE, tolerance = 1e-5, model_component = NULL, ...) { component <- match.arg(component) tryCatch( { .compute_variances(x, component = component, name_fun = "get_variance", name_full = "random effect variances", verbose = verbose, tolerance = tolerance, model_component = model_component) }, error = function(e) { NULL } ) } #' @export get_variance.MixMod <- get_variance.glmmTMB #' @export get_variance.mixed <- function(x, component = c("all", "fixed", "random", "residual", "distribution", "dispersion", "intercept", "slope", "rho01", "rho00"), verbose = TRUE, tolerance = 1e-5, ...) { component <- match.arg(component) .compute_variances(x$full_model, component = component, name_fun = "get_variance", name_full = "random effect variances", verbose = verbose, tolerance = tolerance) } #' @rdname get_variance #' @export get_variance_residual <- function(x, verbose = TRUE, ...) { unlist(get_variance(x, component = "residual", verbose = verbose, ...)) } #' @rdname get_variance #' @export get_variance_fixed <- function(x, verbose = TRUE, ...) { unlist(get_variance(x, component = "fixed", verbose = verbose, ...)) } #' @rdname get_variance #' @export get_variance_random <- function(x, verbose = TRUE, tolerance = 1e-5, ...) { unlist(get_variance(x, component = "random", verbose = verbose, tolerance = tolerance, ...)) } #' @rdname get_variance #' @export get_variance_distribution <- function(x, verbose = TRUE, ...) { unlist(get_variance(x, component = "distribution", verbose = verbose, ...)) } #' @rdname get_variance #' @export get_variance_dispersion <- function(x, verbose = TRUE, ...) 
{ unlist(get_variance(x, component = "dispersion", verbose = verbose, ...)) } #' @rdname get_variance #' @export get_variance_intercept <- function(x, verbose = TRUE, ...) { unlist(get_variance(x, component = "intercept", verbose = verbose, ...)) } #' @rdname get_variance #' @export get_variance_slope <- function(x, verbose = TRUE, ...) { unlist(get_variance(x, component = "slope", verbose = verbose, ...)) } #' @rdname get_variance #' @export get_correlation_slope_intercept <- function(x, verbose = TRUE, ...) { unlist(get_variance(x, component = "rho01", verbose = verbose, ...)) } #' @rdname get_variance #' @export get_correlation_slopes <- function(x, verbose = TRUE, ...) { unlist(get_variance(x, component = "rho00", verbose = verbose, ...)) } insight/R/find_statistic.R0000644000175000017500000002026614137207374015422 0ustar nileshnilesh#' @title Find statistic for model #' @description Returns the statistic for a regression model (*t*-statistic, #' *z*-statistic, etc.). #' @name find_statistic #' #' @description Small helper that checks if a model is a regression model #' object and return the statistic used. #' #' @param x An object. #' @param ... Currently not used. #' #' @return A character describing the type of statistic. If there is no #' statistic available with a distribution, `NULL` will be returned. #' #' @examples #' # regression model object #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_statistic(m) #' @export find_statistic <- function(x, ...) { # model object check -------------------------------------------------------- # check if the object is a model object; if not, quit early if (isFALSE(is_model(x))) { stop("The entered object is not a model object.", call. = FALSE) } if (inherits(x, "mipo")) { x <- tryCatch( { models <- eval(x$call$object) x <- models$analyses[[1]] }, error = function(e) { NULL } ) } if (inherits(x, "mira")) { x <- x$analyses[[1]] } if (inherits(x, "model_fit")) { x <- x$fit } if (inherits(x, "merModList")) { x <- x[[1]] } # check if model object is accessible; if not, quit early if (is.null(x)) { return(NULL) } # t-value objects ---------------------------------------------------------- t.mods <- c( "bayesx", "BBreg", "BBmm", "bcplm", "biglm", "bfsl", "blmerMod", "cch", "censReg", "complmrob", "cpglm", "cpglmm", "crq", "crqs", "drc", "elm", "feis", "felm", "gamlss", "garch", "glmmPQL", "gls", "gmm", "gnls", "HLfit", "ivreg", "ivFixed", "iv_robust", "ivprobit", "lm", "lm_robust", "lm.beta", "lme", "lmerMod", "lmerModLmerTest", "lmodel2", "lmRob", "lmrob", "lqm", "lqmm", "maxLik", "mixed", "mhurdle", "mlm", "multinom", "nlmerMod", "nlrq", "nls", "ols", "orcutt", "pb1", "pb2", "polr", "rlm", "rms", "rlmerMod", "rq", "rqs", "rqss", "selection", "speedlm", "spml", "summary.lm", "svyglm", "svyolr", "systemfit", "truncreg", "varest", "wbm", "wblm", "yuen" ) # z-value objects ---------------------------------------------------------- z.mods <- c( "aareg", "Arima", "averaging", "betamfx", "betaor", "betareg", "bife", "bifeAPEs", "bglmerMod", "boot_test_mediation", "bracl", "brglm", "brglmFit", "brmultinom", "btergm", "cglm", "cph", "clm", "clm2", "clmm", "clmm2", "clogit", "coxme", "coxph", "coxr", "crch", "crr", "DirichletRegModel", "ergm", "feglm", "flexsurvreg", "gee", "glimML", "glmm", "glmmadmb", "glmmFit", "glmmLasso", "glmmTMB", "glmx", "gmnl", "hurdle", "lavaan", "loggammacenslmrob", "logitmfx", "logitor", "LORgee", "lrm", "margins", "metaplus", "mixor", "MixMod", "mjoint", "mle", "mle2", "mlogit", "mclogit", "mmclogit", 
"mvmeta", "mvord", "negbin", "negbinmfx", "negbinirr", "nlreg", "objectiveML", "orm", "poissonmfx", "poissonirr", "psm", "probitmfx", "pgmm", "qr", "QRNLMM", "QRLMM", "Rchoice", "riskRegression", "robmixglm", "rma", "rma.mv", "rma.uni", "rrvglm", "Sarlm", "sem", "SemiParBIV", "slm", "survreg", "svy_vglm", "test_mediation", "tobit", "vglm", "wbgee", "zcpglm", "zeroinfl", "zerotrunc" ) # F-value objects ---------------------------------------------------------- f.mods <- c( "afex_aov", "Anova.mlm", "aov", "aovlist", "anova", "Gam", "manova", "maov", "t1way" ) # chi-squared value objects ------------------------------------------------ chi.mods <- c( "coxph.penal", "epi.2by2", "geeglm", "logistf", "MANOVA", "mlma", "nparLD", "RM", "vgam" ) # mixed bag ---------------------------------------------------------------- # models for which there is no clear t-or z-statistic # which statistic to use will be decided based on the family used g.mods <- c( "bam", "bigglm", "cgam", "cgamm", "eglm", "emmGrid", "emm_list", "fixest", "gam", "glm", "Glm", "glmc", "glmerMod", "glmRob", "glmrob", "pseudoglm", "scam", "speedglm" ) # t-statistic (otherwise z-statistic: "binomial", "poisson") g.t.mods <- c( "gaussian", "Gamma", "quasi", "quasibinomial", "quasipoisson", "inverse.gaussian" ) # pattern finding ---------------------------------------------------------- unclear.mods <- c("plm") if (inherits(x, "glht")) { if (x$df == 0) { z.mods <- c(z.mods, "glht") } else { t.mods <- c(t.mods, "glht") } } if (inherits(x, "coeftest")) { if ("z value" %in% dimnames(x)[[2]]) { z.mods <- c(z.mods, "coeftest") } else { t.mods <- c(t.mods, "coeftest") } } # no statistic ------------------------------------------------------------- unsupported.mods <- c( "bcplm", "BFBayesFactor", "brmsfit", "gbm", "glmmEP", "joint", "list", "MCMCglmm", "mediate", "mlergm", "pairwise.htest", "ridgelm", "splmm", "stanreg", "stanmvreg", "survfit" ) # edge cases --------------------------------------------------------------- m_info <- model_info(x, verbose = FALSE, return_family_only = TRUE) # tweedie-check needs to come first, because glm can also have tweedie # family, so this exception needs to be caught before checking for g.mods tryCatch( { suppressWarnings( if (!is_multivariate(x) && .is_tweedie(x, m_info)) { return("t-statistic") } ) }, error = function(e) {} ) # statistic check ----------------------------------------------------------- model_class <- class(x)[[1]] if (model_class %in% unsupported.mods) { return(NULL) } if (model_class %in% t.mods) { return("t-statistic") } if (model_class %in% z.mods) { return("z-statistic") } if (model_class %in% f.mods) { return("F-statistic") } if (model_class %in% chi.mods) { return("chi-squared statistic") } if (model_class %in% g.mods) { if (model_class %in% c("emmGrid", "emm_list")) { stat <- tryCatch( { df <- get_df(x) if (all(is.na(df)) || all(is.infinite(df))) { "z-statistic" } else { "t-statistic" } }, error = function(e) { "t-statistic" } ) return(stat) } else if (m_info$family %in% g.t.mods) { return("t-statistic") } else { return("z-statistic") } } # ambiguous cases ----------------------------------------------------------- if (model_class %in% unclear.mods) { col_names <- colnames(as.data.frame(summary(x)$coefficients)) t_names <- c( "t", "t-value", "t value", "t.value", "Pr(>|t|)" ) z_names <- c( "z", "z-value", "z value", "z.value", "Pr(>|z|)", "Pr(>|Z|)", "Naive z", "Robust z", "san.z", "Wald Z" ) f_names <- c("F", "F-value", "F value", "F.value") chi_names <- c("Chisq", "chi-sq", 
"chi.sq", "Wald", "W", "Pr(>|W|)") if (length(col_names) == 0L) { return(NULL) } if (any(t_names %in% col_names)) { return("t-statistic") } if (any(z_names %in% col_names)) { return("z-statistic") } if (any(f_names %in% col_names)) { return("F-statistic") } if (any(chi_names %in% col_names)) { return("chi-squared statistic") } } } # helper --------------- .is_tweedie <- function(model, info) { if (info$family %in% c("Student's-t", "t Family", "gaussian", "Gaussian") || grepl("(\\st)$", info$family)) { linear_model <- TRUE } tweedie_fam <- grepl("^(tweedie|Tweedie)", info$family) | grepl("^(tweedie|Tweedie)", info$link_function) (linear_model && tweedie_fam) || inherits(model, c("bcplm", "cpglm", "cpglmm", "zcpglm")) } insight/R/helper_functions.R0000644000175000017500000005257014142156546015765 0ustar nileshnilesh# remove trailing/leading spaces from character vectors .trim <- function(x) gsub("^\\s+|\\s+$", "", x) # remove NULL elements from lists .compact_list <- function(x) x[!sapply(x, function(i) all(length(i) == 0) || all(is.null(i)) || (!is.data.frame(i) && any(i == "NULL", na.rm = TRUE)) || (is.data.frame(i) && nrow(i) == 0))] # remove empty string from character .compact_character <- function(x) x[!sapply(x, function(i) nchar(i) == 0 || is.null(i) || any(i == "NULL", na.rm = TRUE))] # remove values from vector .remove_values <- function(x, values) { remove <- x %in% values if (any(remove)) { x <- x[!remove] } x } # rename values in a vector .rename_values <- function(x, old, new) { x[x %in% old] <- new x } # is string empty? .is_empty_string <- function(x) { x <- x[!is.na(x)] length(x) == 0 || all(nchar(x) == 0) } # is object empty? .is_empty_object <- function(x) { if (inherits(x, "data.frame")) { x <- as.data.frame(x) } if (is.list(x) && length(x) > 0) { x <- tryCatch( { .compact_list(x) }, error = function(x) { x } ) } if (inherits(x, "data.frame")) { if (nrow(x) > 0 && ncol(x) > 0) { x <- x[!sapply(x, function(i) all(is.na(i)))] x <- x[!apply(x, 1, function(i) all(is.na(i))), ] # need to check for is.null for R 3.4 } } else if (!is.null(x)) { x <- stats::na.omit(x) } length(x) == 0 || is.null(x) || isTRUE(nrow(x) == 0) || isTRUE(ncol(x) == 0) } # does string contain pattern? .string_contains <- function(pattern, x) { pattern <- paste0("\\Q", pattern, "\\E") grepl(pattern, x, perl = TRUE) } # has object an element with given name? .obj_has_name <- function(x, name) { name %in% names(x) } # checks if a brms-models is a multi-membership-model .is_multi_membership <- function(x) { if (inherits(x, "brmsfit")) { re <- find_random(x, split_nested = TRUE, flatten = TRUE) any(grepl("^(mmc|mm)\\(", re)) } else { return(FALSE) } } # merge data frames, remove double columns .merge_dataframes <- function(data, ..., replace = TRUE) { # check for identical column names tmp <- cbind(...) if (nrow(data) == 0) { return(tmp) } doubles <- colnames(tmp) %in% colnames(data) # keep order? reihenfolge <- c(which(!doubles), which(doubles)) # remove duplicate column names, if requested if (replace && any(doubles)) tmp <- tmp[, !doubles, drop = FALSE] # bind all data x <- cbind(tmp, data) # restore order if (replace) { # check for correct length. 
if "data" had duplicated variables, # but not all variable are duplicates, add indices of regular values if (ncol(x) > length(reihenfolge)) { # get remaining indices xl <- seq_len(ncol(x))[-seq_len(length(reihenfolge))] # add to "reihefolge" reihenfolge <- c(reihenfolge, xl) } # sort data frame x <- x[, order(reihenfolge), drop = FALSE] } x } # removes random effects from a formula that is in lmer-notation .get_fixed_effects <- function(f) { # remove random effects from RHS fl <- length(f) f[[fl]] <- .nobars(f[[fl]]) f } # check if any terms appear in the formula after random effects # like "~ (1|school) + open + extro + agree + school" # this regex removes "(1|school)", as well as any +, -, *, whitespace etc. # if there are any chars left, these come from further terms that come after # random effects... .formula_empty_after_random_effect <- function(f) { nchar(gsub("(~|\\+|\\*|-|/|:)", "", gsub(" ", "", gsub("\\((.*)\\)", "", f)))) == 0 } # extract random effects from formula .get_model_random <- function(f, split_nested = FALSE, model) { is_special <- inherits( model, c( "MCMCglmm", "gee", "LORgee", "mixor", "clmm2", "felm", "feis", "bife", "BFBayesFactor", "BBmm", "glimML", "MANOVA", "RM", "cglm", "glmm" ) ) if (identical(.safe_deparse(f), "~0") || identical(.safe_deparse(f), "~1")) { return(NULL) } re <- sapply(.findbars(f), .safe_deparse) if (is_special && .is_empty_object(re)) { re <- all.vars(f[[2L]]) if (length(re) > 1) { re <- as.list(re) split_nested <- FALSE } } else { re <- .trim(substring(re, max(gregexpr(pattern = "\\|", re)[[1]]) + 1)) } # check for multi-membership models if (inherits(model, "brmsfit")) { if (grepl("mm\\((.*)\\)", re)) { re <- trimws(unlist(strsplit(gsub("mm\\((.*)\\)", "\\1", re), ","))) } } if (split_nested) { # remove parenthesis for nested models re <- unique(unlist(strsplit(re, "\\:"))) # nested random effects, e.g. g1 / g2 / g3, deparse to "g0:(g1:g2)". # when we split at ":", we have "g0", "(g1" and "g2)". In such cases, # we need to remove the parentheses. But we need to preserve them in # case we have group factors in other models, like panelr, where we can # have "lag(union)" as group factor. In such cases, parentheses should be # preserved. We here check if group factors, after passing to "clean_names()", # still have "(" or ")" in their name, and if so, just remove parentheses # for these cases... 
has_parantheses <- vapply( clean_names(re), function(i) { grepl("[\\(\\)]", x = i) }, logical(1) ) if (any(has_parantheses)) { re[has_parantheses] <- gsub(pattern = "[\\(\\)]", replacement = "", x = re[has_parantheses]) } re } else { unique(re) } } # in case we need the random effects terms as formula (symbol), # not as character string, then call this functions instead of # .get_model_random() .get_group_factor <- function(x, f) { if (is.list(f)) { f <- lapply(f, function(.x) { .get_model_random(.x, split_nested = TRUE, x) }) } else { f <- .get_model_random(f, split_nested = TRUE, x) } if (is.null(f)) { return(NULL) } if (is.list(f)) { f <- lapply(f, function(i) sapply(i, as.symbol)) } else { f <- sapply(f, as.symbol) } f } # helper to access model components ---------------- .all_elements <- function() { c( "conditional", "conditional1", "conditional2", "conditional3", "precision", "nonlinear", "random", "zi", "zero_inflated", "zero_inflated_random", "shape", "dispersion", "instruments", "interactions", "simplex", "smooth_terms", "sigma", "nu", "tau", "correlation", "slopes", "cluster", "extra", "scale", "marginal", "alpha", "beta", "survival", "infrequent_purchase", "auxiliary", "mix", "shiftprop", "phi", "ndt", "hu", "xi", "coi", "zoi", "aux", "dist", "selection", "outcome", "time_dummies", "sigma_random", "beta_random", "car" ) } .aux_elements <- function() { c( "sigma", "alpha", "beta", "dispersion", "precision", "nu", "tau", "shape", "phi", "(phi)", "ndt", "hu", "xi", "coi", "zoi", "mix", "shiftprop", "auxiliary", "aux", "dist", # random parameters "sigma_random", "beta_random" ) } .get_elements <- function(effects, component) { # all elements of a model elements <- .all_elements() # zero-inflation component zero_inflated_component <- c("zi", "zero_inflated", "zero_inflated_random") # auxiliary parameters auxiliary_parameters <- .aux_elements() # random parameters random_parameters <- c("random", "zero_inflated_random", "sigma_random", "beta_random", "car") # conditional component conditional_component <- setdiff(elements, c(auxiliary_parameters, zero_inflated_component, "smooth_terms")) # location parameters location_parameters <- if (effects == "fixed") { setdiff(elements, c(auxiliary_parameters, random_parameters)) } else { setdiff(elements, auxiliary_parameters) } # fixed pattern? if (all(component == "location")) { return(location_parameters) } # fixed pattern? if (all(component %in% c("aux", "dist", "distributional", "auxiliary"))) { return(auxiliary_parameters) } elements <- switch(effects, all = elements, fixed = elements[!elements %in% random_parameters], random = elements[elements %in% random_parameters] ) elements <- switch(component, all = elements, cond = , conditional = elements[elements %in% conditional_component], zi = , zero_inflated = elements[elements %in% zero_inflated_component], elements[elements == component] ) elements } # checks if a mixed model fit is singular or not. Need own function, # because lme4::isSingular() does not work with glmmTMB .is_singular <- function(x, vals, tolerance = 1e-5) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package `lme4` needs to be installed to compute variances for mixed models.", call. 
= FALSE) } tryCatch( { if (inherits(x, c("glmmTMB", "clmm", "cpglmm"))) { is_si <- any(sapply(vals$vc, function(.x) any(abs(diag(.x)) < tolerance))) } else if (inherits(x, "merMod")) { theta <- lme4::getME(x, "theta") diag.element <- lme4::getME(x, "lower") == 0 is_si <- any(abs(theta[diag.element]) < tolerance) } else if (inherits(x, "MixMod")) { vc <- diag(x$D) is_si <- any(sapply(vc, function(.x) any(abs(.x) < tolerance))) } else if (inherits(x, "lme")) { is_si <- any(abs(stats::na.omit(as.numeric(diag(vals$vc))) < tolerance)) } else { is_si <- FALSE } is_si }, error = function(x) { FALSE } ) } # Filter parameters from Stan-model fits .filter_pars <- function(l, parameters = NULL, is_mv = NULL) { if (!is.null(parameters)) { if (is.null(is_mv)) { is_mv <- isTRUE(attr(l, "is_mv", exact = TRUE) == "1") } if (is_multivariate(l) || is_mv) { for (i in names(l)) { l[[i]] <- .filter_pars_univariate(l[[i]], parameters) } } else { l <- .filter_pars_univariate(l, parameters) } if (isTRUE(is_mv)) attr(l, "is_mv") <- "1" } l } .filter_pars_univariate <- function(l, parameters) { lapply(l, function(component) { unlist(unname(sapply( parameters, function(pattern) { component[grepl(pattern = pattern, x = component, perl = TRUE)] }, simplify = FALSE ))) }) } # remove column .remove_column <- function(data, variables) { data[, -which(colnames(data) %in% variables), drop = FALSE] } .grep_smoothers <- function(x) { grepl("^(s\\()", x, perl = TRUE) | grepl("^(ti\\()", x, perl = TRUE) | grepl("^(te\\()", x, perl = TRUE) | grepl("^(t2\\()", x, perl = TRUE) | grepl("^(gam::s\\()", x, perl = TRUE) | grepl("^(VGAM::s\\()", x, perl = TRUE) | grepl("^(mgcv::s\\()", x, perl = TRUE) | grepl("^(mgcv::ti\\()", x, perl = TRUE) | grepl("^(mgcv::t2\\()", x, perl = TRUE) | grepl("^(mgcv::te\\()", x, perl = TRUE) | grepl("^(brms::s\\()", x, perl = TRUE) | grepl("^(brms::t2\\()", x, perl = TRUE) | grepl("^(smooth_sd\\[)", x, perl = TRUE) } .grep_zi_smoothers <- function(x) { grepl("^(s\\.\\d\\()", x, perl = TRUE) | grepl("^(gam::s\\.\\d\\()", x, perl = TRUE) | grepl("^(mgcv::s\\.\\d\\()", x, perl = TRUE) } .grep_non_smoothers <- function(x) { grepl("^(?!(s\\())", x, perl = TRUE) & # this one captures smoothers in zi- or mv-models from gam grepl("^(?!(s\\.\\d\\())", x, perl = TRUE) & grepl("^(?!(ti\\())", x, perl = TRUE) & grepl("^(?!(te\\())", x, perl = TRUE) & grepl("^(?!(t2\\())", x, perl = TRUE) & grepl("^(?!(gam::s\\())", x, perl = TRUE) & grepl("^(?!(gam::s\\.\\d\\())", x, perl = TRUE) & grepl("^(?!(VGAM::s\\())", x, perl = TRUE) & grepl("^(?!(mgcv::s\\())", x, perl = TRUE) & grepl("^(?!(mgcv::s\\.\\d\\())", x, perl = TRUE) & grepl("^(?!(mgcv::ti\\())", x, perl = TRUE) & grepl("^(?!(mgcv::te\\())", x, perl = TRUE) & grepl("^(?!(brms::s\\())", x, perl = TRUE) & grepl("^(?!(brms::t2\\())", x, perl = TRUE) & grepl("^(?!(smooth_sd\\[))", x, perl = TRUE) } # .split_formula <- function(f) { # rhs <- if (length(f) > 2) # f[[3L]] # else # f[[2L]] # # lapply(.extract_formula_parts(rhs), .safe_deparse) # } # # # .extract_formula_parts <- function(x, sep = "|") { # if (is.null(x)) # return(NULL) # rval <- list() # if (length(x) > 1L && x[[1L]] == sep) { # while (length(x) > 1L && x[[1L]] == sep) { # rval <- c(x[[3L]], rval) # x <- x[[2L]] # } # } # c(x, rval) # } .safe_deparse <- function(string) { if (is.null(string)) { return(NULL) } paste0(sapply(deparse(string, width.cutoff = 500), .trim, simplify = TRUE), collapse = " ") } .gam_family <- function(x) { faminfo <- tryCatch( { stats::family(x) }, error = function(e) { NULL } ) 
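# (note: stats::family() is wrapped in tryCatch() because it is not guaranteed
# to be available for every model class handled here; if it errors or returns
# NULL, the fallback below reads the family element stored in the fitted object)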
# try to set manually, if not found otherwise if (is.null(faminfo)) { faminfo <- tryCatch( { x$family }, error = function(e) { NULL } ) } faminfo } # for models with zero-inflation component, return # required component of model-summary .filter_component <- function(dat, component) { switch(component, "cond" = , "conditional" = dat[dat$Component == "conditional", , drop = FALSE], "zi" = , "zero_inflated" = dat[dat$Component == "zero_inflated", , drop = FALSE], "dispersion" = dat[dat$Component == "dispersion", , drop = FALSE], "smooth_terms" = dat[dat$Component == "smooth_terms", , drop = FALSE], "ip" = , "infrequent_purchase" = dat[dat$Component == "infrequent_purchase", , drop = FALSE], "auxiliary" = dat[dat$Component == "auxiliary", , drop = FALSE], "distributional" = dat[dat$Component == "distributional", , drop = FALSE], "sigma" = dat[dat$Component == "sigma", , drop = FALSE], dat ) } # capitalizes the first letter in a string .capitalize <- function(x) { capped <- grep("^[A-Z]", x, invert = TRUE) substr(x[capped], 1, 1) <- toupper(substr(x[capped], 1, 1)) x } .remove_backticks_from_parameter_names <- function(x) { if (is.data.frame(x) && "Parameter" %in% colnames(x)) { x$Parameter <- gsub("`", "", x$Parameter, fixed = TRUE) } x } .remove_backticks_from_string <- function(x) { if (is.character(x)) { x <- gsub("`", "", x, fixed = TRUE) } x } .remove_backticks_from_matrix_names <- function(x) { if (is.matrix(x)) { colnames(x) <- gsub("`", "", colnames(x), fixed = TRUE) rownames(x) <- gsub("`", "", colnames(x), fixed = TRUE) } x } #' @keywords internal .gather <- function(x, names_to = "key", values_to = "value", columns = colnames(x)) { if (is.numeric(columns)) columns <- colnames(x)[columns] dat <- stats::reshape( x, idvar = "id", ids = row.names(x), times = columns, timevar = names_to, v.names = values_to, varying = list(columns), direction = "long" ) if (is.factor(dat[[values_to]])) { dat[[values_to]] <- as.character(dat[[values_to]]) } dat[, 1:(ncol(dat) - 1), drop = FALSE] } .is_baysian_emmeans <- function(x) { if (inherits(x, "emm_list")) { x <- x[[1]] } post.beta <- methods::slot(x, "post.beta") !(all(dim(post.beta) == 1) && is.na(post.beta)) } .is_bayesian_model <- function(x) { inherits(x, c( "brmsfit", "stanfit", "MCMCglmm", "stanreg", "stanmvreg", "bmerMod", "BFBayesFactor", "bamlss", "bayesx", "mcmc", "bcplm", "bayesQR", "BGGM", "meta_random", "meta_fixed", "meta_bma", "blavaan", "blrm" )) } # safe conversion from factor to numeric .factor_to_numeric <- function(x, lowest = NULL) { if (is.data.frame(x)) { as.data.frame(lapply(x, .factor_to_numeric_helper, lowest = lowest)) } else { .factor_to_numeric_helper(x, lowest = lowest) } } .factor_to_numeric_helper <- function(x, lowest = NULL) { if (is.numeric(x)) { return(x) } if (is.logical(x)) { return(as.numeric(x)) } if (anyNA(suppressWarnings(as.numeric(as.character(stats::na.omit(x)))))) { if (is.character(x)) { x <- as.factor(x) } x <- droplevels(x) levels(x) <- 1:nlevels(x) } out <- as.numeric(as.character(x)) if (!is.null(lowest)) { difference <- min(out) - lowest out <- out - difference } out } ## copied from lme4::findbars() ----------------------- .expandDoubleVert <- function(term) { frml <- stats::formula(substitute(~x, list(x = term[[2]]))) newtrms <- paste0("0+", attr(stats::terms(frml), "term.labels")) if (attr(stats::terms(frml), "intercept") != 0) { newtrms <- c("1", newtrms) } stats::as.formula(paste("~(", paste(vapply(newtrms, function(trm) { paste0(trm, "|", deparse(term[[3]])) }, ""), collapse = ")+("), 
")"))[[2]] } .expandDoubleVerts <- function(term) { if (!is.name(term) && is.language(term)) { if (term[[1]] == as.name("(")) { term[[2]] <- .expandDoubleVerts(term[[2]]) } stopifnot(is.call(term)) if (term[[1]] == as.name("||")) { return(.expandDoubleVert(term)) } term[[2]] <- .expandDoubleVerts(term[[2]]) if (length(term) != 2 && length(term) == 3) { term[[3]] <- .expandDoubleVerts(term[[3]]) } } term } .findbars <- function(term) { fb <- function(term) { if (is.name(term) || !is.language(term)) { return(NULL) } if (term[[1]] == as.name("(")) { return(fb(term[[2]])) } stopifnot(is.call(term)) if (term[[1]] == as.name("|")) { return(term) } if (length(term) == 2) { return(fb(term[[2]])) } c(fb(term[[2]]), fb(term[[3]])) } expandSlash <- function(bb) { makeInteraction <- function(x) { if (length(x) < 2) { return(x) } trm1 <- makeInteraction(x[[1]]) trm11 <- if (is.list(trm1)) { trm1[[1]] } else { trm1 } list(substitute(foo:bar, list(foo = x[[2]], bar = trm11)), trm1) } slashTerms <- function(x) { if (!("/" %in% all.names(x))) { return(x) } if (x[[1]] != as.name("/")) { stop("unparseable formula for grouping factor", call. = FALSE) } list(slashTerms(x[[2]]), slashTerms(x[[3]])) } if (!is.list(bb)) { expandSlash(list(bb)) } else { unlist(lapply(bb, function(x) { if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]]))) { lapply(unlist(makeInteraction(trms)), function(trm) { substitute(foo | bar, list(foo = x[[2]], bar = trm)) }) } else { x } })) } } modterm <- .expandDoubleVerts(if (methods::is(term, "formula")) { term[[length(term)]] } else { term }) expandSlash(fb(modterm)) } ## copied from lme4::nobars() ----------------------- .nobars <- function(term) { nb <- .nobars_(term) if (methods::is(term, "formula") && length(term) == 3 && is.symbol(nb)) { nb <- stats::reformulate("1", response = deparse(nb)) } if (is.null(nb)) { nb <- if (methods::is(term, "formula")) { ~1 } else { 1 } } nb } .nobars_ <- function(term) { if (!(any(c("|", "||") %in% all.names(term)))) { return(term) } if (.isBar(term)) { return(NULL) } if (.isAnyArgBar(term)) { return(NULL) } if (length(term) == 2) { nb <- .nobars_(term[[2]]) if (is.null(nb)) { return(NULL) } term[[2]] <- nb return(term) } nb2 <- .nobars_(term[[2]]) nb3 <- .nobars_(term[[3]]) if (is.null(nb2)) { return(nb3) } if (is.null(nb3)) { return(nb2) } term[[2]] <- nb2 term[[3]] <- nb3 term } .isBar <- function(term) { if (is.call(term)) { if ((term[[1]] == as.name("|")) || (term[[1]] == as.name("||"))) { return(TRUE) } } FALSE } .isAnyArgBar <- function(term) { if ((term[[1]] != as.name("~")) && (term[[1]] != as.name("("))) { for (i in seq_along(term)) { if (.isBar(term[[i]])) { return(TRUE) } } } FALSE } .n_unique <- function(x, na.rm = TRUE) { if (is.null(x)) { return(0) } if (isTRUE(na.rm)) x <- stats::na.omit(x) length(unique(x)) } # classify emmeans objects ------------- is.emmeans.contrast <- function(x) { if (inherits(x, "list")) { out <- vector("list", length = length(x)) for (i in seq_along(x)) { out[[i]] <- is.emmeans.contrast(x[[i]]) } return(unlist(out)) } res <- "con.coef" %in% names(x@misc) rep(res, nrow(x@linfct)) } is.emmeans.trend <- function(x) { if (inherits(x, "list")) { out <- vector("list", length = length(x)) for (i in seq_along(x)) { out[[i]] <- is.emmeans.trend(x[[i]]) } return(unlist(out)) } "trend" %in% names(x@roles) & !is.emmeans.contrast(x) } is.emmean <- function(x) { !is.emmeans.trend(x) & !is.emmeans.contrast(x) } .classify_emmeans <- function(x) { c_ <- is.emmeans.contrast(x) t_ <- is.emmeans.trend(x) ifelse(c_, 
"contrasts", ifelse(t_, "emtrends", "emmeans") ) } insight/R/export_table.R0000644000175000017500000006776014163104230015077 0ustar nileshnilesh#' Data frame and Tables Pretty Formatting #' #' @param x A data frame. May also be a list of data frames, to export multiple #' data frames into multiple tables. #' @param sep Column separator. #' @param header Header separator. Can be `NULL`. #' @param empty_line Separator used for empty lines. If `NULL`, line remains #' empty (i.e. filled with whitespaces). #' @param format Name of output-format, as string. If `NULL` (or `"text"`), #' returned output is used for basic printing. Can be one of `NULL` (the #' default) resp. `"text"` for plain text, `"markdown"` (or #' `"md"`) for markdown and `"html"` for HTML output. #' @param title,caption,subtitle Table title (same as caption) and subtitle, as strings. If `NULL`, #' no title or subtitle is printed, unless it is stored as attributes (`table_title`, #' or its alias `table_caption`, and `table_subtitle`). If `x` is a list of #' data frames, `caption` may be a list of table captions, one for each table. #' @param footer Table footer, as string. For markdown-formatted tables, table #' footers, due to the limitation in markdown rendering, are actually just a #' new text line under the table. If `x` is a list of data frames, `footer` #' may be a list of table captions, one for each table. #' @param align Column alignment. For markdown-formatted tables, the default #' `align = NULL` will right-align numeric columns, while all other #' columns will be left-aligned. If `format = "html"`, the default is #' left-align first column and center all remaining. May be a string to #' indicate alignment rules for the complete table, like `"left"`, #' `"right"`, `"center"` or `"firstleft"` (to left-align first #' column, center remaining); or maybe a string with abbreviated alignment #' characters, where the length of the string must equal the number of columns, #' for instance, `align = "lccrl"` would left-align the first column, center #' the second and third, right-align column four and left-align the fifth #' column. For HTML-tables, may be one of `"center"`, `"left"` or #' `"right"`. #' @param group_by Name of column in `x` that indicates grouping for tables. #' Only applies when `format = "html"`. `group_by` is passed down #' to `gt::gt(groupname_col = group_by)`. #' @param width Refers to the width of columns (with numeric values). Can be #' either `NULL`, a number or a named numeric vector. If `NULL`, the width for #' each column is adjusted to the minimum required width. If a number, columns #' with numeric values will have the minimum width specified in `width`. If #' a named numeric vector, value names are matched against column names, and #' for each match, the specified width is used (see 'Examples'). Only applies #' to text-format (see `format`). #' @param table_width Numeric, or `"auto"`, indicating the width of the complete #' table. If `table_width = "auto"` and the table is wider than the current #' width (i.e. line length) of the console (or any other source for textual #' output, like markdown files), the table is split into two parts. Else, #' if `table_width` is numeric and table rows are larger than `table_width`, #' the table is split into two parts. #' @inheritParams format_value #' @inheritParams get_data #' #' @note The values for `caption`, `subtitle` and `footer` #' can also be provided as attributes of `x`, e.g. 
if `caption = NULL` #' and `x` has attribute `table_caption`, the value for this #' attribute will be used as table caption. `table_subtitle` is the #' attribute for `subtitle`, and `table_footer` for `footer`. #' #' @inherit format_table seealso #' #' @return A data frame in character format. #' @examples #' export_table(head(iris)) #' export_table(head(iris), sep = " ", header = "*", digits = 1) #' #' # split longer tables #' export_table(head(iris), table_width = 30) #' #' \dontrun{ #' # colored footers #' data(iris) #' x <- as.data.frame(iris[1:5, ]) #' attr(x, "table_footer") <- c("This is a yellow footer line.", "yellow") #' export_table(x) #' #' attr(x, "table_footer") <- list( #' c("\nA yellow line", "yellow"), #' c("\nAnd a red line", "red"), #' c("\nAnd a blue line", "blue") #' ) #' export_table(x) #' #' attr(x, "table_footer") <- list( #' c("Without the ", "yellow"), #' c("new-line character ", "red"), #' c("we can have multiple colors per line.", "blue") #' ) #' export_table(x) #' } #' #' # column-width #' d <- data.frame( #' x = c(1, 2, 3), #' y = c(100, 200, 300), #' z = c(10000, 20000, 30000) #' ) #' export_table(d) #' export_table(d, width = 8) #' export_table(d, width = c(x = 5, z = 10)) #' export_table(d, width = c(x = 5, y = 5, z = 10), align = "lcr") #' @export export_table <- function(x, sep = " | ", header = "-", empty_line = NULL, digits = 2, protect_integers = TRUE, missing = "", width = NULL, format = NULL, title = NULL, caption = title, subtitle = NULL, footer = NULL, align = NULL, group_by = NULL, zap_small = FALSE, table_width = NULL, verbose = TRUE) { # check args if (is.null(format)) { format <- "text" } if (format == "md") { format <- "markdown" } # sanity check if (is.null(x) || (is.data.frame(x) && nrow(x) == 0) || .is_empty_object(x)) { if (isTRUE(verbose)) { message(paste0("Can't export table to ", format, ", data frame is empty.")) } return(NULL) } # if we have a list of data frame and HTML format, create a single # data frame now... if (identical(format, "html") && !is.data.frame(x) && is.list(x)) { x <- do.call(rbind, lapply(x, function(i) { attr_name <- .check_caption_attr_name(i) i$Component <- attr(i, attr_name)[1] i })) } # check for indention indent_groups <- attributes(x)$indent_groups indent_rows <- attributes(x)$indent_rows # single data frame if (is.data.frame(x)) { # check default attributes for caption, sub-title and footer if (!is.null(title)) { caption <- title } if (is.null(caption)) { attr_name <- .check_caption_attr_name(x) caption <- attributes(x)[[attr_name]] } if (is.null(subtitle)) { subtitle <- attributes(x)$table_subtitle } if (is.null(footer)) { footer <- attributes(x)$table_footer } # convert data frame into specified output format out <- .export_table( x = x, sep = sep, header = header, digits = digits, protect_integers = protect_integers, missing = missing, width = width, format = format, caption = caption, subtitle = subtitle, footer = footer, align = align, group_by = group_by, zap_small = zap_small, empty_line = empty_line, indent_groups = indent_groups, indent_rows = indent_rows, table_width = table_width ) } else if (is.list(x)) { # remove empty elements l <- .compact_list(x) # list of data frames tmp <- lapply(1:length(l), function(element) { i <- l[[element]] # use individual footer for each list element... t_footer <- attributes(i)$table_footer # ...unless we have a footer-argument. 
# Then use this as last (final) footer if (element == length(l) && is.null(attributes(i)$table_footer) && !is.null(footer) && !is.list(footer)) { t_footer <- footer } # if we still have no footer, check if user provided a list of titles if (is.null(t_footer) && !is.null(footer) && is.list(footer) && length(footer) == length(l)) { t_footer <- footer[[element]] } # for lists of data frame, each list element may have # an own attribute for the title, to have "subheadings" # for each table attr_name <- .check_caption_attr_name(i) # if only alias "title" is provided, copy it to caption-variable if (!is.null(title) && is.null(caption)) { caption <- title } # use individual title for each list element... t_title <- attributes(i)[[attr_name]] # ...unless we have a title-argument. # Then use this as first (main) header if (element == 1 && is.null(attributes(i)[[attr_name]]) && !is.null(caption) && !is.list(caption)) { t_title <- caption } # if we still have no title, check if user provided a list of titles if (is.null(t_title) && !is.null(caption) && is.list(caption) && length(caption) == length(l)) { t_title <- caption[[element]] } # convert data frame into specified output format .export_table( x = i, sep = sep, header = header, digits = digits, protect_integers = protect_integers, missing = missing, width = width, format = format, caption = t_title, subtitle = attributes(i)$table_subtitle, footer = t_footer, align = align, group_by = group_by, zap_small = zap_small, empty_line = empty_line, indent_groups = indent_groups, indent_rows = indent_rows, table_width = table_width ) }) # insert new lines between tables out <- c() if (format == "text") { for (i in 1:length(tmp)) { out <- paste0(out, tmp[[i]], "\n") } out <- substr(out, 1, nchar(out) - 1) } else if (format == "markdown") { for (i in 1:length(tmp)) { out <- c(out, tmp[[i]], "") } out <- out[1:(length(out) - 1)] } } else { return(NULL) } # add specific knitr-attribute for proper printing inside rmarkdown if (format == "markdown") { attr(out, "format") <- "pipe" class(out) <- c("knitr_kable", "character") } else if (format == "text") { class(out) <- c("insight_table", class(out)) } out } # check whether "table_caption" or its alias "table_title" is used as attribute .check_caption_attr_name <- function(x) { attr_name <- "table_caption" if (is.null(attr(x, attr_name, exact = TRUE)) && !is.null(attr(x, "table_title", exact = TRUE))) { attr_name <- "table_title" } attr_name } # create matrix of raw table layout -------------------- .export_table <- function(x, sep = " | ", header = "-", digits = 2, protect_integers = TRUE, missing = "", width = NULL, format = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, group_by = NULL, zap_small = FALSE, empty_line = NULL, indent_groups = NULL, indent_rows = NULL, table_width = NULL) { df <- as.data.frame(x) # check width argument, for format value. 
cannot have # named vector of length > 1 here if (is.null(width) || length(width) == 1) { col_width <- width } else { col_width <- NULL } # round all numerics col_names <- names(df) df <- as.data.frame(sapply(df, function(i) { if (is.numeric(i)) { format_value(i, digits = digits, protect_integers = protect_integers, missing = missing, width = col_width, zap_small = zap_small ) } else { i } }, simplify = FALSE), stringsAsFactors = FALSE) # Convert to character df <- as.data.frame(sapply(df, as.character, simplify = FALSE), stringsAsFactors = FALSE) names(df) <- col_names df[is.na(df)] <- as.character(missing) if (identical(format, "html")) { # html formatting starts here, needs less preparation of table matrix out <- .format_html_table( df, caption = caption, subtitle = subtitle, footer = footer, align = align, group_by = group_by, indent_groups = indent_groups, indent_rows = indent_rows ) # text and markdown go here... } else { # Add colnames as row df <- rbind(colnames(df), df) # Align aligned <- format(df, justify = "right") # default alignment col_align <- rep("right", ncol(df)) # Center first row first_row <- as.character(aligned[1, ]) for (i in 1:length(first_row)) { aligned[1, i] <- format(trimws(first_row[i]), width = nchar(first_row[i]), justify = "right") } final <- as.matrix(aligned) # left-align first column (if a character or a factor) if (!is.numeric(x[, 1])) { final[, 1] <- format(trimws(final[, 1]), justify = "left") col_align[1] <- "left" } if (format == "text") { # go for simple text output out <- .format_basic_table( final, header, sep, caption = caption, subtitle = subtitle, footer = footer, align = align, empty_line = empty_line, indent_groups = indent_groups, indent_rows = indent_rows, col_names = col_names, col_width = width, col_align = col_align, table_width = table_width ) } else if (format == "markdown") { # markdown is a bit different... out <- .format_markdown_table( final, x, caption = caption, subtitle = subtitle, footer = footer, align = align, indent_groups = indent_groups, indent_rows = indent_rows ) } } out } # plain text formatting ------------------------ .format_basic_table <- function(final, header, sep, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, empty_line = NULL, indent_groups = NULL, indent_rows = NULL, col_names = NULL, col_width = NULL, col_align = NULL, table_width = NULL) { # align table, if requested if (!is.null(align) && length(align) == 1) { for (i in 1:ncol(final)) { align_char <- "" if (align %in% c("left", "right", "center", "firstleft")) { align_char <- "" } else { align_char <- substr(align, i, i) } # left alignment, or at least first line only left? if (align == "left" || (align == "firstleft" && i == 1) || align_char == "l") { final[, i] <- format(trimws(final[, i]), justify = "left") col_align[i] <- "left" # right-alignment } else if (align == "right" || align_char == "r") { final[, i] <- format(trimws(final[, i]), justify = "right") col_align[i] <- "right" # else, center } else { final[, i] <- format(trimws(final[, i]), justify = "centre") col_align[i] <- "centre" } } } # indent groups? 
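# `indent_groups` is a token that marks group-header rows in the first column.
# Illustrative sketch (hypothetical token "# " and parameter rows):
#   final[, 1]  ->  c("# Fixed Effects", "wt", "cyl")
# .indent_groups() indents the rows following such a header, strips the token
# and left-justifies the column, so the rows print as "Fixed Effects", "  wt", "  cyl".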
if (!is.null(indent_groups) && any(grepl(indent_groups, final[, 1], fixed = TRUE))) { final <- .indent_groups(final, indent_groups) } else if (!is.null(indent_rows) && any(grepl("# ", final[, 1], fixed = TRUE))) { final <- .indent_rows(final, indent_rows) } # check for fixed column widths if (!is.null(col_width) && length(col_width) > 1 && !is.null(names(col_width))) { matching_columns <- stats::na.omit(match(names(col_width), col_names)) if (length(matching_columns)) { for (i in matching_columns) { w <- as.vector(col_width[col_names[i]]) final[, i] <- format(trimws(final[, i]), width = w, justify = col_align[i]) } } } # we can split very wide table into maximum three parts # this is currently hardcoded, not flexible, so we cannot allow # more than three parts of one wide table final2 <- NULL final3 <- NULL # save first column we may need this when table is wrapped into multiple # parts due to over-lengthy lines if (identical(table_width, "auto") || (!is.null(table_width) && is.numeric(table_width))) { # check current line width in console and width of table rows if (is.numeric(table_width)) { line_width <- table_width } else { line_width <- options()$width } # first split - table columns longer than "line_width" go # into a second string row_width <- nchar(paste0(final[1, ], collapse = sep)) # if wider, save first column - we need to repeat this later if (row_width > line_width) { i <- 1 while (nchar(paste0(final[1, 1:i], collapse = sep)) < line_width) { i <- i + 1 } if (i > 2 && i < ncol(final)) { final2 <- final[, c(1, i:ncol(final))] final <- final[, 1:(i - 1)] } } # second split - table columns longer than "line_width" go # into a third string row_width <- nchar(paste0(final2[1, ], collapse = sep)) # if wider, save first column - we need to repeat this later if (row_width > line_width) { i <- 1 while (nchar(paste0(final2[1, 1:i], collapse = sep)) < line_width) { i <- i + 1 } if (i > 2 && i < ncol(final2)) { final3 <- final2[, c(1, i:ncol(final2))] final2 <- final2[, 1:(i - 1)] } } } # Transform to character rows <- .table_parts(c(), final, header, sep, empty_line) # if we have over-lengthy tables that are split into two parts, # print second table here if (!is.null(final2)) { rows <- .table_parts(paste0(rows, "\n"), final2, header, sep, empty_line) } # if we have over-lengthy tables that are split into two parts, # print second table here if (!is.null(final3)) { rows <- .table_parts(paste0(rows, "\n"), final3, header, sep, empty_line) } # if caption is available, add a row with a headline if (!is.null(caption) && caption[1] != "") { if (length(caption) == 2 && .is_valid_colour(caption[2])) { caption <- .colour(caption[2], caption[1]) } if (!is.null(subtitle)) { if (length(subtitle) == 2 && .is_valid_colour(subtitle[2])) { subtitle <- .colour(subtitle[2], subtitle[1]) } } else { subtitle <- "" } # paste everything together and remove unnecessary double spaces title_line <- .trim(paste0(caption[1], " ", subtitle[1])) title_line <- gsub(" ", " ", title_line, fixed = TRUE) rows <- paste0(title_line, "\n\n", rows) } # if footer is available, add a row with a footer. 
footers may # also be provided as list of character vectors, so each footer # line can get its own color if (!is.null(footer)) { if (is.list(footer)) { for (i in footer) { rows <- .paste_footers(i, rows) } } else { rows <- .paste_footers(footer, rows) } } rows } .table_parts <- function(rows, final, header, sep, empty_line) { for (row in 1:nrow(final)) { final_row <- paste0(final[row, ], collapse = sep) # check if we have an empty row if (!is.null(empty_line) && all(nchar(trimws(final[row, ])) == 0)) { rows <- paste0(rows, paste0(rep_len(empty_line, nchar(final_row)), collapse = ""), sep = "\n") } else { rows <- paste0(rows, final_row, sep = "\n") } # First row separation if (row == 1) { if (!is.null(header)) { rows <- paste0(rows, paste0(rep_len(header, nchar(final_row)), collapse = ""), sep = "\n") } } } rows } #' @export print.insight_table <- function(x, ...) { cat(x) invisible(x) } # helper ---------------- .paste_footers <- function(footer, rows) { if (.is_empty_string(footer)) { return(rows) } if (length(footer) == 2 && .is_valid_colour(footer[2])) { footer <- .colour(footer[2], footer[1]) } paste0(rows, footer[1]) } .indent_groups <- function(final, indent_groups) { # check length of indent string whitespace <- sprintf("%*s", nchar(indent_groups), " ") # find start index of groups grps <- grep(indent_groups, final[, 1], fixed = TRUE) # create index for those rows that should be indented grp_rows <- seq(grps[1], nrow(final)) grp_rows <- grp_rows[!grp_rows %in% grps] # indent final[grp_rows, 1] <- paste0(whitespace, final[grp_rows, 1]) # remove indent token final[, 1] <- gsub(indent_groups, "", final[, 1], fixed = TRUE) # trim whitespace at end final[, 1] <- trimws(final[, 1], which = "right") # move group name (indent header) to left final[, 1] <- format(final[, 1], justify = "left", width = max(nchar(final[, 1]))) final } .indent_rows <- function(final, indent_rows, whitespace = " ") { # create index for those rows that should be indented grp_rows <- indent_rows + 1 # indent final[grp_rows, 1] <- paste0(whitespace, final[grp_rows, 1]) # find rows that should not be indented non_grp_rows <- 1:nrow(final) non_grp_rows <- non_grp_rows[!non_grp_rows %in% grp_rows] # paste whitespace at end, to ensure equal width for each string final[non_grp_rows, 1] <- paste0(final[non_grp_rows, 1], whitespace) # remove indent token grps <- grep("# ", final[, 1], fixed = TRUE) final[, 1] <- gsub("# ", "", final[, 1], fixed = TRUE) # move group name (indent header) to left final[grps, 1] <- format(final[grps, 1], justify = "left", width = max(nchar(final[, 1]))) final } .indent_rows_html <- function(final, indent_rows, whitespace = "") { # create index for those rows that should be indented grp_rows <- indent_rows + 1 # indent final[grp_rows, 1] <- paste0(whitespace, final[grp_rows, 1]) # find rows that should not be indented non_grp_rows <- 1:nrow(final) non_grp_rows <- non_grp_rows[!non_grp_rows %in% grp_rows] # remove indent token final[, 1] <- gsub("# ", "", final[, 1]) final } # markdown formatting ------------------- .format_markdown_table <- function(final, x, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, indent_groups = NULL, indent_rows = NULL) { column_width <- nchar(final[1, ]) n_columns <- ncol(final) first_row_leftalign <- (!is.null(align) && align == "firstleft") ## create header line for markdown table ----- header <- "|" # indention? 
than adjust column width for first column if (!is.null(indent_rows) || !is.null(indent_groups)) { column_width[1] <- column_width[1] + 2 } # go through all columns of the data frame for (i in 1:n_columns) { # create separator line for current column line <- paste0(rep_len("-", column_width[i]), collapse = "") # check if user-defined alignment is requested, and if so, extract # alignment direction and save to "align_char" align_char <- "" if (!is.null(align)) { if (align %in% c("left", "right", "center", "firstleft")) { align_char <- "" } else { align_char <- substr(align, i, i) } } # auto-alignment? if (is.null(align)) { # if so, check if string in column starts with # whitespace (indicating right-alignment) or not. if (grepl("^\\s", final[2, i])) { line <- paste0(line, ":") final[, i] <- format(final[, i], width = column_width[i] + 1, justify = "right") } else { line <- paste0(":", line) final[, i] <- format(final[, i], width = column_width[i] + 1, justify = "left") } # left alignment, or at least first line only left? } else if (align == "left" || (first_row_leftalign && i == 1) || align_char == "l") { line <- paste0(":", line) final[, i] <- format(final[, i], width = column_width[i] + 1, justify = "left") # right-alignment } else if (align == "right" || align_char == "r") { line <- paste0(line, ":") final[, i] <- format(final[, i], width = column_width[i] + 1, justify = "right") # else, center } else { line <- paste0(":", line, ":") final[, i] <- format(final[, i], width = column_width[i] + 2, justify = "centre") } # finally, we have our header-line that indicates column alignments header <- paste0(header, line, "|") } # indent groups? if (!is.null(indent_groups) && any(grepl(indent_groups, final[, 1], fixed = TRUE))) { final <- .indent_groups(final, indent_groups) } else if (!is.null(indent_rows) && any(grepl("# ", final[, 1], fixed = TRUE))) { final <- .indent_rows(final, indent_rows) } # Transform to character rows <- c() for (row in 1:nrow(final)) { final_row <- paste0("|", paste0(final[row, ], collapse = "|"), "|", collapse = "") rows <- c(rows, final_row) # First row separation if (row == 1) { rows <- c(rows, header) } } if (!is.null(caption)) { if (!is.null(subtitle)) { caption[1] <- paste0(caption[1], " ", subtitle[1]) } rows <- c(paste0("Table: ", .trim(caption[1])), "", rows) } if (!is.null(footer)) { if (is.list(footer)) { for (i in footer) { if (!.is_empty_string(i)) { rows <- c(rows, i[1]) } } } else if (!.is_empty_string(footer)) { rows <- c(rows, footer[1]) } } rows } # html formatting --------------------------- .format_html_table <- function(final, caption = NULL, subtitle = NULL, footer = NULL, align = "center", group_by = NULL, indent_groups = NULL, indent_rows = NULL) { # installed? check_if_installed("gt") if (is.null(align)) { align <- "firstleft" } group_by_columns <- c(intersect(c("Group", "Response", "Effects", "Component"), names(final)), group_by) if (!length(group_by_columns)) { group_by_columns <- NULL } else { # remove columns with only 1 unique value - this *should* be safe to # remove, but we may check if all printed sub titles look like intended for (i in group_by_columns) { if (.n_unique(final[[i]]) <= 1) { final[[i]] <- NULL } } } # indent groups? 
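# (Only `indent_rows` is handled here for HTML tables; the `indent_groups` token
# is not processed, because grouping is delegated to gt via `groupname_col` below,
# and caption/subtitle/footer become the gt table header and source note.)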
if (!is.null(indent_rows) && any(grepl("# ", final[, 1], fixed = TRUE))) { final <- .indent_rows_html(final, indent_rows) } tab <- gt::gt(final, groupname_col = group_by_columns) header <- gt::tab_header(tab, title = caption, subtitle = subtitle) footer <- gt::tab_source_note(header, source_note = footer) out <- gt::cols_align(footer, align = "center") # align columns if (!is.null(out[["_boxhead"]]) && !is.null(out[["_boxhead"]]$column_align)) { if (align == "firstleft") { out[["_boxhead"]]$column_align[1] <- "left" } else { col_align <- c() for (i in 1:nchar(align)) { col_align <- c(col_align, switch(substr(align, i, i), "l" = "left", "r" = "right", "center" )) } out[["_boxhead"]]$column_align <- col_align } } out } insight/R/is_multivariate.R0000644000175000017500000000244314077615665015622 0ustar nileshnilesh#' @title Checks if an object stems from a multivariate response model #' @name is_multivariate #' #' @description Small helper that checks if a model is a multivariate response #' model, i.e. a model with multiple outcomes. #' #' @param x A model object, or an object returned by a function from this package. #' #' @return A logical, `TRUE` if either `x` is a model object and is #' a multivariate response model, or `TRUE` if a return value from a #' function of \pkg{insight} is from a multivariate response model. #' #' @examples #' \dontrun{ #' library(rstanarm) #' data("pbcLong") #' model <- stan_mvmer( #' formula = list( #' logBili ~ year + (1 | id), #' albumin ~ sex + year + (year | id) #' ), #' data = pbcLong, #' chains = 1, cores = 1, seed = 12345, iter = 1000 #' ) #' #' f <- find_formula(model) #' is_multivariate(model) #' is_multivariate(f) #' } #' @export is_multivariate <- function(x) { if (inherits(x, "gam", which = TRUE) == 1) { f <- .gam_family(x) gam_mv <- !is.null(f) && f$family == "Multivariate normal" } else { gam_mv <- FALSE } (inherits(x, "brmsfit") && !is.null(stats::formula(x)$response)) | inherits(x, c("stanmvreg", "mlm", "mvord")) | gam_mv | !is.null(attr(x, "is_mv", exact = TRUE)) } insight/R/get_predicted_methods.R0000644000175000017500000000431714144234777016744 0ustar nileshnilesh# Printing ----------------------------------------------------------------- #' @export print.get_predicted <- function(x, ...) { print_colour("Predicted values:\n\n", "blue") # vectors have NULL columns; 1-dimensional arrays have NA columns (e.g., mgcv::gam predict() output) if (is.null(ncol(x)) || is.na(ncol(x))) { print.default(as.vector(x)) } else { print.data.frame(x) } print_colour("\nNOTE: Confidence intervals, if available, are stored as attributes and can be accessed using `as.data.frame()` on this output.\n", "yellow") } .print_bigdata <- function(x, nrows = 3, ncols = 3, ...) { out <- x[1:nrows, 1:ncols] # Add row row <- out[1, ] row[1, ] <- "..." out <- rbind(out, row) # Add col out[[paste0("...x", ncol(x) - ncols)]] <- "..." 
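# the filler row/column are labelled with the number of omitted rows/columns;
# e.g. previewing a 100 x 10 data frame with nrows = 3 and ncols = 3 yields the
# labels "...x7" (extra column, above) and "...x97" (extra row, below)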
row.names(out)[nrows + 1] <- paste0("...x", nrow(x) - nrows) class(out) <- "data.frame" out } # As data frame ----------------------------------------------------------- #' @export as.data.frame.get_predicted <- function(x, ..., keep_iterations = TRUE) { # a regular data.frame (e.g., from PCA/FA) if (inherits(x, "data.frame") && !"iterations" %in% names(attributes(x)) && !"Response" %in% colnames(x)) { out <- as.data.frame.data.frame(x) # grouped response level (e.g., polr or multinom) } else if (inherits(x, "data.frame") && "Response" %in% colnames(x)) { out <- as.data.frame.data.frame(x) if ("ci_data" %in% names(attributes(x))) { out <- merge(out, attributes(x)$ci_data, by = c("Row", "Response"), sort = FALSE) } } else { # Then it must be predictions from a regression model out <- data.frame("Predicted" = as.vector(x)) if ("ci_data" %in% names(attributes(x))) { out <- cbind(out, attributes(x)$ci_data) } if ("iterations" %in% names(attributes(x)) && keep_iterations == TRUE) { out <- cbind(out, attributes(x)$iterations) } } out } #' @export summary.get_predicted <- function(object, ...) { as.data.frame(object, keep_iterations = FALSE, ...) } #' @export as.matrix.get_predicted <- function(x, ...) { class(x) <- class(x)[class(x) != "get_predicted"] as.matrix(x) } insight/R/model_name.R0000644000175000017500000000254414077615665014523 0ustar nileshnilesh#' @title Name the model #' @name model_name #' #' @description Returns the "name" (class attribute) of a model, possibly including further information. #' #' @inheritParams get_residuals #' @param include_formula Should the name include the model's formula. #' @param include_call If `TRUE`, will return the function call as a name. #' @param ... Currently not used. #' #' @return A character string of a name (which usually equals the model's class attribute). #' #' @examples #' m <- lm(Sepal.Length ~ Petal.Width, data = iris) #' model_name(m) #' model_name(m, include_formula = TRUE) #' model_name(m, include_call = TRUE) #' #' if (require("lme4")) { #' model_name(lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris)) #' } #' @export model_name <- function(x, ...) { UseMethod("model_name") } #' @rdname model_name #' @export model_name.default <- function(x, include_formula = FALSE, include_call = FALSE, ...) { if (include_call) { return(format(get_call(x))) } name <- class(x)[[1]] if (include_formula) { f <- format(find_formula(x, verbose = FALSE)) name <- paste0(name, "(", f, ")") } name } #' @export model_name.list <- function(x, include_formula = FALSE, include_call = FALSE, ...) { sapply(x, model_name, include_formula = include_formula, include_call = include_call, ...) } insight/R/model_info.R0000644000175000017500000006452614141732514014527 0ustar nileshnilesh#' @title Access information from model objects #' @name model_info #' #' @description Retrieve information from model objects. #' #' @param verbose Toggle off warnings. #' @inheritParams find_predictors #' @inheritParams link_inverse #' @inheritParams find_formula #' #' @return A list with information about the model, like family, link-function #' etc. (see 'Details'). #' #' @details `model_info()` returns a list with information about the #' model for many different model objects. Following information #' is returned, where all values starting with `is_` are logicals. 
#' * `is_binomial`: family is binomial (but not negative binomial) #' * `is_bernoulli`: special case of binomial models: family is Bernoulli #' * `is_poisson`: family is poisson #' * `is_negbin`: family is negative binomial #' * `is_count`: model is a count model (i.e. family is either poisson or negative binomial) #' * `is_beta`: family is beta #' * `is_betabinomial`: family is beta-binomial #' * `is_dirichlet`: family is dirichlet #' * `is_exponential`: family is exponential (e.g. Gamma or Weibull) #' * `is_logit`: model has logit link #' * `is_probit`: model has probit link #' * `is_linear`: family is gaussian #' * `is_tweedie`: family is tweedie #' * `is_ordinal`: family is ordinal or cumulative link #' * `is_cumulative`: family is ordinal or cumulative link #' * `is_multinomial`: family is multinomial or categorical link #' * `is_categorical`: family is categorical link #' * `is_censored`: model is a censored model (has a censored response, including survival models) #' * `is_truncated`: model is a truncated model (has a truncated response) #' * `is_survival`: model is a survival model #' * `is_zero_inflated`: model has zero-inflation component #' * `is_hurdle`: model has zero-inflation component and is a hurdle-model (truncated family distribution) #' * `is_dispersion`: model has dispersion component #' * `is_mixed`: model is a mixed effects model (with random effects) #' * `is_multivariate`: model is a multivariate response model (currently only works for _brmsfit_ objects) #' * `is_trial`: model response contains additional information about the trials #' * `is_bayesian`: model is a Bayesian model #' * `is_gam`: model is a generalized additive model #' * `is_anova`: model is an Anova object #' * `is_ttest`: model is an object of class `htest`, returned by `t.test()` #' * `is_correlation`: model is an object of class `htest`, returned by `cor.test()` #' * `is_ranktest`: model is an object of class `htest`, returned by `cor.test()` (if Spearman's rank correlation), `wilcox.test()` or `kruskal.test()`. #' * `is_levenetest`: model is an object of class `anova`, returned by `car::leveneTest()`. #' * `is_onewaytest`: model is an object of class `htest`, returned by `oneway.test()` #' * `is_proptest`: model is an object of class `htest`, returned by `prop.test()` #' * `is_binomtest`: model is an object of class `htest`, returned by `binom.test()` #' * `is_chi2test`: model is an object of class `htest`, returned by `chisq.test()` #' * `is_xtab`: model is an object of class `htest` or `BFBayesFactor`, and test-statistic stems from a contingency table (i.e. `chisq.test()` or `BayesFactor::contingencyTableBF()`). #' * `link_function`: the link-function #' * `family`: the family-object #' * `n_obs`: number of observations #' * `model_terms`: a list with all model terms, including terms such as random effects or from zero-inflated model parts. #' #' @examples #' ldose <- rep(0:5, 2) #' numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16) #' sex <- factor(rep(c("M", "F"), c(6, 6))) #' SF <- cbind(numdead, numalive = 20 - numdead) #' dat <- data.frame(ldose, sex, SF, stringsAsFactors = FALSE) #' m <- glm(SF ~ sex * ldose, family = binomial) #' #' model_info(m) #' \dontrun{ #' library(glmmTMB) #' data("Salamanders") #' m <- glmmTMB( #' count ~ spp + cover + mined + (1 | site), #' ziformula = ~ spp + mined, #' dispformula = ~DOY, #' data = Salamanders, #' family = nbinom2 #' ) #' #' model_info(m) #' } #' @export model_info <- function(x, ...)
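# Usage sketch (mirroring the examples above): for a binomial GLM such as
#   m <- glm(SF ~ sex * ldose, family = binomial)
# model_info(m) returns a list in which, e.g.,
#   model_info(m)$is_binomial  # TRUE
#   model_info(m)$is_logit     # TRUE (logit link)
#   model_info(m)$is_linear    # FALSE
# See the 'Details' section above for the full set of entries.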
{ UseMethod("model_info") } # Default methods -------------------------------------- #' @export model_info.data.frame <- function(x, ...) { stop("A data frame is no valid object for this function.", call. = FALSE) } #' @rdname model_info #' @export model_info.default <- function(x, verbose = TRUE, ...) { if (inherits(x, "list") && .obj_has_name(x, "gam")) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) } faminfo <- tryCatch( { if (inherits(x, c("Zelig-relogit"))) { stats::binomial(link = "logit") } else { stats::family(x) } }, error = function(x) { NULL } ) if (!is.null(faminfo)) { .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } else { if (isTRUE(verbose)) { warning("Could not access model information.", call. = FALSE) } NULL } } #' @export model_info.model_fit <- function(x, verbose = TRUE, ...) { model_info(x$fit, verbose = verbose, ...) } # Models with general handling, Gaussian ---------------------------------- #' @export model_info.anova <- function(x, verbose = TRUE, ...) { if (!is.null(attributes(x)$heading) && grepl("Levene's Test", attributes(x)$heading, fixed = TRUE)) { .make_family(x, verbose = verbose) } else { NULL } } #' @export model_info.mmclogit <- function(x, verbose = TRUE, ...) { .make_family(x, verbose = verbose, ...) } #' @export model_info.maxLik <- model_info.mmclogit #' @export model_info.mjoint <- model_info.mmclogit #' @export model_info.censReg <- model_info.mmclogit #' @export model_info.htest <- model_info.mmclogit #' @export model_info.BFBayesFactor <- model_info.mmclogit #' @export model_info.lme <- model_info.mmclogit #' @export model_info.bayesx <- model_info.mmclogit #' @export model_info.rq <- model_info.mmclogit #' @export model_info.crq <- model_info.mmclogit #' @export model_info.crqs <- model_info.mmclogit #' @export model_info.nlrq <- model_info.mmclogit #' @export model_info.rqss <- model_info.mmclogit #' @export model_info.mixed <- model_info.mmclogit #' @export model_info.plm <- model_info.mmclogit #' @export model_info.mcmc <- model_info.mmclogit #' @export model_info.bayesQR <- model_info.mmclogit #' @export model_info.gls <- model_info.mmclogit #' @export model_info.nls <- model_info.mmclogit #' @export model_info.MANOVA <- model_info.mmclogit #' @export model_info.RM <- model_info.mmclogit #' @export model_info.truncreg <- model_info.mmclogit #' @export model_info.lmRob <- model_info.mmclogit #' @export model_info.speedlm <- model_info.mmclogit #' @export model_info.lmrob <- model_info.mmclogit #' @export model_info.complmrob <- model_info.mmclogit #' @export model_info.lm_robust <- model_info.mmclogit #' @export model_info.iv_robust <- model_info.mmclogit #' @export model_info.systemfit <- model_info.mmclogit #' @export model_info.lqmm <- model_info.mmclogit #' @export model_info.lqm <- model_info.mmclogit #' @export model_info.felm <- model_info.mmclogit #' @export model_info.feis <- model_info.mmclogit #' @export model_info.ivreg <- model_info.mmclogit #' @export model_info.ivFixed <- model_info.mmclogit #' @export model_info.aovlist <- model_info.mmclogit #' @export model_info.rma <- model_info.mmclogit #' @export model_info.meta_random <- model_info.mmclogit #' @export model_info.meta_bma <- model_info.mmclogit #' @export model_info.meta_fixed <- model_info.mmclogit #' @export model_info.metaplus <- model_info.mmclogit #' @export model_info.mclogit <- model_info.mmclogit #' @export model_info.mlm <- function(x, ...) 
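# Multivariate-response linear models (class "mlm") are handled like the default
# method, but flagged as multivariate (multi.var = TRUE).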
{ .make_family(x, multi.var = TRUE, ...) } #' @export model_info.afex_aov <- function(x, verbose = TRUE, ...) { if (!is.null(x$aov)) { .make_family(x$aov, verbose = verbose, ...) } else { .make_family(x$lm, verbose = verbose, ...) } } # Models with logit-link -------------------------------- #' @export model_info.logistf <- function(x, verbose = TRUE, ...) { faminfo <- stats::binomial(link = "logit") .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } #' @export model_info.lrm <- model_info.logistf #' @export model_info.blrm <- model_info.logistf #' @export model_info.multinom <- model_info.logistf #' @export model_info.mlogit <- model_info.logistf #' @export model_info.gmnl <- model_info.logistf # Models with ordinal family ------------------------------------ #' @export model_info.clm <- function(x, verbose = TRUE, ...) { faminfo <- stats::binomial(link = .get_ordinal_link(x)) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } #' @export model_info.clm2 <- model_info.clm #' @export model_info.clmm <- model_info.clm #' @export model_info.mixor <- model_info.clm #' @export model_info.mvord <- function(x, verbose = verbose, ...) { link_name <- x$rho$link$name faminfo <- stats::binomial(link = ifelse(link_name == "mvprobit", "probit", "logit")) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, multi.var = TRUE, verbose = verbose, ... ) } # Models with family-function ---------------------------------- #' @export model_info.bamlss <- function(x, verbose = TRUE, ...) { faminfo <- stats::family(x) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$links[1] == "logit", link.fun = faminfo$links[1], verbose = verbose, ... ) } #' @export model_info.speedglm <- function(x, verbose = TRUE, ...) { faminfo <- stats::family(x) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } #' @export model_info.brmultinom <- model_info.speedglm # Models with tobit family ---------------------------------- #' @export model_info.flexsurvreg <- function(x, verbose = TRUE, ...) { dist <- parse(text = .safe_deparse(x$call))[[1]]$dist faminfo <- .make_tobit_family(x, dist) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } #' @export model_info.tobit <- function(x, verbose = TRUE, ...) { faminfo <- .make_tobit_family(x) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } #' @export model_info.crch <- model_info.tobit #' @export model_info.survreg <- model_info.tobit # Models with family in object ---------------------------------- #' @export model_info.MixMod <- function(x, verbose = TRUE, ...) { faminfo <- x$family .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } #' @export model_info.glmmPQL <- model_info.MixMod #' @export model_info.bife <- model_info.MixMod #' @export model_info.glmx <- function(x, verbose = TRUE, ...) { faminfo <- x$family$glm .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } #' @export model_info.fixest <- function(x, verbose = TRUE, ...) 
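# fixest objects store either a full family object (e.g. from feglm()) or a
# shorthand string such as "negbin", "logit" or "poisson"; the body below maps
# the shorthand to the corresponding family and link. feols() fits carry no
# family, so .make_family() is called with its defaults.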
{ faminfo <- x$family if (is.null(faminfo)) { if (!is.null(x$method) && x$method == "feols") { .make_family(x, ...) } } else if (inherits(faminfo, "family")) { .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, verbose = verbose, ... ) } else { fitfam <- switch(faminfo, "negbin" = "negative binomial", "logit" = "binomial", faminfo ) link <- switch(faminfo, "poisson" = , "negbin" = "log", "logit" = "logit", "gaussian" = "identity" ) .make_family( x = x, fitfam = fitfam, logit.link = link == "logit", link.fun = link, verbose = verbose, ... ) } } #' @export model_info.feglm <- model_info.fixest # Survival-models ---------------------------------------- #' @export model_info.coxph <- function(x, verbose = TRUE, ...) { .make_family( x = x, fitfam = "survival", logit.link = TRUE, link.fun = NULL, verbose = verbose, ... ) } #' @export model_info.coxr <- model_info.coxph #' @export model_info.aareg <- model_info.coxph #' @export model_info.survfit <- model_info.coxph #' @export model_info.coxme <- model_info.coxph #' @export model_info.riskRegression <- model_info.coxph #' @export model_info.comprisk <- model_info.coxph # Zero-Inflated Models ------------------------------ #' @export model_info.zeroinfl <- function(x, ...) { if (is.list(x$dist)) { dist <- x$dist[[1]] } else { dist <- x$dist } fitfam <- switch(dist, poisson = "poisson", negbin = "negative binomial", "poisson" ) .make_family( x = x, fitfam = fitfam, zero.inf = TRUE, link.fun = "log", ... ) } #' @export model_info.zerotrunc <- model_info.zeroinfl #' @export model_info.hurdle <- function(x, ...) { if (is.list(x$dist)) { dist <- x$dist[[1]] } else { dist <- x$dist } fitfam <- switch(dist, poisson = "poisson", negbin = "negative binomial", "poisson" ) .make_family( x = x, fitfam = fitfam, zero.inf = TRUE, hurdle = TRUE, link.fun = "log", ... ) } #' @export model_info.mhurdle <- function(x, ...) { .make_family( x = x, zero.inf = TRUE, hurdle = TRUE, ... ) } # Bayesian Models --------------------------- #' @export model_info.stanreg <- function(x, ...) { if (inherits(x, "polr")) { model_info.polr(x) } else { model_info.default(x) } } #' @export model_info.brmsfit <- function(x, ...) { faminfo <- stats::family(x) if (is_multivariate(x)) { lapply(faminfo, function(.x) { .make_family( x = x, fitfam = .x$family, zero.inf = FALSE, logit.link = .x$link == "logit", multi.var = TRUE, link.fun = .x$link, dispersion = !.is_empty_object(insight::find_formula(x)$sigma), ... ) }) } else { .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", multi.var = FALSE, link.fun = faminfo$link, dispersion = !.is_empty_object(insight::find_formula(x)$sigma), ... ) } } #' @export model_info.stanmvreg <- function(x, ...) { faminfo <- stats::family(x) lapply(faminfo, function(.x) { .make_family( x = x, fitfam = .x$family, zero.inf = FALSE, logit.link = .x$link == "logit", multi.var = TRUE, link.fun = .x$link, ... ) }) } #' @export model_info.BGGM <- function(x, ...) { link <- switch(x$type, "continuous" = stats::gaussian(), stats::binomial() ) family <- switch(x$type, "continuous" = "gaussian", "binary" = "binomial", "ordinal" ) .make_family( x = x, fitfam = family, zero.inf = FALSE, logit.link = link$link == "logit", link.fun = link$link, ... ) } # Other models ---------------------------- #' @export model_info.garch <- function(x, ...) { .make_family( x = x, ... ) } #' @export model_info.Rchoice <- function(x, ...) 
{ .make_family( x = x, fitfam = x$family, logit.link = x$link == "logit", link.fun = x$link, ... ) } #' @export model_info.ivprobit <- function(x, ...) { .make_family( x = x, fitfam = "binomial", logit.link = FALSE, link.fun = "probit", ... ) } #' @export model_info.glht <- function(x, verbose = TRUE, ...) { model_info(x$model, verbose = verbose, ...) } #' @export model_info.coeftest <- function(x, ...) { NULL } #' @export model_info.glmm <- function(x, ...) { f <- switch(tolower(x$family.glmm$family.glmm), "bernoulli.glmm" = , "binomial.glmm" = stats::binomial("logit"), "poisson.glmm" = stats::poisson("log"), stats::gaussian("identity") ) .make_family( x = x, fitfam = f$family, logit.link = f$link == "logit", multi.var = FALSE, link.fun = f$link, ... ) } #' @export model_info.robmixglm <- function(x, ...) { f <- switch(tolower(x$family), gaussian = stats::gaussian("identity"), binomial = stats::binomial("logit"), poisson = stats::poisson("log"), gamma = stats::Gamma("inverse"), truncpoisson = stats::poisson("log"), stats::gaussian("identity") ) .make_family( x = x, fitfam = f$family, logit.link = f$link == "logit", multi.var = FALSE, link.fun = f$link, zero.inf = x$family == "truncpoisson", hurdle = x$family == "truncpoisson", ... ) } #' @export model_info.Arima <- function(x, ...) { .make_family(x, ...) } #' @export model_info.summary.lm <- model_info.Arima #' @export model_info.averaging <- function(x, ...) { if (is.null(attributes(x)$modelList)) { warning("Can't calculate covariance matrix. Please use 'fit = TRUE' in 'model.avg()'.", call. = FALSE) return(NULL) } model_info.default(x = attributes(x)$modelList[[1]]) } #' @export model_info.merModList <- function(x, ...) { model_info.default(x[[1]], ...) } #' @export model_info.cglm <- function(x, ...) { link <- parse(text = .safe_deparse(x$call))[[1]]$link method <- parse(text = .safe_deparse(x$call))[[1]]$method if (!is.null(method) && method == "clm") { .make_family(x, ...) } else if (!is.null(link)) { .make_family( x, logit.link = link == "logit", link.fun = link, ... ) } else { .make_family(x, ...) } } #' @export model_info.cgam <- function(x, ...) { faminfo <- x$family .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, ... ) } #' @export model_info.gamm <- function(x, ...) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) NextMethod() } #' @export model_info.LORgee <- function(x, ...) { if (grepl(pattern = "logit", x = x$link, fixed = TRUE)) { link <- "logit" } else if (grepl(pattern = "probit", x = x$link, fixed = TRUE)) { link <- "probit" } else if (grepl(pattern = "cauchit", x = x$link, fixed = TRUE)) { link <- "cauchit" } else if (grepl(pattern = "cloglog", x = x$link, fixed = TRUE)) { link <- "cloglog" } else { link <- "logit" } if (x$link == "Cumulative logit") { family <- "ordinal" } else { family <- "multinomial" } .make_family( x = x, fitfam = family, logit.link = link == "logit", link.fun = link, ... ) } #' @export model_info.BBreg <- function(x, ...) { .make_family( x = x, fitfam = "betabinomial", logit.link = TRUE, multi.var = FALSE, zero.inf = FALSE, link.fun = "logit", ... ) } #' @export model_info.BBmm <- model_info.BBreg #' @export model_info.glmmadmb <- function(x, ...) { .make_family( x = x, fitfam = x$family, logit.link = x$link == "logit", multi.var = FALSE, zero.inf = x$zeroInflation, link.fun = x$link, ... ) } #' @export model_info.cpglmm <- function(x, ...) 
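# Compound Poisson (Tweedie) models from the 'cplm' package: the link is read
# from the original call; a missing link defaults to "log", and a numeric link
# (a power link) is reported as "tweedie".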
{ link <- parse(text = .safe_deparse(x@call))[[1]]$link if (is.null(link)) link <- "log" if (is.numeric(link)) link <- "tweedie" .make_family( x = x, fitfam = "poisson", logit.link = FALSE, multi.var = FALSE, link.fun = link, ... ) } #' @export model_info.zcpglm <- function(x, ...) { link <- parse(text = .safe_deparse(x@call))[[1]]$link if (is.null(link)) link <- "log" if (is.numeric(link)) link <- "tweedie" .make_family( x = x, fitfam = "poisson", logit.link = FALSE, multi.var = FALSE, link.fun = link, zero.inf = TRUE, ... ) } #' @export model_info.cpglm <- model_info.cpglmm #' @export model_info.bcplm <- model_info.cpglmm #' @export model_info.glimML <- function(x, ...) { fitfam <- switch(x@method, BB = "betabinomial", NB = "negative binomial" ) .make_family( x = x, fitfam = fitfam, logit.link = x@link == "logit", multi.var = FALSE, zero.inf = FALSE, link.fun = x@link, ... ) } #' @export model_info.gam <- function(x, ...) { if (!inherits(x, c("glm", "lm"))) { class(x) <- c(class(x), "glm", "lm") } faminfo <- .gam_family(x) link <- faminfo$link[1] is.mv <- faminfo$family == "Multivariate normal" if (is.mv) link <- "identity" .make_family( x = x, fitfam = faminfo$family, logit.link = !is.null(link) && (link == "logit" || faminfo$family == "multinom"), link.fun = link, multi.var = is.mv, ... ) } #' @export model_info.vgam <- function(x, ...) { faminfo <- x@family link.fun <- faminfo@blurb[3] if (grepl("^(l|L)ogit", link.fun)) link.fun <- "logit" .make_family( x = x, fitfam = faminfo@vfamily[1], logit.link = any(.string_contains("logit", faminfo@blurb)), link.fun = link.fun, ... ) } #' @export model_info.vglm <- model_info.vgam #' @export model_info.svy_vglm <- function(x, verbose = TRUE, ...) { model_info(x$fit, verbose = verbose) } #' @export model_info.glmmTMB <- function(x, ...) { # installed? check_if_installed("lme4") faminfo <- stats::family(x) .make_family( x = x, fitfam = faminfo$family, zero.inf = !.is_empty_object(lme4::fixef(x)$zi), hurdle = grepl("truncated", faminfo$family), logit.link = faminfo$link == "logit", link.fun = faminfo$link, dispersion = !.is_empty_object(lme4::fixef(x)$disp), ... ) } #' @export model_info.betareg <- function(x, ...) { .make_family( x = x, fitfam = "beta", logit.link = x$link$mean$name == "logit", link.fun = x$link$mean$name, ... ) } #' @export model_info.DirichletRegModel <- function(x, ...) { .make_family( x = x, fitfam = "dirichlet", logit.link = TRUE, link.fun = "logit", ... ) } #' @export model_info.gbm <- function(x, ...) { faminfo <- switch(x$distribution$name, laplace = , tdist = , gaussian = list(name = "gaussian", logit = FALSE, link = NULL), coxph = list(name = "survival", logit = TRUE, link = NULL), poisson = list(name = "poisson", logit = FALSE, link = "log"), huberized = , adaboost = , bernoulli = list(name = "binomial", logit = TRUE, link = "logit"), ) .make_family( x = x, fitfam = faminfo$name, logit.link = faminfo$logit, link.fun = faminfo$link, ... ) } #' @export model_info.MCMCglmm <- function(x, ...) { .make_family( x = x, fitfam = x$Residual$family, logit.link = FALSE, link.fun = "", ... ) } #' @export model_info.polr <- function(x, ...) { link <- x$method if (link == "logistic") link <- "logit" faminfo <- stats::binomial(link = link) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, ... ) } #' @export model_info.orm <- function(x, ...) 
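# Ordinal regression models fitted with rms::orm() are treated as binomial
# models with a logit link.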
{ faminfo <- stats::binomial(link = "logit") .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, ... ) } #' @export model_info.svyolr <- function(x, ...) { l <- switch(x$method, logistic = "logit", x$method ) faminfo <- stats::binomial(link = l) .make_family( x = x, fitfam = faminfo$family, logit.link = faminfo$link == "logit", link.fun = faminfo$link, ... ) } #' @export model_info.gamlss <- function(x, ...) { faminfo <- get(x$family[1], asNamespace("gamlss"))() .make_family( x = x, fitfam = faminfo$family[2], logit.link = faminfo$mu.link == "logit", link.fun = faminfo$mu.link, ... ) } #' @export model_info.mipo <- function(x, verbose = TRUE, ...) { tryCatch( { models <- eval(x$call$object) model_info(models$analyses[[1]], verbose = verbose, ...) }, error = function(e) { NULL } ) } #' @export model_info.mira <- function(x, ...) { model_info(x$analyses[[1]], ...) } # mfx models ------------------------------- #' @export model_info.betamfx <- function(x, ...) { model_info.betareg(x$fit) } #' @export model_info.betaor <- model_info.betamfx #' @export model_info.logitmfx <- function(x, ...) { model_info.default(x$fit, ...) } #' @export model_info.poissonmfx <- model_info.logitmfx #' @export model_info.negbinmfx <- model_info.logitmfx #' @export model_info.probitmfx <- model_info.logitmfx #' @export model_info.logitor <- model_info.logitmfx #' @export model_info.poissonirr <- model_info.logitmfx #' @export model_info.negbinirr <- model_info.logitmfx #' @export model_info.bfsl <- function(x, verbose = TRUE, ...) { .make_family( x = x, fitfam = "gaussian", logit.link = FALSE, link.fun = "identity", verbose = verbose, ... ) } # not yet supported ------------------------------- #' @export model_info.earth <- function(x, ...) { stop("Models of class 'earth' are not yet supported.", call. = FALSE) } insight/R/find_parameters.R0000644000175000017500000004365614077615664015576 0ustar nileshnilesh#' @title Find names of model parameters #' @name find_parameters #' #' @description Returns the names of model parameters, like they typically #' appear in the `summary()` output. For Bayesian models, the parameter #' names equal the column names of the posterior samples after coercion #' from `as.data.frame()`. See the documentation for your object's class: #' \itemize{ #' \item{[Bayesian models][find_parameters.BGGM] (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, ...)} #' \item{[Generalized additive models][find_parameters.gamlss] (\pkg{mgcv}, \pkg{VGAM}, ...)} #' \item{[Marginal effects models][find_parameters.betamfx] (\pkg{mfx})} #' \item{[Estimated marginal means][find_parameters.emmGrid] (\pkg{emmeans})} #' \item{[Mixed models][find_parameters.glmmTMB] (\pkg{lme4}, \pkg{glmmTMB}, \pkg{GLMMadaptive}, ...)} #' \item{[Zero-inflated and hurdle models][find_parameters.zeroinfl] (\pkg{pscl}, ...)} #' \item{[Models with special components][find_parameters.averaging] (\pkg{betareg}, \pkg{MuMIn}, ...)} #' } #' #' @param verbose Toggle messages and warnings. #' @param ... Currently not used. #' @inheritParams find_predictors #' #' @return A list of parameter names. For simple models, only one list-element, #' `conditional`, is returned. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_parameters(m) #' @export find_parameters <- function(x, ...) 
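# Usage sketch (mirroring the example above):
#   m <- lm(mpg ~ wt + cyl + vs, data = mtcars)
#   find_parameters(m)
#   #> $conditional
#   #> [1] "(Intercept)" "wt" "cyl" "vs"
# With flatten = TRUE, the same names are returned as a single character vector.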
{ UseMethod("find_parameters") } # Default methods ------------------------------------------- #' @rdname find_parameters #' @export find_parameters.default <- function(x, flatten = FALSE, verbose = TRUE, ...) { if (inherits(x, "list") && .obj_has_name(x, "gam")) { x <- x$gam class(x) <- c(class(x), c("glm", "lm")) pars <- find_parameters.gam(x) } else { pars <- tryCatch( { p <- .remove_backticks_from_string(names(stats::coef(x))) list(conditional = p) }, error = function(x) { NULL } ) } if (is.null(pars$conditional) || is.null(pars)) { if (isTRUE(verbose)) { warning(format_message(sprintf("Parameters can't be retrieved for objects of class '%s'.", class(x)[1])), call. = FALSE) } return(NULL) } if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.data.frame <- function(x, flatten = FALSE, ...) { stop("A data frame is no valid object for this function.") } #' @export find_parameters.summary.lm <- function(x, flatten = FALSE, ...) { cf <- stats::coef(x) l <- list(conditional = .remove_backticks_from_string(rownames(cf))) if (flatten) { unique(unlist(l)) } else { l } } # Ordinal ----------------------------------------------- #' @export find_parameters.polr <- function(x, flatten = FALSE, ...) { pars <- list(conditional = c(sprintf("Intercept: %s", names(x$zeta)), names(stats::coef(x)))) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.clm2 <- function(x, flatten = FALSE, ...) { cf <- stats::coef(x) n_intercepts <- length(x$xi) n_location <- length(x$beta) n_scale <- length(x$zeta) if (n_scale == 0) { pars <- list(conditional = names(cf)) pars$conditional <- .remove_backticks_from_string(pars$conditional) } else { pars <- .compact_list(list( conditional = names(cf)[1:(n_intercepts + n_location)], scale = names(cf)[(1 + n_intercepts + n_location):(n_scale + n_intercepts + n_location)] )) pars <- rapply(pars, .remove_backticks_from_string, how = "list") } if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.clmm2 <- find_parameters.clm2 #' @export find_parameters.bracl <- function(x, flatten = FALSE, ...) { pars <- list(conditional = names(stats::coef(x))) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.multinom <- function(x, flatten = FALSE, ...) { params <- stats::coef(x) pars <- if (is.matrix(params)) { list(conditional = colnames(params)) } else { list(conditional = names(params)) } pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.brmultinom <- find_parameters.multinom # SEM models ------------------------------------------------------ #' @export find_parameters.blavaan <- function(x, flatten = FALSE, ...) { # installed? 
check_if_installed("lavaan") param_tab <- lavaan::parameterEstimates(x) params <- paste0(param_tab$lhs, param_tab$op, param_tab$rhs) coef_labels <- names(lavaan::coef(x)) if ("group" %in% colnames(param_tab) && .n_unique(param_tab$group) > 1) { params <- paste0(params, " (group ", param_tab$group, ")") groups <- grepl("(.*)\\.g(.*)", coef_labels) coef_labels[!groups] <- paste0(coef_labels[!groups], " (group 1)") coef_labels[groups] <- gsub("(.*)\\.g(.*)", "\\1 \\(group \\2\\)", coef_labels[groups]) } are_labels <- !coef_labels %in% params if (any(are_labels)) { unique_labels <- unique(coef_labels[are_labels]) for (ll in seq_along(unique_labels)) { coef_labels[coef_labels == unique_labels[ll]] <- params[param_tab$label == unique_labels[ll]] } } pars <- data.frame( pars = coef_labels, comp = NA, stringsAsFactors = FALSE ) pars$comp[grepl("=~", pars$pars, fixed = TRUE)] <- "latent" pars$comp[grepl("~~", pars$pars, fixed = TRUE)] <- "residual" pars$comp[grepl("~1", pars$pars, fixed = TRUE)] <- "intercept" pars$comp[is.na(pars$comp)] <- "regression" pars$comp <- factor(pars$comp, levels = unique(pars$comp)) pars <- split(pars, pars$comp) pars <- .compact_list(lapply(pars, function(i) i$pars)) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.lavaan <- function(x, flatten = FALSE, ...) { # installed? check_if_installed("lavaan") pars <- get_parameters(x) pars$Component <- factor(pars$Component, levels = unique(pars$Component)) pars <- split(pars$Parameter, pars$Component) if (flatten) { unique(unlist(pars)) } else { pars } } # Panel models ---------------------------------------- #' @export find_parameters.pgmm <- function(x, component = c("conditional", "all"), flatten = FALSE, ...) { component <- match.arg(component) s <- summary(x, robust = FALSE) l <- list( conditional = rownames(s$coefficients), time_dummies = x$args$namest ) .filter_parameters( l, effects = "all", component = component, flatten = flatten, recursive = FALSE ) } #' @export find_parameters.wbm <- function(x, flatten = FALSE, ...) { s <- summary(x) pars <- .compact_list(list( conditional = rownames(s$within_table), instruments = rownames(s$between_table), random = rownames(s$ints_table) )) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.wbgee <- find_parameters.wbm # Other models ----------------------------------- #' @export find_parameters.rms <- find_parameters.default #' @export find_parameters.tobit <- find_parameters.default #' @export find_parameters.Rchoice <- function(x, flatten = FALSE, ...) { cf <- names(stats::coef(x)) if (cf[1] == "constant") { cf[1] <- "(Intercept)" } out <- list(conditional = cf) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.btergm <- function(x, flatten = FALSE, ...) { cf <- x@coef out <- list(conditional = names(cf)) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.crr <- function(x, flatten = FALSE, ...) { cs <- x$coef out <- list(conditional = names(cs)) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.riskRegression <- function(x, flatten = FALSE, ...) { junk <- utils::capture.output(cs <- stats::coef(x)) out <- list(conditional = as.vector(cs[, 1])) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.lmodel2 <- function(x, flatten = FALSE, ...) 
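# Model II regressions from the 'lmodel2' package estimate exactly one intercept
# and one slope, so the parameter names are fixed.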
{ out <- list(conditional = c("Intercept", "Slope")) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.ivFixed <- function(x, flatten = FALSE, ...) { out <- list(conditional = rownames(x$coefficients)) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.ivprobit <- function(x, flatten = FALSE, ...) { out <- list(conditional = x$names) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.mediate <- function(x, flatten = FALSE, ...) { info <- model_info(x$model.y) if (info$is_linear && !x$INT) { out <- list(conditional = c("ACME", "ADE", "Total Effect", "Prop. Mediated")) } else { out <- list( conditional = c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. Mediated (average)" ) ) } if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.ridgelm <- function(x, flatten = FALSE, ...) { out <- list(conditional = names(x$coef)) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.survreg <- function(x, flatten = FALSE, ...) { s <- summary(x) out <- list(conditional = rownames(s$table)) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.mle2 <- function(x, flatten = FALSE, ...) { # installed? check_if_installed("bbmle") s <- bbmle::summary(x) out <- list(conditional = rownames(s@coef)) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.mle <- find_parameters.mle2 #' @export find_parameters.glht <- function(x, flatten = FALSE, ...) { s <- summary(x) alt <- switch(x$alternative, two.sided = "==", less = ">=", greater = "<=" ) l <- list(conditional = paste(names(s$test$coefficients), alt, x$rhs)) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.manova <- function(x, flatten = FALSE, ...) { out <- list(conditional = .remove_backticks_from_string(rownames(stats::na.omit(stats::coef(x))))) if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.maov <- find_parameters.manova #' @export find_parameters.afex_aov <- function(x, flatten = FALSE, ...) { if (!is.null(x$aov)) { find_parameters(x$aov, flatten = flatten, ...) } else { find_parameters(x$lm, flatten = flatten, ...) } } #' @export find_parameters.mlm <- function(x, flatten = FALSE, ...) { cs <- stats::coef(summary(x)) out <- lapply(cs, function(i) { list(conditional = .remove_backticks_from_string(rownames(i))) }) names(out) <- gsub("^Response (.*)", "\\1", names(cs)) attr(out, "is_mv") <- TRUE if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.mvord <- function(x, flatten = FALSE, ...) { junk <- utils::capture.output(s <- summary(x)) out <- list( thresholds = .remove_backticks_from_string(rownames(s$thresholds)), conditional = .remove_backticks_from_string(rownames(s$coefficients)), correlation = .remove_backticks_from_string(rownames(s$error.structure)) ) attr(out, "is_mv") <- TRUE if (flatten) { unique(unlist(out)) } else { out } } #' @export find_parameters.gbm <- function(x, flatten = FALSE, ...) { s <- summary(x, plotit = FALSE) pars <- list(conditional = as.character(s$var)) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.BBreg <- function(x, flatten = FALSE, ...) 
{ pars <- list(conditional = rownames(stats::coef(x))) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.lrm <- function(x, flatten = FALSE, ...) { pars <- list(conditional = names(stats::coef(x))) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.flexsurvreg <- find_parameters.lrm #' @export find_parameters.aovlist <- function(x, flatten = FALSE, ...) { l <- list(conditional = unname(.remove_backticks_from_string(unlist(lapply(stats::coef(x), names))))) if (flatten) { unique(unlist(l)) } else { l } } #' @export find_parameters.rqs <- function(x, flatten = FALSE, ...) { sc <- suppressWarnings(summary(x)) if (all(unlist(lapply(sc, is.list)))) { pars <- list(conditional = rownames(stats::coef(sc[[1]]))) } else { return(find_parameters.default(x, flatten = flatten, ...)) } pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.crq <- function(x, flatten = FALSE, ...) { sc <- suppressWarnings(summary(x)) if (all(unlist(lapply(sc, is.list)))) { pars <- list(conditional = rownames(sc[[1]]$coefficients)) } else { pars <- list(conditional = rownames(sc$coefficients)) } pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.crqs <- find_parameters.crq #' @export find_parameters.lqmm <- function(x, flatten = FALSE, ...) { cs <- stats::coef(x) if (is.matrix(cs)) { pars <- list(conditional = rownames(cs)) } else { pars <- list(conditional = names(cs)) } pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.lqm <- find_parameters.lqmm #' @export find_parameters.aareg <- function(x, flatten = FALSE, ...) { sc <- summary(x) pars <- list(conditional = rownames(sc$table)) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.rma <- function(x, flatten = FALSE, ...) { tryCatch( { cf <- stats::coef(x) pars <- list(conditional = names(cf)) pars$conditional[grepl("intrcpt", pars$conditional)] <- "(Intercept)" pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } }, error = function(x) { NULL } ) } #' @export find_parameters.meta_random <- function(x, flatten = FALSE, ...) { tryCatch( { cf <- x$estimates pars <- list(conditional = rownames(cf)) pars$conditional[pars$conditional == "d"] <- "(Intercept)" if (flatten) { unique(unlist(pars)) } else { pars } }, error = function(x) { NULL } ) } #' @export find_parameters.meta_fixed <- find_parameters.meta_random #' @export find_parameters.meta_bma <- find_parameters.meta_random #' @export find_parameters.metaplus <- function(x, flatten = FALSE, ...) { pars <- list(conditional = rownames(x$results)) pars$conditional[grepl("muhat", pars$conditional)] <- "(Intercept)" pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.mipo <- function(x, flatten = FALSE, ...) 
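# Pooled multiple-imputation results from 'mice' (class "mipo"): parameter names
# are taken from the 'term' column of the pooled summary.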
{ pars <- list(conditional = as.vector(summary(x)$term)) pars$conditional <- .remove_backticks_from_string(pars$conditional) if (flatten) { unique(unlist(pars)) } else { pars } } #' @export find_parameters.mira <- function(x, flatten = FALSE, ...) { find_parameters(x$analyses[[1]], flatten = flatten, ...) } ## For questions or problems with this ask Fernando Miguez (femiguez@iastate.edu) #' @export find_parameters.nls <- function(x, flatten = FALSE, ...) { f <- find_formula(x) elements <- .get_elements(effects = "fixed", component = "all") f <- .prepare_predictors(x, f, elements) pars <- .return_vars(f, x) if (flatten) { unique(unlist(pars)) } else { pars } } # helper ---------------------------- .filter_parameters <- function(l, effects, component = "all", flatten, recursive = TRUE) { if (isTRUE(recursive)) { # recursively remove back-ticks from all list-elements parameters l <- rapply(l, .remove_backticks_from_string, how = "list") } else { l <- lapply(l, .remove_backticks_from_string) } # keep only requested effects elements <- .get_elements(effects, component = component) # remove empty list-elements l <- .compact_list(l[elements]) if (flatten) { unique(unlist(l)) } else { l } } insight/R/find_formula.R0000644000175000017500000013074614164336635015070 0ustar nileshnilesh#' @title Find model formula #' @name find_formula #' #' @description Returns the formula(s) for the different parts of a model #' (like fixed or random effects, zero-inflated component, ...). #' `formula_ok()` checks if a model formula has valid syntax #' regarding writing `TRUE` instead of `T` inside `poly()` #' and that no data names are used (i.e. no `data$variable`, but rather #' `variable`). #' #' @param verbose Toggle warnings. #' @param ... Currently not used. #' @inheritParams find_predictors #' #' @return A list of formulas that describe the model. For simple models, #' only one list-element, `conditional`, is returned. For more complex #' models, the returned list may have following elements: #' #' - `conditional`, the "fixed effects" part from the model (in the #' context of fixed-effects or instrumental variable regression, also #' called *regressors*) . One exception are `DirichletRegModel` models #' from \pkg{DirichletReg}, which has two or three components, #' depending on `model`. #' #' - `random`, the "random effects" part from the model (or the #' `id` for gee-models and similar) #' #' - `zero_inflated`, the "fixed effects" part from the #' zero-inflation component of the model #' #' - `zero_inflated_random`, the "random effects" part from the #' zero-inflation component of the model #' #' - `dispersion`, the dispersion formula #' #' - `instruments`, for fixed-effects or instrumental variable #' regressions like `ivreg::ivreg()`, `lfe::felm()` or `plm::plm()`, #' the instrumental variables #' #' - `cluster`, for fixed-effects regressions like #' `lfe::felm()`, the cluster specification #' #' - `correlation`, for models with correlation-component like #' `nlme::gls()`, the formula that describes the correlation structure #' #' - `slopes`, for fixed-effects individual-slope models like #' `feisr::feis()`, the formula for the slope parameters #' #' - `precision`, for `DirichletRegModel` models from #' \pkg{DirichletReg}, when parametrization (i.e. `model`) is #' `"alternative"`. #' #' @note For models of class `lme` or `gls` the correlation-component #' is only returned, when it is explicitly defined as named argument #' (`form`), e.g. 
`corAR1(form = ~1 | Mare)` #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' find_formula(m) #' #' if (require("lme4")) { #' m <- lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris) #' f <- find_formula(m) #' f #' format(f) #' } #' @export find_formula <- function(x, verbose = TRUE, ...) { UseMethod("find_formula") } #' @rdname find_formula #' @export formula_ok <- function(x, verbose = TRUE, ...) { f <- find_formula(x, verbose = FALSE) # check if formula contains data name with "$". This may # result in unexpected behaviour, and we should warn users check_1 <- .check_formula_for_dollar(f, verbose = verbose) # check if formula contains poly-term with "raw=T". In this case, # all.vars() returns "T" as variable, which is not intended check_2 <- .check_formula_for_T(f, verbose = verbose) all(check_1 && check_2) } # Default method ----------------------------------- #' @export find_formula.default <- function(x, verbose = TRUE, ...) { f <- tryCatch( { list(conditional = stats::formula(x)) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.list <- function(x, verbose = TRUE, ...) { if (.obj_has_name(x, "gam")) { if ("mer" %in% names(x)) { f.random <- .fix_gamm4_random_effect(find_formula(x$mer)$random) if (length(f.random) == 1) { f.random <- f.random[[1]] } else if (length(f.random) == 0) { f.random <- NULL } } x <- x$gam class(x) <- c(class(x), c("glm", "lm")) f <- .compact_list(list(conditional = stats::formula(x), random = f.random)) } else { f <- find_formula.default(x, ...) } .find_formula_return(f, verbose = verbose) } #' @export find_formula.data.frame <- function(x, verbose = TRUE, ...) { stop("A data frame is not a valid object for this function.") } #' @export find_formula.aovlist <- function(x, verbose = TRUE, ...) { f <- attr(x, "terms", exact = TRUE) attributes(f) <- NULL .find_formula_return(list(conditional = f)) } #' @export find_formula.anova <- function(x, verbose = TRUE, ...) { stop("Formulas cannot be retrieved from anova() objects.") } # GAM ----------------------------------------------------------- #' @export find_formula.SemiParBIV <- function(x, verbose = TRUE, ...) { f <- stats::formula(x, ...) names(f) <- c("Equation 1", "Equation 2", "Equation 3")[1:length(f)] f <- list(conditional = f) .find_formula_return(f, verbose = verbose) } #' @export find_formula.gam <- function(x, verbose = TRUE, ...) { f <- tryCatch( { stats::formula(x) }, error = function(x) { NULL } ) if (!is.null(f)) { if (is.list(f)) { mi <- .gam_family(x) if (!is.null(mi) && mi$family == "ziplss") { # handle formula for zero-inflated models f <- list(conditional = f[[1]], zero_inflated = f[[2]]) } else if (mi$family == "Multivariate normal") { # handle formula for multivariate models r <- lapply(f, function(.i) deparse(.i[[2]])) f <- lapply(f, function(.i) list(conditional = .i)) names(f) <- r attr(f, "is_mv") <- "1" } } else { f <- list(conditional = f) } } .find_formula_return(f, verbose = verbose) } #' @export find_formula.gamlss <- function(x, verbose = TRUE, ...) 
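# ---------------------------------------------------------------------------
# Illustrative sketch of the return structure documented above (kept as
# comments): for a mixed model, the fixed and random parts come back as
# separate formulas in a named list:
#   if (require("lme4")) {
#     m <- lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris)
#     find_formula(m)
#     #> $conditional
#     #> Sepal.Length ~ Sepal.Width
#     #> $random
#     #> ~1 | Species
#   }
# ---------------------------------------------------------------------------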
{ f <- tryCatch( { f.cond <- stats::as.formula(.get_fixed_effects(x$mu.formula)) f.random <- lapply(.findbars(x$mu.formula), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f.random) == 1) { f.random <- f.random[[1]] } else if (grepl("random\\((.*)\\)", .safe_deparse(f.cond))) { re <- gsub("(.*)random\\((.*)\\)", "\\2", .safe_deparse(f.cond)) f.random <- stats::as.formula(paste0("~1|", re)) f.cond <- stats::update.formula(f.cond, stats::as.formula(paste0(". ~ . - random(", re, ")"))) } .compact_list(list( conditional = f.cond, random = f.random, sigma = x$sigma.formula, nu = x$nu.formula, tau = x$tau.formula )) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.bamlss <- function(x, verbose = TRUE, ...) { f <- stats::formula(x) if (!is.null(f$mu)) { f.cond <- f$mu$formula } else if (!is.null(f$pi)) { f.cond <- f$pi$formula } if (!is.null(f$sigma)) { f.sigma <- stats::as.formula(paste0("~", as.character(f$sigma$formula)[3])) } else if (!is.null(f$pi)) { f.sigma <- NULL } f <- .compact_list(list( conditional = stats::as.formula(.safe_deparse(f.cond)), sigma = f.sigma )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.gamm <- function(x, verbose = TRUE, ...) { f <- .compact_list(find_formula(x$gam)) random <- .fix_gamm_random_effect(names(x$lme$groups)) if (length(random) == 0) { f.random <- NULL } else if (length(random) > 1) { f.random <- lapply(random, function(r) stats::as.formula(paste0("~1|", r))) } else { f.random <- stats::as.formula(paste0("~1|", random)) } .find_formula_return(.compact_list(c(f, list(random = f.random)))) } # Meta-Analysis ----------------------- #' @export find_formula.rma <- function(x, verbose = TRUE, ...) { NULL } #' @export find_formula.metaplus <- find_formula.rma #' @export find_formula.meta_random <- find_formula.rma #' @export find_formula.meta_fixed <- find_formula.rma #' @export find_formula.meta_bma <- find_formula.rma # Other models ---------------------------------------------- #' @export find_formula.censReg <- find_formula.default #' @export find_formula.maxLik <- find_formula.default #' @export find_formula.maxim <- find_formula.default #' @export find_formula.systemfit <- function(x, verbose = TRUE, ...) { f <- stats::formula(x) l <- lapply(f, function(i) { list(conditional = i) }) f <- .compact_list(l) if (length(f) > 1) { attr(f, "is_mv") <- "1" } .find_formula_return(f) } #' @export find_formula.selection <- function(x, verbose = TRUE, ...) { model_call <- parse(text = deparse(get_call(x)))[[1]] f <- list(conditional = list( selection = stats::as.formula(model_call$selection), outcome = stats::as.formula(model_call$outcome) )) attr(f, "two_stage") <- TRUE .find_formula_return(f, verbose = verbose) } #' @export find_formula.svy_vglm <- function(x, verbose = TRUE, ...) { find_formula(x$fit) } #' @export find_formula.mjoint <- function(x, verbose = TRUE, ...) { s <- summary(x) f.cond <- s$formLongFixed if (length(s$formLongFixed) == 1) { names(f.cond) <- "conditional" } else { names(f.cond) <- paste0("conditional", 1:length(f.cond)) } f.rand <- s$formLongRandom if (length(s$formLongRandom) == 1) { names(f.rand) <- "random" } else { names(f.rand) <- paste0("random", 1:length(f.rand)) } f <- c(f.cond, f.rand, list(survival = s$formSurv)) .find_formula_return(f, verbose = verbose) } #' @export find_formula.mvord <- function(x, verbose = TRUE, ...) 
{ f <- list(conditional = x$rho$formula) .find_formula_return(f, verbose = verbose) } #' @export find_formula.btergm <- function(x, verbose = TRUE, ...) { f <- list(conditional = x@formula) .find_formula_return(f, verbose = verbose) } #' @export find_formula.mediate <- function(x, verbose = TRUE, ...) { f <- list( mediator = find_formula(x$model.m), outcome = find_formula(x$model.y) ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.averaging <- function(x, verbose = TRUE, ...) { f_random <- tryCatch( { models <- attributes(x)$modelList find_formula(models[[1]]) }, error = function(e) { NULL } ) f <- find_formula.default(x) if (!.obj_has_name(f, "random") && .obj_has_name(f_random, "random")) { f$random <- f_random$random } .find_formula_return(f, verbose = verbose) } #' @export find_formula.glht <- function(x, verbose = TRUE, ...) { .find_formula_return(list(conditional = stats::formula(x$model))) } #' @export find_formula.joint <- function(x, verbose = TRUE, ...) { f <- stats::formula(x) .find_formula_return(list(conditional = f$lformula, survival = f$sformula)) } #' @export find_formula.betareg <- function(x, verbose = TRUE, ...) { f <- stats::formula(x) fs <- .safe_deparse(f) if (grepl("|", fs, fixed = TRUE)) { fs <- trimws(unlist(strsplit(fs, "|", fixed = TRUE))) f <- list( conditional = stats::as.formula(fs[1]), precision = stats::as.formula(paste0("~", fs[2])) ) } else { f <- list(conditional = f) } .find_formula_return(f, verbose = verbose) } #' @export find_formula.afex_aov <- function(x, verbose = TRUE, ...) { if (length(attr(x, "within")) == 0L) { fff <- find_formula(x$lm, verbose = verbose, ...) fff$conditional[2] <- call(attr(x, "dv")) # need to fix LHS fff } else { d <- get_data(x, shape = "long") dv <- attr(x, "dv") id <- attr(x, "id") within <- names(attr(x, "within")) within <- paste0(within, collapse = "*") within <- paste0("(", within, ")") e <- paste0("Error(", id, "/", within, ")") between <- names(attr(x, "between")) if (length(between) > 0L) { tempf <- find_formula(x$lm)[[1]] between <- as.character(tempf)[3] between <- paste0("(", between, ")") within <- paste0(c(within, between), collapse = "*") } out <- list(conditional = stats::formula(paste0(dv, "~", within, "+", e))) class(out) <- c("insight_formula", "list") out } } #' @export find_formula.mira <- function(x, verbose = TRUE, ...) { .find_formula_return(find_formula(x$analyses[[1]])) } #' @export find_formula.gee <- function(x, verbose = TRUE, ...) { f <- tryCatch( { id <- parse(text = .safe_deparse(x$call))[[1]]$id # alternative regex-patterns that also work: # sub(".*id ?= ?(.*?),.*", "\\1", .safe_deparse(x$call), perl = TRUE) # sub(".*\\bid\\s*=\\s*([^,]+).*", "\\1", .safe_deparse(x$call), perl = TRUE) list( conditional = stats::formula(x), random = stats::as.formula(paste0("~", id)) ) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.MANOVA <- function(x, verbose = TRUE, ...) { f <- .compact_list(list( conditional = x$input$formula, random = stats::as.formula(paste0("~", x$input$subject)) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.RM <- find_formula.MANOVA #' @export find_formula.gls <- function(x, verbose = TRUE, ...) 
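# ---------------------------------------------------------------------------
# Illustrative sketch for the betareg method above (kept as comments, and
# assuming the GasolineYield example data shipped with the betareg package):
# a two-part formula `y ~ x | z` is split into a conditional and a precision
# component, the latter returned as a one-sided formula:
#   m <- betareg::betareg(yield ~ batch + temp | temp, data = GasolineYield)
#   find_formula(m)
#   #> $conditional
#   #> yield ~ batch + temp
#   #> $precision
#   #> ~temp
# ---------------------------------------------------------------------------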
{ ## TODO this is an intermediate fix to return the correlation variables from gls-objects fcorr <- x$call$correlation if (!is.null(fcorr)) { f_corr <- parse(text = .safe_deparse(x$call$correlation))[[1]] } else { f_corr <- NULL } if (is.symbol(f_corr)) { f_corr <- paste("~", .safe_deparse(f_corr)) } else { f_corr <- f_corr$form } l <- tryCatch( { list( conditional = stats::formula(x), correlation = stats::as.formula(f_corr) ) }, error = function(x) { NULL } ) .find_formula_return(.compact_list(l)) } #' @export find_formula.LORgee <- function(x, verbose = TRUE, ...) { f <- tryCatch( { id <- parse(text = .safe_deparse(x$call))[[1]]$id # alternative regex-patterns that also work: # sub(".*id ?= ?(.*?),.*", "\\1", .safe_deparse(x$call), perl = TRUE) # sub(".*\\bid\\s*=\\s*([^,]+).*", "\\1", .safe_deparse(x$call), perl = TRUE) list( conditional = stats::formula(x), random = stats::as.formula(paste0("~", id)) ) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.cglm <- function(x, verbose = TRUE, ...) { f <- tryCatch( { id <- parse(text = .safe_deparse(x$call))[[1]]$id # alternative regex-patterns that also work: # sub(".*id ?= ?(.*?),.*", "\\1", .safe_deparse(x$call), perl = TRUE) # sub(".*\\bid\\s*=\\s*([^,]+).*", "\\1", .safe_deparse(x$call), perl = TRUE) list( conditional = stats::formula(x), random = stats::as.formula(paste0("~", id)) ) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } # mfx models --------------------------------------- #' @export find_formula.betamfx <- find_formula.betareg #' @export find_formula.betaor <- find_formula.betareg #' @export find_formula.logitmfx <- function(x, verbose = TRUE, ...) { find_formula.default(x$fit, ...) } #' @export find_formula.poissonmfx <- find_formula.logitmfx #' @export find_formula.negbinmfx <- find_formula.logitmfx #' @export find_formula.logitor <- find_formula.logitmfx #' @export find_formula.negbinirr <- find_formula.logitmfx #' @export find_formula.poissonirr <- find_formula.logitmfx #' @export find_formula.probitmfx <- find_formula.logitmfx # Panel data models --------------------------------------- #' @export find_formula.ivreg <- function(x, verbose = TRUE, ...) { f <- tryCatch( { f <- .safe_deparse(stats::formula(x)) cond <- .trim(substr(f, start = 0, stop = regexpr(pattern = "\\|", f) - 1)) instr <- .trim(substr(f, regexpr(pattern = "\\|", f) + 1, stop = 10000L)) list( conditional = stats::as.formula(cond), instruments = stats::as.formula(paste0("~", instr)) ) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.iv_robust <- find_formula.ivreg #' @export find_formula.ivFixed <- find_formula.ivreg #' @export find_formula.plm <- function(x, verbose = TRUE, ...) 
{ f <- tryCatch( { f <- .safe_deparse(stats::formula(x)) bar_pos <- regexpr(pattern = "\\|", f) if (bar_pos == -1) { stop_pos <- nchar(f) + 1 } else { stop_pos <- bar_pos } cond <- .trim(substr(f, start = 0, stop = stop_pos - 1)) instr <- .trim(substr(f, stop_pos + 1, stop = 10000L)) if (.is_empty_string(instr)) { list(conditional = stats::as.formula(cond)) } else { # check if formula starts with dot, and remove it instr <- gsub("(^\\.\\s*)(.*)", "\\2", instr) list( conditional = stats::as.formula(cond), instruments = stats::as.formula(paste0("~", instr)) ) } }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.pgmm <- find_formula.plm #' @export find_formula.felm <- function(x, verbose = TRUE, ...) { f <- .safe_deparse(stats::formula(x)) f_parts <- .trim(unlist(strsplit(f, "(? 1) { f.rand <- paste0("~", f_parts[2]) } else { f.rand <- NULL } if (length(f_parts) > 2) { f.instr <- paste0("~", f_parts[3]) } else { f.instr <- NULL } if (length(f_parts) > 3) { f.clus <- paste0("~", f_parts[4]) } else { f.clus <- NULL } f <- .compact_list(list( conditional = stats::as.formula(f.cond), random = stats::as.formula(f.rand), instruments = stats::as.formula(f.instr), cluster = stats::as.formula(f.clus) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.mhurdle <- function(x, verbose = TRUE, ...) { f <- .safe_deparse(stats::formula(x)[[3]]) f_parts <- .trim(unlist(strsplit(f, "(? 1) { f.cond <- paste0(.safe_deparse(stats::formula(x)[[2]]), "~", f_parts[2]) } else { f.cond <- NULL } if (length(f_parts) > 2) { f.ip <- paste0("~", f_parts[3]) } else { f.ip <- NULL } # remove "empty" parts if (f.zi == "~0") { f.zi <- NULL } if (f.ip == "~0") { f.ip <- NULL } f <- .compact_list(list( conditional = stats::as.formula(f.cond), zero_inflated = stats::as.formula(f.zi), infrequent_purchase = stats::as.formula(f.ip) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.feglm <- function(x, verbose = TRUE, ...) { f <- .safe_deparse(stats::formula(x)) f_parts <- unlist(strsplit(f, "(? 1) { f.instr <- paste0("~", .trim(f_parts[2])) } else { f.instr <- NULL } if (length(f_parts) > 2) { f.clus <- paste0("~", .trim(f_parts[3])) } else { f.clus <- NULL } f <- .compact_list(list( conditional = stats::as.formula(f.cond), instruments = stats::as.formula(f.instr), cluster = stats::as.formula(f.clus) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.fixest <- function(x, verbose = TRUE, ...) { f <- .safe_deparse(stats::formula(x)) f_parts <- unlist(strsplit(f, "(? 1) { f.clus <- paste0("~", .trim(f_parts[2])) } else { f.clus <- parse(text = deparse(x$call))[[1]]$fixef if (!is.null(f.clus)) { f.clus <- paste("~", paste(eval(f.clus), collapse = " + ")) } } f <- .compact_list(list( conditional = stats::as.formula(f.cond), cluster = stats::as.formula(f.clus) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.feis <- function(x, verbose = TRUE, ...) { f <- .safe_deparse(stats::formula(x)) f_parts <- unlist(strsplit(f, "(? 1) { f.slopes <- paste0("~", .trim(f_parts[2])) } else { f.slopes <- NULL } f <- .compact_list(list( conditional = stats::as.formula(f.cond), slopes = stats::as.formula(f.slopes), random = stats::as.formula(paste0("~", id)) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.bife <- function(x, verbose = TRUE, ...) 
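# ---------------------------------------------------------------------------
# Note on the panel/IV methods above (comment-only sketch): for formulas of
# the form `y ~ x1 + x2 | z1 + z2` (e.g. ivreg, plm), the part left of the
# first "|" becomes the conditional formula, and the remaining parts
# (instruments, or the fixed-effects / cluster terms for felm) are returned
# as one-sided formulas, e.g.
#   $conditional : y ~ x1 + x2
#   $instruments : ~z1 + z2
# ---------------------------------------------------------------------------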
{ f <- .safe_deparse(stats::formula(x)) f_parts <- unlist(strsplit(f, "|", fixed = TRUE)) f.cond <- .trim(f_parts[1]) if (length(f_parts) > 1) { f.rand <- paste0("~", .trim(f_parts[2])) } else { f.rand <- NULL } f <- .compact_list(list( conditional = stats::as.formula(f.cond), random = stats::as.formula(f.rand) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.ivprobit <- function(x, verbose = TRUE, ...) { NULL } #' @export find_formula.wbm <- function(x, verbose = TRUE, ...) { f <- .safe_deparse(stats::formula(x)) f_parts <- unlist(strsplit(f, "(? 1) { f.instr <- paste0("~", .trim(f_parts[2])) } else { f.instr <- NULL } if (length(f_parts) > 2) { f_parts[3] <- .trim(f_parts[3]) if (grepl("\\((.+)\\|(.+)\\)", f_parts[3])) { # we have multiple random effects, which we can better extract # via ".findbars()" if (length(gregexpr("\\|", f_parts[3])[[1]]) > 1) { f.rand <- .findbars(stats::as.formula(paste("~", f_parts[3]))) } else { f.rand <- gsub("(\\(|\\))", "", f_parts[3]) f.rand <- stats::as.formula(paste0("~", .trim(f.rand))) } f.clint <- NULL } else { ## TODO dangerous fix to convert cross-level interactions # into random effects... f.clint <- f_parts[3] f.clint <- paste0("~", .trim(f.clint)) f.rand <- NULL } } else { f.rand <- NULL f.clint <- NULL } f <- .compact_list(list( conditional = stats::as.formula(f.cond), instruments = stats::as.formula(f.instr), interactions = stats::as.formula(f.clint), random = f.rand )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.wbgee <- find_formula.wbm #' @export find_formula.glimML <- function(x, verbose = TRUE, ...) { f <- .compact_list(list( conditional = x@formula, random = x@random )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.tobit <- function(x, verbose = TRUE, ...) { f <- tryCatch( { list(conditional = parse(text = .safe_deparse(x$call))[[1]]$formula) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } # Zero inflated models -------------------------------------- #' @export find_formula.hurdle <- function(x, verbose = TRUE, ...) { .zeroinf_formula(x, verbose = verbose) } #' @export find_formula.zeroinfl <- find_formula.hurdle #' @export find_formula.zerotrunc <- find_formula.hurdle #' @export find_formula.zcpglm <- function(x, verbose = TRUE, ...) { .zeroinf_formula(x, separator = "\\|\\|", verbose = verbose) } # Ordinal models -------------------------------------- #' @export find_formula.clmm2 <- function(x, verbose = TRUE, ...) { f <- .compact_list(list( conditional = stats::as.formula(.safe_deparse(attr(x$location, "terms", exact = TRUE))), scale = stats::as.formula(.safe_deparse(attr(x$scale, "terms", exact = TRUE))), random = stats::as.formula(paste0("~", parse(text = .safe_deparse(x$call))[[1]]$random)) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.clm2 <- function(x, verbose = TRUE, ...) { f <- .compact_list(list( conditional = stats::formula(attr(x$location, "terms", exact = TRUE)), scale = stats::formula(attr(x$scale, "terms", exact = TRUE)) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.DirichletRegModel <- function(x, verbose = TRUE, ...) { f <- .safe_deparse(stats::formula(x)) f_parts <- unlist(strsplit(f, "(? 
1) { f.cond2 <- paste0("~", .trim(f_parts[2])) } else { f.cond2 <- NULL } if (length(f_parts) > 2) { f.cond3 <- paste0("~", .trim(f_parts[3])) } else { f.cond3 <- NULL } out <- .compact_list(list( conditional = stats::as.formula(f.cond), conditional2 = stats::as.formula(f.cond2), conditional3 = stats::as.formula(f.cond3) )) if (x$parametrization == "alternative") { if (length(out) == 2) names(out)[2] <- "precision" } .find_formula_return(out) } # Mixed models ----------------------- #' @export find_formula.glmmTMB <- function(x, verbose = TRUE, ...) { f.cond <- stats::formula(x) f.zi <- stats::formula(x, component = "zi") f.disp <- stats::formula(x, component = "disp") if (identical(.safe_deparse(f.zi), "~0") || identical(.safe_deparse(f.zi), "~1")) { f.zi <- NULL } if (identical(.safe_deparse(f.disp), "~0") || identical(.safe_deparse(f.disp), "~1")) { f.disp <- NULL } f.random <- lapply(.findbars(f.cond), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f.random) == 1) { f.random <- f.random[[1]] } f.zirandom <- lapply(.findbars(f.zi), function(.x) { f <- .safe_deparse(.x) if (f == "NULL") { return(NULL) } stats::as.formula(paste0("~", f)) }) if (length(f.zirandom) == 1) { f.zirandom <- f.zirandom[[1]] } f.cond <- stats::as.formula(.get_fixed_effects(f.cond)) if (!is.null(f.zi)) f.zi <- stats::as.formula(.get_fixed_effects(f.zi)) f <- .compact_list(list( conditional = f.cond, random = f.random, zero_inflated = f.zi, zero_inflated_random = f.zirandom, dispersion = f.disp )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.nlmerMod <- function(x, verbose = TRUE, ...) { f.random <- lapply(.findbars(stats::formula(x)), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f.random) == 1) { f.random <- f.random[[1]] } f.cond <- .nobars(stats::as.formula(gsub("(.*)(~)(.*)~(.*)", "\\1\\2\\4", .safe_deparse(stats::formula(x))))) f.nonlin <- stats::as.formula(paste0("~", .trim(gsub("(.*)~(.*)~(.*)", "\\2", .safe_deparse(stats::formula(x)))))) f <- .compact_list(list( conditional = f.cond, nonlinear = f.nonlin, random = f.random )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.merMod <- function(x, verbose = TRUE, ...) { f.cond <- stats::formula(x) f.random <- lapply(.findbars(f.cond), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f.random) == 1) { f.random <- f.random[[1]] } f.cond <- stats::as.formula(.get_fixed_effects(f.cond)) f <- .compact_list(list(conditional = f.cond, random = f.random)) .find_formula_return(f, verbose = verbose) } #' @export find_formula.rlmerMod <- find_formula.merMod #' @export find_formula.cpglmm <- find_formula.merMod #' @export find_formula.glmmadmb <- find_formula.merMod #' @export find_formula.mixed <- find_formula.merMod #' @export find_formula.clmm <- find_formula.merMod #' @export find_formula.cgamm <- find_formula.merMod #' @export find_formula.coxme <- find_formula.merMod #' @export find_formula.HLfit <- find_formula.merMod #' @export find_formula.merModList <- function(x, verbose = TRUE, ...) { find_formula(x[[1]], ...) } #' @export find_formula.sem <- function(x, verbose = TRUE, ...) 
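# ---------------------------------------------------------------------------
# Illustrative sketch for the mixed-model methods above (kept as comments,
# assuming the Salamanders example data shipped with glmmTMB): random-effects
# terms are pulled out via .findbars(), so a zero-inflated mixed model like
#   glmmTMB(count ~ mined + (1 | site), ziformula = ~mined, data = Salamanders)
# would yield roughly
#   $conditional   : count ~ mined
#   $random        : ~1 | site
#   $zero_inflated : ~mined
# ---------------------------------------------------------------------------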
{ if (!.is_semLme(x)) { return(NULL) } f.cond <- x$formula f.random <- lapply(.findbars(f.cond), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f.random) == 1) { f.random <- f.random[[1]] } f.cond <- stats::as.formula(.get_fixed_effects(f.cond)) f <- .compact_list(list(conditional = f.cond, random = f.random)) .find_formula_return(f, verbose = verbose) } #' @export find_formula.lme <- function(x, verbose = TRUE, ...) { fm <- eval(x$call$fixed) fmr <- eval(x$call$random) ## TODO this is an intermediate fix to return the correlation variables from lme-objects fcorr <- x$call$correlation if (!is.null(fcorr)) { fc <- parse(text = .safe_deparse(x$call$correlation))[[1]]$form } else { fc <- NULL } f <- .compact_list(list( conditional = fm, random = fmr, correlation = stats::as.formula(fc) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.lqmm <- function(x, verbose = TRUE, ...) { fm <- eval(x$call$fixed) fmr <- .safe_deparse(x$call$random) fmg <- .safe_deparse(x$call$group) f <- .compact_list(list( conditional = fm, random = stats::as.formula(paste0(fmr, "|", fmg)) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.mixor <- function(x, verbose = TRUE, ...) { fm <- x$call$formula f_id <- deparse(x$call$id) f_rs <- x$call$which.random.slope if (!is.null(f_rs)) { f_rs <- trimws(unlist(strsplit(.safe_deparse(x$call$formula[[3]]), "\\+")))[f_rs] fmr <- paste(f_rs, "|", f_id) } else { fmr <- f_id } fmr <- stats::as.formula(paste("~", fmr)) f <- .compact_list(list( conditional = fm, random = fmr )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.MixMod <- function(x, verbose = TRUE, ...) { f.cond <- stats::formula(x) f.zi <- stats::formula(x, type = "zi_fixed") f.random <- stats::formula(x, type = "random") f.zirandom <- stats::formula(x, type = "zi_random") f <- .compact_list(list( conditional = f.cond, random = f.random, zero_inflated = f.zi, zero_inflated_random = f.zirandom )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.BBmm <- function(x, verbose = TRUE, ...) { f.cond <- parse(text = .safe_deparse(x$call))[[1]]$fixed.formula f.rand <- parse(text = .safe_deparse(x$call))[[1]]$random.formula f <- .compact_list(list( conditional = stats::as.formula(f.cond), random = stats::as.formula(f.rand) )) .find_formula_return(f, verbose = verbose) } #' @export find_formula.mmclogit <- function(x, verbose = TRUE, ...) { f <- tryCatch( { list( conditional = stats::formula(x), random = stats::as.formula(parse(text = .safe_deparse(x$call))[[1]]$random) ) }, error = function(x) { NULL } ) .find_formula_return(f, verbose = verbose) } #' @export find_formula.glmm <- function(x, verbose = TRUE, ...) { f.cond <- stats::as.formula(x$fixedcall) f.random <- lapply(x$randcall, function(.x) { av <- all.vars(.x) stats::as.formula(paste0("~1|", av[length(av)])) }) if (length(f.random) == 1) { f.random <- f.random[[1]] } f <- .compact_list(list(conditional = f.cond, random = f.random)) .find_formula_return(f, verbose = verbose) } # Bayesian models -------------------------------- #' @export find_formula.BGGM <- function(x, verbose = TRUE, ...) { list(conditional = x$formula) } #' @export find_formula.mcmc.list <- function(x, verbose = TRUE, ...) { NULL } #' @export find_formula.stanreg <- function(x, verbose = TRUE, ...) { if (inherits(x, "nlmerMod")) { find_formula.nlmerMod(x, ...) 
} else { f.cond <- stats::formula(x) # special handling for stan_gamm4 if (inherits(x, "gamm4")) { f.random <- tryCatch( { lapply(.findbars(stats::formula(x$glmod)), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) }, error = function(e) { NULL } ) } else { f.random <- lapply(.findbars(f.cond), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) } if (length(f.random) == 1) { f.random <- f.random[[1]] } f.cond <- stats::as.formula(.get_fixed_effects(f.cond)) f <- .compact_list(list(conditional = f.cond, random = f.random)) .find_formula_return(f, verbose = verbose) } } #' @export find_formula.brmsfit <- function(x, verbose = TRUE, ...) { f <- stats::formula(x) if (.obj_has_name(f, "forms")) { mv_formula <- lapply(f$forms, .get_brms_formula) attr(mv_formula, "is_mv") <- "1" f <- mv_formula } else { f <- .get_brms_formula(f) } .find_formula_return(f, verbose = verbose) } #' @export find_formula.stanmvreg <- function(x, verbose = TRUE, ...) { f <- stats::formula(x) mv_formula <- lapply(f, .get_stanmv_formula) attr(mv_formula, "is_mv") <- "1" .find_formula_return(mv_formula) } #' @export find_formula.MCMCglmm <- function(x, verbose = TRUE, ...) { fm <- x$Fixed$formula fmr <- x$Random$formula f <- .compact_list(list(conditional = fm, random = fmr)) .find_formula_return(f, verbose = verbose) } #' @export find_formula.BFBayesFactor <- function(x, verbose = TRUE, ...) { if (.classify_BFBayesFactor(x) == "linear") { fcond <- utils::tail(x@numerator, 1)[[1]]@identifier$formula dt <- utils::tail(x@numerator, 1)[[1]]@dataTypes frand <- names(dt)[which(dt == "random")] if (!.is_empty_object(frand)) { f.random <- stats::as.formula(paste0("~", paste(frand, collapse = " + "))) for (i in frand) { fcond <- sub(i, "", fcond, fixed = TRUE) } while (grepl("\\+$", .trim(fcond))) { fcond <- gsub("(.*)\\+$", "\\1", .trim(fcond)) } # random effects only? if (grepl("~$", .trim(fcond))) { fcond <- paste(fcond, "1") } f.cond <- stats::as.formula(.trim(fcond)) } else { f.random <- NULL f.cond <- stats::as.formula(fcond) } } else if (.classify_BFBayesFactor(x) %in% c("ttest1", "ttest2")) { f.cond <- tryCatch( { stats::as.formula(x@numerator[[1]]@identifier$formula) }, error = function(e) { NULL } ) f.random <- NULL } else { return(NULL) } f <- .compact_list(list( conditional = f.cond, random = f.random )) .find_formula_return(f, verbose = verbose) } # tidymodels -------------------------------------------------------------- #' @export find_formula.model_fit <- function(x, verbose = TRUE, ...) { find_formula(x$fit, ...) 
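# ---------------------------------------------------------------------------
# Note on the brms helper defined below (comment-only sketch, with
# placeholder variables y, x, g and data d): besides the conditional and
# random parts, distributional parameters from `pforms` (sigma, zi, hu, ...)
# are returned as separate one-sided formulas. E.g. for
#   brms::brm(bf(y ~ x + (1 | g), sigma ~ x), data = d)
# the result would contain $conditional, $random and $sigma elements, where
# $sigma is ~x.
# ---------------------------------------------------------------------------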
} # helper --------------------------- .get_brms_formula <- function(f) { f_cond <- f$formula f_random <- lapply(.findbars(f_cond), function(.x) { fm <- .safe_deparse(.x) stats::as.formula(paste0("~", fm)) }) if (length(f_random) == 1) { f_random <- f_random[[1]] } f_cond <- stats::as.formula(.get_fixed_effects(f_cond)) f_zi <- f$pforms$zi f_zirandom <- NULL # auxiliary f_sigma <- f$pforms$sigma f_mu <- f$pforms$mu f_nu <- f$pforms$nu f_shape <- f$pforms$shape f_beta <- f$pforms$beta f_phi <- f$pforms$phi f_hu <- f$pforms$hu f_ndt <- f$pforms$ndt f_zoi <- f$pforms$zoi f_coi <- f$pforms$coi f_kappa <- f$pforms$kappa f_bias <- f$pforms$bias f_bs <- f$pforms$bs f_sigmarandom <- NULL f_betarandom <- NULL # split zero-inflated fixed from zero-inflated random if (!.is_empty_object(f_zi)) { f_zirandom <- lapply(.findbars(f_zi), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f_zirandom) == 1) { f_zirandom <- f_zirandom[[1]] } f_zi <- stats::as.formula(paste0("~", .safe_deparse(f_zi[[3L]]))) f_zi <- stats::as.formula(.get_fixed_effects(f_zi)) } # split sigma fixed from sigma random if (!.is_empty_object(f_sigma)) { f_sigmarandom <- lapply(.findbars(f_sigma), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f_sigmarandom) == 1) { f_sigmarandom <- f_sigmarandom[[1]] } f_sigma <- stats::as.formula(paste0("~", .safe_deparse(f_sigma[[3L]]))) f_sigma <- stats::as.formula(.get_fixed_effects(f_sigma)) } # split beta fixed from beta random if (!.is_empty_object(f_beta)) { f_betarandom <- lapply(.findbars(f_beta), function(.x) { f <- .safe_deparse(.x) stats::as.formula(paste0("~", f)) }) if (length(f_betarandom) == 1) { f_betarandom <- f_betarandom[[1]] } f_beta <- stats::as.formula(paste0("~", .safe_deparse(f_beta[[3L]]))) f_beta <- stats::as.formula(.get_fixed_effects(f_beta)) } .compact_list(list( conditional = f_cond, random = f_random, zero_inflated = f_zi, zero_inflated_random = f_zirandom, sigma = f_sigma, sigma_random = f_sigmarandom, beta = f_beta, beta_random = f_betarandom, shape = f_shape, phi = f_phi, hurdle = f_hu, mu = f_mu, nu = f_nu, ndt = f_ndt, bs = f_bs, bias = f_bias, zero_one_inflated = f_zoi, conditional_one_inflated = f_coi, kappa = f_kappa )) } .get_stanmv_formula <- function(f) { f_cond <- f f_random <- lapply(.findbars(f_cond), function(.x) { fm <- .safe_deparse(.x) stats::as.formula(paste0("~", fm)) }) if (length(f_random) == 1) { f_random <- f_random[[1]] } f_cond <- stats::as.formula(.get_fixed_effects(f_cond)) .compact_list(list( conditional = f_cond, random = f_random )) } # Find formula for zero-inflated regressions, where # zero-inflated part is separated by | from count part .zeroinf_formula <- function(x, separator = "\\|", verbose = TRUE) { f <- tryCatch( { stats::formula(x) }, error = function(x) { NULL } ) if (is.null(f)) { return(NULL) } f <- .trim(unlist(strsplit(.safe_deparse(f), separator))) c.form <- stats::as.formula(f[1]) if (length(f) == 2) { zi.form <- stats::as.formula(paste0("~", f[2])) } else { zi.form <- NULL } ## TODO could be extended to all find_formula() # fix dot-formulas c.form <- .dot_formula(f = c.form, model = x) # fix dot-formulas zi.form <- tryCatch( { if (as.character(zi.form[2]) == ".") { resp <- .safe_deparse(c.form[2]) pred <- setdiff(colnames(.recover_data_from_environment(x)), resp) zi.form <- stats::as.formula(paste(resp, "~", paste0(pred, collapse = " + "))) } zi.form }, error = function(e) { zi.form } ) f <- .compact_list(list(conditional = c.form, zero_inflated 
= zi.form)) .find_formula_return(f, verbose = verbose) } # try to guess "full" formula for dot-abbreviation, e.g. # lm(mpg ~., data = mtcars) .dot_formula <- function(f, model) { # fix dot-formulas tryCatch( { if (as.character(f[[3]])[1] == ".") { resp <- .safe_deparse(f[[2]]) pred <- setdiff(colnames(.recover_data_from_environment(model)), resp) f <- stats::as.formula(paste(resp, "~", paste0(pred, collapse = " + "))) } f }, error = function(e) { f } ) } .fix_gamm_random_effect <- function(x) { g_in_terms <- length(x) > 1 && x[length(x)] == "g" xr_in_terms <- length(x) > 1 && x[length(x)] == "Xr" x <- x[!(grepl("(Xr\\.\\d|g\\.\\d)", x) | x %in% c("Xr", "g"))] # exceptions, if random effect is named g if (!length(x) && isTRUE(g_in_terms)) { x <- "g" } if (!length(x) && isTRUE(xr_in_terms)) { x <- "Xr" } x } .fix_gamm4_random_effect <- function(f) { if (inherits(f, "formula")) { f <- list(f) } len <- length(f) keep <- sapply(f, function(i) { i <- gsub("(~1| | \\|)", "", deparse(i)) !any(grepl("(Xr\\.\\d|g\\.\\d)", i) | i %in% c("Xr", "g")) }) f <- .compact_list(f[keep]) # exceptions, if random effect is named Xr if (!length(f) && len > 1) { f <- list(stats::as.formula("~1 | Xr")) } f } # Helpers and Methods ----------------------------------------------------- .find_formula_return <- function(f, verbose = TRUE) { if (is.null(f)) { return(NULL) } # check if formula contains data name with "$". This may # result in unexpected behaviour, and we should warn users .check_formula_for_dollar(f, verbose = verbose) # check if formula contains poly-term with "raw=T". In this case, # all.vars() returns "T" as variable, which is not intended .check_formula_for_T(f, verbose = verbose) class(f) <- c("insight_formula", class(f)) f } .check_formula_for_T <- function(f, verbose = TRUE) { f <- .safe_deparse(f[[1]]) if (.is_empty_object(f)) { return(TRUE) } if (grepl("(.*)poly\\((.*),\\s*raw\\s*=\\s*T\\)", f)) { if (verbose) { warning(format_message( "Looks like you are using 'poly()' with 'raw = T'. This results in unexpected behaviour, because 'all.vars()' considers 'T' as variable.", "Please use 'raw = TRUE'." ), call. = FALSE ) } return(FALSE) } return(TRUE) } # formulas with $, like "lm(mtcars$mpg ~ mtcars$hp), may cause problems # in various functions throughout the easystats packages. We warn the user # here... .check_formula_for_dollar <- function(f, verbose = TRUE) { if (.is_empty_object(f)) { return(TRUE) } if (any(grepl("\\$", .safe_deparse(f[[1]])))) { fc <- try(.formula_clean(f[[1]]), silent = TRUE) if (inherits(fc, "try-error")) { stop(attributes(fc)$condition$message, call. = FALSE) } else { if (verbose) { warning( format_message(paste0( "Using `$` in model formulas can produce unexpected results. Specify your model using the `data` argument instead.", "\n Try: ", fc$formula, ", data = ", fc$data )), call. = FALSE ) } return(FALSE) } } return(TRUE) } .formula_clean <- function(f) { fc <- as.character(f) LHS <- fc[2] RHS <- fc[3] pattern <- "[\\s*+:()|^,\\-\\/]" # was: "[\\s\\*\\+:\\-\\|/\\(\\)\\^,]" parts <- trimws(unlist(strsplit(split = pattern, x = LHS, perl = TRUE))) d_LHS <- unique(gsub("(.*)\\$(.*)", "\\1", parts[grepl("(.*)\\$(.*)", parts)])) parts <- trimws(unlist(strsplit(split = pattern, x = RHS, perl = TRUE))) d_RHS <- unique(gsub("(.*)\\$(.*)", "\\1", parts[grepl("(.*)\\$(.*)", parts)])) if (.n_unique(c(d_LHS, d_RHS)) > 1) { stop("Multiple data objects present in formula. Specify your model using the `data` argument instead.", call. 
= FALSE) } else { d <- unique(d_RHS) } LHS_clean <- gsub(paste0(d_LHS, "\\$"), "", LHS) RHS_clean <- gsub(paste0(d_RHS, "\\$"), "", RHS) list(data = d, formula = paste(LHS_clean, fc[1], RHS_clean)) } # methods ------------------------- #' @export format.insight_formula <- function(x, what = c("conditional", "random"), ...) { # The purpose of this function is to flatten the formula # Start by first part (conditional by default) ft <- format(x[[1]]) # Wrap random in brackets if ("random" %in% names(x)) { x[["random"]] <- paste0("(", format(x[["random"]]), ")") } # Add all the components for (part in what[-1]) { if (part %in% names(x)) { ft <- paste0(ft, " + ", format(x[[part]])) } } ft } insight/R/format_value.R0000644000175000017500000001515114144234777015101 0ustar nileshnilesh#' Numeric Values Formatting #' #' @param x Numeric value. #' @param digits Number of digits for rounding or significant figures. May also #' be `"signif"` to return significant figures or `"scientific"` #' to return scientific notation. Control the number of digits by adding the #' value as suffix, e.g. `digits = "scientific4"` to have scientific #' notation with 4 decimal places, or `digits = "signif5"` for 5 #' significant figures (see also [signif()]). #' @param protect_integers Should integers be kept as integers (i.e., without #' decimals)? #' @param missing Value by which `NA` values are replaced. By default, an #' empty string (i.e. `""`) is returned for `NA`. #' @param width Minimum width of the returned string. If not `NULL` and #' `width` is larger than the string's length, leading whitespaces are #' added to the string. #' @param as_percent Logical, if `TRUE`, value is formatted as percentage #' value. #' @param zap_small Logical, if `TRUE`, small values are rounded after #' `digits` decimal places. If `FALSE`, values with more decimal #' places than `digits` are printed in scientific notation. #' @param ... Arguments passed to or from other methods. #' #' #' @return A formatted string. #' #' @examples #' format_value(1.20) #' format_value(1.2) #' format_value(1.2012313) #' format_value(c(0.0045, 234, -23)) #' format_value(c(0.0045, .12, .34)) #' format_value(c(0.0045, .12, .34), as_percent = TRUE) #' format_value(c(0.0045, .12, .34), digits = "scientific") #' format_value(c(0.0045, .12, .34), digits = "scientific2") #' #' # default #' format_value(c(0.0045, .123, .345)) #' # significant figures #' format_value(c(0.0045, .123, .345), digits = "signif") #' #' format_value(as.factor(c("A", "B", "A"))) #' format_value(iris$Species) #' #' format_value(3) #' format_value(3, protect_integers = TRUE) #' #' format_value(head(iris)) #' @export format_value <- function(x, ...) { UseMethod("format_value") } #' @rdname format_value #' @export format_value.data.frame <- function(x, digits = 2, protect_integers = FALSE, missing = "", width = NULL, as_percent = FALSE, zap_small = FALSE, ...) { as.data.frame(sapply(x, format_value, digits = digits, protect_integers = protect_integers, missing = missing, width = width, as_percent = as_percent, zap_small = zap_small, simplify = FALSE)) } #' @rdname format_value #' @export format_value.numeric <- function(x, digits = 2, protect_integers = FALSE, missing = "", width = NULL, as_percent = FALSE, zap_small = FALSE, ...) { if (protect_integers) { out <- .format_value_unless_integer(x, digits = digits, .missing = missing, .width = width, .as_percent = as_percent, .zap_small = zap_small, ...) 
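# (comment-only sketch) `digits` may also carry a string suffix, which the
# helpers below parse, e.g.:
#   format_value(0.0045, digits = "scientific2")  # "4.50e-03"
#   format_value(0.0045, digits = "signif2")      # "0.0045"
#   format_value(0.0000045, zap_small = TRUE)     # "0.00"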
} else { out <- .format_value(x, digits = digits, .missing = missing, .width = width, .as_percent = as_percent, .zap_small = zap_small, ...) } # Deal with negative zeros if (!is.factor(x)) { whitespace <- ifelse(is.null(width), "", " ") out[out == "-0"] <- paste0(whitespace, "0") out[out == "-0.0"] <- paste0(whitespace, "0.0") out[out == "-0.00"] <- paste0(whitespace, "0.00") out[out == "-0.000"] <- paste0(whitespace, "0.000") out[out == "-0.0000"] <- paste0(whitespace, "0.0000") } out } #' @export format_value.double <- format_value.numeric #' @export format_value.character <- format_value.numeric #' @export format_value.factor <- format_value.numeric #' @export format_value.logical <- format_value.numeric .format_value_unless_integer <- function(x, digits = 2, .missing = "", .width = NULL, .as_percent = FALSE, .zap_small = FALSE, ...) { if (is.numeric(x) && !all(.is.int(stats::na.omit(x)))) { .format_value(x, digits = digits, .missing = .missing, .width = .width, .as_percent = .as_percent, .zap_small = .zap_small) } else if (anyNA(x)) { .convert_missing(x, .missing) } else if (is.numeric(x) && all(.is.int(stats::na.omit(x))) && !is.null(.width)) { format(x, justify = "right", width = .width) } else { as.character(x) } } .format_value <- function(x, digits = 2, .missing = "", .width = NULL, .as_percent = FALSE, .zap_small = FALSE, ...) { # proper character NA if (is.na(.missing)) .missing <- NA_character_ if (is.numeric(x)) { if (isTRUE(.as_percent)) { need_sci <- (abs(100 * x) >= 1e+5 | (log10(abs(100 * x)) < -digits)) & x != 0 if (.zap_small) { x <- ifelse(is.na(x), .missing, sprintf("%.*f%%", digits, 100 * x)) } else { x <- ifelse(is.na(x), .missing, ifelse(need_sci, sprintf("%.*e%%", digits, 100 * x), sprintf("%.*f%%", digits, 100 * x) ) ) } } else { if (is.character(digits) && grepl("^scientific", digits)) { digits <- tryCatch( expr = { as.numeric(gsub("scientific", "", digits, fixed = TRUE)) }, error = function(e) { 5 } ) if (is.na(digits)) digits <- 5 x <- sprintf("%.*e", digits, x) } else if (is.character(digits) && grepl("^signif", digits)) { digits <- tryCatch( expr = { as.numeric(gsub("signif", "", digits, fixed = TRUE)) }, error = function(e) { NA } ) if (is.na(digits)) digits <- 3 x <- as.character(signif(x, digits)) } else { need_sci <- (abs(x) >= 1e+5 | (log10(abs(x)) < -digits)) & x != 0 if (.zap_small) { x <- ifelse(is.na(x), .missing, sprintf("%.*f", digits, x)) } else { x <- ifelse(is.na(x), .missing, ifelse(need_sci, sprintf("%.*e", digits, x), sprintf("%.*f", digits, x) ) ) } } } } else if (anyNA(x)) { x <- .convert_missing(x, .missing) } if (!is.null(.width)) { x <- format(x, justify = "right", width = .width) } x } .convert_missing <- function(x, .missing) { if (is.na(.missing)) { .missing <- NA_character_ } else { .missing <- as.character(.missing) } if (length(x) == 1) { return(.missing) } missings <- which(is.na(x)) x[missings] <- .missing x[!missings] <- as.character(x) x } .is.int <- function(x) { tryCatch( expr = { ifelse(is.infinite(x), FALSE, x %% 1 == 0) }, warning = function(w) { is.integer(x) }, error = function(e) { FALSE } ) } .is.fraction <- function(x) { !all(.is.int(x)) && is.numeric(x) && .n_unique(x) > 2 } insight/R/is_nullmodel.R0000644000175000017500000000232714077615665015110 0ustar nileshnilesh#' @title Checks if model is a null-model (intercept-only) #' @name is_nullmodel #' #' @description Checks if model is a null-model (intercept-only), i.e. if #' the conditional part of the model has no predictors. #' #' @param x A model object. 
#' #' @return `TRUE` if `x` is a null-model, `FALSE` otherwise. #' #' @examples #' model <- lm(mpg ~ 1, data = mtcars) #' is_nullmodel(model) #' #' model <- lm(mpg ~ gear, data = mtcars) #' is_nullmodel(model) #' #' if (require("lme4")) { #' model <- lmer(Reaction ~ 1 + (Days | Subject), data = sleepstudy) #' is_nullmodel(model) #' #' model <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) #' is_nullmodel(model) #' } #' @export is_nullmodel <- function(x) { UseMethod("is_nullmodel") } #' @export is_nullmodel.default <- function(x) { if (is_multivariate(x)) { unlist(lapply(find_predictors(x, effects = "fixed", component = "conditional"), .check_for_nullmodel)) } else { .check_for_nullmodel(find_predictors(x, effects = "fixed", component = "conditional")) } } #' @export is_nullmodel.afex_aov <- function(x) { FALSE } .check_for_nullmodel <- function(preds) { is.null(preds[["conditional"]]) } insight/R/ellipsis_info.R0000644000175000017500000001305614077615664015261 0ustar nileshnilesh#' Gather information about objects in ellipsis (dot dot dot) #' #' Provides information regarding the models entered in an ellipsis. #' It detects whether all are models, regressions, nested regressions etc., #' assigning different classes to the list of objects. #' #' @param objects,... Arbitrary number of objects. #' @param only_models Only keep supported models (default to `TRUE`). #' #' @return The list with objects that were passed to the function, including #' additional information as attributes (e.g. if models have same response or #' are nested). #' #' @examples #' m1 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris) #' m2 <- lm(Sepal.Length ~ Species, data = iris) #' m3 <- lm(Sepal.Length ~ Petal.Width, data = iris) #' m4 <- lm(Sepal.Length ~ 1, data = iris) #' m5 <- lm(Petal.Width ~ 1, data = iris) #' #' objects <- ellipsis_info(m1, m2, m3, m4) #' class(objects) #' #' objects <- ellipsis_info(m1, m2, m4) #' attributes(objects)$is_nested #' #' objects <- ellipsis_info(m1, m2, m5) #' attributes(objects)$same_response #' @export ellipsis_info <- function(objects, ...) { UseMethod("ellipsis_info") } #' @rdname ellipsis_info #' @export ellipsis_info.default <- function(..., only_models = TRUE) { # Create list with names objects <- list(...) object_names <- match.call(expand.dots = FALSE)$`...` names(objects) <- object_names # If only one objects was provided if (length(objects) == 1) { return(objects[[1]]) } # Check whether all are models is_model <- sapply(objects, insight::is_model) # Drop non-models if need be if (only_models && any(is_model == FALSE)) { warning(paste( paste0(object_names[is_model == FALSE], collapse = ", "), "are not supported models and have been dropped." )) objects <- objects[is_model] object_names <- object_names[is_model] } # Add class if (all(is_model)) { class(objects) <- c("ListModels", class(objects)) } else { class(objects) <- c("ListObjects", class(objects)) } # Now objects is of class ListObjects or ListModels, so dispatching on the appropriate method ellipsis_info(objects) } # ListObjects and ListModels ---------------------------------------------- #' @export ellipsis_info.ListObjects <- function(objects, ...) { # Do nothing objects } #' @export ellipsis_info.ListModels <- function(objects, ...) 
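# ---------------------------------------------------------------------------
# Illustrative sketch (kept as comments, mirroring the examples documented
# above): nested models with the same response are flagged via attributes:
#   m1 <- lm(Sepal.Length ~ Petal.Width + Species, data = iris)
#   m2 <- lm(Sepal.Length ~ Species, data = iris)
#   objs <- ellipsis_info(m1, m2)
#   attributes(objs)$is_nested      #> TRUE
#   attributes(objs)$same_response  #> TRUE
# ---------------------------------------------------------------------------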
{ # Lavaan if (all(sapply(objects, inherits, what = "lavaan"))) { class(objects) <- c("ListLavaan", class(objects)) # Regressions } else if (all(sapply(objects, is_regression_model))) { class(objects) <- c("ListRegressions", class(objects)) # Mixed bag } else { class(objects) <- c("ListVarious", class(objects)) } # Dispatch on the next appropriate method ellipsis_info(objects) } # ListRegressions --------------------------------------------------------- #' @export ellipsis_info.ListVarious <- function(objects, ...) { # Do nothing (for now?) objects } #' @export ellipsis_info.ListLavaan <- function(objects, ...) { # TODO: check the nesting objects } #' @export ellipsis_info.ListRegressions <- function(objects, ..., verbose = TRUE) { object_names <- names(objects) # Check if same outcome outcome <- get_response(objects[[1]], verbose = FALSE) same_response <- all(sapply(objects[2:length(object_names)], function(i) identical(get_response(i, verbose = FALSE), outcome))) attr(objects, "same_response") <- isTRUE(same_response) # Check if nested is_nested_increasing <- is_nested_decreasing <- c() len <- length(objects) for (i in 2:len) { is_nested_decreasing <- c(is_nested_decreasing, .nested_regressions(objects[[i - 1]], objects[[i]])) is_nested_increasing <- c(is_nested_increasing, .nested_regressions(objects[[len + 2 - i]], objects[[len + 1 - i]])) } is_nested <- all(is_nested_decreasing) || all(is_nested_increasing) if (isTRUE(same_response) & is_nested) { class(objects) <- c("ListNestedRegressions", class(objects)) attr(objects, "is_nested") <- TRUE # order of df from models model_df <- sapply(objects, n_parameters) if (is_nested && any(duplicated(model_df)) && length(unique(sapply(objects, model_name, include_formula = FALSE))) == 1 && verbose) { message("Some of the nested models seem to be identical.") } attr(objects, "is_nested_increasing") <- all(is_nested_increasing) attr(objects, "is_nested_decreasing") <- all(is_nested_decreasing) } else { class(objects) <- c("ListNonNestedRegressions", class(objects)) attr(objects, "is_nested") <- FALSE } objects } # Helpers ----------------------------------------------------------------- #' @keywords internal .nested_regressions <- function(basemodel, model) { params_base <- find_parameters(basemodel, effects = "fixed", component = "conditional", flatten = TRUE ) params <- find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ) # poly() are not properly recognized as nested, so remove poly() syntax here pattern <- paste0("^poly\\(((\\w|\\.)*).*\\)(\\d)") poly_terms <- grepl("^poly\\((.*)\\)", params) if (any(poly_terms)) { params[poly_terms] <- gsub(pattern, "\\1\\3", params[poly_terms]) } poly_terms <- grepl("^poly\\((.*)\\)", params_base) if (any(poly_terms)) { params_base[poly_terms] <- gsub(pattern, "\\1\\3", params_base[poly_terms]) } all(params %in% params_base) } insight/R/get_parameters_bayesian.R0000644000175000017500000004603014137207374017265 0ustar nileshnilesh#' @title Get model parameters from Bayesian models #' @name get_parameters.BGGM #' #' @description Returns the coefficients (or posterior samples for Bayesian #' models) from a model. #' #' @param iterations Number of posterior draws. #' @param progress Display progress. #' @param summary Logical, indicates whether the full posterior samples #' (`summary = FALSE`)) or the summarized centrality indices of #' the posterior samples (`summary = TRUE`)) should be returned as #' estimates. 
#' @param centrality Only for models with posterior samples, and when #' `summary = TRUE`. In this case, `centrality = "mean"` would #' calculate means of posterior samples for each parameter, while #' `centrality = "median"` would use the more robust median value as #' measure of central tendency. #' @param verbose Toggle messages and warnings. #' @param ... Currently not used. #' #' @inheritParams find_parameters.BGGM #' @inheritParams find_parameters #' @inheritParams find_predictors #' #' @return The posterior samples from the requested parameters as data frame. #' If `summary = TRUE`, returns a data frame with two columns: the #' parameter names and the related point estimates (based on `centrality`). #' #' @details In most cases when models either return different "effects" (fixed, #' random) or "components" (conditional, zero-inflated, ...), the arguments #' `effects` and `component` can be used. #' #' @section BFBayesFactor Models: #' Note that for `BFBayesFactor` models (from the \pkg{BayesFactor} #' package), posteriors are only extracted from the first numerator model (i.e., #' `model[1]`). If you want to apply some function `foo()` to another #' model stored in the `BFBayesFactor` object, index it directly, e.g. #' `foo(model[2])`, `foo(1/model[5])`, etc. #' See also [bayestestR::weighted_posteriors()]. #' #' @examples #' data(mtcars) #' m <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' get_parameters(m) #' @export get_parameters.BGGM <- function(x, component = c("correlation", "conditional", "intercept", "all"), summary = FALSE, centrality = "mean", ...) { # installed? # check_if_installed("BGGM") # # out <- as.data.frame(BGGM::posterior_samples(x)) out <- as.data.frame(.bggm_posterior_samples(x)) intercepts <- grepl("_\\(Intercept\\)$", colnames(out)) correlations <- grepl("(.*)--(.*)", colnames(out)) conditional <- !intercepts & !correlations component <- match.arg(component) out <- switch(component, "conditional" = out[, conditional, drop = FALSE], "correlation" = out[, correlations, drop = FALSE], "intercept" = out[, intercepts, drop = FALSE], out ) if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.MCMCglmm <- function(x, effects = c("fixed", "random", "all"), summary = FALSE, centrality = "mean", ...) { effects <- match.arg(effects) nF <- x$Fixed$nfl fixed <- as.data.frame(x$Sol[, 1:nF, drop = FALSE]) random <- as.data.frame(x$VCV[, find_random(x, split_nested = TRUE, flatten = TRUE), drop = FALSE]) all <- cbind(fixed, random) out <- if (effects == "fixed") { fixed } else if (effects == "random") { random } else { all } if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.BFBayesFactor <- function(x, effects = c("all", "fixed", "random"), component = c("all", "extra"), iterations = 4000, progress = FALSE, verbose = TRUE, summary = FALSE, centrality = "mean", ...) { # installed? check_if_installed("BayesFactor") effects <- match.arg(effects) component <- match.arg(component) bf_type <- .classify_BFBayesFactor(x) # check if valid model was indexed... if (length(x@numerator) > 1 || !xor( x@denominator@shortName == "Intercept only", grepl("^(Null|Indep)", x@denominator@shortName) )) { if (verbose) { message( "Multiple `BFBayesFactor` models detected - posteriors are extracted from the first numerator model.\n", 'See help("get_parameters", package = "insight").' 
) } } params <- find_parameters(x, effects = effects, component = component, flatten = TRUE, ...) if (bf_type %in% c("correlation", "ttest1", "ttest2", "meta", "linear")) { posteriors <- as.data.frame(suppressMessages( BayesFactor::posterior(x, iterations = iterations, progress = progress, index = 1, ...) )) out <- switch(bf_type, "correlation" = data.frame("rho" = as.numeric(posteriors$rho)), "ttest1" = data.frame("Difference" = as.numeric(posteriors[, 1]) - x@numerator[[1]]@prior$mu), "ttest2" = data.frame("Difference" = as.numeric(posteriors[, 2]) - x@numerator[[1]]@prior$mu), "meta" = data.frame("Effect" = as.numeric(posteriors$delta)), "linear" = .get_bf_posteriors(posteriors, params), NULL ) } else if (bf_type == "proptest") { posteriors <- as.data.frame(as.matrix(suppressMessages( BayesFactor::posterior(x, iterations = iterations, progress = progress, index = 1) )[, "p"])) colnames(posteriors) <- "p" out <- posteriors } else if (bf_type == "xtable") { data <- get_data(x, verbose = verbose) N <- sum(data) cells <- prod(dim(data)) posts <- as.data.frame(as.matrix(suppressMessages( BayesFactor::posterior(x, iterations = iterations, progress = progress) ))) posts <- posts[, seq_len(cells)] if (sum(posts[1, ]) == 1) { posts <- posts * N } colnames(posts) <- gsub("(pi|lambda)", "cell", colnames(posts)) out <- posts } else { out <- NULL } if (isTRUE(summary) && !is.null(out)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.stanmvreg <- function(x, effects = c("fixed", "random", "all"), parameters = NULL, summary = FALSE, centrality = "mean", ...) { effects <- match.arg(effects) elements <- .get_elements(effects, "all") parms <- find_parameters(x, flatten = FALSE, parameters = parameters) for (i in names(parms)) { parms[[i]]$conditional <- sprintf("%s|%s", i, parms[[i]]$conditional) find_bracket <- regexpr(pattern = "\\[", parms[[i]]$random) parms[[i]]$random <- paste0( substr(parms[[i]]$random, start = 1, stop = find_bracket), i, "|", substr(parms[[i]]$random, start = find_bracket + 1, stop = 1000000L) ) parms[[i]]$sigma <- NULL } out <- as.data.frame(x)[unlist(lapply(.compact_list(parms), function(i) i[elements]))] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.brmsfit <- function(x, effects = "fixed", component = "all", parameters = NULL, summary = FALSE, centrality = "mean", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) component <- match.arg(component, choices = c("all", .all_elements())) if (is_multivariate(x)) { parms <- find_parameters(x, flatten = FALSE, parameters = parameters) elements <- .get_elements(effects, component) out <- as.data.frame(x)[unlist(lapply(parms, function(i) i[elements]))] } else { out <- as.data.frame(x)[.get_parms_data(x, effects, component, parameters)] } if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.stanreg <- function(x, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, summary = FALSE, centrality = "mean", ...) 
{ effects <- match.arg(effects) component <- match.arg(component) out <- as.data.frame(x)[.get_parms_data(x, effects, component, parameters)] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @export get_parameters.stanfit <- get_parameters.stanreg #' @export get_parameters.bcplm <- function(x, parameters = NULL, summary = FALSE, centrality = "mean", ...) { out <- as.data.frame(do.call(rbind, x$sims.list)) if (!is.null(parameters)) { out <- out[grepl(pattern = parameters, x = colnames(out), perl = TRUE)] } if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.bayesx <- function(x, component = c("conditional", "smooth_terms", "all"), summary = FALSE, centrality = "mean", ...) { component <- match.arg(component) smooth_dat <- data.frame( Parameter = find_parameters(x, component = "smooth_terms", flatten = TRUE), Estimate = x$smooth.hyp[, 1], Component = "smooth_terms", stringsAsFactors = FALSE ) fixed_dat <- data.frame( Parameter = find_parameters(x, component = "conditional", flatten = TRUE), Estimate = x$fixed.effects[, 1], Component = "conditional", stringsAsFactors = FALSE ) params <- switch(component, "all" = rbind(fixed_dat, smooth_dat), "conditional" = fixed_dat, "smooth_terms" = smooth_dat ) out <- .remove_backticks_from_parameter_names(params) if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @export get_parameters.mcmc.list <- function(x, parameters = NULL, summary = FALSE, centrality = "mean", ...) { out <- as.data.frame(do.call(rbind, x)) if (!is.null(parameters)) { out <- out[grepl(pattern = parameters, x = colnames(out), perl = TRUE)] } if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.bamlss <- function(x, component = c("all", "conditional", "smooth_terms", "location", "distributional", "auxiliary"), parameters = NULL, summary = FALSE, centrality = "mean", ...) { component <- match.arg(component) elements <- .get_elements(effects = "all", component) parms <- find_parameters(x, flatten = FALSE, parameters = parameters) out <- as.data.frame(unclass(x$samples))[unname(unlist(parms[elements]))] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @export get_parameters.mcmc <- function(x, parameters = NULL, summary = FALSE, centrality = "mean", ...) { out <- as.data.frame(x)[.get_parms_data(x, "all", "all", parameters)] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @export get_parameters.bayesQR <- function(x, parameters = NULL, summary = FALSE, centrality = "mean", ...) { out <- as.data.frame(x[[1]]$betadraw) names(out) <- x[[1]]$names out <- out[.get_parms_data(x, "all", "all", parameters)] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @export get_parameters.blrm <- function(x, parameters = NULL, summary = FALSE, centrality = "mean", ...) { out <- as.data.frame(x$draws) out <- out[.get_parms_data(x, "all", "all", parameters)] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.sim.merMod <- function(x, effects = c("fixed", "random", "all"), parameters = NULL, summary = FALSE, centrality = "mean", ...) 
{ effects <- match.arg(effects) fe <- re <- NULL if (effects %in% c("fixed", "all")) fe <- .get_armsim_fixef_parms(x) if (effects %in% c("random", "all")) re <- .get_armsim_ranef_parms(x) dat <- do.call(cbind, .compact_list(list(fe, re))) out <- as.data.frame(dat)[.get_parms_data(x, effects, "all", parameters)] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } #' @rdname get_parameters.BGGM #' @export get_parameters.sim <- function(x, parameters = NULL, summary = FALSE, centrality = "mean", ...) { dat <- .get_armsim_fixef_parms(x) out <- as.data.frame(dat)[.get_parms_data(x, "all", "all", parameters)] if (isTRUE(summary)) { out <- .summary_of_posteriors(out, centrality = centrality) } out } # helper ----------------------- .summary_of_posteriors <- function(out, centrality = "mean", ...) { s <- switch(centrality, "mean" = sapply(out, mean, na.rm = TRUE), "median" = sapply(out, stats::median, na.rm = TRUE), sapply(out, mean, na.rm = TRUE) ) data.frame( Parameter = names(s), Estimate = unname(s), stringsAsFactors = FALSE ) } .get_bf_posteriors <- function(posteriors, params) { cn <- intersect(colnames(posteriors), params) posteriors[, cn, drop = FALSE] } .get_parms_data <- function(x, effects, component, parameters = NULL) { elements <- .get_elements(effects, component) unlist(find_parameters(x, effects = "all", component = "all", flatten = FALSE, parameters = parameters)[elements]) } # use temporarily code from BGGM package, as long as that package is archived on CRAN .bggm_posterior_samples <- function(object, ...) { if (methods::is(object, "estimate") | methods::is(object, "explore")) { if (!methods::is(object, "default")) { stop("object most be from 'estimate' or 'explore'") } p <- object$p pcors_total <- p * (p - 1) * 0.5 I_p <- diag(p) iter <- object$iter pcor_samples <- matrix(object$post_samp$pcors[, , 51:(iter + 50)][upper.tri(I_p)], nrow = iter, ncol = pcors_total, byrow = TRUE ) cn <- colnames(object$Y) if (is.null(cn)) { col_names <- sapply(1:p, function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)] } else { col_names <- sapply(cn, function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)] } colnames(pcor_samples) <- col_names posterior_samples <- pcor_samples if (!is.null(object$formula)) { if (ncol(object$X) == 1) { beta_terms <- "(Intercept)" } else { beta_terms <- colnames(object$X) } n_beta_terms <- length(beta_terms) beta_samples <- object$post_samp$beta if (is.null(cn)) { col_names <- 1:p } else { col_names <- cn } beta_start <- matrix(beta_samples[1:n_beta_terms, 1, 51:(iter + 50)], nrow = iter, n_beta_terms, byrow = TRUE ) colnames(beta_start) <- paste0( col_names[1], "_", beta_terms ) for (i in 2:p) { beta_i <- matrix(beta_samples[1:n_beta_terms, i, 51:(iter + 50)], nrow = iter, n_beta_terms, byrow = TRUE ) colnames(beta_i) <- paste0(col_names[i], "_", beta_terms) beta_start <- cbind(beta_start, beta_i) } posterior_samples <- cbind(posterior_samples, beta_start) } } else if (methods::is(object, "var_estimate")) { if (!methods::is(object, "default")) { stop("object most be from 'var_estimate'") } p <- object$p pcors_total <- p * (p - 1) * 0.5 I_p <- diag(p) iter <- object$iter pcor_samples <- matrix(object$fit$pcors[, , 51:(iter + 50)][upper.tri(I_p)], nrow = iter, ncol = pcors_total, byrow = TRUE ) cn <- colnames(object$Y) if (is.null(cn)) { col_names <- sapply(1:p, function(x) paste(1:p, x, sep = "--"))[upper.tri(I_p)] } else { col_names <- sapply(cn, function(x) paste(cn, x, sep = "--"))[upper.tri(I_p)] } colnames(pcor_samples) <- 
col_names posterior_samples <- pcor_samples n_beta_terms <- nrow(object$beta_mu) beta_samples <- object$fit$beta col_names <- colnames(object$Y) beta_terms <- colnames(object$X) beta_start <- matrix(beta_samples[1:n_beta_terms, 1, 51:(iter + 50)], nrow = iter, n_beta_terms, byrow = TRUE ) colnames(beta_start) <- paste0(col_names[1], "_", beta_terms) for (i in 2:p) { beta_i <- matrix(beta_samples[1:n_beta_terms, i, 51:(iter + 50)], nrow = iter, n_beta_terms, byrow = TRUE ) colnames(beta_i) <- paste0(col_names[i], "_", beta_terms) beta_start <- cbind(beta_start, beta_i) } posterior_samples <- cbind(posterior_samples, beta_start) } else { stop("object class not currently supported") } posterior_samples } insight/R/get_intercept.R0000644000175000017500000000223414077615665015253 0ustar nileshnilesh#' @title Get the value at the intercept #' @name get_intercept #' #' @description Returns the value at the intercept (i.e., the intercept #' parameter), and `NA` if there isn't one. #' #' @param ... Not used. #' @inheritParams get_residuals #' #' @return The value of the intercept. #' #' @examples #' get_intercept(lm(Sepal.Length ~ Petal.Width, data = iris)) #' get_intercept(lm(Sepal.Length ~ 0 + Petal.Width, data = iris)) #' #' if (require("lme4")) { #' get_intercept(lme4::lmer(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris)) #' } #' if (require("gamm4")) { #' get_intercept(gamm4::gamm4(Sepal.Length ~ s(Petal.Width), data = iris)) #' } #' @export get_intercept <- function(x, ...) { UseMethod("get_intercept") } #' @export get_intercept.default <- function(x, ...) { params <- get_parameters(x) intercept <- params[params$Parameter == "(Intercept)", 2] if (length(intercept) == 0) { intercept <- NA } intercept } #' @export get_intercept.stanreg <- function(x, ...) { params <- get_parameters(x) if ("(Intercept)" %in% names(params)) { params[["(Intercept)"]] } else { NA } } insight/R/all_equal_models.R0000644000175000017500000000451114077615664015720 0ustar nileshnilesh#' @title Checks if all objects are models of same class #' @name all_models_equal #' #' @description Small helper that checks if all objects are *supported* #' (regression) model objects and of same class. #' #' @param ... A list of objects. #' @inheritParams get_variance #' #' @return A logical, `TRUE` if `x` are all supported model objects #' of same class. #' #' @examples #' if (require("lme4")) { #' data(mtcars) #' data(sleepstudy) #' #' m1 <- lm(mpg ~ wt + cyl + vs, data = mtcars) #' m2 <- lm(mpg ~ wt + cyl, data = mtcars) #' m3 <- lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) #' m4 <- glm(formula = vs ~ wt, family = binomial(), data = mtcars) #' #' all_models_same_class(m1, m2) #' all_models_same_class(m1, m2, m3) #' all_models_same_class(m1, m4, m2, m3, verbose = TRUE) #' all_models_same_class(m1, m4, mtcars, m2, m3, verbose = TRUE) #' } #' @export all_models_equal <- function(..., verbose = FALSE) { objects <- list(...) 
object_names <- match.call(expand.dots = FALSE)$`...` all_supported <- vapply(objects, is_model_supported, FUN.VALUE = logical(1)) all_classes <- sapply(objects, class) if (is.matrix(all_classes)) { all_classes <- as.vector(all_classes[1, ]) } else if (is.list(all_classes)) { all_classes <- sapply(all_classes, function(i) i[1]) } all_equal <- all(vapply(all_classes[-1], function(i) identical(i, all_classes[1]), FUN.VALUE = logical(1))) if (!all(all_supported) && verbose) { differ <- which(!all_supported) m1 <- "Following objects are no (supported) models:" m2 <- paste0(sprintf("%s", object_names[differ]), collapse = ", ") message(paste(m1, m2, collapse = " ")) } if (!all(all_equal) && verbose) { differ <- which(!duplicated(all_classes)) m1 <- sprintf( "Following objects are not identical with %s (of class \"%s\"):", object_names[1], all_classes[[1]] ) m2 <- paste0( sprintf( "%s (\"%s\")", object_names[differ[-1]], sapply(all_classes[differ[-1]], function(x) as.vector(x[[1]])) ), collapse = ", " ) message(paste(m1, m2, collapse = " ")) } all(all_supported) && all(all_equal) } #' @rdname all_models_equal #' @export all_models_same_class <- all_models_equal insight/R/get_priors.R0000644000175000017500000004423514131014371014555 0ustar nileshnilesh#' @title Get summary of priors used for a model #' @name get_priors #' #' @description Provides a summary of the prior distributions used #' for the parameters in a given model. #' #' @param x A Bayesian model. #' @param verbose Toggle warnings and messages. #' @param ... Currently not used. #' #' @return A data frame with a summary of the prior distributions used #' for the parameters in a given model. #' #' @examples #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(Sepal.Width ~ Species * Petal.Length, data = iris) #' get_priors(model) #' } #' #' @export get_priors <- function(x, ...) { UseMethod("get_priors") } # ========================================================================= # RSTANARM ---------------------------------------------------------------- # ========================================================================= #' @export get_priors.stanreg <- function(x, verbose = TRUE, ...) { # installed? check_if_installed("rstanarm") ps <- rstanarm::prior_summary(x) l <- .compact_list(lapply(ps[c("prior_intercept", "prior")], function(.x) { if (!is.null(.x)) { # quick and dirty fix for flat priors # else, .compact_list() will set this item as "NA" if (is.na(.x$dist)) { .x$dist <- "uniform" .x$location <- 0 .x$scale <- 0 .x$adjusted_scale <- 0 } .x <- do.call(cbind, .x) as.data.frame(.x) } })) # find all column names, add missing columns manually, so merge() works cn <- unique(unlist(lapply(l, colnames))) l <- lapply(l, function(.x) { missing <- setdiff(cn, colnames(.x)) if (length(missing)) { .x[missing] <- NA } .x }) if (length(l) > 1) { prior_info <- do.call(rbind, l) } else { cn <- colnames(l[[1]]) prior_info <- as.data.frame(l) colnames(prior_info) <- cn } # fix parameters for flat priors here flat <- which(prior_info$dist == "uniform") if (length(flat) > 0) { prior_info$location[flat] <- NA prior_info$scale[flat] <- NA prior_info$adjusted_scale[flat] <- NA } params <- find_parameters(x, parameters = "^(?!(R2|log-fit_ratio))")$conditional # this is a particular fix for the "R2" prior, which conveys prior # information about *all* the parameters. 
In this case, number of # parameters doesn't match number of priors if (length(params) != nrow(prior_info)) { if (length(params) == 1) { prior_info$parameter <- "(Intercept)" } else if ("R2" %in% prior_info$dist) { prior_info$parameter <- prior_info$dist prior_info$parameter[prior_info$dist != "R2"] <- "(Intercept)" } } else { prior_info$parameter <- params } prior_info <- prior_info[, intersect(c("parameter", "dist", "location", "scale", "adjusted_scale"), colnames(prior_info))] colnames(prior_info) <- gsub("dist", "distribution", colnames(prior_info)) colnames(prior_info) <- gsub("df", "DoF", colnames(prior_info)) priors <- as.data.frame(lapply(prior_info, function(x) { if (.is_numeric_character(x)) { as.numeric(as.character(x)) } else { as.character(x) } }), stringsAsFactors = FALSE) string <- strsplit(names(priors), "_", fixed = TRUE) string <- lapply(string, .capitalize) names(priors) <- unlist(lapply(string, paste0, collapse = "_")) priors } #' @export get_priors.stanmvreg <- function(x, ...) { # installed? check_if_installed("rstanarm") ps <- rstanarm::prior_summary(x) l <- .compact_list(lapply(ps[c("prior_intercept", "prior")], function(.x) { lapply(.x, function(.i) { if (!is.null(.i)) do.call(cbind, .i) }) })) prior_info <- do.call(rbind, lapply(l, function(.x) { if (length(.x) > 1) { out <- lapply(names(.x), function(.i) { if (!("adjusted_scale" %in% colnames(.x[[.i]]))) .x[[.i]] <- cbind(.x[[.i]], adjusted_scale = NA) data.frame(.x[[.i]], response = .i, stringsAsFactors = FALSE) }) do.call(rbind, out) } else { cn <- colnames(.x[[1]]) prior_info <- as.data.frame(.x) colnames(prior_info) <- cn } })) # find parameter names params <- unlist(lapply(find_parameters(x), function(.i) .i$conditional)) params <- params[c(which(params == "(Intercept)"), which(params != "(Intercept)"))] prior_info$parameter <- params prior_info <- prior_info[, intersect(c("parameter", "dist", "location", "scale", "adjusted_scale", "response"), colnames(prior_info))] colnames(prior_info) <- gsub("dist", "distribution", colnames(prior_info)) colnames(prior_info) <- gsub("df", "DoF", colnames(prior_info)) priors <- as.data.frame(lapply(prior_info, function(x) { if (.is_numeric_character(x)) { as.numeric(as.character(x)) } else { as.character(x) } }), stringsAsFactors = FALSE) string <- strsplit(names(priors), "_", fixed = TRUE) string <- lapply(string, .capitalize) names(priors) <- unlist(lapply(string, paste0, collapse = "_")) # minor fixes priors$Parameter <- sprintf("%s|%s", priors$Response, priors$Parameter) priors } # ========================================================================= # BRMS ---------------------------------------------------------------- # ========================================================================= #' @rdname get_priors #' @export get_priors.brmsfit <- function(x, verbose = TRUE, ...) 
{ info <- as.data.frame(.print_brmsprior_preparation(x$prior)) info$Parameter <- .match_brms_priors_to_params(info) priors <- data.frame(Parameter = info$Parameter) # Format the prior string ------------------------------------ priors$Distribution <- gsub( "(.*)\\(.*", "\\1", ifelse(info$prior == "(flat)", "uniform", info$prior ) ) priors$Distribution[priors$Distribution == "lkj_corr_cholesky"] <- "lkj" # Initialize empty priors$Location <- NA priors$Scale <- NA priors$df <- NA # student_t(df, location, scale) is_student_t <- priors$Distribution == "student_t" priors$Location[is_student_t] <- gsub("(.*)\\((.*)\\,(.*)\\,(.*)\\)", "\\3", info$prior[is_student_t]) priors$Scale[is_student_t] <- gsub("(.*)\\((.*)\\,(.*)\\,(.*)\\)", "\\4", info$prior[is_student_t]) priors$df[is_student_t] <- gsub("(.*)\\((.*)\\,(.*)\\,(.*)\\)", "\\2", info$prior[is_student_t]) # normal(location, scale) is_normal <- priors$Distribution == "normal" priors$Location[is_normal] <- gsub("(.*)\\((.*)\\,(.*)\\)", "\\2", info$prior[is_normal]) priors$Scale[is_normal] <- gsub("(.*)\\((.*)\\,(.*)\\)", "\\3", info$prior[is_normal]) # lkj(eta) is_lkj <- priors$Distribution == "lkj" priors$Location[is_lkj] <- gsub("(.*)\\((.*)\\)", "\\2", info$prior[is_lkj]) # Transform to numeric priors$Location <- as.numeric(priors$Location) priors$Scale <- as.numeric(priors$Scale) priors$df <- as.numeric(priors$df) # Get parameters params <- find_parameters(x, ..., flatten = TRUE) # remove random params <- params[!grepl("^r_(.*)\\]$", params)] params <- params[!grepl("^r_(.*)\\.\\d$", params)] # Loop through all parameters and try to retrieve its correct prior out <- data.frame() for (p in params) { subset <- priors[priors$Parameter == p, ] # If nothing corresponding directly to the parameter... if (nrow(subset) == 0) { # Special treatment for cor_* subset <- priors[sapply(priors$Parameter, grepl, x = p), ] # If still empty, make empty df if (nrow(subset) == 0) { subset <- stats::setNames(data.frame(t(rep(NA, 5))), c("Parameter", "Distribution", "Location", "Scale", "df")) } } # Rbind the stuff subset$Parameter <- p out <- rbind(out, subset) } row.names(out) <- NULL attr(out, "priors") <- info out } # Utils ------- .print_brmsprior_preparation <- function(x) { # This function is taken from brms:::print.brmsprior # which adds information using private functions upon printing # but doesn't return it check_if_installed("brms") .stan_base_prior <- function(prior) { stopifnot(length(unique(prior$class)) <= 1) take <- with(prior, !nzchar(coef) & nzchar(prior)) prior <- prior[take, ] if (!NROW(prior)) { return("") } vars <- c("group", "nlpar", "dpar", "resp", "class") for (v in vars) { take <- nzchar(prior[[v]]) if (any(take)) { prior <- prior[take, ] } } stopifnot(NROW(prior) == 1) prior$prior } .find_rows <- function(x, ..., ls = list(), fun = "%in%") { x <- as.data.frame(x) if (!nrow(x)) { return(logical(0)) } out <- rep(TRUE, nrow(x)) ls <- c(ls, list(...)) if (!length(ls)) { return(out) } if (is.null(names(ls))) { stop("Argument 'ls' must be named.") } for (name in names(ls)) { out <- out & brms::do_call(fun, list(x[[name]], ls[[name]])) } out } stopifnot(brms::is.brmsprior(x)) if (is.null(x$source)) { x$source <- "(unknown)" } x$source[!nzchar(x$source)] <- "(unknown)" # column names to vectorize over cols <- c("group", "nlpar", "dpar", "resp", "class") empty_strings <- rep("", 4) for (i in which(!nzchar(x$prior))) { ls <- x[i, cols] ls <- rbind(ls, c(empty_strings, ls$class)) ls <- as.list(ls) # sub_prior <- subset2(x, ls = ls) sub_prior 
<- x[.find_rows(x, ls = ls, fun = "%in%"), , drop = FALSE] base_prior <- .stan_base_prior(sub_prior) if (nzchar(base_prior)) { x$prior[i] <- base_prior x$source[i] <- "(vectorized)" } else { x$prior[i] <- "(flat)" } } x } .match_brms_priors_to_params <- function(prior_summary) { # Rename for easier manipulation pr <- prior_summary # Initialize empty string p <- rep("", nrow(pr)) # class == Intercept ------------------------- p <- ifelse(pr$class == "Intercept", paste0( "b", ifelse(pr$dpar != "", paste0("_", pr$dpar), ""), "_Intercept" ), p ) # class == b ------------------------------ # Are there other possible parameters? p <- ifelse( pr$class == "b", paste0( "b_", ifelse(pr$dpar != "", paste0(pr$dpar, "_"), ""), pr$coef ), p ) # class == L ------------------------------ p <- ifelse(pr$class == "L", paste0("cor_", pr$group, "_"), p) # class == sigma ------------------------------ # TODO: I only saw it alone, but possibly can have other parameters p <- ifelse(pr$class == "sigma", "sigma", p) # class == sd ------------------------------- p <- ifelse( pr$class == "sd", paste0( "sd_", pr$group, "__", ifelse(pr$dpar != "", paste0(pr$dpar, "_"), ""), pr$coef ), p ) # class == sds ------------------------------- # TODO: Fix coef for sds_ # TODO: Fix beta for smooth term (bs_coef instead of b_coef) # p <- ifelse( # pr$class == "sds", # paste0("sds", ifelse(pr$coef != "", paste0("_", pr$coef), "")), # p) p } # ========================================================================= # BCPLM ---------------------------------------------------------------- # ========================================================================= #' @export get_priors.bcplm <- function(x, ...) { params <- setdiff(find_parameters(x, flatten = TRUE), c("phi", "p")) location <- eval(parse(text = .safe_deparse(x@call))[[1]]$prior.beta.mean) if (is.null(location)) location <- 0 scale <- eval(parse(text = .safe_deparse(x@call))[[1]]$prior.beta.var) if (is.null(scale)) scale <- 10000 data.frame( Parameter = params, Distribution = "normal", Location = location, Scale = scale, stringsAsFactors = FALSE ) } # ========================================================================= # meta ------------------------------------------------------------- # ========================================================================= #' @export get_priors.meta_random <- function(x, ...) { params <- rownames(x$estimates) params[params == "d"] <- "(Intercept)" prior_info1 <- attr(x$prior_d, "param") prior_info2 <- attr(x$prior_tau, "param") fam1 <- attr(x$prior_d, "family") fam2 <- attr(x$prior_tau, "family") loc1 <- which(names(prior_info1) %in% c("mean", "location", "shape"))[1] loc2 <- which(names(prior_info2) %in% c("mean", "location", "shape"))[1] scale1 <- which(names(prior_info1) %in% c("scale", "sd"))[1] scale2 <- which(names(prior_info2) %in% c("scale", "sd"))[1] out <- data.frame( Parameter = params, Distribution = c(fam1, fam2), Location = c(prior_info1[loc1], prior_info2[loc2]), Scale = c(prior_info1[scale1], prior_info2[scale2]), stringsAsFactors = FALSE, row.names = NULL ) .fix_metabma_priorname(out) } #' @export get_priors.meta_fixed <- function(x, ...) 
{ params <- rownames(x$estimates) params[params == "d"] <- "(Intercept)" prior_info <- attr(x$prior_d, "param") fam <- attr(x$prior_d, "family") loc <- which(names(prior_info) %in% c("mean", "location", "shape"))[1] scale <- which(names(prior_info) %in% c("scale", "sd"))[1] out <- data.frame( Parameter = params, Distribution = fam, Location = prior_info[loc], Scale = prior_info[scale], stringsAsFactors = FALSE ) .fix_metabma_priorname(out) } .fix_metabma_priorname <- function(x) { x$Distribution <- gsub("t", "Student's t", x$Distribution, fixed = TRUE) x$Distribution <- gsub("norm", "Normal", x$Distribution, fixed = TRUE) x$Distribution <- gsub("invgamma", "Inverse gamma", x$Distribution, fixed = TRUE) x } # ========================================================================= # BayesFactor ------------------------------------------------------------- # ========================================================================= #' @export get_priors.BFBayesFactor <- function(x, ...) { prior <- .compact_list(utils::tail(x@numerator, 1)[[1]]@prior[[1]]) bf_type <- .classify_BFBayesFactor(x) prior_names <- switch(bf_type, "correlation" = "rho", "ttest1" = , "ttest2" = "Difference", "meta" = "Effect", "proptest" = "Proportion", "xtable" = "Ratio", names(prior) ) # Distribution if (bf_type == "xtable") { Distribution <- x@denominator@type[[1]] } else if (bf_type == "correlation") { Distribution <- "beta" } else { Distribution <- "cauchy" } # Prior prior_scale <- unlist(prior) if (length(prior_names) != length(prior_scale)) { prior_names <- unlist(lapply(prior_names, function(i) { if (!is.null(names(prior[[i]]))) { names(prior[[i]]) } else { rep(i, times = length(prior[[i]])) } })) } if (bf_type == "correlation") { # "A shifted, scaled beta(1/rscale,1/rscale) prior distribution is assumed for rho" prior_scale <- 1 / prior_scale } # Location if (bf_type == "correlation") { location <- prior_scale } else { location <- 0 } # Prepare output if (bf_type == "linear") { # find data types, to match priors data_types <- x@numerator[[1]]@dataTypes params <- find_parameters(x) # create data frame of parameter names and components out <- as.data.frame(utils::stack(params), stringsAsFactors = FALSE) colnames(out) <- c("Parameter", "Component") out$Distribution <- Distribution out$Location <- location out$Scale <- NA # find parameter names pattern to match data types find_types <- do.call(rbind, strsplit(out$Parameter, "-", TRUE))[, 1, drop = TRUE] interactions <- grepl(":", find_types, fixed = TRUE) find_types[interactions] <- gsub("(.*):(.*)", "\\2", find_types[interactions]) cont_types <- data_types == "continuous" data_types[cont_types] <- paste0(data_types[cont_types], ".", names(data_types[cont_types])) for (i in 1:length(data_types)) { out$Scale[find_types == names(data_types)[i]] <- prior_scale[data_types[i]] } # missing information to NA out$Distribution[is.na(out$Scale)] <- NA out$Location[is.na(out$Scale)] <- NA out[c("Parameter", "Distribution", "Location", "Scale")] } else { data.frame( Parameter = prior_names, Distribution = Distribution, Location = location, Scale = prior_scale, stringsAsFactors = FALSE, row.names = NULL ) } } # ========================================================================= # blavaan ------------------------------------------------------------- # ========================================================================= #' @export get_priors.blavaan <- function(x, ...) { # installed? 
check_if_installed("lavaan") PE <- lavaan::parameterEstimates( x, se = FALSE, ci = FALSE, remove.eq = FALSE, remove.system.eq = TRUE, remove.ineq = FALSE, remove.def = FALSE, add.attributes = TRUE ) if (!("group" %in% names(PE))) PE$group <- 1 newpt <- x@ParTable pte2 <- which(newpt$free > 0) relevant_rows <- match( with(newpt, paste(lhs[pte2], op[pte2], rhs[pte2], group[pte2], sep = "")), paste(PE$lhs, PE$op, PE$rhs, PE$group, sep = "") ) # Priors priors <- rep(NA, nrow(PE)) priors[relevant_rows] <- newpt$prior[pte2] priors[is.na(PE$prior)] <- NA stats::na.omit(data.frame( Parameter = paste(PE$lhs, PE$op, PE$rhs, sep = ""), Distribution = gsub("(.*)\\((.*)", "\\1", priors), Location = as.numeric(gsub("(.*)\\((.*)\\,(.*)\\)(.*)", "\\2", priors)), Scale = as.numeric(gsub("(.*)\\((.*)\\,(.*)\\)(.*)", "\\3", priors)), stringsAsFactors = FALSE )) } #' @export get_priors.mcmc.list <- function(x, ...) { NULL } # Utils ------------------------------------------------------------------- .is_numeric_character <- function(x) { (is.character(x) && !anyNA(suppressWarnings(as.numeric(stats::na.omit(x[nchar(x) > 0]))))) || (is.factor(x) && !anyNA(suppressWarnings(as.numeric(levels(x))))) } insight/inst/0000755000175000017500000000000014165530166013035 5ustar nileshnileshinsight/inst/CITATION0000644000175000017500000000057714044454343014201 0ustar nileshnileshbibentry( bibtype = "article", title = "{insight}: A Unified Interface to Access Information from Model Objects in {R}.", volume = "4", doi = "10.21105/joss.01412", number = "38", journal = "Journal of Open Source Software", author = c(person("Daniel", "Lüdecke"), person("Philip", "Waggoner"), person("Dominique", "Makowski")), year = "2019", pages = "1412" ) insight/inst/WORDLIST0000644000175000017500000000211514151371005014214 0ustar nileshnileshAPA Anova BFBayesFactor BGGM BayesFactor Coloured DOI GJRM GLMM GLMMadaptive GLMMs Gelman HDI Ieno MCMCglmm MCSE Nakagawa Newburgh PIs RStudio Rchoice Savel'ev Schielzeth Schielzeth’s Vehtari Zuur al ateucher bamlss bayesGARCH bayestestR bbmle behaviour bfsl blavaan brms brmsfit btergm ci cloglog cmprsk colour coloured comparator coxrobust datawizard dirichlet doi easystats eflm emm emmGrid emmeans epiR ergm estimatr et fixest gam ggeffects github glmm glmmTMB heteroskedasticity htest https intra intraclass io ivFixed ivprobit joineRML joss knitr labelling lavaan lme lmodel lmtest logloss lqmm md merMod merTools metaBMA metaplus mhurdle mira modelled modelling multcomp mvord occurence optimizers plm poisson pre pscl quantreg rOpenSci riskRegression rms rstanarm sampleSelection sjPlot smicd spaMM spatialreg specificities stanreg svyVGAM systemfit th tidymodels tranformations tseries tweedie unformatted untransformed variates vectorized vgam warmup warmups ’s insight/inst/doc/0000755000175000017500000000000014165530166013602 5ustar nileshnileshinsight/inst/doc/insight.html0000644000175000017500000107311114165530166016141 0ustar nileshnilesh Getting Started with Accessing Model Information

Getting Started with Accessing Model Information

When fitting any statistical model, there are many useful pieces of information that are simultaneously calculated and stored beyond coefficient estimates and general model fit statistics. Although there exist some generic functions to obtain model information and data, many package-specific modeling functions do not provide methods that allow users to access this valuable information.

insight is an R package that fills this important gap by providing a suite of functions that support almost any model. The goal of insight is to provide easy, intuitive, and consistent access to the information contained in model objects. These tools aid applied researchers in virtually any field who fit, diagnose, and present statistical models by streamlining access to every aspect of many model objects via consistent syntax and output.

Built with non-programmers in mind, insight offers a broad toolbox for making model and data information easily accessible. While the package provides many useful functions for working with and understanding model objects (discussed below), we suggest users start with model_info(), as this function provides a clean and consistent overview of a model object (e.g., the functional form of the model, the model family, link function, number of observations, variables included in the specification, etc.). With this overview in hand, users can adapt the other functions for more nuanced exploration of, and interaction with, virtually any model object.
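For instance, the following minimal sketch shows how such an overview can be obtained for a simple linear model (the element names shown here, such as is_linear, are illustrative and the exact elements returned may vary across model classes and insight versions):

library(insight)

# a simple linear model
m <- lm(Sepal.Length ~ Species + Petal.Width, data = iris)

# model_info() returns a list; inspect the available elements
info <- model_info(m)
names(info)

# e.g., check whether the model is a linear (Gaussian) model
info$is_linear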

Overview of Core Functions

A statistical model is an object describing the relationship between variables. Although there are many different types of models, each with its own specificities, most of them share some common components. The goal of insight is to help you retrieve these components.

The get_* prefix extracts values (or data) associated with model-specific objects (e.g., parameters or variables), while the find_* prefix lists model-specific objects (e.g., priors or predictors). These are powerful families of functions allowing for great flexibility in use, whether at a high, descriptive level (find_*) or a narrower level of statistical inspection and reporting (get_*). We point users to the package documentation or the complementary package website, https://easystats.github.io/insight/, for a detailed list of the arguments associated with each function as well as the values returned by each function.
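As a brief illustration of this naming scheme (a minimal sketch; both families of functions are demonstrated in more detail below):

m <- lm(Sepal.Length ~ Petal.Width, data = iris)

# find_*: lists the names of the model parameters
find_parameters(m)

# get_*: extracts the corresponding values (the coefficients)
get_parameters(m)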

Definition of Model Components

The functions from insight address different components of a model. In an effort to avoid confusion about specific “targets” of each function, in this section we provide a short explanation of insight’s definitions of regression model components.

Data

The dataset used to fit the model.

Parameters

Values estimated or learned from data that capture the relationship between variables. In regression models, these are usually referred to as coefficients.

Response and Predictors

  • response: the outcome or response variable (dependent variable) of a regression model.
  • predictor: independent variables of (the fixed part of) a regression model. For mixed models, variables that appear only in the random effects part (i.e. grouping factors) of the model are not returned as predictors by default. However, these can be included using additional arguments in the function call. Predictors are treated as "unique": if a variable appears both as a fixed effect and as a random slope, it is treated as one (the same) predictor.

Variables

Any unique variable names that appear in a regression model, e.g., response variable, predictors or random effects. A “variable” only relates to the unique occurrence of a term, or the term name. For instance, the expression x + poly(x, 2) has only the variable x.

Terms

Terms themselves consist of variable and factor names separated by operators, or involve arithmetic expressions. For instance, the expression x + poly(x, 2) has one variable x, but two terms x and poly(x, 2).

Random Effects

  • random slopes: variables that are specified as random slopes in a mixed effects model.
  • random or grouping factors: variables that are specified as grouping variables in a mixed effects model.

Examples

Aren’t the predictors, terms, and parameters the same thing?

In some cases, yes. But not in all cases: sometimes it is useful to have the "bare" variable names, and sometimes it is also useful to have information about possible transformations of the variables. That is the main reason for having functions that cover similar aspects of a model object (like find_terms() and find_predictors() or find_variables()).

Here are some examples that demonstrate the differences of each function:

library(insight)
library(lme4)
data(sleepstudy)
sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE)
sleepstudy$mysubgrp <- NA
sleepstudy$Weeks <- sleepstudy$Days / 7
sleepstudy$cat <- as.factor(sample(letters[1:4], nrow(sleepstudy), replace = TRUE))

for (i in 1:5) {
  filter_group <- sleepstudy$mygrp == i
  sleepstudy$mysubgrp[filter_group] <-
    sample(1:30, size = sum(filter_group), replace = TRUE)
}

model <- lmer(
  Reaction ~ Days + I(Days^2) + log1p(Weeks) + cat +
    (1 | mygrp / mysubgrp) + 
    (1 + Days | Subject),
  data = sleepstudy
)
# find the response variable
find_response(model)
#> [1] "Reaction"

# find all predictors, fixed part by default
find_predictors(model)
#> $conditional
#> [1] "Days"  "Weeks" "cat"

# find random effects, grouping factors only
find_random(model)
#> $random
#> [1] "mysubgrp:mygrp" "mygrp"          "Subject"

# find random slopes
find_random_slopes(model)
#> $random
#> [1] "Days"

# find all predictors, including random effects
find_predictors(model, effects = "all", component = "all")
#> $conditional
#> [1] "Days"  "Weeks" "cat"  
#> 
#> $random
#> [1] "mysubgrp" "mygrp"    "Subject"

# find all terms, including response and random effects
# this is essentially the same as the previous example plus response
find_terms(model)
#> $response
#> [1] "Reaction"
#> 
#> $conditional
#> [1] "Days"         "I(Days^2)"    "log1p(Weeks)" "cat"         
#> 
#> $random
#> [1] "mysubgrp" "mygrp"    "Days"     "Subject"

# find all variables, i.e. also quadratic or log-transformed predictors
find_variables(model)
#> $response
#> [1] "Reaction"
#> 
#> $conditional
#> [1] "Days"  "Weeks" "cat"  
#> 
#> $random
#> [1] "mysubgrp" "mygrp"    "Subject"

Finally, there is find_parameters(). Parameters are also known as coefficients, and find_parameters() does exactly that: returns the model coefficients.

# find model parameters, i.e. coefficients
find_parameters(model)
#> $conditional
#> [1] "(Intercept)"  "Days"         "I(Days^2)"    "log1p(Weeks)" "catb"        
#> [6] "catc"         "catd"        
#> 
#> $random
#> $random$`mysubgrp:mygrp`
#> [1] "(Intercept)"
#> 
#> $random$Subject
#> [1] "(Intercept)" "Days"       
#> 
#> $random$mygrp
#> [1] "(Intercept)"

Examples of Use Cases in R

We now provide some examples of use cases of the insight package. These examples probably do not cover typical real-world problems, but they illustrate the core idea of this package: a unified interface for accessing model information. insight should help both users and package developers by reducing the hassle of dealing with the many exceptions that various modelling packages introduce when accessing model information.

Making Predictions at Specific Values of a Term of Interest

Say the goal is to make predictions for a certain term, holding the remaining covariates constant. This is achieved by calling predict() and feeding the newdata argument with the values of the term of interest as well as the "constant" values for the remaining covariates. The functions get_data() and find_predictors() are used to get this information, which can then be used in the call to predict().

In this example, we fit a simple linear model, but it could be replaced by (m)any other models, so this approach is “universal” and applies to many different model objects.

library(insight)
m <- lm(
  Sepal.Length ~ Species + Petal.Width + Sepal.Width, 
  data = iris
)

dat <- get_data(m)
pred <- find_predictors(m, flatten = TRUE)

l <- lapply(pred, function(x) {
  if (is.numeric(dat[[x]]))
    mean(dat[[x]])
  else
    unique(dat[[x]])
})

names(l) <- pred
l <- as.data.frame(l)

cbind(l, predictions = predict(m, newdata = l))
#>      Species Petal.Width Sepal.Width predictions
#> 1     setosa    1.199333    3.057333    5.101427
#> 2 versicolor    1.199333    3.057333    6.089557
#> 3  virginica    1.199333    3.057333    6.339015

Printing Model Coefficients

The next example emphasizes how insight makes it possible to generalize functions to many different model objects. The aim is simply to print coefficients in a complete, human-readable sentence.

The first approach uses functions that are available for some, but not all, models to access information about model coefficients.

print_params <- function(model){
  paste0(
    "My parameters are ",
    paste0(row.names(summary(model)$coefficients),  collapse = ", "),
    ", thank you for your attention!"
  )
}

m1 <- lm(Sepal.Length ~ Petal.Width, data = iris)
print_params(m1)
#> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!"

# obviously, something is missing in the output
m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris)
print_params(m2)
#> [1] "My parameters are , thank you for your attention!"

As we can see, the function fails for gam models. Because access to model information depends on the model class in the R ecosystem, we would need to create specific functions for every model type. With insight, users can write a single function without having to worry about the model type.

print_params <- function(model){
  paste0(
    "My parameters are ",
    paste0(insight::find_parameters(model, flatten = TRUE),  collapse = ", "),
    ", thank you for your attention!"
  )
}

m1 <- lm(Sepal.Length ~ Petal.Width, data = iris)
print_params(m1)
#> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!"

m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris)
print_params(m2)
#> [1] "My parameters are (Intercept), Petal.Width, s(Petal.Length), thank you for your attention!"

Examples of Use Cases in R packages

insight is already used by different packages to solve problems that typically occur when the users’ inputs are different model objects of varying complexity.

For example, ggeffects, a package that computes and visualizes marginal effects of regression models, requires extraction of the data (get_data()) that was used to fit the models, as well as retrieval of all model predictors (find_predictors()) to decide which covariates are held constant when computing marginal effects. All of this information is required in order to create a data frame for predict(newdata = <data frame>). Furthermore, the models' link functions (link_function()) and link-inverse functions (link_inverse()) are required to obtain predictions on the model's response scale.
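The following sketch illustrates this last step for a simple logistic regression (using the built-in mtcars data); link_inverse() returns the function that maps predictions from the link scale back to the response scale:

m <- glm(vs ~ wt, data = mtcars, family = binomial())

# predictions on the link (logit) scale
eta <- predict(m, newdata = data.frame(wt = c(2, 3, 4)))

# transform to the response scale (probabilities) using the model's link-inverse
link_inverse(m)(eta)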

The sjPlot package creates plots or summary tables from regression models, and uses insight functions to get model information (model_info() or find_response()), which is used to build the components of the final plot or table. This information helps, for example, in labeling table columns by providing information on the effect type (odds ratio, incidence rate ratio, etc.), or on the different model components, so that plots and tables can be split into the "conditional" and "zero-inflated" parts of a model in the case of models with zero-inflation.

bayestestR mainly relies on get_priors() and get_parameters() to retrieve the necessary information to compute various indices or statistics of Bayesian models (like HDI, Credible Interval, MCSE, effective sample size, Bayes factors, etc.). The advantage of get_parameters() in this context is that regardless of the number of parameters the posterior distribution has, the necessary data can be easily accessed from the model objects. There is no need to write original, complicated code or regular expressions.
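A minimal sketch of this idea is shown below (assuming the rstanarm package is installed; fitting even this small model takes a moment):

if (require("rstanarm", quietly = TRUE)) {
  m <- stan_glm(mpg ~ wt, data = mtcars, refresh = 0)

  # one column per parameter, one row per posterior draw
  draws <- get_parameters(m)
  dim(draws)

  # summary of the priors used in the model
  get_priors(m)
}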

A last example is the performance package, which provides functions for computing measures to assess model quality. Many of these indices (e.g. checks for overdispersion or zero-inflation, predictive accuracy, logloss, RMSE, etc.) require the number of observations (n_obs()) or the data from the response variable (get_response()). Again, in this context, functions from insight are helpful because they offer unified access to this information.
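For example, the following sketch computes the (in-sample) root mean squared error from the number of observations and the response values retrieved via insight:

m <- lm(mpg ~ wt + cyl, data = mtcars)

n <- n_obs(m)          # number of observations
y <- get_response(m)   # observed response values

# root mean squared error, computed from observed and fitted values
sqrt(sum((y - fitted(m))^2) / n)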

insight/inst/doc/display.Rmd0000644000175000017500000000704014144235000015676 0ustar nileshnilesh--- title: "Formatting, printing and exporting tables" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Formatting, printing and exporting tables} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) if (!requireNamespace("poorman", quietly = TRUE) || !requireNamespace("gt", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(poorman) library(gt) } ``` ## The difference between a dataframe and its render Most of objects encountered throughout the **easystats** packages are "tables", i.e., a 2D matrix with columns and rows. In R, these objects are often, at their core, *data frames*. Let's create one to use as an example: ```{r, warning=FALSE, message=FALSE} library(insight) df <- data.frame( Variable = c(1, 3, 5, 3, 1), Group = c("A", "A", "A", "B", "B"), CI = c(0.95, 0.95, 0.95, 0.95, 0.95), CI_low = c(3.35, 2.425, 6.213, 12.1, 1.23), CI_high = c(4.23, 5.31, 7.123, 13.5, 3.61), p = c(0.001, 0.0456, 0.45, 0.0042, 0.34) ) df ``` When I display in in the console (calling an object - e.g. `df` - is actually equivalent to calling `print(df)`), the output looks alright, but it could be improved. Some packages, such as **knitr**, have functions to create a nicer output. For instance, in markdown, so that it can be nicely rendered in markdown documents when copied: ```{r, eval=FALSE} knitr::kable(df, format = "markdown") ``` ``` | Variable|Group | CI| CI_low| CI_high| p| |--------:|:-----|----:|------:|-------:|------:| | 1|A | 0.95| 3.350| 4.230| 0.0010| | 3|A | 0.95| 2.425| 5.310| 0.0456| | 5|A | 0.95| 6.213| 7.123| 0.4500| | 3|B | 0.95| 12.100| 13.500| 0.0042| | 1|B | 0.95| 1.230| 3.610| 0.3400| ``` Or HTML, which again makes it look great in HTML files (such as this webpage you're reading): ```{r, results='asis'} knitr::kable(df, format = "html") ``` ## The *insight* workflow The **insight** package also contains function to improve the "printing", or rendering, of tables. Its design dissociates two separate and independent steps: *formatting* and *exporting*. ### Formatting The purpose of formatting is to improve a given table, while still keeping it as a regular R data frame, so that it can be for instance further modified by the user. ```{r} format_table(df) ``` As you can see, `format_table()` modifies columns, turning number into characters (so that it has the same amount of digits), and detecting confidence intervals. This is usually combined with column-specific formatting functions, like `format_p()`: ```{r} df %>% mutate(p = format_p(p, stars = TRUE)) %>% format_table() ``` ### Exporting The next step is *exporting*, which takes a data frame and renders it in a given format, so that it looks good in the console, or in markdown, HTML or latex. For console output, we need to `cat()` the returned result to get nicely printed code: ```{r} cat(export_table(df)) ``` For markdown or HTML, simply modify the `format` argument to markdown ("md")... ```{r} export_table(df, format = "md") ``` ...or HTML format. ```{r} export_table(df, format = "html") ``` This can be combined with `format_table()`. ```{r} df %>% format_table(ci_brackets = c("(", ")")) %>% export_table(format = "html") ``` TODO: What about display? 
insight/inst/doc/export.Rmd0000644000175000017500000001325314135533135015567 0ustar nileshnilesh--- title: "Exporting tables with captions and footers" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Exporting tables with captions and footers} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", warning = FALSE, message = FALSE ) ``` ## Exporting data frames into nice tables The `export_table()` functions creates nicely formatted tables in text, markdown or HTML format. You can add (coloured) captions or footer lines to the table as well, and you can even create multiple tables from a *list* of data frames. This vignette shows some examples how to do this (focusing on text output). Note that `export_table()` returns a *formatted string*, which prints nicely (which essentially just uses `cat()`). ***Note:*** The vignettes includes example with coloured text output. The coloured text is not rendered in this vignette. Rather, try out these examples and look at the results in your console! ```{r} library(insight) x <- iris[1:3, c(1, 2, 5)] # the table as "readable" output export_table(x) # see the underlying string unclass(export_table(x)) ``` ## Adding (coloured) titles A title can be added by either using the `caption` argument, or by adding a string as `table_caption` attribute. ```{r} # a simple caption export_table(x, caption = "Title") # we use a new object, so "x" has no attributes yet out <- x attr(out, "table_caption") <- "Another title" export_table(out) ``` `caption` can also be character vector of length 2, with the first element being the caption, and the second being the name of a colour (see `?print_colour` for available options). This is helpful for printing coloured table captions. ```{r} # A red caption export_table(x, caption = c("# Title", "red")) # same for attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") export_table(out) ``` Since the coloured text is not rendered, we provide a screenshot as example here: ```{r echo=FALSE, out.width="50%"} knitr::include_graphics("export_table.png", dpi = 72) ``` ## Adding (coloured) table footers Use the `footer` argument to add a footer line to the table. It is also possible to add a string as `table_footer` attribute. ```{r} # colored caption, simple footer export_table( x, caption = c("# Title", "red"), footer = "Footer line" ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_footer") <- "A simple footer" export_table(out) ``` Coloured footers can be added in the same way as for captions. ```{r} # colored caption and footer export_table( x, caption = c("# Title", "red"), footer = c("Footer line in blue", "blue") ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_footer") <- c("Footer line in blue", "blue") export_table(out) ``` ## Adding subtitles Subtitles can be added using the `subtitle` argument, or the `table_subtitle` attribute. Note that you must take care of adding new-line characters. 
```{r} # colored caption, subtitle and footer export_table( x, caption = c("# Title", "red"), subtitle = c("\n A subtitle in yellow", "yellow"), footer = c("Footer line in blue", "blue") ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_subtitle") <- c("\nA yellow subtitle", "yellow") attr(out, "table_footer") <- c("Footer line in blue", "blue") export_table(out) ``` ## Exporting multiple data frames into multiple tables Multiple data frames saved in a `list()` can be used to create multiple tables at once. ```{r} x <- list( data.frame(iris[1:3, c(1, 2, 5)]), data.frame(iris[51:53, c(1, 3, 5)]), data.frame(iris[111:113, c(1, 4, 5)]) ) # three different tables export_table(x) ``` ## Adding table captions and footers For multiple tables, it is also possible to add a caption for each table. Simply use a `list()` of strings for the `caption` argument, or add a `table_caption` attribute. to *each* data frame in the list. ```{r} # one caption for each table export_table(x, caption = list("Table 1", "Table 2", "Table 3")) # add attribute to *each* data frame out <- x for (i in 1:length(out)) { attr(out[[i]], "table_caption") <- paste("Table", i) } export_table(out) ``` In the same way you can add multiple footers. Note, however, that you have to take care about adding new-line characters. ```{r} # add captions and footers for each table export_table( x, caption = list("Table 1", "Table 2", "Table 3"), footer = list("Footer 1\n\n", "Footer 2\n\n", "Footer 3\n\n") ) out <- x for (i in 1:length(out)) { attr(out[[i]], "table_caption") <- paste("Table", i) attr(out[[i]], "table_footer") <- paste("Footer", i, "\n\n") } export_table(out) ``` Finally, you can even add multiple footer lines to each table, giving each a different color. In this case, each list element has to be a character vector of length 2 (the first element being the caption, and the second being the name of a colour). 
```{r} # Colored table captions and multiple footers per table export_table( x, caption = list(c("Red Table 1", "red"), c("Blue Table 2", "bue"), c("Green Table 3", "green")), footer = list( list(c("Footer line 1\n", "green"), c("Second line\n\n", "red")), list(c("Footer line A\n", "blue"), c("Second line\n\n", "green")), list(c("Footer line I\n", "yellow"), c("Second line\n\n", "blue")) ) ) ``` insight/inst/doc/export.R0000644000175000017500000000757114165530164015256 0ustar nileshnilesh## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>", warning = FALSE, message = FALSE ) ## ----------------------------------------------------------------------------- library(insight) x <- iris[1:3, c(1, 2, 5)] # the table as "readable" output export_table(x) # see the underlying string unclass(export_table(x)) ## ----------------------------------------------------------------------------- # a simple caption export_table(x, caption = "Title") # we use a new object, so "x" has no attributes yet out <- x attr(out, "table_caption") <- "Another title" export_table(out) ## ----------------------------------------------------------------------------- # A red caption export_table(x, caption = c("# Title", "red")) # same for attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") export_table(out) ## ----echo=FALSE, out.width="50%"---------------------------------------------- knitr::include_graphics("export_table.png", dpi = 72) ## ----------------------------------------------------------------------------- # colored caption, simple footer export_table( x, caption = c("# Title", "red"), footer = "Footer line" ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_footer") <- "A simple footer" export_table(out) ## ----------------------------------------------------------------------------- # colored caption and footer export_table( x, caption = c("# Title", "red"), footer = c("Footer line in blue", "blue") ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_footer") <- c("Footer line in blue", "blue") export_table(out) ## ----------------------------------------------------------------------------- # colored caption, subtitle and footer export_table( x, caption = c("# Title", "red"), subtitle = c("\n A subtitle in yellow", "yellow"), footer = c("Footer line in blue", "blue") ) # as attribute out <- x attr(out, "table_caption") <- c("*A green title*", "green") attr(out, "table_subtitle") <- c("\nA yellow subtitle", "yellow") attr(out, "table_footer") <- c("Footer line in blue", "blue") export_table(out) ## ----------------------------------------------------------------------------- x <- list( data.frame(iris[1:3, c(1, 2, 5)]), data.frame(iris[51:53, c(1, 3, 5)]), data.frame(iris[111:113, c(1, 4, 5)]) ) # three different tables export_table(x) ## ----------------------------------------------------------------------------- # one caption for each table export_table(x, caption = list("Table 1", "Table 2", "Table 3")) # add attribute to *each* data frame out <- x for (i in 1:length(out)) { attr(out[[i]], "table_caption") <- paste("Table", i) } export_table(out) ## ----------------------------------------------------------------------------- # add captions and footers for each table export_table( x, caption = list("Table 1", "Table 2", "Table 3"), footer = list("Footer 1\n\n", "Footer 2\n\n", "Footer 3\n\n") ) 
out <- x for (i in 1:length(out)) { attr(out[[i]], "table_caption") <- paste("Table", i) attr(out[[i]], "table_footer") <- paste("Footer", i, "\n\n") } export_table(out) ## ----------------------------------------------------------------------------- # Colored table captions and multiple footers per table export_table( x, caption = list(c("Red Table 1", "red"), c("Blue Table 2", "bue"), c("Green Table 3", "green")), footer = list( list(c("Footer line 1\n", "green"), c("Second line\n\n", "red")), list(c("Footer line A\n", "blue"), c("Second line\n\n", "green")), list(c("Footer line I\n", "yellow"), c("Second line\n\n", "blue")) ) ) insight/inst/doc/display.R0000644000175000017500000000332314165530162015367 0ustar nileshnilesh## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) if (!requireNamespace("poorman", quietly = TRUE) || !requireNamespace("gt", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(poorman) library(gt) } ## ---- warning=FALSE, message=FALSE-------------------------------------------- library(insight) df <- data.frame( Variable = c(1, 3, 5, 3, 1), Group = c("A", "A", "A", "B", "B"), CI = c(0.95, 0.95, 0.95, 0.95, 0.95), CI_low = c(3.35, 2.425, 6.213, 12.1, 1.23), CI_high = c(4.23, 5.31, 7.123, 13.5, 3.61), p = c(0.001, 0.0456, 0.45, 0.0042, 0.34) ) df ## ---- eval=FALSE-------------------------------------------------------------- # knitr::kable(df, format = "markdown") # ## ---- results='asis'---------------------------------------------------------- knitr::kable(df, format = "html") ## ----------------------------------------------------------------------------- format_table(df) ## ----------------------------------------------------------------------------- df %>% mutate(p = format_p(p, stars = TRUE)) %>% format_table() ## ----------------------------------------------------------------------------- cat(export_table(df)) ## ----------------------------------------------------------------------------- export_table(df, format = "md") ## ----------------------------------------------------------------------------- export_table(df, format = "html") ## ----------------------------------------------------------------------------- df %>% format_table(ci_brackets = c("(", ")")) %>% export_table(format = "html") insight/inst/doc/display.html0000644000175000017500000012726714165530163016151 0ustar nileshnilesh Formatting, printing and exporting tables

Formatting, printing and exporting tables

The difference between a dataframe and its render

Most of the objects encountered throughout the easystats packages are “tables”, i.e., 2D matrices with columns and rows. In R, these objects are often, at their core, data frames. Let’s create one to use as an example:

library(insight)

df <- data.frame(
  Variable = c(1, 3, 5, 3, 1),
  Group = c("A", "A", "A", "B", "B"),
  CI = c(0.95, 0.95, 0.95, 0.95, 0.95),
  CI_low = c(3.35, 2.425, 6.213, 12.1, 1.23),
  CI_high = c(4.23, 5.31, 7.123, 13.5, 3.61),
  p = c(0.001, 0.0456, 0.45, 0.0042, 0.34)
)

df
#>   Variable Group   CI CI_low CI_high      p
#> 1        1     A 0.95  3.350   4.230 0.0010
#> 2        3     A 0.95  2.425   5.310 0.0456
#> 3        5     A 0.95  6.213   7.123 0.4500
#> 4        3     B 0.95 12.100  13.500 0.0042
#> 5        1     B 0.95  1.230   3.610 0.3400

When we display it in the console (calling an object - e.g. df - is actually equivalent to calling print(df)), the output looks alright, but it could be improved. Some packages, such as knitr, have functions to create nicer output, for instance in markdown, so that the table can be nicely rendered in markdown documents when copied:

knitr::kable(df, format = "markdown")
| Variable|Group |   CI| CI_low| CI_high|      p|
|--------:|:-----|----:|------:|-------:|------:|
|        1|A     | 0.95|  3.350|   4.230| 0.0010|
|        3|A     | 0.95|  2.425|   5.310| 0.0456|
|        5|A     | 0.95|  6.213|   7.123| 0.4500|
|        3|B     | 0.95| 12.100|  13.500| 0.0042|
|        1|B     | 0.95|  1.230|   3.610| 0.3400|

Or HTML, which again makes it look great in HTML files (such as this webpage you’re reading):

knitr::kable(df, format = "html")
Variable   Group     CI   CI_low   CI_high        p
       1   A       0.95    3.350     4.230   0.0010
       3   A       0.95    2.425     5.310   0.0456
       5   A       0.95    6.213     7.123   0.4500
       3   B       0.95   12.100    13.500   0.0042
       1   B       0.95    1.230     3.610   0.3400

The insight workflow

The insight package also contains functions to improve the “printing”, or rendering, of tables. Its design dissociates two separate and independent steps: formatting and exporting.
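Both steps are demonstrated separately below, but since each of them is an ordinary function, they can also be chained directly. A minimal sketch (the same combination is shown in more detail at the end of this vignette):

# format the data frame first, then render the result as text and print it
cat(export_table(format_table(df)))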

Formatting

The purpose of formatting is to improve a given table, while still keeping it as a regular R data frame, so that it can, for instance, be further modified by the user.

format_table(df)
#>   Variable Group         95% CI     p
#> 1     1.00     A [ 3.35,  4.23] 0.001
#> 2     3.00     A [ 2.42,  5.31] 0.046
#> 3     5.00     A [ 6.21,  7.12] 0.450
#> 4     3.00     B [12.10, 13.50] 0.004
#> 5     1.00     B [ 1.23,  3.61] 0.340

As you can see, format_table() modifies columns, turning numbers into characters (so that all values have the same number of digits) and detecting confidence intervals. This is usually combined with column-specific formatting functions, like format_p():

df %>% 
  mutate(p = format_p(p, stars = TRUE)) %>% 
  format_table()
#>   Variable Group         95% CI           p
#> 1     1.00     A [ 3.35,  4.23] p = 0.001**
#> 2     3.00     A [ 2.42,  5.31] p = 0.046* 
#> 3     5.00     A [ 6.21,  7.12] p = 0.450  
#> 4     3.00     B [12.10, 13.50] p = 0.004**
#> 5     1.00     B [ 1.23,  3.61] p = 0.340
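Because format_table() keeps the result as a regular data frame, it can be inspected and modified like any other data frame before it is printed or exported. A minimal sketch:

out <- format_table(df)
# still a plain data frame ...
is.data.frame(out)
# ... so columns can be changed further, e.g. relabelling the groups
out$Group <- paste("Group", out$Group)
out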

Exporting

The next step is exporting, which takes a data frame and renders it in a given format, so that it looks good in the console, or in markdown, HTML or LaTeX.

For console output, we need to cat() the returned result to get a nicely printed table:

cat(export_table(df))
#> Variable | Group |   CI | CI_low | CI_high |        p
#> -----------------------------------------------------
#>        1 |     A | 0.95 |   3.35 |    4.23 | 1.00e-03
#>        3 |     A | 0.95 |   2.42 |    5.31 |     0.05
#>        5 |     A | 0.95 |   6.21 |    7.12 |     0.45
#>        3 |     B | 0.95 |  12.10 |   13.50 | 4.20e-03
#>        1 |     B | 0.95 |   1.23 |    3.61 |     0.34

For markdown or HTML output, simply set the format argument to “md” for markdown…

export_table(df, format = "md")
| Variable | Group |   CI | CI_low | CI_high |        p |
|---------:|------:|-----:|-------:|--------:|---------:|
|        1 |     A | 0.95 |   3.35 |    4.23 | 1.00e-03 |
|        3 |     A | 0.95 |   2.42 |    5.31 |     0.05 |
|        5 |     A | 0.95 |   6.21 |    7.12 |     0.45 |
|        3 |     B | 0.95 |  12.10 |   13.50 | 4.20e-03 |
|        1 |     B | 0.95 |   1.23 |    3.61 |     0.34 |

…or to “html” for HTML format.

export_table(df, format = "html")
Variable     CI   CI_low   CI_high          p
A
       1   0.95     3.35      4.23   1.00e-03
       3   0.95     2.42      5.31       0.05
       5   0.95     6.21      7.12       0.45
B
       3   0.95    12.10     13.50   4.20e-03
       1   0.95     1.23      3.61       0.34

This can be combined with format_table().

df %>% 
  format_table(ci_brackets = c("(", ")")) %>% 
  export_table(format = "html")
Variable           95% CI       p
A
    1.00   ( 3.35,  4.23)   0.001
    3.00   ( 2.42,  5.31)   0.046
    5.00   ( 6.21,  7.12)   0.450
B
    3.00   (12.10, 13.50)   0.004
    1.00   ( 1.23,  3.61)   0.340

TODO: What about display?
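One likely answer: insight also provides a display() generic with a method for data frames, intended as a one-step shortcut for the format/export workflow shown above. The exact defaults are not documented in this vignette, so the following is only a hedged sketch (it assumes display() accepts a format argument like export_table() and defaults to a markdown-like output):

# assumption: display() forwards the (formatted) data frame to export_table()
display(format_table(df))
# assumption: the output format can be switched in the same way as for export_table()
display(format_table(df), format = "html")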

insight/inst/doc/insight.R0000644000175000017500000000457314165530166015403 0ustar nileshnilesh## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) if (!requireNamespace("lme4", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } ## ----out.width="100%", echo=FALSE--------------------------------------------- knitr::include_graphics("insight_design_1.png", dpi = 72) ## ----out.width="65%", echo=FALSE---------------------------------------------- knitr::include_graphics("figure3a.png", dpi = 72) ## ----out.width="80%", echo=FALSE---------------------------------------------- knitr::include_graphics("figure3b.png", dpi = 72) ## ----out.width="80%", echo=FALSE---------------------------------------------- knitr::include_graphics("figure3c.png", dpi = 72) ## ----out.width="65%", echo=FALSE---------------------------------------------- knitr::include_graphics("figure3d.png", dpi = 72) ## ----echo=TRUE,message=FALSE,warning=FALSE------------------------------------ library(insight) library(lme4) data(sleepstudy) sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA sleepstudy$Weeks <- sleepstudy$Days / 7 sleepstudy$cat <- as.factor(sample(letters[1:4], nrow(sleepstudy), replace = TRUE)) for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } model <- lmer( Reaction ~ Days + I(Days^2) + log1p(Weeks) + cat + (1 | mygrp / mysubgrp) + (1 + Days | Subject), data = sleepstudy ) ## ----echo=TRUE,message=FALSE,warning=FALSE------------------------------------ # find the response variable find_response(model) # find all predictors, fixed part by default find_predictors(model) # find random effects, grouping factors only find_random(model) # find random slopes find_random_slopes(model) # find all predictors, including random effects find_predictors(model, effects = "all", component = "all") # find all terms, including response and random effects # this is essentially the same as the previous example plus response find_terms(model) # find all variables, i.e. also quadratic or log-transformed predictors find_variables(model) ## ----echo=TRUE,message=FALSE,warning=FALSE------------------------------------ # find model parameters, i.e. coefficients find_parameters(model) insight/inst/doc/export.html0000644000175000017500000027461314165530164016024 0ustar nileshnilesh Exporting tables with captions and footers

Exporting tables with captions and footers

Exporting data frames into nice tables

The export_table() function creates nicely formatted tables in text, markdown or HTML format. You can add (coloured) captions or footer lines to the table as well, and you can even create multiple tables from a list of data frames.

This vignette shows some examples of how to do this (focusing on text output). Note that export_table() returns a formatted string, which prints nicely (printing essentially just uses cat()).

Note: This vignette includes examples with coloured text output. The coloured text is not rendered in this vignette. Rather, try out these examples and look at the results in your console!

library(insight)
x <- iris[1:3, c(1, 2, 5)]

# the table as "readable" output
export_table(x)
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa

# see the underlying string
unclass(export_table(x))
#> [1] "Sepal.Length | Sepal.Width | Species\n------------------------------------\n        5.10 |        3.50 |  setosa\n        4.90 |        3.00 |  setosa\n        4.70 |        3.20 |  setosa\n"

Adding (coloured) titles

A title can be added either by using the caption argument, or by adding a string as the table_caption attribute.

# a simple caption
export_table(x, caption = "Title")
#> Title
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa

# we use a new object, so "x" has no attributes yet
out <- x
attr(out, "table_caption") <- "Another title"
export_table(out)
#> Another title
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa

caption can also be a character vector of length 2, with the first element being the caption, and the second being the name of a colour (see ?print_colour for available options). This is helpful for printing coloured table captions.

# A red caption
export_table(x, caption = c("# Title", "red"))
#> # Title
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa

# same for attribute
out <- x
attr(out, "table_caption") <- c("*A green title*", "green")
export_table(out)
#> *A green title*
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa

Since the coloured text is not rendered, we provide a screenshot as an example here:
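Alternatively, you can check how a given colour renders directly in your own console. The colour names accepted here are those understood by insight's print_colour()/print_color() helper (see ?print_colour, mentioned above); a minimal sketch, assuming the helper takes the text first and the colour name second:

# print a short test string in green - the colour is only visible in the console
print_color("This text should be green\n", "green")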

Adding (coloured) table footers

Use the footer argument to add a footer line to the table. It is also possible to add a string as the table_footer attribute.

# colored caption, simple footer
export_table(
  x, 
  caption = c("# Title", "red"),
  footer = "Footer line"
)
#> # Title
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer line

# as attribute
out <- x
attr(out, "table_caption") <- c("*A green title*", "green")
attr(out, "table_footer") <- "A simple footer"
export_table(out)
#> *A green title*
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> A simple footer

Coloured footers can be added in the same way as for captions.

# colored caption and footer
export_table(
  x, 
  caption = c("# Title", "red"),
  footer = c("Footer line in blue", "blue")
)
#> # Title
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer line in blue

# as attribute
out <- x
attr(out, "table_caption") <- c("*A green title*", "green")
attr(out, "table_footer") <- c("Footer line in blue", "blue")
export_table(out)
#> *A green title*
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer line in blue

Adding subtitles

Subtitles can be added using the subtitle argument, or the table_subtitle attribute. Note that you must take care of adding new-line characters.

# colored caption, subtitle and footer
export_table(
  x, 
  caption = c("# Title", "red"),
  subtitle = c("\n   A subtitle in yellow", "yellow"),
  footer = c("Footer line in blue", "blue")
)
#> # Title 
#>   A subtitle in yellow
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer line in blue

# as attribute
out <- x
attr(out, "table_caption") <- c("*A green title*", "green")
attr(out, "table_subtitle") <- c("\nA yellow subtitle", "yellow")
attr(out, "table_footer") <- c("Footer line in blue", "blue")
export_table(out)
#> *A green title* 
#> A yellow subtitle
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer line in blue

Exporting multiple data frames into multiple tables

Multiple data frames saved in a list() can be used to create multiple tables at once.

x <- list(
  data.frame(iris[1:3, c(1, 2, 5)]),
  data.frame(iris[51:53, c(1, 3, 5)]),
  data.frame(iris[111:113, c(1, 4, 5)])
)

# three different tables
export_table(x)
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> 
#> Sepal.Length | Petal.Length |    Species
#> ----------------------------------------
#>         7.00 |         4.70 | versicolor
#>         6.40 |         4.50 | versicolor
#>         6.90 |         4.90 | versicolor
#> 
#> Sepal.Length | Petal.Width |   Species
#> --------------------------------------
#>         6.50 |        2.00 | virginica
#>         6.40 |        1.90 | virginica
#>         6.80 |        2.10 | virginica

Adding table captions and footers

For multiple tables, it is also possible to add a caption for each table. Simply use a list() of strings for the caption argument, or add a table_caption attribute to each data frame in the list.

# one caption for each table
export_table(x, caption = list("Table 1", "Table 2", "Table 3"))
#> Table 1
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> 
#> Table 2
#> 
#> Sepal.Length | Petal.Length |    Species
#> ----------------------------------------
#>         7.00 |         4.70 | versicolor
#>         6.40 |         4.50 | versicolor
#>         6.90 |         4.90 | versicolor
#> 
#> Table 3
#> 
#> Sepal.Length | Petal.Width |   Species
#> --------------------------------------
#>         6.50 |        2.00 | virginica
#>         6.40 |        1.90 | virginica
#>         6.80 |        2.10 | virginica

# add attribute to *each* data frame
out <- x
for (i in 1:length(out)) {
  attr(out[[i]], "table_caption") <- paste("Table", i)
}
export_table(out)
#> Table 1
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> 
#> Table 2
#> 
#> Sepal.Length | Petal.Length |    Species
#> ----------------------------------------
#>         7.00 |         4.70 | versicolor
#>         6.40 |         4.50 | versicolor
#>         6.90 |         4.90 | versicolor
#> 
#> Table 3
#> 
#> Sepal.Length | Petal.Width |   Species
#> --------------------------------------
#>         6.50 |        2.00 | virginica
#>         6.40 |        1.90 | virginica
#>         6.80 |        2.10 | virginica

In the same way, you can add multiple footers. Note, however, that you have to take care of adding new-line characters.

# add captions and footers for each table
export_table(
  x, 
  caption = list("Table 1", "Table 2", "Table 3"),
  footer = list("Footer 1\n\n", "Footer 2\n\n", "Footer 3\n\n")
)
#> Table 1
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer 1
#> 
#> 
#> Table 2
#> 
#> Sepal.Length | Petal.Length |    Species
#> ----------------------------------------
#>         7.00 |         4.70 | versicolor
#>         6.40 |         4.50 | versicolor
#>         6.90 |         4.90 | versicolor
#> Footer 2
#> 
#> 
#> Table 3
#> 
#> Sepal.Length | Petal.Width |   Species
#> --------------------------------------
#>         6.50 |        2.00 | virginica
#>         6.40 |        1.90 | virginica
#>         6.80 |        2.10 | virginica
#> Footer 3

out <- x
for (i in 1:length(out)) {
  attr(out[[i]], "table_caption") <- paste("Table", i)
  attr(out[[i]], "table_footer") <- paste("Footer", i, "\n\n")
}
export_table(out)
#> Table 1
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer 1 
#> 
#> 
#> Table 2
#> 
#> Sepal.Length | Petal.Length |    Species
#> ----------------------------------------
#>         7.00 |         4.70 | versicolor
#>         6.40 |         4.50 | versicolor
#>         6.90 |         4.90 | versicolor
#> Footer 2 
#> 
#> 
#> Table 3
#> 
#> Sepal.Length | Petal.Width |   Species
#> --------------------------------------
#>         6.50 |        2.00 | virginica
#>         6.40 |        1.90 | virginica
#>         6.80 |        2.10 | virginica
#> Footer 3

Finally, you can even add multiple footer lines to each table, giving each a different colour. In this case, each list element has to be a character vector of length 2 (the first element being the footer text, and the second being the name of a colour).

# Colored table captions and multiple footers per table
export_table(
  x, 
  caption = list(c("Red Table 1", "red"),
                 c("Blue Table 2", "bue"),
                 c("Green Table 3", "green")),
  footer = list(
    list(c("Footer line 1\n", "green"), c("Second line\n\n", "red")),
    list(c("Footer line A\n", "blue"), c("Second line\n\n", "green")),
    list(c("Footer line I\n", "yellow"), c("Second line\n\n", "blue"))
  )
)
#> Red Table 1
#> 
#> Sepal.Length | Sepal.Width | Species
#> ------------------------------------
#>         5.10 |        3.50 |  setosa
#>         4.90 |        3.00 |  setosa
#>         4.70 |        3.20 |  setosa
#> Footer line 1
#> Second line
#> 
#> 
#> Blue Table 2
#> 
#> Sepal.Length | Petal.Length |    Species
#> ----------------------------------------
#>         7.00 |         4.70 | versicolor
#>         6.40 |         4.50 | versicolor
#>         6.90 |         4.90 | versicolor
#> Footer line A
#> Second line
#> 
#> 
#> Green Table 3
#> 
#> Sepal.Length | Petal.Width |   Species
#> --------------------------------------
#>         6.50 |        2.00 | virginica
#>         6.40 |        1.90 | virginica
#>         6.80 |        2.10 | virginica
#> Footer line I
#> Second line
insight/inst/doc/insight.Rmd0000644000175000017500000003224313721227747015724 0ustar nileshnilesh--- title: "Getting Started with Accessing Model Information" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{insight} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) if (!requireNamespace("lme4", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } ``` When fitting any statistical model, there are many useful pieces of information that are simultaneously calculated and stored beyond coefficient estimates and general model fit statistics. Although there exist some generic functions to obtain model information and data, many package-specific modeling functions do not provide such methods to allow users to access such valuable information. **insight** is an R-package that fills this important gap by providing a suite of functions to support almost any model. The goal of **insight**, then, is to provide tools to provide *easy*, *intuitive*, and *consistent* access to information contained in model objects. These tools aid applied research in virtually any field who fit, diagnose, and present statistical models by streamlining access to every aspect of many model objects via consistent syntax and output. Built with non-programmers in mind, **insight** offers a broad toolbox for making model and data information easily accessible. While **insight** offers many useful functions for working with and understanding model objects (discussed below), we suggest users start with `model_info()`, as this function provides a clean and consistent overview of model objects (*e.g.*, functional form of the model, the model family, link function, number of observations, variables included in the specification, etc.). With a clear understanding of the model introduced, users are able to adapt other functions for more nuanced exploration of and interaction with virtually any model object. ## Overview of Core Functions A statistical model is an object describing the relationship between variables. Although there are a lot of *different types* of models, each with their specificities, most of them also share some *common components*. The goal of **insight** is to help you retrieve these components. The `get_*` prefix extracts *values* (or *data*) associated with model-specific objects (e.g., parameters or variables), while the `find_*` prefix *lists* model-specific objects (e.g., priors or predictors). These are powerful families of functions allowing for great flexibility in use, whether at a high, descriptive level (`find_*`) or narrower level of statistical inspection and reporting (`get_*`). We point users to the package documentation or the complementary package website, https://easystats.github.io/insight/, for a detailed list of the arguments associated with each function as well as the returned values from each function. ```{r out.width="100%", echo=FALSE} knitr::include_graphics("insight_design_1.png", dpi = 72) ``` ## Definition of Model Components The functions from **insight** address different components of a model. In an effort to avoid confusion about specific "targets" of each function, in this section we provide a short explanation of **insight**'s definitions of regression model components. ### Data The dataset used to fit the model. ### Parameters Values estimated or learned from data that capture the relationship between variables. 
In regression models, these are usually referred to as *coefficients*. ### Response and Predictors * **response**: the outcome or response variable (dependent variable) of a regression model. * **predictor**: independent variables of (the _fixed_ part of) a regression model. For mixed models, variables that are only in the _random effects_ part (i.e. grouping factors) of the model are not returned as predictors by default. However, these can be included using additional arguments in the function call, treating predictors are "unique". As such, if a variable appears as a fixed effect and a random slope, it is treated as one (the same) predictor. ```{r out.width="65%", echo=FALSE} knitr::include_graphics("figure3a.png", dpi = 72) ``` ### Variables Any unique variable names that appear in a regression model, e.g., response variable, predictors or random effects. A "variable" only relates to the unique occurrence of a term, or the term name. For instance, the expression `x + poly(x, 2)` has only the variable `x`. ```{r out.width="80%", echo=FALSE} knitr::include_graphics("figure3b.png", dpi = 72) ``` ### Terms Terms themselves consist of variable and factor names separated by operators, or involve arithmetic expressions. For instance, the expression `x + poly(x, 2)` has _one_ variable `x`, but _two_ terms `x` and `poly(x, 2)`. ```{r out.width="80%", echo=FALSE} knitr::include_graphics("figure3c.png", dpi = 72) ``` ### Random Effects * **random slopes**: variables that are specified as random slopes in a mixed effects model. * **random or grouping factors**: variables that are specified as grouping variables in a mixed effects model. ```{r out.width="65%", echo=FALSE} knitr::include_graphics("figure3d.png", dpi = 72) ``` ## Examples *Aren't the predictors, terms, and parameters the same thing?* In some cases, yes. But not in all cases, and sometimes it is useful to have the "bare" variable names (terms), but sometimes it is also useful to have the information about a possible transformation of variables. That is the main reason for having functions that cover similar aspects of a model object (like `find_terms()` and `find_predictors()` or `find_variables()`). Here are some examples that demonstrate the differences of each function: ```{r echo=TRUE,message=FALSE,warning=FALSE} library(insight) library(lme4) data(sleepstudy) sleepstudy$mygrp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$mysubgrp <- NA sleepstudy$Weeks <- sleepstudy$Days / 7 sleepstudy$cat <- as.factor(sample(letters[1:4], nrow(sleepstudy), replace = TRUE)) for (i in 1:5) { filter_group <- sleepstudy$mygrp == i sleepstudy$mysubgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } model <- lmer( Reaction ~ Days + I(Days^2) + log1p(Weeks) + cat + (1 | mygrp / mysubgrp) + (1 + Days | Subject), data = sleepstudy ) ``` ```{r echo=TRUE,message=FALSE,warning=FALSE} # find the response variable find_response(model) # find all predictors, fixed part by default find_predictors(model) # find random effects, grouping factors only find_random(model) # find random slopes find_random_slopes(model) # find all predictors, including random effects find_predictors(model, effects = "all", component = "all") # find all terms, including response and random effects # this is essentially the same as the previous example plus response find_terms(model) # find all variables, i.e. also quadratic or log-transformed predictors find_variables(model) ``` Finally, there is `find_parameters()`. 
Parameters are also known as *coefficients*, and `find_parameters()` does exactly that: returns the model coefficients. ```{r echo=TRUE,message=FALSE,warning=FALSE} # find model parameters, i.e. coefficients find_parameters(model) ``` ## Examples of Use Cases in R We now would like to provide examples of use cases of the **insight** package. These examples probably do not cover typical real-world problems, but serve as illustration of the core idea of this package: The unified interface to access model information. **insight** should help both users and package developers in order to reduce the hassle with the many exceptions from various modelling packages when accessing model information. ### Making Predictions at Specific Values of a Term of Interest Say, the goal is to make predictions for a certain term, holding remaining co-variates constant. This is achieved by calling `predict()` and feeding the `newdata`-argument with the values of the term of interest as well as the "constant" values for remaining co-variates. The functions `get_data()` and `find_predictors()` are used to get this information, which then can be used in the call to `predict()`. In this example, we fit a simple linear model, but it could be replaced by (m)any other models, so this approach is "universal" and applies to many different model objects. ``` r library(insight) m <- lm( Sepal.Length ~ Species + Petal.Width + Sepal.Width, data = iris ) dat <- get_data(m) pred <- find_predictors(m, flatten = TRUE) l <- lapply(pred, function(x) { if (is.numeric(dat[[x]])) mean(dat[[x]]) else unique(dat[[x]]) }) names(l) <- pred l <- as.data.frame(l) cbind(l, predictions = predict(m, newdata = l)) #> Species Petal.Width Sepal.Width predictions #> 1 setosa 1.199333 3.057333 5.101427 #> 2 versicolor 1.199333 3.057333 6.089557 #> 3 virginica 1.199333 3.057333 6.339015 ``` ### Printing Model Coefficients The next example should emphasize the possibilities to generalize functions to many different model objects using **insight**. The aim is simply to print coefficients in a complete, human readable sentence. The first approach uses the functions that are available for some, but obviously not for all models, to access the information about model coefficients. ``` r print_params <- function(model){ paste0( "My parameters are ", paste0(row.names(summary(model)$coefficients), collapse = ", "), ", thank you for your attention!" ) } m1 <- lm(Sepal.Length ~ Petal.Width, data = iris) print_params(m1) #> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!" # obviously, something is missing in the output m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris) print_params(m2) #> [1] "My parameters are , thank you for your attention!" ``` As we can see, the function fails for *gam*-models. As the access to models depends on the type of the model in the R ecosystem, we would need to create specific functions for all models types. With **insight**, users can write a function without having to worry about the model type. ``` r print_params <- function(model){ paste0( "My parameters are ", paste0(insight::find_parameters(model, flatten = TRUE), collapse = ", "), ", thank you for your attention!" ) } m1 <- lm(Sepal.Length ~ Petal.Width, data = iris) print_params(m1) #> [1] "My parameters are (Intercept), Petal.Width, thank you for your attention!" 
m2 <- mgcv::gam(Sepal.Length ~ Petal.Width + s(Petal.Length), data = iris) print_params(m2) #> [1] "My parameters are (Intercept), Petal.Width, s(Petal.Length), thank you for your attention!" ``` ## Examples of Use Cases in R packages **insight** is already used by different packages to solve problems that typically occur when the users' inputs are different model objects of varying complexity. For example, [**ggeffects**](https://strengejacke.github.io/ggeffects/), a package that computes and visualizes marginal effects of regression models, requires extraction of the data (`get_data()`) that was used to fit the models, and also the retrieval all model predictors (`find_predictors()`) to decide which covariates are held constant when computing marginal effects. All of this information is required in order to create a data frame for `predict(newdata=)`. Furthermore, the models' link-functions (`link_function()`) resp. link-inverse-functions (`link_inverse()`) are required to obtain predictors at the model's response scale. The [**sjPlot**-package](https://strengejacke.github.io/sjPlot/) creates plots or summary tables from regression models, and uses **insight**-functions to get model-information (`model_info()` or `find_response()`), which is used to build the components of the final plot or table. This information helps, for example, in labeling table columns by providing information on the effect type (odds ratio, incidence rate ratio, etc.) or the different model components, which split plots and tables into the "conditional" and "zero-inflated" parts of a model, in the cases of models with zero-inflation. [**bayestestR**](https://easystats.github.io/bayestestR/) mainly relies on `get_priors()` and `get_parameters()` to retrieve the necessary information to compute various indices or statistics of Bayesian models (like HDI, Credible Interval, MCSE, effective sample size, Bayes factors, etc.). The advantage of `get_parameters()` in this context is that regardless of the number of parameters the posterior distribution has, the necessary data can be easily accessed from the model objects. There is no need to write original, complicated code or regular expressions. A last example is the [**performance**-package](https://easystats.github.io/performance/), which provides functions for computing measures to assess model quality. Many of these indices (e.g. check for overdispersion or zero-inflation, predictive accuracy, logloss, RMSE, etc.) require the number of observations (`n_obs()`) or the data from the response-variable (`get_response()`). Again, in this context, functions from **insight** are helpful, because they offer a unified access to this information. insight/data/0000755000175000017500000000000013620010062012750 5ustar nileshnileshinsight/data/fish.RData0000644000175000017500000000673113617043573014650 0ustar nileshnileshZ TW%+1c͉N\D61f4D D4Hb4.51c xԈG(%J%b 4 jZ9{ޯ_eFL}{7q'z :Nuq^.lj玾m=yk5WwjU.WWxꯥ=oM+[-Xrm-rn]ѝzZڷasʶ^(<:qYe<cuqf6jVT[y\A#+W85GswG̓^̏lkeiwɫ6nۜCպѺ=[Uj s l-Zt's&pwMily륵== V1z\|t_眎{Z+C۪GGť1|X-8Ը9b51ǵr@ *1i:i},\Z1V-t\VnX4W9z׭1֢ڵӰqt j/2}ao^+6CsQæseW;Aie *6t\j]|W~l~kmv=E٪Bкoa[Aաƕ_Ikٔ8 nԚH{Km xVvE2nv.њB2%}cf'_$8ƚNA} -;C-0F[R y,dF@%BܦdĵxftO9׀SF[_< Ec,Q1yԥ,ԍOZ:\~ŝ:{q>o3#!!N~Ua5. vByB71}9pM}Q?qĭ]]ٞOO>׆wk9b[9%?U"9?+| Ik!:zYr/=^yW}Q=/΂wu@Ƴ }U3Ghk8p z,ԥ_tcxB< s5zy;^^>=-{& Yct@YOx&!FmcgO%˒z<[s]Kg+-Ek;hR^48ܵGb|w}(VwX.oys+⬧Pd%qLmU~Ͱ;rm3+<?HtH'~[ *E? )z|.o jЖ_Z ;I?)eOމ8؋Cz|~mQ%]o>uuget%\錼XfGlm@{uUѵfJA7!^WTx$ _&oY.-&*jWB6]#zy3Nt)1oF[3V[? 
vO*ր=- v~Ɉ[?=.| ;t[g=w}n}'+;?c |EיIhV""}yÒu8׎ AV1e14f& U2f%7#Ɇ/^A~@Q0;9^C_fVax qGm̧}ۿ _a۶rjSOJ(^tTGz "T605;=:3 )U93NO7s,WtF~ 32tz*~cY[ i=x#x0;8K6Gc|{A>/$~ݵ%]5~']x/ x 3QO#.A/&/0g?prנ}ޓoCy{4JzCG۱d^*}qL]VcV.u@R<J1o;/dWAl+s/egڢRyO*LKwG+ < !_"Kˇn{e9&},u.G_T8tطn>WLp=!%qd4~zo0:R}-+KMGLx>%3- ~-kX78^9F/  ՜E궯~FY/^1d\ D<_)U?DēorA4 yzuQyXQN)J)e'sG!>WDTT E>t[xn`F=%)?ȣSyn4G"b@R7)C}}¬o+B|MߪWiN{#q%KwY~ ׮|T2? ӓ0SaWmݧ_(y9?&oEk\" 9W-7~C՟~8^i[#z}P"4N_$%%wJC;T7a*ǩ#jswV|NtO>Ծay;vva13 ז87tNx,l^̓scJ(|fhTs6,tμ=OilX'^ػQѳ;3gK"7Y8 %Z''1 v޳BBD,ppʟQm9-insight/NAMESPACE0000644000175000017500000013117614165527776013325 0ustar nileshnilesh# Generated by roxygen2: do not edit by hand S3method(as.data.frame,get_predicted) S3method(as.matrix,get_predicted) S3method(as.numeric,insight_aux) S3method(clean_names,character) S3method(clean_names,default) S3method(clean_parameters,BFBayesFactor) S3method(clean_parameters,MCMCglmm) S3method(clean_parameters,afex_aov) S3method(clean_parameters,aovlist) S3method(clean_parameters,bamlss) S3method(clean_parameters,blavaan) S3method(clean_parameters,brmsfit) S3method(clean_parameters,default) S3method(clean_parameters,emmGrid) S3method(clean_parameters,emm_list) S3method(clean_parameters,glmm) S3method(clean_parameters,lavaan) S3method(clean_parameters,merModList) S3method(clean_parameters,mlm) S3method(clean_parameters,model_fit) S3method(clean_parameters,stanfit) S3method(clean_parameters,stanmvreg) S3method(clean_parameters,stanreg) S3method(clean_parameters,wbgee) S3method(clean_parameters,wbm) S3method(display,data.frame) S3method(ellipsis_info,ListLavaan) S3method(ellipsis_info,ListModels) S3method(ellipsis_info,ListObjects) S3method(ellipsis_info,ListRegressions) S3method(ellipsis_info,ListVarious) S3method(ellipsis_info,default) S3method(find_algorithm,Arima) S3method(find_algorithm,BBmm) S3method(find_algorithm,BBreg) S3method(find_algorithm,Gam) S3method(find_algorithm,LORgee) S3method(find_algorithm,MixMod) S3method(find_algorithm,afex_aov) S3method(find_algorithm,bayesQR) S3method(find_algorithm,bayesx) S3method(find_algorithm,bigglm) S3method(find_algorithm,biglm) S3method(find_algorithm,blavaan) S3method(find_algorithm,brmsfit) S3method(find_algorithm,crq) S3method(find_algorithm,default) S3method(find_algorithm,gam) S3method(find_algorithm,gamlss) S3method(find_algorithm,glimML) S3method(find_algorithm,glm) S3method(find_algorithm,glmmTMB) S3method(find_algorithm,glmrob) S3method(find_algorithm,lm) S3method(find_algorithm,lmRob) S3method(find_algorithm,lme) S3method(find_algorithm,lmrob) S3method(find_algorithm,logistf) S3method(find_algorithm,merMod) S3method(find_algorithm,merModList) S3method(find_algorithm,mixed) S3method(find_algorithm,rlmerMod) S3method(find_algorithm,rq) S3method(find_algorithm,rqss) S3method(find_algorithm,scam) S3method(find_algorithm,speedglm) S3method(find_algorithm,speedlm) S3method(find_algorithm,stanfit) S3method(find_algorithm,stanreg) S3method(find_algorithm,systemfit) S3method(find_formula,BBmm) S3method(find_formula,BFBayesFactor) S3method(find_formula,BGGM) S3method(find_formula,DirichletRegModel) S3method(find_formula,HLfit) S3method(find_formula,LORgee) S3method(find_formula,MANOVA) S3method(find_formula,MCMCglmm) S3method(find_formula,MixMod) S3method(find_formula,RM) S3method(find_formula,SemiParBIV) S3method(find_formula,afex_aov) S3method(find_formula,anova) S3method(find_formula,aovlist) S3method(find_formula,averaging) S3method(find_formula,bamlss) S3method(find_formula,betamfx) S3method(find_formula,betaor) 
S3method(find_formula,betareg) S3method(find_formula,bife) S3method(find_formula,brmsfit) S3method(find_formula,btergm) S3method(find_formula,censReg) S3method(find_formula,cgamm) S3method(find_formula,cglm) S3method(find_formula,clm2) S3method(find_formula,clmm) S3method(find_formula,clmm2) S3method(find_formula,coxme) S3method(find_formula,cpglmm) S3method(find_formula,data.frame) S3method(find_formula,default) S3method(find_formula,feglm) S3method(find_formula,feis) S3method(find_formula,felm) S3method(find_formula,fixest) S3method(find_formula,gam) S3method(find_formula,gamlss) S3method(find_formula,gamm) S3method(find_formula,gee) S3method(find_formula,glht) S3method(find_formula,glimML) S3method(find_formula,glmm) S3method(find_formula,glmmTMB) S3method(find_formula,glmmadmb) S3method(find_formula,gls) S3method(find_formula,hurdle) S3method(find_formula,ivFixed) S3method(find_formula,iv_robust) S3method(find_formula,ivprobit) S3method(find_formula,ivreg) S3method(find_formula,joint) S3method(find_formula,list) S3method(find_formula,lme) S3method(find_formula,logitmfx) S3method(find_formula,logitor) S3method(find_formula,lqmm) S3method(find_formula,maxLik) S3method(find_formula,maxim) S3method(find_formula,mcmc.list) S3method(find_formula,mediate) S3method(find_formula,merMod) S3method(find_formula,merModList) S3method(find_formula,meta_bma) S3method(find_formula,meta_fixed) S3method(find_formula,meta_random) S3method(find_formula,metaplus) S3method(find_formula,mhurdle) S3method(find_formula,mira) S3method(find_formula,mixed) S3method(find_formula,mixor) S3method(find_formula,mjoint) S3method(find_formula,mmclogit) S3method(find_formula,model_fit) S3method(find_formula,mvord) S3method(find_formula,negbinirr) S3method(find_formula,negbinmfx) S3method(find_formula,nlmerMod) S3method(find_formula,pgmm) S3method(find_formula,plm) S3method(find_formula,poissonirr) S3method(find_formula,poissonmfx) S3method(find_formula,probitmfx) S3method(find_formula,rlmerMod) S3method(find_formula,rma) S3method(find_formula,selection) S3method(find_formula,sem) S3method(find_formula,stanmvreg) S3method(find_formula,stanreg) S3method(find_formula,svy_vglm) S3method(find_formula,systemfit) S3method(find_formula,tobit) S3method(find_formula,wbgee) S3method(find_formula,wbm) S3method(find_formula,zcpglm) S3method(find_formula,zeroinfl) S3method(find_formula,zerotrunc) S3method(find_parameters,BBmm) S3method(find_parameters,BBreg) S3method(find_parameters,BFBayesFactor) S3method(find_parameters,BGGM) S3method(find_parameters,DirichletRegModel) S3method(find_parameters,Gam) S3method(find_parameters,HLfit) S3method(find_parameters,MCMCglmm) S3method(find_parameters,MixMod) S3method(find_parameters,Rchoice) S3method(find_parameters,SemiParBIV) S3method(find_parameters,aareg) S3method(find_parameters,afex_aov) S3method(find_parameters,aovlist) S3method(find_parameters,averaging) S3method(find_parameters,bamlss) S3method(find_parameters,bayesQR) S3method(find_parameters,bayesx) S3method(find_parameters,bcplm) S3method(find_parameters,betamfx) S3method(find_parameters,betaor) S3method(find_parameters,betareg) S3method(find_parameters,bfsl) S3method(find_parameters,blavaan) S3method(find_parameters,bracl) S3method(find_parameters,brmsfit) S3method(find_parameters,brmultinom) S3method(find_parameters,btergm) S3method(find_parameters,cgam) S3method(find_parameters,clm2) S3method(find_parameters,clmm2) S3method(find_parameters,coxme) S3method(find_parameters,cpglmm) S3method(find_parameters,crq) 
S3method(find_parameters,crqs) S3method(find_parameters,crr) S3method(find_parameters,data.frame) S3method(find_parameters,default) S3method(find_parameters,emmGrid) S3method(find_parameters,emm_list) S3method(find_parameters,flexsurvreg) S3method(find_parameters,gam) S3method(find_parameters,gamlss) S3method(find_parameters,gamm) S3method(find_parameters,gbm) S3method(find_parameters,glht) S3method(find_parameters,glimML) S3method(find_parameters,glmm) S3method(find_parameters,glmmTMB) S3method(find_parameters,glmmadmb) S3method(find_parameters,glmx) S3method(find_parameters,hurdle) S3method(find_parameters,ivFixed) S3method(find_parameters,ivprobit) S3method(find_parameters,lavaan) S3method(find_parameters,lme) S3method(find_parameters,lmodel2) S3method(find_parameters,logitmfx) S3method(find_parameters,logitor) S3method(find_parameters,lqm) S3method(find_parameters,lqmm) S3method(find_parameters,lrm) S3method(find_parameters,manova) S3method(find_parameters,maov) S3method(find_parameters,mcmc) S3method(find_parameters,mcmc.list) S3method(find_parameters,mediate) S3method(find_parameters,merMod) S3method(find_parameters,merModList) S3method(find_parameters,meta_bma) S3method(find_parameters,meta_fixed) S3method(find_parameters,meta_random) S3method(find_parameters,metaplus) S3method(find_parameters,mhurdle) S3method(find_parameters,mipo) S3method(find_parameters,mira) S3method(find_parameters,mixed) S3method(find_parameters,mixor) S3method(find_parameters,mjoint) S3method(find_parameters,mle) S3method(find_parameters,mle2) S3method(find_parameters,mlm) S3method(find_parameters,model_fit) S3method(find_parameters,multinom) S3method(find_parameters,mvord) S3method(find_parameters,negbinirr) S3method(find_parameters,negbinmfx) S3method(find_parameters,nlmerMod) S3method(find_parameters,nls) S3method(find_parameters,pgmm) S3method(find_parameters,poissonirr) S3method(find_parameters,poissonmfx) S3method(find_parameters,polr) S3method(find_parameters,probitmfx) S3method(find_parameters,ridgelm) S3method(find_parameters,riskRegression) S3method(find_parameters,rlmerMod) S3method(find_parameters,rma) S3method(find_parameters,rms) S3method(find_parameters,rqs) S3method(find_parameters,rqss) S3method(find_parameters,scam) S3method(find_parameters,selection) S3method(find_parameters,sem) S3method(find_parameters,sim) S3method(find_parameters,sim.merMod) S3method(find_parameters,stanfit) S3method(find_parameters,stanmvreg) S3method(find_parameters,stanreg) S3method(find_parameters,summary.lm) S3method(find_parameters,survreg) S3method(find_parameters,systemfit) S3method(find_parameters,tobit) S3method(find_parameters,vgam) S3method(find_parameters,wbgee) S3method(find_parameters,wbm) S3method(find_parameters,zcpglm) S3method(find_parameters,zeroinfl) S3method(find_parameters,zerotrunc) S3method(find_predictors,afex_aov) S3method(find_predictors,bfsl) S3method(find_predictors,default) S3method(find_predictors,selection) S3method(find_random,afex_aov) S3method(find_random,default) S3method(find_response,bfsl) S3method(find_response,default) S3method(find_response,joint) S3method(find_response,mediate) S3method(find_response,mjoint) S3method(find_response,model_fit) S3method(find_response,selection) S3method(find_terms,afex_aov) S3method(find_terms,aovlist) S3method(find_terms,bfsl) S3method(find_terms,default) S3method(find_weights,brmsfit) S3method(find_weights,default) S3method(find_weights,merMod) S3method(find_weights,model_fit) S3method(format,insight_formula) S3method(format_string,character) 
S3method(format_string,data.frame) S3method(format_string,default) S3method(format_value,character) S3method(format_value,data.frame) S3method(format_value,double) S3method(format_value,factor) S3method(format_value,logical) S3method(format_value,numeric) S3method(get_call,default) S3method(get_call,glm) S3method(get_call,lm) S3method(get_call,lmerMod) S3method(get_call,merMod) S3method(get_call,model_fit) S3method(get_call,mvord) S3method(get_call,stanreg) S3method(get_data,Arima) S3method(get_data,BBmm) S3method(get_data,BFBayesFactor) S3method(get_data,BGGM) S3method(get_data,DirichletRegModel) S3method(get_data,HLfit) S3method(get_data,LORgee) S3method(get_data,MANOVA) S3method(get_data,MCMCglmm) S3method(get_data,MixMod) S3method(get_data,RM) S3method(get_data,aareg) S3method(get_data,afex_aov) S3method(get_data,averaging) S3method(get_data,betamfx) S3method(get_data,betaor) S3method(get_data,bfsl) S3method(get_data,bife) S3method(get_data,bigglm) S3method(get_data,biglm) S3method(get_data,blavaan) S3method(get_data,bracl) S3method(get_data,brmsfit) S3method(get_data,clm2) S3method(get_data,clmm) S3method(get_data,clmm2) S3method(get_data,complmrob) S3method(get_data,cpglmm) S3method(get_data,data.frame) S3method(get_data,default) S3method(get_data,feglm) S3method(get_data,feis) S3method(get_data,felm) S3method(get_data,fixest) S3method(get_data,gamlss) S3method(get_data,gamm) S3method(get_data,gbm) S3method(get_data,gee) S3method(get_data,glht) S3method(get_data,glimML) S3method(get_data,glmm) S3method(get_data,glmmTMB) S3method(get_data,glmmadmb) S3method(get_data,gls) S3method(get_data,gmnl) S3method(get_data,gnls) S3method(get_data,htest) S3method(get_data,hurdle) S3method(get_data,ivFixed) S3method(get_data,iv_robust) S3method(get_data,ivprobit) S3method(get_data,ivreg) S3method(get_data,lavaan) S3method(get_data,lme) S3method(get_data,logitmfx) S3method(get_data,logitor) S3method(get_data,lqmm) S3method(get_data,mcmc.list) S3method(get_data,mediate) S3method(get_data,merMod) S3method(get_data,merModList) S3method(get_data,meta_bma) S3method(get_data,meta_fixed) S3method(get_data,meta_random) S3method(get_data,metaplus) S3method(get_data,mipo) S3method(get_data,mixed) S3method(get_data,mixor) S3method(get_data,mjoint) S3method(get_data,mle) S3method(get_data,mle2) S3method(get_data,mlogit) S3method(get_data,model_fit) S3method(get_data,negbinirr) S3method(get_data,negbinmfx) S3method(get_data,nlrq) S3method(get_data,nls) S3method(get_data,pgmm) S3method(get_data,plm) S3method(get_data,poissonirr) S3method(get_data,poissonmfx) S3method(get_data,probitmfx) S3method(get_data,rlmerMod) S3method(get_data,rma) S3method(get_data,robmixglm) S3method(get_data,rqss) S3method(get_data,selection) S3method(get_data,sem) S3method(get_data,stanmvreg) S3method(get_data,stanreg) S3method(get_data,summary.lm) S3method(get_data,survfit) S3method(get_data,svy_vglm) S3method(get_data,tobit) S3method(get_data,vgam) S3method(get_data,vglm) S3method(get_data,wbgee) S3method(get_data,wbm) S3method(get_data,zcpglm) S3method(get_data,zeroinfl) S3method(get_data,zerotrunc) S3method(get_deviance,MixMod) S3method(get_deviance,default) S3method(get_deviance,glmerMod) S3method(get_deviance,glmmTMB) S3method(get_deviance,lmerMod) S3method(get_deviance,lrm) S3method(get_deviance,model_fit) S3method(get_deviance,stanreg) S3method(get_df,BBmm) S3method(get_df,BBreg) S3method(get_df,betamfx) S3method(get_df,betaor) S3method(get_df,bfsl) S3method(get_df,cgam) S3method(get_df,coeftest) S3method(get_df,default) 
S3method(get_df,emmGrid) S3method(get_df,emm_list) S3method(get_df,glht) S3method(get_df,ivFixed) S3method(get_df,ivprobit) S3method(get_df,logitmfx) S3method(get_df,logitor) S3method(get_df,lqm) S3method(get_df,lqmm) S3method(get_df,merModList) S3method(get_df,mipo) S3method(get_df,mira) S3method(get_df,model_fit) S3method(get_df,multinom) S3method(get_df,negbinirr) S3method(get_df,negbinmfx) S3method(get_df,nnet) S3method(get_df,plm) S3method(get_df,poissonirr) S3method(get_df,poissonmfx) S3method(get_df,probitmfx) S3method(get_df,rlm) S3method(get_df,rqs) S3method(get_df,selection) S3method(get_df,summary.lm) S3method(get_df,systemfit) S3method(get_df,vgam) S3method(get_family,default) S3method(get_family,list) S3method(get_family,model_fit) S3method(get_intercept,default) S3method(get_intercept,stanreg) S3method(get_loglikelihood,afex_aov) S3method(get_loglikelihood,cpglm) S3method(get_loglikelihood,crr) S3method(get_loglikelihood,default) S3method(get_loglikelihood,gam) S3method(get_loglikelihood,gamm) S3method(get_loglikelihood,glm) S3method(get_loglikelihood,glmer) S3method(get_loglikelihood,iv_robust) S3method(get_loglikelihood,ivreg) S3method(get_loglikelihood,list) S3method(get_loglikelihood,lm) S3method(get_loglikelihood,model_fit) S3method(get_loglikelihood,plm) S3method(get_loglikelihood,stanreg) S3method(get_loglikelihood,svycoxph) S3method(get_modelmatrix,afex_aov) S3method(get_modelmatrix,brmsfit) S3method(get_modelmatrix,clmm) S3method(get_modelmatrix,cpglmm) S3method(get_modelmatrix,default) S3method(get_modelmatrix,gls) S3method(get_modelmatrix,lme) S3method(get_modelmatrix,merMod) S3method(get_modelmatrix,rlm) S3method(get_parameters,BBmm) S3method(get_parameters,BBreg) S3method(get_parameters,BFBayesFactor) S3method(get_parameters,BGGM) S3method(get_parameters,DirichletRegModel) S3method(get_parameters,Gam) S3method(get_parameters,HLfit) S3method(get_parameters,MCMCglmm) S3method(get_parameters,MixMod) S3method(get_parameters,Rchoice) S3method(get_parameters,SemiParBIV) S3method(get_parameters,aareg) S3method(get_parameters,afex_aov) S3method(get_parameters,aov) S3method(get_parameters,aovlist) S3method(get_parameters,averaging) S3method(get_parameters,bamlss) S3method(get_parameters,bayesQR) S3method(get_parameters,bayesx) S3method(get_parameters,bcplm) S3method(get_parameters,betamfx) S3method(get_parameters,betaor) S3method(get_parameters,betareg) S3method(get_parameters,bfsl) S3method(get_parameters,blavaan) S3method(get_parameters,blrm) S3method(get_parameters,bracl) S3method(get_parameters,brmsfit) S3method(get_parameters,brmultinom) S3method(get_parameters,btergm) S3method(get_parameters,cgam) S3method(get_parameters,clm2) S3method(get_parameters,clmm2) S3method(get_parameters,coxme) S3method(get_parameters,cpglmm) S3method(get_parameters,crq) S3method(get_parameters,crqs) S3method(get_parameters,crr) S3method(get_parameters,data.frame) S3method(get_parameters,default) S3method(get_parameters,emmGrid) S3method(get_parameters,emm_list) S3method(get_parameters,epi.2by2) S3method(get_parameters,flexsurvreg) S3method(get_parameters,gam) S3method(get_parameters,gamlss) S3method(get_parameters,gamm) S3method(get_parameters,gbm) S3method(get_parameters,glht) S3method(get_parameters,glimML) S3method(get_parameters,glmm) S3method(get_parameters,glmmTMB) S3method(get_parameters,glmmadmb) S3method(get_parameters,glmx) S3method(get_parameters,htest) S3method(get_parameters,hurdle) S3method(get_parameters,ivFixed) S3method(get_parameters,ivprobit) 
S3method(get_parameters,lavaan) S3method(get_parameters,lme) S3method(get_parameters,lmodel2) S3method(get_parameters,logitmfx) S3method(get_parameters,logitor) S3method(get_parameters,lqm) S3method(get_parameters,lqmm) S3method(get_parameters,lrm) S3method(get_parameters,manova) S3method(get_parameters,maov) S3method(get_parameters,margins) S3method(get_parameters,mcmc) S3method(get_parameters,mcmc.list) S3method(get_parameters,mediate) S3method(get_parameters,merMod) S3method(get_parameters,merModList) S3method(get_parameters,meta_bma) S3method(get_parameters,meta_fixed) S3method(get_parameters,meta_random) S3method(get_parameters,metaplus) S3method(get_parameters,mhurdle) S3method(get_parameters,mipo) S3method(get_parameters,mira) S3method(get_parameters,mixed) S3method(get_parameters,mixor) S3method(get_parameters,mjoint) S3method(get_parameters,mle) S3method(get_parameters,mle2) S3method(get_parameters,mlm) S3method(get_parameters,model_fit) S3method(get_parameters,multinom) S3method(get_parameters,mvord) S3method(get_parameters,negbinirr) S3method(get_parameters,negbinmfx) S3method(get_parameters,nlmerMod) S3method(get_parameters,orm) S3method(get_parameters,pgmm) S3method(get_parameters,poissonirr) S3method(get_parameters,poissonmfx) S3method(get_parameters,polr) S3method(get_parameters,probitmfx) S3method(get_parameters,ridgelm) S3method(get_parameters,riskRegression) S3method(get_parameters,rlmerMod) S3method(get_parameters,rma) S3method(get_parameters,rms) S3method(get_parameters,rqs) S3method(get_parameters,rqss) S3method(get_parameters,scam) S3method(get_parameters,selection) S3method(get_parameters,sem) S3method(get_parameters,sim) S3method(get_parameters,sim.merMod) S3method(get_parameters,stanfit) S3method(get_parameters,stanmvreg) S3method(get_parameters,stanreg) S3method(get_parameters,summary.lm) S3method(get_parameters,survreg) S3method(get_parameters,systemfit) S3method(get_parameters,tobit) S3method(get_parameters,vgam) S3method(get_parameters,wbgee) S3method(get_parameters,wbm) S3method(get_parameters,zcpglm) S3method(get_parameters,zeroinfl) S3method(get_parameters,zerotrunc) S3method(get_predicted,bife) S3method(get_predicted,brmsfit) S3method(get_predicted,clm) S3method(get_predicted,crr) S3method(get_predicted,data.frame) S3method(get_predicted,default) S3method(get_predicted,fa) S3method(get_predicted,faMain) S3method(get_predicted,fixest) S3method(get_predicted,gam) S3method(get_predicted,gamm) S3method(get_predicted,glm) S3method(get_predicted,glmmTMB) S3method(get_predicted,hurdle) S3method(get_predicted,list) S3method(get_predicted,lm) S3method(get_predicted,lmerMod) S3method(get_predicted,lrm) S3method(get_predicted,merMod) S3method(get_predicted,multinom) S3method(get_predicted,polr) S3method(get_predicted,prcomp) S3method(get_predicted,principal) S3method(get_predicted,rlm) S3method(get_predicted,stanreg) S3method(get_predicted,zeroinfl) S3method(get_predicted_ci,default) S3method(get_predicted_ci,mlm) S3method(get_priors,BFBayesFactor) S3method(get_priors,bcplm) S3method(get_priors,blavaan) S3method(get_priors,brmsfit) S3method(get_priors,mcmc.list) S3method(get_priors,meta_fixed) S3method(get_priors,meta_random) S3method(get_priors,stanmvreg) S3method(get_priors,stanreg) S3method(get_random,afex_aov) S3method(get_random,default) S3method(get_residuals,afex_aov) S3method(get_residuals,coxph) S3method(get_residuals,crr) S3method(get_residuals,default) S3method(get_residuals,model_fit) S3method(get_residuals,slm) S3method(get_residuals,vgam) 
S3method(get_residuals,vglm) S3method(get_statistic,Arima) S3method(get_statistic,BBmm) S3method(get_statistic,DirichletRegModel) S3method(get_statistic,Gam) S3method(get_statistic,HLfit) S3method(get_statistic,LORgee) S3method(get_statistic,MANOVA) S3method(get_statistic,MixMod) S3method(get_statistic,RM) S3method(get_statistic,Rchoice) S3method(get_statistic,Sarlm) S3method(get_statistic,SemiParBIV) S3method(get_statistic,aareg) S3method(get_statistic,afex_aov) S3method(get_statistic,averaging) S3method(get_statistic,bayesx) S3method(get_statistic,betamfx) S3method(get_statistic,betaor) S3method(get_statistic,betareg) S3method(get_statistic,bfsl) S3method(get_statistic,bife) S3method(get_statistic,bigglm) S3method(get_statistic,biglm) S3method(get_statistic,bracl) S3method(get_statistic,brmultinom) S3method(get_statistic,btergm) S3method(get_statistic,censReg) S3method(get_statistic,cgam) S3method(get_statistic,clm2) S3method(get_statistic,clmm2) S3method(get_statistic,coeftest) S3method(get_statistic,complmrob) S3method(get_statistic,coxme) S3method(get_statistic,coxph) S3method(get_statistic,coxr) S3method(get_statistic,cpglm) S3method(get_statistic,cpglmm) S3method(get_statistic,crch) S3method(get_statistic,crq) S3method(get_statistic,crqs) S3method(get_statistic,crr) S3method(get_statistic,default) S3method(get_statistic,emmGrid) S3method(get_statistic,emm_list) S3method(get_statistic,epi.2by2) S3method(get_statistic,ergm) S3method(get_statistic,feis) S3method(get_statistic,fixest) S3method(get_statistic,flexsurvreg) S3method(get_statistic,gam) S3method(get_statistic,gamlss) S3method(get_statistic,gamm) S3method(get_statistic,garch) S3method(get_statistic,gee) S3method(get_statistic,geeglm) S3method(get_statistic,glht) S3method(get_statistic,glimML) S3method(get_statistic,glmm) S3method(get_statistic,glmmTMB) S3method(get_statistic,glmmadmb) S3method(get_statistic,glmx) S3method(get_statistic,hurdle) S3method(get_statistic,ivFixed) S3method(get_statistic,ivprobit) S3method(get_statistic,lavaan) S3method(get_statistic,list) S3method(get_statistic,lm_robust) S3method(get_statistic,lme) S3method(get_statistic,lmerModLmerTest) S3method(get_statistic,lmodel2) S3method(get_statistic,logistf) S3method(get_statistic,logitmfx) S3method(get_statistic,logitor) S3method(get_statistic,lqm) S3method(get_statistic,lqmm) S3method(get_statistic,lrm) S3method(get_statistic,manova) S3method(get_statistic,maov) S3method(get_statistic,margins) S3method(get_statistic,maxLik) S3method(get_statistic,mediate) S3method(get_statistic,merModList) S3method(get_statistic,metaplus) S3method(get_statistic,mhurdle) S3method(get_statistic,mipo) S3method(get_statistic,mira) S3method(get_statistic,mixor) S3method(get_statistic,mjoint) S3method(get_statistic,mle) S3method(get_statistic,mle2) S3method(get_statistic,mlm) S3method(get_statistic,mlogit) S3method(get_statistic,model_fit) S3method(get_statistic,multinom) S3method(get_statistic,mvord) S3method(get_statistic,negbin) S3method(get_statistic,negbinirr) S3method(get_statistic,negbinmfx) S3method(get_statistic,nlrq) S3method(get_statistic,ols) S3method(get_statistic,orm) S3method(get_statistic,pgmm) S3method(get_statistic,plm) S3method(get_statistic,poissonirr) S3method(get_statistic,poissonmfx) S3method(get_statistic,probitmfx) S3method(get_statistic,psm) S3method(get_statistic,ridgelm) S3method(get_statistic,riskRegression) S3method(get_statistic,rma) S3method(get_statistic,rms) S3method(get_statistic,robmixglm) S3method(get_statistic,rq) 
S3method(get_statistic,rqs) S3method(get_statistic,rqss) S3method(get_statistic,scam) S3method(get_statistic,selection) S3method(get_statistic,sem) S3method(get_statistic,summary.lm) S3method(get_statistic,survreg) S3method(get_statistic,svy_vglm) S3method(get_statistic,svyglm) S3method(get_statistic,svyglm.nb) S3method(get_statistic,svyglm.zip) S3method(get_statistic,svyolr) S3method(get_statistic,systemfit) S3method(get_statistic,tobit) S3method(get_statistic,truncreg) S3method(get_statistic,vgam) S3method(get_statistic,vglm) S3method(get_statistic,wbgee) S3method(get_statistic,wbm) S3method(get_statistic,zcpglm) S3method(get_statistic,zerocount) S3method(get_statistic,zeroinfl) S3method(get_varcov,BBmm) S3method(get_varcov,BBreg) S3method(get_varcov,DirichletRegModel) S3method(get_varcov,HLfit) S3method(get_varcov,LORgee) S3method(get_varcov,MixMod) S3method(get_varcov,Rchoice) S3method(get_varcov,afex_aov) S3method(get_varcov,aov) S3method(get_varcov,averaging) S3method(get_varcov,betamfx) S3method(get_varcov,betaor) S3method(get_varcov,betareg) S3method(get_varcov,brmsfit) S3method(get_varcov,cglm) S3method(get_varcov,clm2) S3method(get_varcov,clmm2) S3method(get_varcov,coxr) S3method(get_varcov,cpglm) S3method(get_varcov,cpglmm) S3method(get_varcov,crq) S3method(get_varcov,crr) S3method(get_varcov,default) S3method(get_varcov,feis) S3method(get_varcov,flexsurvreg) S3method(get_varcov,gamlss) S3method(get_varcov,gamm) S3method(get_varcov,gee) S3method(get_varcov,geeglm) S3method(get_varcov,glimML) S3method(get_varcov,glmRob) S3method(get_varcov,glmm) S3method(get_varcov,glmmTMB) S3method(get_varcov,glmx) S3method(get_varcov,hurdle) S3method(get_varcov,ivFixed) S3method(get_varcov,list) S3method(get_varcov,lmRob) S3method(get_varcov,logitmfx) S3method(get_varcov,logitor) S3method(get_varcov,lqm) S3method(get_varcov,lqmm) S3method(get_varcov,maxLik) S3method(get_varcov,mediate) S3method(get_varcov,merModList) S3method(get_varcov,mhurdle) S3method(get_varcov,mixed) S3method(get_varcov,mixor) S3method(get_varcov,mjoint) S3method(get_varcov,mle) S3method(get_varcov,mle2) S3method(get_varcov,mlm) S3method(get_varcov,model_fit) S3method(get_varcov,mvord) S3method(get_varcov,negbinirr) S3method(get_varcov,negbinmfx) S3method(get_varcov,nlrq) S3method(get_varcov,pgmm) S3method(get_varcov,poissonirr) S3method(get_varcov,poissonmfx) S3method(get_varcov,probitmfx) S3method(get_varcov,robmixglm) S3method(get_varcov,rq) S3method(get_varcov,rqs) S3method(get_varcov,selection) S3method(get_varcov,tobit) S3method(get_varcov,truncreg) S3method(get_varcov,vgam) S3method(get_varcov,vglm) S3method(get_varcov,zcpglm) S3method(get_varcov,zerocount) S3method(get_varcov,zeroinfl) S3method(get_variance,MixMod) S3method(get_variance,brmsfit) S3method(get_variance,clmm) S3method(get_variance,cpglmm) S3method(get_variance,default) S3method(get_variance,glmmTMB) S3method(get_variance,glmmadmb) S3method(get_variance,lme) S3method(get_variance,merMod) S3method(get_variance,mixed) S3method(get_variance,mjoint) S3method(get_variance,rlmerMod) S3method(get_variance,stanreg) S3method(get_variance,wblm) S3method(get_variance,wbm) S3method(get_weights,brmsfit) S3method(get_weights,btergm) S3method(get_weights,default) S3method(get_weights,list) S3method(is_mixed_model,afex_aov) S3method(is_mixed_model,default) S3method(is_nullmodel,afex_aov) S3method(is_nullmodel,default) S3method(link_function,BBmm) S3method(link_function,BBreg) S3method(link_function,DirichletRegModel) S3method(link_function,LORgee) 
S3method(link_function,MANOVA) S3method(link_function,MCMCglmm) S3method(link_function,RM) S3method(link_function,Rchoice) S3method(link_function,afex_aov) S3method(link_function,aovlist) S3method(link_function,bamlss) S3method(link_function,bayesx) S3method(link_function,bcplm) S3method(link_function,betamfx) S3method(link_function,betaor) S3method(link_function,betareg) S3method(link_function,bife) S3method(link_function,bigglm) S3method(link_function,biglm) S3method(link_function,brglm) S3method(link_function,brmsfit) S3method(link_function,censReg) S3method(link_function,cgam) S3method(link_function,cglm) S3method(link_function,clm) S3method(link_function,clm2) S3method(link_function,clmm) S3method(link_function,complmRob) S3method(link_function,comprisk) S3method(link_function,coxme) S3method(link_function,coxph) S3method(link_function,coxr) S3method(link_function,cpglm) S3method(link_function,cpglmm) S3method(link_function,cph) S3method(link_function,crch) S3method(link_function,crq) S3method(link_function,crqs) S3method(link_function,default) S3method(link_function,feglm) S3method(link_function,feis) S3method(link_function,felm) S3method(link_function,fixest) S3method(link_function,flexsurvreg) S3method(link_function,gam) S3method(link_function,gamlss) S3method(link_function,gamm) S3method(link_function,gbm) S3method(link_function,glimML) S3method(link_function,glm) S3method(link_function,glmm) S3method(link_function,glmmadmb) S3method(link_function,glmx) S3method(link_function,gls) S3method(link_function,gmnl) S3method(link_function,hurdle) S3method(link_function,ivFixed) S3method(link_function,iv_robust) S3method(link_function,ivprobit) S3method(link_function,ivreg) S3method(link_function,lm) S3method(link_function,lmRob) S3method(link_function,lm_robust) S3method(link_function,lme) S3method(link_function,lmrob) S3method(link_function,logistf) S3method(link_function,logitmfx) S3method(link_function,logitor) S3method(link_function,lqm) S3method(link_function,lqmm) S3method(link_function,lrm) S3method(link_function,merModList) S3method(link_function,mipo) S3method(link_function,mira) S3method(link_function,mixed) S3method(link_function,mixor) S3method(link_function,mlogit) S3method(link_function,model_fit) S3method(link_function,multinom) S3method(link_function,mvord) S3method(link_function,negbinirr) S3method(link_function,negbinmfx) S3method(link_function,orm) S3method(link_function,plm) S3method(link_function,poissonirr) S3method(link_function,poissonmfx) S3method(link_function,polr) S3method(link_function,probitmfx) S3method(link_function,psm) S3method(link_function,riskRegression) S3method(link_function,robmixglm) S3method(link_function,rq) S3method(link_function,rqss) S3method(link_function,speedglm) S3method(link_function,speedlm) S3method(link_function,stanmvreg) S3method(link_function,survfit) S3method(link_function,survreg) S3method(link_function,svy_vglm) S3method(link_function,svyolr) S3method(link_function,systemfit) S3method(link_function,tobit) S3method(link_function,truncreg) S3method(link_function,vgam) S3method(link_function,vglm) S3method(link_function,zcpglm) S3method(link_function,zeroinfl) S3method(link_function,zerotrunc) S3method(link_inverse,BBmm) S3method(link_inverse,BBreg) S3method(link_inverse,DirichletRegModel) S3method(link_inverse,LORgee) S3method(link_inverse,MANOVA) S3method(link_inverse,MCMCglmm) S3method(link_inverse,MixMod) S3method(link_inverse,RM) S3method(link_inverse,Rchoice) S3method(link_inverse,afex_aov) S3method(link_inverse,aovlist) 
S3method(link_inverse,bamlss) S3method(link_inverse,bayesx) S3method(link_inverse,bcplm) S3method(link_inverse,betamfx) S3method(link_inverse,betaor) S3method(link_inverse,betareg) S3method(link_inverse,bife) S3method(link_inverse,bigglm) S3method(link_inverse,biglm) S3method(link_inverse,brmsfit) S3method(link_inverse,censReg) S3method(link_inverse,cgam) S3method(link_inverse,cglm) S3method(link_inverse,clm) S3method(link_inverse,clm2) S3method(link_inverse,clmm) S3method(link_inverse,complmrob) S3method(link_inverse,comprisk) S3method(link_inverse,coxme) S3method(link_inverse,coxph) S3method(link_inverse,coxr) S3method(link_inverse,cpglm) S3method(link_inverse,cpglmm) S3method(link_inverse,cph) S3method(link_inverse,crch) S3method(link_inverse,crq) S3method(link_inverse,crqs) S3method(link_inverse,default) S3method(link_inverse,feglm) S3method(link_inverse,feis) S3method(link_inverse,felm) S3method(link_inverse,fixest) S3method(link_inverse,flexsurvreg) S3method(link_inverse,gam) S3method(link_inverse,gamlss) S3method(link_inverse,gamm) S3method(link_inverse,gbm) S3method(link_inverse,glimML) S3method(link_inverse,glm) S3method(link_inverse,glmm) S3method(link_inverse,glmmPQL) S3method(link_inverse,glmmTMB) S3method(link_inverse,glmmadmb) S3method(link_inverse,glmx) S3method(link_inverse,gls) S3method(link_inverse,gmnl) S3method(link_inverse,hurdle) S3method(link_inverse,ivFixed) S3method(link_inverse,iv_robust) S3method(link_inverse,ivprobit) S3method(link_inverse,ivreg) S3method(link_inverse,lm) S3method(link_inverse,lmRob) S3method(link_inverse,lm_robust) S3method(link_inverse,lme) S3method(link_inverse,lmrob) S3method(link_inverse,logistf) S3method(link_inverse,logitmfx) S3method(link_inverse,logitor) S3method(link_inverse,lqm) S3method(link_inverse,lqmm) S3method(link_inverse,lrm) S3method(link_inverse,merModList) S3method(link_inverse,mipo) S3method(link_inverse,mira) S3method(link_inverse,mixed) S3method(link_inverse,mixor) S3method(link_inverse,mlogit) S3method(link_inverse,model_fit) S3method(link_inverse,multinom) S3method(link_inverse,mvord) S3method(link_inverse,negbinirr) S3method(link_inverse,negbinmfx) S3method(link_inverse,orm) S3method(link_inverse,plm) S3method(link_inverse,poissonmfx) S3method(link_inverse,polr) S3method(link_inverse,probitirr) S3method(link_inverse,probitmfx) S3method(link_inverse,psm) S3method(link_inverse,riskRegression) S3method(link_inverse,robmixglm) S3method(link_inverse,rq) S3method(link_inverse,rqss) S3method(link_inverse,speedglm) S3method(link_inverse,speedlm) S3method(link_inverse,stanmvreg) S3method(link_inverse,survfit) S3method(link_inverse,survreg) S3method(link_inverse,svy_vglm) S3method(link_inverse,svyolr) S3method(link_inverse,systemfit) S3method(link_inverse,tobit) S3method(link_inverse,truncreg) S3method(link_inverse,vgam) S3method(link_inverse,vglm) S3method(link_inverse,zcpglm) S3method(link_inverse,zeroinfl) S3method(link_inverse,zerotrunc) S3method(model_info,Arima) S3method(model_info,BBmm) S3method(model_info,BBreg) S3method(model_info,BFBayesFactor) S3method(model_info,BGGM) S3method(model_info,DirichletRegModel) S3method(model_info,LORgee) S3method(model_info,MANOVA) S3method(model_info,MCMCglmm) S3method(model_info,MixMod) S3method(model_info,RM) S3method(model_info,Rchoice) S3method(model_info,aareg) S3method(model_info,afex_aov) S3method(model_info,anova) S3method(model_info,aovlist) S3method(model_info,averaging) S3method(model_info,bamlss) S3method(model_info,bayesQR) S3method(model_info,bayesx) 
S3method(model_info,bcplm) S3method(model_info,betamfx) S3method(model_info,betaor) S3method(model_info,betareg) S3method(model_info,bfsl) S3method(model_info,bife) S3method(model_info,blrm) S3method(model_info,brmsfit) S3method(model_info,brmultinom) S3method(model_info,censReg) S3method(model_info,cgam) S3method(model_info,cglm) S3method(model_info,clm) S3method(model_info,clm2) S3method(model_info,clmm) S3method(model_info,coeftest) S3method(model_info,complmrob) S3method(model_info,comprisk) S3method(model_info,coxme) S3method(model_info,coxph) S3method(model_info,coxr) S3method(model_info,cpglm) S3method(model_info,cpglmm) S3method(model_info,crch) S3method(model_info,crq) S3method(model_info,crqs) S3method(model_info,data.frame) S3method(model_info,default) S3method(model_info,earth) S3method(model_info,feglm) S3method(model_info,feis) S3method(model_info,felm) S3method(model_info,fixest) S3method(model_info,flexsurvreg) S3method(model_info,gam) S3method(model_info,gamlss) S3method(model_info,gamm) S3method(model_info,garch) S3method(model_info,gbm) S3method(model_info,glht) S3method(model_info,glimML) S3method(model_info,glmm) S3method(model_info,glmmPQL) S3method(model_info,glmmTMB) S3method(model_info,glmmadmb) S3method(model_info,glmx) S3method(model_info,gls) S3method(model_info,gmnl) S3method(model_info,htest) S3method(model_info,hurdle) S3method(model_info,ivFixed) S3method(model_info,iv_robust) S3method(model_info,ivprobit) S3method(model_info,ivreg) S3method(model_info,lmRob) S3method(model_info,lm_robust) S3method(model_info,lme) S3method(model_info,lmrob) S3method(model_info,logistf) S3method(model_info,logitmfx) S3method(model_info,logitor) S3method(model_info,lqm) S3method(model_info,lqmm) S3method(model_info,lrm) S3method(model_info,maxLik) S3method(model_info,mclogit) S3method(model_info,mcmc) S3method(model_info,merModList) S3method(model_info,meta_bma) S3method(model_info,meta_fixed) S3method(model_info,meta_random) S3method(model_info,metaplus) S3method(model_info,mhurdle) S3method(model_info,mipo) S3method(model_info,mira) S3method(model_info,mixed) S3method(model_info,mixor) S3method(model_info,mjoint) S3method(model_info,mlm) S3method(model_info,mlogit) S3method(model_info,mmclogit) S3method(model_info,model_fit) S3method(model_info,multinom) S3method(model_info,mvord) S3method(model_info,negbinirr) S3method(model_info,negbinmfx) S3method(model_info,nlrq) S3method(model_info,nls) S3method(model_info,orm) S3method(model_info,plm) S3method(model_info,poissonirr) S3method(model_info,poissonmfx) S3method(model_info,polr) S3method(model_info,probitmfx) S3method(model_info,riskRegression) S3method(model_info,rma) S3method(model_info,robmixglm) S3method(model_info,rq) S3method(model_info,rqss) S3method(model_info,speedglm) S3method(model_info,speedlm) S3method(model_info,stanmvreg) S3method(model_info,stanreg) S3method(model_info,summary.lm) S3method(model_info,survfit) S3method(model_info,survreg) S3method(model_info,svy_vglm) S3method(model_info,svyolr) S3method(model_info,systemfit) S3method(model_info,tobit) S3method(model_info,truncreg) S3method(model_info,vgam) S3method(model_info,vglm) S3method(model_info,zcpglm) S3method(model_info,zeroinfl) S3method(model_info,zerotrunc) S3method(model_name,default) S3method(model_name,list) S3method(n_obs,BBmm) S3method(n_obs,BBreg) S3method(n_obs,Glm) S3method(n_obs,LORgee) S3method(n_obs,MANOVA) S3method(n_obs,RM) S3method(n_obs,Rchoice) S3method(n_obs,SemiParBIV) S3method(n_obs,aareg) S3method(n_obs,afex_aov) 
S3method(n_obs,aovlist) S3method(n_obs,averaging) S3method(n_obs,bamlss) S3method(n_obs,bayesx) S3method(n_obs,bcplm) S3method(n_obs,betamfx) S3method(n_obs,betaor) S3method(n_obs,bife) S3method(n_obs,bigglm) S3method(n_obs,biglm) S3method(n_obs,blrm) S3method(n_obs,censReg) S3method(n_obs,cgam) S3method(n_obs,cglm) S3method(n_obs,coeftest) S3method(n_obs,complmrob) S3method(n_obs,comprisk) S3method(n_obs,coxme) S3method(n_obs,coxph) S3method(n_obs,coxr) S3method(n_obs,cpglm) S3method(n_obs,cpglmm) S3method(n_obs,crq) S3method(n_obs,crqs) S3method(n_obs,crr) S3method(n_obs,default) S3method(n_obs,eglm) S3method(n_obs,emm_list) S3method(n_obs,feglm) S3method(n_obs,feis) S3method(n_obs,felm) S3method(n_obs,fixest) S3method(n_obs,flexsurvreg) S3method(n_obs,gam) S3method(n_obs,gamm) S3method(n_obs,garch) S3method(n_obs,gbm) S3method(n_obs,glimML) S3method(n_obs,glm) S3method(n_obs,glmRob) S3method(n_obs,gmnl) S3method(n_obs,hurdle) S3method(n_obs,ivFixed) S3method(n_obs,ivprobit) S3method(n_obs,joint) S3method(n_obs,lavaan) S3method(n_obs,list) S3method(n_obs,lmRob) S3method(n_obs,lmodel2) S3method(n_obs,logitmfx) S3method(n_obs,logitor) S3method(n_obs,lqm) S3method(n_obs,lqmm) S3method(n_obs,maxLik) S3method(n_obs,mcmc) S3method(n_obs,mediate) S3method(n_obs,merModList) S3method(n_obs,mhurdle) S3method(n_obs,mipo) S3method(n_obs,mira) S3method(n_obs,mjoint) S3method(n_obs,mle) S3method(n_obs,mle2) S3method(n_obs,mlogit) S3method(n_obs,model_fit) S3method(n_obs,multinom) S3method(n_obs,mvord) S3method(n_obs,negbinirr) S3method(n_obs,negbinmfx) S3method(n_obs,nlrq) S3method(n_obs,poissonirr) S3method(n_obs,poissonmfx) S3method(n_obs,probitmfx) S3method(n_obs,riskRegression) S3method(n_obs,rq) S3method(n_obs,rqss) S3method(n_obs,selection) S3method(n_obs,sem) S3method(n_obs,stanmvreg) S3method(n_obs,summary.lm) S3method(n_obs,survfit) S3method(n_obs,survreg) S3method(n_obs,svy_vglm) S3method(n_obs,svyolr) S3method(n_obs,wbgee) S3method(n_obs,wbm) S3method(n_obs,zcpglm) S3method(n_obs,zeroinfl) S3method(n_obs,zerotrunc) S3method(n_parameters,BBmm) S3method(n_parameters,Gam) S3method(n_parameters,MCMCglmm) S3method(n_parameters,MixMod) S3method(n_parameters,bayesx) S3method(n_parameters,blavaan) S3method(n_parameters,brmsfit) S3method(n_parameters,coxme) S3method(n_parameters,cpglmm) S3method(n_parameters,default) S3method(n_parameters,gam) S3method(n_parameters,glimML) S3method(n_parameters,glmmTMB) S3method(n_parameters,hurdle) S3method(n_parameters,lavaan) S3method(n_parameters,lme) S3method(n_parameters,merMod) S3method(n_parameters,mixed) S3method(n_parameters,multinom) S3method(n_parameters,rlmerMod) S3method(n_parameters,sim.merMod) S3method(n_parameters,stanmvreg) S3method(n_parameters,stanreg) S3method(n_parameters,vgam) S3method(n_parameters,wbm) S3method(n_parameters,zeroinfl) S3method(n_parameters,zerotrunc) S3method(print,check_if_installed) S3method(print,easystats_check) S3method(print,get_predicted) S3method(print,insight_table) S3method(print_html,data.frame) S3method(print_md,data.frame) S3method(standardize_column_order,data.frame) S3method(standardize_column_order,default) S3method(standardize_column_order,effectsize_table) S3method(standardize_column_order,parameters_distribution) S3method(standardize_column_order,parameters_model) S3method(standardize_names,data.frame) S3method(standardize_names,default) S3method(standardize_names,effectsize_table) S3method(standardize_names,parameters_distribution) S3method(standardize_names,parameters_model) S3method(summary,get_predicted) 
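The S3method() entries above register class-specific methods for insight's generics, so a single call such as get_parameters(), get_statistic(), model_info(), link_inverse() or n_obs() dispatches on the class of the fitted model (lm, glm, merMod, brmsfit, and the other classes listed). A minimal sketch of that dispatch, using only generics registered above and the base-R mtcars data; the commented results are what one would typically expect, not verified output:

library(insight)

m <- lm(mpg ~ wt + cyl, data = mtcars)

model_info(m)$is_linear   # TRUE for a Gaussian linear model
get_parameters(m)         # data frame of parameter names and estimates
get_statistic(m)          # data frame with the t-statistic per parameter
link_inverse(m)(0.3)      # identity link for lm, so returns 0.3
n_obs(m)                  # number of observations used to fit the model

The same calls would dispatch to, e.g., the merMod or glmmTMB methods registered above if m were a mixed model.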
export(all_models_equal) export(all_models_same_class) export(check_if_installed) export(clean_names) export(clean_parameters) export(color_if) export(color_text) export(color_theme) export(colour_if) export(colour_text) export(data_match) export(data_relocate) export(data_restoretype) export(data_to_long) export(data_to_wide) export(display) export(download_model) export(ellipsis_info) export(export_table) export(find_algorithm) export(find_formula) export(find_interactions) export(find_offset) export(find_parameters) export(find_predictors) export(find_random) export(find_random_slopes) export(find_response) export(find_smooth) export(find_statistic) export(find_terms) export(find_transformation) export(find_variables) export(find_weights) export(format_bf) export(format_ci) export(format_message) export(format_number) export(format_p) export(format_pd) export(format_rope) export(format_string) export(format_table) export(format_value) export(formula_ok) export(get_auxiliary) export(get_call) export(get_correlation_slope_intercept) export(get_correlation_slopes) export(get_data) export(get_deviance) export(get_df) export(get_family) export(get_intercept) export(get_loglikelihood) export(get_modelmatrix) export(get_parameters) export(get_predicted) export(get_predicted_ci) export(get_predictors) export(get_priors) export(get_random) export(get_residuals) export(get_response) export(get_sigma) export(get_statistic) export(get_transformation) export(get_varcov) export(get_variance) export(get_variance_dispersion) export(get_variance_distribution) export(get_variance_fixed) export(get_variance_intercept) export(get_variance_random) export(get_variance_residual) export(get_variance_slope) export(get_weights) export(has_intercept) export(is_gam_model) export(is_mixed_model) export(is_model) export(is_model_supported) export(is_multivariate) export(is_nested_models) export(is_nullmodel) export(is_regression_model) export(link_function) export(link_inverse) export(loglikelihood) export(model_info) export(model_name) export(n_obs) export(n_parameters) export(null_model) export(parameters_table) export(print_color) export(print_colour) export(print_html) export(print_md) export(print_parameters) export(standardize_column_order) export(standardize_names) export(supported_models) export(to_numeric)
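The export() entries make the listed functions part of insight's public API: the find_*() helpers inspect a model's formula and terms, the get_*() accessors return data and estimates, and the format_*()/export_table() utilities handle printing. A short, hedged sketch under the assumption of a simple logistic regression on mtcars; the commented results are illustrative expectations only:

library(insight)

m <- glm(am ~ wt + hp, data = mtcars, family = binomial())

find_response(m)      # "am"
find_predictors(m)    # list with $conditional = c("wt", "hp")
find_statistic(m)     # "z-statistic" for a logistic regression
head(get_data(m))     # the data used to fit the model

# formatting helpers exported above
format_value(pi, digits = 2)     # "3.14"
format_p(0.0001)                 # something like "p < 0.001"
export_table(get_parameters(m))  # plain-text table of the coefficients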