parameters/NAMESPACE

# Generated by roxygen2: do not edit by hand

S3method(as.data.frame,glmmTMB) S3method(as.data.frame,lm) S3method(as.data.frame,merMod) S3method(as.double,n_clusters) S3method(as.double,n_factors) S3method(as.numeric,n_clusters) S3method(as.numeric,n_factors) S3method(bootstrap_model,default) S3method(bootstrap_model,merMod) S3method(check_multimodal,data.frame) S3method(check_multimodal,numeric) S3method(ci,BBmm) S3method(ci,BBreg) S3method(ci,DirichletRegModel) S3method(ci,LORgee) S3method(ci,MixMod) S3method(ci,aareg) S3method(ci,betareg) S3method(ci,biglm) S3method(ci,bracl) S3method(ci,brmultinom) S3method(ci,censReg) S3method(ci,clm) S3method(ci,clm2) S3method(ci,clmm2) S3method(ci,comlmrob) S3method(ci,coxme) S3method(ci,coxph) S3method(ci,cpglm) S3method(ci,cpglmm) S3method(ci,crch) S3method(ci,crq) S3method(ci,default) S3method(ci,effectsize_std_params) S3method(ci,feglm) S3method(ci,feis) S3method(ci,fixest) S3method(ci,flexsurvreg) S3method(ci,gam) S3method(ci,gamlss) S3method(ci,gamm) S3method(ci,gamm4) S3method(ci,gee) S3method(ci,geeglm) S3method(ci,glm) S3method(ci,glmmTMB) S3method(ci,glmmadmb) S3method(ci,glmrob) S3method(ci,glmx) S3method(ci,gls) S3method(ci,hurdle) S3method(ci,ivreg) S3method(ci,list) S3method(ci,lm) S3method(ci,lm_robust) S3method(ci,lme) S3method(ci,logistf) S3method(ci,lrm) S3method(ci,merMod) S3method(ci,mixor) S3method(ci,mlm) S3method(ci,multinom) S3method(ci,negbin) S3method(ci,nlrq) S3method(ci,ols) S3method(ci,plm) S3method(ci,polr) S3method(ci,psm) S3method(ci,rma) S3method(ci,rms) S3method(ci,rq) S3method(ci,rqss) S3method(ci,speedglm) S3method(ci,speedlm) S3method(ci,survreg) S3method(ci,svyglm.glimML) S3method(ci,svyglm.nb) S3method(ci,svyglm.zip) S3method(ci,tobit) S3method(ci,truncreg) S3method(ci,vglm) S3method(ci,zerocount) S3method(ci,zeroinfl) S3method(convert_data_to_numeric,character) S3method(convert_data_to_numeric,data.frame) S3method(convert_data_to_numeric,double) S3method(convert_data_to_numeric,factor) S3method(convert_data_to_numeric,logical) S3method(convert_data_to_numeric,numeric) S3method(convert_efa_to_cfa,fa) S3method(convert_efa_to_cfa,parameters_efa) S3method(convert_efa_to_cfa,parameters_pca) S3method(describe_distribution,data.frame) S3method(describe_distribution,factor) S3method(describe_distribution,numeric) S3method(dof_satterthwaite,lme) S3method(dof_satterthwaite,lmerMod) S3method(equivalence_test,MixMod) S3method(equivalence_test,glm) S3method(equivalence_test,glmmTMB) S3method(equivalence_test,lm) S3method(equivalence_test,merMod) S3method(factor_analysis,data.frame) S3method(format_parameters,default) S3method(format_parameters,parameters_model) S3method(format_parameters,rma) S3method(kurtosis,data.frame) S3method(kurtosis,default) S3method(kurtosis,matrix) S3method(kurtosis,numeric) S3method(model_parameters,BFBayesFactor) S3method(model_parameters,DirichletRegModel) S3method(model_parameters,FAMD) S3method(model_parameters,MCMCglmm) S3method(model_parameters,Mclust) S3method(model_parameters,MixMod) S3method(model_parameters,PCA) S3method(model_parameters,anova) S3method(model_parameters,aov) S3method(model_parameters,aovlist) S3method(model_parameters,befa) S3method(model_parameters,betareg) S3method(model_parameters,blavaan) S3method(model_parameters,bracl) S3method(model_parameters,brmsfit) S3method(model_parameters,brmultinom) 
S3method(model_parameters,cgam) S3method(model_parameters,clm2) S3method(model_parameters,clmm) S3method(model_parameters,clmm2) S3method(model_parameters,cpglmm) S3method(model_parameters,default) S3method(model_parameters,fa) S3method(model_parameters,gam) S3method(model_parameters,gamlss) S3method(model_parameters,gamm) S3method(model_parameters,glmmTMB) S3method(model_parameters,glmx) S3method(model_parameters,htest) S3method(model_parameters,hurdle) S3method(model_parameters,kmeans) S3method(model_parameters,lavaan) S3method(model_parameters,list) S3method(model_parameters,lme) S3method(model_parameters,mcmc) S3method(model_parameters,merMod) S3method(model_parameters,mixor) S3method(model_parameters,mlm) S3method(model_parameters,multinom) S3method(model_parameters,omega) S3method(model_parameters,parameters_efa) S3method(model_parameters,parameters_pca) S3method(model_parameters,principal) S3method(model_parameters,rlmerMod) S3method(model_parameters,rma) S3method(model_parameters,rqss) S3method(model_parameters,stanmvreg) S3method(model_parameters,stanreg) S3method(model_parameters,vgam) S3method(model_parameters,wbgee) S3method(model_parameters,wbm) S3method(model_parameters,zerocount) S3method(model_parameters,zeroinfl) S3method(n_parameters,BBmm) S3method(n_parameters,Gam) S3method(n_parameters,MCMCglmm) S3method(n_parameters,MixMod) S3method(n_parameters,brmsfit) S3method(n_parameters,coxme) S3method(n_parameters,cpglmm) S3method(n_parameters,default) S3method(n_parameters,gam) S3method(n_parameters,glimML) S3method(n_parameters,glmmTMB) S3method(n_parameters,hurdle) S3method(n_parameters,lavaan) S3method(n_parameters,lme) S3method(n_parameters,merMod) S3method(n_parameters,mixed) S3method(n_parameters,multinom) S3method(n_parameters,rlmerMod) S3method(n_parameters,sim.merMod) S3method(n_parameters,stanmvreg) S3method(n_parameters,stanreg) S3method(n_parameters,vgam) S3method(n_parameters,wbm) S3method(n_parameters,zeroinfl) S3method(n_parameters,zerotrunc) S3method(p_value,BBmm) S3method(p_value,BBreg) S3method(p_value,BFBayesFactor) S3method(p_value,DirichletRegModel) S3method(p_value,Gam) S3method(p_value,LORgee) S3method(p_value,MCMCglmm) S3method(p_value,MixMod) S3method(p_value,aareg) S3method(p_value,anova) S3method(p_value,aov) S3method(p_value,aovlist) S3method(p_value,betareg) S3method(p_value,biglm) S3method(p_value,bracl) S3method(p_value,brmsfit) S3method(p_value,brmultinom) S3method(p_value,censReg) S3method(p_value,cgam) S3method(p_value,clm2) S3method(p_value,clmm2) S3method(p_value,complmrob) S3method(p_value,coxme) S3method(p_value,coxph) S3method(p_value,cpglm) S3method(p_value,cpglmm) S3method(p_value,crch) S3method(p_value,crq) S3method(p_value,data.frame) S3method(p_value,default) S3method(p_value,feglm) S3method(p_value,fixest) S3method(p_value,flexsurvreg) S3method(p_value,gam) S3method(p_value,gamlss) S3method(p_value,gamm) S3method(p_value,gamm4) S3method(p_value,gee) S3method(p_value,geeglm) S3method(p_value,glimML) S3method(p_value,glmmTMB) S3method(p_value,glmx) S3method(p_value,gls) S3method(p_value,gmnl) S3method(p_value,htest) S3method(p_value,hurdle) S3method(p_value,ivreg) S3method(p_value,list) S3method(p_value,lm) S3method(p_value,lm_robust) S3method(p_value,lme) S3method(p_value,lmerMod) S3method(p_value,logistf) S3method(p_value,lrm) S3method(p_value,maxLik) S3method(p_value,merMod) S3method(p_value,mixor) S3method(p_value,mlm) S3method(p_value,multinom) S3method(p_value,negbin) S3method(p_value,nlrq) S3method(p_value,numeric) 
S3method(p_value,ols) S3method(p_value,pggls) S3method(p_value,pglm) S3method(p_value,plm) S3method(p_value,polr) S3method(p_value,psm) S3method(p_value,rlm) S3method(p_value,rlmerMod) S3method(p_value,rma) S3method(p_value,rms) S3method(p_value,rq) S3method(p_value,rqss) S3method(p_value,stanreg) S3method(p_value,survreg) S3method(p_value,svyglm) S3method(p_value,svyglm.nb) S3method(p_value,svyglm.zip) S3method(p_value,svyolr) S3method(p_value,tobit) S3method(p_value,truncreg) S3method(p_value,vgam) S3method(p_value,vglm) S3method(p_value,wbgee) S3method(p_value,wbm) S3method(p_value,zerocount) S3method(p_value,zeroinfl) S3method(p_value_kenward,lmerMod) S3method(p_value_satterthwaite,gls) S3method(p_value_satterthwaite,lme) S3method(p_value_satterthwaite,lmerMod) S3method(p_value_wald,cpglmm) S3method(p_value_wald,merMod) S3method(p_value_wald,rlmerMod) S3method(plot,check_clusterstructure) S3method(plot,cluster_analysis) S3method(plot,n_clusters) S3method(plot,n_factors) S3method(plot,parameters_distribution) S3method(plot,parameters_efa) S3method(plot,parameters_model) S3method(plot,parameters_pca) S3method(plot,parameters_sem) S3method(plot,parameters_simulate) S3method(predict,kmeans) S3method(predict,parameters_clusters) S3method(predict,parameters_efa) S3method(predict,parameters_pca) S3method(predict,parameters_sem) S3method(principal_components,data.frame) S3method(principal_components,lm) S3method(principal_components,merMod) S3method(print,cfa_model) S3method(print,cluster_analysis) S3method(print,cluster_discrimintation) S3method(print,equivalence_test_lm) S3method(print,n_clusters) S3method(print,n_factors) S3method(print,parameters_clusters) S3method(print,parameters_distribution) S3method(print,parameters_efa) S3method(print,parameters_efa_summary) S3method(print,parameters_loadings) S3method(print,parameters_model) S3method(print,parameters_omega) S3method(print,parameters_omega_summary) S3method(print,parameters_pca) S3method(print,parameters_pca_summary) S3method(print,parameters_random) S3method(print,parameters_sem) S3method(reduce_parameters,data.frame) S3method(reduce_parameters,lm) S3method(reduce_parameters,merMod) S3method(reshape_loadings,data.frame) S3method(reshape_loadings,parameters_efa) S3method(se_satterthwaite,default) S3method(se_satterthwaite,gls) S3method(se_satterthwaite,lme) S3method(select_parameters,lm) S3method(select_parameters,merMod) S3method(select_parameters,stanreg) S3method(simulate_model,LORgee) S3method(simulate_model,MixMod) S3method(simulate_model,betareg) S3method(simulate_model,biglm) S3method(simulate_model,bracl) S3method(simulate_model,brmultinom) S3method(simulate_model,censReg) S3method(simulate_model,cglm) S3method(simulate_model,clm) S3method(simulate_model,clm2) S3method(simulate_model,clmm2) S3method(simulate_model,coxme) S3method(simulate_model,coxph) S3method(simulate_model,cpglm) S3method(simulate_model,cpglmm) S3method(simulate_model,crch) S3method(simulate_model,crq) S3method(simulate_model,default) S3method(simulate_model,feglm) S3method(simulate_model,feis) S3method(simulate_model,fixest) S3method(simulate_model,flexsurvreg) S3method(simulate_model,gam) S3method(simulate_model,gamlss) S3method(simulate_model,gamm) S3method(simulate_model,gee) S3method(simulate_model,geeglm) S3method(simulate_model,glimML) S3method(simulate_model,glm) S3method(simulate_model,glmRob) S3method(simulate_model,glmmTMB) S3method(simulate_model,glmmadmb) S3method(simulate_model,glmrob) S3method(simulate_model,glmx) S3method(simulate_model,gls) 
S3method(simulate_model,hurdle) S3method(simulate_model,iv_robust) S3method(simulate_model,ivreg) S3method(simulate_model,list) S3method(simulate_model,lm) S3method(simulate_model,lmRob) S3method(simulate_model,lm_robust) S3method(simulate_model,lme) S3method(simulate_model,lmrob) S3method(simulate_model,logistf) S3method(simulate_model,lrm) S3method(simulate_model,merMod) S3method(simulate_model,mixor) S3method(simulate_model,multinom) S3method(simulate_model,nlrq) S3method(simulate_model,ols) S3method(simulate_model,plm) S3method(simulate_model,polr) S3method(simulate_model,psm) S3method(simulate_model,rms) S3method(simulate_model,rq) S3method(simulate_model,speedglm) S3method(simulate_model,speedlm) S3method(simulate_model,survreg) S3method(simulate_model,svyglm.nb) S3method(simulate_model,svyglm.zip) S3method(simulate_model,tobit) S3method(simulate_model,truncreg) S3method(simulate_model,vgam) S3method(simulate_model,vglm) S3method(simulate_model,zerocount) S3method(simulate_model,zeroinfl) S3method(simulate_parameters,default) S3method(simulate_parameters,multinom) S3method(skewness,data.frame) S3method(skewness,default) S3method(skewness,matrix) S3method(skewness,numeric) S3method(sort,parameters_efa) S3method(sort,parameters_pca) S3method(standard_error,BBmm) S3method(standard_error,BBreg) S3method(standard_error,DirichletRegModel) S3method(standard_error,LORgee) S3method(standard_error,MCMCglmm) S3method(standard_error,MixMod) S3method(standard_error,aareg) S3method(standard_error,anova) S3method(standard_error,aov) S3method(standard_error,aovlist) S3method(standard_error,betareg) S3method(standard_error,biglm) S3method(standard_error,bracl) S3method(standard_error,brmultinom) S3method(standard_error,censReg) S3method(standard_error,cgam) S3method(standard_error,character) S3method(standard_error,clm2) S3method(standard_error,clmm2) S3method(standard_error,complmrob) S3method(standard_error,coxme) S3method(standard_error,coxph) S3method(standard_error,cpglm) S3method(standard_error,cpglmm) S3method(standard_error,crch) S3method(standard_error,crq) S3method(standard_error,data.frame) S3method(standard_error,default) S3method(standard_error,effectsize_std_params) S3method(standard_error,factor) S3method(standard_error,feglm) S3method(standard_error,fixest) S3method(standard_error,flexsurvreg) S3method(standard_error,gam) S3method(standard_error,gamlss) S3method(standard_error,gamm) S3method(standard_error,gamm4) S3method(standard_error,gee) S3method(standard_error,geeglm) S3method(standard_error,glimML) S3method(standard_error,glm) S3method(standard_error,glmmTMB) S3method(standard_error,glmx) S3method(standard_error,gls) S3method(standard_error,gmnl) S3method(standard_error,htest) S3method(standard_error,hurdle) S3method(standard_error,ivreg) S3method(standard_error,list) S3method(standard_error,lm) S3method(standard_error,lm_robust) S3method(standard_error,lme) S3method(standard_error,logistf) S3method(standard_error,lrm) S3method(standard_error,merMod) S3method(standard_error,mixor) S3method(standard_error,mlm) S3method(standard_error,multinom) S3method(standard_error,negbin) S3method(standard_error,nlrq) S3method(standard_error,numeric) S3method(standard_error,ols) S3method(standard_error,plm) S3method(standard_error,polr) S3method(standard_error,psm) S3method(standard_error,rma) S3method(standard_error,rms) S3method(standard_error,rq) S3method(standard_error,rqss) S3method(standard_error,survreg) S3method(standard_error,svyglm) S3method(standard_error,svyglm.nb) 
S3method(standard_error,svyglm.zip) S3method(standard_error,table) S3method(standard_error,tobit) S3method(standard_error,truncreg) S3method(standard_error,vgam) S3method(standard_error,vglm) S3method(standard_error,wbgee) S3method(standard_error,wbm) S3method(standard_error,xtabs) S3method(standard_error,zerocount) S3method(standard_error,zeroinfl) S3method(standardize_names,default) S3method(standardize_names,parameters_model) S3method(summary,n_clusters) S3method(summary,n_factors) S3method(summary,parameters_clusters) S3method(summary,parameters_efa) S3method(summary,parameters_omega) S3method(summary,parameters_pca) export(DRR) export(ICA) export(bootstrap_model) export(bootstrap_parameters) export(check_clusterstructure) export(check_factorstructure) export(check_kmo) export(check_multimodal) export(check_sphericity) export(ci) export(ci_betwithin) export(ci_kenward) export(ci_ml1) export(ci_robust) export(ci_satterthwaite) export(ci_wald) export(closest_component) export(cluster_analysis) export(cluster_discrimination) export(cmds) export(convert_data_to_numeric) export(convert_efa_to_cfa) export(data_partition) export(data_to_numeric) export(degrees_of_freedom) export(demean) export(describe_distribution) export(dof) export(dof_betwithin) export(dof_kenward) export(dof_ml1) export(dof_satterthwaite) export(efa_to_cfa) export(equivalence_test) export(factor_analysis) export(format_algorithm) export(format_bf) export(format_model) export(format_number) export(format_order) export(format_p) export(format_parameters) export(format_pd) export(format_rope) export(get_scores) export(kurtosis) export(model_bootstrap) export(model_parameters) export(model_simulate) export(n_clusters) export(n_factors) export(n_parameters) export(p_value) export(p_value_betwithin) export(p_value_kenward) export(p_value_ml1) export(p_value_robust) export(p_value_satterthwaite) export(p_value_wald) export(parameters) export(parameters_bootstrap) export(parameters_reduction) export(parameters_selection) export(parameters_simulate) export(parameters_table) export(parameters_type) export(principal_components) export(random_parameters) export(reduce_parameters) export(rescale_weights) export(reshape_loadings) export(se_betwithin) export(se_kenward) export(se_ml1) export(se_satterthwaite) export(select_parameters) export(simulate_model) export(simulate_parameters) export(skewness) export(smoothness) export(standard_error) export(standard_error_robust) export(standardize_names) importFrom(bayestestR,bayesfactor_models) importFrom(bayestestR,bayesian_as_frequentist) importFrom(bayestestR,ci) importFrom(bayestestR,convert_pd_to_p) importFrom(bayestestR,describe_posterior) importFrom(bayestestR,equivalence_test) importFrom(bayestestR,p_direction) importFrom(bayestestR,rope_range) importFrom(grDevices,colorRampPalette) importFrom(grDevices,dev.off) importFrom(grDevices,png) importFrom(insight,clean_names) importFrom(insight,clean_parameters) importFrom(insight,find_algorithm) importFrom(insight,find_parameters) importFrom(insight,find_predictors) importFrom(insight,find_random) importFrom(insight,find_random_slopes) importFrom(insight,find_response) importFrom(insight,find_terms) importFrom(insight,format_ci) importFrom(insight,format_table) importFrom(insight,format_value) importFrom(insight,get_data) importFrom(insight,get_parameters) importFrom(insight,get_priors) importFrom(insight,get_random) importFrom(insight,get_statistic) importFrom(insight,get_varcov) importFrom(insight,get_variance) 
importFrom(insight,has_intercept) importFrom(insight,model_info) importFrom(insight,n_obs) importFrom(insight,print_color) importFrom(insight,print_colour) importFrom(methods,slot) importFrom(stats,ave) importFrom(stats,coef) importFrom(stats,complete.cases) importFrom(stats,confint) importFrom(stats,cor) importFrom(stats,cov2cor) importFrom(stats,cutree) importFrom(stats,df.residual) importFrom(stats,dist) importFrom(stats,hclust) importFrom(stats,heatmap) importFrom(stats,kmeans) importFrom(stats,lm) importFrom(stats,logLik) importFrom(stats,model.matrix) importFrom(stats,na.omit) importFrom(stats,pchisq) importFrom(stats,pnorm) importFrom(stats,prcomp) importFrom(stats,predict) importFrom(stats,pt) importFrom(stats,qnorm) importFrom(stats,qt) importFrom(stats,reshape) importFrom(stats,runif) importFrom(stats,sd) importFrom(stats,setNames) importFrom(stats,sigma) importFrom(stats,step) importFrom(stats,update) importFrom(stats,var) importFrom(stats,vcov) importFrom(tools,toTitleCase) importFrom(utils,capture.output) importFrom(utils,head) importFrom(utils,tail)

parameters/README.md

# parameters

[![CRAN](http://www.r-pkg.org/badges/version/parameters)](https://cran.r-project.org/package=parameters)
[![downloads](http://cranlogs.r-pkg.org/badges/parameters)](https://cran.r-project.org/package=parameters)
[![Build Status](https://travis-ci.org/easystats/parameters.svg?branch=master)](https://travis-ci.org/easystats/parameters)

***Describe and understand your model’s parameters!***

`parameters`’ primary goal is to provide utilities for processing the parameters of various statistical models. Beyond computing ***p*-values**, **CIs**, **Bayesian indices** and other measures for a wide variety of models, the package also implements features such as **bootstrapping** of parameters and models and **feature reduction** (feature extraction and variable selection).
## Installation

Run the following:

``` r
install.packages("parameters")
```

``` r
library("parameters")
```

## Documentation

[![Documentation](https://img.shields.io/badge/documentation-parameters-orange.svg?colorB=E91E63)](https://easystats.github.io/parameters/)
[![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/)
[![Features](https://img.shields.io/badge/features-parameters-orange.svg?colorB=2196F3)](https://easystats.github.io/parameters/reference/index.html)

Click on the buttons above to access the package [documentation](https://easystats.github.io/parameters/) and the [easystats blog](https://easystats.github.io/blog/posts/), and check out these vignettes:

  - [Summary of Model Parameters](https://easystats.github.io/parameters/articles/model_parameters.html)
  - [Standardized Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_standardized.html)
  - [Robust Estimation of Standard Errors, Confidence Intervals and p-values](https://easystats.github.io/parameters/articles/model_parameters_robust.html)
  - [Parameters selection](https://easystats.github.io/parameters/articles/parameters_selection.html)
  - [Feature reduction (PCA, cMDS, ICA…)](https://easystats.github.io/parameters/articles/parameters_reduction.html)
  - [Structural models (EFA, CFA, SEM…)](https://easystats.github.io/parameters/articles/efa_cfa.html)

# Features

## Model parameters description

The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function (which can also be accessed via the `parameters()` shortcut) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered a lightweight alternative to [`broom::tidy()`](https://github.com/tidymodels/broom), with some notable differences:

  - The column names of the returned data frame are **specific** to their content. For instance, the column containing the statistic is named following the statistic name, i.e., *t*, *z*, etc., instead of a generic name such as *statistic* (**however**, you can get standardized (generic) column names using [`standardize_names()`](https://easystats.github.io/parameters/reference/standardize_names.html); see the sketch just below this list).
  - It is able to compute or extract indices not available by default, such as ***p*-values**, **CIs**, etc.
  - It includes **feature engineering** capabilities, including parameter [**bootstrapping**](https://easystats.github.io/parameters/reference/bootstrap_parameters.html).
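For instance, here is a minimal, hedged sketch of that renaming, assuming `standardize_names()` accepts a `style` argument with a `"broom"` option:

``` r
model <- lm(mpg ~ wt + cyl, data = mtcars)

# specific column names ("Coefficient", "t", "p", ...)
params <- model_parameters(model)

# mapped to generic, broom-style names ("estimate", "statistic", "p.value", ...)
standardize_names(params, style = "broom")
```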
### Classical Regression Models

``` r
model <- lm(Sepal.Width ~ Petal.Length * Species + Petal.Width, data = iris)

# regular model parameters
model_parameters(model)
# Parameter | Coefficient | SE | 95% CI | t | df | p
# ------------------------------------------------------------------------------------------------
# (Intercept) | 2.89 | 0.36 | [ 2.18, 3.60] | 8.01 | 143 | < .001
# Petal.Length | 0.26 | 0.25 | [-0.22, 0.75] | 1.07 | 143 | 0.287
# Species [versicolor] | -1.66 | 0.53 | [-2.71, -0.62] | -3.14 | 143 | 0.002
# Species [virginica] | -1.92 | 0.59 | [-3.08, -0.76] | -3.28 | 143 | 0.001
# Petal.Width | 0.62 | 0.14 | [ 0.34, 0.89] | 4.41 | 143 | < .001
# Petal.Length * Species [versicolor] | -0.09 | 0.26 | [-0.61, 0.42] | -0.36 | 143 | 0.721
# Petal.Length * Species [virginica] | -0.13 | 0.26 | [-0.64, 0.38] | -0.50 | 143 | 0.618

# standardized parameters
model_parameters(model, standardize = "refit")
# Parameter | Coefficient | SE | 95% CI | t | df | p
# ------------------------------------------------------------------------------------------------
# (Intercept) | 3.59 | 1.30 | [ 1.01, 6.17] | 2.75 | 143 | 0.007
# Petal.Length | 1.07 | 1.00 | [-0.91, 3.04] | 1.07 | 143 | 0.287
# Species [versicolor] | -4.62 | 1.31 | [-7.21, -2.03] | -3.53 | 143 | < .001
# Species [virginica] | -5.51 | 1.38 | [-8.23, -2.79] | -4.00 | 143 | < .001
# Petal.Width | 1.08 | 0.24 | [ 0.59, 1.56] | 4.41 | 143 | < .001
# Petal.Length * Species [versicolor] | -0.38 | 1.06 | [-2.48, 1.72] | -0.36 | 143 | 0.721
# Petal.Length * Species [virginica] | -0.52 | 1.04 | [-2.58, 1.54] | -0.50 | 143 | 0.618
```

### Mixed Models

``` r
library(lme4)

model <- lmer(Sepal.Width ~ Petal.Length + (1|Species), data = iris)

# model parameters with CI, df and p-values based on Wald approximation
model_parameters(model)
# Parameter | Coefficient | SE | 95% CI | t | df | p
# ----------------------------------------------------------------------
# (Intercept) | 2.00 | 0.56 | [0.90, 3.10] | 3.56 | 146 | < .001
# Petal.Length | 0.28 | 0.06 | [0.17, 0.40] | 4.75 | 146 | < .001

# model parameters with CI, df and p-values based on Kenward-Roger approximation
model_parameters(model, df_method = "kenward")
# Parameter | Coefficient | SE | 95% CI | t | df | p
# -------------------------------------------------------------------------
# (Intercept) | 2.00 | 0.57 | [0.89, 3.11] | 3.53 | 2.67 | 0.046
# Petal.Length | 0.28 | 0.06 | [0.16, 0.40] | 4.58 | 140.99 | < .001
```

### Structural Models

Besides many types of regression models and packages, it also works for other types of models, such as [**structural models**](https://easystats.github.io/parameters/articles/efa_cfa.html) (EFA, CFA, SEM…).

``` r
library(psych)

model <- psych::fa(attitude, nfactors = 3)
model_parameters(model)
#
# Rotated loadings from Principal Component Analysis (oblimin-rotation)
#
# Variable | MR1 | MR2 | MR3 | Complexity | Uniqueness
# ------------------------------------------------------------
# rating | 0.90 | -0.07 | -0.05 | 1.02 | 0.23
# complaints | 0.97 | -0.06 | 0.04 | 1.01 | 0.10
# privileges | 0.44 | 0.25 | -0.05 | 1.64 | 0.65
# learning | 0.47 | 0.54 | -0.28 | 2.51 | 0.24
# raises | 0.55 | 0.43 | 0.25 | 2.35 | 0.23
# critical | 0.16 | 0.17 | 0.48 | 1.46 | 0.67
# advance | -0.11 | 0.91 | 0.07 | 1.04 | 0.22
#
# The 3 latent factors (oblimin rotation) accounted for 66.60% of the total variance of the original data (MR1 = 38.19%, MR2 = 22.69%, MR3 = 5.72%).
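
# As a hedged follow-up sketch (not part of the original output above):
# the EFA solution can be converted into lavaan-compatible CFA syntax.
# convert_efa_to_cfa() provides a method for 'fa' objects, and
# efa_to_cfa() is the exported shorthand alias.
efa_to_cfa(model)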
```

## Variable and parameters selection

[`select_parameters()`](https://easystats.github.io/parameters/articles/parameters_selection.html) (also exported as `parameters_selection()`) can help you quickly select and retain the most relevant predictors using methods tailored for the model type.

``` r
library(dplyr)

lm(disp ~ ., data = mtcars) %>%
  select_parameters() %>%
  model_parameters()
# Parameter | Coefficient | SE | 95% CI | t | df | p
# ----------------------------------------------------------------------------
# (Intercept) | 141.70 | 125.67 | [-116.62, 400.02] | 1.13 | 26 | 0.270
# cyl | 13.14 | 7.90 | [ -3.10, 29.38] | 1.66 | 26 | 0.108
# hp | 0.63 | 0.20 | [ 0.22, 1.03] | 3.18 | 26 | 0.004
# wt | 80.45 | 12.22 | [ 55.33, 105.57] | 6.58 | 26 | < .001
# qsec | -14.68 | 6.14 | [ -27.31, -2.05] | -2.39 | 26 | 0.024
# carb | -28.75 | 5.60 | [ -40.28, -17.23] | -5.13 | 26 | < .001
```

## Miscellaneous

This package also contains many [other useful functions](https://easystats.github.io/parameters/reference/index.html):

### Describe a Distribution

``` r
data(iris)
describe_distribution(iris)
# Variable | Mean | SD | Min | Max | Skewness | Kurtosis | n | n_Missing
# --------------------------------------------------------------------------------
# Sepal.Length | 5.84 | 0.83 | 4.30 | 7.90 | 0.31 | -0.55 | 150 | 0
# Sepal.Width | 3.06 | 0.44 | 2.00 | 4.40 | 0.32 | 0.23 | 150 | 0
# Petal.Length | 3.76 | 1.77 | 1.00 | 6.90 | -0.27 | -1.40 | 150 | 0
# Petal.Width | 1.20 | 0.76 | 0.10 | 2.50 | -0.10 | -1.34 | 150 | 0
```

### Citation

In order to cite this package, please use the following citation:

  - Makowski D, Ben-Shachar M, Lüdecke D (2019). “Describe and understand your model’s parameters.” CRAN. R package, <https://github.com/easystats/parameters>.

Corresponding BibTeX entry:

    @Article{,
      title = {Describe and understand your model's parameters},
      author = {Dominique Makowski and Mattan S. Ben-Shachar and Daniel Lüdecke},
      journal = {CRAN},
      year = {2019},
      note = {R package},
      url = {https://github.com/easystats/parameters},
    }

parameters/data/fish.RData

(binary RData payload omitted)
parameters/man/format_bf.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_bf.R
\name{format_bf}
\alias{format_bf}
\title{Bayes Factor formatting}
\usage{
format_bf(bf, stars = FALSE, stars_only = FALSE, name = "BF")
}
\arguments{
\item{bf}{Bayes Factor.}

\item{stars}{Add significance stars (e.g., p < .001***).}

\item{stars_only}{Return only significance stars.}

\item{name}{Name prefixing the text. Can be \code{NULL}.}
}
\value{
A formatted string.
}
\description{
Bayes Factor formatting
}
\examples{
format_bf(1.20)
format_bf(c(1.20, 1557, 3.5, 12), stars = TRUE)
format_bf(c(1.20, 1557, 3.5, 12), name = NULL)
}

parameters/man/model_parameters.lavaan.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_parameters.lavaan.R
\name{model_parameters.lavaan}
\alias{model_parameters.lavaan}
\title{Parameters from CFA/SEM models}
\usage{
\method{model_parameters}{lavaan}(
  model,
  ci = 0.95,
  standardize = FALSE,
  type = c("regression", "correlation", "loading"),
  ...
)
}
\arguments{
\item{model}{A CFA or SEM model created by the \code{lavaan::cfa} or \code{lavaan::sem} functions.}

\item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).}

\item{standardize}{Return standardized parameters (standardized coefficients). See \code{lavaan::standardizedsolution}.}

\item{type}{What type of links to return. Can be \code{"all"} or some of \code{c("regression", "correlation", "loading", "variance", "mean")}.}

\item{...}{Arguments passed to or from other methods.}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Format CFA/SEM objects from the (b)lavaan package (Rosseel, 2012; Merkle and Rosseel, 2018).
}
\examples{
library(parameters)

# lavaan -------------------------------------
if (require("lavaan")) {

  # Confirmatory Factor Analysis (CFA) ---------
  structure <- " visual  =~ x1 + x2 + x3
                 textual =~ x4 + x5 + x6
                 speed   =~ x7 + x8 + x9 "
  model <- lavaan::cfa(structure, data = HolzingerSwineford1939)
  model_parameters(model)
  model_parameters(model, standardize = TRUE)

  # Structural Equation Model (SEM) ------------
  structure <- "
    # latent variable definitions
    ind60 =~ x1 + x2 + x3
    dem60 =~ y1 + a*y2 + b*y3 + c*y4
    dem65 =~ y5 + a*y6 + b*y7 + c*y8
    # regressions
    dem60 ~ ind60
    dem65 ~ ind60 + dem60
    # residual correlations
    y1 ~~ y5
    y2 ~~ y4 + y6
    y3 ~~ y7
    y4 ~~ y8
    y6 ~~ y8
  "
  model <- lavaan::sem(structure, data = PoliticalDemocracy)
  model_parameters(model)
  model_parameters(model, standardize = TRUE)
}
}
\references{
\itemize{
\item Rosseel Y (2012). lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36.
\item Merkle EC, Rosseel Y (2018). blavaan: Bayesian Structural Equation Models via Parameter Expansion. Journal of Statistical Software, 85(4), 1-30.
http://www.jstatsoft.org/v85/i04/
}
}

parameters/man/model_parameters.Mclust.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_parameters.Mclust.R
\name{model_parameters.Mclust}
\alias{model_parameters.Mclust}
\title{Parameters from Mixture Models}
\usage{
\method{model_parameters}{Mclust}(model, ...)
}
\arguments{
\item{model}{Mixture model.}

\item{...}{Arguments passed to or from other methods.}
}
\description{
Format mixture models obtained, for example, by \code{mclust::Mclust}.
}
\examples{
library(parameters)
library(mclust)

model <- mclust::Mclust(iris[1:4], verbose = FALSE)
model_parameters(model)
}

parameters/man/dot-find_most_common.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{.find_most_common}
\alias{.find_most_common}
\title{Find most common occurrence}
\usage{
.find_most_common(x)
}
\description{
Find most common occurrence
}
\keyword{internal}

parameters/man/standard_error.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standard_error.R
\name{standard_error}
\alias{standard_error}
\alias{standard_error.factor}
\alias{standard_error.default}
\alias{standard_error.merMod}
\alias{standard_error.glmmTMB}
\alias{standard_error.MixMod}
\alias{standard_error.zeroinfl}
\alias{standard_error.coxph}
\alias{standard_error.mixor}
\alias{standard_error.clm2}
\alias{standard_error.betareg}
\alias{standard_error.DirichletRegModel}
\title{Standard Errors}
\usage{
standard_error(model, ...)

\method{standard_error}{factor}(model, force = FALSE, verbose = TRUE, ...)

\method{standard_error}{default}(model, method = NULL, ...)

\method{standard_error}{merMod}(model, effects = c("fixed", "random"), method = NULL, ...)

\method{standard_error}{glmmTMB}(
  model,
  effects = c("fixed", "random"),
  component = c("all", "conditional", "zi", "zero_inflated"),
  ...
)

\method{standard_error}{MixMod}(
  model,
  effects = c("fixed", "random"),
  component = c("all", "conditional", "zi", "zero_inflated"),
  ...
)

\method{standard_error}{zeroinfl}(
  model,
  component = c("all", "conditional", "zi", "zero_inflated"),
  method = NULL,
  ...
)

\method{standard_error}{coxph}(model, method = NULL, ...)

\method{standard_error}{mixor}(model, effects = c("all", "fixed", "random"), ...)

\method{standard_error}{clm2}(model, component = c("all", "conditional", "scale"), ...)

\method{standard_error}{betareg}(model, component = c("all", "conditional", "precision"), ...)

\method{standard_error}{DirichletRegModel}(model, component = c("all", "conditional", "precision"), ...)
}
\arguments{
\item{model}{A model.}

\item{...}{Arguments passed to or from other methods. For \code{standard_error()}, if \code{method = "robust"}, arguments \code{vcov_estimation}, \code{vcov_type} and \code{vcov_args} can be passed down to \code{\link[=standard_error_robust]{standard_error_robust()}}.}

\item{force}{Logical, if \code{TRUE}, factors are converted to numerical values to calculate the standard error, with the lowest level being the value \code{1} (unless the factor has numeric levels, which are converted to the corresponding numeric value).
By default, \code{NA} is returned for factors or character vectors.}

\item{verbose}{Toggle off warnings.}

\item{method}{If \code{"robust"}, robust standard errors are computed by calling \code{\link[=standard_error_robust]{standard_error_robust()}}. \code{standard_error_robust()}, in turn, calls one of the \code{vcov*()}-functions from the \pkg{sandwich} or \pkg{clubSandwich} package for robust covariance matrix estimators. For certain mixed models, \code{method} may also be one of \code{"wald"}, \code{\link[=p_value_ml1]{"ml1"}}, \code{\link[=p_value_betwithin]{"betwithin"}}, \code{\link[=p_value_satterthwaite]{"satterthwaite"}} or \code{\link[=p_value_kenward]{"kenward"}}.}

\item{effects}{Should standard errors for fixed effects or random effects be returned? Only applies to mixed models. May be abbreviated. When standard errors for random effects are requested, for each grouping factor a list of standard errors (per group level) for random intercepts and slopes is returned.}

\item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"} or \code{"all"} (default). May be abbreviated.}
}
\value{
A data frame.
}
\description{
\code{standard_error()} attempts to return standard errors of model parameters, while \code{standard_error_robust()} attempts to return robust standard errors.
}
\examples{
model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris)
standard_error(model)
}

parameters/man/cmds.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reduce_parameters.R
\name{cmds}
\alias{cmds}
\title{Classical Multidimensional Scaling (cMDS)}
\usage{
cmds(x, n = "all", distance = "euclidean", ...)
}
\arguments{
\item{x}{A dataframe or a statistical model.}

\item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link{n_factors}}. In \code{\link{reduce_parameters}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.}

\item{distance}{The distance measure to be used. This must be one of "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski". Any unambiguous substring can be given.}

\item{...}{Arguments passed to or from other methods.}
}
\description{
Also referred to as Principal Coordinates Analysis (PCoA), Classical Multidimensional Scaling (cMDS) takes a set of dissimilarities (i.e., a distance matrix) and returns a set of points such that the distances between the points are approximately equal to the dissimilarities.
}
\examples{
cmds(iris[, 1:4])
}
\references{
\itemize{
\item Nguyen, L. H., \& Holmes, S. (2019). Ten quick tips for effective dimensionality reduction. PLOS Computational Biology, 15(6).
}
}

parameters/man/check_factorstructure.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_factorstructure.R
\name{check_factorstructure}
\alias{check_factorstructure}
\title{Check suitability of data for Factor Analysis (FA)}
\usage{
check_factorstructure(x, ...)
}
\arguments{
\item{x}{A dataframe.}

\item{...}{Arguments passed to or from other methods.}
}
\value{
A list of lists of indices related to sphericity and KMO.
}
\description{
This checks whether the data is appropriate for Factor Analysis (FA) by running \link[=check_sphericity]{Bartlett's Test of Sphericity} and the \link[=check_kmo]{Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA)}.
}
\examples{
library(parameters)
check_factorstructure(mtcars)
}
\seealso{
check_kmo, check_sphericity, check_clusterstructure
}

parameters/man/model_parameters.gam.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_parameters.gam.R
\name{model_parameters.gam}
\alias{model_parameters.gam}
\alias{model_parameters.rqss}
\alias{model_parameters.cgam}
\title{Parameters from Generalized Additive (Mixed) Models}
\usage{
\method{model_parameters}{gam}(
  model,
  ci = 0.95,
  bootstrap = FALSE,
  iterations = 1000,
  standardize = NULL,
  exponentiate = FALSE,
  robust = FALSE,
  ...
)

\method{model_parameters}{rqss}(
  model,
  ci = 0.95,
  bootstrap = FALSE,
  iterations = 1000,
  component = c("conditional", "smooth_terms", "all"),
  standardize = NULL,
  exponentiate = FALSE,
  ...
)

\method{model_parameters}{cgam}(
  model,
  ci = 0.95,
  bootstrap = FALSE,
  iterations = 1000,
  component = c("conditional", "smooth_terms", "all"),
  standardize = NULL,
  exponentiate = FALSE,
  ...
)
}
\arguments{
\item{model}{A gam/gamm model.}

\item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).}

\item{bootstrap}{Should estimates be based on a bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=parameters_bootstrap]{parameters_bootstrap()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies to bootstrapped frequentist models.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{"refit"}, \code{"posthoc"}, \code{"smart"}, \code{"basic"} or \code{NULL} (default) for no standardization. See 'Details' in \code{\link[effectsize]{standardize_parameters}}. Note that robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}.}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for, say, logistic regressions, or more generally speaking: for models with log or logit link.}

\item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details.}

\item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.}

\item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}) or \code{"all"}.}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Extract and compute indices and measures to describe parameters of generalized additive models (GAM(M)s).
}
\examples{
library(parameters)
library(mgcv)

dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
model <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)
model_parameters(model)
}
\seealso{
\code{\link[=standardize_names]{standardize_names()}} to rename columns into a consistent, standardized naming scheme.
}

parameters/man/format_order.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_order.R
\name{format_order}
\alias{format_order}
\title{Order (first, second, ...) formatting}
\usage{
format_order(order, textual = TRUE, ...)
}
\arguments{
\item{order}{Value or vector of orders.}

\item{textual}{Return number as words. If \code{FALSE}, will run \code{\link[insight]{format_value}}.}

\item{...}{Arguments to be passed to \code{\link[insight]{format_value}} if \code{textual} is \code{FALSE}.}
}
\value{
A formatted string.
}
\description{
Format order.
}
\examples{
format_order(2)
format_order(8)
format_order(25, textual = FALSE)
}

parameters/man/model_parameters.zeroinfl.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_parameters.zeroinfl.R
\name{model_parameters.zeroinfl}
\alias{model_parameters.zeroinfl}
\title{Parameters from Zero-Inflated Models}
\usage{
\method{model_parameters}{zeroinfl}(
  model,
  ci = 0.95,
  bootstrap = FALSE,
  iterations = 1000,
  component = c("all", "conditional", "zi", "zero_inflated"),
  standardize = NULL,
  exponentiate = FALSE,
  robust = FALSE,
  ...
)
}
\arguments{
\item{model}{A model with zero-inflation component.}

\item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).}

\item{bootstrap}{Should estimates be based on a bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=parameters_bootstrap]{parameters_bootstrap()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies to bootstrapped frequentist models.}

\item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}) or \code{"all"}.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{"refit"}, \code{"posthoc"}, \code{"smart"}, \code{"basic"} or \code{NULL} (default) for no standardization. See 'Details' in \code{\link[effectsize]{standardize_parameters}}. Note that robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}.}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for, say, logistic regressions, or more generally speaking: for models with log or logit link.}

\item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details.}

\item{...}{Arguments passed to or from other methods.
For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Parameters from zero-inflated models.
}
\examples{
library(parameters)

if (require("pscl")) {
  data("bioChemists")
  model <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists)
  model_parameters(model)
}
}
\seealso{
\code{\link[=standardize_names]{standardize_names()}} to rename columns into a consistent, standardized naming scheme.
}

parameters/man/dot-n_factors_scree.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n_factors.R
\name{.n_factors_scree}
\alias{.n_factors_scree}
\title{Non Graphical Cattell's Scree Test}
\usage{
.n_factors_scree(eigen_values = NULL, model = "factors")
}
\description{
Non Graphical Cattell's Scree Test
}
\keyword{internal}

parameters/man/check_kmo.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_factorstructure.R
\name{check_kmo}
\alias{check_kmo}
\title{Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA) for Factor Analysis}
\usage{
check_kmo(x, ...)
}
\arguments{
\item{x}{A dataframe.}

\item{...}{Arguments passed to or from other methods.}
}
\value{
A list of indices related to KMO.
}
\description{
Kaiser (1970) introduced a Measure of Sampling Adequacy (MSA), later modified by Kaiser and Rice (1974). The Kaiser-Meyer-Olkin (KMO) statistic, which can vary from 0 to 1, indicates the degree to which each variable in a set is predicted without error by the other variables.
}
\details{
A value of 0 indicates that the sum of partial correlations is large relative to the sum of correlations, indicating factor analysis is likely to be inappropriate. A KMO value close to 1 indicates that the sum of partial correlations is not large relative to the sum of correlations and so factor analysis should yield distinct and reliable factors.

Kaiser (1975) suggested that KMO > .9 were marvelous, in the .80s, meritorious, in the .70s, middling, in the .60s, mediocre, in the .50s, miserable, and less than .5, unacceptable. Hair et al. (2006) suggest accepting a value > 0.5. Values between 0.5 and 0.7 are mediocre, and values between 0.7 and 0.8 are good.

This function is strongly inspired by the \code{KMO} function in the \code{psych} package (Revelle, 2016). All credits go to its author.
}
\examples{
library(parameters)
check_kmo(mtcars)
}
\references{
\itemize{
\item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction.
\item Kaiser, H. F. (1970). A second generation little jiffy. Psychometrika, 35(4), 401-415.
\item Kaiser, H. F., & Rice, J. (1974). Little jiffy, mark IV. Educational and psychological measurement, 34(1), 111-117.
\item Kaiser, H. F. (1974). An index of factorial simplicity. Psychometrika, 39(1), 31-36.
}
}

parameters/man/demean.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/demean.R
\name{demean}
\alias{demean}
\title{Compute group-meaned and de-meaned variables}
\usage{
demean(
  x,
  select,
  group,
  suffix_demean = "_within",
  suffix_groupmean = "_between"
)
}
\arguments{
\item{x}{A data frame.}

\item{select}{Character vector with names of variables to select that should be group- and de-meaned.}

\item{group}{Name of the variable that indicates the group- or cluster-ID.}

\item{suffix_demean, suffix_groupmean}{String value, will be appended to the names of the group-meaned and de-meaned variables of \code{x}. By default, de-meaned variables will be suffixed with \code{"_within"} and group-meaned variables with \code{"_between"}.}
}
\value{
A data frame with the group-/de-meaned variables, which get the suffix \code{"_between"} (for the group-meaned variable) and \code{"_within"} (for the de-meaned variable) by default.
}
\description{
\code{demean()} computes group- and de-meaned versions of a variable that can be used in regression analysis to model the between- and within-subject effect.
}
\details{
\subsection{Panel data and correlating fixed and group effects}{
\code{demean()} is intended to create group- and de-meaned variables for panel regression models (fixed effects models), or for complex random-effect-within-between models (see \cite{Bell et al. 2018}), where group-effects (random effects) and fixed effects correlate (see \cite{Bafumi and Gelman 2006}). This violation of one of the \emph{Gauss-Markov-assumptions} can happen, for instance, when analysing panel data. To control for correlating predictors and group effects, it is recommended to include the group-meaned and de-meaned version of \emph{time-varying covariates} in the model. By this, one can fit complex multilevel models for panel data, including time-varying predictors, time-invariant predictors and random effects. This approach is superior to classic fixed-effects models, which lack information of variation in the group-effects or between-subject effects.
}

\subsection{Terminology}{
The group-meaned variable is simply the mean of an independent variable within each group (or id-level or cluster) represented by \code{group}. It represents the cluster-mean of an independent variable. The de-meaned variable is then the centered version of the group-meaned variable. De-meaning is sometimes also called person-mean centering or centering within clusters.
}

\subsection{De-meaning with continuous predictors}{
For continuous time-varying predictors, the recommendation is to include both their de-meaned and group-meaned versions as fixed effects, but not the raw (untransformed) time-varying predictors themselves. The de-meaned predictor should also be included as random effect (random slope). In regression models, the coefficient of the de-meaned predictors indicates the within-subject effect, while the coefficient of the group-meaned predictor indicates the between-subject effect.
}

\subsection{De-meaning with binary predictors}{
For binary time-varying predictors, the recommendation is to include the raw (untransformed) binary predictor as fixed effect only and the \emph{de-meaned} variable as random effect (random slope) (\cite{Hoffman 2015, chapter 8-2.I}). \code{demean()} will thus coerce categorical time-varying predictors to numeric to compute the de- and group-meaned versions for these variables.
}

\subsection{De-meaning interaction terms}{
There are multiple ways to deal with interaction terms of within- and between-effects. A classical approach is to simply use the product term of the de-meaned variables (i.e. introducing the de-meaned variables as interaction term in the model formula, e.g. \code{y ~ x_within * time_within}). This approach, however, might be subject to bias (see \cite{Giesselmann & Schmidt-Catran 2018}).
\cr \cr
Another option is to first calculate the product term and then apply the de-meaning to it. This approach produces an estimator \dQuote{that reflects unit-level differences of interacted variables whose moderators vary within units}, which is desirable if \emph{no} within interaction of two time-dependent variables is required.
\cr \cr
A third option, when the interaction should result in a genuine within estimator, is to "double de-mean" the interaction terms (\cite{Giesselmann & Schmidt-Catran 2018}), however, this is currently not supported by \code{demean()}. If this is required, the \code{wbm()} function from the \pkg{panelr} package should be used.
\cr \cr
To de-mean interaction terms for within-between models, simply specify the term as interaction for the \code{select}-argument, e.g. \code{select = "a*b"} (see 'Examples').
}

\subsection{Analysing panel data with mixed models using lme4}{
A description of how to translate the formulas described in \emph{Bell et al. 2018} into R using \code{lmer()} from \pkg{lme4} or \code{glmmTMB()} from \pkg{glmmTMB} can be found here: \href{https://strengejacke.github.io/mixed-models-snippets/random-effects-within-between-effects-model.html}{for lmer()} and \href{https://strengejacke.github.io/mixed-models-snippets/random-effects-within-between-effects-model-glmmtmb.html}{for glmmTMB()}.
}
}
\examples{
data(iris)
iris$ID <- sample(1:4, nrow(iris), replace = TRUE) # fake-ID
iris$binary <- as.factor(rbinom(150, 1, .35)) # binary variable

x <- demean(iris, select = c("Sepal.Length", "Petal.Length"), group = ID)
head(x)

x <- demean(iris, select = c("Sepal.Length", "binary", "Species"), group = ID)
head(x)

# demean interaction term x*y
dat <- data.frame(
  a = c(1, 2, 3, 4, 1, 2, 3, 4),
  x = c(4, 3, 3, 4, 1, 2, 1, 2),
  y = c(1, 2, 1, 2, 4, 3, 2, 1),
  ID = c(1, 2, 3, 1, 2, 3, 1, 2)
)
demean(dat, select = c("a", "x*y"), group = "ID")
}
\references{
\itemize{
\item Bafumi J, Gelman A. 2006. Fitting Multilevel Models When Predictors and Group Effects Correlate. In. Philadelphia, PA: Annual meeting of the American Political Science Association.
\item Bell A, Fairbrother M, Jones K. 2018. Fixed and Random Effects Models: Making an Informed Choice. Quality & Quantity.
\item Giesselmann M, Schmidt-Catran A. (2018). Interactions in fixed effects regression models (Discussion Papers of DIW Berlin No. 1748). DIW Berlin, German Institute for Economic Research. Retrieved from https://ideas.repec.org/p/diw/diwwpp/dp1748.html
\item Hoffman L. 2015. Longitudinal analysis: modeling within-person fluctuation and change.
New York: Routledge.
}
}

parameters/man/rescale_weights.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rescale_weights.R
\name{rescale_weights}
\alias{rescale_weights}
\title{Rescale design weights for multilevel analysis}
\usage{
rescale_weights(data, group, probability_weights, nest = FALSE)
}
\arguments{
\item{data}{A data frame.}

\item{group}{Variable names (as character vector), indicating the grouping structure (strata) of the survey data (level-2-cluster variable). It is also possible to create weights for multiple group variables; in such cases, each created weighting variable will be suffixed by the name of the group variable.}

\item{probability_weights}{Variable indicating the probability (design or sampling) weights of the survey data (level-1-weight).}

\item{nest}{Logical, if \code{TRUE} and \code{group} indicates at least two group variables, then groups are "nested", i.e. groups are now a combination from each group level of the variables in \code{group}.}
}
\value{
\code{data}, including the new weighting variables: \code{pweights_a} and \code{pweights_b}, which represent the rescaled design weights to use in multilevel models (use these variables for the \code{weights} argument).
}
\description{
Most functions to fit multilevel and mixed effects models only allow frequency weights to be specified, but not design (i.e. sampling or probability) weights, which should be used when analyzing complex samples and survey data. \code{rescale_weights()} implements an algorithm proposed by \cite{Asparouhov (2006)} and \cite{Carle (2009)} to rescale design weights in survey data to account for the grouping structure of multilevel models, which then can be used for multilevel modelling.
}
\details{
Rescaling is based on two methods: For \code{pweights_a}, the sample weights \code{probability_weights} are adjusted by a factor that represents the proportion of group size divided by the sum of sampling weights within each group. The adjustment factor for \code{pweights_b} is the sum of sample weights within each group divided by the sum of squared sample weights within each group (see \cite{Carle (2009)}, Appendix B).
\cr \cr
Regarding the choice between scaling methods A and B, Carle suggests that "analysts who wish to discuss point estimates should report results based on weighting method A. For analysts more interested in residual between-group variance, method B may generally provide the least biased estimates". In general, it is recommended to fit a non-weighted model and weighted models with both scaling methods and when comparing the models, see whether the "inferential decisions converge", to gain confidence in the results. Though the bias of scaled weights decreases with increasing group size, method A is preferred when insufficient or low group size is a concern.
\cr \cr
The group ID and probably PSU may be used as random effects (e.g. nested design, or group and PSU as varying intercepts), depending on the survey design that should be mimicked.
}
\examples{
if (require("sjstats")) {
  data(nhanes_sample, package = "sjstats")
  head(rescale_weights(nhanes_sample, "SDMVSTRA", "WTINT2YR"))

  # also works with multiple group-variables...
  head(rescale_weights(nhanes_sample, c("SDMVSTRA", "SDMVPSU"), "WTINT2YR"))

  # or nested structures.
x <- rescale_weights( data = nhanes_sample, group = c("SDMVSTRA", "SDMVPSU"), probability_weights = "WTINT2YR", nest = TRUE ) head(x) } if (require("lme4") && require("sjstats")) { data(nhanes_sample, package = "sjstats") nhanes_sample <- rescale_weights(nhanes_sample, "SDMVSTRA", "WTINT2YR") glmer( total ~ factor(RIAGENDR) * (log(age) + factor(RIDRETH1)) + (1 | SDMVPSU), family = poisson(), data = nhanes_sample, weights = pweights_a ) } } \references{ \itemize{ \item Carle A.C. (2009). Fitting multilevel models in complex survey data with design weights: Recommendations. BMC Medical Research Methodology 9(49): 1-13 \item Asparouhov T. (2006). General Multi-Level Modeling with Sampling Weights. Communications in Statistics - Theory and Methods 35: 439-460 } } parameters/man/dot-n_factors_bartlett.Rd0000644000176200001440000000054213620043640020121 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_bartlett} \alias{.n_factors_bartlett} \title{Bartlett, Anderson and Lawley Procedures} \usage{ .n_factors_bartlett(eigen_values = NULL, model = "factors", nobs = NULL) } \description{ Bartlett, Anderson and Lawley Procedures } \keyword{internal} parameters/man/smoothness.Rd0000644000176200001440000000155513566047516015704 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/smoothness.R \name{smoothness} \alias{smoothness} \title{Quantify the smoothness of a vector} \usage{ smoothness(x, method = "cor", lag = 1) } \arguments{ \item{x}{Numeric vector (similar to a time series).} \item{method}{Can be "diff" (the standard deviation of the standardized differences) or "cor" (default, lag-one autocorrelation).} \item{lag}{An integer indicating which lag to use. If less than 1, will be interpreted as expressed in percentage of the length of the vector.} } \value{ Value of smoothness. } \description{ Quantify the smoothness of a vector } \examples{ x <- (-10:10)^3 + rnorm(21, 0, 100) plot(x) smoothness(x, method = "cor") smoothness(x, method = "diff") } \references{ https://stats.stackexchange.com/questions/24607/how-to-measure-smoothness-of-a-time-series-in-r } parameters/man/model_parameters.default.Rd0000644000176200001440000000746013615560005020435 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters_default.R \name{model_parameters.default} \alias{model_parameters.default} \alias{model_parameters.betareg} \alias{model_parameters.clm2} \alias{model_parameters.glmx} \title{Parameters from (General) Linear Models} \usage{ \method{model_parameters}{default}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, ... ) \method{model_parameters}{betareg}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("conditional", "precision", "all"), standardize = NULL, exponentiate = FALSE, ... ) \method{model_parameters}{clm2}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "scale"), standardize = NULL, exponentiate = FALSE, ... ) \method{model_parameters}{glmx}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "extra"), standardize = NULL, exponentiate = FALSE, ... ) } \arguments{ \item{model}{Model object.} \item{ci}{Confidence Interval (CI) level. Default to 0.95 (95\%).} \item{bootstrap}{Should estimates be based on bootstrapped model? 
If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=parameters_bootstrap]{parameters_bootstrap()}}).} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{"refit"}, \code{"posthoc"}, \code{"smart"}, \code{"basic"} or \code{NULL} (default) for no standardization. See 'Details' in \code{\link[effectsize]{standardize_parameters}}. Note that robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for, say, logistic regressions, or more generally speaking: for models with log or logit link.} \item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}) or \code{"all"}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Extract and compute indices and measures to describe parameters of (general) linear models (GLMs). } \examples{ library(parameters) model <- lm(mpg ~ wt + cyl, data = mtcars) model_parameters(model) # bootstrapped parameters model_parameters(model, bootstrap = TRUE) # standardized parameters model_parameters(model, standardize = "refit") # different p-value style in output model_parameters(model, p_digits = 5) model_parameters(model, digits = 3, ci_digits = 4, p_digits = "scientific") # logistic regression model model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") model_parameters(model) # show odds ratio / exponentiated coefficients model_parameters(model, exponentiate = TRUE) } \seealso{ \code{\link[=standardize_names]{standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/reshape_loadings.Rd0000644000176200001440000000227513573554444017002 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reshape_loadings.R \name{reshape_loadings} \alias{reshape_loadings} \alias{reshape_loadings.parameters_efa} \alias{reshape_loadings.data.frame} \title{Reshape loadings between wide/long formats} \usage{ reshape_loadings(x, ...) \method{reshape_loadings}{parameters_efa}(x, threshold = NULL, ...) \method{reshape_loadings}{data.frame}(x, threshold = NULL, loadings_columns = NULL, ...) } \arguments{ \item{x}{A dataframe or a statistical model.} \item{...}{Arguments passed to or from other methods.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain.
Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{loadings_columns}{Vector indicating the columns corresponding to loadings.} } \description{ Reshape loadings between wide/long formats. } \examples{ library(parameters) library(psych) pca <- model_parameters(psych::fa(attitude, nfactors = 3)) loadings <- reshape_loadings(pca) loadings reshape_loadings(loadings) } parameters/man/reduce_parameters.Rd0000644000176200001440000000727113607421652017166 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reduce_parameters.R \name{reduce_parameters} \alias{reduce_parameters} \alias{parameters_reduction} \title{Dimensionality reduction (DR) / Features Reduction} \usage{ reduce_parameters(x, method = "PCA", n = "max", ...) parameters_reduction(x, method = "PCA", n = "max", ...) } \arguments{ \item{x}{A dataframe or a statistical model.} \item{method}{The features reduction method. Can be one of 'PCA', 'cMDS', 'DRR', 'ICA' (see the Details section).} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link{n_factors}}. In \code{\link{reduce_parameters}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{...}{Arguments passed to or from other methods.} } \description{ This function performs a reduction in the parameters space (the number of variables). It starts by creating a new set of variables, based on a given method (the default method is "PCA", but other are available via the \code{method} argument, such as "cMDS", "DRR" or "ICA"). Then, it names this new dimensions using the original variables that correlates the most with it. For instance, a variable named 'V1_0.97/V4_-0.88' means that the V1 and the V4 variables correlate maximally (with respective coefficients of .97 and -.88) with this dimension. Although this function can be useful in exploratory data analysis, it's best to perform the dimension reduction step in a separate and dedicated stage, as this is a very important process in the data analysis workflow. } \details{ The different methods available are described below: \subsection{Supervised Methods}{ \itemize{ \item \strong{PCA}: See \code{\link{principal_components}}. \item \strong{cMDS / PCoA}: See \code{\link{cmds}}. Classical Multidimensional Scaling (cMDS) takes a set of dissimilarities (i.e., a distance matrix) and returns a set of points such that the distances between the points are approximately equal to the dissimilarities. \item \strong{DRR}: See \code{\link{DRR}}. Dimensionality Reduction via Regression (DRR) is a very recent technique extending PCA (Laparra et al., 2015). Starting from a rotated PCA, it predicts redundant information from the remaining components using non-linear regression. Some of the most notable advantages of performing PCR are avoidance of multicollinearity between predictors and overfitting mitigation. PCR tends to perform well when the first principal components are enough to explain most of the variation in the predictors. Requires the \pkg{DRR} package to be installed. \item \strong{ICA}: See \code{\link{ICA}}. Performs an Independent Component Analysis using the FastICA algorithm. 
Contrary to PCA, which attempts to find uncorrelated sources (through least squares minimization), ICA attempts to find independent sources, i.e., the source space that maximizes the "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each source, which makes it a poor tool for dimensionality reduction. Requires the \pkg{fastICA} package to be installed. } } } \examples{ out <- reduce_parameters(iris, method = "PCA", n = "max") head(out) } \references{ \itemize{ \item Nguyen, L. H., \& Holmes, S. (2019). Ten quick tips for effective dimensionality reduction. PLOS Computational Biology, 15(6). \item Laparra, V., Malo, J., \& Camps-Valls, G. (2015). Dimensionality reduction via regression in hyperspectral imagery. IEEE Journal of Selected Topics in Signal Processing, 9(6), 1026-1036. } } parameters/man/fish.Rd0000644000176200001440000000035113620043640014405 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fish.R \docType{data} \name{fish} \alias{fish} \title{Sample data set} \description{ A sample data set, used in tests and some examples. } \keyword{data} parameters/man/model_parameters.kmeans.Rd0000644000176200001440000000105613607421425020265 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.kmeans.R \name{model_parameters.kmeans} \alias{model_parameters.kmeans} \title{Parameters from Cluster Models (k-means, ...)} \usage{ \method{model_parameters}{kmeans}(model, ...) } \arguments{ \item{model}{Cluster model.} \item{...}{Arguments passed to or from other methods.} } \description{ Format cluster models obtained for example by \code{\link{kmeans}}. } \examples{ library(parameters) model <- kmeans(iris[1:4], centers = 3) model_parameters(model) } parameters/man/parameters_type.Rd0000644000176200001440000000255313607421652016676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameters_type.R \name{parameters_type} \alias{parameters_type} \title{Type of model parameters} \usage{ parameters_type(model, ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame.
} \description{ Type of model parameters } \examples{ library(parameters) model <- lm(Sepal.Length ~ Petal.Length + Species, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) parameters_type(model) # Interactions model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Sepal.Width * Species * Petal.Length, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species / Sepal.Width, data = iris) parameters_type(model) # Complex interactions data <- iris data$fac2 <- ifelse(data$Sepal.Width > mean(data$Sepal.Width), "A", "B") model <- lm(Sepal.Length ~ Species / fac2 / Petal.Length, data = data) parameters_type(model) model <- lm(Sepal.Length ~ Species / fac2 * Petal.Length, data = data) parameters_type(model) } parameters/man/model_parameters.merMod.Rd0000644000176200001440000001073213615560005020230 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters_mixed.R \name{model_parameters.merMod} \alias{model_parameters.merMod} \alias{model_parameters.glmmTMB} \alias{model_parameters.mixor} \alias{model_parameters.clmm} \title{Parameters from Mixed Models} \usage{ \method{model_parameters}{merMod}( model, ci = 0.95, bootstrap = FALSE, df_method = "wald", iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, details = FALSE, ... ) \method{model_parameters}{glmmTMB}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated"), standardize = NULL, exponentiate = FALSE, df_method = NULL, details = FALSE, ... ) \method{model_parameters}{mixor}( model, ci = 0.95, effects = c("all", "fixed", "random"), bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, details = FALSE, ... ) \method{model_parameters}{clmm}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, details = FALSE, df_method = NULL, ... ) } \arguments{ \item{model}{A mixed model.} \item{ci}{Confidence Interval (CI) level. Default to 0.95 (95\%).} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=parameters_bootstrap]{parameters_bootstrap()}}).} \item{df_method}{Method for computing degrees of freedom for p-values, standard errors and confidence intervals (CI). May be \code{"wald"} (default, see \code{\link{degrees_of_freedom}}), \code{"ml1"} (see \code{\link{dof_ml1}}), \code{"betwithin"} (see \code{\link{dof_betwithin}}), \code{"satterthwaite"} (see \code{\link{dof_satterthwaite}}) or \code{"kenward"} (see \code{\link{dof_kenward}}). Note that when \code{df_method} is not \code{"wald"}, robust standard errors etc. cannot be computed.} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{"refit"}, \code{"posthoc"}, \code{"smart"}, \code{"basic"} or \code{NULL} (default) for no standardization. See 'Details' in \code{\link[effectsize]{standardize_parameters}}. Note that robust estimation (i.e.
\code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for, say, logistic regressions, or more generally speaking: for models with log or logit link.} \item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details.} \item{details}{Logical, if \code{TRUE}, a summary of the random effects is included. See \code{\link{random_parameters}} for details.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}) or \code{"all"}.} \item{effects}{Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from (linear) mixed models. } \examples{ library(parameters) if (require("lme4") && require("glmmTMB")) { model <- lmer(mpg ~ wt + (1 | gear), data = mtcars) model_parameters(model) model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) model_parameters(model, details = TRUE) } \donttest{ if (require("lme4")) { model <- lmer(mpg ~ wt + (1 | gear), data = mtcars) model_parameters(model, bootstrap = TRUE, iterations = 50) } } } \seealso{ \code{\link[=standardize_names]{standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/convert_data_to_numeric.Rd0000644000176200001440000000143613566047515020364 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_data_to_numeric.R \name{convert_data_to_numeric} \alias{convert_data_to_numeric} \alias{data_to_numeric} \title{Convert data to numeric} \usage{ convert_data_to_numeric(x, dummy_factors = TRUE, ...) data_to_numeric(x, dummy_factors = TRUE, ...) } \arguments{ \item{x}{A data frame or a vector.} \item{dummy_factors}{Transform factors to dummy factors (all factor levels as different columns filled with a binary 0-1 value).} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of numeric variables. } \description{ Convert data to numeric by converting characters to factors and factors to either numeric levels or dummy variables. } \examples{ head(convert_data_to_numeric(iris)) } parameters/man/p_value.Rd0000644000176200001440000000666013614111266015123 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_value.R \name{p_value} \alias{p_value} \alias{p_value.default} \alias{p_value.lmerMod} \alias{p_value.merMod} \alias{p_value.rlmerMod} \alias{p_value.glmmTMB} \alias{p_value.MixMod} \alias{p_value.mixor} \alias{p_value.DirichletRegModel} \alias{p_value.clm2} \alias{p_value.gee} \title{p-values} \usage{ p_value(model, ...) \method{p_value}{default}(model, method = NULL, ...)
\method{p_value}{lmerMod}(model, method = "wald", ...) \method{p_value}{merMod}(model, method = "wald", ...) \method{p_value}{rlmerMod}(model, method = "wald", ...) \method{p_value}{glmmTMB}(model, component = c("all", "conditional", "zi", "zero_inflated"), ...) \method{p_value}{MixMod}(model, component = c("all", "conditional", "zi", "zero_inflated"), ...) \method{p_value}{mixor}(model, effects = c("all", "fixed", "random"), ...) \method{p_value}{DirichletRegModel}(model, component = c("all", "conditional", "precision"), ...) \method{p_value}{clm2}(model, component = c("all", "conditional", "scale"), ...) \method{p_value}{gee}(model, method = NULL, ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed.} \item{method}{For mixed models, can be \code{\link[=p_value_wald]{"wald"}} (default), \code{\link[=p_value_ml1]{"ml1"}}, \code{\link[=p_value_betwithin]{"betwithin"}}, \code{\link[=p_value_satterthwaite]{"satterthwaite"}} or \code{\link[=p_value_kenward]{"kenward"}}. For models that are supported by the \pkg{sandwich} or \pkg{clubSandwich} packages, may also be \code{method = "robust"} to compute p-values based ob robust standard errors.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"} or \code{"all"} (default). May be abbreviated.} \item{effects}{Should standard errors for fixed effects or random effects be returned? Only applies to mixed models. May be abbreviated. When standard errors for random effects are requested, for each grouping factor a list of standard errors (per group level) for random intercepts and slopes is returned.} } \value{ The p-values. } \description{ This function attempts to return, or compute, p-values of a model's parameters. The nature of the p-values is different depending on the model: \itemize{ \item Mixed models (lme4): By default, p-values are based on Wald-test approximations (see \code{\link{p_value_wald}}). For certain situations, the "m-l-1" rule might be a better approximation. That is, for \code{method = "ml1"}, \code{\link{p_value_ml1}} is called. For \code{lmerMod} objects, if \code{method = "kenward"}, p-values are based on Kenward-Roger approximations, i.e. \code{\link{p_value_kenward}} is called, and \code{method = "satterthwaite"} calls \code{\link{p_value_satterthwaite}}. } } \note{ \code{p_value_robust()} resp. \code{p_value(method = "robust")} rely on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if \code{vcov_estimation = "CR"} for cluster-robust standard errors) and will thus only work for those models supported by those packages. 
} \examples{ model <- lme4::lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value(model) } parameters/man/p_value_satterthwaite.Rd0000644000176200001440000000411313611661733020070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_satterthwaite.R, R/dof_satterthwaite.R, % R/p_value_satterthwaite.R, R/se_satterthwaite.R \name{ci_satterthwaite} \alias{ci_satterthwaite} \alias{dof_satterthwaite} \alias{p_value_satterthwaite} \alias{se_satterthwaite} \title{Satterthwaite approximation for SEs, CIs and p-values} \usage{ ci_satterthwaite(model, ci = 0.95) dof_satterthwaite(model) p_value_satterthwaite(model, dof = NULL) se_satterthwaite(model) } \arguments{ \item{model}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to 0.95 (95\%).} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ An approximate F-test based on the Satterthwaite (1946) approach. } \details{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics. Unlike simpler approximation heuristics like the "m-l-1" rule (\code{dof_ml1}), the Satterthwaite approximation is also applicable in more complex multilevel designs. However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. } \examples{ \donttest{ if (require("lme4")) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_satterthwaite(model) } } } \references{ Satterthwaite FE (1946) An approximate distribution of estimates of variance components. Biometrics Bulletin 2 (6):110–4. } \seealso{ \code{dof_satterthwaite()} and \code{se_satterthwaite()} are small helper-functions to calculate approximated degrees of freedom and standard errors for model parameters, based on the Satterthwaite (1946) approach. \cr \cr \code{\link[=dof_kenward]{dof_kenward()}} and \code{\link[=dof_ml1]{dof_ml1()}} approximate degrees of freedom based on Kenward-Roger's method or the "m-l-1" rule. } parameters/man/dot-factor_to_dummy.Rd0000644000176200001440000000047313620043640017440 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.factor_to_dummy} \alias{.factor_to_dummy} \title{Safe transformation from factor/character to numeric} \usage{ .factor_to_dummy(x) } \description{ Safe transformation from factor/character to numeric } \keyword{internal} parameters/man/data_partition.Rd0000644000176200001440000000200113610642124016453 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_partition.R \name{data_partition} \alias{data_partition} \title{Partition data into a test and a training set} \usage{ data_partition(x, training_proportion = 0.7, group = NULL) } \arguments{ \item{x}{A data frame, or an object that can be coerced to a data frame.} \item{training_proportion}{The proportion (between 0 and 1) of the training set. The remaining part will be used for the test set.} \item{group}{A character vector indicating the name(s) of the column(s) used for stratified partitioning.} } \value{ A list of two data frames, named \code{test} and \code{training}.
} \description{ Creates a training and a test set based on a dataframe. Can also be stratified (i.e., evenly spread a given factor) using the \code{group} argument. } \examples{ df <- iris df$Smell <- rep(c("Strong", "Light"), 75) head(data_partition(df)) head(data_partition(df, group = "Species")) head(data_partition(df, group = c("Species", "Smell"))) } parameters/man/model_parameters.Rd0000644000176200001440000000506713617565662017033 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.R \name{model_parameters} \alias{model_parameters} \alias{parameters} \title{Model Parameters} \usage{ model_parameters(model, ...) parameters(model, ...) } \arguments{ \item{model}{Statistical Model.} \item{...}{Arguments passed to or from other methods. Non-documented arguments are \code{digits}, \code{p_digits} and \code{ci_digits} to set the number of digits for the output. See 'Examples' in \code{\link{model_parameters.default}}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Compute and extract model parameters. See the documentation for your object's class: \itemize{ \item{\link[=model_parameters.htest]{Correlations and t-tests}} \item{\link[=model_parameters.aov]{ANOVAs}} \item{\link[=model_parameters.default]{Regression models} (\code{lm}, \code{glm}, \pkg{survey}, ...)} \item{\link[=model_parameters.gam]{Additive models} (\code{gam}, \code{gamm}, ...)} \item{\link[=model_parameters.zeroinfl]{Zero-inflated models} (\code{hurdle}, \code{zeroinfl}, \code{zerocount})} \item{\link[=model_parameters.mlm]{Multinomial, ordinal and cumulative link models} (\code{bracl}, \code{multinom}, \code{mlm}, ...)} \item{\link[=model_parameters.merMod]{Mixed models} (\pkg{lme4}, \pkg{nlme}, \pkg{glmmTMB}, ...)} \item{\link[=model_parameters.BFBayesFactor]{Bayesian tests} (\pkg{BayesFactor})} \item{\link[=model_parameters.stanreg]{Bayesian models} (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm})} \item{\link[=model_parameters.principal]{PCA and FA} (\pkg{psych})} \item{\link[=model_parameters.lavaan]{CFA and SEM} (\pkg{lavaan}, \pkg{blavaan})} \item{\link[=model_parameters.kmeans]{Cluster models} (k-means, ...)} \item{\link[=model_parameters.rma]{Meta-Analysis via linear (mixed) models} (\code{rma})} } } \details{ Standardization is based on \code{\link[effectsize]{standardize_parameters}}. In case of \code{standardize = "refit"}, the data used to fit the model will be standardized and the model is completely refitted. In such cases, standard errors and confidence intervals refer to the standardized coefficient. } \note{ The \code{\link[=print.parameters_model]{print()}} method has several arguments to tweak the output. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \seealso{ \code{\link[=standardize_names]{standardize_names()}} to rename columns into a consistent, standardized naming scheme. 
} parameters/man/model_parameters.BFBayesFactor.Rd0000644000176200001440000000477613612621513021431 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.BFBayesFactor.R \name{model_parameters.BFBayesFactor} \alias{model_parameters.BFBayesFactor} \title{Parameters from BayesFactor objects} \usage{ \method{model_parameters}{BFBayesFactor}( model, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.89, priors = TRUE, ... ) } \arguments{ \item{model}{Object of class \code{BFBayesFactor}.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.89} (89\%) for Bayesian models and \code{.95} (95\%) for frequentist models.} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope}} or \code{\link[bayestestR:p_direction]{p_direction}}) and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a list of two values (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.} \item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{priors}{Add the prior used for each parameter.} \item{...}{Additional arguments to be passed to or from methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters of BayesFactor objects. } \examples{ \donttest{ library(BayesFactor) model <- ttestBF(x = rnorm(100, 1, 1)) model_parameters(model) } } parameters/man/model_parameters.htest.Rd0000644000176200001440000000157713607421425020146 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.htest.R \name{model_parameters.htest} \alias{model_parameters.htest} \title{Parameters from Correlations and t-tests} \usage{ \method{model_parameters}{htest}(model, bootstrap = FALSE, ...) } \arguments{ \item{model}{Object of class \code{htest}.} \item{bootstrap}{Should estimates be bootstrapped?} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters of h-tests (correlations, t-tests). 
} \examples{ model <- cor.test(mtcars$mpg, mtcars$cyl, method = "pearson") model_parameters(model) model <- t.test(iris$Sepal.Width, iris$Sepal.Length) model_parameters(model) model <- t.test(mtcars$mpg ~ mtcars$vs) model_parameters(model) model <- t.test(iris$Sepal.Width, mu = 1) model_parameters(model) } parameters/man/model_parameters.principal.Rd0000644000176200001440000000773713607421425020774 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.FactoMineR.R, % R/model_parameters.psych.R \name{model_parameters.PCA} \alias{model_parameters.PCA} \alias{model_parameters.principal} \alias{model_parameters.omega} \title{Parameters from Structural Models (PCA, EFA, ...)} \usage{ \method{model_parameters}{PCA}(model, sort = FALSE, threshold = NULL, labels = NULL, ...) \method{model_parameters}{principal}(model, sort = FALSE, threshold = NULL, labels = NULL, ...) \method{model_parameters}{omega}(model, ...) } \arguments{ \item{model}{PCA or FA created by the \pkg{psych} or \pkg{FactoMineR} packages (e.g. through \code{psych::principal}, \code{psych::fa} or \code{psych::omega}).} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the simplest structure).} \item{labels}{A character vector containing labels to be added to the loadings data. Usually, the question related to the item.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of loadings. } \description{ Format structural models from the \pkg{psych} or \pkg{FactoMineR} packages. } \details{ For the structural models obtained with \pkg{psych}, the following indices are present: \itemize{ \item \strong{Complexity} (\cite{Hoffman's, 1978; Pettersson and Turkheimer, 2010}) represents the number of latent components needed to account for the observed variables. Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1. \item \strong{Uniqueness} represents the variance that is 'unique' to the variable and not shared with other variables. It is equal to \code{1 - communality} (variance that is shared with other variables). A uniqueness of \code{0.20} suggests that 20\% of that variable's variance is not shared with other variables in the overall factor model. The greater the 'uniqueness', the lower the relevance of the variable in the factor model. \item \strong{MSA} represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates whether there is enough data for each factor to give reliable results for the PCA. The value should be > 0.6, and desirable values are > 0.8 (\cite{Tabachnick and Fidell, 2013}).
} } \examples{ library(parameters) library(psych) # Principal Component Analysis (PCA) --------- pca <- psych::principal(attitude) model_parameters(pca) pca <- psych::principal(attitude, nfactors = 3, rotate = "none") model_parameters(pca, sort = TRUE, threshold = 0.2) principal_components(attitude, n = 3, sort = TRUE, threshold = 0.2) \donttest{ # Exploratory Factor Analysis (EFA) --------- efa <- psych::fa(attitude, nfactors = 3) model_parameters(efa, threshold = "max", sort = TRUE, labels = as.character(1:ncol(attitude))) } # Omega --------- omega <- psych::omega(mtcars, nfactors = 3) params <- model_parameters(omega) params summary(params) # FactoMineR --------- \dontrun{ library(FactoMineR) model <- FactoMineR::PCA(iris[, 1:4], ncp = 2) model_parameters(model) attributes(model_parameters(model))$scores model <- FactoMineR::FAMD(iris, ncp = 2) model_parameters(model) } } \references{ \itemize{ \item Kaiser, H.F. and Rice, J. (1974). Little jiffy, mark iv. Educational and Psychological Measurement, 34(1):111–117 \item Pettersson, E., \& Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420. \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics (6th ed.). Boston: Pearson Education. } } parameters/man/format_algorithm.Rd0000644000176200001440000000061213620043640017012 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_algorithm.R \name{format_algorithm} \alias{format_algorithm} \title{Model Algorithm formatting} \usage{ format_algorithm(model) } \arguments{ \item{model}{A statistical model.} } \description{ Model Algorithm formatting } \examples{ model <- lm(Sepal.Length ~ Species, data = iris) format_algorithm(model) } parameters/man/dot-factor_to_numeric.Rd0000644000176200001440000000050113620043640017741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.factor_to_numeric} \alias{.factor_to_numeric} \title{Safe transformation from factor/character to numeric} \usage{ .factor_to_numeric(x) } \description{ Safe transformation from factor/character to numeric } \keyword{internal} parameters/man/n_parameters.Rd0000644000176200001440000000333613607421426016151 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_parameters.R \name{n_parameters} \alias{n_parameters} \alias{n_parameters.default} \alias{n_parameters.merMod} \alias{n_parameters.glmmTMB} \alias{n_parameters.zeroinfl} \alias{n_parameters.gam} \alias{n_parameters.brmsfit} \title{Count number of parameters in a model} \usage{ n_parameters(x, ...) \method{n_parameters}{default}(x, ...) \method{n_parameters}{merMod}(x, effects = c("fixed", "random"), ...) \method{n_parameters}{glmmTMB}( x, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), ... ) \method{n_parameters}{zeroinfl}( x, component = c("all", "conditional", "zi", "zero_inflated"), ... ) \method{n_parameters}{gam}(x, component = c("all", "conditional", "smooth_terms"), ...) \method{n_parameters}{brmsfit}( x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "simplex", "sigma", "smooth_terms"), ...
) } \arguments{ \item{x}{A statistical model.} \item{...}{Arguments passed to or from other methods.} \item{effects}{Should number of parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should the total number of parameters, the number of parameters for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so-called fixed-effects regressions). May be abbreviated.} } \value{ The number of parameters in the model. } \description{ Returns the number of parameters of a model. } parameters/man/skewness.Rd0000644000176200001440000000635113612621514015327 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/skewness_kurtosis.R \name{skewness} \alias{skewness} \alias{kurtosis} \title{Compute Skewness and Kurtosis} \usage{ skewness(x, na.rm = TRUE, type = "2", ...) kurtosis(x, na.rm = TRUE, type = "2", ...) } \arguments{ \item{x}{A numeric vector or data.frame.} \item{na.rm}{Remove missing values.} \item{type}{Type of algorithm for computing skewness. May be one of \code{1} (or \code{"1"}, \code{"I"} or \code{"classic"}), \code{2} (or \code{"2"}, \code{"II"} or \code{"SPSS"} or \code{"SAS"}) or \code{3} (or \code{"3"}, \code{"III"} or \code{"Minitab"}). See 'Details'.} \item{...}{Arguments passed to or from other methods.} } \value{ Values of skewness or kurtosis. } \description{ Compute Skewness and Kurtosis } \details{ \subsection{Skewness}{ Symmetric distributions have a \code{skewness} around zero, while a negative skewness value indicates a "left-skewed" distribution, and a positive skewness value indicates a "right-skewed" distribution. Examples for the relationship of skewness and distributions are: \itemize{ \item Normal distribution (and other symmetric distribution) has a skewness of 0 \item Half-normal distribution has a skewness just below 1 \item Exponential distribution has a skewness of 2 \item Lognormal distribution can have a skewness of any positive value, depending on its parameters } (\cite{https://en.wikipedia.org/wiki/Skewness}) } \subsection{Types of Skewness}{ \code{skewness()} supports three different methods for estimating skewness, as discussed in \cite{Joanes and Gill (1998)}: \itemize{ \item Type "1" is the "classical" method, which is \code{g1 = (sum((x - mean(x))^3) / n) / (sum((x - mean(x))^2) / n)^1.5} \item Type "2" first calculates the type-1 skewness, then adjusts the result: \code{G1 = g1 * sqrt(n * (n - 1)) / (n - 2)}. This is what SAS and SPSS usually return. \item Type "3" first calculates the type-1 skewness, then adjusts the result: \code{b1 = g1 * ((1 - 1 / n))^1.5}. This is what Minitab usually returns. } } \subsection{Kurtosis}{ The \code{kurtosis} is a measure of "tailedness" of a distribution. A distribution with a kurtosis value of about zero is called "mesokurtic". A kurtosis value larger than zero indicates a "leptokurtic" distribution with \emph{fatter} tails. A kurtosis value below zero indicates a "platykurtic" distribution with \emph{thinner} tails (\cite{https://en.wikipedia.org/wiki/Kurtosis}).
} \subsection{Types of Kurtosis}{ \code{kurtosis()} supports three different methods for estimating kurtosis, as discussed in \cite{Joanes and Gill (1998)}: \itemize{ \item Type "1" is the "classical" method, which is \code{g2 = n * sum((x - mean(x))^4) / (sum((x - mean(x))^2)^2) - 3}. \item Type "2" first calculates the type-1 kurtosis, then adjusts the result: \code{G2 = ((n + 1) * g2 + 6) * (n - 1)/((n - 2) * (n - 3))}. This is what SAS and SPSS usually return. \item Type "3" first calculates the type-1 kurtosis, then adjusts the result: \code{b2 = (g2 + 3) * (1 - 1 / n)^2 - 3}. This is what Minitab usually returns. } } } \examples{ skewness(rnorm(1000)) kurtosis(rnorm(1000)) } \references{ D. N. Joanes and C. A. Gill (1998). Comparing measures of sample skewness and kurtosis. The Statistician, 47, 183–189. } parameters/man/dot-compact_character.Rd0000644000176200001440000000043513620043640017705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.compact_character} \alias{.compact_character} \title{remove empty string from character} \usage{ .compact_character(x) } \description{ remove empty string from character } \keyword{internal} parameters/man/format_pd.Rd0000644000176200001440000000131113607421425015432 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_pd.R \name{format_pd} \alias{format_pd} \title{Probability of direction (pd) formatting} \usage{ format_pd(pd, stars = FALSE, stars_only = FALSE, name = "pd") } \arguments{ \item{pd}{Probability of direction (pd).} \item{stars}{Add significance stars (e.g., p < .001***).} \item{stars_only}{Return only significance stars.} \item{name}{Name prefixing the text. Can be \code{NULL}.} } \value{ A formatted string. } \description{ Probability of direction (pd) formatting } \examples{ format_pd(0.12) format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), name = NULL) format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), stars = TRUE) } parameters/man/format_rope.Rd0000644000176200001440000000103313607421425015775 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_rope.R \name{format_rope} \alias{format_rope} \title{Percentage in ROPE formatting} \usage{ format_rope(rope_percentage, name = "in ROPE") } \arguments{ \item{rope_percentage}{Value or vector of percentages in ROPE.} \item{name}{Name prefixing the text. Can be \code{NULL}.} } \value{ A formatted string. } \description{ Percentage in ROPE formatting } \examples{ format_rope(c(0.02, 0.12, 0.357, 0)) format_rope(c(0.02, 0.12, 0.357, 0), name = NULL) } parameters/man/get_scores.Rd0000644000176200001440000000314013612637423015621 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_scores.R \name{get_scores} \alias{get_scores} \title{Get Scores from Principal Component Analysis (PCA)} \usage{ get_scores(x, n_items = NULL) } \arguments{ \item{x}{An object returned by \code{\link{principal_components}}.} \item{n_items}{Number of required (i.e. non-missing) items to build the sum score. If \code{NULL}, the value is chosen to match half of the number of columns in a data frame.} } \value{ A data frame with subscales, which are average sum scores for all items from each component. } \description{ \code{get_scores()} takes the \code{n_items} items that load the most (either by loading cutoff or by number) on a component, and then computes their average.
} \details{ \code{get_scores()} takes the results from \code{\link{principal_components}} and extracts the variables for each component found by the PCA. Then, for each of these "subscales", row means are calculated (which equals adding up the single items and dividing by the number of items). This results in a sum score for each component from the PCA, which is on the same scale as the original, single items that were used to compute the PCA. } \examples{ library(parameters) pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax") # PCA extracted two components pca # assignment of items to each component closest_component(pca) # now we want to have sum scores for each component get_scores(pca) # compare to manually computed sum score for 2nd component, which # consists of items "hp" and "qsec" (mtcars$hp + mtcars$qsec) / 2 } parameters/man/check_multimodal.Rd0000644000176200001440000000240113603326255016765 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_multimodal.R \name{check_multimodal} \alias{check_multimodal} \title{Check if a distribution is unimodal or multimodal} \usage{ check_multimodal(x, ...) } \arguments{ \item{x}{A numeric vector or a dataframe.} \item{...}{Arguments passed to or from other methods.} } \description{ For univariate distributions (one-dimensional vectors), this function performs an Ameijeiras-Alonso et al. (2019) excess mass test. For multivariate distributions (dataframes), it uses mixture modelling. However, it seems that it always returns a significant result (suggesting that the distribution is multimodal). A better method might be needed here. } \examples{ # Univariate x <- rnorm(2000) check_multimodal(x) x <- c(rnorm(1000), rnorm(1000, 2)) check_multimodal(x) # Multivariate \donttest{ m <- data.frame( x = rnorm(200), y = rbeta(200, 2, 1) ) plot(m$x, m$y) check_multimodal(m) m <- data.frame( x = c(rnorm(100), rnorm(100, 4)), y = c(rbeta(100, 2, 1), rbeta(100, 1, 4)) ) plot(m$x, m$y) check_multimodal(m) } } \references{ \itemize{ \item Ameijeiras-Alonso, J., Crujeiras, R. M., \& Rodríguez-Casal, A. (2019). Mode testing, critical bandwidth and excess mass. Test, 28(3), 900-919. } } parameters/man/format_model.Rd0000644000176200001440000000055413620043640016131 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_model.R \name{format_model} \alias{format_model} \title{Model Name formatting} \usage{ format_model(model) } \arguments{ \item{model}{A statistical model.} } \description{ Model Name formatting } \examples{ model <- lm(Sepal.Length ~ Species, data = iris) format_model(model) } parameters/man/random_parameters.Rd0000644000176200001440000000430413617555624017175 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/random_parameters.R \name{random_parameters} \alias{random_parameters} \title{Summary information from random effects} \usage{ random_parameters(model) } \arguments{ \item{model}{A mixed effects model (including \code{stanreg} models).} } \value{ A data frame with random effects statistics for the variance components, including number of levels per random effect group, as well as complete observations in the model. } \description{ This function extracts the different variance components of a mixed model and returns the result as a data frame.
} \details{ The variance components are obtained from \code{\link[insight]{get_variance}} and are denoted as follows: \subsection{Within-group (or residual) variance}{ The residual variance, \ifelse{html}{\out{&sigma;<sup>2</sup><sub>&epsilon;</sub>}}{\eqn{\sigma^2_\epsilon}}, is the sum of the distribution-specific variance and the variance due to additive dispersion. It indicates the \emph{within-group variance}. } \subsection{Between-group random intercept variance}{ The random intercept variance, or \emph{between-group} variance for the intercept (\ifelse{html}{\out{&tau;<sub>00</sub>}}{\eqn{\tau_{00}}}), is obtained from \code{VarCorr()}. It indicates how much groups or subjects differ from each other. } \subsection{Between-group random slope variance}{ The random slope variance, or \emph{between-group} variance for the slopes (\ifelse{html}{\out{&tau;<sub>11</sub>}}{\eqn{\tau_{11}}}) is obtained from \code{VarCorr()}. This measure is only available for mixed models with random slopes. It indicates how much groups or subjects differ from each other according to their slopes. } \subsection{Random slope-intercept correlation}{ The random slope-intercept correlation (\ifelse{html}{\out{&rho;<sub>01</sub>}}{\eqn{\rho_{01}}}) is obtained from \code{VarCorr()}. This measure is only available for mixed models with random intercepts and slopes. } } \examples{ if (require("lme4")) { data(sleepstudy) model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) random_parameters(model) } } parameters/man/model_parameters.aov.Rd0000644000176200001440000000302013611426120017557 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.aov.R \name{model_parameters.aov} \alias{model_parameters.aov} \title{Parameters from ANOVAs} \usage{ \method{model_parameters}{aov}( model, omega_squared = NULL, eta_squared = NULL, epsilon_squared = NULL, ... ) } \arguments{ \item{model}{Object of class \link{aov}, \link{anova} or \code{aovlist}.} \item{omega_squared}{Compute omega squared as index of effect size. Can be \code{"partial"} (adjusted for effect size) or \code{"raw"}.} \item{eta_squared}{Compute eta squared as index of effect size. Can be \code{"partial"} (adjusted for effect size) or \code{"raw"}.} \item{epsilon_squared}{Compute epsilon squared as index of effect size.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from ANOVAs. } \examples{ df <- iris df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") model <- aov(Sepal.Length ~ Sepal.Big, data = df) model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE) model <- anova(lm(Sepal.Length ~ Sepal.Big, data = df)) model_parameters(model) model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE) model <- aov(Sepal.Length ~ Sepal.Big + Error(Species), data = df) model_parameters(model) if (require("lme4")) { model <- anova(lmer(Sepal.Length ~ Sepal.Big + (1 | Species), data = df)) model_parameters(model) } } parameters/man/ICA.Rd0000644000176200001440000000234213575512105014060 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reduce_parameters.R \name{ICA} \alias{ICA} \title{Independent Component Analysis (ICA)} \usage{ ICA(x, n = "all", ...) } \arguments{ \item{x}{A dataframe or a statistical model.} \item{n}{Number of components to extract.
If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link{n_factors}}. In \code{\link{reduce_parameters}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{...}{Arguments passed to or from other methods.} } \description{ Performs an Independent Component Analysis using the FastICA algorithm. Contrary to PCA, that attempts to find uncorrelated sources (through least squares minimization), ICA attempts to find independent sources, i.e., the source space that maximizes the "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each source, which makes it a poor tool for dimensionality reduction. Requires the \pkg{fastICA} package to be installed. } \examples{ ICA(iris[, 1:4]) } parameters/man/equivalence_test.lm.Rd0000644000176200001440000000162313566047515017444 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence_test.R \name{equivalence_test.lm} \alias{equivalence_test.lm} \title{Equivalence test} \usage{ \method{equivalence_test}{lm}(x, range = "default", ci = 0.95, verbose = TRUE, ...) } \arguments{ \item{x}{A statistical model.} \item{range}{The range of practical equivalence of an effect. May be \code{"default"}, to automatically define this range based on properties of the model's data.} \item{ci}{Confidence Interval (CI) level. Default to 0.95 (95\%).} \item{verbose}{Toggle off warnings.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. } \description{ Compute the equivalence test for frequentist models. } \examples{ m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) equivalence_test(m) } \seealso{ For more details, see \code{\link[bayestestR:equivalence_test]{equivalence_test}}. } parameters/man/model_parameters.befa.Rd0000644000176200001440000000422413612621513017700 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.BayesFM.R \name{model_parameters.befa} \alias{model_parameters.befa} \title{Parameters from PCA/FA} \usage{ \method{model_parameters}{befa}( model, sort = FALSE, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = NULL, ... ) } \arguments{ \item{model}{Bayesian EFA created by the \code{BayesFM::befa}.} \item{sort}{Sort the loadings.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.89} (89\%) for Bayesian models and \code{.95} (95\%) for frequentist models.} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si}}).} \item{test}{The indices of effect existence to compute. 
Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope}} or \code{\link[bayestestR:p_direction]{p_direction}}) and its results included in the summary output.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of loadings. } \description{ Format PCA/FA objects from the psych package (Revelle, 2016). } \examples{ library(parameters) \donttest{ library(BayesFM) efa <- BayesFM::befa(mtcars, iter = 1000) results <- model_parameters(efa, sort = TRUE) results attributes(results)$loadings_long efa_to_cfa(results) } } parameters/man/format_number.Rd0000644000176200001440000000127713566047515016342 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_number.R \name{format_number} \alias{format_number} \title{Convert number to words} \usage{ format_number(x, textual = TRUE, ...) } \arguments{ \item{x}{Number.} \item{textual}{Return words. If \code{FALSE}, will run \code{\link[insight]{format_value}}.} \item{...}{Arguments to be passed to \code{\link[insight]{format_value}} if \code{textual} is \code{FALSE}.} } \value{ A formatted string. } \description{ Convert number to words. The code has been adapted from here https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r } \examples{ format_number(2) format_number(45) format_number(324.68765) } parameters/man/standard_error_robust.Rd0000644000176200001440000000502413611010264020061 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/robust_estimation.R \name{standard_error_robust} \alias{standard_error_robust} \alias{p_value_robust} \alias{ci_robust} \title{Robust estimation} \usage{ standard_error_robust( model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, ... ) p_value_robust( model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, ... ) ci_robust( model, ci = 0.95, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, ... ) } \arguments{ \item{model}{A model.} \item{vcov_estimation}{String, indicating the suffix of the \code{vcov*()}-function from the \pkg{sandwich}-package, e.g. \code{vcov_estimation = "CL"} (which calls \code{\link[sandwich]{vcovCL}} to compute clustered covariance matrix estimators), or \code{vcov_estimation = "HC"} (which calls \code{\link[sandwich:vcovHC]{vcovHC()}} to compute heteroskedasticity-consistent covariance matrix estimators).} \item{vcov_type}{Character vector, specifying the estimation type for the robust covariance matrix estimation (see \code{\link[sandwich:vcovHC]{vcovHC()}} or \code{\link[clubSandwich:vcovCR]{vcovCR()}} for details).} \item{vcov_args}{List of named vectors, used as additional arguments that are passed down to the \pkg{sandwich}-function specified in \code{vcov_estimation}.} \item{...}{Arguments passed to or from other methods. For \code{standard_error()}, if \code{method = "robust"}, arguments \code{vcov_estimation}, \code{vcov_type} and \code{vcov_args} can be passed down to \code{standard_error_robust()}.} \item{ci}{Confidence Interval (CI) level. Default to 0.95 (95\%).} } \value{ A data frame. 
# cluster-robust standard errors, using clubSandwich iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) standard_error_robust( model, vcov_type = "CR2", vcov_args = list(cluster = iris$cluster) ) } parameters/man/dot-n_factors_mreg.Rd0000644000176200001440000000046313620043640017234 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_mreg} \alias{.n_factors_mreg} \title{Multiple Regression Procedure} \usage{ .n_factors_mreg(eigen_values = NULL, model = "factors") } \description{ Multiple Regression Procedure } \keyword{internal} parameters/man/p_value_betwithin.Rd0000644000176200001440000000551713614111266017170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_betwithin.R, R/dof_betwithin.R, % R/p_value_betwithin.R, R/se_betwithin.R \name{ci_betwithin} \alias{ci_betwithin} \alias{dof_betwithin} \alias{p_value_betwithin} \alias{se_betwithin} \title{Between-within approximation for SEs, CIs and p-values} \usage{ ci_betwithin(model, ci = 0.95) dof_betwithin(model) p_value_betwithin(model, dof = NULL) se_betwithin(model) } \arguments{ \item{model}{A mixed model.} \item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ Approximation of degrees of freedom based on a "between-within" heuristic. } \details{ \subsection{Small Sample Cluster-Corrected Degrees of Freedom}{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics (see \cite{Li and Redden 2015}). The \emph{Between-within} denominator degrees of freedom approximation is recommended in particular for generalized linear mixed models with repeated measurements (longitudinal design). \code{dof_betwithin()} implements a heuristic based on the between-within approach. \strong{Note} that this implementation does not return exactly the same results as shown in \cite{Li and Redden 2015}, but similar results. } \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ In particular for repeated measure designs (longitudinal data analysis), the \emph{between-within} heuristic is likely to be more accurate than simply using the residual or infinite degrees of freedom, because \code{dof_betwithin()} returns different degrees of freedom for within-cluster and between-cluster effects. } } \examples{ \donttest{ if (require("lme4")) { data(sleepstudy) model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) dof_betwithin(model) p_value_betwithin(model)
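# a hedged addition, not in the original example: ci_betwithin() and
# se_betwithin() (both documented in the usage section above) build on the
# same between-within degrees of freedom
ci_betwithin(model)
se_betwithin(model)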
} } } \references{ \itemize{ \item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel Analysis with Few Clusters: Improving Likelihood-based Methods to Provide Unbiased Estimates and Accurate Inference, British Journal of Political Science. \item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15(1), 38. \doi{10.1186/s12874-015-0026-x} } } \seealso{ \code{dof_betwithin()} and \code{se_betwithin()} are small helper-functions to calculate approximated degrees of freedom and standard errors of model parameters, based on the "between-within" heuristic. } parameters/man/cluster_discrimination.Rd0000644000176200001440000000211013603326255020233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_discrimination.R \name{cluster_discrimination} \alias{cluster_discrimination} \title{Compute a linear discriminant analysis on classified cluster groups} \usage{ cluster_discrimination(x, cluster_groups = NULL) } \arguments{ \item{x}{A data frame.} \item{cluster_groups}{Group classification of the cluster analysis, which can be retrieved from the \code{\link{cluster_analysis}} function.} } \description{ Computes a linear discriminant analysis on classified cluster groups, and determines the goodness of classification for each cluster group. } \examples{ \dontrun{ # retrieve group classification from hierarchical cluster analysis groups <- cluster_analysis(iris[, 1:4]) # goodness of group classification cluster_discrimination(iris[, 1:4], cluster_groups = groups)
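# a hedged addition, not part of the original example: n_clusters() (see the
# \seealso section below) can suggest how many clusters to extract beforehand
n_clusters(iris[, 1:4])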
} } \seealso{ \code{\link{n_clusters}} to determine the number of clusters to extract, \code{\link{cluster_analysis}} to compute a cluster analysis and \code{\link{check_clusterstructure}} to check suitability of data for clustering. } parameters/man/figures/0000755000176200001440000000000013564613015014640 5ustar liggesusersparameters/man/figures/logo.png0000644000176200001440000031506513564613015016310 0ustar liggesusers[binary PNG data omitted: package hex logo]
parameters/man/figures/figure1.png0000644000176200001440000025036213564613015016710 0ustar liggesusers[binary PNG data omitted: figure1]
^f9Qk"_jVm+i<ڙ+V,Y䭷2mZ2}v]ǫb9s|1h[# 5w%:2ĺɄRmu :EgFcuޯmi/6…9iUjpdy-~jYjE6 T=Z7:RB7'M?Sg=5&R!?TM%d埶jQ 6E Wd*ۂDw:NC7M^?2\_96䲵]Rֻ-Xa^CEprxpsOVΝ;/20*x*%8S f0h[ H2գc\=u|l8 jDXȍNX"F^`}ʞM^*QKi:j;%\-2Qd=cH/M}C5-N0d &pGV[9[ 3qO0#g3b:$2|m:mj5S[DZ赓KYGPl1en~ۄ"mT-zCuo 'å[tV}Hz<najJy/v.Kxb|:ᒅQ͛gBNF3w6)"X;VRmtLoݺU [llK".K-3څڶ3nNvBwޭ~5[W܌U#lM"L9 y}#F^`)[''j 7ՑDP2R9 hm63'7Q<e--G֩"M!vi=NNHo)mie ~,u-B>L:#( r'XÍyo"\+'HR>+d[M6R9ZmN%*l=9r6\4(0h }T"VԯXssb[W3Y:[_h@.vɏ*D'Z!}糄,:Y)J =V?3 Yd4ۂ΅{@h7[Wt?U]wխLcb nBN9iy/yZ.R"Oa>YZ&´NVGMGQ'T(GǍ`%]?TJ=+JQQNFJpEn8.EJl=^]B76*mѽz'Mi#72zmco[*l7!"#D6ꄜV-d CVZ ::ёm7j9!nꮸŋ&tg5p=Qe˕y O0ZǫKOvLw~!FJi7RLJIޱ6WBiBpG? @ {WJl=^]f ]F Sh9Jl=^]n(,7 `UzPYܴKph7I5PYGλsN9E}'nyZO}\p!<&n|NOPYH!{7KZg8k!$Ep#о:.k߯ӅΒ,@;Fpl}?)-}.*mPڶ$Qp#^:.[O>H;Kr7Rn tiSt!$]p#:.[ZZg]n˚Cw|?F_ag\?3קJ7?rt&[1Bvhgng+ K7okx[uwO-vY}?kD> ~,Y=z%ƒ@{[pk׮N:<ЀQF^~eshݻ-[&o CËSkѢE;vPmٲE/55u޽0jjjT9*➰#۳gOuuM233WZeLbŊ: EO7|nH\s:2[g/$g.ܬRega}7.6|`^>rFs g|Zj4__{sɦxhū$up#Yc۶mFcSie߶-{% |u [9PSW0++kjaEҭsgˋ;ee➰#o4n"b M m߆GCͩOuLun^m:ij~z'GM*MZ;{sa PY34ϲA3i2Z?.IFJKg79ci&5D:r,CTZǷm^|gBFՅ:kGZգl7oJ{?vV]*\wT_oD_sp'8 9Oeup8ZG`E( 橋{b-FNLxuYϲё*wf?l&TN;ntv!+x,o4GVn@+{bS< -":!VX!oeeeY/OQ%'T+Qw1/qO`x1FOhpa}Pp\P _smn§/߄duyWj=>P欴z[OGKHiCV-x,/`ƹ q|:1,Op#/MVV̙3B[)*ۣ3AuLK"XC}b: 9O_N pֻZMVn\tZMrCi_pbߌ7ܑc,5y. ewhrZ`wo/wMp#EY!O.@ku8 YT(Q+,աE^yUw"7~NBMD_t8_!G/Qfw8r^N pֵsRJ n׵ tgYsȳ~vI=ԱKDKBh#P-_WOs{/z64i/kEU"Joܞ)-pȊE8.mVp/og0(n!!/Rbv!薴;1Z/qOpH_qK.]x:|ɒ%_GCqԧ)9ٸM_G n qs ܇)}C ޷4}Zjԕ]>.yvsGoe7دpulh sG>g /kbB„;N>jŶKe7"c*ʉ/mpķt{#"Dn"ST )tJz(Dm'DH~aD_ q|(8S}[:*D:W/ul3%MV7/~zg|7Ӎ'h%ϟ3n+1{_6y7hY?sL9+͚C f_=յF`IptYcEB8mvXo./Shm(rj[E"$={DT7 [*)J?t'1c:&d?=c&zƿ"ӾmI|RE95f vQq+8aܬ9~1vᩮy߆.퐯ZG\7_Z??Ywĕsn.) n8dEE (GmVp/og0ܨVm+RGpӧP#$c3'vHB늊O">JhڝhGCS7ڄ[,%mr]7'9ZgÇztm>ĵOe_;W2[lʾűezn|8*GHY[0殏{-]@8aV&coo-E{L]R0jjaW<==q6']if7"VO,H?Ƿ^~7#-UwwO^l9p;C =!lTH5p8Pe#EpbS%QgVM_]cӠ2F }<}#\ˇ%Ɓ9#Ϗ?d]5O_8iʩcxY7 lrky o+EiDe7"&Fcڋ;o[ý %\_bnwTՈpU9 9qLg.1 &?=a(!&6PT߁QL[*mB] ni5!~㟪W^-lژxzԇAMȲ8T'xpa̸lCV8xLo?6mRCdT(o[Mý %ܨUABׂu`*S_8TFgTgff!22{`EDVFu׼TFV qh)jWԾhm֕JgDgxyi{{٤}njaIUd#Ȟ*x̟qcӺjg.ޟ;u(i)2LϯMKє7f횙fON LY[׼֭[+++ͱq+WGCHqWлw.ږ%j6.%l4|rİA=);P7-}ӿ LGw^+y.>3~ &oH=@WsF~ٽ{~iPG cxӌb,26.U 8[1_]3/v|lC Ycʖ-[tS]d UVp/:Zo]NW$%r6. }F C [٨r^|q4n4uώ 3.>dlQO왞;=u>]fk3R.0:uޥk[q]{F|cnI[J#Ɯ@SkǍ}bs^:uN9iӈ'y74@۳Dԥ7RM\?G#lVe8;Ow*w&qJ;ŧw& ws󽡃οTVRݹs^=+rYM eNɿcb(K|3 kSvӡ)d7w"=[)Km4]up߳P/uKUBWfOjlW꺬VNtW[uf|c>C^tW<'JzMè٥G/3Mԩ_ߣ[XIz0K>Nh ڞJm]mpz]Rϙ?xFyka̸^s珦<&g'ݒ=wVFq={qG{6g~{_}B(C%s*b<*d9?mi{Ɨwۀm7c/^H! zMغwui߯7_J2O6J,7^|s7guڸnF=)wFFvFYnB['8e1܉A]N;2~jq7RnAph{.S{*-]]psώ1vP~knKjasm?LK&|Kg}gѳ?;88%}1&K#*7/ʬqў=铼_~s´Xk74H/Vڹ󟋲Zt)FU3Ƙ:b&H! :Km.'Ysȟ\\3πÇn}|Z~R鿋Μ?/yjq '\tbaz̺mVnh=-%׷PP'(tV~"xמi_~Oc;x(8@#=[O]ن.5u{Ewʸ>h }/{_y{vk =8Ѧw~oZ?5=?k^O>s.m}ok/ I8v]{x οk*@ݻtydj^'JM <sʉn5!#™AITnT-ڞ~Ϻzz_uKU}",c?,ix%/mvo`W\>iI}G ~:OMgmTHN: 86y74ȋߑ6Fv9c` RUvK!|cRR)66Gph{R;+gK27=}us.Jz~vq8Tj#C>-5~vԵLӾ{矘6t'L/8< O!o `DN]!uӮ_D5<U-<)[1*~:uC3=9nEܐ܀lT?[$kpz:;&]NZ CGm'%{w~N8e)/zZnVz~z:OMY*kGzL̋3~j \rO^-ܘmܨrS8A>s?? ˶:W:xpCjp @۳Mul꒜~φ&ϝyzyG>*1j6@|PvYUwGU']~{3;ڬ=쟡#Ji<1dĈ3g._j#_L`T٪QUG :rpCjp@۳uG F%'ܱ3O/شԟԨҽ;V]hc*ɏnYu]VZϡW\=ϽERU؞]:w6ѩS]^*jgMs e*G* WW/MSƫO|c%IBjp@۳A 񳍣K7};&O4rt履^9?/7{qf;ha 4y~w}t5HE!;t%9LܤtihƟTtJ9/y0畢O-py9H!l]zYn [tď mnmqJgMd ncvu7'ʌ}&[E?{{ 2>Wѩƥu5z4U!xޱ},W)L8=SLlTa 8k7K6. )KF sI]:w)t<qv_#oȻuB|N.һW_~][M nHm.Dph{SR?ۘ$MpRwX/ԧǧ6RjK_|w/3,3gܶ'Qfnhuÿ:dF GD?E[0xU*Y3|rb.oNm]X?wb[._>3d&vPȇ-fQzB # }E\:ZpCjp'@۳uDF%9L(۽ _IPj~5kK'K.C{<Gɬl3?=R3i\IfeiS#4U;M&8ֵkeӌ9 s Of Xx׬YsڼSRA: 9'Mr(7` 'ܛ5NY&)kӮ$݋/*! 
zPIZvlڼ5zBa}~|CQ_f|||dʧ$O_ &exӇ 3cG~ݐwC^nsk8|騡KG1g苛TSL߁xvOc^tg20\{.OMtsKVTNeϧfY nU.,'!Ylͮ]JIIy͡uuuFRoЀݻw/[,pib;wW^^c.ڜT̕+WEe055u޽ K*S&-Y|/[VVV _7o63 7|PsT$9]u8&Nm&;7eRϐ{_-]+&<|uڍd>2A'r1'9LCG*6 *~dX5HNQYT7+7(Oo2jO3Wjݻ,mv{O۽^2CGs _(>[iGs"KjQ7೭K nNm;0zB5Oxg֌ 6"mٺRUwlXj>Uj ?8//>7s.ڨ,SQ S7R?`G܆9EƩw a~>&9Uӿw^qq4X (K}vG#ZwN' 6mRC"o[ޜI=n_ t!\FUJUt8~J0iBmKGjBi>QVVsk4'sm{F 9-p{ksT:~iuqipckkSRQbouӌ/cyW}}RgԞs/|?Ouڲյ{؍FFMmX)NXYE_WaMV| 3#wdC{0JVOLW|џ+zR<k9RQP7(7p%Tv^Gf/.xhUcG만SKn?⤜'L%I7;i~jzȪޕh7Ys(q> av. 'IP9QfeeYk9OQQDR  ᨞|I5UL{ BRu;n;nZ6roC~~ќm:TRb&ō{ۻ'K}׍{o>vȧ/f_I̩=rφFϺ#K8)knރk,tE;E=Uj<6)'[j`S}MKӜ— ;w>67Ν6K/5 X7yTukNMn&AFy* >zD_s(]ZepF8v?|SAu{sF^e]j:HEGn:wq+ItƶOZ-UUC@X -'rCv+B_-"\*&k@DDn"p)H ܣmAݼyM(&!_/bm!O*ZiTTsr7:[IiN6+]\NxaόӺtu JmT+ G}+iW/п{ ӮSϚ:_2)0'-5<=ި<̦Ũ*r3`m7 {3G2uNHPSoH|/};"#ka4j)'YζNJ˥6"!>^_z;8r">Ax6$sMȬ$¯N|M"I ?m(\]m D]cٲe/33sӦM_|?꣇[pYm2yt7:[.jif67]ܬ=ʩ>]N2㓠FʾRkO#5?Ξ-gA:* <ػY7ȆAnݖ`ʾY_]:wkqz]i}x_GoCnȓz<MB+?%(؉F@qg `@@}ن-G6R>*yoG`VLxgߚܻKOO}lΐ=u{TRcN>& eYR&UED-և^0:׻wki7M6NYuc~&X -*އ:Ģ@ BvR":i8$cOp{ \nMܴpULX:抰&8hQww$g.mTz3oԋf4/V7(w% 3[O͞כ;[۬6S[}ISNNa?1ѩSJ.wO8zB͑<} 'cLBw&֥avv5ҩwlƑF54ctttۥOMs eOr2#\ERs G]AU%*!73.1y7Vx*TW~c0)?@J6H7Mï]I^81sD\ύoz6K돜0/58@EݺX(h˴I G 39P %9o 5A|ɧfF83.5#[=F.N7-ڈazV(K,Yb=-Ʌsǽ[T?nHviXb nTu: H#5O|[n_pB ޽[u-Ҧ5rMT+_GV\i۹Dp[u|U( .լ_[34[OV_(MAHD\1WsՋ^_vsqb=[g~݈ڈF ӍG%\d»+ߢPٺ[_)gfd5r??.Tצ4Z~ްuS0O24 x6yKSgJcN wv]T766Yom6nH_ʢó Lt!,wqo75 uB~55M8 [@Ё#G>bF;?ܘ 'ns/;ˀF{|>DjTӰo?o l>٨,RNFUǍ6 X2bTꀺ横~NsSn|mP)?\~VlSgۜ7:w{ט[6z̿ٹ6*?8%ѩ>$orѷ}ovO6Oz*oo&lڔ{~M5eճ|y7p+$ nZ':.[O>EͽwKāi+yGOylCCklu+/=wd^z=/Wy;-;oמ7]=y}ύӟrڻ,YW|y⼓G8Ry}`&SIz]hT|n12S1߶)b1=/_9JV9{=῰HU53k~?rUOTeTW<65aV}*мU1!?7S:~%tpsoiWGk|_ܟyOƩ;宻-dr^#<?fmīlz3^Zד.:/Ө-߇X6) ~5_y3eӿ3Щw1ØFUQCN߷iR>lN$G#skSZIN.C~ƌC"ܹsYfe(4*c}م*wk-_sK3/9\μ~m{n23oN%]pCjhn'E;~ܨ\A}}r)~ܻo=zsZ S0wdܙ}?=3:ԯ{ d7~Y6ޯ<2褾}y+_={Y+u|l 6Ss3r\~6dJ=XQ]pp\.&:sYK.gܖ]sGr6ܑ{]6e@ǜS@$tG >|_,>FJK/w&?.s>_ZM3>I/5NFS98d1nNЫ[f/Z0x9S'o#>>g^N't|'Ou܌9TZ,389.6~N_FO3K]E'L`2ƔO;uɕwt}?9-U(!c7;~֏{ps=eG|7dՂe?R3uӚoϸټq[<Ӽwe^79[:V o!qPjc,o&k]9߻b/f {(wYsHM6:u?xK;&34u~fdm* )3Y]ƬMhM=iEACe񴴾jդ^U%/Nֹ!.jN9RQNa_.sG &zoqո1_1xRRE rp7=<{=m%!o7;~lfCyG:lƩo޳ƫ>wdozwbT=7ќ2jFUCɐ~'fmz###CMxuYg 4lK_)fL8'D)1*.i/eoGr_yfO~Ĵo,)~ڟؖvՔY+U'|ay~WtI OJtq]E?є=Y_2aPT*/~LMg8Bg4[4oC]3RW-R*Cyw32w ?͘79暱e+E];OfxBKEWdQ/#Uf[\(UmE .=L\ܣR>3[ ^{% qpCjnW+wTW3 n=<2 IV<~"oۯm+/対q;^I;cW}}[)X\_l}??=)y 3m3ۭR|7[ϳN[AqQ9?L3S>GMFTliT4\9\Qɬ'hN/sKN6z/UFII6 f=y)8ѹS+bZ:htDŽtԒ9E}{!Szl?daj6MWxhw}deP0fJG?\QfQC*XM.[pZţnWweS,pq-kqsp@Ap@+xu9lh\ʀ>#dq yko~wqʶ_Mk_?{)xw}sx$wٿz$/L7TRK'L4z%zf׻sKmԕmy4 2/C.>wAU'N)?Yﳞk]sh-n}c7/w7RǝqlT}ݸ/=[NyVBcNіitR=vzqf=r%j4ҥ?JJo?k,6N-sBA0c|?U*-۫ٽ{VOYΔ1*.3C*ϫ?RleTV2S'<zRE_>ґCqJe3)X2ƨM-[WyqWu:mҍG! nh%.ǂ{ ﷞whO,sWV8噟}ӔJF,sCu ܊Ŧ_J<#.f-t7~/w=QRu{g4蹂=Nx,Ofƌ>Gәv}cF^ltHpnܥ]$u (,r9pgZ@EAlՙ5/o2!}=Sug.I=iWөޞW{-Ylߘ꤉ih7 z n5_y4턾?a &_-e} O=)oܻ:'L|ԟn[ˇs,oY9S}<0f榏2u92c~gf}'kL>}}AiĪLӻPp%wtUnZu'K{2 (*-XԹS 4qenSiE{3=#3r;y{'?em"*3hVʉzuvy1oi׍=PNX6f)g֟4.NRO>U03iJ'en|48 n@p@+xu7 tR_?k)S!+{/NSj~hԇk>.u9<`sWߘt~ GsRFJ}َ?wfF{5{WL?3s[{!&cUй[J-mַ{^{hs3*"Uh^3˯0=/_g-1ٟ9^}8b̛aTSi'Zrc`T[O .؇++ | g( 'd x X;yl3 8~2W';sc,! 
Yܒ&x>]]v6uZnpZǫqy7[3|tK_K&~ -Yg;reW]'`K/i^!(^c[ȐʋǦNfwaO7J fl/jt {Og*M!{ͺ8>Nl}ˆv4G=ޅGIܵs?e[ϣQ,G6Fe| wLpqǥ!(;_/o } }1hfS53^Omӎ*k+ <滃{pdH%T =X[kVD={ޓd^ݺRmVz797G=ꙭϚutJ42cnIÜJ^3v{twYFUQ3%H zӻ?VF]t~r??jNö\{V ])K{(Y[.:|rztπ=wzTצ4(3;',rB.LѩU%F־I?ԹoB&Øy)j;'sK4*>رNͩ2>|S oJޑEi}z(PsK~vYׅٜ>x' ʖN;*:yfѷ7 z n{no?|XY8EOT=ؒN;;Pp{G-7Alͮ] %%6Ս5J/Cv޽lٲC CËSPOoٳ://OffU j}nZYYiѩSyy͛UNGm˖-ݻt<Dm hhM.ǂz1ߗ>C3,Ɇ?g/j7K*>P^gNK d+o炛gG2||?X1bЈC_Z=!2vg]SM6*Wl+r 8ȯ]ի9E)ozˬ=:o[A|J7FU3YaÆ:dN㿻aǘ["g]Ė?|ՒCڨ"{ttb{T5qBpYX,[-ڱcz79`^bELz"VqOp֏7_ |uͭ\+:\Ɨ&!|)ջ -zHQ\\[-E{Ky?毺.*Zx7TP<=={/ϸ)l4?c[ybOf ܜŘSxQeohh4kpFv;l1D??9;ʦu <{Jo[M?&_Js2eZ2R?S8JlFqjcF ѩSb8,EpYLzMԞLwqDl!m۶Mm/|7taچbTʺ Iúp)zW 7ZlYYY۷o7m9 j# yaUe?Xa|C |U&m}I}r.I&mf +P,k") k%UQu#UP[R%r7LOs̽Y:m?yΝ3sΙ{ۥ[jj .gqrLp#f`?lIyFfLMy$r׮Z1h_"?ys5m9mO{93=[3[^WM_0kZ3bksQ+ z< i֪#48গ\+|5Y8.ESʯ{G/Q»X!&.G"^thР;;jX{zsS5@jd׮]"\^l~:i&kR8.?TqZAiLCJդz!W>ȓO(Nmr6/.F;}3'm{3~rѺg篜it.v= WNoU3ŗ~wK.^5=})&Vߚռqˣtj0|V9/HW^C_g]p%Vӟ f:}aiQ]VUtU1:ƛd*=VKCNHdNRQ dT}i^Kecfت㌛^WVE}\C+b]zU^ҳ؃oH6J#Pq/PQC#֯>zGQ<#Wu!։ܨ,rrrUZ%~?y:#MM)AK8a'T#nB DžcīK恃;[|ɴEMIO57劋Ϫ]yg0l=Y=_9Λsd./6m#.lÆyFJCư֨R|{AQ*_0[?c|dЅfiuM嘤WFuQ[LERʖRWmwFdXJJؔ&izm3:$&$\6E򞝽5w&)1/G!|Sl{\];>ec >GnlViwd8bRv^cEeuS:}mCLT%GQeKrz;@~YD(V\EײzO^г$ :z0#:u"oQ>"i>-\Ά! :v[}qFպ'g5-%;u(+H[^v2&/Y8i1GH29}}&3>' mmNck/0\U-6dZ\l x_S/%mtR-{ ;$)Ee>{˯vE?tdZ'X uL8Ľ TMF( EYJwGq7Bg1Jh*Jp#žtD=D ;oll_:LDq|)h>e[(.gC_rxu:e[LW&F9l TݶFƲmǓG)˔tp}ސ"L2CnW,ƤxMyU_P^Z8jDkUQs*–QUZ]151t1|k}_8;ɖ%|6iSG+)7X}1.N SU(9X/6r$*aM|ߙ nԾ9D}_pZ qUAҿ#G7٣D؏67aQ>"i>Fpsrxu+"ѵCX)==ɼjlh&;|HUe\M*ҩ>ۨ_k AOcoKr3KoPۇ1I^B3{k:q)OeQFo bƍ({nܬE5j~:tH!g?Mh.Do2n5F`Xs9cī˯ ~H n+ˑRUdy⋧NTorUx#JNI#˶W|?wr-kﵧD)jo./=NE~ny7T757g= [psg$ժ! >kew:ZM!q#//'+5Jҝ!n D\{SYJSFLMO^F<~UHnܬEm$9}IfwNXaYIHmr6%LjW/7K>>"ޫ(Թy_3gߔ;gg"nSELo0kj_P(5pC=lѣjƧ\1KCu MkE ڞQuEHwrXv*HO͹xıIIf8`<:@$|V5Atq `vV(uYY d70,eNch3p3z); P !#ȿH3ii%DSEB{1~xZͣ߃-T5W<8_pm_ʫV:uujҎYFfD*R{K}sAӧfFxzAMHn)&_Yo^%oF혍/'+_oe)tdinr3\.P?\Ms=|\^=%&%4J]{ƴP~{DvvP+--A>,'n,YH _zu몫՜')a?BHo&ՔYQC[)qp9 LjWlG=dL{cEg}^ٙꤘk26_~;nZVQԡCDmlR -^<7T7U`}FO,~u3'6`L+~i5ůKJ[}efথ[}gMW1fEƦڮ)Slx:xnp#]Q:tK7aʪ TB//e^}M/u$~cIuhC?q9z؂yb>% d̊i-tOw}i>G*c'Pq<QQ7w3'|Ѵb4Pϊ-7 LjW[QtG̺?Ժ&_(/KƄ>n'5 =:oFh`G֎ho߱~tXy#RΟ<^ͯ}qFȝmɨ-|ڤF6-o._\9.i睿[gUAV<[fOsmO/ytc>(=Ϸ5ыt?CJ)]Աj]8%YT `Ϟ=[B7"%"nF"!2ݻwWyRS4[&dn@#^](O\RvymKߎ@/':z͘iOcR{czW3'$*?8#Q|EX6h,UAEs;\n-yz(E^CnkL/ͫbC-/7rzm%N*e1i[tE-sJuTk@!8FtQ2^z~Ә=QQkmo;]A:kB[&Oߘ98e^vqfYY4epcZRY^iRYWkEQ[ҼK皇͢@7kl78(XE)d-Rd#; [ǍQ%M)ck}#r덕_tB! 
-n^Cp@qxÖ& /(6ܱd1uksƼ?˭ hEFrl.kB\xaPb^ØM|~sF k*:XQ0e~X2x*s/~]S(C V-t?;b(]q"ڣC[U1>̆M.:t0f٤}} ]^f57J^95/}|[ixnW‹/+}^xo~[vEk.`{6݋渿j"GK>cc֣.%Lt6rQ`2Hke.8T^жl_8kc܎x _mkIGYg(Fؽ8G~pO*-ے܍ÇdC t #9m/p4l1k9C@C{ERҩkIn&M\؋tciVK^Fp@|}`a5WVQ}Ch%G/qꗖN͖?zd.tV}k6V,6cK3ҥnXüPE/nL-H5*) #Ha3Gunc> ܾ 2?𚩁-^1o 85@eсe̛?!Q@2cڎӟ.2Y/NyL5po r}\ztdyYnKߙY o[kG>6y IG$Jt0*"^^pǁ5j* Zd]2 ~ucvPf=j9+)^Zfͳz5%i>ɼnuсeӆZ uTrؼۋLݕS;*9im>39cIgeTK{eKrRӭmT l,ܕ;kńa[9biCH>c[ޢT:t<҅#IwN%]ΡE6U:]/t]@ <|ѝmg-7jGoA{|dƳ^}ӯd>~{-j֒QwpYVpDQklV_pze6MUj S4 G'J1SS#ׂ1tȎ߯(4jۃ]GenKjK w2"uUp#]E:t<҅#lHhd7+7Oj ܺ|bcKt%*QƔ-ṡ߱W͝;#^yA[ }[bkSo._:蓏>jԞw^+ªWGnDaeg/3jRW 8 [+;OX+]tߜҔщN9oKehY:t0m86J͓iv5Z  _dz>!w=#gWQc̸GbA&N:֦}KG k,T%/m|ӗn,th:;3\0z5S&(o˃ "PJg?o?/3}ńQ粇J88ϥN#F1{H'"Fc#銲F9f .8Ө-npu̡eF8cvM#ڳ,uM F`_nhj:w_ {ʩ31wuW8bזwV/!+u`))o'uj#ENv?u&\йՓqCFڤHg.!Cc##r +Z +<Sf?2껺OIx _}pw4 VuM|0wu=p5_~FoF0@,3PEqZI|kuCd z)SOI0hȬץmi||P/M/@tv-/n&M{;no4]i]?27c{vc1JD<ñ'\0]9'g2"f=j~Hω7Ӄ/?jbcn%9poґRοkҵ?I+ogGFҠҬҸ囟FwlabM~Tҵ<C |JMK>9%#Ժݹoq惝 ;ٕ͐y]JUS C 9xC8=_ \C6Ek׎E1꓾mΌUpo{keG{]oL_|?IWpa?)~﻾3g><&x44444nhFF*kҝSY x '(p9FmqkG_^|dxi~Soks "}rMbi7e )*?aMiDiJiPiVi\ib7FIn#/]?6N0?298T%3jͪNiL\ctͧdYřuM?˚jsCVqP.i(0UےܛgO&m%Ӿ|֣FF1Y" ''(M) *HElt<҅<RǫohF n.X:4盗n(#덩[[bog`4'Rο+0_ޚvc3bxKeQsxT䛕\[nywsg`⸡]=E+a1+omo|8ryTwCXdp|҈ҔMRT?#@Ap ~R7/2Jf[z#D/Y ƈR~ Kߎz5csl~cTŐ1\~gݼyN:viFmqK*/h](+>'& i4DuaVe>bnyԕV/}$ulcKzHRd֊PR+ _Zٲ3>.np`ޞ"M# $gJ-[(ؗ"X:t$Vw'؂:77>Nӣ=]Oqg.aZ#AoFlذZ$JžJe~ NZD-zu֩eG+zn;vޭ^ɑRmD:y;pj3wUcIjܨzcҶ6WNe=jZ~i?+a[T%HISjc([VwOz}YUNaJMQ]VUƲ/,]tAfߞpy#Gg`1dvńfC/E*(4@LXTK|?V'/t2vb}}ݫf]b]ˮ]666yf%uVZ%?|uUq^SOُP衧,(؛Xh}kK784NY{mFEIM擤[_ymd1|dxog } HbekլGѵpLNi(Eqɴ)LIkMPOSe5cG/=jA 61Ʀzfwld#M !!b>lI4c#3K7`u p"72fPWtD[<7#"ԠE0n3qT>Qa<^K/@v;izj녠H SM/ֶ<8PYj]#uM.1rG-Jݖ^2ۧ~O iHa+ [\? -xo rnO5 ˻g@%NGݼ0*qv|iiiiiF`up'q>(ɋcNuENN}Z7}'TZLN bdƽ ps〉Ѓ+t,6Q\ZZn+\^T0>㹳]sSvw8b{mkN/ 36?Czãg5~wqRmkFex-3WN7s0C%4iְg&yO|Đmk nE~ICHslHKCKsKK[ n`rrrI]\8E_帰,Ig-; 'z *9lp&r mwX:G|/7Dzd}Z"36&} GnN7UX(xiI3 pL0pȥׄ+=eSogCy~C=HJ%KUKKwlbΦHXZjxp3-Ju"v&G@7qz:Y#Y3*Fi7*ٸqyaҦ<ױ¨wF̃_{3޼x8D+u};~h,N -M~ZO KnjNLH6N9Snx&ka#ӷ-cጮHJJJ%KUKKK;68Rf |b]sZ N:q7Bآt^}p ^*Y[q>Q Dt uytF^1{6Җ[.S 5~ 2j[cxdxQ<}W_IRvGvzky~_>^*kU~6sZ-#gy"U*++,U-Q҈l.{A;k;}Q?D*%3+l=ޒ4mt?&M-吺MmOϝ?r5A G ߎ@؋zHJ5JeJJJJ%;65RF nf%z'boLkQF)}ep檜^Q!#Q& 7=7:{1'}/nμKGKf|ed1uks x`#Ƅ~KCBB/w]v#+5l8"iҬ)zHIIJ5Jew^2[ױj5i>iD9.FBlܸQ61]Z ƔG n2ad "t$ yЃTF#с\QQ>n^o#/2Jfj-Wo=pptcv/ c1@yaiJg}Se3z5Qg4W3;sA1{GIuIIIJ5ȱa44ymIYMN 7Dϋqݞ:sPj"nFyDWi7/=SG\/k>R@Cp:oFm<*/h[(׳^{q;:8cyīNiCeVٍTuT}gQ\%&t`޿(kvJKgપ]oL7uM)0{gjqwd7|nTGyKSF[:i_|/jUH*JK*MαyZ_FHj0p9Y}/z˵ȲӧO_f޽{<2>Q1(ut9w(++KJJrg\v5)hllܿY胎@E: 7Yejr=Ȕq*_׭7jG7/6d7/vŭ~=a/K"]1ӌjs* ԅQ/tLȦ.ۙ;FB*DeoHIEIuI96R1V^F b nԘ\}q 'C$z=ߗd08N {F][t bQRZiinP &﷢;''W3"va+Fuv*^*n|iYgWmg?[4 Q>>ei^ܔ`$Y }h6´_m*BZr[KMp*ia),&0=mF8.r"+ڳgOuu(dvZmћ>\sFJ+kfADhuprۼy3GCF6R $ofb5lo;syҊ߼_^^6(.8Әdd<~֣ƨO8t!AIj,2rVi,x$w$M7c5S6h#/ U\ "HE*eVS[<7'' U.X:4;|GS2dVui[^N< T=3)7=mh(;2|`ׅ [~f||HkTTTc#M ۥ_DFo ^EW3w W$mkzؼ]#Fڝ ZJiC\>Nj澵,߼M͑RUh=Xf 7;)҇ZR&M5r>;a\_d7egeee*Y_Z*ܪz#n@d|'zQ[O Z-4Jl&na8M3xr϶& \5n)w?\.|de7}>CQw(YqKO(R?|}Ny\Ec.4Q?* *ζo>zv#e ^KnwYw9{e[K ,L^qÔ>֒`vl+n͖䆟ʎNɮlb=J:@*BpƁ-Tpc\c]7&ok=[ͨe{z "H?(cRοsMvɛ}Uwlll^E]qZbΦ/%%fU;7 ~~GG7-_l,T>7Rs⺦`Ӗ5uO0a :a )}vMiwmo|8ΎkȆFι-ͻ̔]pT""#U$eU@,n@w5瘏 t Z%_=|k1!71K]oG[F#f(c]8xa#S|;;|Z{6E6I6L6O6R6U6X6+G}]tTUM#=[V5 #Ri/g5n~:y1a{1"!#$&')HEv_*A߳* .7t?p)y;7nmZ6@6C6F6I6L6ϱQw Rg\yQ[^7/2Jf]Y|z#w~^dj $0ǦF*SүTvߪn#=h;4Սo-[hL3mz#XբR. 
parameters/man/dot-data_frame.Rd0000644000176200001440000000034213620043640016323 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.data_frame} \alias{.data_frame} \title{help-functions} \usage{ .data_frame(...) } \description{ help-functions } \keyword{internal} parameters/man/reexports.Rd0000644000176200001440000000074113620043640015512 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci.R, R/equivalence_test.R \docType{import} \name{reexports} \alias{reexports} \alias{ci} \alias{equivalence_test} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{bayestestR}{\code{\link[bayestestR]{ci}}, \code{\link[bayestestR]{equivalence_test}}} }} parameters/man/principal_components.Rd0000644000176200001440000001234513617565662017733 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/principal_components.R \name{principal_components} \alias{principal_components} \alias{closest_component} \title{Principal Component Analysis (PCA)} \usage{ principal_components( x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ... ) closest_component(x) } \arguments{ \item{x}{A dataframe or a statistical model.} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link{n_factors}}.
In \code{\link{reduce_parameters}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{rotation}{If not \code{"none"}, the PCA will be computed using the \pkg{psych} package. Possible options include \code{"varimax"}, \code{"quartimax"}, \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, and \code{"cluster"}. See \code{\link[psych]{fa}} for details.} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{standardize}{A logical value indicating whether the variables should be standardized (centred and scaled) to have unit variance before the analysis takes place (in general, such scaling is advisable).} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of loadings. } \description{ This function performs a principal component analysis (PCA) and returns the loadings as a dataframe. } \details{ \subsection{Complexity}{ Complexity represents the number of latent components needed to account for the observed variables. Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1 (\cite{Hofmann, 1978; Pettersson and Turkheimer, 2010}). } \subsection{Uniqueness}{ Uniqueness represents the variance that is 'unique' to the variable and not shared with other variables. It is equal to \code{1 - communality}, where communality is the variance that is shared with other variables. A uniqueness of \code{0.20} suggests that 20\% of that variable's variance is not shared with other variables in the overall factor model. The greater the 'uniqueness', the lower the relevance of the variable in the factor model. } \subsection{MSA}{ MSA represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates whether there is enough data for each factor to give reliable results for the PCA. The value should be > 0.6, and desirable values are > 0.8 (\cite{Tabachnick and Fidell, 2013}). } \subsection{PCA or FA?}{ There is a simplified rule of thumb that may help to decide whether to run a factor analysis or a principal component analysis: \itemize{ \item Run factor analysis if you assume or wish to test a theoretical model of latent factors causing observed variables. \item Run principal component analysis if you want to simply reduce your correlated observed variables to a smaller set of important independent composite variables. } (Source: \href{https://stats.stackexchange.com/q/1576/54740}{CrossValidated}) } } \note{ There is a \code{summary()}-method that prints the Eigenvalues and (explained) variance for each extracted component. \code{closest_component()} will return a numeric vector with the assigned component index for each column from the original data frame. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
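\cr\cr As an illustration of the \code{1 - communality} relation described in the 'Details', uniqueness can be recovered from the returned loadings. This is a minimal sketch, assuming two extracted components whose loading columns are named \code{"PC1"} and \code{"PC2"} (the column names depend on the number of components extracted): \preformatted{pca <- principal_components(mtcars[, 1:5], n = 2)
loadings <- as.data.frame(pca)
# communality = sum of squared loadings per variable
# (assumes the loading columns are named "PC1" and "PC2")
communality <- rowSums(loadings[c("PC1", "PC2")]^2)
uniqueness <- 1 - communality}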
} \examples{ library(parameters) principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) principal_components(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) principal_components(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) pca <- principal_components(mtcars[, 1:5], n = 2, rotation = "varimax") summary(pca) predict(pca) # which variables from the original data belong to which extracted component? closest_component(pca) \donttest{ # Automated number of components principal_components(mtcars[, 1:4], n = "auto") } } \references{ \itemize{ \item Kaiser, H.F. and Rice, J. (1974). Little jiffy, mark iv. Educational and Psychological Measurement, 34(1):111–117 \item Hofmann, R. (1978). Complexity and simplicity as objective indices descriptive of factor solutions. Multivariate Behavioral Research, 13:2, 247-250, \doi{10.1207/s15327906mbr1302_9} \item Pettersson, E., & Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics (6th ed.). Boston: Pearson Education. } } parameters/man/DRR.Rd0000644000176200001440000000306613575512105014117 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reduce_parameters.R \name{DRR} \alias{DRR} \title{Dimensionality Reduction via Regression (DRR)} \usage{ DRR(x, n = "all", ...) } \arguments{ \item{x}{A dataframe or a statistical model.} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link{n_factors}}. In \code{\link{reduce_parameters}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{...}{Arguments passed to or from other methods.} } \description{ Dimensionality Reduction via Regression (DRR) is a very recent technique extending PCA (Laparra et al., 2015). Starting from a rotated PCA, it predicts redundant information from the remaining components using non-linear regression. Some of the most notable advantages of performing DRR are avoidance of multicollinearity between predictors and overfitting mitigation. DRR tends to perform well when the first principal components are enough to explain most of the variation in the predictors. Requires the \pkg{DRR} package to be installed. } \examples{ \donttest{ DRR(iris[, 1:4]) } } \references{ \itemize{ \item Laparra, V., Malo, J., & Camps-Valls, G. (2015). Dimensionality reduction via regression in hyperspectral imagery. IEEE Journal of Selected Topics in Signal Processing, 9(6), 1026-1036. } } parameters/man/model_parameters.stanreg.Rd0000644000176200001440000000772213615560005020455 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.bayesian.R \name{model_parameters.stanreg} \alias{model_parameters.stanreg} \alias{model_parameters.brmsfit} \title{Parameters from Bayesian Models} \usage{ \method{model_parameters}{stanreg}( model, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 1, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, iterations = 1000, effects = "fixed", ...
) \method{model_parameters}{brmsfit}( model, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 1, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, iterations = 1000, effects = "fixed", component = "all", ... ) } \arguments{ \item{model}{Bayesian model.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Confidence Interval (CI) level. Defaults to 0.89 (89\%) for Bayesian models.} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope}} or \code{\link[bayestestR:p_direction]{p_direction}}) and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a vector of two values (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.} \item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{bf_prior}{Distribution representing a prior for the computation of Bayes factors / SI. Used if the input is a posterior, otherwise (in the case of models) ignored.} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{priors}{Add the prior used for each parameter.} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}) or \code{"all"}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters of Bayesian models. } \examples{ \donttest{ library(parameters) if (require("rstanarm")) { model <- rstanarm::stan_glm(Sepal.Length ~ Petal.Length * Species, data = iris, iter = 500, refresh = 0 ) model_parameters(model) } } } \seealso{ \code{\link[parameters:standardize_names]{standardize_names()}} to rename columns into a consistent, standardized naming scheme.
} parameters/man/dot-n_factors_bentler.Rd0000644000176200001440000000050713620043640017734 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_bentler} \alias{.n_factors_bentler} \title{Bentler and Yuan's Procedure} \usage{ .n_factors_bentler(eigen_values = NULL, model = "factors", nobs = NULL) } \description{ Bentler and Yuan's Procedure } \keyword{internal} parameters/man/bootstrap_model.Rd0000644000176200001440000000153313610137126016656 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap_model.R \name{bootstrap_model} \alias{bootstrap_model} \alias{model_bootstrap} \title{Model bootstrapping} \usage{ bootstrap_model(model, iterations = 1000, verbose = FALSE, ...) model_bootstrap(model, iterations = 1000, verbose = FALSE, ...) } \arguments{ \item{model}{Statistical model.} \item{iterations}{The number of draws to simulate/bootstrap.} \item{verbose}{Hide possible refit messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. } \description{ Bootstrap a statistical model n times to return a data frame of estimates. } \examples{ model <- lm(mpg ~ wt + cyl, data = mtcars) head(bootstrap_model(model)) } \seealso{ \code{\link{bootstrap_parameters}}, \code{\link{simulate_model}}, \code{\link{simulate_parameters}} } parameters/man/format_parameters.Rd0000644000176200001440000000155613607421425017205 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_parameters.R \name{format_parameters} \alias{format_parameters} \title{Parameter names formatting} \usage{ format_parameters(model) } \arguments{ \item{model}{A statistical model.} } \value{ The formatted parameter names. } \description{ Parameter names formatting } \examples{ library(parameters) model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) format_parameters(model) model <- lm(Sepal.Length ~ Species / Petal.Length, data = iris) format_parameters(model) model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris) format_parameters(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) format_parameters(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) format_parameters(model) } parameters/man/model_parameters.rma.Rd0000644000176200001440000000461113617625266017600 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.rma.R \name{model_parameters.rma} \alias{model_parameters.rma} \title{Parameters from Meta-Analysis} \usage{ \method{model_parameters}{rma}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, ... ) } \arguments{ \item{model}{Model object.} \item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).} \item{bootstrap}{Should estimates be based on a bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=parameters_bootstrap]{parameters_bootstrap()}}).} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{"refit"}, \code{"posthoc"}, \code{"smart"}, \code{"basic"} or \code{NULL} (default) for no standardization. See 'Details' in \code{\link[effectsize]{standardize_parameters}}.
Note that robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for, say, logistic regressions, or more generally speaking: for models with log or logit link.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Extract and compute indices and measures to describe parameters of meta-analysis models. } \examples{ library(parameters) mydat <- data.frame( effectsize = c(-0.393, 0.675, 0.282, -1.398), stderr = c(0.317, 0.317, 0.13, 0.36) ) if (require("metafor")) { model <- rma(yi = effectsize, sei = stderr, method = "REML", data = mydat) model_parameters(model) } # with subgroups if (require("metafor")) { data(dat.bcg) dat <- escalc( measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg ) dat$alloc <- ifelse(dat$alloc == "random", "random", "other") model <- rma(yi, vi, mods = ~ alloc, data = dat, digits = 3, slab = author) model_parameters(model) } } parameters/man/n_factors.Rd0000644000176200001440000001136213617565662015461 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{n_factors} \alias{n_factors} \title{Number of components/factors to retain in PCA/FA} \usage{ n_factors( x, type = "FA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), cor = NULL, safe = TRUE, ... ) } \arguments{ \item{x}{A dataframe.} \item{type}{Can be \code{"FA"} or \code{"PCA"}, depending on what you want to do.} \item{rotation}{Only used for VSS (Very Simple Structure criterion, see \code{\link[psych]{VSS}}). The rotation to apply. Can be \code{"none"}, \code{"varimax"}, \code{"quartimax"}, \code{"bentlerT"}, \code{"equamax"}, \code{"varimin"}, \code{"geominT"} and \code{"bifactor"} for orthogonal rotations, and \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, \code{"bentlerQ"}, \code{"geominQ"}, \code{"biquartimin"} and \code{"cluster"} for oblique transformations.} \item{algorithm}{Factoring method used by VSS. Can be \code{"pa"} for Principal Axis Factor Analysis, \code{"minres"} for minimum residual (OLS) factoring, \code{"mle"} for Maximum Likelihood FA and \code{"pc"} for Principal Components. \code{"default"} will select \code{"minres"} if \code{type = "FA"} and \code{"pc"} if \code{type = "PCA"}.} \item{package}{These are the packages from which methods are used. Can be \code{"all"} or a vector containing \code{"nFactors"}, \code{"psych"} and \code{"EGAnet"}. However, \code{"EGAnet"} can be very slow for bigger datasets. Thus, by default, \code{c("nFactors", "psych")} are selected.} \item{cor}{An optional correlation matrix that can be used. If \code{NULL}, will compute it by running \code{cor()} on the passed data.} \item{safe}{If \code{TRUE}, will run all the procedures in try blocks, and will only return those that work and silently skip the ones that may fail.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. } \description{ This function runs many existing procedures for determining how many factors to retain for your factor analysis (FA) or dimension reduction (PCA).
It returns the number of factors based on the maximum consensus between methods. In case of ties, it will keep the simplest solution, i.e. the one with the fewest factors. } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(parameters) n_factors(mtcars, type = "PCA") result <- n_factors(mtcars[1:5], type = "FA") as.data.frame(result) summary(result) \donttest{ n_factors(mtcars, type = "PCA", package = "all") n_factors(mtcars, type = "FA", algorithm = "mle", package = "all") } } \references{ \itemize{ \item Bartlett, M. S. (1950). Tests of significance in factor analysis. British Journal of statistical psychology, 3(2), 77-85. \item Bentler, P. M., & Yuan, K. H. (1996). Test of linear trend in eigenvalues of a covariance matrix with application to data analysis. British Journal of Mathematical and Statistical Psychology, 49(2), 299-312. \item Cattell, R. B. (1966). The scree test for the number of factors. Multivariate behavioral research, 1(2), 245-276. \item Finch, W. H. (2019). Using Fit Statistic Differences to Determine the Optimal Number of Factors to Retain in an Exploratory Factor Analysis. Educational and Psychological Measurement. \item Zoski, K. W., & Jurs, S. (1996). An objective counterpart to the visual scree test for factor analysis: The standard error scree. Educational and Psychological Measurement, 56(3), 443-451. \item Zoski, K., & Jurs, S. (1993). Using multiple regression to determine the number of factors to retain in factor analysis. Multiple Linear Regression Viewpoints, 20(1), 5-9. \item Nasser, F., Benson, J., & Wisenbaker, J. (2002). The performance of regression-based variations of the visual scree for determining the number of common factors. Educational and psychological measurement, 62(3), 397-419. \item Golino, H., Shi, D., Garrido, L. E., Christensen, A. P., Nieto, M. D., Sadana, R., & Thiyagarajan, J. A. (2018). Investigating the performance of Exploratory Graph Analysis and traditional techniques to identify the number of latent factors: A simulation and tutorial. \item Golino, H. F., & Epskamp, S. (2017). Exploratory graph analysis: A new approach for estimating the number of dimensions in psychological research. PloS one, 12(6), e0174035. \item Revelle, W., & Rocklin, T. (1979). Very simple structure: An alternative procedure for estimating the optimal number of interpretable factors. Multivariate Behavioral Research, 14(4), 403-414. \item Velicer, W. F. (1976). Determining the number of components from the matrix of partial correlations. Psychometrika, 41(3), 321-327. } } parameters/man/p_value_kenward.Rd0000644000176200001440000000405013611426120016620 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_kenward.R, R/dof_kenward.R, % R/p_value_kenward.R, R/se_kenward.R \name{ci_kenward} \alias{ci_kenward} \alias{dof_kenward} \alias{p_value_kenward} \alias{se_kenward} \title{Kenward-Roger approximation for SEs, CIs and p-values} \usage{ ci_kenward(model, ci = 0.95) dof_kenward(model) p_value_kenward(model, dof = NULL) se_kenward(model) } \arguments{ \item{model}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ An approximate F-test based on the Kenward-Roger (1997) approach.
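For illustration, the helpers documented here can be applied directly to a mixed model; a minimal sketch, assuming \pkg{lme4} (and the underlying Kenward-Roger machinery) is installed: \preformatted{if (require("lme4")) {
  model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris)
  se_kenward(model) # Kenward-Roger standard errors
  ci_kenward(model) # Kenward-Roger confidence intervals
}}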
} \details{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics. Unlike simpler approximation heuristics like the "m-l-1" rule (\code{dof_ml1}), the Kenward-Roger approximation is also applicable in more complex multilevel designs, e.g. with cross-classified clusters. However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. } \examples{ \donttest{ if (require("lme4")) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_kenward(model) } } } \references{ Kenward, M. G., & Roger, J. H. (1997). Small sample inference for fixed effects from restricted maximum likelihood. Biometrics, 983-997. } \seealso{ \code{dof_kenward()} and \code{se_kenward()} are small helper-functions to calculate approximated degrees of freedom and standard errors for model parameters, based on the Kenward-Roger (1997) approach. \cr \cr \code{\link[=dof_satterthwaite]{dof_satterthwaite()}} and \code{\link[=dof_ml1]{dof_ml1}} approximate degrees of freedom based on Satterthwaite's method or the "m-l-1" rule. } parameters/man/describe_distribution.Rd0000644000176200001440000000306213616055164020046 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe_distribution.R \name{describe_distribution} \alias{describe_distribution} \alias{describe_distribution.data.frame} \title{Describe a distribution} \usage{ describe_distribution( x, centrality = "mean", dispersion = TRUE, range = TRUE, ... ) \method{describe_distribution}{data.frame}( x, centrality = "mean", dispersion = TRUE, range = TRUE, include_factors = FALSE, ... ) } \arguments{ \item{x}{A numeric vector.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{range}{Return the range (min and max).} \item{...}{Additional arguments to be passed to or from methods.} \item{include_factors}{Logical, if \code{TRUE}, factors are included in the output, however, only columns for range (first and last factor levels) as well as n and missing will contain information.} } \value{ A data frame with columns that describe the properties of the variables. } \description{ This function describes a distribution by a set of indices (e.g., measures of centrality, dispersion, range, skewness, kurtosis). } \examples{ describe_distribution(rnorm(100)) data(iris) describe_distribution(iris) describe_distribution(iris, include_factors = TRUE) } parameters/man/convert_efa_to_cfa.Rd0000644000176200001440000000253613611663034017276 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_efa_to_cfa.R \name{convert_efa_to_cfa} \alias{convert_efa_to_cfa} \alias{convert_efa_to_cfa.fa} \alias{efa_to_cfa} \title{Conversion between EFA results and CFA structure} \usage{ convert_efa_to_cfa(model, ...) \method{convert_efa_to_cfa}{fa}(model, threshold = "max", names = NULL, ...) 
efa_to_cfa(model, ...) } \arguments{ \item{model}{An EFA model (e.g., a \code{psych::fa} object).} \item{...}{Arguments passed to or from other methods.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{names}{Vector containing dimension names.} } \value{ A \code{lavaan}-ready CFA model structure. } \description{ Enables a conversion of Exploratory Factor Analysis (EFA) results into a Confirmatory Factor Analysis (CFA) \code{lavaan}-ready structure. } \examples{ \donttest{ library(parameters) if (require("psych") && require("lavaan")) { efa <- psych::fa(attitude, nfactors = 3) model1 <- efa_to_cfa(efa) model2 <- efa_to_cfa(efa, threshold = 0.3) anova( lavaan::cfa(model1, data = attitude), lavaan::cfa(model2, data = attitude) ) } } } parameters/man/n_clusters.Rd0000644000176200001440000000325613617565662015667 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_clusters.R \name{n_clusters} \alias{n_clusters} \title{Number of clusters to extract} \usage{ n_clusters( x, standardize = TRUE, force = FALSE, package = c("NbClust", "mclust", "cluster", "M3C"), fast = TRUE, ... ) } \arguments{ \item{x}{A data frame.} \item{standardize}{Standardize the dataframe before clustering (default).} \item{force}{Logical, if \code{TRUE}, factors are converted to numerical values in order to be included in the data for determining the number of clusters. By default, factors are removed, because most methods that determine the number of clusters need numeric input only.} \item{package}{These are the packages from which methods are used to determine the number of clusters. Can be \code{"all"} or a vector containing \code{"NbClust"}, \code{"mclust"}, \code{"cluster"} and \code{"M3C"}.} \item{fast}{If \code{FALSE}, will compute 4 more indices (sets \code{index = "alllong"} in \code{NbClust}). This has been deactivated by default as it is computationally heavy.} \item{...}{Arguments passed to or from other methods.} } \description{ This function runs many existing procedures for determining how many clusters are present in your data. It returns the number of clusters based on the maximum consensus. In case of ties, it will select the solution with the fewest clusters. } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
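\cr\cr The consensus-based number of clusters can be extracted with \code{as.numeric()} and passed on to a clustering function. A minimal sketch (\code{\link{cluster_analysis}} is documented in this package; the subset of packages used here is just an example): \preformatted{n <- n_clusters(iris[, 1:4], package = c("NbClust", "mclust"))
as.numeric(n) # consensus-based number of clusters
cluster_analysis(iris[, 1:4], n_clusters = as.numeric(n))}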
} \examples{ library(parameters) \donttest{ n_clusters(iris[, 1:4], package = c("NbClust", "mclust", "cluster")) } } parameters/man/dot-filter_component.Rd0000644000176200001440000000061313620043640017610 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.filter_component} \alias{.filter_component} \title{for models with zero-inflation component, return required component of model-summary} \usage{ .filter_component(dat, component) } \description{ for models with zero-inflation component, return required component of model-summary } \keyword{internal} parameters/man/model_parameters.mlm.Rd0000644000176200001440000000654013615560005017574 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_parameters.multinom.R \name{model_parameters.mlm} \alias{model_parameters.mlm} \alias{model_parameters.multinom} \alias{model_parameters.bracl} \alias{model_parameters.DirichletRegModel} \title{Parameters from multinomial or cumulative link models} \usage{ \method{model_parameters}{mlm}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, ... ) \method{model_parameters}{multinom}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, ... ) \method{model_parameters}{bracl}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, ... ) \method{model_parameters}{DirichletRegModel}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "precision"), standardize = NULL, exponentiate = FALSE, ... ) } \arguments{ \item{model}{A model with multinomial or categorical response value.} \item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).} \item{bootstrap}{Should estimates be based on a bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=parameters_bootstrap]{parameters_bootstrap()}}).} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{"refit"}, \code{"posthoc"}, \code{"smart"}, \code{"basic"} or \code{NULL} (default) for no standardization. See 'Details' in \code{\link[effectsize]{standardize_parameters}}. Note that robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for, say, logistic regressions, or more generally speaking: for models with log or logit link.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}) or \code{"all"}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from multinomial or cumulative link models } \details{ Multinomial or cumulative link models, i.e.
models where the response value (dependent variable) is categorical and has more than two levels, usually return coefficients for each response level. Hence, the output from \code{model_parameters()} will split the coefficient tables by the different levels of the model's response. } \examples{ library(parameters) if (require("brglm2")) { data("stemcell") model <- bracl( research ~ as.numeric(religion) + gender, weights = frequency, data = stemcell, type = "ML" ) model_parameters(model) } } \seealso{ \code{\link[=standardize_names]{standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/factor_analysis.Rd0000644000176200001440000000716113610137126016645 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/factor_analysis.R \name{factor_analysis} \alias{factor_analysis} \title{Factor Analysis (FA)} \usage{ factor_analysis( x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ... ) } \arguments{ \item{x}{A dataframe or a statistical model.} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link{n_factors}}. In \code{\link{reduce_parameters}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{rotation}{If not \code{"none"}, the FA will be computed using the \pkg{psych} package. Possible options include \code{"varimax"}, \code{"quartimax"}, \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, and \code{"cluster"}. See \code{\link[psych]{fa}} for details.} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{standardize}{A logical value indicating whether the variables should be standardized (centred and scaled) to have unit variance before the analysis takes place (in general, such scaling is advisable).} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of loadings. } \description{ This function performs a Factor Analysis (FA). } \details{ \subsection{Complexity}{ Complexity represents the number of latent components needed to account for the observed variables. Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1 (\cite{Hofmann, 1978; Pettersson and Turkheimer, 2010}). } \subsection{FA or PCA?}{ There is a simplified rule of thumb that may help to decide whether to run a principal component analysis or a factor analysis: \itemize{ \item Run factor analysis if you assume or wish to test a theoretical model of latent factors causing observed variables. \item Run principal component analysis if you want to simply reduce your correlated observed variables to a smaller set of important independent composite variables. } (Source: \href{https://stats.stackexchange.com/q/1576/54740}{CrossValidated}) } } \note{ There is a \code{summary()}-method that prints the Eigenvalues and (explained) variance for each extracted component.
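\cr\cr The returned loadings can also be converted into a \code{lavaan}-ready CFA structure. A minimal sketch, assuming the result of \code{factor_analysis()} is supported by \code{\link{efa_to_cfa}} (see \code{\link{convert_efa_to_cfa}}): \preformatted{efa <- factor_analysis(mtcars[, 1:7], n = 2)
summary(efa) # eigenvalues and explained variance
efa_to_cfa(efa) # lavaan-ready CFA structure}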
} \examples{ library(parameters) factor_analysis(mtcars[, 1:7], n = "all", threshold = 0.2) factor_analysis(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) factor_analysis(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) efa <- factor_analysis(mtcars[, 1:5], n = 2) summary(efa) predict(efa) \donttest{ # Automated number of components factor_analysis(mtcars[, 1:4], n = "auto") } } \references{ \itemize{ \item Hofmann, R. (1978). Complexity and simplicity as objective indices descriptive of factor solutions. Multivariate Behavioral Research, 13:2, 247-250, \doi{10.1207/s15327906mbr1302_9} \item Pettersson, E., & Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} } } parameters/man/dot-n_factors_sescree.Rd0000644000176200001440000000060213620043640017726 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_sescree} \alias{.n_factors_sescree} \title{Standard Error Scree and Coefficient of Determination Procedures} \usage{ .n_factors_sescree(eigen_values = NULL, model = "factors") } \description{ Standard Error Scree and Coefficient of Determination Procedures } \keyword{internal} parameters/man/p_value_ml1.Rd0000644000176200001440000000601413614111266015665 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_ml1.R, R/dof_ml1.R, R/p_value_ml1.R, % R/se_ml1.R \name{ci_ml1} \alias{ci_ml1} \alias{dof_ml1} \alias{p_value_ml1} \alias{se_ml1} \title{"m-l-1" approximation for SEs, CIs and p-values} \usage{ ci_ml1(model, ci = 0.95) dof_ml1(model) p_value_ml1(model, dof = NULL) se_ml1(model) } \arguments{ \item{model}{A mixed model.} \item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ Approximation of degrees of freedom based on an "m-l-1" heuristic as suggested by Elff et al. (2019). } \details{ \subsection{Small Sample Cluster corrected Degrees of Freedom}{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics (see \cite{Li and Redden 2015}). The \emph{m-l-1} heuristic is such an approach that uses a t-distribution with fewer degrees of freedom (\code{dof_ml1}) to calculate p-values (\code{p_value_ml1}), standard errors (\code{se_ml1}) and confidence intervals (\code{ci(method = "ml1")}). } \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ In particular for repeated measures designs (longitudinal data analysis), the \emph{m-l-1} heuristic is likely to be more accurate than simply using the residual or infinite degrees of freedom, because \code{dof_ml1()} returns different degrees of freedom for within-cluster and between-cluster effects. } \subsection{Limitations of the "m-l-1" Heuristic}{ Note that the "m-l-1" heuristic is not applicable (or at least less accurate) for complex multilevel designs, e.g. with cross-classified clusters. In such cases, more accurate approaches like the Kenward-Roger approximation (\code{dof_kenward()}) are recommended.
However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. } } \examples{ \donttest{ if (require("lme4")) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_ml1(model) } } } \references{ \itemize{ \item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel Analysis with Few Clusters: Improving Likelihood-based Methods to Provide Unbiased Estimates and Accurate Inference, British Journal of Political Science. \item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15(1), 38. \doi{10.1186/s12874-015-0026-x} } } \seealso{ \code{dof_ml1()} and \code{se_ml1()} are small helper-functions to calculate approximated degrees of freedom and standard errors of model parameters, based on the "m-l-1" heuristic. } parameters/man/simulate_parameters.Rd0000644000176200001440000000720213617565662017547 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_parameters.R \name{simulate_parameters} \alias{simulate_parameters} \alias{parameters_simulate} \alias{simulate_parameters.default} \title{Simulate Model Parameters} \usage{ simulate_parameters(model, ...) parameters_simulate(model, ...) \method{simulate_parameters}{default}( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) } \arguments{ \item{model}{Statistical model (no Bayesian models).} \item{...}{Arguments passed to or from other methods.} \item{iterations}{The number of draws to simulate/bootstrap.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.89} (89\%) for Bayesian models and \code{.95} (95\%) for frequentist models.} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope}} or \code{\link[bayestestR:p_direction]{p_direction}}) and its results included in the summary output.} } \value{ A data frame with simulated parameters. } \description{ Compute simulated draws of parameters and their related indices such as Confidence Intervals (CI) and p-values. Simulating parameter draws can be seen as a (computationally faster) alternative to bootstrapping. } \details{ \subsection{Technical Details}{ \code{simulate_model()} is a computationally faster alternative to \code{bootstrap_model()}. Simulated draws for coefficients are based on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean \code{mu = coef(model)} and variance \code{Sigma = vcov(model)}. 
} \subsection{Models with Zero-Inflation Component}{ For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and \pkg{countreg}, the \code{component} argument can be used to specify which parameters should be simulated. For all other models, parameters from the conditional component (fixed effects) are simulated. This may include smooth terms, but not random effects. } } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(parameters) library(glmmTMB) model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) simulate_parameters(model) model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) simulate_parameters(model, centrality = "mean") simulate_parameters(model, ci = c(.8, .95), component = "zero_inflated") } \references{ Gelman A, Hill J. Data analysis using regression and multilevel/hierarchical models. Cambridge; New York: Cambridge University Press 2007: 140-143 } \seealso{ \code{\link{bootstrap_model}}, \code{\link{bootstrap_parameters}}, \code{\link{simulate_model}} } parameters/man/cluster_analysis.Rd0000644000176200001440000001047613617565662017064 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_analysis.R \name{cluster_analysis} \alias{cluster_analysis} \title{Compute cluster analysis and return group indices} \usage{ cluster_analysis( x, n_clusters = NULL, method = c("hclust", "kmeans"), distance = c("euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski"), agglomeration = c("ward", "ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid"), iterations = 20, algorithm = c("Hartigan-Wong", "Lloyd", "MacQueen"), force = TRUE, package = c("NbClust", "mclust"), verbose = TRUE ) } \arguments{ \item{x}{A data frame.} \item{n_clusters}{Number of clusters used for the cluster solution. By default, the number of clusters to extract is determined by calling \code{\link{n_clusters}}.} \item{method}{Method for computing the cluster analysis. By default (\code{"hclust"}), a hierarchical cluster analysis will be computed. Use \code{"kmeans"} to compute a kmeans cluster analysis. You can specify the initial letters only.} \item{distance}{Distance measure to be used when \code{method = "hclust"} (for hierarchical clustering). Must be one of \code{"euclidean"}, \code{"maximum"}, \code{"manhattan"}, \code{"canberra"}, \code{"binary"} or \code{"minkowski"}. See \code{\link{dist}}. If \code{method = "kmeans"}, this argument will be ignored.} \item{agglomeration}{Agglomeration method to be used when \code{method = "hclust"} (for hierarchical clustering). This should be one of \code{"ward"}, \code{"single"}, \code{"complete"}, \code{"average"}, \code{"mcquitty"}, \code{"median"} or \code{"centroid"}. Default is \code{"ward"} (see \code{\link{hclust}}). If \code{method = "kmeans"}, this argument will be ignored.} \item{iterations}{Maximum number of iterations allowed. Only applies, if \code{method = "kmeans"}. See \code{\link{kmeans}} for details on this argument.} \item{algorithm}{Algorithm used for calculating kmeans cluster. Only applies, if \code{method = "kmeans"}. May be one of \code{"Hartigan-Wong"} (default), \code{"Lloyd"} (used by SPSS), or \code{"MacQueen"}.
See \code{\link{kmeans}} for details on this argument.} \item{force}{Logical, if \code{TRUE}, ordered factors (ordinal variables) are converted to numeric values, while character vectors and factors are converted to dummy-variables (numeric 0/1) and are included in the cluster analysis. If \code{FALSE}, factors and character vectors are removed before computing the cluster analysis. For \code{method = "kmeans"} and \code{force = TRUE}, only ordered factors are used, because \code{\link{kmeans}} fails for dummy variables.} \item{package}{These are the packages from which methods are used to determine the number of clusters. Can be \code{"all"} or a vector containing \code{"NbClust"}, \code{"mclust"}, \code{"cluster"} and \code{"M3C"}.} \item{verbose}{Toggle off warnings.} } \value{ The group classification for each observation, as a vector. The returned vector includes missing values, so it has the same length as \code{nrow(x)}. } \description{ Compute hierarchical or kmeans cluster analysis and return the group assignment for each observation as a vector. } \details{ The \code{print()} and \code{plot()} methods show the (standardized) mean value for each variable within each cluster. Thus, a higher absolute value indicates that a certain variable characteristic is more pronounced within that specific cluster (as compared to other cluster groups with lower absolute mean values). } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ # Hierarchical clustering of the iris dataset groups <- cluster_analysis(iris[, 1:4], 3) # K-means clustering of the iris dataset, auto-detection of cluster-groups \dontrun{ groups <- cluster_analysis(iris[, 1:4], method = "k") } } \references{ Maechler M, Rousseeuw P, Struyf A, Hubert M, Hornik K (2014) cluster: Cluster Analysis Basics and Extensions. R package. } \seealso{ \code{\link{n_clusters}} to determine the number of clusters to extract, \code{\link{cluster_discrimination}} to determine the accuracy of cluster group classification and \code{\link{check_clusterstructure}} to check suitability of data for clustering. } parameters/man/standardize_names.Rd0000644000176200001440000000357713566047516017167 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/standardize_names.R \name{standardize_names} \alias{standardize_names} \alias{standardize_names.parameters_model} \title{Standardize column names} \usage{ standardize_names(data, ...) \method{standardize_names}{parameters_model}(data, style = c("easystats", "broom"), ...) } \arguments{ \item{data}{A data frame. Currently, only objects from \code{\link[=model_parameters]{model_parameters()}} are accepted.} \item{...}{Currently not used.} \item{style}{Standardization can either be based on the naming conventions from the easystats project, or on \pkg{broom}'s naming scheme.} } \value{ A data frame, with standardized column names. } \description{ Standardize column names from data frames, in particular objects returned from \code{\link[=model_parameters]{model_parameters()}}, so column names are consistent and the same for any model object. } \details{ This method is particularly useful for package developers or users who use \code{\link[=model_parameters]{model_parameters()}} in their own code or functions to retrieve model parameters for further processing.
As \code{model_parameters()} returns a data frame with varying column names (depending on the input), accessing the required information is not always straightforward. In such cases, \code{standardize_names()} can be used to get consistent, i.e. always identical, column names, no matter what kind of model was used in \code{model_parameters()}. \cr \cr For \code{style = "broom"}, column names are renamed to match \pkg{broom}'s naming scheme, i.e. \code{Parameter} is renamed to \code{term}, \code{Coefficient} becomes \code{estimate} and so on. } \examples{ library(parameters) model <- lm(mpg ~ wt + cyl, data = mtcars) mp <- model_parameters(model) as.data.frame(mp) standardize_names(mp) standardize_names(mp, style = "broom") } parameters/man/degrees_of_freedom.Rd0000644000176200001440000000266513614111266017274 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dof.R \name{degrees_of_freedom} \alias{degrees_of_freedom} \alias{dof} \title{Degrees of Freedom (DoF)} \usage{ degrees_of_freedom(model, method = "analytical") dof(model, method = "analytical") } \arguments{ \item{model}{A statistical model.} \item{method}{Can be \code{"analytical"} (default, DoFs are estimated based on the model type), \code{"fit"}, in which case they are directly taken from the model if available (for Bayesian models, the goal would be to refit the model as a frequentist one before extracting the DoFs; help with implementing this is welcome), \code{"ml1"} (see \code{\link{dof_ml1}}), \code{"betwithin"} (see \code{\link{dof_betwithin}}), \code{"satterthwaite"} (see \code{\link{dof_satterthwaite}}), \code{"kenward"} (see \code{\link{dof_kenward}}) or \code{"any"}, which tries to extract DoF by any of those methods, whichever succeeds.} } \description{ Estimate or extract degrees of freedom of models. } \examples{ model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris) dof(model) model <- glm(vs ~ mpg * cyl, data = mtcars, family = "binomial") dof(model) if (require("lme4")) { model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) dof(model) } \donttest{ if (require("rstanarm")) { model <- stan_glm( Sepal.Length ~ Petal.Length * Species, data = iris, chains = 2, refresh = 0 ) dof(model) } } } parameters/man/dot-recode_to_zero.Rd0000644000176200001440000000051013620043640017241 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.recode_to_zero} \alias{.recode_to_zero} \title{Recode a variable so that its lowest value begins at zero} \usage{ .recode_to_zero(x) } \description{ Recode a variable so that its lowest value begins at zero } \keyword{internal} parameters/man/ci.merMod.Rd0000644000176200001440000000742713614120052015300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci.R \name{ci.merMod} \alias{ci.merMod} \alias{ci.default} \alias{ci.glm} \alias{ci.polr} \alias{ci.mixor} \alias{ci.DirichletRegModel} \alias{ci.glmmTMB} \alias{ci.zeroinfl} \alias{ci.hurdle} \alias{ci.MixMod} \alias{ci.betareg} \alias{ci.clm2} \alias{ci.lme} \title{Confidence Intervals (CI)} \usage{ \method{ci}{merMod}( x, ci = 0.95, method = c("wald", "ml1", "betwithin", "satterthwaite", "kenward", "boot"), ... ) \method{ci}{default}(x, ci = 0.95, method = NULL, ...) \method{ci}{glm}(x, ci = 0.95, method = c("profile", "wald", "robust"), ...) \method{ci}{polr}(x, ci = 0.95, method = c("profile", "wald", "robust"), ...)
\method{ci}{mixor}(x, ci = 0.95, effects = c("all", "fixed", "random"), ...) \method{ci}{DirichletRegModel}(x, ci = 0.95, component = c("all", "conditional", "precision"), ...) \method{ci}{glmmTMB}( x, ci = 0.95, component = c("all", "conditional", "zi", "zero_inflated"), method = c("wald", "ml1", "betwithin", "robust"), ... ) \method{ci}{zeroinfl}( x, ci = 0.95, component = c("all", "conditional", "zi", "zero_inflated"), method = c("wald", "ml1", "betwithin", "robust"), ... ) \method{ci}{hurdle}( x, ci = 0.95, component = c("all", "conditional", "zi", "zero_inflated"), method = c("wald", "ml1", "betwithin", "robust"), ... ) \method{ci}{MixMod}( x, ci = 0.95, component = c("all", "conditional", "zi", "zero_inflated"), ... ) \method{ci}{betareg}(x, ci = 0.95, component = c("all", "conditional", "precision"), ...) \method{ci}{clm2}(x, ci = 0.95, component = c("all", "conditional", "scale"), ...) \method{ci}{lme}(x, ci = 0.95, method = c("wald", "betwithin", "ml1", "satterthwaite"), ...) } \arguments{ \item{x}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Defaults to 0.95 (95\%).} \item{method}{For mixed models, can be \code{\link[=ci_wald]{"wald"}} (default), \code{\link[=ci_ml1]{"ml1"}} or \code{\link[=ci_betwithin]{"betwithin"}}. For linear mixed models, can also be \code{\link[=ci_satterthwaite]{"satterthwaite"}}, \code{\link[=ci_kenward]{"kenward"}} or \code{"boot"} (see \code{lme4::confint.merMod}). For (generalized) linear models, can be \code{"robust"} to compute confidence intervals based on robust standard errors, and for generalized linear models, may also be \code{"profile"} (default) or \code{"wald"}.} \item{...}{Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed.} \item{effects}{Should standard errors for fixed effects or random effects be returned? Only applies to mixed models. May be abbreviated. When standard errors for random effects are requested, for each grouping factor a list of standard errors (per group level) for random intercepts and slopes is returned.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"} or \code{"all"} (default). May be abbreviated.} } \value{ A data frame containing the CI bounds. } \description{ Compute confidence intervals (CI) for frequentist models. } \note{ \code{ci_robust()}, i.e. \code{ci(method = "robust")}, relies on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if \code{vcov_estimation = "CR"} for cluster-robust standard errors) and will thus only work for those models supported by those packages. } \examples{ \donttest{ library(parameters) if (require("glmmTMB")) { model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) ci(model) ci(model, component = "zi") } } } parameters/man/format_p.Rd0000644000176200001440000000216313603326255015275 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_p.R \name{format_p} \alias{format_p} \title{p-values formatting} \usage{ format_p( p, stars = FALSE, stars_only = FALSE, name = "p", missing = "", digits = 3, ...
) } \arguments{ \item{p}{value or vector of p-values.} \item{stars}{Add significance stars (e.g., p < .001***).} \item{stars_only}{Return only significance stars.} \item{name}{Name prefixing the text. Can be \code{NULL}.} \item{missing}{Value by which \code{NA} values are replaced. By default, an empty string (i.e. \code{""}) is returned for \code{NA}.} \item{digits}{Number of significant digits. May also be \code{"scientific"} to return exact p-values in scientific notation, or \code{"apa"} to use an APA-style for p-values.} \item{...}{Arguments from other methods.} } \value{ A formatted string. } \description{ Format p-values. } \examples{ format_p(c(.02, .065, 0, .23)) format_p(c(.02, .065, 0, .23), name = NULL) format_p(c(.02, .065, 0, .23), stars_only = TRUE) model <- lm(mpg ~ wt + cyl, data = mtcars) format_p(p_value(model)$p, digits = "scientific") } parameters/man/parameters_table.Rd0000644000176200001440000000161213613647766017014 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameters_table.R \name{parameters_table} \alias{parameters_table} \title{Parameter table formatting} \usage{ parameters_table(x, pretty_names = TRUE, stars = FALSE, ...) } \arguments{ \item{x}{A data frame of model's parameters.} \item{pretty_names}{Pretty parameters' names.} \item{stars}{Add significance stars (e.g., p < .001***).} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. } \description{ Parameter table formatting } \examples{ library(parameters) x <- model_parameters(lm(Sepal.Length ~ Species * Sepal.Width, data = iris)) as.data.frame(parameters_table(x)) \donttest{ if (require("rstanarm")) { model <- stan_glm(Sepal.Length ~ Species, data = iris, refresh = 0, seed = 123) x <- model_parameters(model, ci = c(0.69, 0.89, 0.95)) as.data.frame(parameters_table(x)) } } } parameters/man/p_value_wald.Rd0000644000176200001440000000562213611426120016122 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_wald.R, R/p_value_wald.R \name{ci_wald} \alias{ci_wald} \alias{p_value_wald} \alias{p_value_wald.merMod} \title{Wald-test approximation for CIs and p-values} \usage{ ci_wald( model, ci = 0.95, dof = NULL, effects = c("fixed", "random", "all"), component = c("all", "conditional", "zi", "zero_inflated", "precision", "scale", "smooth_terms"), robust = FALSE, ... ) p_value_wald(model, ...) \method{p_value_wald}{merMod}(model, dof = Inf, ...) } \arguments{ \item{model}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to 0.95 (95\%).} \item{dof}{Degrees of Freedom. If not specified, for \code{ci_wald()}, defaults to model's residual degrees of freedom (i.e. \code{n-k}, where \code{n} is the number of observations and \code{k} is the number of parameters). For \code{p_value_wald()}, defaults to \code{Inf}.} \item{effects}{Should standard errors for fixed effects or random effects be returned? Only applies to mixed models. May be abbreviated. When standard errors for random effects are requested, for each grouping factor a list of standard errors (per group level) for random intercepts and slopes is returned.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"} or \code{"all"} (default). 
May be abbreviated.} \item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details.} \item{...}{Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed.} } \value{ A data frame. } \description{ The Wald-test approximation treats t-values as Wald z. Since the t distribution converges to the z distribution as degrees of freedom increase, this is like assuming infinite degrees of freedom. While this is unambiguously anti-conservative, this approximation appears reasonable for reasonable sample sizes (Barr, 2013). That is, if we take the p-value to measure the probability of a false positive, this approximation produces a higher false positive rate than the nominal 5\% at p = 0.05. } \examples{ \donttest{ if (require("lme4")) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_wald(model) ci_wald(model, ci = c(0.90, 0.95)) } } } \references{ Barr, D. J. (2013). Random effects structure for testing interactions in linear mixed-effects models. Frontiers in Psychology, 4, 328. } parameters/man/dot-flatten_list.Rd0000644000176200001440000000055313620043640016734 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.flatten_list} \alias{.flatten_list} \title{Flatten a list} \usage{ .flatten_list(object, name = "name") } \arguments{ \item{object}{A list.} \item{name}{Name of the column of keys, in case the output is a data frame.} } \description{ Flatten a list } \keyword{internal} parameters/man/check_clusterstructure.Rd0000644000176200001440000000374113607421425020267 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_clusterstructure.R \name{check_clusterstructure} \alias{check_clusterstructure} \title{Check suitability of data for clustering} \usage{ check_clusterstructure(x, standardize = TRUE, distance = "euclidean", ...) } \arguments{ \item{x}{A data frame.} \item{standardize}{Standardize the data frame before clustering (default).} \item{distance}{Distance method used. Methods other than "euclidean" (default) are exploratory in the context of clustering tendency. See \code{\link{dist}} for a list of available methods.} \item{...}{Arguments passed to or from other methods.} } \value{ The H statistic (numeric) } \description{ This checks whether the data is appropriate for clustering, using Hopkins' H statistic on the given data. If the value of the Hopkins statistic is close to 0 (below 0.5), then we can reject the null hypothesis and conclude that the dataset is significantly clusterable. A value of H lower than 0.25 indicates a clustering tendency at the 90\% confidence level. The visual assessment of cluster tendency (VAT) approach (Bezdek and Hathaway, 2002) consists of investigating the heatmap of the ordered dissimilarity matrix. Following this, one can potentially detect the clustering tendency by counting the number of square-shaped blocks along the diagonal. } \examples{ library(parameters) check_clusterstructure(iris[, 1:4]) plot(check_clusterstructure(iris[, 1:4])) } \references{ \itemize{ \item Lawson, R. G., & Jurs, P. C. (1990).
New index for clustering tendency and its application to chemical problems. Journal of chemical information and computer sciences, 30(1), 36-41. \item Bezdek, J. C., & Hathaway, R. J. (2002, May). VAT: A tool for visual assessment of (cluster) tendency. In Proceedings of the 2002 International Joint Conference on Neural Networks. IJCNN02 (3), 2225-2230. IEEE. } } \seealso{ \code{\link{check_kmo}}, \code{\link{check_sphericity}} and \code{\link{check_factorstructure}}. } parameters/man/simulate_model.Rd0000644000176200001440000000453513610137126016471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_model.R \name{simulate_model} \alias{simulate_model} \alias{model_simulate} \alias{simulate_model.glmmTMB} \title{Simulated draws from model coefficients} \usage{ simulate_model(model, iterations = 1000, ...) model_simulate(model, iterations = 1000, ...) \method{simulate_model}{glmmTMB}( model, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated"), ... ) } \arguments{ \item{model}{Statistical model (no Bayesian models).} \item{iterations}{The number of draws to simulate/bootstrap.} \item{...}{Arguments passed to or from other methods.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"} or \code{"all"} (default). May be abbreviated.} } \value{ A data frame. } \description{ Simulate draws from a statistical model to return a data frame of estimates. } \details{ \subsection{Technical Details}{ \code{simulate_model()} is a computationally faster alternative to \code{bootstrap_model()}. Simulated draws for coefficients are based on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean \code{mu = coef(model)} and variance \code{Sigma = vcov(model)}. } \subsection{Models with Zero-Inflation Component}{ For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and \pkg{countreg}, the \code{component} argument can be used to specify which parameters should be simulated. For all other models, parameters from the conditional component (fixed effects) are simulated. This may include smooth terms, but not random effects. 
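For illustration, the returned draws (one column per parameter) can also be post-processed manually. A rough sketch, assuming \code{model} is one of the supported models with a zero-inflation component (such as the glmmTMB example below):
\preformatted{
sims <- simulate_model(model, component = "zero_inflated")
# quantile-based 0.95 intervals, computed per column of draws
sapply(sims, quantile, probs = c(0.025, 0.975))
}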
} } \examples{ library(parameters) library(glmmTMB) model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) head(simulate_model(model)) model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) head(simulate_model(model)) head(simulate_model(model, component = "zero_inflated")) } \seealso{ \code{\link[=parameters_simulate]{simulate_parameters()}}, \code{\link[=bootstrap_model]{bootstrap_model()}}, \code{\link[=bootstrap_parameters]{bootstrap_parameters()}} } parameters/man/dot-n_factors_cng.Rd0000644000176200001440000000047213620043640017051 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_cng} \alias{.n_factors_cng} \title{Cattell-Nelson-Gorsuch CNG Indices} \usage{ .n_factors_cng(eigen_values = NULL, model = "factors") } \description{ Cattell-Nelson-Gorsuch CNG Indices } \keyword{internal} parameters/man/print.Rd0000644000176200001440000000265713613651764014641 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.parameters_model.R \name{print} \alias{print} \alias{print.parameters_model} \title{Print model parameters} \usage{ \method{print}{parameters_model}(x, pretty_names = TRUE, split_components = TRUE, select = NULL, ...) } \arguments{ \item{x}{An object returned by \code{\link[=model_parameters]{model_parameters()}}.} \item{pretty_names}{Pretty parameters' names.} \item{split_components}{Logical, if \code{TRUE} (default), For models with multiple components (zero-inflation, smooth terms, ...), each component is printed in a separate table. If \code{FALSE}, model parameters are printed in a single table and a \code{Component} column is added to the output.} \item{select}{Character vector (or numeric index) of column names that should be printed. If \code{NULL} (default), all columns are printed.} \item{...}{Arguments passed to or from other methods.} } \value{ \code{NULL} } \description{ A \code{print()}-method for objects from \code{\link[=model_parameters]{model_parameters()}}. } \examples{ library(parameters) if (require("glmmTMB")) { model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) mp <- model_parameters(model) print(mp, pretty_names = FALSE) print(mp, split_components = FALSE) print(mp, select = c("Parameter", "Coefficient", "CI_low", "CI_high")) } } parameters/man/bootstrap_parameters.Rd0000644000176200001440000000447213612621513017726 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap_parameters.R \name{bootstrap_parameters} \alias{bootstrap_parameters} \alias{parameters_bootstrap} \title{Parameters bootstrapping} \usage{ bootstrap_parameters( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) parameters_bootstrap( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) } \arguments{ \item{model}{Statistical model.} \item{iterations}{The number of draws to simulate/bootstrap.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. 
Defaults to \code{.89} (89\%) for Bayesian models and \code{.95} (95\%) for frequentist models.} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope}} or \code{\link[bayestestR:p_direction]{p_direction}}) and its results included in the summary output.} \item{...}{Arguments passed to or from other methods.} } \value{ Bootstrapped parameters. } \description{ Compute bootstrapped parameters and their related indices such as Confidence Intervals (CI) and p-values. } \examples{ library(parameters) model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) bootstrap_parameters(model) } \references{ Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their application (Vol. 1). Cambridge University Press. } \seealso{ \code{\link{bootstrap_model}}, \code{\link{simulate_parameters}}, \code{\link{simulate_model}} } parameters/man/check_sphericity.Rd0000644000176200001440000000252313600115277017003 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_factorstructure.R \name{check_sphericity} \alias{check_sphericity} \title{Bartlett's Test of Sphericity} \usage{ check_sphericity(x, ...) } \arguments{ \item{x}{A data frame.} \item{...}{Arguments passed to or from other methods.} } \value{ A list of indices related to sphericity. } \description{ Bartlett (1951) introduced the test of sphericity, which tests whether a matrix is significantly different from an identity matrix. This statistical test checks for the presence of correlations among variables, providing the statistical probability that the correlation matrix has significant correlations among at least some of the variables. For factor analysis to work, some relationships between variables are needed; thus, a significant Bartlett’s test of sphericity is required (say, p < .001). } \details{ This function is strongly inspired by the \code{cortest.bartlett} function in the \pkg{psych} package (Revelle, 2016). All credits go to its author. } \examples{ library(parameters) check_sphericity(mtcars) } \references{ \itemize{ \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. \item Bartlett, M. S. (1951). The effect of standardization on a Chi-square approximation in factor analysis. Biometrika, 38(3/4), 337-344. } } parameters/man/select_parameters.Rd0000644000176200001440000000525113616217154017172 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/select_parameters.R, % R/select_parameters.stanreg.R \name{select_parameters} \alias{select_parameters} \alias{parameters_selection} \alias{select_parameters.lm} \alias{select_parameters.merMod} \alias{select_parameters.stanreg} \title{Automated selection of model parameters} \usage{ select_parameters(model, ...) parameters_selection(model, ...) \method{select_parameters}{lm}(model, direction = "both", steps = 1000, k = 2, ...)
\method{select_parameters}{merMod}(model, direction = "backward", steps = 1000, ...) \method{select_parameters}{stanreg}(model, method = NULL, cross_validation = FALSE, ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Arguments passed to or from other methods.} \item{direction}{ the mode of stepwise search, can be one of \code{"both"}, \code{"backward"}, or \code{"forward"}, with a default of \code{"both"}. If the \code{scope} argument is missing the default for \code{direction} is \code{"backward"}. Values can be abbreviated. } \item{steps}{ the maximum number of steps to be considered. The default is 1000 (essentially as many as required). It is typically used to stop the process early. } \item{k}{ the multiple of the number of degrees of freedom used for the penalty. Only \code{k = 2} gives the genuine AIC: \code{k = log(n)} is sometimes referred to as BIC or SBC. } \item{method}{The method used in the variable selection. Can be \code{NULL} (default), \code{"forward"} or \code{"L1"}. See \code{projpred::varsel}.} \item{cross_validation}{Select with cross-validation.} } \value{ The model refitted with optimal number of parameters. } \description{ This function performs an automated selection of the 'best' parameters, updating and returning the "best" model. For frequentist simple GLMs, it performs an AIC-based stepwise selection. For Bayesian models, it uses the \code{projpred} package. } \examples{ model <- lm(mpg ~ ., data = mtcars) select_parameters(model) model <- lm(mpg ~ cyl * disp * hp * wt, data = mtcars) select_parameters(model) \donttest{ # lme4 ------------------------------------------- if (require("lme4")) { model <- lmer( Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), data = iris ) select_parameters(model) } # rstanarm ------------------------------------------- if (require("rstanarm")) { model <- stan_glm(mpg ~ ., data = mtcars, iter = 500, refresh = 0) select_parameters(model, cross_validation = TRUE) model <- stan_glm(mpg ~ cyl * disp * hp, data = mtcars, iter = 500, refresh = 0) select_parameters(model, cross_validation = FALSE) } } } parameters/man/dot-compact_list.Rd0000644000176200001440000000041013620043640016715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.compact_list} \alias{.compact_list} \title{remove NULL elements from lists} \usage{ .compact_list(x) } \description{ remove NULL elements from lists } \keyword{internal} parameters/DESCRIPTION0000644000176200001440000000604413620060023014117 0ustar liggesusersType: Package Package: parameters Title: Processing of Model Parameters Version: 0.5.0 Authors@R: c( person("Daniel", "Lüdecke", role = c("aut", "cre"), email = "d.luedecke@uke.de", comment = c(ORCID = "0000-0002-8895-3206")), person("Dominique", "Makowski", email = "dom.makowski@gmail.com", role = c("aut"), comment = c(ORCID = "0000-0001-5375-9967")), person("Mattan S.", "Ben-Shachar", role = c("aut"), email = "matanshm@post.bgu.ac.il", comment = c(ORCID = "0000-0002-4287-4801")), person(given = "Indrajeet", family = "Patil", role = c("aut"), email = "patilindrajeet.science@gmail.com", comment = c(ORCID = "0000-0003-1995-6531")), person(given = "Søren", family = "Højsgaard", role = c("aut"), email = "sorenh@math.aau.dk"), person(given = "Zen J.", family = "Lau", role = c("ctb"), email = "zenjuen.lau@ntu.edu.sg") ) Description: Utilities for processing the parameters of various statistical models. 
Beyond computing p-values, CIs, and other indices for a wide variety of models (see the support list of insight; Lüdecke, Waggoner & Makowski (2019)), this package implements features like bootstrapping or simulation of parameters and models, feature reduction (feature extraction and variable selection), as well as functions to describe data and variable characteristics (e.g. skewness, kurtosis, smoothness or distribution). License: GPL-3 URL: https://easystats.github.io/parameters/ BugReports: https://github.com/easystats/parameters/issues Depends: R (>= 3.2) Imports: bayestestR (>= 0.5.0), insight (>= 0.8.1), methods, stats, tools, utils Suggests: AER, aod, BayesFactor, BayesFM, betareg, boot, brglm2, brms, cAIC4, clubSandwich, cluster, covr, cplm, dplyr, DRR, effectsize, EGAnet (>= 0.7), FactoMineR, fastICA, gamlss, gee, geepack, GLMMadaptive, glmmTMB, GPArotation, lavaan, lavaSearch2, lme4, lmerTest, logspline, knitr, MASS, Matrix, mclust, MCMCglmm, metafor, mgcv, multimode, MuMIn, M3C, NbClust, nFactors, nlme, panelr, performance, plm, projpred, pscl, psych, randomForest, rmarkdown, rstanarm, sandwich, see, sjstats, spelling, survey, survival, testthat, TMB, truncreg, VGAM, Zelig Encoding: UTF-8 Language: en-US RoxygenNote: 7.0.2 VignetteBuilder: knitr NeedsCompilation: no Packaged: 2020-02-09 17:57:33 UTC; Daniel Author: Daniel Lüdecke [aut, cre] (), Dominique Makowski [aut] (), Mattan S. Ben-Shachar [aut] (), Indrajeet Patil [aut] (), Søren Højsgaard [aut], Zen J. Lau [ctb] Maintainer: Daniel Lüdecke Repository: CRAN Date/Publication: 2020-02-09 19:40:03 UTC parameters/build/0000755000176200001440000000000013620044013013505 5ustar liggesusersparameters/build/vignette.rds0000644000176200001440000000063413620044013016047 0ustar liggesusers[binary data omitted]
parameters/tests/0000755000176200001440000000000013603206134013555 5ustar liggesusersparameters/tests/spelling.R0000644000176200001440000000023313603206134015513 0ustar liggesusersif (requireNamespace("spelling", quietly = TRUE)) { spelling::spell_check_test( vignettes = TRUE, error = FALSE, skip_on_cran = TRUE ) } parameters/tests/testthat/0000755000176200001440000000000013620060023015407 5ustar liggesusersparameters/tests/testthat/test-geeglm.R0000644000176200001440000000150113611144133017755 0ustar liggesusersif (require("testthat") && require("parameters") && require("geepack")) { data(warpbreaks) m1 <- geeglm( breaks ~ tension, id = wool, data = warpbreaks, family = poisson, corstr = "ar1" ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(3.28294, -0.76741, -0.64708), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.15931, 0.22554, 0.06598), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.14913, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(3.59517, -0.32536, -0.51776), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-model_parameters_labels.R0000644000176200001440000000676113611145456023374 0ustar liggesusersif (require("insight") && require("testthat") && require("parameters") && require("lme4")) { test_that("model_parameters_labels", { data(mtcars) mtcars$am <- as.factor(mtcars$am) m1 <- lmer(mpg ~ hp * am + (1 |
cyl), data = mtcars) expect_equal( attr(model_parameters(m1), "pretty_names"), c(`(Intercept)` = "(Intercept)", hp = "hp", am1 = "am [1]", `hp:am1` = "hp * am [1]") ) m2 <- lmer(mpg ~ hp * as.factor(am) + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m2), "pretty_names"), c( `(Intercept)` = "(Intercept)", hp = "hp", `as.factor(am)1` = "am [1]", `hp:as.factor(am)1` = "hp * am [1]" ) ) m3 <- lmer(mpg ~ hp * log(gear) + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m3), "pretty_names"), c( `(Intercept)` = "(Intercept)", hp = "hp", `log(gear)` = "gear [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m4 <- lm(mpg ~ as.factor(cyl) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m4), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", hp = "hp", `log(gear)` = "gear [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m5 <- lm(mpg ~ as.factor(cyl) * I(wt / 10) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m5), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `I(wt/10)` = "wt/10", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:I(wt/10)` = "cyl [6] * wt/10", `as.factor(cyl)8:I(wt/10)` = "cyl [8] * wt/10", `hp:log(gear)` = "hp * gear [log]" ) ) m6 <- lm(mpg ~ as.factor(cyl) * log(wt) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m6), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `log(wt)` = "wt [log]", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:log(wt)` = "cyl [6] * wt [log]", `as.factor(cyl)8:log(wt)` = "cyl [8] * wt [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m7 <- lm(mpg ~ as.factor(cyl) * poly(wt, 2) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m7), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl6", `as.factor(cyl)8` = "cyl8", `poly(wt, 2)1` = "wt [1st degree]", `poly(wt, 2)2` = "wt [2nd degree]", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:poly(wt, 2)1` = "cyl6 * wt [1st degree]", `as.factor(cyl)8:poly(wt, 2)1` = "cyl8 * wt [1st degree]", `as.factor(cyl)6:poly(wt, 2)2` = "cyl6 * wt [2nd degree]", `as.factor(cyl)8:poly(wt, 2)2` = "cyl8 * wt [2nd degree]", `hp:log(gear)` = "hp * gear [log]" ) ) m8 <- lm(mpg ~ as.factor(cyl) * I(wt^2) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m8), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `I(wt^2)` = "wt^2", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:I(wt^2)` = "cyl [6] * wt^2", `as.factor(cyl)8:I(wt^2)` = "cyl [8] * wt^2", `hp:log(gear)` = "hp * gear [log]" ) ) }) } parameters/tests/testthat/test-lme.R0000644000176200001440000000347413617043573017315 0ustar liggesusersif (require("testthat") && require("parameters") && require("nlme") && require("lme4")) { data("sleepstudy") m1 <- nlme::lme(Reaction ~ Days, random = ~ 1 + Days | Subject, data = sleepstudy ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(237.927995380985, 7.4146616764556), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(6.82451602451407, 1.54578275017725), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(2.38350215912719e-80, 2.26328050057813e-10), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, 
c(251.405104848485, 10.467285959596), tolerance = 1e-4 ) }) data("Orthodont") m2 <- nlme::lme( distance ~ age + Sex, random = ~ 1 | Subject, data = Orthodont, method = "ML" ) test_that("model_parameters", { params <- model_parameters(m2) expect_equal(params$Coefficient, c(17.70671, 0.66019, -2.32102), tolerance = 1e-4) expect_equal(params$SE, c(0.83155, 0.06209, 0.74307), tolerance = 1e-4) # expect_equal(params$df, c(80, 80, 25), tolerance = 1e-4) expect_equal(params$CI_low, c(16.07503, 0.53834, -3.82999), tolerance = 1e-4) }) test_that("model_parameters, satterthwaite", { params <- model_parameters(m2, df_method = "satterthwaite") expect_equal(params$Coefficient, c(17.70671, 0.66019, -2.32102), tolerance = 1e-4) expect_equal(params$SE, c(0.83155, 0.06209, 0.74307), tolerance = 1e-4) # expect_equal(params$df, c(104.1503, 82.87867, 26.25), tolerance = 1e-4) expect_equal(params$CI_low, c(16.05848, 0.5379, -3.81337), tolerance = 1e-4) }) } parameters/tests/testthat/test-tobit.R0000644000176200001440000000165013542452500017642 0ustar liggesusersif (require("testthat") && require("parameters") && require("AER")) { data("Affairs", package = "AER") m1 <- AER::tobit( affairs ~ age + yearsmarried + religiousness + occupation + rating, data = Affairs ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(2.80106, -0.33435, 0.29049, -2.47756, -0.17261, -3.0843), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(2.74145, 0.07909, 0.13452, 0.40375, 0.25442, 0.40783), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00287, 0.02337, 4e-05, 3e-05, 0.20001, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(8.1742, -0.17933, 0.55414, -1.68622, 0.32605, -2.28497), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-model_parameters_df_method.R0000644000176200001440000000432713614640330024061 0ustar liggesusersif (require("testthat") && require("parameters") && require("lme4")) { data("mtcars") mtcars$cyl <- as.factor(mtcars$cyl) model <- lmer(mpg ~ as.factor(gear) * hp + as.factor(am) + wt + (1 | cyl), data = mtcars) mp1 <- model_parameters(model, digits = 5) mp2 <- model_parameters(model, digits = 5, df_method = "s") mp3 <- model_parameters(model, digits = 5, df_method = "k") test_that("model_parameters, df_method wald", { expect_equal(mp1$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3) expect_equal(mp1$df, c(22, 22, 22, 22, 22, 22, 22, 22), tolerance = 1e-3) expect_equal(mp1$p, c(0, 0.00068, 0.12872, 0.15695, 0.846, 0.00224, 0.00029, 0.31562), tolerance = 1e-3) expect_equal(mp1$CI_low, c(24.86326, 5.31796, -1.5521, -0.05313, -2.79893, -4.33015, -0.16595, -0.04943), tolerance = 1e-3) }) test_that("model_parameters, df_method satterthwaite", { expect_equal(mp2$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3) expect_equal(mp2$df, c(24, 24, 24, 24, 24, 24, 24, 24), tolerance = 1e-3) expect_equal(mp2$p, c(0, 0.00236, 0.14179, 0.16979, 0.84763, 0.00542, 0.00136, 0.32563), tolerance = 1e-3) expect_equal(mp2$CI_low, c(24.86326, 5.31796, -1.5521, -0.05313, -2.79893, -4.33015, -0.16595, -0.04943), tolerance = 1e-3) }) test_that("model_parameters, df_method kenward", { expect_equal(mp3$SE, c(2.97608, 6.10454, 3.98754, 0.02032, 1.60327, 0.91599, 0.05509, 0.01962), tolerance = 1e-3) expect_equal(mp3$df, c(19.39553, 23.57086, 22.7421, 2.72622, 5.27602, 22.82714, 8.97297, 23.76299), tolerance = 1e-3) 
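# Note: the Kenward-Roger approximation adjusts both the standard errors and
# the denominator degrees of freedom (which may be fractional), hence the
# non-integer df values tested above.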
expect_equal(mp3$p, c(0, 0.01772, 0.14202, 0.1907, 0.84772, 0.00546, 0.04232, 0.32614), tolerance = 1e-3) expect_equal(mp3$CI_low, c(24.46832, 0.5968, -2.46649, -0.06211, -2.83447, -4.4337, -0.21565, -0.0552), tolerance = 1e-3) }) model <- lm(mpg ~ as.factor(gear) * hp + as.factor(am) + wt, data = mtcars) test_that("model_parameters, df_method-lm", { testthat::expect_is(model_parameters(model), class = "parameters_model") testthat::expect_is(model_parameters(model, df_method = "kenward"), class = "parameters_model") }) } parameters/tests/testthat/test-model_parameters.mixed.R0000644000176200001440000000251313620005661023147 0ustar liggesusersif (require("testthat") && require("parameters") && require("lme4")) { data(mtcars) m1 <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) m2 <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial") test_that("model_parameters.mixed", { params <- model_parameters(m1) testthat::expect_equal(c(nrow(params), ncol(params)), c(2, 8)) testthat::expect_equal(params$CI_high, c(1.6373105660317, 0.554067677205595), tolerance = 1e-3) params <- model_parameters(m1, ci = c(0.8, 0.9)) testthat::expect_equal(c(nrow(params), ncol(params)), c(2, 10)) testthat::expect_equal(params$CI_high_80, c(1.29595665381331, 0.502185700948862), tolerance = 1e-3) testthat::expect_equal(params$CI_high_90, c(1.47875781798108, 0.529969433080186), tolerance = 1e-3) params <- model_parameters(m2) testthat::expect_equal(c(nrow(params), ncol(params)), c(2, 8)) model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial") params <- model_parameters(model) cs <- coef(summary(model)) testthat::expect_equal(c(nrow(params), ncol(params)), c(3, 8)) testthat::expect_equal(params$Parameter, rownames(cs)) # TODO: Not sure how to deal with bootstrapped mixed models... As it throws an unreasonable amount of singular fits... 
}) } parameters/tests/testthat/test-gee.R0000644000176200001440000000136713563605604017276 0ustar liggesusersif (require("testthat") && require("parameters") && require("gee")) { data(warpbreaks) m1 <- gee(breaks ~ tension, id = wool, data = warpbreaks) test_that("ci", { expect_equal( ci(m1)$CI_low, c(30.90044, -17.76184, -22.48406), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(2.80028, 3.96019, 3.96019), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.014717, 0.000501), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(36.38889, -10, -14.72222), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-model_parameters.glm.R0000644000176200001440000000253613620020426022621 0ustar liggesusersif (require("testthat") && require("parameters")) { data(mtcars) test_that("model_parameters.lm", { model <- lm(mpg ~ wt, data = mtcars) params <- model_parameters(model) testthat::expect_equal(c(nrow(params), ncol(params)), c(2, 8)) testthat::expect_equal(params$CI_high, c(41.119752761418, -4.20263490802709), tolerance = 1e-3) params <- model_parameters(model, ci = c(0.8, 0.9)) testthat::expect_equal(c(nrow(params), ncol(params)), c(2, 10)) params <- model_parameters(model, dispersion = TRUE, bootstrap = TRUE, n = 500) testthat::expect_equal(c(nrow(params), ncol(params)), c(2, 6)) model <- lm(mpg ~ wt + cyl, data = mtcars) params <- model_parameters(model) testthat::expect_equal(c(nrow(params), ncol(params)), c(3, 8)) model <- lm(mpg ~ wt * cyl, data = mtcars) params <- model_parameters(model) testthat::expect_equal(c(nrow(params), ncol(params)), c(4, 8)) }) test_that("model_parameters.glm - binomial", { set.seed(333) model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") params <- model_parameters(model) testthat::expect_equal(c(nrow(params), ncol(params)), c(3, 8)) params <- suppressWarnings(model_parameters(model, bootstrap = TRUE, n = 500)) testthat::expect_equal(c(nrow(params), ncol(params)), c(3, 5)) }) }parameters/tests/testthat/test-parameters_selection.R0000644000176200001440000000132113611655362022734 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("select_parameters", { model <- lm(mpg ~ ., data = mtcars) x <- select_parameters(model) testthat::expect_equal(n_parameters(model) - n_parameters(x), 7) # library(lme4) # model <- lmer(Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), data = iris) # x <- select_parameters(model) # testthat::expect_equal(n_parameters(model) - n_parameters(x), 0) # This is broken # library(rstanarm) # model <- stan_glm(mpg ~ ., data = mtcars, refresh = 0) # x <- select_parameters(model, cross_validation = TRUE) # testthat::expect_equal(n_parameters(model) - n_parameters(x), 9) }) }parameters/tests/testthat/test-model_parameters.cpglmm.R0000644000176200001440000000100313603350312023306 0ustar liggesusersif (require("testthat") && require("insight") && require("parameters") && require("cplm")) { data("FineRoot") model <- cpglmm(RLD ~ Stock + Spacing + (1 | Plant), data = FineRoot) test_that("model_parameters.cpglmm", { params <- model_parameters(model) testthat::expect_equal(params$SE, c(0.1308, 0.2514, 0.2, 0.1921), tolerance = 1e-3) testthat::expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "t", "df_error", "p") ) }) } parameters/tests/testthat/test-model_parameters_mixed_coeforder.R0000644000176200001440000000107113611145373025263 0ustar 
liggesusersif (require("lme4") && require("testthat") && require("parameters")) { set.seed(1) dat <- data.frame( TST.diff = runif(100, 0, 100), Exposition = as.factor(sample(0:2, 100, TRUE)), Gruppe = as.factor(sample(0:1, 100, TRUE)), Kennung = as.factor(sample(1:5, 100, TRUE)) ) m <- lme4::lmer(TST.diff ~ Exposition + Gruppe + Gruppe:Exposition + (1 | Kennung), data = dat) test_that("model_parameters.mixed.coeforder", { cs <- coef(summary(m)) mp <- model_parameters(m) expect_equal(mp$Parameter, rownames(cs)) }) } parameters/tests/testthat/test-distributions.R0000644000176200001440000000067013612652474021436 0ustar liggesusersif (require("testthat") && require("bayestestR") && require("parameters")) { test_that("distributions", { x <- bayestestR::distribution_normal(100) testthat::expect_equal(kurtosis(x), -0.3204763, tol = 0.01) testthat::expect_equal(skewness(x), -5.050428e-16, tol = 0.01) testthat::expect_equal(smoothness(x, "diff"), 0.919, tol = 0.01) testthat::expect_equal(smoothness(x, "cor"), 0.998, tol = 0.01) }) } parameters/tests/testthat/test-model_parameters.BFBayesFactor.R0000644000176200001440000000234413603206134024454 0ustar liggesusersif (require("testthat") && require("parameters") && require("BayesFactor") && require("logspline")) { test_that("model_parameters.BFBayesFactor", { # testthat::skip_on_travis() model <- BayesFactor::ttestBF(iris$Sepal.Width, iris$Petal.Length, paired = TRUE) testthat::expect_equal(parameters::model_parameters(model)$BF, 492.770, tolerance = 2) model <- BayesFactor::correlationBF(iris$Sepal.Width, iris$Petal.Length) testthat::expect_equal(parameters::model_parameters(model)$BF, 348853.6, tolerance = 10) set.seed(123) model <- BayesFactor::anovaBF(Sepal.Length ~ Species, data = iris) testthat::expect_equal(parameters::model_parameters(model)$Median, c(5.8431, -0.8266, 0.092, 0.734, 0.2681, 2.0415), tolerance = 2) df <- mtcars df$gear <- as.factor(df$gear) df$am <- as.factor(df$am) model <- BayesFactor::ttestBF(formula = mpg ~ am, data = df) testthat::expect_equal(parameters::model_parameters(model)$BF, 86, tolerance = 2) set.seed(123) model <- BayesFactor::anovaBF(mpg ~ gear * am, data = df) testthat::expect_equal(parameters::model_parameters(model)$Median, c(20.69277, -3.24014, 3.24014, 25.28076, 0.79331), tolerance = 2) }) } parameters/tests/testthat/test-model_parameters_std.R0000644000176200001440000000413013610616270022714 0ustar liggesusersif (require("testthat") && require("parameters") && require("insight")) { data(mtcars) mtcars$am <- as.factor(mtcars$am) model <- lm(mpg ~ wt * am, data = mtcars) test_that("model_parameters, standardize-refit", { params <- model_parameters(model, standardize = "refit") testthat::expect_equal(c(nrow(params), ncol(params)), c(4, 8)) testthat::expect_equal(params$Coefficient, c(-0.14183, -0.61463, -0.35967, -0.86017), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0.12207, 0.12755, 0.23542, 0.23454), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(0.10821, -0.35336, 0.12257, -0.37973), tolerance = 1e-3) }) test_that("model_parameters, standardize-posthoc", { params <- model_parameters(model, standardize = "posthoc") testthat::expect_equal(c(nrow(params), ncol(params)), c(4, 8)) testthat::expect_equal(params$Std_Coefficient, c(0, -0.61463, 2.46865, -0.87911), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0, 0.12755, 0.7075, 0.23971), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(0, -0.36464, 3.85532, -0.4093), tolerance = 1e-3) }) test_that("model_parameters,
standardize-basic", { params <- model_parameters(model, standardize = "basic") testthat::expect_equal(c(nrow(params), ncol(params)), c(4, 8)) testthat::expect_equal(params$Std_Coefficient, c(0, -0.61463, 1.23183, -1.11016), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0, 0.12755, 0.35303, 0.30271), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(0, -0.36464, 1.92377, -0.51687), tolerance = 1e-3) }) test_that("model_parameters, standardize-smart", { params <- model_parameters(model, standardize = "smart") testthat::expect_equal(c(nrow(params), ncol(params)), c(4, 8)) testthat::expect_equal(params$Std_Coefficient, c(0, -0.61463, 2.41278, -0.85922), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0, 0.12755, 0.69148, 0.23428), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(0, -0.36464, 3.76807, -0.40003), tolerance = 1e-3) }) } parameters/tests/testthat/test-format_parameters.R0000644000176200001440000003121113611144726022235 0ustar liggesusersif (require("testthat") && require("parameters") && require("splines")) { data(iris) set.seed(123) iris$cat <- sample(LETTERS[1:4], nrow(iris), replace = TRUE) test_that("format_parameters-1", { model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal.Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal.Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal.Width" )) }) test_that("format_parameters-2", { model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Sepal.Width = "Sepal.Width", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Sepal.Width:Speciesversicolor` = "Sepal.Width * Species [versicolor]", `Sepal.Width:Speciesvirginica` = "Sepal.Width * Species [virginica]" )) }) test_that("format_parameters-3", { model <- lm(Sepal.Length ~ Species * Sepal.Width * Petal.Length, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal.Width", Petal.Length = "Petal.Length", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal.Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal.Width", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal.Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal.Length", `Sepal.Width:Petal.Length` = "Sepal.Width * Petal.Length", `Speciesversicolor:Sepal.Width:Petal.Length` = "(Species [versicolor] * Sepal.Width) * Petal.Length", `Speciesvirginica:Sepal.Width:Petal.Length` = "(Species [virginica] * Sepal.Width) * Petal.Length" )) }) test_that("format_parameters-4", { model <- lm(Sepal.Length ~ Species * cat * Petal.Length, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", catB = "cat [B]", catC = "cat [C]", catD = "cat [D]", Petal.Length = "Petal.Length", `Speciesversicolor:catB` = "Species [versicolor] * cat [B]", `Speciesvirginica:catB` = "Species [virginica] * cat [B]", `Speciesversicolor:catC` = "Species [versicolor] * cat [C]", `Speciesvirginica:catC` = "Species [virginica] * cat [C]", `Speciesversicolor:catD` 
= "Species [versicolor] * cat [D]", `Speciesvirginica:catD` = "Species [virginica] * cat [D]", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal.Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal.Length", `catB:Petal.Length` = "cat [B] * Petal.Length", `catC:Petal.Length` = "cat [C] * Petal.Length", `catD:Petal.Length` = "cat [D] * Petal.Length", `Speciesversicolor:catB:Petal.Length` = "(Species [versicolor] * cat [B]) * Petal.Length", `Speciesvirginica:catB:Petal.Length` = "(Species [virginica] * cat [B]) * Petal.Length", `Speciesversicolor:catC:Petal.Length` = "(Species [versicolor] * cat [C]) * Petal.Length", `Speciesvirginica:catC:Petal.Length` = "(Species [virginica] * cat [C]) * Petal.Length", `Speciesversicolor:catD:Petal.Length` = "(Species [versicolor] * cat [D]) * Petal.Length", `Speciesvirginica:catD:Petal.Length` = "(Species [virginica] * cat [D]) * Petal.Length" )) }) test_that("format_parameters-5", { model <- lm(Sepal.Length ~ Species / Petal.Length, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Petal.Length` = "Species [setosa] : Petal.Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] : Petal.Length", `Speciesvirginica:Petal.Length` = "Species [virginica] : Petal.Length" )) }) test_that("format_parameters-6", { model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal.Length", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Sepal.Width` = "Species [setosa] : Sepal.Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] : Sepal.Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] : Sepal.Width" )) }) test_that("format_parameters-7", { model <- lm(Sepal.Length ~ Species / Petal.Length * Sepal.Width, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal.Width", `Speciessetosa:Petal.Length` = "Species [setosa] : Petal.Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] : Petal.Length", `Speciesvirginica:Petal.Length` = "Species [virginica] : Petal.Length", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal.Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal.Width", `Speciessetosa:Petal.Length:Sepal.Width` = "(Species [setosa] * Petal.Length) * Sepal.Width", `Speciesversicolor:Petal.Length:Sepal.Width` = "(Species [versicolor] * Petal.Length) * Sepal.Width", `Speciesvirginica:Petal.Length:Sepal.Width` = "(Species [virginica] * Petal.Length) * Sepal.Width" )) }) test_that("format_parameters-8", { model <- lm(Sepal.Length ~ Species / (Petal.Length * Sepal.Width), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Petal.Length` = "Species [setosa] : Petal.Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] : Petal.Length", `Speciesvirginica:Petal.Length` = "Species [virginica] : Petal.Length", `Speciessetosa:Sepal.Width` = "Species [setosa] : Sepal.Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] : Sepal.Width", 
`Speciesvirginica:Sepal.Width` = "Species [virginica] : Sepal.Width", `Speciessetosa:Petal.Length:Sepal.Width` = "Species [setosa] : Petal.Length : Sepal.Width", `Speciesversicolor:Petal.Length:Sepal.Width` = "Species [versicolor] : Petal.Length : Sepal.Width", `Speciesvirginica:Petal.Length:Sepal.Width` = "Species [virginica] : Petal.Length : Sepal.Width" )) }) test_that("format_parameters-9", { model <- lm(Sepal.Length ~ Petal.Length + (Species / (Sepal.Width * Petal.Width)), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal.Length", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Sepal.Width` = "Species [setosa] : Sepal.Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] : Sepal.Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] : Sepal.Width", `Speciessetosa:Petal.Width` = "Species [setosa] : Petal.Width", `Speciesversicolor:Petal.Width` = "Species [versicolor] : Petal.Width", `Speciesvirginica:Petal.Width` = "Species [virginica] : Petal.Width", `Speciessetosa:Sepal.Width:Petal.Width` = "Species [setosa] : Sepal.Width : Petal.Width", `Speciesversicolor:Sepal.Width:Petal.Width` = "Species [versicolor] : Sepal.Width : Petal.Width", `Speciesvirginica:Sepal.Width:Petal.Width` = "Species [virginica] : Sepal.Width : Petal.Width" )) }) test_that("format_parameters-10", { model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `poly(Sepal.Width, 2)1` = "Sepal.Width [1st degree]", `poly(Sepal.Width, 2)2` = "Sepal.Width [2nd degree]" )) }) test_that("format_parameters-11", { model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `poly(Sepal.Width, 2, raw = TRUE)1` = "Sepal.Width [1st degree]", `poly(Sepal.Width, 2, raw = TRUE)2` = "Sepal.Width [2nd degree]" )) }) test_that("format_parameters-12", { model <- lm(Sepal.Length ~ Petal.Length * bs(Petal.Width), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal.Length", `bs(Petal.Width)1` = "Petal.Width [1st degree]", `bs(Petal.Width)2` = "Petal.Width [2nd degree]", `bs(Petal.Width)3` = "Petal.Width [3rd degree]", `Petal.Length:bs(Petal.Width)1` = "Petal.Length * Petal.Width [1st degree]", `Petal.Length:bs(Petal.Width)2` = "Petal.Length * Petal.Width [2nd degree]", `Petal.Length:bs(Petal.Width)3` = "Petal.Length * Petal.Width [3rd degree]" )) }) test_that("format_parameters-13", { model <- lm(Sepal.Length ~ Petal.Length * bs(Petal.Width, degree = 4), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal.Length", `bs(Petal.Width, degree = 4)1` = "Petal.Width [1st degree]", `bs(Petal.Width, degree = 4)2` = "Petal.Width [2nd degree]", `bs(Petal.Width, degree = 4)3` = "Petal.Width [3rd degree]", `bs(Petal.Width, degree = 4)4` = "Petal.Width [4th degree]", `Petal.Length:bs(Petal.Width, degree = 4)1` = "Petal.Length * Petal.Width [1st degree]", `Petal.Length:bs(Petal.Width, degree = 4)2` = "Petal.Length * Petal.Width [2nd degree]", `Petal.Length:bs(Petal.Width, degree = 4)3` = "Petal.Length * Petal.Width [3rd 
degree]", `Petal.Length:bs(Petal.Width, degree = 4)4` = "Petal.Length * Petal.Width [4th degree]" )) }) test_that("format_parameters-14", { model <- lm(Sepal.Length ~ Petal.Length * ns(Petal.Width, df = 3), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal.Length", `ns(Petal.Width, df = 3)1` = "Petal.Width [1st degree]", `ns(Petal.Width, df = 3)2` = "Petal.Width [2nd degree]", `ns(Petal.Width, df = 3)3` = "Petal.Width [3rd degree]", `Petal.Length:ns(Petal.Width, df = 3)1` = "Petal.Length * Petal.Width [1st degree]", `Petal.Length:ns(Petal.Width, df = 3)2` = "Petal.Length * Petal.Width [2nd degree]", `Petal.Length:ns(Petal.Width, df = 3)3` = "Petal.Length * Petal.Width [3rd degree]" )) }) test_that("format_parameters-15", { model <- lm(Sepal.Length ~ Petal.Length * I(Petal.Width^2), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal.Length", `I(Petal.Width^2)` = "Petal.Width^2", `Petal.Length:I(Petal.Width^2)` = "Petal.Length * Petal.Width^2" )) }) test_that("format_parameters-16", { model <- lm(Sepal.Length ~ Petal.Length * as.factor(Species), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal.Length", `as.factor(Species)versicolor` = "Species [versicolor]", `as.factor(Species)virginica` = "Species [virginica]", `Petal.Length:as.factor(Species)versicolor` = "Petal.Length * Species [versicolor]", `Petal.Length:as.factor(Species)virginica` = "Petal.Length * Species [virginica]" )) }) test_that("format_parameters-17", { if (require("pscl")) { data("bioChemists") model <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) fp <- format_parameters(model) expect_equal(fp, c( `count_(Intercept)` = "(Intercept)", count_femWomen = "fem [Women]", count_marMarried = "mar [Married]", count_kid5 = "kid5", count_ment = "ment", `zero_(Intercept)` = "(Intercept)", zero_kid5 = "kid5", zero_phd = "phd" )) } }) } parameters/tests/testthat/test-glmmTMB.R0000644000176200001440000000544413616620357020036 0ustar liggesusersif (require("testthat") && require("parameters") && require("glmmTMB")) { data("fish") m1 <- glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + camper + (1 | persons), data = fish, family = truncated_poisson() ) m2 <- glmmTMB( count ~ child + camper + (1 | persons), data = fish, family = poisson() ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.33067, -1.32402, 0.55037, -1.66786, 1.44667, -1.64177), tolerance = 1e-3 ) expect_equal( ci(m1, component = "cond")$CI_low, c(0.33067, -1.32402, 0.55037), tolerance = 1e-3 ) expect_equal( ci(m1, component = "zi")$CI_low, c(-1.66786, 1.44667, -1.64177), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-0.47982, -1.85096, 0.76044), tolerance = 1e-3 ) expect_equal( ci(m2, component = "cond")$CI_low, c(-0.47982, -1.85096, 0.76044), tolerance = 1e-3 ) expect_null(ci(m2, component = "zi")) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.47559, 0.09305, 0.09346, 0.65229, 0.3099, 0.32324), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "cond")$SE, c(0.47559, 0.09305, 0.09346), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "zi")$SE, c(0.65229, 0.3099, 0.32324), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.62127, 0.08128, 0.08915), tolerance = 1e-3 ) expect_equal( standard_error(m2, component = "cond")$SE, c(0.62127, 0.08128, 0.08915), tolerance = 1e-3 ) 
expect_null(standard_error(m2, component = "zi")) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00792, 0, 0, 0.55054, 0, 0.00181), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "cond")$p, c(0.00792, 0, 0), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "zi")$p, c(0.55054, 0, 0.00181), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0.23497, 0, 0), tolerance = 1e-3 ) expect_equal( p_value(m2, component = "cond")$p, c(0.23497, 0, 0), tolerance = 1e-3 ) expect_null(p_value(m2, component = "zi")) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(1.2628, -1.14165, 0.73354, -0.38939, 2.05407, -1.00823), tolerance = 1e-3 ) expect_equal( model_parameters(m2)$Coefficient, c(0.73785, -1.69166, 0.93516), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-model_parameters.hurdle.R0000644000176200001440000000125513611361523023330 0ustar liggesusersif (require("testthat") && require("pscl") && require("parameters")) { set.seed(123) data("bioChemists", package = "pscl") model <- hurdle(formula = art ~ ., data = bioChemists, zero = "geometric") test_that("model_parameters.hurdle", { params <- model_parameters(model) testthat::expect_equal(params$SE, c(0.12246, 0.06522, 0.07283, 0.04845, 0.0313, 0.00228, 0.29552, 0.15911, 0.18082, 0.11113, 0.07956, 0.01302), tolerance = 1e-3) testthat::expect_equal(params$Coefficient, unname(coef(model)), tolerance = 1e-3) testthat::expect_equal(params$z, unname(c(coef(summary(model))[[1]][, 3], coef(summary(model))[[2]][, 3])), tolerance = 1e-3) }) } parameters/tests/testthat/test-panelr.R0000644000176200001440000000323513611146227020006 0ustar liggesusersif (require("testthat") && require("parameters") && require("panelr")) { data("WageData") wages <- panel_data(WageData, id = id, wave = t) m1 <- wbm(lwage ~ lag(union) + wks | blk + fem | blk * lag(union), data = wages) m2 <- wbm(lwage ~ lag(union) + wks | blk + fem | blk * (t | id), data = wages) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.00807, -0.00376, 6.14479, -0.09624, -0.00507, -0.34607, -0.53918, -0.37071), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-0.01668, -0.00139, 6.01762, -0.08795, -0.0055, -0.32126, -0.54359), tolerance = 1e-3 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.0256, 0.00108, 0.2313, 0.03482, 0.00482, 0.05952, 0.04971, 0.12418), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.01838, 0.00073, 0.22549, 0.03394, 0.0047, 0.05803, 0.04846), tolerance = 1e-3 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.02295, 0.13007, 0, 0.42167, 0.36422, 0.00013, 0, 0.30533), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0.29282, 0.9538, 0, 0.52805, 0.43004, 0.00038, 0), tolerance = 1e-3 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(0.05825, -0.00164, 6.59813, -0.028, 0.00438, -0.22941, -0.44176, -0.12732), tolerance = 1e-3 ) expect_equal( model_parameters(m2)$Coefficient, c(0.01934, 4e-05, 6.45957, -0.02143, 0.00371, -0.20753, -0.44861), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-model_parameters.lme.R0000644000176200001440000000104013603206134022607 0ustar liggesusersif (require("testthat") && require("insight") && require("parameters") && require("nlme") && require("lme4")) { data("sleepstudy") model <- lme(Reaction ~ Days, random = ~ 1 + Days | Subject, data = sleepstudy ) test_that("model_parameters.lme", { params <- model_parameters(model) testthat::expect_equal(params$SE, c(6.8245, 1.5458), tolerance = 1e-3) 
testthat::expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "t", "df_error", "p") ) }) } parameters/tests/testthat/test-skewness.R0000644000176200001440000000234113612122313020355 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("skewness", { data(iris) testthat::expect_equal(skewness(iris$Sepal.Length), 0.314911, tol = 1e-3) testthat::expect_equal(skewness(iris$Sepal.Length, type = 1), 0.3117531, tol = 1e-3) testthat::expect_equal(skewness(iris$Sepal.Length, type = 3), 0.3086407, tol = 1e-3) }) test_that("kurtosis", { data(iris) testthat::expect_equal(kurtosis(iris$Sepal.Length), -0.552064, tol = 1e-3) testthat::expect_equal(kurtosis(iris$Sepal.Length, type = 1), -0.5735679, tol = 1e-3) testthat::expect_equal(kurtosis(iris$Sepal.Length, type = 3), -0.6058125, tol = 1e-3) }) test_that("skewness", { data(iris) testthat::expect_equal( skewness(iris[, 1:4]), c(Sepal.Length = 0.314910956636973, Sepal.Width = 0.318965664713603, Petal.Length = -0.274884179751012, Petal.Width = -0.10296674764898), tol = 1e-3 ) }) test_that("kurtosis", { data(iris) testthat::expect_equal( kurtosis(iris[, 1:4]), c(Sepal.Length = -0.552064041315639, Sepal.Width = 0.228249042468194, Petal.Length = -1.40210341552175, Petal.Width = -1.34060399661265), tol = 1e-3 ) }) } parameters/tests/testthat/test-p_value.R0000644000176200001440000000724313620031334020153 0ustar liggesusers.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest || Sys.getenv("USER") == "travis") { if (require("testthat") && require("parameters") && require("lme4") && require("insight")) { data(mtcars) test_that("p_value", { # h-tests model <- insight::download_model("htest_1") testthat::expect_equal(p_value(model), 0.0413, tol = 0.01) model <- insight::download_model("htest_2") testthat::expect_equal(p_value(model), 0.151, tol = 0.01) model <- insight::download_model("htest_3") testthat::expect_equal(p_value(model), 0.183, tol = 0.01) model <- insight::download_model("htest_4") testthat::expect_equal(p_value(model), 0, tol = 0.01) model <- insight::download_model("htest_5") testthat::expect_equal(p_value(model), 0, tol = 0.01) model <- insight::download_model("htest_6") testthat::expect_equal(p_value(model), 0, tol = 0.01) model <- insight::download_model("htest_7") testthat::expect_equal(p_value(model), 0, tol = 0.01) model <- insight::download_model("htest_8") testthat::expect_equal(p_value(model), 0, tol = 0.01) # ANOVAs model <- insight::download_model("aov_1") testthat::expect_equal(p_value(model)$p, 0, tol = 0.01) model <- insight::download_model("anova_1") testthat::expect_equal(p_value(model)$p, 0, tol = 0.01) model <- insight::download_model("aovlist_1") testthat::expect_equal(p_value(model)$p, 0, tol = 0.01) model <- insight::download_model("aov_2") testthat::expect_equal(p_value(model)$p[1], 0, tol = 0.01) model <- insight::download_model("anova_2") testthat::expect_equal(p_value(model)$p[1], 0, tol = 0.01) model <- insight::download_model("aovlist_2") testthat::expect_equal(p_value(model)$p[1], 0.922, tol = 0.01) model <- insight::download_model("aov_3") testthat::expect_equal(p_value(model)$p[1], 0, tol = 0.01) model <- insight::download_model("anova_3") testthat::expect_equal(p_value(model)$p[1], 0, tol = 0.01) model <- insight::download_model("aovlist_3") testthat::expect_equal(p_value(model)$p[1], 0, tol = 0.01) model <- insight::download_model("anova_4") testthat::expect_equal(p_value(model)$p[2], 0, tol = 0.01) # ANOVA lmer model <-
insight::download_model("anova_lmerMod_0") testthat::expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_1") testthat::expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_2") testthat::expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_3") testthat::expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_4") testthat::expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_5") testthat::expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_6") testthat::expect_equal(p_value(model)$p[2], 0, tol = 0.01) # Mixed models model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) testthat::expect_equal(p_value(model)$p[1], 0.195, tol = 0.01) testthat::expect_equal(p_value(model, method = "kr")$p[1], 0.227, tol = 0.01) model <- insight::download_model("merMod_1") testthat::expect_equal(p_value(model)$p[1], 0.065, tol = 0.01) model <- insight::download_model("merMod_2") testthat::expect_equal(p_value(model)$p[1], 0.299, tol = 0.01) }) } } parameters/tests/testthat/test-betareg.R0000644000176200001440000000417313611144152020133 0ustar liggesusersif (Sys.getenv("USER") != "travis") { if (require("testthat") && require("parameters") && require("betareg")) { data("GasolineYield") data("FoodExpenditure") m1 <- betareg(yield ~ batch + temp, data = GasolineYield) m2 <- betareg(I(food / income) ~ income + persons, data = FoodExpenditure) test_that("ci", { expect_equal( ci(m1)$CI_low, c( -6.51692, 1.52932, 1.09151, 1.34475, 0.85909, 0.93085, 0.83233, 0.32981, 0.28241, 0.15335, 0.01016, 224.63213 ), tolerance = 1e-4 ) expect_equal( ci(m2)$CI_low, c(-1.06129, -0.01825, 0.0492, 19.77403), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c( 0.18232, 0.10123, 0.1179, 0.1161, 0.10236, 0.10352, 0.10604, 0.10913, 0.10893, 0.11859, 0.00041, 110.02562 ), tolerance = 1e-4 ) expect_equal( standard_error(m2)$SE, c(0.22385, 0.00304, 0.03534, 8.0796), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0, 0, 0, 0, 0, 0, 0, 1e-05, 0.00114, 0, 6e-05), tolerance = 1e-4 ) expect_equal( p_value(m2)$p, c(0.00542, 5e-05, 8e-04, 1e-05), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(-6.15957, 1.72773, 1.3226, 1.57231, 1.05971, 1.13375, 1.04016, 0.54369, 0.4959, 0.38579, 0.01097), tolerance = 1e-4 ) expect_equal( model_parameters(m1, component = "all")$Coefficient, c(-6.15957, 1.72773, 1.3226, 1.57231, 1.05971, 1.13375, 1.04016, 0.54369, 0.4959, 0.38579, 0.01097, 440.27838856), tolerance = 1e-4 ) expect_equal(model_parameters(m2)$Coefficient, c(-0.62255, -0.0123, 0.11846), tolerance = 1e-4) expect_equal(model_parameters(m2, component = "all")$Coefficient, c(-0.62255, -0.0123, 0.11846, 35.60975033), tolerance = 1e-4) }) } } parameters/tests/testthat/test-equivalence_test.R0000644000176200001440000000047713611144364022070 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("equivalence_test", { m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) x <- equivalence_test(m) testthat::expect_equal(c(nrow(x), ncol(x)), c(5, 6)) testthat::expect_true(is.character(capture.output(equivalence_test(m)))) }) }parameters/tests/testthat/test-model_parameters_std_mixed.R0000644000176200001440000000555013620005455024107 0ustar liggesusers.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest || Sys.getenv("USER") == "travis") {
if (require("testthat") && require("parameters") && require("lme4")) { data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) test_that("model_parameters, standardize-refit", { params <- model_parameters(model, standardize = "refit") testthat::expect_equal(c(nrow(params), ncol(params)), c(7, 8)) testthat::expect_equal(params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0.2045, 0.2619, 0.34035, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(1.37031, -0.77301, -1.14754, 0.46488, 2.01523, -0.06287, -0.00312), tolerance = 1e-3) }) test_that("model_parameters, standardize-posthoc", { params <- model_parameters(model, standardize = "posthoc") testthat::expect_equal(c(nrow(params), ncol(params)), c(7, 8)) testthat::expect_equal(params$Std_Coefficient, c(0, 0.49679, -0.49355, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0, 0.66228, 0.70202, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(0, 1.79483, 0.88238, 0.46488, 2.01523, -0.06287, -0.00312), tolerance = 1e-3) }) test_that("model_parameters, standardize-basic", { params <- model_parameters(model, standardize = "basic") testthat::expect_equal(c(nrow(params), ncol(params)), c(7, 8)) testthat::expect_equal(params$Std_Coefficient, c(0, 0.23497, -0.23344, 0.34791, 1.74252, -0.77129, -0.61304), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0, 0.31325, 0.33204, 0.05968, 0.13914, 0.2962, 0.30761), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(0, 0.84893, 0.41735, 0.46488, 2.01523, -0.19075, -0.01014), tolerance = 1e-3) }) test_that("model_parameters, standardize-refit robust", { params <- model_parameters(model, standardize = "refit", robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp)) testthat::expect_equal(c(nrow(params), ncol(params)), c(7, 8)) testthat::expect_equal(params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) testthat::expect_equal(params$SE, c(0.07726, 0.33406, 0.22647, 0.0524, 0.10092, 0.18537, 0.05552), tolerance = 1e-3) testthat::expect_equal(params$CI_high, c(1.12224, -0.6259, -1.36691, 0.45151, 1.94204, 0.11227, -0.07858), tolerance = 1e-3) }) } } parameters/tests/testthat/test-coxph.R0000644000176200001440000000302113613002045017626 0ustar liggesusersif (require("testthat") && require("parameters") && require("survival")) { data("lung") lung <- subset(lung, subset = ph.ecog %in% 0:2) lung$sex <- factor(lung$sex, labels = c("male", "female")) lung$ph.ecog <- factor(lung$ph.ecog, labels = c("good", "ok", "limited")) m1 <- coxph(Surv(time, status) ~ sex + age + ph.ecog, data = lung) test_that("ci", { expect_equal( ci(m1)$CI_low, c(-0.87535, -0.00747, 0.01862, 0.45527), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.16823, 0.00931, 0.19961, 0.22809), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00118, 0.24713, 0.04005, 8e-05), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(-0.54563, 0.01078, 0.40984, 0.90232), tolerance = 1e-4 ) }) # Create the simplest test data set test1 <- list( time = c(4, 3, 1, 1, 2, 2, 3), 
status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = c(0, 0, 0, 0, 1, 1, 1) ) # Fit a stratified model m2 <- coxph(Surv(time, status) ~ x + strata(sex), test1) test_that("model_parameters", { expect_equal(model_parameters(m2)$Coefficient, 0.8023179, tolerance = 1e-4) expect_equal(model_parameters(m2)$z, 0.9756088, tolerance = 1e-4) expect_equal(model_parameters(m2)$p, 0.3292583, tolerance = 1e-4) }) } parameters/tests/testthat/test-GLMMadaptive.R0000644000176200001440000000570113616620020020770 0ustar liggesusersif (require("testthat") && require("parameters") && require("lme4") && require("GLMMadaptive")) { data("fish") data("cbpp") m1 <- mixed_model( count ~ child + camper, random = ~ 1 | persons, zi_fixed = ~ child + livebait, data = fish, family = zi.poisson() ) m2 <- mixed_model( cbind(incidence, size - incidence) ~ period, random = ~ 1 | herd, data = cbpp, family = binomial ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.08708, -1.35715, 0.58599, -0.99993, 0.75543, -2.1166), tolerance = 1e-3 ) expect_equal( ci(m1, component = "cond")$CI_low, c(0.08708, -1.35715, 0.58599), tolerance = 1e-3 ) expect_equal( ci(m1, component = "zi")$CI_low, c(-0.99993, 0.75543, -2.1166), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-1.8572, -1.59265, -1.76827, -2.41754), tolerance = 1e-3 ) expect_equal( ci(m2, component = "cond")$CI_low, c(-1.8572, -1.59265, -1.76827, -2.41754), tolerance = 1e-3 ) expect_null(ci(m2, component = "zi")) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.54002, 0.09485, 0.09356, 0.46812, 0.29416, 0.50763), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "cond")$SE, c(0.54002, 0.09485, 0.09356), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "zi")$SE, c(0.46812, 0.29416, 0.50763), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.23354, 0.30678, 0.32678, 0.42761), tolerance = 1e-3 ) expect_equal( standard_error(m2, component = "cond")$SE, c(0.23354, 0.30678, 0.32678, 0.42761), tolerance = 1e-3 ) expect_null(standard_error(m2, component = "zi")) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.0339, 0, 0, 0.86023, 1e-05, 0.02713), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "cond")$p, c(0.0339, 0, 0), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "zi")$p, c(0.86023, 1e-05, 0.02713), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0, 0.00123, 0.00056, 0.00022), tolerance = 1e-3 ) expect_equal( p_value(m2, component = "cond")$p, c(0, 0.00123, 0.00056, 0.00022), tolerance = 1e-3 ) expect_null(p_value(m2, component = "zi")) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(1.14549, -1.17125, 0.76937, -0.08243, 1.33197, -1.12165), tolerance = 1e-3 ) expect_equal( model_parameters(m2)$Coefficient, c(-1.39946, -0.99138, -1.1278, -1.57945), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-model_parameters.aov.R0000644000176200001440000000604213620020550022621 0ustar liggesusers.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest || Sys.getenv("USER") == "travis") { if (require("insight") && require("testthat") && require("lme4") && require("parameters")) { data(iris) iris$Cat1 <- rep(c("X", "X", "Y"), length.out = nrow(iris)) iris$Cat2 <- rep(c("A", "B"), length.out = nrow(iris)) test_that("model_parameters.aov", { model <- aov(Sepal.Width ~ Species, data = iris) testthat::expect_equal(sum(model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE)$df), 149) model <-
aov(Sepal.Length ~ Species * Cat1 * Cat2, data = iris) testthat::expect_equal(sum(model_parameters(model, omega_squared = "raw", eta_squared = "partial", epsilon_squared = TRUE)$df), 149) model <- aov(Sepal.Length ~ Species / Cat1 * Cat2, data = iris) testthat::expect_equal(sum(model_parameters(model)$df), 149) }) data(mtcars) test_that("model_parameters.anova", { model <- anova(lm(Sepal.Width ~ Species, data = iris)) testthat::expect_equal(sum(model_parameters(model)$df), 149) model <- anova(lm(Sepal.Length ~ Species * Cat1 * Cat2, data = iris)) testthat::expect_equal(sum(model_parameters(model)$df), 149) model <- anova(lmer(wt ~ 1 + (1 | gear), data = mtcars)) testthat::expect_equal(nrow(model_parameters(model)), 0) model <- anova(lmer(wt ~ cyl + (1 | gear), data = mtcars)) testthat::expect_equal(sum(model_parameters(model)$df), 1) model <- anova(lmer(wt ~ drat + cyl + (1 | gear), data = mtcars)) testthat::expect_equal(sum(model_parameters(model)$df), 2) model <- anova(lmer(wt ~ drat * cyl + (1 | gear), data = mtcars)) testthat::expect_equal(sum(model_parameters(model)$df), 3) model <- anova(lmer(wt ~ drat/cyl + (1 | gear), data = mtcars)) testthat::expect_equal(sum(model_parameters(model)$df), 2) }) if (.runThisTest) { test_that("model_parameters.anova", { model <- insight::download_model("anova_3") testthat::expect_equal(sum(model_parameters(model)$df), 149) model <- insight::download_model("anova_4") testthat::expect_equal(sum(model_parameters(model)$df, na.rm = TRUE), 2) model <- insight::download_model("anova_lmerMod_5") testthat::expect_equal(sum(model_parameters(model)$df), 1) model <- insight::download_model("anova_lmerMod_6") testthat::expect_equal(sum(model_parameters(model)$df), 12) }) } data(mtcars) test_that("model_parameters.anova", { model <- aov(wt ~ cyl + Error(gear), data = mtcars) testthat::expect_equal(sum(model_parameters(model)$df), 31) model <- aov(Sepal.Length ~ Species * Cat1 + Error(Cat2), data = iris) testthat::expect_equal(sum(model_parameters(model)$df), 149) model <- aov(Sepal.Length ~ Species / Cat1 + Error(Cat2), data = iris) testthat::expect_equal(sum(model_parameters(model)$df), 149) }) } } parameters/tests/testthat/test-n_factors.R0000644000176200001440000000027013611655362020504 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("n_factors", { set.seed(333) x <- n_factors(mtcars[, 1:4]) testthat::expect_equal(ncol(x), 3) }) } parameters/tests/testthat/test-gam.R0000644000176200001440000000147613542452500017273 0ustar liggesusersif (require("testthat") && require("parameters") && require("mgcv")) { set.seed(123) dat <- gamSim(1, n = 400, dist = "normal", scale = 2) m1 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) test_that("ci", { expect_equal( ci(m1)$CI_low, c(7.771085, NA, NA, NA, NA), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.1020741, NA, NA, NA, NA), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0, 0, 0, 0.00196), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(7.97176, 3.63421, 2.97192, 8.29867, 1.04607), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-plm.R0000644000176200001440000000321713542452500017312 0ustar liggesusersif (require("testthat") && require("parameters") && require("plm")) { data(Crime) data("Produc", package = "plm") set.seed(123) Crime$year <- as.factor(Crime$year) m1 <- plm(lcrmrte ~ lprbarr + year | . 
- lprbarr + lmix, data = Crime, model = "random") m2 <- plm::plm( formula = log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state", "year") ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(-3.73774, -0.12257, -0.0596, -0.13346, -0.1837, -0.17772, -0.11678, -0.03952), tolerance = 1e-4 ) expect_equal( ci(m2)$CI_low, c(-0.08299, 0.24277, 0.70918, -0.00724), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.13223, 0.09221, 0.02684, 0.02679, 0.02704, 0.02671, 0.02663, 0.02664), tolerance = 1e-4 ) expect_equal( standard_error(m2)$SE, c(0.029, 0.02512, 0.03009, 0.00099), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.52827, 0.79447, 0.00252, 0, 0, 0.0153, 0.63378), tolerance = 1e-4 ) expect_equal( p_value(m2)$p, c(0.36752, 0, 0, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(-3.47857, 0.05815, -0.00699, -0.08095, -0.13071, -0.12537, -0.06458, 0.01269), tolerance = 1e-4 ) expect_equal( model_parameters(m2)$Coefficient, c(-0.02615, 0.29201, 0.76816, -0.0053), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-format.R0000644000176200001440000000244413603206134020011 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("format_ci", { testthat::expect_equal(format_ci(1.2012313, 145), "95% CI [1.20, 145.00]") testthat::expect_equal(format_ci(c(1.2012313, NA), c(145, 12.4)), c("95% CI [1.20, 145.00]", "95% CI [NA, 12.40]")) testthat::expect_equal(format_ci(c(NA, NA), c(1.2012313, NA)), c("95% CI [NA, 1.20]", "")) }) test_that("format_p", { testthat::expect_equal(nchar(format_p(0.02)), 9) testthat::expect_equal(nchar(format_p(0.02, stars = TRUE)), 10) testthat::expect_equal(nchar(format_p(0.02, stars_only = TRUE)), 1) }) test_that("format_number and format_order", { testthat::expect_equal(format_number(2), "two") testthat::expect_equal(format_number(45), "forty five") testthat::expect_equal(format_number(2), "two") testthat::expect_equal(format_order(2), "second") testthat::expect_equal(format_order(45), "forty fifth") testthat::expect_equal(format_order(2, textual = FALSE), "2nd") testthat::expect_equal(format_order(45, textual = FALSE), "45th") }) test_that("format others", { testthat::expect_true(is.character(format_pd(0.02))) testthat::expect_equal(nchar(format_bf(4)), 9) testthat::expect_true(is.character(format_rope(0.02))) }) } parameters/tests/testthat/test-checks.R0000644000176200001440000000064513611144221017757 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("check_factorstructure", { x <- check_factorstructure(mtcars) testthat::expect_equal(x$KMO$MSA, 0.826, tol = 0.01) testthat::expect_equal(x$sphericity$chisq, 408.011, tol = 0.01) }) test_that("check_clusterstructure", { set.seed(333) testthat::expect_equal(check_clusterstructure(iris[, 1:4])$H, 0.187, tol = 0.01) }) }parameters/tests/testthat/test-describe_distribution.R0000644000176200001440000000031413611144307023074 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("describe_distribution", { x <- describe_distribution(rnorm(100)) testthat::expect_equal(c(nrow(x), ncol(x)), c(1, 8)) }) } parameters/tests/testthat/test-model_parameters.htest.R0000644000176200001440000000203013617207022023163 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("model_parameters.htest", { params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "pearson")) 
testthat::expect_equal(params$r, -0.852, tolerance = 0.05) testthat::expect_warning(params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "spearman"))) testthat::expect_equal(params$rho, -0.9108, tolerance = 0.05) testthat::expect_warning(params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "kendall"))) testthat::expect_equal(params$tau, -0.795, tolerance = 0.05) params <- model_parameters(t.test(iris$Sepal.Width, iris$Sepal.Length)) testthat::expect_equal(params$Difference, -2.786, tolerance = 0.05) params <- model_parameters(t.test(mtcars$mpg ~ mtcars$vs)) testthat::expect_equal(params$Difference, 7.940, tolerance = 0.05) params <- model_parameters(t.test(iris$Sepal.Width, mu = 1)) testthat::expect_equal(params$Difference, 2.0573, tolerance = 0.05) }) }parameters/tests/testthat/test-model_parameters_robust.R0000644000176200001440000000422413610615033023440 0ustar liggesusersif (require("testthat") && require("parameters") && require("sandwich") && require("effectsize")) { data(mtcars) mtcars$am <- as.factor(mtcars$am) model <- lm(mpg ~ wt * am + cyl + gear, data = mtcars) test_that("model_parameters, robust", { params <- model_parameters(model, robust = TRUE) robust_se <- unname(sqrt(diag(sandwich::vcovHC(model)))) expect_equal(params$SE, robust_se, tolerance = 1e-3) expect_equal(params$p, c(0, 0.0259, 0.01478, 0.01197, 0.95238, 0.01165), tolerance = 1e-3) }) test_that("model_parameters, robust CL", { params <- model_parameters(model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1") robust_se <- unname(sqrt(diag(sandwich::vcovCL(model)))) expect_equal(params$SE, robust_se, tolerance = 1e-3) expect_equal(params$p, c(0, 0.00695, 0.00322, 0.00435, 0.94471, 0.00176), tolerance = 1e-3) }) model2 <- lm(mpg ~ wt * am + cyl + gear, data = effectsize::standardize(mtcars)) test_that("model_parameters, robust", { params <- model_parameters(model, standardize = "refit", robust = TRUE) robust_se <- unname(sqrt(diag(sandwich::vcovHC(model2)))) expect_equal(params$SE, robust_se, tolerance = 1e-3) expect_equal(params$p, c(0.28624, 0.0259, 0.43611, 0.01197, 0.95238, 0.01165), tolerance = 1e-3) }) # cluster-robust standard errors, using clubSandwich data(iris) model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) test_that("model_parameters, robust CR", { params <- model_parameters(model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$cluster)) robust_se <- unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster)))) expect_equal(params$SE, robust_se, tolerance = 1e-3) expect_equal(params$p, c(0.01246, 0.04172, 0.18895, 0.57496, 0, 0), tolerance = 1e-3) }) test_that("model_parameters, normal", { params <- model_parameters(model) expect_equal(params$p, c(0.13267, 0.21557, 0.36757, 0.77012, 3e-05, 0), tolerance = 1e-3) }) } parameters/tests/testthat/test-bracl.R0000644000176200001440000000261613613642100017603 0ustar liggesusersif (require("testthat") && require("parameters") && require("utils") && require("brglm2")) { data("stemcell") levels(stemcell$research) <- c("definitly", "alterly", "probably not", "definitely not") m1 <- bracl(research ~ as.numeric(religion) + gender, weights = frequency, data = stemcell, type = "ML") test_that("model_parameters", { params <- model_parameters(m1) expect_equal( params$Response, c("definitly", "alterly", "probably not", "definitly", "alterly", "probably not", "definitly", "alterly", 
"probably not") ) expect_equal( params$Parameter, c("definitly:(Intercept)", "alterly:(Intercept)", "probably not:(Intercept)", "definitly:as.numeric(religion)", "alterly:as.numeric(religion)", "probably not:as.numeric(religion)", "definitly:genderfemale", "alterly:genderfemale", "probably not:genderfemale") ) expect_equal( params$Coefficient, c(-1.24836, 0.47098, 0.42741, 0.4382, 0.25962, 0.01192, -0.13683, 0.18707, -0.16093), tolerance = 1e-3 ) }) # check order of response levels test_that("print model_parameters", { out <- utils::capture.output(print(model_parameters(m1))) expect_equal(out[1], "# Response level: definitly") expect_equal(out[9], "# Response level: alterly") expect_equal(out[17], "# Response level: probably not") }) } parameters/tests/testthat/test-pca.R0000644000176200001440000000212613611655362017273 0ustar liggesusersif (require("testthat") && require("parameters")) { test_that("principal_components", { x <- parameters::principal_components(mtcars[, 1:7], rotation = "varimax") testthat::expect_equal( x$RC1, c( -0.836114674884308, 0.766808147590597, 0.85441780762136, 0.548502661888057, -0.889046093964722, 0.931879020871552, -0.030485507571411 ), tolerance = 0.01 ) testthat::expect_equal( colnames(x), c("Variable", "RC1", "RC2", "Complexity", "Uniqueness", "MSA") ) }) test_that("principal_components", { x <- parameters::principal_components(mtcars[, 1:7]) testthat::expect_equal( x$PC1, c( -0.930866058535747, 0.9578708009312, 0.952846253483008, 0.874493647245971, -0.746868056938478, 0.882509152331738, -0.541093678419456 ), tolerance = 0.01 ) testthat::expect_equal( colnames(x), c("Variable", "PC1", "PC2", "Complexity") ) }) } parameters/tests/testthat/test-zeroinfl.R0000644000176200001440000000275313617521664020371 0ustar liggesusersif (require("testthat") && require("parameters") && require("pscl")) { data("bioChemists") m1 <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.42844, -0.34446, 0.00734, -0.26277, 0.01717, -1.77978, -0.37558, -0.51411), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.06797, 0.05868, 0.06593, 0.04874, 0.00212, 0.43378, 0.21509, 0.1352), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 9e-05, 0.03833, 6e-04, 0, 0.03211, 0.83068, 0.06539), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(0.56167, -0.22945, 0.13656, -0.16725, 0.02132, -0.92959, 0.04599, -0.24912), tolerance = 1e-4 ) }) ## TODO activate once insight update on CRAN # m2 <- zeroinfl(formula = art ~ . 
| 1, data = bioChemists, dist = "negbin") # test_that("model_parameters", { # expect_equal( # model_parameters(m2)$Coefficient, # c(0.25615, -0.21642, 0.15049, -0.17642, 0.01527, 0.02908, -11.95447), # tolerance = 1e-4 # ) # expect_equal( # model_parameters(m2)$Coefficient, # c("conditional", "conditional", "conditional", "conditional", # "conditional", "conditional", "zero_inflated") # ) # }) } parameters/tests/testthat/test-glmer.R0000644000176200001440000000401113614117300017615 0ustar liggesusersif (require("testthat") && require("parameters") && require("lme4")) { data("cbpp") model <- glmer( cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial(), nAGQ = 0 ) test_that("model_parameters.glmer", { params <- model_parameters(model) expect_equal(params$SE, c(0.22758, 0.30329, 0.32351, 0.42445), tolerance = 1e-3) expect_equal(params$df, c(51, 51, 51, 51), tolerance = 1e-3) }) test_that("model_parameters.glmer ml1", { params <- model_parameters(model, df_method = "ml1") expect_equal(params$SE, c(0.26093, 0.31854, 0.34172, 0.45132), tolerance = 1e-3) expect_equal(params$df, c(54, 54, 54, 54), tolerance = 1e-3) }) test_that("model_parameters.glmer betwithin", { params <- model_parameters(model, df_method = "betwithin") expect_equal(params$SE, c(0.27486, 0.32572, 0.35021, 0.46373), tolerance = 1e-3) expect_equal(params$df, c(36, 36, 36, 36), tolerance = 1e-3) }) set.seed(123) cbpp$time <- runif(nrow(cbpp), 1, 4) model <- glmer( cbind(incidence, size - incidence) ~ period + time + (1 + time | herd), data = cbpp, family = binomial(), nAGQ = 0 ) test_that("model_parameters.glmer", { params <- model_parameters(model) expect_equal(params$SE, c(0.66539, 0.36178, 0.36223, 0.45528, 0.2379), tolerance = 1e-3) expect_equal(params$df, c(48, 48, 48, 48, 48), tolerance = 1e-3) }) test_that("model_parameters.glmer ml1", { params <- model_parameters(model, df_method = "ml1") expect_equal(params$SE, c(0.68563, 0.37647, 0.37702, 0.48758, 0.23907), tolerance = 1e-3) expect_equal(params$df, c(53, 53, 53, 53, 53), tolerance = 1e-3) }) test_that("model_parameters.glmer betwithin", { params <- model_parameters(model, df_method = "betwithin") expect_equal(params$SE, c(0.69571, 0.38368, 0.38427, 0.50275, 0.24484), tolerance = 1e-3) expect_equal(params$df, c(35, 35, 35, 35, 9), tolerance = 1e-3) }) } parameters/tests/testthat/test-ci.R0000644000176200001440000000176613620005762017125 0ustar liggesusersif (require("testthat") && require("lme4") && require("parameters")) { data(mtcars) test_that("ci", { model <- lm(mpg ~ wt, data = mtcars) testthat::expect_equal(ci(model)[1, 3], 33.4505, tol = 0.01) testthat::expect_equal(ci(model, ci = c(0.7, 0.8))[1, 3], 35.30486, tol = 0.01) model <- glm(vs ~ wt, family = "binomial", data = mtcars) testthat::expect_equal(ci(model)[1, 3], 1.934013, tol = 0.01) model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) testthat::expect_equal(ci(model)[1, 3], -0.335063, tol = 0.01) set.seed(1) val <- ci(model, method = "boot")[1, 3] testthat::expect_equal(val, -0.555424, tol = 0.01) model <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial") testthat::expect_equal(ci(model)[1, 3], -0.7876679, tol = 0.01) model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial") testthat::expect_equal(ci(model)[1, 3], -48.14195, tol = 0.01) }) } parameters/tests/testthat/test-model_parameters.metafor.R0000644000176200001440000000150113613110440023464 0ustar liggesusersif (require("testthat") && require("insight") && 
require("parameters") && require("metafor")) { test <- data.frame( estimate = c(0.111, 0.245, 0.8, 1.1, 0.03), std.error = c(0.05, 0.111, 0.001, 0.2, 0.01) ) mydat <<- test model <- metafor::rma(yi = estimate, sei = std.error, data = mydat) params <- model_parameters(model) test_that("model_parameters.metafor", { expect_equal(params$Parameter, c("Study 1", "Study 2", "Study 3", "Study 4", "Study 5", "Overall")) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "z", "p", "Weight") ) expect_equal(params$Coefficient, c(0.111, 0.245, 0.8, 1.1, 0.03, 0.43769), tolerance = 1e-3) expect_equal(params$Weight, c(400, 81.16224, 1e+06, 25, 10000, NA), tolerance = 1e-3) }) } parameters/tests/testthat/test-ivreg.R0000644000176200001440000000203213542452500017630 0ustar liggesusersif (require("testthat") && require("parameters") && require("AER")) { data(CigarettesSW) CigarettesSW$rprice <- with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) m1 <- ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(7.82022, -1.79328, -0.18717), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(1.05856, 0.2632, 0.23857), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 1e-05, 0.24602), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(9.89496, -1.27742, 0.2804), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-model_parameters.efa_cfa.R0000644000176200001440000000451213611145211023402 0ustar liggesusersif (require("testthat") && require("parameters") && require("psych") && require("lavaan") && require("BayesFM") && require("FactoMineR")) { test_that("principal_components", { set.seed(333) x <- principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) testthat::expect_equal(c(ncol(x), nrow(x)), c(8, 7)) x <- principal_components(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) testthat::expect_equal(c(ncol(x), nrow(x)), c(6, 7)) pca <- principal_components(mtcars[, 1:5], n = 2) testthat::expect_equal(c(ncol(pca), nrow(pca)), c(4, 5)) x <- summary(pca) testthat::expect_equal(c(ncol(x), nrow(x)), c(3, 4)) x <- model_parameters(pca) testthat::expect_equal(c(ncol(x), nrow(x)), c(5, 2)) x <- predict(pca) testthat::expect_equal(c(ncol(x), nrow(x)), c(2, 32)) }) test_that("efa-cfa", { efa <- psych::fa(attitude, nfactors = 3) params <- parameters::model_parameters(efa) testthat::expect_equal(c(nrow(params), ncol(params)), c(7, 6)) model1 <- efa_to_cfa(efa) model2 <- efa_to_cfa(efa, threshold = 0.3) testthat::expect_equal(nchar(model1), 109) m1 <- lavaan::cfa(model1, data = attitude) params <- parameters::model_parameters(m1) testthat::expect_equal(c(nrow(params), ncol(params)), c(10, 9)) testthat::expect_warning(parameters::model_parameters(m1, ci = c(0.8, 0.9))) params <- parameters::model_parameters(m1, standardize = TRUE, type = "all") testthat::expect_equal(c(nrow(params), ncol(params)), c(20, 9)) x <- lavaan::anova(m1, lavaan::cfa(model2, data = attitude)) params <- parameters::model_parameters(x) testthat::expect_equal(c(nrow(params), ncol(params)), c(2, 6)) }) test_that("FactoMineR", { x <- model_parameters(FactoMineR::PCA(mtcars, ncp = 3), threshold = 0.2, sort = TRUE) 
testthat::expect_equal(c(ncol(x), nrow(x)), c(5, 11)) x <- model_parameters(FactoMineR::FAMD(iris, ncp = 3), threshold = 0.2, sort = TRUE) testthat::expect_equal(c(ncol(x), nrow(x)), c(5, 5)) }) test_that("BayesFM", { set.seed(333) befa <- BayesFM::befa(mtcars, iter = 1000) params <- parameters::model_parameters(befa, sort = TRUE) testthat::expect_equal(nrow(params), 11) }) }parameters/tests/testthat/test-MCMCglmm.R0000644000176200001440000000150013543202174020110 0ustar liggesusersif (require("testthat") && require("parameters") && require("MCMCglmm")) { data(PlodiaPO) set.seed(123) m1 <- MCMCglmm( PO ~ plate, random = ~FSfamily, data = PlodiaPO, verbose = FALSE, nitt = 1300, burnin = 300, thin = 1 ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.976294, 0.034227), tolerance = 0.01 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.024089, 0.005111), tolerance = 0.01 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0), tolerance = 0.01 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Median, c(1.013152, 0.042433), tolerance = 0.01 ) }) } parameters/tests/testthat/test-model_parameters.truncreg.R0000644000176200001440000000127513611361523023700 0ustar liggesusersif (require("testthat") && require("truncreg") && require("survival") && require("parameters")) { set.seed(123) data("tobin", package = "survival") model <- truncreg( formula = durable ~ age + quant, data = tobin, subset = durable > 0 ) test_that("model_parameters.truncreg", { params <- model_parameters(model) testthat::expect_equal(params$SE, c(9.21875, 0.22722, 0.03259, 0.56841), tolerance = 1e-3) testthat::expect_equal(params$t, c(1.36653, 1.89693, -3.64473, 2.90599), tolerance = 1e-3) testthat::expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "t", "df_error", "p") ) }) } parameters/tests/testthat/test-gls.R0000644000176200001440000000167113542452500017311 0ustar liggesusersif (require("testthat") && require("parameters") && require("nlme")) { data(Ovary) m1 <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), data = Ovary, correlation = corAR1(form = ~ 1 | Mare) ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(10.9137205623851, -4.03898261140754, -2.26675468048102), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.664643651063474, 0.645047778144975, 0.697538308948056), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(2.6187369542827e-51, 2.28628382225752e-05, 0.198137111907874), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(12.2163981810227, -2.77471219793581, -0.899604717105857), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-backticks.R0000644000176200001440000000761413611144300020456 0ustar liggesusers.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && Sys.getenv("USER") != "travis") { if (require("testthat") && require("parameters")) { data(iris) iris$`a m` <- iris$Species iris$`Sepal Width` <- iris$Sepal.Width m1 <- lm(`Sepal Width` ~ Petal.Length + `a m` * log(Sepal.Length), data = iris) m2 <- lm(Sepal.Width ~ Petal.Length + Species * log(Sepal.Length), data = iris) test_that("standard_error, backticks", { expect_equal( standard_error(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( standard_error(m2)$Parameter, c( "(Intercept)",
"Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("ci, backticks", { expect_equal( ci(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( ci(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) expect_equal( ci_wald(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( ci_wald(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("p, backticks", { expect_equal( p_value(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( p_value(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("model_parameters, backticks", { expect_equal( model_parameters(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( model_parameters(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("model_parameters-2, backticks", { expect_equal( model_parameters(parameters_selection(m1))$Parameter, c( "(Intercept)", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( model_parameters(parameters_selection(m2))$Parameter, c( "(Intercept)", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) } } parameters/tests/testthat/test-gamm.R0000644000176200001440000000155513563605646017464 0ustar liggesusersif (require("testthat") && require("parameters") && require("mgcv")) { set.seed(123) dat <- gamSim(6, n = 200, scale = .2, dist = "poisson") m1 <- gamm( y ~ s(x0) + s(x1) + s(x2), family = poisson, data = dat, random = list(fac = ~1) ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(2.361598, NA, NA, NA), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.3476989, NA, NA, NA), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0, 0, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(3.0476, 3.84674, 3.17375, 8.51841), tolerance = 1e-4 ) }) } parameters/tests/testthat.R0000644000176200001440000000037713563762601015562 0ustar liggesuserslibrary(testthat) library(parameters) if (length(strsplit(packageDescription("parameters")$Version, "\\.")[[1]]) > 3) { Sys.setenv("RunAllparametersTests" = "yes") } else { Sys.setenv("RunAllparametersTests" = "no") } test_check("parameters") 
parameters/vignettes/0000755000176200001440000000000013620044013014416 5ustar liggesusersparameters/vignettes/model_parameters.Rmd0000644000176200001440000001530513620043433020416 0ustar liggesusers--- title: "Summary of Model Parameters" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable selection, feature selection] vignette: > %\VignetteIndexEntry{Summary of Model Parameters} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("metafor", quietly = TRUE) || !requireNamespace("lavaan", quietly = TRUE) || !requireNamespace("brms", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE) || !requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("FactoMineR", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` The `model_parameters()` function (also accessible via the shortcut `parameters()`) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered a lightweight alternative to [`broom::tidy()`](https://github.com/tidymodels/broom), with some notable differences: - The names of the returned data frame are **specific** to their content. For instance, the column containing the statistic is named after that statistic, i.e., *t*, *z*, etc., instead of a generic name such as *statistic* (**however**, you can get standardized (generic) column names using [`standardize_names()`](https://easystats.github.io/parameters/reference/standardize_names.html)). - It is able to compute or extract indices not available by default, such as ***p*-values**, **CIs**, etc. - It includes **feature engineering** capabilities, including parameter [**bootstrapping**](https://easystats.github.io/parameters/articles/bootstrapping.html). ## Correlations and *t*-tests ### Frequentist ```{r, warning=FALSE, message=FALSE} cor.test(iris$Sepal.Length, iris$Sepal.Width) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE} t.test(mpg ~ vs, data = mtcars) %>% parameters() ``` ### Bayesian ```{r, warning=FALSE, message=FALSE} library(BayesFactor) BayesFactor::correlationBF(iris$Sepal.Length, iris$Sepal.Width) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE} BayesFactor::ttestBF(formula = mpg ~ vs, data = mtcars) %>% parameters() ``` ## ANOVAs Indices of effect size for ANOVAs, such as partial and non-partial versions of `eta_squared()`, `epsilon_squared()` or `omega_squared()`, were moved to the [**effectsize**-package](https://easystats.github.io/effectsize/). However, **parameters** uses these functions to compute such indices for parameter summaries. ### Simple ```{r, warning=FALSE, message=FALSE} aov(Sepal.Length ~ Species, data = iris) %>% parameters(omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE) ``` ### Repeated measures `parameters()` (resp. its alias `model_parameters()`) also works on repeated measures ANOVAs, whether computed from `aov()` or from a mixed model.
```{r, warning=FALSE, message=FALSE} aov(mpg ~ am + Error(gear), data = mtcars) %>% parameters() ``` ## Regressions (GLMs, Mixed Models, GAMs, ...) `parameters()` (resp. its alias `model_parameters()`) was mainly built with regression models in mind. It works for many types of models and packages, including mixed models and Bayesian models. ### GLMs ```{r, warning=FALSE, message=FALSE} glm(vs ~ poly(mpg, 2) + cyl, data = mtcars) %>% parameters() ``` ### Mixed Models ```{r, warning=FALSE, message=FALSE} library(lme4) lmer(Sepal.Width ~ Petal.Length + (1|Species), data = iris) %>% parameters() ``` ### Bayesian Models `model_parameters()` works fine with Bayesian models from the **rstanarm** package... ```{r, warning=FALSE, message=FALSE, eval = FALSE} library(rstanarm) stan_glm(mpg ~ wt * cyl, data = mtcars) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE, echo = FALSE} library(rstanarm) stan_glm(mpg ~ wt * cyl, data = mtcars, iter = 500, chains = 2, refresh = 0) %>% parameters() ``` ... as well as for (more complex) models from the **brms** package. For more complex models, other model components can be printed using the `effects` and `component` arguments. ```{r, warning=FALSE, message=FALSE} library(brms) data(fish) set.seed(123) model <- brm(bf( count ~ persons + child + camper + (1 | persons), zi ~ child + camper + (1 | persons) ), data = fish, family = zero_inflated_poisson(), iter = 500, chains = 1, refresh = 0 ) parameters(model, component = "conditional") parameters(model, effects = "all", component = "all") ``` ## Structural Models (PCA, EFA, CFA, SEM...) The **parameters** package extends the support to structural models. ### Principal Component Analysis (PCA) and Exploratory Factor Analysis (EFA) ```{r, warning=FALSE, message=FALSE} library(psych) psych::pca(mtcars, nfactors = 3) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE, eval = FALSE} library(FactoMineR) FactoMineR::FAMD(iris, ncp = 3) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE, echo = FALSE} library(FactoMineR) FactoMineR::FAMD(iris, ncp = 3, graph = FALSE) %>% parameters() ``` ### Confirmatory Factor Analysis (CFA) and Structural Equation Models (SEM) #### Frequentist ```{r, warning=FALSE, message=FALSE} library(lavaan) model <- lavaan::cfa(' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ', data = HolzingerSwineford1939) model_parameters(model) ``` #### Bayesian `blavaan` to be done. ## Meta-Analysis `parameters()` also works for `rma`-objects from the **metafor** package. ```{r, warning=FALSE, message=FALSE} library(metafor) mydat <- data.frame( effectsize = c(-0.393, 0.675, 0.282, -1.398), standarderror = c(0.317, 0.317, 0.13, 0.36) ) rma(yi = effectsize, sei = standarderror, method = "REML", data = mydat) %>% model_parameters() ``` ## Plotting Model Parameters There is a `plot()`-method implemented in the [**see**-package](https://easystats.github.io/see/). Several examples are shown [in this vignette](https://easystats.github.io/see/articles/parameters.html).
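As a quick preview, here is a minimal sketch (assuming the **see** package is installed; the chunk is not evaluated here, and the exact appearance of the plot depends on your version of **see**):

```{r, warning=FALSE, message=FALSE, eval = FALSE}
library(see)

# visualize the coefficients and their confidence intervals
model <- lm(mpg ~ wt + cyl, data = mtcars)
plot(model_parameters(model))
```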
parameters/vignettes/bibliography.bib0000644000176200001440000000435413503403444017564 0ustar liggesusers@Manual{revelle2018, title = {psych: Procedures for Psychological, Psychometric, and Personality Research}, author = {William Revelle}, organization = { Northwestern University}, address = { Evanston, Illinois}, year = {2018}, note = {R package version 1.8.12}, url = {https://CRAN.R-project.org/package=psych} } @article{makowski2018psycho, title={The psycho package: An efficient and publishing-oriented workflow for psychological science}, author={Makowski, Dominique}, journal={Journal of Open Source Software}, volume={3}, number={22}, pages={470}, year={2018} } @article{menard2011standards, title={Standards for standardized logistic regression coefficients}, author={Menard, Scott}, journal={Social Forces}, volume={89}, number={4}, pages={1409--1428}, year={2011}, publisher={The University of North Carolina Press} } @article{schielzeth2010simple, title={Simple means to improve the interpretability of regression coefficients}, author={Schielzeth, Holger}, journal={Methods in Ecology and Evolution}, volume={1}, number={2}, pages={103--113}, year={2010}, publisher={Wiley Online Library} } @article{gelman2008scaling, title={Scaling regression inputs by dividing by two standard deviations}, author={Gelman, Andrew}, journal={Statistics in medicine}, volume={27}, number={15}, pages={2865--2873}, year={2008}, publisher={Wiley Online Library} } @article{menard2004six, title={Six approaches to calculating standardized logistic regression coefficients}, author={Menard, Scott}, journal={The American Statistician}, volume={58}, number={3}, pages={218--223}, year={2004}, publisher={Taylor \& Francis} } @article{bring1994standardize, title={How to standardize regression coefficients}, author={Bring, Johan}, journal={The American Statistician}, volume={48}, number={3}, pages={209--213}, year={1994}, publisher={Taylor \& Francis} } @article{neter1989applied, title={Applied linear regression models}, author={Neter, John and Wasserman, William and Kutner, Michael H}, year={1989}, publisher={Irwin Homewood, IL} } parameters/vignettes/model_parameters_robust.Rmd0000644000176200001440000001524213611655362022026 0ustar liggesusers--- title: "Robust Estimation of Standard Errors, Confidence Intervals and p-values" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable selection, feature selection] vignette: > %\VignetteIndexEntry{Robust Estimation of Standard Errors} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("sandwich", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("clubSandwich", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function also allows the computation of standard errors, confidence intervals and p-values based on robust covariance matrix estimation from model parameters. 
Robust estimation is based on the packages **sandwich** and **clubSandwich**, so all models supported by either of these packages work with `model_parameters()` when `robust = TRUE`. ## Classical Regression Models ### Robust Covariance Matrix Estimation from Model Parameters By default, when `robust = TRUE`, `model_parameters()` internally calls `sandwich::vcovHC(type = "HC3")`. However, there are three arguments that allow for choosing different methods and options of robust estimation: `vcov_estimation`, `vcov_type` and `vcov_args` (see [`?standard_error_robust`](https://easystats.github.io/parameters/reference/standard_error_robust.html) for further details). Let us start with a simple example, which uses a heteroskedasticity-consistent covariance matrix estimation with estimation-type "HC3" (i.e. `sandwich::vcovHC(type = "HC3")` is called): ```{r} data(iris) model <- lm(Petal.Length ~ Sepal.Length * Species + Sepal.Width, data = iris) # model parameters, where SE, CI and p-values are based on robust estimation mp <- model_parameters(model, robust = TRUE) mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovHC(model)))) ``` ### Cluster-Robust Covariance Matrix Estimation (sandwich) If another covariance matrix estimation is required, use the `vcov_estimation`-argument. This argument takes the suffix of the related `vcov*()` function as its value, i.e. `vcov_estimation = "CL"` would call `sandwich::vcovCL()`, or `vcov_estimation = "HAC"` would call `sandwich::vcovHAC()`. The specific estimation type can be changed with `vcov_type`. E.g., `sandwich::vcovCL()` accepts estimation types HC0 to HC3. In the next example, we use a clustered covariance matrix estimation with HC1-estimation type. ```{r} # change estimation-type mp <- model_parameters(model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1") mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovCL(model)))) ``` Usually, clustered covariance matrix estimation is used when there is a cluster-structure in the data. The variable indicating the cluster-structure can be defined in `sandwich::vcovCL()` with the `cluster`-argument. In `model_parameters()`, additional arguments that should be passed down to functions from the **sandwich** package can be specified in `vcov_args`: ```{r} iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) # change estimation-type, defining additional arguments mp <- model_parameters( model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1", vcov_args = list(cluster = iris$cluster) ) mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovCL(model, cluster = iris$cluster)))) ``` ### Cluster-Robust Covariance Matrix Estimation (clubSandwich) Cluster-robust estimation of the variance-covariance matrix can also be achieved using `clubSandwich::vcovCR()`. Thus, when `vcov_estimation = "CR"`, the related function from the **clubSandwich** package is called. Note that this function _requires_ the specification of the `cluster`-argument.
```{r} # create fake-cluster-variable, to demonstrate cluster robust standard errors iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) # cluster-robust estimation mp <- model_parameters( model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$cluster) ) mp # compare standard errors to result from clubSandwich-package mp$SE unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster)))) ``` ### Robust Covariance Matrix Estimation on Standardized Model Parameters Finally, robust estimation can be combined with standardization. However, robust covariance matrix estimation only works for `standardize = "refit"`. ```{r} # model parameters, robust estimation on standardized model model_parameters(model, standardize = "refit", robust = TRUE) ``` ## Mixed Models ### Robust Covariance Matrix Estimation for Mixed Models For linear mixed models, which by definition have a clustered ("hierarchical" or multilevel) structure in the data, it is also possible to estimate a cluster-robust covariance matrix. This is made possible by the **clubSandwich** package, so we need to define the same arguments as in the above example. ```{r} library(lme4) data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) # normal model parameters, like from 'summary()' model_parameters(model) # model parameters, cluster robust estimation for mixed models model_parameters( model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp) ) ``` ### Robust Covariance Matrix Estimation on Standardized Mixed Model Parameters Again, robust estimation can be combined with standardization for linear mixed models as well, which in this case also only works for `standardize = "refit"`. ```{r} # model parameters, cluster robust estimation on standardized mixed model model_parameters( model, standardize = "refit", robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp) ) ``` parameters/vignettes/parameters_reduction.Rmd0000644000176200001440000001430013611655362021316 0ustar liggesusers--- title: "Feature Reduction (PCA, cMDS, ICA...)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable extraction, feature extraction, dimension extraction] vignette: > %\VignetteIndexEntry{Feature Reduction (PCA, cMDS, ICA, ...)} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` Also known as [**feature extraction** or **dimension reduction**](https://en.wikipedia.org/wiki/Feature_extraction) in machine learning, the goal of variable reduction is to **reduce the number of predictors** by deriving, from a set of measured data, new variables intended to be informative and non-redundant.
This method can be used to **simplify models**, which can benefit model interpretation, shorten fitting time, and improve generalization (by reducing overfitting). ## Quick and Exploratory Method Let's start by fitting a multiple regression with the `attitude` dataset, available in base R, to predict the overall **rating** by employees of their organization from the remaining variables (handling of employee **complaints**, special **privileges**, opportunity of **learning**, **raises**, feedback considered too **critical** and opportunity of **advancement**). ```{r message=FALSE, warning=FALSE} model <- lm(rating ~ ., data = attitude) parameters(model) ``` We can explore a reduction of the number of parameters with the `reduce_parameters()` function. ```{r message=FALSE, warning=FALSE} newmodel <- reduce_parameters(model) parameters(newmodel) ``` This quickly *hints* at the fact that the model could be represented via **two "latent" dimensions**, one correlated with all the positive things that a company has to offer, and the other one related to the amount of negative critiques received by the employees. These two dimensions have a positive and negative relationship with the company rating, respectively. > What exactly does `reduce_parameters()` do? This function performs a reduction in the parameters space (the number of variables). It starts by creating a new set of variables, based on a given method (the default method is "**PCA**", but others are available via the `method` argument, such as "**cMDS**", "**DRR**" or "**ICA**"). Then, it names these new dimensions using the original variables that *correlate* the most with them. For instance, a variable named 'V1_0.97/V4_-0.88' means that the V1 and the V4 variables correlate maximally (with respective coefficients of .97 and -.88) with this dimension. ```{r message=FALSE, warning=FALSE} reduce_parameters(model, method = "cMDS") %>% parameters() ``` A different method (**Classical Multidimensional Scaling - cMDS**) suggests that negative critiques do not have a significant impact on the rating, and that the lack of opportunities of career advancement is a separate dimension with an importance of its own. Although this function can be useful in exploratory data analysis, it's best to perform the dimension reduction step in a **separate and dedicated stage**, as this is a very important process in the data analysis workflow. ## Principal Component Analysis (PCA) PCA is a widely used procedure that lies in-between dimension reduction and structural modelling. Indeed, one of the ways of reducing the number of predictors is to extract a new set of uncorrelated variables that will *represent* the variance of your initial dataset. But how the original variables relate to one another can also be a question in its own right. We can apply the `principal_components()` function to the predictors of the model: ```{r message=FALSE, warning=FALSE} pca <- principal_components(insight::get_predictors(model), n = "auto") pca ``` The `principal_components()` function automatically selected one component (if the number of components is not specified, this function uses [`n_factors()`](https://easystats.github.io/parameters/articles/n_factors.html) to estimate the optimal number to keep) and returned the **loadings**, i.e., the relationship with all of the original variables. As we can see here, it seems that our new component captured the essence (more than half of the total variance present in the original dataset) of all our other variables together.
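We can double-check how much variance the extracted component accounts for via its `summary()` method. A quick sketch (the exact columns shown may vary across versions of the package):

```{r message=FALSE, warning=FALSE}
# proportion of variance accounted for by the extracted component
summary(pca)
```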
We can **extract** the values of this component for each of our observations using the `predict()` method and append the response variable from our initial dataset. ```{r message=FALSE, warning=FALSE} newdata <- predict(pca) newdata$rating <- attitude$rating ``` We can now update the model with this new component: ```{r message=FALSE, warning=FALSE} update(model, rating ~ PC1, data = newdata) %>% parameters() ``` ### Using the `psych` package for PCA You can also use different packages for models, such as [`psych`](https://cran.r-project.org/package=psych) [@revelle2018] or [`FactoMineR`](http://factominer.free.fr/) for PCA or Exploratory Factor Analysis (EFA), as they allow for more flexibility, control and detail when running such procedures. Thus, the functions from these packages are **fully supported** by `parameters` through the `model_parameters()` function. As such, the above analysis can be fully reproduced as follows: ```{r message=FALSE, warning=FALSE} library(psych) # Fit the PCA pca <- psych::principal(attitude, nfactors = 1) %>% model_parameters() pca ``` *Note:* By default, `psych::principal()` uses a **varimax** rotation to extract rotated components, possibly leading to discrepancies in the results. Finally, refit the model: ```{r message=FALSE, warning=FALSE} df <- cbind(attitude, predict(pca)) update(model, rating ~ PC1, data = df) %>% model_parameters() ``` # Referencesparameters/vignettes/parameters_selection.Rmd0000644000176200001440000001227213620032437021305 0ustar liggesusers--- title: "Parameters Selection" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable selection, feature selection] vignette: > %\VignetteIndexEntry{Parameters Selection} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` Also known as [**feature selection**](https://en.wikipedia.org/wiki/Feature_selection) in machine learning, the goal of variable selection is to **identify a subset of predictors** to **simplify models**. This can benefit model interpretation, shorten fitting time, and improve generalization (by reducing overfitting). There are many different methods. The one that is appropriate for a given problem depends on the model type, the data, the objective and the theoretical rationale. The `parameters` package implements a helper that will **automatically pick a method deemed appropriate for the provided model**, run the variable selection and return the **optimal formula**, which you can then re-use to update the model. ## Simple linear regression ### Fit a powerful model If you are familiar with R and the formula interface, you know of the possibility of including a dot (`.`) in the formula, signifying **"all the remaining variables"**. Curiously, few are aware of the possibility of additionally easily adding **all the interaction terms**. This can be achieved using the `.*.` notation.
Let's try that with the linear regression predicting **Sepal.Length** with the [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset, included by default in R. ```{r message=FALSE, warning=FALSE} model <- lm(Sepal.Length ~ .*., data = iris) summary(model) ``` ***Wow, that's a lot of parameters! And almost none of them is significant...*** Which is ***weird***, considering that **gorgeous R2! 0.882!** *I wish I had that in my research...* ### Too many parameters? As you might know, having a **model that is too performant is not always a good thing**. For instance, it can be a marker of [**overfitting**](https://en.wikipedia.org/wiki/Overfitting): the model corresponds too closely to a particular set of data, and may therefore fail to predict future observations reliably. In multiple regressions, it can also fall under [**Freedman's paradox**](https://en.wikipedia.org/wiki/Freedman%27s_paradox): some predictors that actually have no relation to the dependent variable being predicted will be **spuriously found to be statistically significant**. Let's run a few checks using the [**performance**](https://github.com/easystats/performance) package: ```{r message=FALSE, warning=FALSE} library(performance) check_normality(model) check_heteroscedasticity(model) check_autocorrelation(model) check_collinearity(model) ``` The main issue of the model seems to be the high [multicollinearity](https://en.wikipedia.org/wiki/Multicollinearity). This suggests that our model might not be able to give valid results about any individual predictor, nor tell which predictors are redundant with respect to others. ### Parameters selection Time to do some variable selection. This can be easily done using the `select_parameters()` function, which will **automatically select the best variables** and update the model accordingly. One way of using it is in a tidy pipeline (using [`%>%`](https://cran.r-project.org/package=magrittr/README.html)), feeding its output into an updated model. ```{r message=FALSE, warning=FALSE} lm(Sepal.Length ~ .*., data = iris) %>% select_parameters() %>% summary() ``` That's still a lot of parameters but, as you can see, almost all of them are now significant, and the R2 did not change much. Although appealing, please note that these automated selection methods are [**quite criticized**](https://towardsdatascience.com/stopping-stepwise-why-stepwise-selection-is-bad-and-what-you-should-use-instead-90818b3f52df), and should not be used in place of **theoretical** or **hypothesis-driven** reasons (*i.e.*, you should have justified hypotheses about the parameters of your model). ## Mixed and Bayesian models For simple linear regressions as above, the selection is made using the `step()` function (available in base R). This performs a [**stepwise**](https://en.wikipedia.org/wiki/Stepwise_regression) selection. However, this procedure is not available for other types of models, such as **mixed** or **Bayesian** models.
### Mixed model ```{r message=FALSE, warning=FALSE} library(lme4) lmer(Sepal.Length ~ Sepal.Width * Petal.Length * Petal.Width + (1|Species), data = iris) %>% select_parameters() %>% summary() ```parameters/vignettes/efa_cfa.Rmd0000644000176200001440000002072313617043573016446 0ustar liggesusers--- title: "Structural Models (EFA, CFA, SEM...)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, efa, cfa, factor analysis, sem, fa, pca, how many factors, n factors] vignette: > %\VignetteIndexEntry{Structural Models (EFA, CFA, SEM, ...)} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("see", quietly = TRUE) || !requireNamespace("lavaan", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } set.seed(333) ``` # How to perform a Factor Analysis (FA) The difference between PCA and EFA can be quite hard to intuitively grasp as their output is very similar. The idea is that PCA aims at extracting the most variance possible from all variables of the dataset, whereas EFA aims at creating consistent factors from the dataset without desperately trying to represent all the variables. This is why PCA is popular for feature reduction, as it will try to best represent the variance contained in the original data, minimizing the loss of information. On the other hand, EFA is usually used in the context of exploring the latent dimensions that might be hidden in the observed variables, without necessarily striving to represent the whole dataset. To illustrate EFA, let us use the [International Personality Item Pool](https://ipip.ori.org/) data available in the [`psych`](https://www.personality-project.org/r/html/bfi.html) package. It includes 25 personality self-report items. The authors built these items following the **big 5** personality structure. ## Factor Structure (Sphericity and KMO) The first step is to test the dataset for factor analysis suitability. Two existing methods are the **Bartlett's Test of Sphericity** and the **Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA)**. The former tests whether a matrix is significantly different from an identity matrix. It tests statistically for the presence of correlations among variables, providing the statistical probability that the correlation matrix has significant correlations among at least some of the variables. For factor analysis to work, some relationships between variables are needed, thus, a significant Bartlett's test of sphericity is required, say *p* < .001. The latter was introduced by Kaiser (1970) as the Measure of Sampling Adequacy (MSA), later modified by Kaiser and Rice (1974). The Kaiser-Meyer-Olkin (KMO) statistic, which can vary from 0 to 1, indicates the degree to which each variable in a set is predicted without error by the other variables. A value of 0 indicates that the sum of partial correlations is large relative to the sum of correlations, indicating factor analysis is likely to be inappropriate.
A KMO value close to 1 indicates that the sum of partial correlations is not large relative to the sum of correlations and so factor analysis should yield distinct and reliable factors. Both tests can be performed using the `check_factorstructure()` function. ```{r message=FALSE, warning=FALSE} library(parameters) library(dplyr) library(psych) # Load the data data <- psych::bfi[, 1:25] # Select only the 25 first columns corresponding to the items data <- na.omit(data) # remove missing values # Check factor structure check_factorstructure(data) ``` ## Exploratory Factor Analysis (EFA) Now that we are confident that our dataset is appropriate, we will explore a factor structure made of 5 latent variables, corresponding to the item authors' theory of personality. ```{r message=FALSE, warning=FALSE} # Fit an EFA efa <- psych::fa(data, nfactors = 5) %>% model_parameters(sort = TRUE, threshold = "max") efa ``` As we can see, the 25 items spread nicely across the 5 latent factors, the famous **big 5**. Based on this model, we can now predict back the scores for each individual for these new variables: ```{r message=FALSE, warning=FALSE, eval=FALSE} predict(efa, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Openness")) ``` ```{r message=FALSE, warning=FALSE, echo=FALSE} head(predict(efa, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Openness")), 5) ``` ## How many factors to retain in Factor Analysis (FA) When running a **factor analysis (FA)**, one often needs to specify **how many components** (or latent variables) to retain or to extract. This decision is often motivated or supported by some statistical indices and procedures aiming at finding the optimal number of factors. Interestingly, a huge number of methods exists to statistically address this issue, sometimes giving very different results... Unfortunately, there is no consensus on **which method to use**, or which is the best. ### The Method Agreement procedure The Method Agreement procedure, first implemented in the [`psycho`](https://neuropsychology.github.io/psycho.R/2018/05/24/n_factors.html) package [@makowski2018psycho], proposes to rely on the consensus of methods, rather than on one method in particular. This procedure can be easily used via the `n_factors()` function, re-implemented and improved in the [**parameters**](https://github.com/easystats/parameters) package. One can provide a dataframe, and the function will run a large number of routines and return the optimal number of factors based on the highest consensus. ```{r message=FALSE, warning=FALSE} n <- n_factors(data) n ``` Interestingly, the smallest number of factors that most methods suggest is 6, which is consistent with newer models of personality (e.g., HEXACO). More details, as well as a summary table, can be obtained as follows: ```{r message=FALSE, warning=FALSE} as.data.frame(n) summary(n) ``` A plot can also be obtained (the `see` package must be loaded): ```{r message=FALSE, warning=FALSE} library(see) plot(n) + theme_modern() ``` ## Confirmatory Factor Analysis (CFA) We've seen above that while an EFA with 5 latent variables works great on our dataset, a structure with 6 latent factors might in fact be more appropriate. How can we **statistically test** if that's actually the case? This can be done using Confirmatory Factor Analysis (CFA), which bridges factor analysis with Structural Equation Modelling (SEM).
However, in order to do that cleanly, EFA should be independent from CFA, in the sense that the factor structure should be explored on a **"training" set**, and then tested (or "confirmed") on a **test set**. In other words, the datasets used for exploration and confirmation are not the same. Note that this procedure is also standard in the field of machine learning. ### Partition the data The data can be easily split into two sets with the `data_partition()` function; here we will use 70\% of the sample as the training set and the rest as the test set. ```{r message=FALSE, warning=FALSE} partitions <- data_partition(data, training_proportion = 0.7) training <- partitions$training test <- partitions$test ``` ### Create CFA structures out of EFA models In the next step, we will run two EFA models on the training set, specifying 5 and 6 latent factors respectively, which we will then transform into CFA structures. ```{r message=FALSE, warning=FALSE} structure_big5 <- psych::fa(training, nfactors = 5) %>% efa_to_cfa() structure_big6 <- psych::fa(training, nfactors = 6) %>% efa_to_cfa() # Investigate how a model looks structure_big5 ``` As we can see, a structure is just a string encoding how the **manifest variables** (the observed variables) are integrated into latent variables. ### Fit and Compare models We can finally fit these structures to the test set using the `lavaan` package, and compare the resulting models: ```{r message=FALSE, warning=FALSE} library(lavaan) library(performance) big5 <- lavaan::cfa(structure_big5, data = test) big6 <- lavaan::cfa(structure_big6, data = test) performance::compare_performance(big5, big6) ``` All in all, it seems that the big 5 structure remains quite reliable. # References parameters/vignettes/model_parameters_standardized.Rmd0000644000176200001440000001247413611655362023160 0ustar liggesusers--- title: "Standardized Model Parameters" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable selection, feature selection] vignette: > %\VignetteIndexEntry{Standardized Model Parameters} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function (also accessible via the shortcut `parameters()`) can be used to calculate standardized model parameters, too, via the `standardize`-argument. There are different methods of standardizing model parameters: `"refit"`, `"posthoc"`, `"smart"` and `"basic"` (see [`?effectsize::standardize_parameters`](https://easystats.github.io/effectsize/reference/standardize_parameters.html) for further details). ## Standardization by re-fitting the model `standardize = "refit"` is based on a complete model re-fit with a standardized version of the data. Hence, this method is equivalent to standardizing the variables before fitting the model. It is the most accurate (Neter et al., 1989), but it is also the most computationally costly and slow (especially for heavy models such as Bayesian models).
This method is particularly recommended for complex models that include interactions or transformations (e.g., polynomial or spline terms). When `standardize = "refit"`, `model_parameters()` internally calls [`effectsize::standardize()`](https://easystats.github.io/effectsize/reference/standardize.html) to standardize the data that was used to fit the model and updates the model with the standardized data. Note that `effectsize::standardize()` tries to detect which variables should be standardized and which should not. For instance, having a `log(x)` in the model formula would exclude `x` from being standardized, because the standardized `x` might take negative values, for which `log(x)` is not defined. Factors will also not be standardized. Response variables will be standardized, if appropriate. ```{r} library(lme4) data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) # classic model parameters model_parameters(model) # standardized model parameters model_parameters(model, standardize = "refit") ``` The second output is identical to the following: ```{r} # standardize continuous variables manually model2 <- lme4::lmer( scale(Sepal.Length) ~ Species * scale(Sepal.Width) + scale(Petal.Length) + (1 | grp), data = iris ) model_parameters(model2) ``` ## Post-hoc standardization `standardize = "posthoc"` aims at emulating the results obtained by `"refit"` without refitting the model. The coefficients are divided by the standard deviation of the outcome (which becomes their expression 'unit'). Then, the coefficients related to numeric variables are additionally multiplied by the standard deviation of the related terms, so that they correspond to changes of 1 SD of the predictor (e.g., "a change in 1 SD of x is related to a change of 0.24 of the SD of y"). This does not apply to binary variables or factors, so the coefficients are still related to changes in levels. This method is not accurate and tends to give aberrant results when interactions are specified. However, this method of standardization is the "classic" result obtained by many statistical packages when standardized coefficients are requested. When `standardize = "posthoc"`, `model_parameters()` internally calls [`effectsize::standardize_parameters(method = "posthoc")`](https://easystats.github.io/effectsize/reference/standardize_parameters.html). Test statistics and p-values are not affected, i.e. they are the same as if no standardization were applied. ```{r} model_parameters(model, standardize = "posthoc") ``` `standardize = "basic"` also applies post-hoc standardization; however, factors are converted to numeric, which means that the coefficients of factor levels (transformed to integers) or binary predictors are also scaled by the standard deviation of the corresponding model matrix column. ```{r} model_parameters(model, standardize = "basic") ``` ## Smart standardization `standardize = "smart"` is similar to `standardize = "posthoc"` in that it does not involve model re-fitting. The difference is that the SD of the response is computed on the relevant section of the data. For instance, if a factor with 3 levels A (the intercept), B and C is entered as a predictor, the effect corresponding to B vs. A will be scaled by the variance of the response at the intercept only. As a result, the coefficients for effects of factors are similar to Glass' delta.
```{r} model_parameters(model, standardize = "smart") ``` parameters/R/0000755000176200001440000000000013617206715012626 5ustar liggesusersparameters/R/format_p.R0000644000176200001440000000540213617043573014562 0ustar liggesusers#' p-values formatting #' #' Format p-values. #' #' @param p value or vector of p-values. #' @param stars Add significance stars (e.g., p < .001***). #' @param stars_only Return only significance stars. #' @param name Name prefixing the text. Can be \code{NULL}. #' @param digits Number of significant digits. May also be \code{"scientific"} to return exact p-values in scientific notation, or \code{"apa"} to use an APA-style for p-values. #' @param ... Arguments from other methods. #' @inherit insight::format_value #' #' @return A formatted string. #' @examples #' format_p(c(.02, .065, 0, .23)) #' format_p(c(.02, .065, 0, .23), name = NULL) #' format_p(c(.02, .065, 0, .23), stars_only = TRUE) #' #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' format_p(p_value(model)$p, digits = "scientific") #' @importFrom insight format_value #' @export format_p <- function(p, stars = FALSE, stars_only = FALSE, name = "p", missing = "", digits = 3, ...) { if (digits == "apa") { text <- ifelse(is.na(p), NA, ifelse(p < 0.001, "< .001***", ifelse(p < 0.01, "< .01**", ifelse(p < 0.05, "< .05*", paste0("= ", insight::format_value(p, 3)) ) ) ) ) } else if (digits == "scientific") { text <- ifelse(is.na(p), NA, ifelse(p < 0.001, sprintf("= %.5e***", p), ifelse(p < 0.01, sprintf("= %.5e**", p), ifelse(p < 0.05, sprintf("= %.5e*", p), sprintf("= %.5e", p) ) ) ) ) } else if (digits <= 3) { text <- ifelse(is.na(p), NA, ifelse(p < 0.001, "< .001***", ifelse(p < 0.01, paste0("= ", insight::format_value(p, digits), "**"), ifelse(p < 0.05, paste0("= ", insight::format_value(p, digits), "*"), paste0("= ", insight::format_value(p, digits)) ) ) ) ) } else { text <- ifelse(is.na(p), NA, ifelse(p < 0.001, paste0("= ", insight::format_value(p, digits), "***"), ifelse(p < 0.01, paste0("= ", insight::format_value(p, digits), "**"), ifelse(p < 0.05, paste0("= ", insight::format_value(p, digits), "*"), paste0("= ", insight::format_value(p, digits)) ) ) ) ) } .add_prefix_and_remove_stars(text, stars, stars_only, name, missing) } #' @keywords internal .add_prefix_and_remove_stars <- function(text, stars, stars_only, name, missing = "") { missing_index <- is.na(text) if (is.null(name)) { text <- gsub("= ", "", text) } else { text <- paste(name, text) } if (stars_only == TRUE) { text <- gsub("[^\\*]", "", text) } else if (stars == FALSE) { text <- gsub("\\*", "", text) } text[missing_index] <- missing text } parameters/R/model_parameters.lavaan.R0000644000176200001440000001010713611655361017533 0ustar liggesusers#' Parameters from CFA/SEM models #' #' Format CFA/SEM objects from the (b)lavaan package (Rosseel, 2012; Merkle and Rosseel 2018). #' #' @param model CFA or SEM created by the \code{lavaan::cfa} or \code{lavaan::sem} functions. #' @param standardize Return standardized parameters (standardized coefficients). See \code{lavaan::standardizedsolution}. #' @inheritParams model_parameters.default #' @param type What type of links to return. Can be \code{"all"} or some of \code{c("regression", "correlation", "loading", "variance", "mean")}. #' @param ... Arguments passed to or from other methods. 
#' #' #' @examples #' library(parameters) #' #' # lavaan ------------------------------------- #' if (require("lavaan")) { #' #' # Confirmatory Factor Analysis (CFA) --------- #' #' structure <- " visual =~ x1 + x2 + x3 #' textual =~ x4 + x5 + x6 #' speed =~ x7 + x8 + x9 " #' model <- lavaan::cfa(structure, data = HolzingerSwineford1939) #' model_parameters(model) #' model_parameters(model, standardize = TRUE) #' #' # Structural Equation Model (SEM) ------------ #' #' structure <- " #' # latent variable definitions #' ind60 =~ x1 + x2 + x3 #' dem60 =~ y1 + a*y2 + b*y3 + c*y4 #' dem65 =~ y5 + a*y6 + b*y7 + c*y8 #' # regressions #' dem60 ~ ind60 #' dem65 ~ ind60 + dem60 #' # residual correlations #' y1 ~~ y5 #' y2 ~~ y4 + y6 #' y3 ~~ y7 #' y4 ~~ y8 #' y6 ~~ y8 #' " #' model <- lavaan::sem(structure, data = PoliticalDemocracy) #' model_parameters(model) #' model_parameters(model, standardize = TRUE) #' } #' @return A data frame of indices related to the model's parameters. #' #' @references \itemize{ #' \item Rosseel Y (2012). lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36. #' \item Merkle EC , Rosseel Y (2018). blavaan: Bayesian Structural Equation Models via Parameter Expansion. Journal of Statistical Software, 85(4), 1-30. http://www.jstatsoft.org/v85/i04/ #' } #' @export model_parameters.lavaan <- function(model, ci = 0.95, standardize = FALSE, type = c("regression", "correlation", "loading"), ...) { params <- .extract_parameters_lavaan(model, ci = ci, standardize = standardize, ...) # Filter if (all(type == "all")) { type <- c("regression", "correlation", "loading", "variance", "mean") } params <- params[tolower(params$Type) %in% type, ] # add class-attribute for printing class(params) <- c("parameters_sem", "see_parameters_sem", class(params)) attr(params, "ci") <- ci attr(params, "model") <- model params } #' @export model_parameters.blavaan <- function(model, ci = 0.95, standardize = FALSE, type = c("regression", "correlation", "loading"), ...) { params <- .extract_parameters_blavaan(model, ci = ci, standardize = standardize, ...) # Filter if (all(type == "all")) { type <- c("regression", "correlation", "loading", "variance", "mean") } params <- params[tolower(params$Type) %in% type, ] # add class-attribute for printing class(params) <- c("parameters_sem", "see_parameters_sem", class(params)) attr(params, "ci") <- ci attr(params, "model") <- model params } #' @export n_parameters.lavaan <- function(x, ...) { if (!requireNamespace("lavaan", quietly = TRUE)) { stop("Package 'lavaan' required for this function to work. Please install it by running `install.packages('lavaan')`.") } lavaan::fitmeasures(x)$npar } #' @importFrom insight format_table #' @export print.parameters_sem <- function(x, ...) { .print_model_parms_components(x, pretty_names = TRUE, split_column = "Type") } #' @export predict.parameters_sem <- function(object, newdata = NULL, ...) { if (!requireNamespace("lavaan", quietly = TRUE)) { stop("Package 'lavaan' required for this function to work. 
Please install it by running `install.packages('lavaan')`.") } as.data.frame(lavaan::lavPredict(attributes(object)$model, newdata = newdata, method = "EBM", ...)) } parameters/R/cluster_discrimination.R0000644000176200001440000000501113603206134017511 0ustar liggesusers#' @title Compute a linear discriminant analysis on classified cluster groups #' #' @name cluster_discrimination #' #' @description Computes linear discriminant analysis on classified cluster groups, #' and determines the goodness of classification for each cluster group. #' #' @param x A data frame. #' @param cluster_groups Group classification of the cluster analysis, which can #' be retrieved from the \code{\link{cluster_analysis}} function. #' #' @seealso \code{\link{n_clusters}} to determine the number of clusters to extract, \code{\link{cluster_analysis}} to compute a cluster analysis and \code{\link{check_clusterstructure}} to check suitability of data for clustering. #' #' @examples #' \dontrun{ #' # retrieve group classification from hierarchical cluster analysis #' groups <- cluster_analysis(iris[, 1:4]) #' #' # goodness of group classification #' cluster_discrimination(iris[, 1:4], cluster_groups = groups) #' } #' @importFrom stats na.omit #' @export cluster_discrimination <- function(x, cluster_groups = NULL) { if (is.null(cluster_groups)) { cluster_groups <- cluster_analysis(x) } x <- stats::na.omit(x) cluster_groups <- stats::na.omit(cluster_groups) # compute discriminant analysis of groups on original data frame if (!requireNamespace("MASS", quietly = TRUE)) { stop("Package 'MASS' required for this function to work. Please install it by running `install.packages('MASS')`.") } disc <- MASS::lda(cluster_groups ~ ., data = x, na.action = "na.omit", CV = TRUE) # Assess the accuracy of the prediction # percent correct for each category of groups classification_table <- table(cluster_groups, disc$class) correct <- diag(prop.table(classification_table, 1)) # total correct percentage total_correct <- sum(diag(prop.table(classification_table))) out <- data.frame( Group = unique(cluster_groups), Accuracy = correct, stringsAsFactors = FALSE ) attr(out, "Overall_Accuracy") <- total_correct class(out) <- c("cluster_discrimintation", class(out)) out } #' @export print.cluster_discrimintation <- function(x, ...) { insight::print_color("# Accuracy of Cluster Group Classification\n\n", "blue") total_accuracy <- attributes(x)$Overall_Accuracy x$Accuracy <- sprintf("%.2f%%", 100 * x$Accuracy) total <- sprintf("%.2f%%", 100 * total_accuracy) print.data.frame(x, row.names = FALSE, ...) insight::print_color(sprintf("\nOverall accuracy of classification: %s\n", total), "yellow") } parameters/R/dof.R0000644000176200001440000001047313614640330013516 0ustar liggesusers#' Degrees of Freedom (DoF) #' #' Estimate or extract degrees of freedom of models. #' #' @param model A statistical model. #' @param method Can be \code{"analytical"} (default, DoFs are estimated based on the model type), \code{"fit"}, in which case they are directly taken from the model if available (for Bayesian models, the goal (looking for help to make it happen) would be to refit the model as a frequentist one before extracting the DoFs), \code{"ml1"} (see \code{\link{dof_ml1}}), \code{"betwithin"} (see \code{\link{dof_betwithin}}), \code{"satterthwaite"} (see \code{\link{dof_satterthwaite}}), \code{"kenward"} (see \code{\link{dof_kenward}}) or \code{"any"}, which tries to extract DoF by any of those methods, whichever succeeds.
#' #' @examples #' model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris) #' dof(model) #' #' model <- glm(vs ~ mpg * cyl, data = mtcars, family = "binomial") #' dof(model) #' #' if (require("lme4")) { #' model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) #' dof(model) #' } #' \donttest{ #' if (require("rstanarm")) { #' model <- stan_glm( #' Sepal.Length ~ Petal.Length * Species, #' data = iris, #' chains = 2, #' refresh = 0 #' ) #' dof(model) #' } #' } #' @export degrees_of_freedom <- function(model, method = "analytical") { method <- tolower(method) method <- match.arg(method, c("analytical", "any", "fit", "ml1", "betwithin", "satterthwaite", "kenward", "nokr", "wald")) if (!.dof_method_ok(model, method)) { method <- "any" } if (method == "any") { dof <- .degrees_of_freedom_fit(model, verbose = FALSE) if (is.null(dof) || all(is.infinite(dof)) || anyNA(dof)) { dof <- .degrees_of_freedom_analytical(model, kenward = FALSE) } } else if (method == "ml1") { dof <- dof_ml1(model) } else if (method == "wald") { dof <- Inf } else if (method == "satterthwaite") { dof <- dof_satterthwaite(model) } else if (method == "betwithin") { dof <- dof_betwithin(model) } else if (method == "kenward") { dof <- dof_kenward(model) } else if (method == "analytical") { dof <- .degrees_of_freedom_analytical(model) } else if (method == "nokr") { dof <- .degrees_of_freedom_analytical(model, kenward = FALSE) } else { dof <- .degrees_of_freedom_fit(model) } dof } #' @rdname degrees_of_freedom #' @export dof <- degrees_of_freedom #' @keywords internal .degrees_of_freedom_analytical <- function(model, kenward = TRUE) { nparam <- n_parameters(model) n <- insight::n_obs(model) if (isTRUE(kenward) && inherits(model, "lmerMod")) { dof <- as.numeric(dof_kenward(model)) } else { dof <- rep(n - nparam, nparam) } dof } #' @importFrom bayestestR bayesian_as_frequentist #' @importFrom stats df.residual #' @keywords internal .degrees_of_freedom_fit <- function(model, verbose = TRUE) { info <- insight::model_info(model) if (info$is_bayesian) { model <- bayestestR::bayesian_as_frequentist(model) } # 1st try dof <- try(stats::df.residual(model), silent = TRUE) # 2nd try if (inherits(dof, "try-error") || is.null(dof)) { dof <- try(summary(model)$df[2], silent = TRUE) } # 3rd try, nlme if (inherits(dof, "try-error") || is.null(dof)) { dof <- try(unname(model$fixDF$X), silent = TRUE) } # last try if (inherits(dof, "try-error") || is.null(dof)) { dof <- Inf if (verbose) { insight::print_color("Could not extract degrees of freedom.\n", "red") } } dof } .dof_method_ok <- function(model, method) { if (is.null(method)) { return(TRUE) } if (!insight::model_info(model)$is_mixed) { return(FALSE) } method <- tolower(method) if (!(method %in% c("analytical", "any", "fit", "satterthwaite", "betwithin", "kenward", "kr", "nokr", "wald", "ml1"))) { warning("'df_method' must be one of 'wald', 'kenward', 'satterthwaite', 'betwithin' or ' ml1'. Using 'wald' now.", call. = FALSE) return(FALSE) } if (!insight::model_info(model)$is_linear && method %in% c("satterthwaite", "kenward", "kr")) { warning(sprintf("'%s'-degrees of freedoms are only available for linear mixed models.", method), call. 
= FALSE) return(FALSE) } return(TRUE) } parameters/R/ci_satterthwaite.R0000644000176200001440000000047213611665203016312 0ustar liggesusers#' @rdname p_value_satterthwaite #' @export ci_satterthwaite <- function(model, ci = .95) { out <- lapply(ci, function(i) { .ci_wald(model = model, ci = i, dof = Inf, effects = "fixed", component = "all", method = "satterthwaite") }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/format_number.R0000644000176200001440000000554613534252567015613 0ustar liggesusers#' Convert number to words #' #' Convert number to words. The code has been adapted from https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r. #' #' @param x Number. #' @param textual Return words. If \code{FALSE}, will run \code{\link[insight]{format_value}}. #' @param ... Arguments to be passed to \code{\link[insight]{format_value}} if \code{textual} is \code{FALSE}. #' #' #' @return A formatted string. #' @examples #' format_number(2) #' format_number(45) #' format_number(324.68765) #' @importFrom insight format_value #' @export format_number <- function(x, textual = TRUE, ...) { if (textual) { .format_number(x) } else { insight::format_value(x, ...) } } #' @keywords internal .format_number <- function(x) { # https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r ## Function by John Fox found here: ## http://tolstoy.newcastle.edu.au/R/help/05/04/2715.html ## Tweaks by AJH to add commas and "and" helper <- function(x) { digits <- rev(strsplit(as.character(x), "")[[1]]) nDigits <- length(digits) if (nDigits == 1) { as.vector(ones[digits]) } else if (nDigits == 2) { if (x <= 19) { as.vector(teens[digits[1]]) } else { trim(paste( tens[digits[2]], Recall(as.numeric(digits[1])) )) } } else if (nDigits == 3) { trim(paste( ones[digits[3]], "hundred and", Recall(makeNumber(digits[2:1])) )) } else { nSuffix <- ((nDigits + 2) %/% 3) - 1 if (nSuffix > length(suffixes)) stop(paste(x, "is too large!")) trim(paste( Recall(makeNumber(digits[ nDigits:(3 * nSuffix + 1) ])), suffixes[nSuffix], ",", Recall(makeNumber(digits[(3 * nSuffix):1])) )) } } trim <- function(text) { # Tidy leading/trailing whitespace, space before comma text <- gsub("^\ ", "", gsub("\ *$", "", gsub("\ ,", ",", text))) # Clear any trailing " and" text <- gsub(" and$", "", text) # Clear any trailing comma gsub("\ *,$", "", text) } makeNumber <- function(...) as.numeric(paste(..., collapse = "")) # Disable scientific notation opts <- options(scipen = 100) on.exit(options(opts)) ones <- c( "", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" ) names(ones) <- 0:9 teens <- c( "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen" ) names(teens) <- 0:9 tens <- c( "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety" ) names(tens) <- 2:9 x <- round(x) suffixes <- c("thousand", "million", "billion", "trillion") if (length(x) > 1) { return(trim(sapply(x, helper))) } helper(x) } parameters/R/cluster_analysis.R0000644000176200001440000001773713617565540016350 0ustar liggesusers#' @title Compute cluster analysis and return group indices #' @name cluster_analysis #' @description Compute hierarchical or kmeans cluster analysis and return the group #' assignment for each observation as a vector. #' #' @references Maechler M, Rousseeuw P, Struyf A, Hubert M, Hornik K (2014) cluster: Cluster Analysis Basics and Extensions. R package. #' #' @param x A data frame.
#' @param n_clusters Number of clusters used for the cluster solution. By default, #' the number of clusters to extract is determined by calling \code{\link{n_clusters}}. #' @param method Method for computing the cluster analysis. By default (\code{"hclust"}), a #' hierarchical cluster analysis will be computed. Use \code{"kmeans"} to #' compute a kmeans cluster analysis. You can specify the initial letters only. #' @param distance Distance measure to be used when \code{method = "hclust"} (for hierarchical #' clustering). Must be one of \code{"euclidean"}, \code{"maximum"}, \code{"manhattan"}, #' \code{"canberra"}, \code{"binary"} or \code{"minkowski"}. See \code{\link{dist}}. #' If \code{method = "kmeans"}, this argument will be ignored. #' @param agglomeration Agglomeration method to be used when \code{method = "hclust"} (for hierarchical #' clustering). This should be one of \code{"ward"}, \code{"single"}, \code{"complete"}, \code{"average"}, #' \code{"mcquitty"}, \code{"median"} or \code{"centroid"}. Default is \code{"ward"} (see \code{\link{hclust}}). #' If \code{method = "kmeans"}, this argument will be ignored. #' @param iterations Maximum number of iterations allowed. Only applies if #' \code{method = "kmeans"}. See \code{\link{kmeans}} for details on this argument. #' @param algorithm Algorithm used for computing the kmeans clusters. Only applies if #' \code{method = "kmeans"}. May be one of \code{"Hartigan-Wong"} (default), #' \code{"Lloyd"} (used by SPSS), or \code{"MacQueen"}. See \code{\link{kmeans}} #' for details on this argument. #' @param force Logical, if \code{TRUE}, ordered factors (ordinal variables) are #' converted to numeric values, while character vectors and factors are converted #' to dummy-variables (numeric 0/1) and are included in the cluster analysis. #' If \code{FALSE}, factors and character vectors are removed before computing #' the cluster analysis. For \code{method = "kmeans"} and \code{force = TRUE}, #' only ordered factors are used, because \code{\link{kmeans}} fails for dummy #' variables. #' #' @inheritParams equivalence_test.lm #' @inheritParams n_clusters #' #' @return The group classification for each observation as a vector. The #' returned vector includes missing values, so it has the same length #' as \code{nrow(x)}. #' #' @note There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details The \code{print()} and \code{plot()} methods show the (standardized) #' mean value for each variable within each cluster. Thus, a higher absolute #' value indicates that a certain variable characteristic is more pronounced #' within that specific cluster (as compared to other cluster groups with lower #' absolute mean values). #' #' @seealso \code{\link{n_clusters}} to determine the number of clusters to extract, \code{\link{cluster_discrimination}} to determine the accuracy of cluster group classification and \code{\link{check_clusterstructure}} to check suitability of data for clustering.
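#' #' @details The table of standardized group means that the \code{print()} method displays is stored as an attribute and can be retrieved directly (a minimal sketch, using the same example data as below): #' \preformatted{ #' groups <- cluster_analysis(iris[, 1:4], 3) #' attr(groups, "data") # mean z-score per variable and cluster group #' attr(groups, "accuracy") # accuracy of classification, if available #' }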
#' #' @examples #' # Hierarchical clustering of the iris dataset #' groups <- cluster_analysis(iris[, 1:4], 3) #' #' # K-means clustering of the iris dataset, auto-detection of cluster groups #' \dontrun{ #' groups <- cluster_analysis(iris[, 1:4], method = "k") #' } #' @importFrom stats dist na.omit hclust kmeans cutree complete.cases #' @export cluster_analysis <- function(x, n_clusters = NULL, method = c("hclust", "kmeans"), distance = c("euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski"), agglomeration = c("ward", "ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid"), iterations = 20, algorithm = c("Hartigan-Wong", "Lloyd", "MacQueen"), force = TRUE, package = c("NbClust", "mclust"), verbose = TRUE) { # match arguments distance <- match.arg(distance) method <- match.arg(method) agglomeration <- match.arg(agglomeration) algorithm <- match.arg(algorithm) # include factors? if (force) { # ordered factors to numeric factors <- sapply(x, is.ordered) if (any(factors)) { x[factors] <- sapply(x[factors], .factor_to_numeric) } # character and factors to dummies factors <- sapply(x, function(i) is.character(i) | is.factor(i)) if (any(factors)) { if (method == "kmeans") { x <- x[sapply(x, is.numeric)] } else { dummies <- lapply(x[factors], .factor_to_dummy) x <- cbind(x[!factors], dummies) } } } else { # remove factors x <- x[sapply(x, is.numeric)] } # check number of clusters if (is.null(n_clusters)) { nc <- n_clusters(x, package = package, force = force) ncs <- attributes(nc)$summary n_clusters <- ncs$n_Clusters[which.max(ncs$n_Methods)][1] if (verbose) { insight::print_color(sprintf("Using solution with %i clusters, supported by %i out of %i methods.\n", n_clusters, max(ncs$n_Methods), sum(ncs$n_Methods)), "blue") } } # save original data, standardized, for later use original_data <- as.data.frame(scale(x)) # create NA-vector of same length as data frame complete.groups <- rep(NA, times = nrow(x)) # save IDs from non-missing data non_missing <- stats::complete.cases(x) x <- stats::na.omit(x) # Hierarchical Clustering if (method == "hclust") { # "ward" is not a valid option for stats::hclust(), so map it to "ward.D2" if (agglomeration == "ward") agglomeration <- "ward.D2" # distance matrix d <- stats::dist(x, method = distance) # hierarchical clustering hc <- stats::hclust(d, method = agglomeration) # cut tree into the requested number of clusters groups <- stats::cutree(hc, k = n_clusters) } else { km <- stats::kmeans(x, centers = n_clusters, iter.max = iterations, algorithm = algorithm) # return cluster assignment groups <- km$cluster } # create vector with cluster group classification, # including missings complete.groups[non_missing] <- groups # create mean of z-score for each variable in data out <- as.data.frame(do.call(rbind, lapply(original_data, tapply, complete.groups, mean))) colnames(out) <- sprintf("Group %s", colnames(out)) out <- cbind(data.frame(Term = rownames(out), stringsAsFactors = FALSE), out) rownames(out) <- NULL attr(complete.groups, "data") <- out attr(complete.groups, "accuracy") <- tryCatch( { cluster_discrimination(original_data, complete.groups) }, error = function(e) { NULL } ) class(complete.groups) <- c("cluster_analysis", "see_cluster_analysis", class(complete.groups)) complete.groups } #' @export print.cluster_analysis <- function(x, digits = 2, ...) { # retrieve data dat <- attr(x, "data", exact = TRUE) if (is.null(dat)) { stop("Could not find data frame that was used for cluster analysis.", call.
= FALSE) } # save output from cluster_discrimination() accuracy <- attributes(x)$accuracy # headline insight::print_color("# Cluster Analysis (mean z-score by cluster)\n\n", "blue") # round numeric variables (i.e. all but the first column, which contains the terms) dat[2:ncol(dat)] <- sapply(dat[2:ncol(dat)], round, digits = digits) print.data.frame(dat, row.names = FALSE) if (!is.null(accuracy)) { cat("\n") print(accuracy) } } parameters/R/select_parameters.R0000644000176200001440000000637313616217112016454 0ustar liggesusers#' Automated selection of model parameters #' #' This function performs an automated selection of the "best" parameters, updating and returning the "best" model. For simple frequentist GLMs, it performs an AIC-based stepwise selection. For Bayesian models, it uses the \code{projpred} package. #' #' @param model A statistical model. #' @param ... Arguments passed to or from other methods. #' #' @examples #' model <- lm(mpg ~ ., data = mtcars) #' select_parameters(model) #' #' model <- lm(mpg ~ cyl * disp * hp * wt, data = mtcars) #' select_parameters(model) #' \donttest{ #' # lme4 ------------------------------------------- #' if (require("lme4")) { #' model <- lmer( #' Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), #' data = iris #' ) #' select_parameters(model) #' } #' #' # rstanarm ------------------------------------------- #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ ., data = mtcars, iter = 500, refresh = 0) #' select_parameters(model, cross_validation = TRUE) #' #' model <- stan_glm(mpg ~ cyl * disp * hp, data = mtcars, iter = 500, refresh = 0) #' select_parameters(model, cross_validation = FALSE) #' } #' } #' @return The model refitted with the optimal number of parameters. #' @export select_parameters <- function(model, ...) { UseMethod("select_parameters") } #' @rdname select_parameters #' @export parameters_selection <- select_parameters #' @rdname select_parameters #' @inheritParams stats::step #' @importFrom stats step #' @export select_parameters.lm <- function(model, direction = "both", steps = 1000, k = 2, ...) { junk <- utils::capture.output(best <- stats::step(model, trace = 0, direction = direction, steps = steps, k = k, ... )) best } #' @rdname select_parameters #' @export select_parameters.merMod <- function(model, direction = "backward", steps = 1000, ...) { # Using cAIC4's stepcAIC() if (!requireNamespace("cAIC4", quietly = TRUE)) { stop("Package 'cAIC4' required for this function to work. Please install it by running `install.packages('cAIC4')`.") } # Find slope and group candidates # data <- insight::get_data(model) # factors <- names(data[sapply(data, is.factor)]) # if(length(factors) == 0){ # factors <- NULL # } # nums <- names(data[sapply(data, is.numeric)]) # if(length(nums) == 0){ # nums <- NULL # } best <- cAIC4::stepcAIC(model, # groupCandidates = factors, # slopeCandidates = nums, direction = direction, steps = steps, allowUseAcross = TRUE )$finalModel # Using MuMIn's dredge(): works nicely, but throws unnecessary warnings and requires setting a global option for na.action, even though there are no NaNs. # The code is here: https://github.com/cran/MuMIn/blob/master/R/dredge.R Maybe it could be reimplemented? # if (!requireNamespace("MuMIn", quietly = TRUE)) { # stop("Package 'MuMIn' required for this function to work.
Please install it by running `install.packages('MuMIn')`.") # } # model <- lmer(Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), data = iris, na.action = na.fail) # summary(MuMIn::get.models(MuMIn::dredge(model), 1)[[1]]) best } parameters/R/model_parameters.multinom.R0000644000176200001440000000546513613634253020147 0ustar liggesusers#' Parameters from multinomial or cumulative link models #' #' Parameters from multinomial or cumulative link models #' #' @param model A model with multinomial or categorical response value. #' @inheritParams model_parameters.default #' @inheritParams simulate_model #' #' @details Multinomial or cumulative link models, i.e. models where the #' response value (dependent variable) is categorical and has more than two #' levels, usually return coefficients for each response level. Hence, the #' output from \code{model_parameters()} will split the coefficient tables #' by the different levels of the model's response. #' #' @seealso \code{\link[=standardize_names]{standardize_names()}} to rename #' columns into a consistent, standardized naming scheme. #' #' @examples #' library(parameters) #' if (require("brglm2")) { #' data("stemcell") #' model <- bracl( #' research ~ as.numeric(religion) + gender, #' weights = frequency, #' data = stemcell, #' type = "ML" #' ) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @inheritParams simulate_model #' @export model_parameters.mlm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, ...) { out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Response"), standardize = standardize, exponentiate = exponentiate, robust = FALSE, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @rdname model_parameters.mlm #' @export model_parameters.multinom <- model_parameters.mlm #' @export model_parameters.brmultinom <- model_parameters.mlm #' @rdname model_parameters.mlm #' @export model_parameters.bracl <- model_parameters.mlm #' @rdname model_parameters.mlm #' @export model_parameters.DirichletRegModel <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "precision"), standardize = NULL, exponentiate = FALSE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component", "Response") } else { merge_by <- c("Parameter", "Response") } ## TODO check merge by junk <- utils::capture.output(out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, robust = FALSE, ... )) out$Response[is.na(out$Response)] <- "" attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } parameters/R/check_factorstructure.R0000644000176200001440000001467513603206134017350 0ustar liggesusers#' Check suitability of data for Factor Analysis (FA) #' #' This checks whether the data is appropriate for Factor Analysis (FA) by running the \link[=check_sphericity]{Bartlett's Test of Sphericity} and the \link[=check_kmo]{Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA)}. #' #' @inheritParams check_sphericity #' @examples #' library(parameters) #' check_factorstructure(mtcars) #' @return A list of lists of indices related to sphericity and KMO. 
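#' #' @details The overall check fails (and is flagged in red) as soon as either the KMO check or the sphericity check fails; only if both pass is the data considered suitable for factor analysis.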
#' @seealso \code{\link{check_kmo}}, \code{\link{check_sphericity}} and \code{\link{check_clusterstructure}}. #' @export check_factorstructure <- function(x, ...) { # TODO: detect (and remove?) factors # TODO: This could be improved using the correlation package to use different correlation methods kmo <- check_kmo(x, ...) sphericity <- check_sphericity(x, ...) text <- paste0(" - KMO: ", attributes(kmo)$text, "\n - Sphericity: ", attributes(sphericity)$text) if (attributes(kmo)$color == "red" | attributes(sphericity)$color == "red") { color <- "red" } else { color <- "green" } out <- list(KMO = kmo, sphericity = sphericity) attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "Is the data suitable for Factor Analysis?" class(out) <- c("easystats_check", class(out)) out } #' Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA) for Factor Analysis #' #' Kaiser (1970) introduced a Measure of Sampling Adequacy (MSA), later modified by Kaiser and Rice (1974). The Kaiser-Meyer-Olkin (KMO) statistic, which can vary from 0 to 1, indicates the degree to which each variable in a set is predicted without error by the other variables. #' #' A value of 0 indicates that the sum of partial correlations is large relative to the sum of correlations, indicating factor analysis is likely to be inappropriate. A KMO value close to 1 indicates that the sum of partial correlations is not large relative to the sum of correlations and so factor analysis should yield distinct and reliable factors. #' #' Kaiser (1975) suggested that KMO > .9 were marvelous, in the .80s, meritorious, in the .70s, middling, in the .60s, mediocre, in the .50s, miserable, and less than .5, unacceptable. Hair et al. (2006) suggest accepting a value > 0.5. Values between 0.5 and 0.7 are mediocre, and values between 0.7 and 0.8 are good. #' #' #' @inheritParams check_sphericity #' #' @examples #' library(parameters) #' check_kmo(mtcars) #' @return A list of indices related to KMO. #' #' @details This function is strongly inspired by the \code{KMO} function in the \code{psych} package (Revelle, 2016). All credits go to its author. #' #' @references \itemize{ #' \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. #' \item Kaiser, H. F. (1970). A second generation little jiffy. Psychometrika, 35(4), 401-415. #' \item Kaiser, H. F., & Rice, J. (1974). Little jiffy, mark IV. Educational and Psychological Measurement, 34(1), 111-117. #' \item Kaiser, H. F. (1974). An index of factorial simplicity. Psychometrika, 39(1), 31-36. #' } #' @importFrom stats cor cov2cor #' @export check_kmo <- function(x, ...) { cormatrix <- stats::cor(x, use = "pairwise.complete.obs", ...)
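# The MSA is based on the anti-image approach: inverting the correlation matrix and rescaling it to a correlation metric yields (the negatives of) the partial correlations, so the statistic below compares the sum of squared zero-order correlations with the sum of squared partial correlations.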
Q <- solve(cormatrix) Q <- stats::cov2cor(Q) diag(Q) <- 0 diag(cormatrix) <- 0 sumQ2 <- sum(Q^2) sumr2 <- sum(cormatrix^2) MSA <- sumr2 / (sumr2 + sumQ2) MSA_variable <- colSums(cormatrix^2) / (colSums(cormatrix^2) + colSums(Q^2)) out <- list(MSA = MSA, MSA_variable = MSA_variable) if (MSA < 0.5) { text <- sprintf("The Kaiser, Meyer, Olkin (KMO) measure of sampling adequacy suggests that factor analysis is likely to be inappropriate (KMO = %.2f).", MSA) color <- "red" } else { text <- sprintf("The Kaiser, Meyer, Olkin (KMO) measure of sampling adequacy suggests that the data seem appropriate for factor analysis (KMO = %.2f).", MSA) color <- "green" } attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "KMO Measure of Sampling Adequacy" class(out) <- c("easystats_check", class(out)) out } #' Bartlett's Test of Sphericity #' #' Bartlett (1951) introduced the test of sphericity, which tests whether a matrix is significantly different from an identity matrix. It is a statistical test for the presence of correlations among the variables, providing the statistical probability that the correlation matrix has significant correlations among at least some of the variables. For factor analysis to work, some relationships between variables are needed; thus, a significant Bartlett’s test of sphericity is required (say, p < .001). #' #' #' @param x A data frame. #' @param ... Arguments passed to or from other methods. #' #' @examples #' library(parameters) #' check_sphericity(mtcars) #' @details This function is strongly inspired by the \code{cortest.bartlett} function in the \pkg{psych} package (Revelle, 2016). All credits go to its author. #' #' @return A list of indices related to sphericity. #' #' @references \itemize{ #' \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. #' \item Bartlett, M. S. (1951). The effect of standardization on a Chi-square approximation in factor analysis. Biometrika, 38(3/4), 337-344. #' } #' #' @importFrom stats pchisq cor #' #' @export check_sphericity <- function(x, ...) { # This could be improved using the correlation package to use different correlation methods cormatrix <- stats::cor(x, use = "pairwise.complete.obs", ...) n <- nrow(x) p <- dim(cormatrix)[2] detR <- det(cormatrix) statistic <- -log(detR) * (n - 1 - (2 * p + 5) / 6) df <- p * (p - 1) / 2 pval <- stats::pchisq(statistic, df, lower.tail = FALSE) out <- list(chisq = statistic, p = pval, dof = df) if (pval < 0.001) { text <- sprintf("Bartlett's test of sphericity suggests that there is sufficient significant correlation in the data for factor analysis (Chisq(%i) = %.2f, %s).", df, statistic, format_p(pval)) color <- "green" } else { text <- sprintf("Bartlett's test of sphericity suggests that there is not enough significant correlation in the data for factor analysis (Chisq(%i) = %.2f, %s).", df, statistic, format_p(pval)) color <- "red" } attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "Test of Sphericity" class(out) <- c("easystats_check", class(out)) out } parameters/R/parameters_type.R0000644000176200001440000002077113613634253016152 0ustar liggesusers#' Type of model parameters #' #' @param model A statistical model. #' @param ... Arguments passed to or from other methods.
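#' #' @details The returned data frame contains (at least) the columns \code{Parameter}, \code{Type} (e.g. \code{"intercept"}, \code{"numeric"}, \code{"factor"} or \code{"interaction"}), \code{Link} (\code{"Mean"}, \code{"Association"} or \code{"Difference"}), \code{Term}, \code{Variable} and \code{Level}, plus the corresponding \code{Secondary_*} columns that describe the second term of an interaction.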
#' #' @examples #' library(parameters) #' #' model <- lm(Sepal.Length ~ Petal.Length + Species, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) #' parameters_type(model) #' #' # Interactions #' model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Sepal.Width * Species * Petal.Length, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species / Sepal.Width, data = iris) #' parameters_type(model) #' #' #' # Complex interactions #' data <- iris #' data$fac2 <- ifelse(data$Sepal.Width > mean(data$Sepal.Width), "A", "B") #' model <- lm(Sepal.Length ~ Species / fac2 / Petal.Length, data = data) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species / fac2 * Petal.Length, data = data) #' parameters_type(model) #' @return A data frame. #' @export parameters_type <- function(model, ...) { # Get info params <- data.frame( Parameter = insight::find_parameters(model, effects = "fixed", flatten = TRUE), stringsAsFactors = FALSE ) # Special case if (inherits(model, "polr")) { params$Parameter <- gsub("Intercept: ", "", params$Parameter, fixed = TRUE) } # Special case if (inherits(model, "bracl")) { params$Parameter <- gsub("(.*):(.*)", "\\2", params$Parameter) } # Special case if (inherits(model, "DirichletRegModel")) { cf <- stats::coef(model) if (model$parametrization == "common") { pattern <- paste0("(", paste(model$varnames, collapse = "|"), ")\\.(.*)") params$Parameter <- gsub(pattern, "\\2", names(unlist(cf))) } else { params$Parameter <- gsub("(.*)\\.(.*)\\.(.*)", "\\3", names(unlist(cf))) } } # Remove "as.factor()", "log()" etc. from parameter names but save original parameter before original_parameter <- params$Parameter params$Parameter <- .clean_parameter_names(params$Parameter, full = TRUE) ## TODO can we get rid of the count_ / zero_ prefix here? 
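# zero-inflation models prefix their parameter names with "count_" or "zero_"; strip these prefixes so the names can be matched against the variables in the model data below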
if (inherits(model, c("zeroinfl", "hurdle", "zerocount"))) { params$Parameter <- gsub("^(count_|zero_)", "", params$Parameter) } data <- insight::get_data(model) if (is.null(data)) { return(NULL) } reference <- .list_factors_numerics(data) # Get types main <- .parameters_type_table(names = params$Parameter, data, reference) secondary <- .parameters_type_table(names = main$Secondary_Parameter, data, reference) names(secondary) <- paste0("Secondary_", names(secondary)) names(secondary)[names(secondary) == "Secondary_Secondary_Parameter"] <- "Tertiary_Parameter" out <- cbind(params, main, secondary) # Deal with nested interactions for (i in unique(paste0(out[out$Type == "interaction", "Variable"], out[out$Type == "interaction", "Secondary_Variable"]))) { interac <- out[paste0(out$Variable, out$Secondary_Variable) == i, ] if (!all(interac$Term %in% out$Parameter)) { out[paste0(out$Variable, out$Secondary_Variable) == i, "Type"] <- "nested" } } for (i in unique(out$Secondary_Parameter)) { if (!is.na(i) && i %in% out$Parameter) { .param_type <- out[!is.na(out$Parameter) & out$Parameter == i, "Type"] .param_secondary_type <- out[!is.na(out$Secondary_Parameter) & out$Secondary_Parameter == i, "Secondary_Type"] if (length(.param_type) == length(.param_secondary_type) || length(.param_type) == 1) { out[!is.na(out$Secondary_Parameter) & out$Secondary_Parameter == i, "Secondary_Type"] <- .param_type } } } out$Parameter <- original_parameter out } #' @keywords internal .parameters_type_table <- function(names, data, reference) { out <- lapply(names, .parameters_type, data = data, reference = reference) out <- as.data.frame(do.call(rbind, out), stringsAsFactors = FALSE) names(out) <- c("Type", "Link", "Term", "Variable", "Level", "Secondary_Parameter") out } #' @keywords internal .parameters_type <- function(name, data, reference) { if (grepl(":", name, fixed = TRUE)) { # Split var <- unlist(strsplit(name, ":", fixed = TRUE)) if (length(var) > 2) { var <- c(utils::tail(var, 1), paste0(utils::head(var, -1), collapse = ":")) } else { var <- rev(var) } # Check if any is factor types <- unlist(lapply(var, function(x, data, reference) .parameters_type_basic(x, data, reference)[1], data = data, reference = reference)) link <- ifelse(any("factor" %in% types), "Difference", "Association") # Get type main <- .parameters_type_basic(var[1], data, reference) return(c("interaction", link, main[3], main[4], main[5], var[2])) } else { .parameters_type_basic(name, data, reference) } } #' @importFrom utils tail head #' @keywords internal .parameters_type_basic <- function(name, data, reference) { if (is.na(name)) { return(c(NA, NA, NA, NA, NA, NA)) } # parameter type is determined here. for formatting / printing, # refer to ".format_parameter()". Make sure that pattern # processed here are not "cleaned" (i.e. 
removed) in # ".clean_parameter_names()" cleaned_name <- .clean_parameter_names(name, full = TRUE) # Intercept if (.in_intercepts(cleaned_name)) { return(c("intercept", "Mean", "(Intercept)", NA, NA, NA)) # Numeric } else if (cleaned_name %in% reference$numeric) { return(c("numeric", "Association", name, name, NA, NA)) # Factors } else if (cleaned_name %in% reference$levels) { fac <- reference$levels_parent[match(cleaned_name, reference$levels)] return(c( "factor", "Difference", name, fac, gsub(fac, "", name, fixed = TRUE), NA )) # Polynomials } else if (grepl("poly(", name, fixed = TRUE)) { if (grepl(", raw = TRUE", name, fixed = TRUE)) { name <- gsub(", raw = TRUE", "", name, fixed = TRUE) type <- "poly_raw" } else { type <- "poly" } vars <- gsub("poly(", "", name, fixed = TRUE) vars <- unlist(strsplit(vars, ", ", fixed = TRUE)) var <- vars[[1]] degree <- substr(vars[[2]], nchar(vars[[2]]), nchar(vars[[2]])) return(c(type, "Association", name, var, degree, NA)) # Splines } else if (grepl("(bs|ns|psline|lspline|rcs)\\(", name)) { type <- "spline" var <- gsub("(bs|ns|psline|lspline|rcs)\\((.*)\\)(\\d)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } degree <- gsub("(bs|ns|psline|lspline|rcs)\\((.*)\\)(\\d)", "\\3", name) return(c(type, "Association", name, var, degree, NA)) # log-transformation } else if (grepl("(log|logb|log1p|log2|log10)\\(", name)) { type <- "logarithm" var <- gsub("(log|logb|log1p|log2|log10)\\((.*)\\)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } return(c(type, "Association", name, var, NA, NA)) # As Is } else if (grepl("^I\\(", name)) { type <- "asis" var <- gsub("^I\\((.*)\\)", "\\1", name) return(c(type, "Association", name, var, NA, NA)) # Smooth } else if (grepl("^s\\(", name)) { return(c("smooth", "Association", name, NA, NA, NA)) # Smooth } else if (grepl("^smooth_", name)) { return(c("smooth", "Association", gsub("^smooth_(.*)\\[(.*)\\]", "\\2", name), NA, NA, NA)) } else { return(c("unknown", NA, NA, NA, NA, NA)) } } #' @keywords internal .list_factors_numerics <- function(data) { out <- list() out$numeric <- names(data[sapply(data, is.numeric)]) # Factors out$factor <- names(data[sapply(data, is.factor) | sapply(data, is.character)]) out$levels <- NA out$levels_parent <- NA for (fac in out$factor) { levels <- paste0(fac, unique(data[[fac]])) out$levels_parent <- c(out$levels_parent, rep(fac, length(levels))) out$levels <- c(out$levels, levels) } out$levels <- out$levels[!is.na(out$levels)] out$levels_parent <- out$levels_parent[!is.na(out$levels_parent)] out } parameters/R/check_multimodal.R0000644000176200001440000000763613603206134016253 0ustar liggesusers#' Check if a distribution is unimodal or multimodal #' #' For univariate distributions (one-dimensional vectors), this function performs an Ameijeiras-Alonso et al. (2018) excess mass test. For multivariate distributions (data frames), it uses mixture modelling. However, it seems that it always returns a significant result (suggesting that the distribution is multimodal). A better method might be needed here. #' #' #' @param x A numeric vector or a data frame.
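#' @return A list of class \code{easystats_check}: for data frames, it contains the elements \code{Chisq}, \code{df} and \code{p}; for numeric vectors, \code{p} and \code{excess_mass}.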
#' @inheritParams check_factorstructure #' #' @examples #' # Univariate #' x <- rnorm(2000) #' check_multimodal(x) #' #' x <- c(rnorm(1000), rnorm(1000, 2)) #' check_multimodal(x) #' #' # Multivariate #' \donttest{ #' m <- data.frame( #' x = rnorm(200), #' y = rbeta(200, 2, 1) #' ) #' plot(m$x, m$y) #' check_multimodal(m) #' #' m <- data.frame( #' x = c(rnorm(100), rnorm(100, 4)), #' y = c(rbeta(100, 2, 1), rbeta(100, 1, 4)) #' ) #' plot(m$x, m$y) #' check_multimodal(m) #' } #' @references \itemize{ #' \item Ameijeiras-Alonso, J., Crujeiras, R. M., \& Rodríguez-Casal, A. (2019). Mode testing, critical bandwidth and excess mass. Test, 28(3), 900-919. #' } #' @export check_multimodal <- function(x, ...) { UseMethod("check_multimodal") } #' @importFrom stats logLik #' @export check_multimodal.data.frame <- function(x, ...) { if (!requireNamespace("mclust", quietly = TRUE)) { stop("Package 'mclust' required for this function to work. Please install it by running `install.packages('mclust')`.") } mclustBIC <- mclust::mclustBIC # this is needed as it is internally required by the following functions model <- mclust::Mclust(x, verbose = FALSE) model_h0 <- mclust::Mclust(x, G = 1, verbose = FALSE) # Parametric loglik <- logLik(model) loglik0 <- logLik(model_h0) rez <- list(Chisq = as.numeric(loglik - loglik0), df = attributes(loglik)$df - 2) rez$p <- 1 - stats::pchisq(rez$Chisq, df = rez$df) # Text text <- "The parametric mixture modelling test suggests that " if (rez$p < .05) { text <- paste0( text, "the multivariate distribution is significantly multimodal (Chi2(", insight::format_value(rez$df, protect_integers = TRUE), ") = ", insight::format_value(rez$Chisq), ", ", format_p(rez$p), ").\n" ) color <- "green" } else { text <- paste0( text, "the hypothesis of a multimodal multivariate distribution cannot be rejected (Chi2(", insight::format_value(rez$df, protect_integers = TRUE), ") = ", insight::format_value(rez$Chisq), ", ", format_p(rez$p), ").\n" ) color <- "yellow" } attr(rez, "text") <- text attr(rez, "color") <- color attr(rez, "title") <- "Is the data multimodal?" class(rez) <- c("easystats_check", class(rez)) rez } #' @export check_multimodal.numeric <- function(x, ...) { if (!requireNamespace("multimode", quietly = TRUE)) { stop("Package 'multimode' required for this function to work. Please install it by running `install.packages('multimode')`.") } rez <- multimode::modetest(x, mod0 = 1, method = "ACR") rez <- list(p = rez$p.value, excess_mass = rez$statistic) text <- "The Ameijeiras-Alonso et al. (2018) excess mass test suggests that " if (rez$p < .05) { text <- paste0( text, "the distribution is significantly multimodal (excess mass = ", insight::format_value(rez$excess_mass), ", ", format_p(rez$p), ").\n" ) color <- "green" } else { text <- paste0( text, "the hypothesis of a multimodal distribution cannot be rejected (excess mass = ", insight::format_value(rez$excess_mass), ", ", format_p(rez$p), ").\n" ) color <- "yellow" } attr(rez, "text") <- text attr(rez, "color") <- color attr(rez, "title") <- "Is the variable multimodal?" class(rez) <- c("easystats_check", class(rez)) rez } parameters/R/model_parameters.Mclust.R0000644000176200001440000000221113610640726017533 0ustar liggesusers#' Parameters from Mixture Models #' #' Format mixture models obtained, for example, by \code{mclust::Mclust}. #' #' @param model Mixture model. #' @param ... Arguments passed to or from other methods.
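#' #' @return A data frame with one row per cluster, containing the cluster sizes (\code{n_Obs}) and the mean of each variable within each cluster; the long-format means and the classification vector are stored as attributes (\code{"means"} and \code{"scores"}).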
#' #' @examples #' library(parameters) #' library(mclust) #' #' model <- mclust::Mclust(iris[1:4], verbose = FALSE) #' model_parameters(model) #' @export model_parameters.Mclust <- function(model, ...) { params <- cbind( data.frame( Cluster = as.data.frame(table(model$classification))$Var1, n_Obs = as.data.frame(table(model$classification))$Freq ), t(model$parameters$mean) ) # Long means means <- .long_loadings(params, loadings_columns = 3:ncol(params)) means <- means[c("Cluster", "Loading", "Component")] names(means) <- c("Cluster", "Mean", "Variable") attr(params, "means") <- means attr(params, "scores") <- model$classification attr(params, "model") <- model attr(params, "type") <- "mixture" attr(params, "title") <- "Gaussian finite mixture model fitted by EM algorithm" class(params) <- c("parameters_model", "parameters_clusters", class(params)) params } parameters/R/utils_pca_efa.R0000644000176200001440000002024213607401411015534 0ustar liggesusers# summary ----------------------------------------------------------------- #' @export summary.parameters_efa <- function(object, ...) { x <- attributes(object)$summary cols <- intersect( c("Std_Dev", "Eigenvalues", "Variance", "Variance_Cumulative", "Variance_Proportion"), colnames(x) ) x <- as.data.frame(t(x[, cols])) x <- cbind(data.frame("Parameter" = row.names(x), stringsAsFactors = FALSE), x) names(x) <- c("Parameter", attributes(object)$summary$Component) row.names(x) <- NULL if ("parameters_efa" %in% class(object)) { class(x) <- c("parameters_efa_summary", class(object)) } else { class(x) <- c("parameters_pca_summary", class(object)) } x } #' @export summary.parameters_pca <- summary.parameters_efa #' @export model_parameters.parameters_efa <- function(model, ...) { x <- attributes(model)$summary if ("parameters_efa" %in% class(model)) { class(x) <- c("parameters_efa_summary", class(model)) } else { class(x) <- c("parameters_pca_summary", class(model)) } x } #' @export model_parameters.parameters_pca <- model_parameters.parameters_efa #' @export summary.parameters_omega <- function(object, ...) { table_var <- attributes(object)$summary class(table_var) <- c("parameters_omega_summary", class(table_var)) table_var } # predict ----------------------------------------------------------------- #' @export predict.parameters_efa <- function(object, newdata = NULL, names = NULL, ...) { if (is.null(newdata)) { out <- as.data.frame(attributes(object)$scores) } else { out <- as.data.frame(predict(attributes(object)$model, newdata = newdata, ...)) } if (!is.null(names)) { names(out)[1:length(c(names))] <- names } row.names(out) <- NULL out } #' @export predict.parameters_pca <- predict.parameters_efa # print ------------------------------------------------------------------- #' @importFrom insight format_table #' @export print.parameters_efa_summary <- function(x, digits = 3, ...) 
{ insight::print_color("# (Explained) Variance of Components\n\n", "blue") if ("Parameter" %in% names(x)) { x$Parameter <- c("Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") } else if ("Component" %in% names(x)) { names(x) <- c("Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") } cat(insight::format_table(x, digits = digits, ...)) invisible(x) } #' @export print.parameters_pca_summary <- print.parameters_efa_summary #' @importFrom insight print_color print_colour #' @export print.parameters_efa <- function(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) { # Labels if (!is.null(labels)) { x$Label <- labels x <- x[c("Variable", "Label", names(x)[!names(x) %in% c("Variable", "Label")])] } # Sorting if (isTRUE(sort)) { x <- .sort_loadings(x) } # Replace by NA all cells below threshold if (!is.null(threshold)) { x <- .filter_loadings(x, threshold = threshold) } rotation_name <- attr(x, "rotation", exact = TRUE) if (rotation_name == "none") { insight::print_color("# Loadings from Principal Component Analysis (no rotation)\n\n", "blue") } else { insight::print_color(sprintf("# Rotated loadings from Principal Component Analysis (%s-rotation)\n\n", rotation_name), "blue") } cat(insight::format_table(x, digits = digits, ...)) if (!is.null(attributes(x)$type)) { cat("\n") insight::print_colour(.text_components_variance(x), "yellow") cat("\n") } } #' @export print.parameters_pca <- print.parameters_efa #' @export print.parameters_omega <- function(x, ...) { names(x) <- c("Composite", "Omega (total)", "Omega (hierarchical)", "Omega (group)") cat(insight::format_table(x)) } #' @export print.parameters_omega_summary <- function(x, ...) { names(x) <- c("Composite", "Total Variance (%)", "Variance due to General Factor (%)", "Variance due to Group Factor (%)") cat(insight::format_table(x)) } #' @keywords internal .text_components_variance <- function(x) { type <- attributes(x)$type if (type %in% c("prcomp", "principal", "pca")) { type <- "principal component" } else if (type %in% c("fa")) { type <- "latent factor" } else if (type %in% c("kmeans")) { type <- "cluster" } else { type <- paste0(type, " component") } if (type == "cluster") { summary <- as.data.frame(x) variance <- attributes(x)$variance * 100 } else { summary <- attributes(x)$summary variance <- max(summary$Variance_Cumulative) * 100 } if (nrow(summary) == 1) { text <- paste0("The unique ", type) } else { text <- paste0("The ", nrow(summary), " ", type, "s") } # rotation if (!is.null(attributes(x)$rotation) && attributes(x)$rotation != "none") { text <- paste0(text, " (", attributes(x)$rotation, " rotation)") } text <- paste0( text, " accounted for ", sprintf("%.2f", variance), "% of the total variance of the original data" ) if (type == "cluster" || nrow(summary) == 1) { text <- paste0(text, ".") } else { text <- paste0( text, " (", paste0(summary$Component, " = ", sprintf("%.2f", summary$Variance * 100), "%", collapse = ", " ), ")." ) } text } # sort -------------------------------------------------------------------- #' @export sort.parameters_efa <- function(x, ...)
{ .sort_loadings(x) } #' @export sort.parameters_pca <- sort.parameters_efa #' @keywords internal .sort_loadings <- function(loadings, cols = NULL) { if (is.null(cols)) { cols <- attributes(loadings)$loadings_columns } # Remove variable name column x <- loadings[, cols, drop = FALSE] row.names(x) <- NULL # Initialize clusters nitems <- nrow(x) loads <- data.frame(item = seq_len(nitems), cluster = rep(0, nitems)) # first sort them into clusters: Find the maximum for each row and assign it to that cluster loads$cluster <- apply(abs(x), 1, which.max) ord <- sort(loads$cluster, index.return = TRUE) x[1:nitems, ] <- x[ord$ix, ] rownames(x)[1:nitems] <- rownames(x)[ord$ix] total.ord <- ord$ix # now sort column-wise, so that within each cluster, items are ordered by the size of their loadings items <- table(loads$cluster) # how many items are in each cluster? first <- 1 item <- loads$item for (i in 1:length(items)) { if (items[i] > 0) { last <- first + items[i] - 1 ord <- sort(abs(x[first:last, i]), decreasing = TRUE, index.return = TRUE) x[first:last, ] <- x[item[ord$ix + first - 1], ] loads[first:last, 1] <- item[ord$ix + first - 1] rownames(x)[first:last] <- rownames(x)[ord$ix + first - 1] total.ord[first:last] <- total.ord[ord$ix + first - 1] first <- first + items[i] } } order <- row.names(x) loadings <- loadings[as.numeric(as.character(order)), ] # Arrange by max row.names(loadings) <- NULL loadings } # Filter -------------------------------------------------------------------- #' @keywords internal .filter_loadings <- function(loadings, threshold = 0.2, loadings_columns = NULL) { if (is.null(loadings_columns)) { loadings_columns <- attributes(loadings)$loadings_columns } if (threshold == "max" | threshold >= 1) { if (threshold == "max") { for (row in 1:nrow(loadings)) { maxi <- max(abs(loadings[row, loadings_columns, drop = FALSE])) loadings[row, loadings_columns][abs(loadings[row, loadings_columns]) < maxi] <- NA } } else { for (col in loadings_columns) { loadings[tail(order(abs(loadings[, col]), decreasing = TRUE), -round(threshold)), col] <- NA } } } else { loadings[, loadings_columns][abs(loadings[, loadings_columns]) < threshold] <- NA } loadings } parameters/R/ci_profile_boot.R0000644000176200001440000000403413615056553016101 0ustar liggesusers#' @importFrom insight get_parameters #' @importFrom stats confint .ci_profiled <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame(stats::confint(model, level = ci), stringsAsFactors = FALSE) names(out) <- c("CI_low", "CI_high") out$CI <- ci * 100 out$Parameter <- insight::get_parameters(model, effects = "fixed", component = "conditional")$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- ci_wald(model, ci = ci) } glm_ci } # we need this function for models where confint and get_parameters return # results of different length (e.g., for "polr" models) #' @importFrom stats confint .ci_profiled2 <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame(stats::confint(model, level = ci), stringsAsFactors = FALSE) names(out) <- c("CI_low", "CI_high") out$CI <- ci * 100 out$Parameter <- .remove_backticks_from_string(rownames(out)) out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- ci_wald(model, ci = ci) } glm_ci } # bootstrapped confidence intervals for mixed models, based on # lme4::confint.merMod(..., method = "boot") #' @keywords internal .ci_boot_merMod <- function(x, ci, ...)
{ if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required for this function to work. Please install it by running `install.packages('lme4')`.") } # Compute out <- as.data.frame(lme4::confint.merMod(x, level = ci, method = "boot", ...)) rownames(out) <- gsub("`", "", rownames(out), fixed = TRUE) out <- out[rownames(out) %in% insight::find_parameters(x, effects = "fixed")$conditional, ] names(out) <- c("CI_low", "CI_high") # Clean up out$Parameter <- row.names(out) out$CI <- ci out <- out[c("Parameter", "CI", "CI_low", "CI_high")] row.names(out) <- NULL out } parameters/R/p_value_ml1.R0000644000176200001440000000574513614103332015144 0ustar liggesusers#' "m-l-1" approximation for SEs, CIs and p-values #' #' Approximation of degrees of freedom based on a "m-l-1" heuristic as suggested by Elff et al. (2019). #' #' @param model A mixed model. #' @param dof Degrees of Freedom. #' @inheritParams ci.merMod #' #' @details \subsection{Small Sample Cluster-corrected Degrees of Freedom}{ #' Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics (see \cite{Li and Redden 2015}). The #' \emph{m-l-1} heuristic is such an approach that uses a t-distribution with #' fewer degrees of freedom (\code{dof_ml1}) to calculate p-values #' (\code{p_value_ml1}), standard errors (\code{se_ml1}) and confidence intervals #' (\code{ci(method = "ml1")}). #' } #' \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ #' In particular for repeated measure designs (longitudinal data analysis), #' the \emph{m-l-1} heuristic is likely to be more accurate than simply using the #' residual or infinite degrees of freedom, because \code{dof_ml1()} returns #' different degrees of freedom for within-cluster and between-cluster effects. #' } #' \subsection{Limitations of the "m-l-1" Heuristic}{ #' Note that the "m-l-1" heuristic is not applicable (or at least less accurate) #' for complex multilevel designs, e.g. with cross-classified clusters. In such cases, #' more accurate approaches like the Kenward-Roger approximation (\code{dof_kenward()}) #' are recommended. However, the "m-l-1" heuristic also applies to generalized #' mixed models, while approaches like Kenward-Roger or Satterthwaite are limited #' to linear mixed models only. #' } #' @seealso \code{dof_ml1()} and \code{se_ml1()} are small helper-functions #' to calculate approximated degrees of freedom and standard errors of model #' parameters, based on the "m-l-1" heuristic. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_ml1(model) #' } #' } #' @return A data frame. #' @references \itemize{ #' \item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel Analysis with Few Clusters: Improving Likelihood-based Methods to Provide Unbiased Estimates and Accurate Inference, British Journal of Political Science. #' \item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15(1), 38.
\doi{10.1186/s12874-015-0026-x} #' } #' @importFrom stats pt coef #' @export p_value_ml1 <- function(model, dof = NULL) { if (is.null(dof)) { dof <- dof_ml1(model) } .p_value_dof(model, dof) } parameters/R/utils.R0000644000176200001440000001062713613647624014103 0ustar liggesusers#' help-functions #' @keywords internal .data_frame <- function(...) { x <- data.frame(..., stringsAsFactors = FALSE) rownames(x) <- NULL x } #' Flatten a list #' #' @param object A list. #' @param name Name of the key column in case the output is a data frame. #' @keywords internal .flatten_list <- function(object, name = "name") { if (length(object) == 1) { object[[1]] } else if (all(sapply(object, is.data.frame))) { if (is.null(names(object))) { as.data.frame(t(sapply(object, rbind))) } else { tryCatch( { rn <- names(object) object <- do.call(rbind, object) object[name] <- rn object[c(name, setdiff(names(object), name))] }, warning = function(w) { object }, error = function(e) { object } ) } } else { object } } #' Recode a variable so that its lowest value starts at zero #' #' @keywords internal .recode_to_zero <- function(x) { # check if factor if (is.factor(x) || is.character(x)) { # try to convert to numeric x <- .factor_to_numeric(x) } # retrieve lowest category minval <- min(x, na.rm = TRUE) sapply(x, function(y) y - minval) } #' Safe transformation from factor/character to numeric #' #' @importFrom stats na.omit #' @keywords internal .factor_to_numeric <- function(x) { if (is.numeric(x)) { return(x) } if (anyNA(suppressWarnings(as.numeric(as.character(stats::na.omit(x)))))) { if (is.character(x)) { x <- as.factor(x) } levels(x) <- 1:nlevels(x) } as.numeric(as.character(x)) } #' Safe transformation from factor/character to dummy variables #' #' @importFrom stats na.omit #' @keywords internal .factor_to_dummy <- function(x) { if (is.numeric(x)) { return(x) } # get unique levels / values values <- if (is.factor(x)) { levels(x) } else { stats::na.omit(unique(x)) } dummy <- as.data.frame(do.call(cbind, lapply(values, function(i) { out <- rep(0, length(x)) out[is.na(x)] <- NA out[x == i] <- 1 out }))) colnames(dummy) <- values dummy } #' Find most common occurrence #' #' @keywords internal .find_most_common <- function(x) { out <- names(sort(table(x), decreasing = TRUE))[1] if (is.numeric(x)) out <- as.numeric(out) out } #' remove NULL elements from lists #' @keywords internal .compact_list <- function(x) x[!sapply(x, function(i) length(i) == 0 || is.null(i) || any(i == "NULL"))] #' remove empty string from character #' @keywords internal .compact_character <- function(x) x[!sapply(x, function(i) nchar(i) == 0 || is.null(i) || any(i == "NULL"))] #' @keywords internal .rename_values <- function(x, old, new) { x[x %in% old] <- new x } #' for models with zero-inflation component, return required component of model-summary #' @keywords internal .filter_component <- function(dat, component) { switch( component, "conditional" = dat[dat$Component == "conditional", ], "zi" = , "zero_inflated" = dat[dat$Component == "zero_inflated", ], dat ) } # Find log-terms inside model formula, and return "clean" term names #' @importFrom insight find_terms .log_terms <- function(model) { x <- insight::find_terms(model, flatten = TRUE) gsub("^log\\((.*)\\)", "\\1", x[grepl("^log\\((.*)\\)", x)]) } # capitalize first character in string #' @keywords internal .capitalize <- function(x) { capped <- grep("^[A-Z]", x, invert = TRUE) substr(x[capped], 1, 1) <- toupper(substr(x[capped], 1, 1)) x } # collapse multi-line deparse() output into a single string #' @keywords internal .safe_deparse <-
function(string) { paste0(sapply(deparse(string, width.cutoff = 500), trimws, simplify = TRUE), collapse = " ") } #' @keywords internal .remove_columns <- function(data, variables) { to_remove <- which(colnames(data) %in% variables) if (length(to_remove)) data[, -to_remove, drop = FALSE] else data } #' @keywords internal .is_empty_object <- function(x) { if (is.list(x)) { x <- tryCatch({ .compact_list(x) }, error = function(x) { x }) } # this is an ugly fix because of ugly tibbles if (inherits(x, c("tbl_df", "tbl"))) x <- as.data.frame(x) x <- suppressWarnings(x[!is.na(x)]) length(x) == 0 || is.null(x) } parameters/R/se_kenward.R0000644000176200001440000000056013617475213015075 0ustar liggesusers#' @rdname p_value_kenward #' @importFrom insight get_parameters #' @export se_kenward <- function(model) { vcov_adj <- .vcov_kenward_ajusted(model) params <- insight::get_parameters(model, effects = "fixed") data.frame( Parameter = params$Parameter, SE = abs(as.vector(sqrt(diag(as.matrix(vcov_adj))))), stringsAsFactors = FALSE ) } parameters/R/dof_kenward.R0000644000176200001440000003007213617476323015242 0ustar liggesusers#' @rdname p_value_kenward #' @importFrom insight find_parameters #' @export dof_kenward <- function(model) { parameters <- insight::find_parameters(model, effects = "fixed", flatten = TRUE) L <- as.data.frame(diag(rep(1, n_parameters(model, effects = "fixed")))) stats::setNames(sapply(L, .kenward_adjusted_ddf, model = model), parameters) # stats::setNames(sapply(L, pbkrtest::get_ddf_Lb, object = model), parameters) } # The following code was taken from the "pbkrtest" package and slightly modified #' @author Søren Højsgaard, \email{sorenh@@math.aau.dk} .kenward_adjusted_ddf <- function(model, linear_coef) { .adjusted_ddf(.vcov_kenward_ajusted(model), linear_coef, stats::vcov(model)) } .adjusted_ddf <- function(adjusted_vcov, linear_coef, unadjusted_vcov = adjusted_vcov){ if (!requireNamespace("Matrix", quietly = TRUE)) { stop("Package 'Matrix' required for this function to work. Please install it.") } if (!is.matrix(linear_coef)) { linear_coef <- matrix(linear_coef, ncol = 1) } vlb <- sum(linear_coef * (unadjusted_vcov %*% linear_coef)) theta <- Matrix::Matrix(as.numeric(outer(linear_coef, linear_coef) / vlb), nrow = length(linear_coef)) P <- attr(adjusted_vcov, "P") W <- attr(adjusted_vcov, "W") A1 <- A2 <- 0 theta_unadjusted_vcov <- theta %*% unadjusted_vcov n.ggamma = length(P) for (ii in 1:n.ggamma) { for (jj in c(ii:n.ggamma)) { e <- ifelse(ii == jj, 1, 2) ui <- as.matrix(theta_unadjusted_vcov %*% P[[ii]] %*% unadjusted_vcov) uj <- as.matrix(theta_unadjusted_vcov %*% P[[jj]] %*% unadjusted_vcov) A1 <- A1 + e * W[ii, jj] * (sum(diag(ui)) * sum(diag(uj))) A2 <- A2 + e * W[ii, jj] * sum(ui * t(uj)) } } B <- (A1 + 6 * A2) / 2 g <- (2 * A1 - 5 * A2) / (3 * A2) c1 <- g / (3 + 2 * (1 - g)) c2 <- (1 - g) / (3 + 2 * (1 - g)) c3 <- (3 - g) / (3 + 2 * (1 - g)) EE <- 1 + A2 VV <- 2 * (1 + B) EEstar <- 1 / (1 - A2) VVstar <- 2 * ((1 + c1 * B) / ((1 - c2 * B)^2 * (1 - c3 * B))) V0 <- 1 + c1 * B V1 <- 1 - c2 * B V2 <- 1 - c3 * B V0 <- ifelse(abs(V0) < 1e-10, 0, V0) rho <- (.divZero(1 - A2, V1))^2 * V0 / V2 df2 <- 4 + 3 / (rho - 1) df2 } .divZero <- function(x, y, tol = 1e-14) { ## ratio x/y is set to 1 if both |x| and |y| are below tol if (abs(x) < tol & abs(y) < tol) { 1 } else { x / y } } #' @importFrom stats update .vcov_kenward_ajusted <- function(model) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required for this function to work. 
Please install it.") } if (!(lme4::getME(model, "is_REML"))) { model <- stats::update(model, . ~ ., REML = TRUE) } .vcovAdj16_internal(stats::vcov(model), .get_SigmaG(model), lme4::getME(model, "X")) } #' @importFrom stats sigma .get_SigmaG <- function(model) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required for this function to work. Please install it.") } if (!requireNamespace("Matrix", quietly = TRUE)) { stop("Package 'Matrix' required for this function to work. Please install it.") } GGamma <- lme4::VarCorr(model) SS <- .shgetME(model) ## Put covariance parameters for the random effects into a vector: ## Fixme: It is a bit ugly to throw everything into one long vector here; a list would be more elegant ggamma <- NULL for (ii in 1:(SS$n.RT)) { Lii <- GGamma[[ii]] ggamma <- c(ggamma, Lii[lower.tri(Lii, diag = TRUE)]) } ggamma <- c(ggamma, stats::sigma(model)^2) ## Extend ggamma by the residual variance n.ggamma <- length(ggamma) ## Find G_r: G <- NULL Zt <- lme4::getME(model, "Zt") for (ss in 1:SS$n.RT) { ZZ <- .shget_Zt_group(ss, Zt, SS$Gp) n.lev <- SS$n.lev.by.RT2[ss] ## ; cat(sprintf("n.lev=%i\n", n.lev)) Ig <- Matrix::sparseMatrix(1:n.lev, 1:n.lev, x = 1) for (rr in 1:SS$n.parm.by.RT[ss]) { ## This takes care of the case where there is random regression and several matrices have to be constructed. ## FIXME: I am not sure this is correct if there is a random quadratic term. The '2' below looks suspicious. ii.jj <- .index2UpperTriEntry(rr, SS$n.comp.by.RT[ss]) ##; cat("ii.jj:"); print(ii.jj) ii.jj <- unique(ii.jj) if (length(ii.jj) == 1) { EE <- Matrix::sparseMatrix( ii.jj, ii.jj, x = 1, dims = rep(SS$n.comp.by.RT[ss], 2) ) } else { EE <- Matrix::sparseMatrix(ii.jj, ii.jj[2:1], dims = rep(SS$n.comp.by.RT[ss], 2)) } EE <- Ig %x% EE ## Kronecker product G <- c(G, list(t(ZZ) %*% EE %*% ZZ)) } } ## Extend by the identity for the residual n.obs <- insight::n_obs(model) G <- c(G, list(Matrix::sparseMatrix(1:n.obs, 1:n.obs, x = 1))) Sigma <- ggamma[1] * G[[1]] for (ii in 2:n.ggamma) { Sigma <- Sigma + ggamma[ii] * G[[ii]] } list(Sigma = Sigma, G = G, n.ggamma = n.ggamma) } .index2UpperTriEntry <- function(k, N) { ## inverse of indexSymmat2vec ## result: index pair (i,j) with i>=j ## k: element in the vector of upper triangular elements ## example: N=3: k=1 -> (1,1), k=2 -> (1,2), k=3 -> (1,3), k=4 -> (2,2) aa <- cumsum(N:1) aaLow <- c(0, aa[-length(aa)]) i <- which(aaLow < k & k <= aa) j <- k - N * i + N - i * (3 - i) / 2 + i c(i, j) } .vcovAdj16_internal <- function(Phi, SigmaG, X) { if (!requireNamespace("MASS", quietly = TRUE)) { stop("Package 'MASS' required for this function to work. Please install it.") } if (!requireNamespace("Matrix", quietly = TRUE)) { stop("Package 'Matrix' required for this function to work.
Please install it.") } SigmaInv <- chol2inv(chol(Matrix::forceSymmetric(as.matrix(SigmaG$Sigma)))) n.ggamma <- SigmaG$n.ggamma TT <- as.matrix(SigmaInv %*% X) HH <- OO <- vector("list", n.ggamma) for (ii in 1:n.ggamma) { HH[[ii]] <- as.matrix(SigmaG$G[[ii]] %*% SigmaInv) OO[[ii]] <- as.matrix(HH[[ii]] %*% X) } ## Finding PP, QQ PP <- QQ <- NULL for (rr in 1:n.ggamma) { OrTrans <- t(OO[[rr]]) PP <- c(PP, list(Matrix::forceSymmetric(-1 * OrTrans %*% TT))) for (ss in rr:n.ggamma) { QQ <- c(QQ, list(OrTrans %*% SigmaInv %*% OO[[ss]])) } } PP <- as.matrix(PP) QQ <- as.matrix(QQ) Ktrace <- matrix(NA, nrow = n.ggamma, ncol = n.ggamma) for (rr in 1:n.ggamma) { HrTrans <- t(HH[[rr]]) for (ss in rr:n.ggamma) { Ktrace[rr, ss] <- Ktrace[ss, rr] <- sum(HrTrans * HH[[ss]]) } } ## Finding information matrix IE2 <- matrix(NA, nrow = n.ggamma, ncol = n.ggamma) for (ii in 1:n.ggamma) { Phi.P.ii <- Phi %*% PP[[ii]] for (jj in c(ii:n.ggamma)) { www <- .indexSymmat2vec(ii, jj, n.ggamma) IE2[ii, jj] <- IE2[jj, ii] <- Ktrace[ii, jj] - 2 * sum(Phi * QQ[[www]]) + sum(Phi.P.ii * (PP[[jj]] %*% Phi)) } } eigenIE2 <- eigen(IE2, only.values = TRUE)$values condi <- min(abs(eigenIE2)) WW <- if (condi > 1e-10) as.matrix(Matrix::forceSymmetric(2 * solve(IE2))) else as.matrix(Matrix::forceSymmetric(2 * MASS::ginv(IE2))) UU <- matrix(0, nrow = ncol(X), ncol = ncol(X)) for (ii in 1:(n.ggamma - 1)) { for (jj in c((ii + 1):n.ggamma)) { www <- .indexSymmat2vec(ii, jj, n.ggamma) UU <- UU + WW[ii, jj] * (QQ[[www]] - PP[[ii]] %*% Phi %*% PP[[jj]]) } } UU <- as.matrix(UU) UU <- UU + t(UU) for (ii in 1:n.ggamma) { www <- .indexSymmat2vec(ii, ii, n.ggamma) UU <- UU + WW[ii, ii] * (QQ[[www]] - PP[[ii]] %*% Phi %*% PP[[ii]]) } GGAMMA <- Phi %*% UU %*% Phi PhiA <- Phi + 2 * GGAMMA attr(PhiA, "P") <- PP attr(PhiA, "W") <- WW attr(PhiA, "condi") <- condi PhiA # n.ggamma <- SigmaG$n.ggamma # # M <- cbind(do.call(cbind, SigmaG$G), X) # SinvM <- chol2inv(chol(Matrix::forceSymmetric(SigmaG$Sigma))) %*% M # # v <- c(rep(1:length(SigmaG$G), each = nrow(SinvM)), rep(length(SigmaG$G) + 1, ncol(X))) # idx <- lapply(unique.default(v), function(i) { # which(v == i) # }) # SinvG <- lapply(idx, function(z) { # SinvM[, z] # }) # # SinvX <- SinvG[[length(SinvG)]] # SinvG[length(SinvG)] <- NULL # # OO <- lapply(1:n.ggamma, function(i) { # SigmaG$G[[i]] %*% SinvX # }) # # PP <- vector("list", n.ggamma) # QQ <- vector("list", n.ggamma * (n.ggamma + 1) / 2) # index <- 1 # for (r in 1:n.ggamma) { # OOt.r <- t(OO[[r]]) # PP[[r]] <- -1 * (OOt.r %*% SinvX) # for (s in r:n.ggamma) { # QQ[[index]] <- OOt.r %*% (SinvG[[s]] %*% SinvX) # index <- index + 1 # } # } # # Ktrace <- matrix(NA, nrow = n.ggamma, ncol = n.ggamma) # for (r in 1:n.ggamma) { # HHr <- SinvG[[r]] # for (s in r:n.ggamma) { # Ktrace[r, s] <- Ktrace[s, r] <- sum(HHr * SinvG[[s]]) # } # } # # ## Finding information matrix # IE2 <- matrix(0, nrow = n.ggamma, ncol = n.ggamma) # for (ii in 1:n.ggamma) { # Phi.P.ii <- Phi %*% PP[[ii]] # for (jj in c(ii:n.ggamma)) { # www <- .indexSymmat2vec(ii, jj, n.ggamma) # IE2[ii, jj] <- IE2[jj, ii] <- Ktrace[ii, jj] - # 2 * sum(Phi * QQ[[www]]) + sum(Phi.P.ii * (PP[[jj]] %*% Phi)) # } # } # # eigenIE2 <- eigen(IE2, only.values = TRUE)$values # condi <- min(abs(eigenIE2)) # WW <- if (condi > 1e-10) # Matrix::forceSymmetric(2 * solve(IE2)) # else # Matrix::forceSymmetric(2 * MASS::ginv(IE2)) # # UU <- matrix(0, nrow = ncol(X), ncol = ncol(X)) # for (ii in 1:(n.ggamma - 1)) { # for (jj in c((ii + 1):n.ggamma)) { # www <- .indexSymmat2vec(ii, jj, n.ggamma) # UU <- UU + 
WW[ii, jj] * (QQ[[www]] - PP[[ii]] %*% Phi %*% PP[[jj]]) # } # } # # UU <- UU + t(UU) # for (ii in 1:n.ggamma) { # www <- .indexSymmat2vec(ii, ii, n.ggamma) # UU <- UU + WW[ii, ii] * (QQ[[www]] - PP[[ii]] %*% Phi %*% PP[[ii]]) # } # # GGAMMA <- Phi %*% UU %*% Phi # PhiA <- Phi + 2 * GGAMMA # attr(PhiA, "P") <- PP # attr(PhiA, "W") <- WW # attr(PhiA, "condi") <- condi # PhiA } .indexSymmat2vec <- function(i, j, N) { ## S[i,j] symmetric N times N matrix ## r the vector of upper triangular elements in row major order: ## r = c(S[1,1], S[1,2], ..., S[1,j], S[1,N], S[2,2], ..., S[N,N]) ## Result: k: index of k-th element of r k <- if (i <= j) { (i - 1) * (N - i / 2) + j } else { (j - 1) * (N - j / 2) + i } } .shgetME <- function(model) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required for this function to work. Please install it.") } Gp <- lme4::getME(model, "Gp") n.RT <- length(Gp) - 1 ## Number of random terms (i.e. of (|)'s) n.lev.by.RT <- sapply(lme4::getME(model, "flist"), function(x) length(levels(x))) n.comp.by.RT <- .get.RT.dim.by.RT(model) n.parm.by.RT <- (n.comp.by.RT + 1) * n.comp.by.RT / 2 n.RE.by.RT <- diff(Gp) n.lev.by.RT2 <- n.RE.by.RT / n.comp.by.RT ## Same as n.lev.by.RT ??? list(Gp = Gp, ## group.index n.RT = n.RT, ## n.groupFac n.lev.by.RT = n.lev.by.RT, ## nn.groupFacLevelsNew n.comp.by.RT = n.comp.by.RT, ## nn.GGamma n.parm.by.RT = n.parm.by.RT, ## mm.GGamma n.RE.by.RT = n.RE.by.RT, ## ... Not returned before n.lev.by.RT2 = n.lev.by.RT2, ## nn.groupFacLevels n_rtrms = lme4::getME(model, "n_rtrms") ) } ## Alternative to .get_Zt_group .shget_Zt_group <- function(ii.group, Zt, Gp, ...) { zIndex.sub <- (Gp[ii.group] + 1):Gp[ii.group + 1] as.matrix(Zt[zIndex.sub, ]) } .get.RT.dim.by.RT <- function(model) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required for this function to work. Please install it.") } ## output: dimension (no of columns) of covariance matrix for random term ii if (inherits(model, "mer")) { sapply(model@ST, function(X) nrow(X)) } else { sapply(lme4::getME(model, "cnms"), length) } } parameters/R/dof_betwithin.R0000644000176200001440000000204113615056553015574 0ustar liggesusers#' @rdname p_value_betwithin #' @importFrom insight find_random_slopes find_parameters has_intercept n_obs #' @importFrom stats setNames #' @export dof_betwithin <- function(model) { if (!insight::model_info(model)$is_mixed) { stop("Model must be a mixed model.") } ngrps <- sum(.n_randomeffects(model)) parameters <- insight::find_parameters(model, effects = "fixed")[["conditional"]] within_effects <- unlist(insight::find_random_slopes(model)) has_intcp <- insight::has_intercept(model) ddf_within <- ngrps - n_parameters(model) ddf_between <- insight::n_obs(model) - ngrps - n_parameters(model) if (has_intcp) { ddf_between <- ddf_between - 1 ddf_within <- ddf_within - 1 } within_index <- match(within_effects, parameters) ddf <- stats::setNames(1:length(parameters), parameters) if (length(within_index) > 0) { ddf[within_index] <- ddf_within ddf[-within_index] <- ddf_between } else { ddf <- ddf_between } ddf } parameters/R/select_parameters.stanreg.R0000644000176200001440000000443013616217062020112 0ustar liggesusers#' @param method The method used in the variable selection. Can be \code{NULL} (default), \code{"forward"} or \code{"L1"}. See \code{projpred::varsel}. #' @param cross_validation Select with cross-validation. 
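#' @examples #' \dontrun{ #' # A sketch of the intended workflow; illustrative only, and assumes the #' # 'rstanarm' and 'projpred' packages are installed: #' model <- rstanarm::stan_glm(mpg ~ cyl + disp + hp + wt, data = mtcars, refresh = 0) #' select_parameters(model, method = "forward") #' select_parameters(model, cross_validation = TRUE) #' }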
#' @rdname select_parameters #' @importFrom stats update #' @export select_parameters.stanreg <- function(model, method = NULL, cross_validation = FALSE, ...) { if (!requireNamespace("projpred", quietly = TRUE)) { stop("Package 'projpred' required for this function to work. Please install it by running `install.packages('projpred')`.") } if (cross_validation) { message("Cross-validating best parameters...") junk <- utils::capture.output(selection <- projpred::cv_varsel(model, method = method, ...)) } else { selection <- projpred::varsel(model, method = method, ...) } # Visualise # varsel_plot(selection, stats = c('elpd', 'rmse'), deltas=T) # Extract parameters projection <- projpred::project(selection, nv = projpred::suggest_size(selection), ...) parameters <- row.names(projection$beta) # Reconstruct formula formula <- .reconstruct_formula(parameters, model) # Update model junk <- utils::capture.output(best <- stats::update(model, formula = formula, ...)) best } #' @importFrom insight find_response #' @keywords internal .reconstruct_formula <- function(parameters, model) { # # Clean # if (tail(parameters, 1) == "sigma") { # parameters <- parameters[1:length(parameters) - 1] # } # if (parameters[1] == "(Intercept)") { # parameters <- parameters[2:length(parameters)] # } # # # Detect interactions # interactions <- parameters[grepl(":", parameters)] # if (length(interactions) > 0) { # for (interaction in interactions) { # terms <- unlist(strsplit(interaction, ":", fixed = TRUE)) # if (length(terms) == 2) { # if (all(terms %in% parameters)) { # # replace interactions components by interactions # parameters <- parameters[!parameters %in% c(terms, interaction)] # parameters <- c(parameters, paste0(terms, collapse = " * ")) # } # } # } # } formula <- paste(parameters, collapse = " + ") formula <- paste(insight::find_response(model), "~", formula) formula } parameters/R/standard_error.R0000644000176200001440000007641413617512732015775 0ustar liggesusers#' Standard Errors #' #' \code{standard_error()} attempts to return standard errors of model parameters, while \code{standard_error_robust()} attempts to return robust standard errors. #' #' @param model A model. #' @param force Logical, if \code{TRUE}, factors are converted to numerical #' values to calculate the standard error, with the lowest level being the #' value \code{1} (unless the factor has numeric levels, which are converted #' to the corresponding numeric value). By default, \code{NA} is returned #' for factors or character vectors. #' @param method If \code{"robust"}, robust standard errors are computed #' by calling \code{\link[=standard_error_robust]{standard_error_robust()}}. #' \code{standard_error_robust()}, in turn, calls one of the \code{vcov*()}-functions #' from the \pkg{sandwich} or \pkg{clubSandwich} package for robust covariance #' matrix estimators. For certain mixed models, \code{method} may also be one #' of \code{"wald"}, \code{\link[=p_value_ml1]{"ml1"}}, \code{\link[=p_value_betwithin]{"betwithin"}}, #' \code{\link[=p_value_satterthwaite]{"satterthwaite"}} or \code{\link[=p_value_kenward]{"kenward"}}. #' @param verbose Toggle warnings on or off. #' @param ... Arguments passed to or from other methods. For \code{standard_error()}, #' if \code{method = "robust"}, arguments \code{vcov_estimation}, \code{vcov_type} #' and \code{vcov_args} can be passed down to \code{\link[=standard_error_robust]{standard_error_robust()}}. #' @param effects Should standard errors for fixed effects or random effects #' be returned? 
Only applies to mixed models. May be abbreviated. When #' standard errors for random effects are requested, for each grouping factor #' a list of standard errors (per group level) for random intercepts and slopes #' is returned. #' @inheritParams simulate_model #' #' @examples #' model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) #' standard_error(model) #' @return A data frame. #' @importFrom stats coef vcov setNames var na.omit #' @importFrom insight get_varcov print_color get_parameters find_parameters #' @importFrom utils capture.output #' @export standard_error <- function(model, ...) { UseMethod("standard_error") } # Standard objects --------------------------------------------------------- #' @rdname standard_error #' @export standard_error.factor <- function(model, force = FALSE, verbose = TRUE, ...) { if (force) { standard_error(as.numeric(model), ...) } else { if (verbose) { warning("Can't compute standard error of non-numeric variables.", call. = FALSE) } return(NA) } } #' @export standard_error.character <- standard_error.factor #' @export standard_error.numeric <- function(model, ...) { sqrt(stats::var(model, na.rm = TRUE) / length(stats::na.omit(model))) } #' @export standard_error.data.frame <- function(model, verbose = TRUE, ...) { unlist(sapply(model, standard_error, verbose = verbose)) } #' @export standard_error.list <- function(model, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") standard_error(model) } else { insight::print_color("\nCould not extract standard errors from model object.\n", "red") } } #' @export standard_error.table <- function(model, ...) { # compute standard error of proportions if (length(dim(model)) == 1) { total.n <- as.vector(sum(model)) rel.frq <- as.vector(model) / total.n out <- .data_frame( Value = names(model), Proportion = rel.frq, SE = suppressWarnings(sqrt(rel.frq * (1 - rel.frq) / total.n)) ) } else { out <- NA } out } #' @export standard_error.xtabs <- standard_error.table #' @importFrom insight print_color #' @export standard_error.effectsize_std_params <- function(model, ...) { se <- attr(model, "standard_error") if (is.null(se)) { insight::print_color("\nCould not extract standard errors of standardized coefficients.\n", "red") return(NULL) } out <- .data_frame( Parameter = model$Parameter, SE = as.vector(se) ) .remove_backticks_from_parameter_names(out) } # Default methods --------------------------------------------------------- #' @rdname standard_error #' @export standard_error.default <- function(model, method = NULL, ...) { if (!is.null(method)) { method <- tolower(method) } else { method <- "wald" } if (method == "robust") { standard_error_robust(model, ...) } else if (method == "ml1") { se_ml1(model) } else if (method == "betwithin") { se_betwithin(model) } else { se <- tryCatch( { if (grepl("^Zelig-", class(model)[1])) { if (!requireNamespace("Zelig", quietly = TRUE)) { stop("Package `Zelig` required. Please install", call. 
= FALSE) } unlist(Zelig::get_se(model)) } else { .get_se_from_summary(model) } }, error = function(e) { NULL } ) # if all fails, try to get se from varcov if (is.null(se)) { se <- tryCatch( { varcov <- insight::get_varcov(model) se <- sqrt(diag(varcov)) names(se) <- colnames(varcov) }, error = function(e) { NULL } ) } if (is.null(se)) { insight::print_color("\nCould not extract standard errors from model object.\n", "red") } else { .data_frame( Parameter = names(se), SE = as.vector(se) ) } } } #' @export standard_error.truncreg <- standard_error.default #' @export standard_error.lm_robust <- standard_error.default #' @export standard_error.censReg <- standard_error.default #' @export standard_error.geeglm <- standard_error.default #' @export standard_error.negbin <- standard_error.default #' @export standard_error.ivreg <- standard_error.default #' @export standard_error.LORgee <- standard_error.default #' @export standard_error.lme <- standard_error.default #' @export standard_error.gls <- standard_error.default #' @export standard_error.mlm <- function(model, ...) { cs <- stats::coef(summary(model)) se <- lapply(names(cs), function(x) { params <- cs[[x]] .data_frame( Parameter = rownames(params), SE = params[, "Std. Error"], Response = gsub("^Response (.*)", "\\1", x) ) }) .remove_backticks_from_parameter_names(do.call(rbind, se)) } #' @export standard_error.tobit <- function(model, ...) { params <- insight::get_parameters(model) std.error <- standard_error.default(model, ...) std.error[std.error$Parameter %in% params$Parameter, ] } # Methods that work like simple linear models ---------------------------------- #' @export standard_error.lm <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { standard_error_robust(model, ...) } else { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE), SE = .get_se_from_summary(model) ) } } #' @export standard_error.glm <- standard_error.lm # Mixed models --------------------------------------------------------------- #' @rdname standard_error #' @export standard_error.merMod <- function(model, effects = c("fixed", "random"), method = NULL, ...) { effects <- match.arg(effects) if (is.null(method)) method <- "wald" robust <- !is.null(method) && method == "robust" if (effects == "random") { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required to calculate standard errors for random effects. Please install it.") } rand.se <- lme4::ranef(model, condVar = TRUE) n.groupings <- length(rand.se) for (m in 1:n.groupings) { vars.m <- attr(rand.se[[m]], "postVar") K <- dim(vars.m)[1] J <- dim(vars.m)[3] names.full <- dimnames(rand.se[[m]]) rand.se[[m]] <- array(NA, c(J, K)) for (j in 1:J) { rand.se[[m]][j, ] <- sqrt(diag(as.matrix(vars.m[, , j]))) } dimnames(rand.se[[m]]) <- list(names.full[[1]], names.full[[2]]) } rand.se } else { if (isTRUE(robust)) { standard_error_robust(model, ...) 
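# (robust SEs delegate to standard_error_robust(), i.e. the vcov*()-estimators from the sandwich/clubSandwich packages)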
} else { # Classic and Satterthwaite SE if (method %in% c("wald", "satterthwaite")) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE), SE = .get_se_from_summary(model) ) # ml1 approx } else if (method == "ml1") { se_ml1(model) } else if (method == "betwithin") { se_betwithin(model) # Kenward approx } else if (method %in% c("kenward", "kr")) { se_kenward(model) } } } #' @rdname standard_error #' @export standard_error.glmmTMB <- function(model, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) effects <- match.arg(effects) if (effects == "random") { if (requireNamespace("TMB", quietly = TRUE) && requireNamespace("glmmTMB", quietly = TRUE)) { s1 <- TMB::sdreport(model$obj, getJointPrecision = TRUE) s2 <- sqrt(s1$diag.cov.random) rand.ef <- glmmTMB::ranef(model)[[1]] rand.se <- lapply(rand.ef, function(.x) { cnt <- nrow(.x) * ncol(.x) s3 <- s2[1:cnt] # consume this block, so the next grouping factor gets its own SEs s2 <<- s2[-(1:cnt)] # s3 already contains standard deviations (sqrt was applied above) d <- as.data.frame(matrix(s3, ncol = ncol(.x), byrow = TRUE)) colnames(d) <- colnames(.x) d }) rand.se } else { return(NULL) } } else { if (is.null(.check_component(model, component))) { return(NULL) } cs <- .compact_list(stats::coef(summary(model))) x <- lapply(names(cs), function(i) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = i, flatten = TRUE), SE = as.vector(cs[[i]][, 2]), Component = i ) }) se <- do.call(rbind, x) se$Component <- .rename_values(se$Component, "cond", "conditional") se$Component <- .rename_values(se$Component, "zi", "zero_inflated") .filter_component(se, component) } } #' @rdname standard_error #' @importFrom insight find_random #' @export standard_error.MixMod <- function(model, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) effects <- match.arg(effects) if (effects == "random") { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required to calculate standard errors for random effects. Please install it.") } rand.se <- lme4::ranef(model, post_vars = TRUE) vars.m <- attr(rand.se, "post_vars") all_names <- attributes(rand.se)$dimnames if (dim(vars.m[[1]])[1] == 1) { rand.se <- sqrt(unlist(vars.m)) } else { rand.se <- do.call( rbind, lapply(vars.m, function(.x) t(as.data.frame(sqrt(diag(.x))))) ) rownames(rand.se) <- all_names[[1]] colnames(rand.se) <- all_names[[2]] rand.se <- list(rand.se) names(rand.se) <- insight::find_random(model, flatten = TRUE) } rand.se } else { if (is.null(.check_component(model, component))) { return(NULL) } s <- summary(model) cs <- list(s$coef_table, s$coef_table_zi) names(cs) <- c("conditional", "zero_inflated") cs <- .compact_list(cs) x <- lapply(names(cs), function(i) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = i, flatten = TRUE), SE = as.vector(cs[[i]][, 2]), Component = i ) }) se <- do.call(rbind, x) .filter_component(se, component) } } # Zero-inflated models -------------------------------------------------------- #' @rdname standard_error #' @export standard_error.zeroinfl <- function(model, component = c("all", "conditional", "zi", "zero_inflated"), method = NULL, ...) 
{ component <- match.arg(component) if (is.null(.check_component(model, component))) { return(NULL) } robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error_robust(model, ...)) } cs <- .compact_list(stats::coef(summary(model))) x <- lapply(names(cs), function(i) { comp <- ifelse(i == "count", "conditional", "zi") stats <- cs[[i]] # remove log(theta) theta <- grepl("Log(theta)", rownames(stats), fixed = TRUE) if (any(theta)) { stats <- stats[!theta, ] } .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = comp, flatten = TRUE), SE = as.vector(stats[, 2]), Component = comp ) }) se <- do.call(rbind, x) se$Component <- .rename_values(se$Component, "cond", "conditional") se$Component <- .rename_values(se$Component, "zi", "zero_inflated") .filter_component(se, component) } #' @export standard_error.hurdle <- standard_error.zeroinfl #' @export standard_error.zerocount <- standard_error.zeroinfl # ANOVA --------------------------------------------------------------- #' @export standard_error.aov <- function(model, ...) { params <- model_parameters(model) data.frame( Parameter = params$Parameter, SE = params$SE, stringsAsFactors = FALSE ) } #' @export standard_error.anova <- standard_error.aov #' @export standard_error.aovlist <- standard_error.aov # Survey models --------------------------------------------------------------- #' @export standard_error.svyglm.nb <- function(model, ...) { if (!isNamespaceLoaded("survey")) { requireNamespace("survey", quietly = TRUE) } se <- sqrt(diag(stats::vcov(model, stderr = "robust"))) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export standard_error.svyglm.zip <- standard_error.svyglm.nb #' @export standard_error.svyglm <- function(model, ...) { cs <- stats::coef(summary(model)) se <- cs[, 2] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } # Survival Models ------------------------------------------------------- #' @export standard_error.coxme <- function(model, ...) { beta <- model$coefficients if (length(beta) > 0) { .data_frame( Parameter = .remove_backticks_from_string(names(beta)), SE = sqrt(diag(stats::vcov(model))) ) } } #' @rdname standard_error #' @export standard_error.coxph <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error_robust(model, ...)) } params <- insight::get_parameters(model) cs <- stats::coef(summary(model)) se <- cs[, 3] .data_frame( Parameter = params$Parameter, SE = as.vector(se) ) } #' @export standard_error.survreg <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error_robust(model, ...)) } s <- summary(model) se <- s$table[, 2] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export standard_error.flexsurvreg <- function(model, ...) { params <- insight::find_parameters(model, flatten = TRUE) se <- model$res[rownames(model$res) %in% params, "se"] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export standard_error.aareg <- function(model, ...) { s <- summary(model) se <- s$table[, "se(coef)"] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } # Ordinal Models --------------------------------------------------------- #' @export standard_error.multinom <- function(model, ...) 
{ se <- tryCatch( { stderr <- summary(model)$standard.errors if (is.null(stderr)) { vc <- insight::get_varcov(model) stderr <- as.vector(sqrt(diag(vc))) } else { if (is.matrix(stderr)) { tmp <- c() for (i in 1:nrow(stderr)) { tmp <- c(tmp, as.vector(stderr[i, ])) } } else { tmp <- as.vector(stderr) } stderr <- tmp } stderr }, error = function(e) { vc <- insight::get_varcov(model) as.vector(sqrt(diag(vc))) } ) params <- insight::get_parameters(model) if ("Response" %in% colnames(params)) { .data_frame( Parameter = params$Parameter, SE = se, Response = params$Response ) } else { .data_frame( Parameter = params$Parameter, SE = se ) } } #' @export standard_error.brmultinom <- standard_error.multinom #' @export standard_error.polr <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error_robust(model, ...)) } smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) se <- smry[[2]] names(se) <- rownames(smry) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @rdname standard_error #' @importFrom insight get_parameters #' @export standard_error.mixor <- function(model, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) stats <- model$Model[, "Std. Error"] parms <- get_parameters(model, effects = effects) .data_frame( Parameter = parms$Parameter, SE = stats[parms$Parameter], Effects = parms$Effects ) } #' @rdname standard_error #' @importFrom insight get_parameters #' @export standard_error.clm2 <- function(model, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) stats <- .get_se_from_summary(model) parms <- get_parameters(model, component = component) .data_frame( Parameter = parms$Parameter, SE = stats[parms$Parameter], Component = parms$Component ) } #' @export standard_error.clmm2 <- standard_error.clm2 #' @export standard_error.bracl <- function(model, ...) { smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) se <- smry[[2]] names(se) <- rownames(smry) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(se), Response = params$Response ) } # Other models --------------------------------------------------------------- #' @export standard_error.cgam <- function(model, ...) { sc <- summary(model) se <- as.vector(sc$coefficients[, "StdErr"]) params <- insight::get_parameters(model, component = "all") if (!is.null(sc$coefficients2)) se <- c(se, rep(NA, nrow(sc$coefficients2))) .data_frame( Parameter = params$Parameter, SE = se, Component = params$Component ) } #' @importFrom utils capture.output #' @export standard_error.cpglm <- function(model, ...) { if (!requireNamespace("cplm", quietly = TRUE)) { stop("To use this function, please install package 'cplm'.") } junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. Error"]) ) } #' @export standard_error.cpglmm <- function(model, ...) { if (!requireNamespace("cplm", quietly = TRUE)) { stop("To use this function, please install package 'cplm'.") } stats <- cplm::summary(model)$coefs params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. Error"]) ) } #' @export standard_error.rq <- function(model, ...) 
{ se <- tryCatch( { cs <- suppressWarnings(stats::coef(summary(model))) se_column <- intersect(c("Std Error", "Std. Error"), colnames(cs)) if (length(se_column)) { cs[, se_column] } else { vc <- insight::get_varcov(model) as.vector(sqrt(diag(vc))) } }, error = function(e) { vc <- insight::get_varcov(model) as.vector(sqrt(diag(vc))) } ) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = se ) } #' @export standard_error.crq <- standard_error.rq #' @export standard_error.nlrq <- standard_error.rq #' @export standard_error.rqss <- function(model, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) cs <- summary(model)$coef se_column <- intersect(c("Std Error", "Std. Error"), colnames(cs)) se <- cs[, se_column] params_cond <- insight::get_parameters(model, component = "conditional") params_smooth <- insight::get_parameters(model, component = "smooth_terms") out_cond <- .data_frame( Parameter = params_cond$Parameter, SE = se, Component = "conditional" ) out_smooth <- .data_frame( Parameter = params_smooth$Parameter, SE = NA, Component = "smooth_terms" ) switch( component, "all" = rbind(out_cond, out_smooth), "conditional" = out_cond, "smooth_terms" = out_smooth ) } #' @export standard_error.complmrob <- function(model, ...) { stats <- summary(model)$stats params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. Error"]) ) } #' @export standard_error.glmx <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = c(as.vector(stats$glm[, "Std. Error"]), as.vector(stats$extra[, "Std. Error"])), Component = params$Component ) } #' @export standard_error.fixest <- function(model, ...) { stats <- summary(model) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats$se) ) } #' @export standard_error.feglm <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. error"]) ) } #' @export standard_error.biglm <- function(model, ...) { cs <- summary(model)$mat params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(cs[, 4]) ) } #' @export standard_error.crch <- function(model, ...) { cs <- do.call(rbind, stats::coef(summary(model), model = "full")) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(cs[, 2]) ) } #' @export standard_error.gee <- function(model, method = NULL, ...) { cs <- stats::coef(summary(model)) robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { se <- as.vector(cs[, "Robust S.E."]) } else { se <- as.vector(cs[, "Naive S.E."]) } .data_frame(Parameter = .remove_backticks_from_string(rownames(cs)), SE = se) } #' @export standard_error.logistf <- function(model, ...) { utils::capture.output(s <- summary(model)) se <- sqrt(diag(s$var)) .data_frame( Parameter = .remove_backticks_from_string(names(s$coefficients)), SE = as.vector(se) ) } #' @export standard_error.glimML <- function(model, ...) { if (!requireNamespace("aod", quietly = TRUE)) { stop("Package 'aod' required for this function to work. 
Please install it.") } s <- methods::slot(aod::summary(model), "Coef") se <- s[, 2] .data_frame( Parameter = .remove_backticks_from_string(rownames(s)), SE = as.vector(se) ) } #' @export standard_error.lrm <- function(model, ...) { se <- sqrt(diag(stats::vcov(model))) # psm-models returns vcov-matrix w/o dimnames if (is.null(names(se))) names(se) <- names(stats::coef(model)) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export standard_error.ols <- standard_error.lrm #' @export standard_error.rms <- standard_error.lrm #' @export standard_error.psm <- standard_error.lrm #' @rdname standard_error #' @export standard_error.betareg <- function(model, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) params <- insight::get_parameters(model) cs <- do.call(rbind, stats::coef(summary(model))) se <- cs[, 2] out <- .data_frame( Parameter = .remove_backticks_from_string(names(se)), Component = params$Component, SE = as.vector(se) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @rdname standard_error #' @export standard_error.DirichletRegModel <- function(model, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) params <- insight::get_parameters(model) out <- .data_frame( Parameter = params$Parameter, Response = params$Response, SE = as.vector(model$se) ) if (!is.null(params$Component)) { out$Component <- params$Component } else { component <- "all" } if (component != "all") { out <- out[out$Component == component, ] } out } #' @export standard_error.gamlss <- function(model, ...) { parms <- insight::get_parameters(model) utils::capture.output(cs <- summary(model)) .data_frame( Parameter = parms$Parameter, SE = as.vector(cs[, 2]), Component = parms$Component ) } #' @export standard_error.plm <- function(model, ...) { se <- stats::coef(summary(model)) .data_frame( Parameter = .remove_backticks_from_string(names(se[, 2])), SE = as.vector(se[, 2]) ) } #' @export standard_error.gam <- function(model, ...) { p.table <- summary(model)$p.table s.table <- summary(model)$s.table n_cond <- nrow(p.table) n_smooth <- nrow(s.table) .data_frame( Parameter = .remove_backticks_from_string(c(rownames(p.table), rownames(s.table))), SE = c(as.vector(p.table[, 2]), rep(NA, n_smooth)), Component = c(rep("conditional", n_cond), rep("smooth_terms", n_smooth)) ) } #' @export standard_error.gamm <- function(model, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") standard_error(model) } #' @export standard_error.gamm4 <- standard_error.gamm #' @export standard_error.MCMCglmm <- function(model, ...) { nF <- model$Fixed$nfl parms <- as.data.frame(model$Sol[, 1:nF, drop = FALSE]) .data_frame( Parameter = .remove_backticks_from_string(colnames(parms)), SE = unname(sapply(parms, stats::sd)) ) } #' @export standard_error.BBmm <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE), SE = as.data.frame(summary(model)$fixed.coefficients)$StdErr ) } #' @export standard_error.BBreg <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE), SE = as.data.frame(summary(model)$coefficients)$StdErr ) } #' @export standard_error.wbm <- function(model, ...) 
{ s <- summary(model) se <- c( s$within_table[, "S.E."], s$between_table[, "S.E."], s$ints_table[, "S.E."] ) params <- insight::get_parameters(model, effects = "fixed") .data_frame( Parameter = params$Parameter, SE = as.vector(se), Component = params$Component ) } #' @export standard_error.wbgee <- standard_error.wbm #' @export standard_error.htest <- function(model, ...) { } #' @importFrom insight get_varcov #' @export standard_error.vglm <- function(model, ...) { se <- sqrt(diag(insight::get_varcov(model))) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @importFrom insight get_varcov #' @export standard_error.vgam <- function(model, ...) { params <- insight::get_parameters(model) se <- sqrt(diag(insight::get_varcov(model))) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se), Component = params$Component ) } #' @export standard_error.gmnl <- function(model, ...) { cs <- summary(model)$CoefTable se <- cs[, 2] pv <- .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) # rename intercepts intercepts <- grepl(":(intercept)", pv$Parameter, fixed = TRUE) pv$Parameter[intercepts] <- sprintf( "(Intercept: %s)", sub(":(intercept)", replacement = "", pv$Parameter[intercepts], fixed = TRUE) ) pv } #' @export standard_error.rma <- function(model, ...) { params <- insight::get_parameters(model) .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), SE = model[["se"]] ) } # helper ----------------------------------------------------------------- .get_se_from_summary <- function(model, component = NULL) { cs <- stats::coef(summary(model)) se <- NULL if (is.list(cs) && !is.null(component)) cs <- cs[[component]] if (!is.null(cs)) { # do we have a se column? se_col <- which(colnames(cs) == "Std. Error") # if not, default to 2 if (length(se_col) == 0) se_col <- 2 se <- as.vector(cs[, se_col]) if (is.null(names(se))) { coef_names <- rownames(cs) if (length(coef_names) == length(se)) names(se) <- coef_names } } names(se) <- .remove_backticks_from_string(names(se)) se } # .ranef_se <- function(x) { # if (!requireNamespace("lme4", quietly = TRUE)) { # stop("Package 'lme4' required for this function to work. Please install it by running `install.packages('lme4')`.") # } # # cc <- stats::coef(model) # # # get names of intercepts # inames <- names(cc) # # # variances of fixed effects # fixed.vars <- diag(as.matrix(stats::vcov(model))) # # # extract variances of conditional modes # r1 <- lme4::ranef(model, condVar = TRUE) # # # we may have multiple random intercepts, iterate all # se.merMod <- lapply(1:length(cc), function(i) { # cmode.vars <- t(apply(attr(r1[[i]], "postVar"), 3, diag)) # seVals <- sqrt(sweep(cmode.vars, 2, fixed.vars[names(r1[[i]])], "+", check.margin = FALSE)) # # if (length(r1[[i]]) == 1) { # seVals <- as.data.frame(t(seVals)) # stats::setNames(seVals, names(r1[[i]])) # } else { # seVals <- seVals[, 1:2] # stats::setNames(as.data.frame(seVals), names(r1[[i]])) # } # }) # # # set names of list # names(se.merMod) <- inames # # se.merMod # } parameters/R/describe_distribution.R0000644000176200001440000000615013616055631017330 0ustar liggesusers#' Describe a distribution #' #' This function describes a distribution by a set of indices (e.g., measures of centrality, dispersion, range, skewness, kurtosis). #' #' @param x A numeric vector. #' @param range Return the range (min and max). 
#' @param include_factors Logical, if \code{TRUE}, factors are included in the output; however, only the columns for the range (i.e. first and last factor levels) as well as for the number of valid and missing observations will contain information. #' @inheritParams bayestestR::point_estimate #' #' @return A data frame with columns that describe the properties of the variables. #' @examples #' describe_distribution(rnorm(100)) #' #' data(iris) #' describe_distribution(iris) #' describe_distribution(iris, include_factors = TRUE) #' @export describe_distribution <- function(x, centrality = "mean", dispersion = TRUE, range = TRUE, ...) { UseMethod("describe_distribution") } #' @importFrom stats na.omit #' @export describe_distribution.numeric <- function(x, centrality = "mean", dispersion = TRUE, range = TRUE, ...) { out <- data.frame(.temp = 0) # Missing n_missing <- sum(is.na(x)) x <- stats::na.omit(x) # Point estimates out <- cbind( out, bayestestR::point_estimate(x, centrality = centrality, dispersion = dispersion, ...) ) # Range if (range) { out <- cbind( out, data.frame( Min = min(x, na.rm = TRUE), Max = max(x, na.rm = TRUE) ) ) } # Skewness out <- cbind( out, data.frame( Skewness = skewness(x), Kurtosis = kurtosis(x) ) ) out$n <- length(x) out$n_Missing <- n_missing out$`.temp` <- NULL class(out) <- unique(c("parameters_distribution", class(out))) attr(out, "data") <- x out } #' @export describe_distribution.factor <- function(x, centrality = "mean", dispersion = TRUE, range = TRUE, ...) { # Missing n_missing <- sum(is.na(x)) x <- stats::na.omit(x) out <- data.frame( Mean = NA, SD = NA, Min = levels(x)[1], Max = levels(x)[nlevels(x)], Skewness = NA, Kurtosis = NA, n = length(x), n_Missing = n_missing, stringsAsFactors = FALSE ) if (!dispersion) { out$SD <- NULL } if (!range) { out$Min <- NULL out$Max <- NULL } out } #' @rdname describe_distribution #' @export describe_distribution.data.frame <- function(x, centrality = "mean", dispersion = TRUE, range = TRUE, include_factors = FALSE, ...) { out <- do.call(rbind, lapply(x, function(i) { if ((include_factors && is.factor(i)) || (!is.character(i) && !is.factor(i))) { describe_distribution(i, centrality = centrality, dispersion = dispersion, range = range) } })) out$Variable <- row.names(out) row.names(out) <- NULL out <- out[c("Variable", setdiff(colnames(out), "Variable"))] class(out) <- unique(c("parameters_distribution", class(out))) attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500) out } #' @export print.parameters_distribution <- function(x, ...) { cat(insight::format_table(x)) } parameters/R/random_parameters.R0000644000176200001440000001415713617510313016454 0ustar liggesusers#' @title Summary information from random effects #' @name random_parameters #' #' @description This function extracts the different variance components of a #' mixed model and returns the result as a data frame. #' #' @param model A mixed effects model (including \code{stanreg} models). #' #' @return A data frame with random effects statistics for the variance components, #' including the number of levels per random-effect group, as well as the number of #' complete observations in the model. #' #' @details The variance components are obtained from \code{\link[insight]{get_variance}} #' and are denoted as follows: #' \subsection{Within-group (or residual) variance}{ #' The residual variance, \ifelse{html}{\out{&sigma;<sup>2</sup><sub>&epsilon;</sub>}}{\eqn{\sigma^2_\epsilon}}, #' is the sum of the distribution-specific variance and the variance due to additive dispersion. #' It indicates the \emph{within-group variance}. 
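#' (For linear mixed models, this corresponds to \code{sigma(model)^2}.)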
#' } #' \subsection{Between-group random intercept variance}{ #' The random intercept variance, or \emph{between-group} variance #' for the intercept (\ifelse{html}{\out{&tau;<sub>00</sub>}}{\eqn{\tau_{00}}}), #' is obtained from \code{VarCorr()}. It indicates how much groups #' or subjects differ from each other. #' } #' \subsection{Between-group random slope variance}{ #' The random slope variance, or \emph{between-group} variance #' for the slopes (\ifelse{html}{\out{&tau;<sub>11</sub>}}{\eqn{\tau_{11}}}) #' is obtained from \code{VarCorr()}. This measure is only available #' for mixed models with random slopes. It indicates how much groups #' or subjects differ from each other according to their slopes. #' } #' \subsection{Random slope-intercept correlation}{ #' The random slope-intercept correlation #' (\ifelse{html}{\out{&rho;<sub>01</sub>}}{\eqn{\rho_{01}}}) #' is obtained from \code{VarCorr()}. This measure is only available #' for mixed models with random intercepts and slopes. #' } #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' random_parameters(model) #' } #' @export random_parameters <- function(model) { out <- .randomeffects_summary(model) class(out) <- c("parameters_random", class(out)) out } # helper ----------------------------------- .n_randomeffects <- function(model) { sapply(insight::get_data(model)[insight::find_random(model, split_nested = TRUE, flatten = TRUE)], function(i) length(unique(i[!is.na(i)]))) } #' @importFrom insight find_random get_variance find_random_slopes n_obs .randomeffects_summary <- function(model) { out <- list() re_variances <- suppressWarnings(insight::get_variance(model)) model_re <- insight::find_random(model, split_nested = FALSE, flatten = TRUE) model_rs <- unlist(insight::find_random_slopes(model)) if (length(re_variances) && sum(!is.na(re_variances)) > 0 && !is.null(re_variances)) { # Residual Variance (Sigma^2) out$Sigma2 <- re_variances$var.residual # Random Intercept Variance var_intercept <- as.list(re_variances$var.intercept) names(var_intercept) <- paste0("tau00_", names(re_variances$var.intercept)) out <- c(out, var_intercept) # Random Slope Variance if (!.is_empty_object(re_variances$var.slope) && !.is_empty_object(model_rs)) { var_slope <- as.list(re_variances$var.slope) names(var_slope) <- paste0("tau11_", names(re_variances$var.slope)) out <- c(out, var_slope) } # Slope-Intercept Correlation if (!.is_empty_object(re_variances$cor.slope_intercept) && !.is_empty_object(model_rs)) { cor_slope_intercept <- as.list(re_variances$cor.slope_intercept) names(cor_slope_intercept) <- paste0("rho01_", model_re, ".", model_rs) out <- c(out, cor_slope_intercept) } } # Number of levels per random-effect group n_re <- as.list(.n_randomeffects(model)) names(n_re) <- paste0("N_", names(n_re)) out <- c(out, n_re) # number of observations out$Observations <- insight::n_obs(model) # make nice data frame out <- as.data.frame(do.call(rbind, out), stringsAsFactors = FALSE) out$Description <- rownames(out) rownames(out) <- NULL colnames(out) <- c("Value", "Description") # Additional information out$Component <- "" out$Component[out$Description == "Sigma2"] <- "sigma2" out$Component[grepl("^tau00_", out$Description)] <- "tau00" out$Component[grepl("^tau11_", out$Description)] <- "tau11" out$Component[grepl("^rho01_", out$Description)] <- "rho01" # extract grouping terms out$Term <- "" out$Term[out$Component == "tau00"] <- gsub("^tau00_(.*)", "\\1", out$Description[out$Component == "tau00"]) 
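# the same prefix-stripping recovers the grouping term for random-slope variances (tau11) and slope-intercept correlations (rho01):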
out$Term[out$Component == "tau11"] <- gsub("^tau11_(.*)", "\\1", out$Description[out$Component == "tau11"]) out$Term[out$Component == "rho01"] <- gsub("^rho01_(.*)", "\\1", out$Description[out$Component == "rho01"]) # renaming out$Type <- "" # Within-Group Variance out$Type[out$Description == "Sigma2"] <- "" out$Description[out$Description == "Sigma2"] <- "Within-Group Variance" # Between-Group Variance out$Type[grepl("^tau00_", out$Description)] <- "Random Intercept" out$Description <- gsub("^tau00_(.*)", "Between-Group Variance", out$Description) out$Type[grepl("^tau11_", out$Description)] <- "Random Slope" out$Description <- gsub("^tau11_(.*)", "Between-Group Variance", out$Description) # correlations out$Type[grepl("^rho01_", out$Description)] <- "" out$Description <- gsub("^rho01_(.*)", "Correlations", out$Description) out$Type[grepl("N_(.*)", out$Description)] <- "" out$Term[grepl("N_(.*)", out$Description)] <- gsub("N_(.*)", "\\1", out$Description[grepl("N_(.*)", out$Description)]) out$Description <- gsub("_(.*)", "", out$Description) out$Type[grepl("^X", out$Description)] <- "" out$Description[grepl("^X", out$Description)] <- NA out$Component[out$Component == ""] <- NA out$Term[out$Term == ""] <- NA out[c("Description", "Component", "Type", "Term", "Value")] } parameters/R/ci_kenward.R0000644000176200001440000000045013611011136015037 0ustar liggesusers#' @rdname p_value_kenward #' @export ci_kenward <- function(model, ci = .95) { out <- lapply(ci, function(i) { .ci_wald(model = model, ci = i, dof = Inf, effects = "fixed", component = "all", method = "kenward") }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/principal_components.R0000644000176200001440000002470013617565646017215 0ustar liggesusers#' Principal Component Analysis (PCA) #' #' This function performs a principal component analysis (PCA) and returns the loadings as a dataframe. #' #' @param x A dataframe or a statistical model. #' @param n Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link{n_factors}}. In \code{\link{reduce_parameters}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable. #' @param rotation If not \code{"none"}, the PCA will be computed using the \pkg{psych} package. Possible options include \code{"varimax"}, \code{"quartimax"}, \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, and \code{"cluster"}. See \code{\link[psych]{fa}} for details. #' @param sort Sort the loadings. #' @param threshold A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure). #' @param standardize A logical value indicating whether the variables should be standardized (centred and scaled) to have unit variance before the analysis takes place (in general, such scaling is advisable). #' @param ... Arguments passed to or from other methods. #' #' @details #' \subsection{Complexity}{ #' Complexity represents the number of latent components needed to account #' for the observed variables. 
Whereas a perfect simple structure solution #' has a complexity of 1 in that each item would only load on one factor, #' a solution with evenly distributed items has a complexity greater than 1 #' (\cite{Hofmann, 1978; Pettersson and Turkheimer, 2010}). #' } #' \subsection{Uniqueness}{ #' Uniqueness represents the variance that is 'unique' to the variable and #' not shared with other variables. It is equal to \code{1 - communality} #' (variance that is shared with other variables). A uniqueness of \code{0.20} #' suggests that 20\% of that variable's variance is not shared with other #' variables in the overall factor model. The greater the 'uniqueness', the lower #' the relevance of the variable in the factor model. #' } #' \subsection{MSA}{ #' MSA represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy #' (\cite{Kaiser and Rice, 1974}) for each item. It indicates whether there #' is enough data for each factor to give reliable results for the PCA. The #' value should be > 0.6, and desirable values are > 0.8 #' (\cite{Tabachnick and Fidell, 2013}). #' } #' \subsection{PCA or FA?}{ #' There is a simplified rule of thumb that may help decide whether to run #' a factor analysis or a principal component analysis: #' \itemize{ #' \item Run factor analysis if you assume or wish to test a theoretical model of latent factors causing observed variables. #' \item Run principal component analysis if you simply want to reduce your correlated observed variables to a smaller set of important independent composite variables. #' } #' (Source: \href{https://stats.stackexchange.com/q/1576/54740}{CrossValidated}) #' } #' #' @note There is a \code{summary()}-method that prints the Eigenvalues and (explained) variance for each extracted component. \code{closest_component()} will return a numeric vector with the assigned component index for each column from the original data frame. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examples #' library(parameters) #' #' principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) #' principal_components(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) #' principal_components(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) #' #' pca <- principal_components(mtcars[, 1:5], n = 2, rotation = "varimax") #' summary(pca) #' predict(pca) #' #' # which variables from the original data belong to which extracted component? #' closest_component(pca) #' #' \donttest{ #' # Automated number of components #' principal_components(mtcars[, 1:4], n = "auto") #' } #' #' @return A data frame of loadings. #' @references \itemize{ #' \item Kaiser, H.F., and Rice, J. (1974). Little Jiffy, Mark IV. Educational and Psychological Measurement, 34(1):111-117 #' \item Hofmann, R. (1978). Complexity and simplicity as objective indices descriptive of factor solutions. Multivariate Behavioral Research, 13:2, 247-250, \doi{10.1207/s15327906mbr1302_9} #' \item Pettersson, E., & Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of Research in Personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} #' \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics (6th ed.). Boston: Pearson Education. 
#' } #' @importFrom stats prcomp #' @export principal_components <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ...) { UseMethod("principal_components") } #' @rdname principal_components #' @export closest_component <- function(x) { attributes(x)$closest_component } #' @importFrom stats prcomp na.omit setNames #' @export principal_components.data.frame <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ...) { # save name of data set data_name <- deparse(substitute(x)) # original data original_data <- x # remove missings x <- stats::na.omit(x) # PCA model <- stats::prcomp(x, retx = TRUE, center = TRUE, scale. = standardize, ...) # N factors n <- .get_n_factors(x, n = n, type = "PCA", rotation = rotation) # Rotation if (rotation != "none") { loadings <- .pca_rotate(x, n, rotation = rotation, sort = sort, threshold = threshold, original_data = original_data, ...) attr(loadings, "data") <- data_name return(loadings) } # Re-add centers and scales if (standardize) { model$center <- attributes(x)$center model$scale <- attributes(x)$scale } # Summary (cumulative variance etc.) eigenvalues <- model$sdev^2 data_summary <- .data_frame( Component = sprintf("PC%i", seq_len(length(model$sdev))), Eigenvalues = eigenvalues, Variance = eigenvalues / sum(eigenvalues), Variance_Cumulative = cumsum(eigenvalues / sum(eigenvalues)) ) data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) model$sdev <- model$sdev[1:n] model$rotation <- model$rotation[, 1:n, drop = FALSE] model$x <- model$x[, 1:n, drop = FALSE] data_summary <- data_summary[1:n, , drop = FALSE] # Compute loadings if (length(model$sdev) > 1) { loadings <- as.data.frame(model$rotation %*% diag(model$sdev)) } else { loadings <- as.data.frame(model$rotation %*% model$sdev) } names(loadings) <- data_summary$Component # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Add information loading_cols <- 2:(n + 1) loadings$Complexity <- (apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 / apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4)) # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "model") <- model attr(loadings, "rotation") <- "none" attr(loadings, "scores") <- model$x attr(loadings, "standardize") <- standardize attr(loadings, "additional_arguments") <- list(...) attr(loadings, "n") <- n attr(loadings, "type") <- "prcomp" attr(loadings, "loadings_columns") <- loading_cols # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Replace by NA all cells below threshold if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold) # here we match the original columns in the data set with the assigned components # for each variable, so we know which column in the original data set belongs # to which extracted component... 
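# e.g., a named vector like c(mpg = 1, cyl = 1, ..., qsec = 2); these values are illustrative only, the actual mapping depends on the data, n and rotation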
attr(loadings, "closest_component") <- .closest_component(loadings, loadings_columns = loading_cols, variable_names = colnames(x)) attr(loadings, "data") <- data_name attr(loadings, "data_set") <- original_data # add class-attribute for printing class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings))) loadings } #' @keywords internal .get_n_factors <- function(x, n = NULL, type = "PCA", rotation = "varimax", ...) { # N factors if (is.null(n) || n == "auto") { n <- as.numeric(n_factors(x, type = type, rotation = rotation, ...)) } else if (n == "all") { n <- ncol(x) - 1 } else if (n >= ncol(x)) { n <- ncol(x) - 1 } n } #' @keywords internal .pca_rotate <- function(x, n, rotation, sort = FALSE, threshold = NULL, original_data = NULL, ...) { if (!(rotation %in% c("varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster", "none"))) { stop("`rotation` must be one of \"varimax\", \"quartimax\", \"promax\", \"oblimin\", \"simplimax\", \"cluster\" or \"none\".") } if (!inherits(x, c("prcomp", "data.frame"))) { stop("`x` must be of class `prcomp` or a data frame.", call. = FALSE) } if (!inherits(x, "data.frame") && rotation != "varimax") { stop(sprintf("`x` must be a data frame for `%s`-rotation.", rotation), call. = FALSE) } # rotate loadings if (!requireNamespace("psych", quietly = TRUE)) { stop(sprintf("Package `psych` required for `%s`-rotation.", rotation), call. = FALSE) } pca <- psych::principal(x, nfactors = n, rotate = rotation, ...) msa <- psych::KMO(x) attr(pca, "MSA") <- msa$MSAi out <- model_parameters(pca, sort = sort, threshold = threshold) attr(out, "data_set") <- original_data out } .closest_component <- function(loadings, loadings_columns, variable_names) { component_columns <- apply(loadings[loadings_columns], 1, function(i) which.max(abs(i))) stats::setNames(component_columns, variable_names) }parameters/R/format_rope.R0000644000176200001440000000116613607421327015267 0ustar liggesusers#' Percentage in ROPE formatting #' #' @param rope_percentage Value or vector of percentages in ROPE. #' @inheritParams format_p #' #' @return A formatted string. #' #' @examples #' format_rope(c(0.02, 0.12, 0.357, 0)) #' format_rope(c(0.02, 0.12, 0.357, 0), name = NULL) #' @importFrom insight format_value #' @export format_rope <- function(rope_percentage, name = "in ROPE") { text <- ifelse(rope_percentage == 0, "0%", ifelse(rope_percentage == 1, "100%", paste0(insight::format_value(rope_percentage * 100), "%") ) ) if (!is.null(name)) { text <- paste(text, name) } text } parameters/R/model_parameters.zeroinfl.R0000644000176200001440000000356213613634253020127 0ustar liggesusers#' Parameters from Zero-Inflated Models #' #' Parameters from zero-inflated models. #' #' @param model A model with zero-inflation component. #' @inheritParams model_parameters.default #' @inheritParams simulate_model #' #' @seealso \code{\link[=standardize_names]{standardize_names()}} to rename #' columns into a consistent, standardized naming scheme. #' #' @examples #' library(parameters) #' if (require("pscl")) { #' data("bioChemists") #' model <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @inheritParams simulate_model #' @export model_parameters.zeroinfl <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated"), standardize = NULL, exponentiate = FALSE, robust = FALSE, ...) 
{ component <- match.arg(component) # fix argument, if model has no zi-part if (!insight::model_info(model)$is_zero_inflated && component != "conditional") { component <- "conditional" } # Processing if (bootstrap) { parameters <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) } else { parameters <- .extract_parameters_generic(model, ci = ci, component = component, standardize = standardize, robust = robust, ...) } if (exponentiate) parameters <- .exponentiate_parameters(parameters) parameters <- .add_model_parameters_attributes(parameters, model, ci, exponentiate, ...) attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.hurdle <- model_parameters.zeroinfl #' @export model_parameters.zerocount <- model_parameters.zeroinfl parameters/R/ci_ml1.R0000644000176200001440000000043413611011143014100 0ustar liggesusers#' @rdname p_value_ml1 #' @export ci_ml1 <- function(model, ci = .95) { out <- lapply(ci, function(i) { .ci_wald(model = model, ci = i, effects = "fixed", component = "all", dof = Inf, method = "ml1") }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/convert_data_to_numeric.R0000644000176200001440000000366113565032021017641 0ustar liggesusers#' Convert data to numeric #' #' Convert data to numeric by converting characters to factors and factors to either numeric levels or dummy variables. #' #' @param x A data frame or a vector. #' @param dummy_factors Transform factors to dummy factors (all factor levels as different columns filled with a binary 0-1 value). #' @param ... Arguments passed to or from other methods. #' #' @examples #' head(convert_data_to_numeric(iris)) #' @return A data frame of numeric variables. #' @importFrom stats model.matrix #' @export convert_data_to_numeric <- function(x, dummy_factors = TRUE, ...) { UseMethod("convert_data_to_numeric") } #' @rdname convert_data_to_numeric #' @export data_to_numeric <- convert_data_to_numeric #' @export convert_data_to_numeric.data.frame <- function(x, dummy_factors = TRUE, ...) { out <- sapply(x, convert_data_to_numeric, dummy_factors = dummy_factors, simplify = FALSE) as.data.frame(do.call(cbind, out)) } #' @export convert_data_to_numeric.numeric <- function(x, ...) { as.numeric(x) } #' @export convert_data_to_numeric.double <- convert_data_to_numeric.numeric #' @export convert_data_to_numeric.logical <- convert_data_to_numeric.numeric #' @importFrom stats model.matrix #' @export convert_data_to_numeric.factor <- function(x, dummy_factors = TRUE, ...) { if (dummy_factors) { out <- as.data.frame(stats::model.matrix(~x, contrasts.arg = list(x = "contr.treatment"))) out[1] <- as.numeric(rowSums(out[2:ncol(out)]) == 0) names(out) <- levels(x) } else { out <- as.numeric(x) } out } #' @export convert_data_to_numeric.character <- function(x, dummy_factors = FALSE, ...) { nums <- grepl("[-]?[0-9]+[.]?[0-9]*|[-]?[0-9]+[L]?|[-]?[0-9]+[.]?[0-9]*[eE][0-9]+", x) if (all(nums)) { # all values look like numbers, so coerce the values themselves out <- as.numeric(x) } else { # otherwise, treat the character vector like a factor out <- convert_data_to_numeric(as.factor(x), dummy_factors = dummy_factors) } out } parameters/R/model_parameters.bayesian.R0000644000176200001440000001323213611425471020063 0ustar liggesusers#' Parameters from Bayesian Models #' #' Parameters of Bayesian models. #' #' @param model Bayesian model.
#' @inheritParams model_parameters.default #' @inheritParams bayestestR::describe_posterior #' #' @seealso \code{\link[parameters:standardize_names]{standardize_names()}} to rename #' columns into a consistent, standardized naming scheme. #' #' @examples #' \donttest{ #' library(parameters) #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(Sepal.Length ~ Petal.Length * Species, #' data = iris, iter = 500, refresh = 0 #' ) #' model_parameters(model) #' } #' } #' @return A data frame of indices related to the model's parameters. #' @importFrom insight get_priors #' @inheritParams insight::get_parameters #' @export model_parameters.stanreg <- function(model, centrality = "median", dispersion = FALSE, ci = .89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 1.0, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, iterations = 1000, effects = "fixed", ...) { # Processing parameters <- .extract_parameters_bayesian(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, iterations = iterations, effects = effects, ...) if (effects == "fixed") { attr(parameters, "pretty_names") <- format_parameters(model) } else { parameters <- .add_pretty_names(parameters, model, effects = effects, component = NULL) } attr(parameters, "ci") <- ci attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.stanmvreg <- function(model, centrality = "median", dispersion = FALSE, ci = .89, ci_method = "hdi", test = "pd", rope_range = "default", rope_ci = 1.0, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, iterations = 1000, effects = "fixed", ...) { # Processing parameters <- .extract_parameters_bayesian(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, iterations = iterations, effects = effects, ...) parameters$Parameter <- gsub("^(.*)\\|(.*)", "\\2", parameters$Parameter) if (effects == "fixed") { attr(parameters, "pretty_names") <- format_parameters(model) } else { parameters <- .add_pretty_names(parameters, model, effects = effects, component = NULL) } attr(parameters, "ci") <- ci attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @rdname model_parameters.stanreg #' @inheritParams insight::get_parameters #' @export model_parameters.brmsfit <- function(model, centrality = "median", dispersion = FALSE, ci = .89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 1.0, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, iterations = 1000, effects = "fixed", component = "all", ...) { # Processing parameters <- .extract_parameters_bayesian(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, iterations = iterations, effects = effects, component = component, ...) 
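# pretty names can only be matched for the plain fixed effects of the
# conditional component; for random effects or other model components,
# we fall back on the cleaned parameter names below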
if (effects == "fixed" && component == "conditional") { attr(parameters, "pretty_names") <- format_parameters(model) } else { parameters <- .add_pretty_names(parameters, model, effects = effects, component = component) } attr(parameters, "ci") <- ci attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.MCMCglmm <- function(model, centrality = "median", dispersion = FALSE, ci = .89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 1.0, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, iterations = 1000, ...) { # Processing parameters <- .extract_parameters_bayesian(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, iterations = iterations, ...) attr(parameters, "pretty_names") <- format_parameters(model) attr(parameters, "ci") <- ci attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.mcmc <- function(model, centrality = "median", dispersion = FALSE, ci = .89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 1.0, iterations = 1000, ...) { # Processing parameters <- .extract_parameters_bayesian(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = NULL, diagnostic = NULL, priors = FALSE, iterations = iterations, ...) attr(parameters, "ci") <- ci attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } parameters/R/format_parameters.R0000644000176200001440000001545013615056553016472 0ustar liggesusers#' Parameter names formatting #' #' @param model A statistical model. #' #' @examples #' library(parameters) #' #' model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) #' format_parameters(model) #' #' model <- lm(Sepal.Length ~ Species / Petal.Length, data = iris) #' format_parameters(model) #' #' model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris) #' format_parameters(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) #' format_parameters(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) #' format_parameters(model) #' @return The formatted parameter names. 
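#'   For most models, this is a named character vector, where the names
#'   correspond to the original parameter names and the values to the
#'   prettified labels.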
#' @importFrom utils tail head #' @export format_parameters <- function(model) { UseMethod("format_parameters") } #' @export format_parameters.default <- function(model) { original_names <- names <- insight::find_parameters(model, effects = "fixed", flatten = TRUE) info <- insight::model_info(model) # quick fix, for multivariate response models, we use # info from first model only if (insight::is_multivariate(model)) { info <- info[[1]] } # special handling hurdle- and zeroinfl-models --------------------- if (info$is_zero_inflated | info$is_hurdle) { names <- gsub("^(count_|zero_)", "", names) } # special handling bracl --------------------- if (inherits(model, "bracl")) { names <- gsub("(.*):(.*)", "\\2", names) } # special handling DirichletRegModel --------------------- dirich_names <- NULL if (inherits(model, "DirichletRegModel")) { cf <- stats::coef(model) if (model$parametrization == "common") { pattern <- paste0("(", paste(model$varnames, collapse = "|"), ")\\.(.*)") dirich_names <- names <- gsub(pattern, "\\2", names(unlist(cf))) } else { dirich_names <- names <- gsub("(.*)\\.(.*)\\.(.*)", "\\3", names(unlist(cf))) } original_names <- names } # remove "as.factor()", "log()" etc. from parameter names names <- .clean_parameter_names(names) # Type-specific changes types <- parameters_type(model) if (is.null(types)) { return(NULL) } types$Parameter <- .clean_parameter_names(types$Parameter, full = TRUE) # hurdle- and zeroinfl-models if (info$is_zero_inflated | info$is_hurdle) { types$Parameter <- gsub("^(count_|zero_)", "", types$Parameter) } # special handling DirichletRegModel if (inherits(model, "DirichletRegModel") && !is.null(dirich_names)) { types$Parameter <- dirich_names } for (i in 1:nrow(types)) { name <- types$Parameter[i] # No interaction if (!types$Type[i] %in% c("interaction", "nested")) { type <- types[i, ] names[i] <- .format_parameter(name, variable = type$Variable, type = type$Type, level = type$Level) # Interaction or nesting } else { components <- unlist(strsplit(name, ":", fixed = TRUE)) is_nested <- types$Type[i] %in% "nested" for (j in 1:length(components)) { if (components[j] %in% types$Parameter) { type <- types[types$Parameter == components[j], ] ## TODO check if this is ok... # for models with multiple response categories, we might have the same # variable for each response, thus we have multiple rows here, # where only one row is required. if (nrow(type) > 1) type <- type[1, ] components[j] <- .format_parameter(components[j], variable = type$Variable, type = type$Type, level = type$Level) } else if (components[j] %in% types$Secondary_Parameter) { type <- types[!is.na(types$Secondary_Parameter) & types$Secondary_Parameter == components[j], ] components[j] <- .format_parameter(components[j], variable = type[1, ]$Secondary_Variable, type = type[1, ]$Secondary_Type, level = type[1, ]$Secondary_Level) } } names[i] <- .format_interaction(components, type = types[i, "Type"], is_nested = is_nested) } } # "types$Parameter" here is cleaned, i.e. patterns like "log()", "as.factor()" # etc. are removed. However, these patterns are needed in "parameters_table()", # code-line x$Parameter <- attributes(x)$pretty_names[x$Parameter] # when we use "types$Parameter" here, matching of pretty names does not work, # so output will be NA or blank fields... Thus, I think we should use # the original parameter names here.
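# (e.g. c(Speciesversicolor = "Species [versicolor]"), so downstream code
# can look up the pretty label by the original parameter name)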
names(names) <- original_names # types$Parameter names } #' @export format_parameters.rma <- function(model) { params <- insight::find_parameters(model, flatten = TRUE) names(params) <- params params } #' @export format_parameters.parameters_model <- function(model) { if (!is.null(attributes(model)$pretty_names)) { model$Parameter <- attributes(model)$pretty_names[model$Parameter] } model } # Utilities --------------------------------------------------------------- #' @keywords internal .format_parameter <- function(name, variable, type, level) { # Factors if (type == "factor") { name <- .format_factor(name = name, variable = variable) } # Polynomials if (type %in% c("poly", "poly_raw")) { name <- .format_poly(name = name, variable = variable, type = type, degree = level) } # Splines if (type == "spline") { name <- .format_poly(name = name, variable = variable, type = type, degree = level) } # log-transformation if (type == "logarithm") { name <- .format_log(name = name, variable = variable, type = type) } # As Is if (type == "asis") { name <- variable } # Smooth if (type == "smooth") { name <- gsub("^smooth_(.*)\\[(.*)\\]", "\\2", name) name <- gsub("s(", "Smooth term (", name, fixed = TRUE) } name } #' @importFrom utils tail head #' @keywords internal .format_interaction <- function(components, type, is_nested = FALSE) { sep <- ifelse(is_nested, " : ", " * ") if (length(components) > 2) { if (type == "interaction") { components <- paste0("(", paste0(utils::head(components, -1), collapse = " * "), ")", sep, utils::tail(components, 1)) } else { components <- paste0(components, collapse = sep) } } else { components <- paste0(components, collapse = sep) } components } #' @keywords internal .format_factor <- function(name, variable) { level <- gsub(variable, "", name) paste0(variable, " [", level, "]") } #' @keywords internal .format_poly <- function(name, variable, type, degree) { paste0(variable, " [", format_order(as.numeric(degree), textual = FALSE), " degree]") } #' @keywords internal .format_log <- function(name, variable, type) { paste0(variable, " [", gsub("(.*)\\((.*)\\)", "\\1", name), "]") } parameters/R/p_value_satterthwaite.R0000644000176200001440000000434313613027776017360 0ustar liggesusers#' Satterthwaite approximation for SEs, CIs and p-values #' #' An approximate F-test based on the Satterthwaite (1946) approach. #' #' @param model A statistical model. #' @param dof Degrees of Freedom. #' @inheritParams ci.merMod #' #' @details Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics. Unlike simpler approximation heuristics #' like the "m-l-1" rule (\code{dof_ml1}), the Satterthwaite approximation is #' also applicable in more complex multilevel designs. However, the "m-l-1" #' heuristic also applies to generalized mixed models, while approaches like #' Kenward-Roger or Satterthwaite are limited to linear mixed models only. #' #' @seealso \code{dof_satterthwaite()} and \code{se_satterthwaite()} are small helper-functions #' to calculate approximated degrees of freedom and standard errors for model #' parameters, based on the Satterthwaite (1946) approach.
#' \cr \cr #' \code{\link[=dof_kenward]{dof_kenward()}} and \code{\link[=dof_ml1]{dof_ml1()}} #' approximate degrees of freedom based on Kenward-Roger's method or the "m-l-1" rule. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_satterthwaite(model) #' } #' } #' @return A data frame. #' @references Satterthwaite FE (1946) An approximate distribution of estimates of variance components. Biometrics Bulletin 2 (6):110–4. #' @importFrom stats pt coef #' @export p_value_satterthwaite <- function(model, dof = NULL) { UseMethod("p_value_satterthwaite") } #' @export p_value_satterthwaite.lmerMod <- function(model, dof = NULL) { if (is.null(dof)) { dof <- dof_satterthwaite(model) } .p_value_dof(model, dof) } #' @export p_value_satterthwaite.lme <- function(model, dof = NULL) { if (is.null(dof)) { dof <- dof_satterthwaite(model) } .p_value_dof(model, dof) } #' @export p_value_satterthwaite.gls <- p_value_satterthwaite.lmeparameters/R/bootstrap_model.R0000644000176200001440000000755413615056552016151 0ustar liggesusers#' Model bootstrapping #' #' Bootstrap a statistical model n times to return a data frame of estimates. #' #' @param model Statistical model. #' @param iterations The number of draws to simulate/bootstrap. #' @param verbose Logical; if \code{FALSE} (the default), messages that may occur when re-fitting the model are suppressed. #' @param ... Arguments passed to or from other methods. #' #' @return A data frame. #' #' @seealso \code{\link{bootstrap_parameters}}, \code{\link{simulate_model}}, \code{\link{simulate_parameters}} #' #' @examples #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' head(bootstrap_model(model)) #' @export bootstrap_model <- function(model, iterations = 1000, verbose = FALSE, ...) { UseMethod("bootstrap_model") } #' @rdname bootstrap_model #' @export model_bootstrap <- bootstrap_model #' @importFrom stats coef update setNames #' @importFrom insight get_data find_parameters get_parameters #' @export bootstrap_model.default <- function(model, iterations = 1000, verbose = FALSE, ...) { if (!requireNamespace("boot", quietly = TRUE)) { stop("Package 'boot' needed for this function to work. Please install it.") } data <- insight::get_data(model) boot_function <- function(model, data, indices) { d <- data[indices, ] # allows boot to select sample if (inherits(model, "biglm")) { fit <- suppressMessages(stats::update(model, moredata = d)) } else { if (verbose) { fit <- stats::update(model, data = d) } else { fit <- suppressMessages(stats::update(model, data = d)) } } params <- insight::get_parameters(fit) params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector return(params) } results <- boot::boot(data = data, statistic = boot_function, R = iterations, model = model) out <- as.data.frame(results$t) names(out) <- insight::get_parameters(model)$Parameter out } #' @export bootstrap_model.merMod <- function(model, iterations = 1000, verbose = FALSE, ...) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required for this function to work. Please install it by running `install.packages('lme4')`.") } boot_function <- function(model) { params <- insight::get_parameters(model) params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector return(params) } if (verbose) { results <- lme4::bootMer(model, boot_function, nsim = iterations, ...) } else { results <- suppressMessages(lme4::bootMer(model, boot_function, nsim = iterations, ...))
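# note: by default, bootMer() draws parametric bootstrap samples, i.e.
# new response values are simulated from the fitted model and the model
# is then re-fit to each simulated data set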
} out <- as.data.frame(results$t) names(out) <- insight::find_parameters(model, effects = "fixed")$conditional out } # bootstrap_model.htest <- function(model, n = 1000, verbose = FALSE, ...) { # data <- insight::get_data(model) # # boot_function <- function(model, data, indices) { # d <- data[indices, ] # allows boot to select sample # # if (verbose) { # fit <- suppressMessages(update(model, data = d)) # } else { # fit <- update(model, data = d) # } # # return(model$estimate) # } # # results <- boot::boot(data = data, statistic = boot_function, R = n, model = model) # # return(results) # } #' @export as.data.frame.lm <- function(x, row.names = NULL, optional = FALSE, iterations = 1000, verbose = FALSE, ...) { bootstrap_model(x, iterations = iterations, verbose = verbose, ...) } #' @export as.data.frame.merMod <- function(x, row.names = NULL, optional = FALSE, iterations = 1000, verbose = FALSE, ...) { bootstrap_model(x, iterations = iterations, verbose = verbose, ...) } #' @export as.data.frame.glmmTMB <- function(x, row.names = NULL, optional = FALSE, iterations = 1000, verbose = FALSE, ...) { bootstrap_model(x, iterations = iterations, verbose = verbose, ...) } parameters/R/se_satterthwaite.R0000644000176200001440000000153113613027752016326 0ustar liggesusers#' @rdname p_value_satterthwaite #' @importFrom stats qnorm #' @importFrom insight get_parameters #' @export se_satterthwaite <- function(model) { UseMethod("se_satterthwaite") } #' @export se_satterthwaite.default <- function(model) { standard_error(model) } #' @importFrom stats setNames #' @export se_satterthwaite.lme <- function(model) { if (!requireNamespace("lavaSearch2", quietly = TRUE)) { stop("Package `lavaSearch2` required for Satterthwaite approximation.", call. = FALSE) } params <- insight::get_parameters(model, effects = "fixed") lavaSearch2::sCorrect(model) <- TRUE s <- lavaSearch2::summary2(model) data.frame( Parameter = params$Parameter, SE = as.vector(s$tTable[, "Std.Error"]), stringsAsFactors = FALSE ) } #' @export se_satterthwaite.gls <- se_satterthwaite.lme parameters/R/n_clusters.R0000644000176200001440000001440613617565603015143 0ustar liggesusers#' Number of clusters to extract #' #' This function runs many existing procedures for determining how many clusters are present in your data. It returns the number of clusters based on the maximum consensus. In case of ties, it will select the solution with the less clusters. #' #' @inheritParams check_clusterstructure #' @param force Logical, if \code{TRUE}, factors are converted to numerical #' values in order to be included in the data for determining the number of #' clusters. By default, factors are removed, because most methods that determine #' the number of clusters need numeric input only. #' @param package These are the packages from which methods are used to determine the number of clusters. Can be \code{"all"} or a vector containing \code{"NbClust"}, \code{"mclust"}, \code{"cluster"} and \code{"M3C"}. #' @param fast If \code{FALSE}, will compute 4 more indices (sets \code{index = "allong"} in \code{NbClust}). This has been deactivated by default as it is computationally heavy. #' #' @note There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
#' #' @examples #' library(parameters) #' \donttest{ #' n_clusters(iris[, 1:4], package = c("NbClust", "mclust", "cluster")) #' } #' @export n_clusters <- function(x, standardize = TRUE, force = FALSE, package = c("NbClust", "mclust", "cluster", "M3C"), fast = TRUE, ...) { if (all(package == "all")) { package <- c("NbClust", "mclust", "cluster", "M3C") } # convert factors to numeric if (force) { factors <- sapply(x, function(i) is.character(i) | is.factor(i)) if (any(factors)) x[factors] <- sapply(x[factors], .factor_to_numeric) } # remove all missing values from data, only use numerics x <- stats::na.omit(as.data.frame(x[sapply(x, is.numeric)])) if (standardize) { x <- as.data.frame(scale(x)) } out <- data.frame() if ("nbclust" %in% tolower(package)) { out <- rbind(out, .n_clusters_NbClust(x, fast = fast)) } if ("mclust" %in% tolower(package)) { out <- rbind(out, .n_clusters_mclust(x)) } if ("cluster" %in% tolower(package)) { out <- rbind(out, .n_clusters_cluster(x)) } if ("M3C" %in% tolower(package)) { out <- rbind(out, .n_clusters_M3C(x, fast = fast)) } out <- out[order(out$n_Clusters), ] # Arrange by n clusters row.names(out) <- NULL # Reset row index out$Method <- as.character(out$Method) # Remove duplicate methods starting with the smallest dupli <- c() for (i in 1:nrow(out)) { if (i > 1 && out[i, "Method"] %in% out$Method[1:i - 1]) { dupli <- c(dupli, i) } } if (!is.null(dupli)) { out <- out[-dupli, ] } # Add summary by_clusters <- .data_frame( n_Clusters = as.numeric(unique(out$n_Clusters)), n_Methods = as.numeric(by(out, as.factor(out$n_Clusters), function(out) n <- nrow(out))) ) attr(out, "summary") <- by_clusters attr(out, "n") <- min(as.numeric(as.character(by_clusters[by_clusters$n_Methods == max(by_clusters$n_Methods), c("n_Clusters")]))) class(out) <- c("n_clusters", "see_n_clusters", class(out)) out } #' @keywords internal .n_clusters_mclust <- function(x, ...) { if (!requireNamespace("mclust", quietly = TRUE)) { stop("Package 'mclust' required for this function to work. Please install it by running `install.packages('mclust')`.") } mclustBIC <- mclust::mclustBIC # this is needed as it is internally required by the following function BIC <- mclust::mclustBIC(x, verbose = FALSE) out <- data.frame(unclass(BIC)) n <- which(out == max(out, na.rm = TRUE), arr.ind = TRUE)[1] data.frame(n_Clusters = n, Method = "Mixture", Package = "mclust") } #' @importFrom utils capture.output #' @keywords internal .n_clusters_cluster <- function(x, ...) { if (!requireNamespace("cluster", quietly = TRUE)) { stop("Package 'cluster' required for this function to work. Please install it by running `install.packages('cluster')`.") } # listwise deletion of missing x <- stats::na.omit(x) # Gap Statistic for Estimating the Number of Clusters junk <- utils::capture.output(gap <- cluster::clusGap(x, kmeans, K.max = 10, B = 100)$Tab) # Gap Statistic for Estimating the Number of Clusters n <- cluster::maxSE(f = gap[, "gap"], SE.f = gap[, "SE.sim"], method = "Tibs2001SEmax", SE.factor = 1) data.frame(n_Clusters = n, Method = "Tibs2001SEmax", Package = "cluster") } #' @importFrom grDevices png dev.off #' @keywords internal .n_clusters_NbClust <- function(x, fast = TRUE, ...) { if (!requireNamespace("NbClust", quietly = TRUE)) { stop("Package 'NbClust' required for this function to work. 
Please install it by running `install.packages('NbClust')`.") } # Run the function and suppress output and automatic plotting ff <- tempfile() grDevices::png(filename = ff) if (fast) { indices <- "all" } else { indices <- "allong" } junk <- utils::capture.output(n <- NbClust::NbClust(x, min.nc = 2, max.nc = 9, method = "ward.D2", index = indices)) grDevices::dev.off() unlink(ff) out <- as.data.frame(t(n$Best.nc)) data.frame(n_Clusters = out$Number_clusters, Method = row.names(out), Package = "NbClust") } #' @keywords internal .n_clusters_M3C <- function(x, fast=TRUE, ...) { if (!requireNamespace("M3C", quietly = TRUE)) { stop("Package 'M3C' required for this function to work. Please install it from Bioconductor by first running `install.packages(\"BiocManager\")`, then `BiocManager::install(\"M3C\")`.") # Not on CRAN (but on github and bioconductor) } data <- data.frame(t(x)) colnames(data) <- paste0('x', seq(1, ncol(data))) # Add columns names as required by the package suppressMessages(out <- M3C::M3C(data, method=2)) out <- data.frame(n_Clusters = which.max(out$scores$PCSI), Method = "Consensus clustering algorithm (penalty term)", Package = "M3C") # Doesn't work # if (fast == FALSE){ # suppressMessages(out <- M3C::M3C(data, method=1)) # out <- rbind(out, data.frame(n_Clusters = which.max(out$scores$RCSI), Method = "Consensus clustering algorithm (Monte Carlo)", Package = "M3C")) # } out } parameters/R/rescale_weights.R0000644000176200001440000001766013611426036016125 0ustar liggesusers#' @title Rescale design weights for multilevel analysis #' @name rescale_weights #' #' @description Most functions to fit multilevel and mixed effects models only #' allow to specify frequency weights, but not design (i.e. sampling or probability) #' weights, which should be used when analyzing complex samples and survey data. #' \code{rescale_weights()} implements an algorithm proposed by \cite{Asparouhov (2006)} #' and \cite{Carle (2009)} to rescale design weights in survey data to account for #' the grouping structure of multilevel models, which then can be used for #' multilevel modelling. #' #' @param data A data frame. #' @param group Variable names (as character vector), indicating the grouping #' structure (strata) of the survey data (level-2-cluster variable). It is #' also possible to create weights for multiple group variables; in such cases, #' each created weighting variable will be suffixed by the name of the group #' variable. #' @param probability_weights Variable indicating the probability (design or sampling) #' weights of the survey data (level-1-weight). #' @param nest Logical, if \code{TRUE} and \code{group} indicates at least two #' group variables, then groups are "nested", i.e. groups are now a combination #' from each group level of the variables in \code{group}. #' #' @return \code{data}, including the new weighting variables: \code{pweights_a} and \code{pweights_b}, which represent the rescaled design weights to use in multilevel models (use these variables for the \code{weights} argument). #' #' @details Rescaling is based on two methods: For \code{pweights_a}, the sample #' weights \code{probability_weights} are adjusted by a factor that represents the proportion #' of group size divided by the sum of sampling weights within each group. #' The adjustment factor for \code{pweights_b} is the sum of sample weights #' within each group divided by the sum of squared sample weights within #' each group (see \cite{Carle (2009)}, Appendix B). 
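#' Expressed as (pseudo-)formulas, with \code{w_ij} denoting the design weight
#' of observation \code{i} in group \code{j}, and all sums taken within group
#' \code{j}: \code{pweights_a = w_ij * n_j / sum(w_ij)} and
#' \code{pweights_b = w_ij * sum(w_ij) / sum(w_ij^2)}.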
#' \cr \cr #' Regarding the choice between scaling methods A and B, Carle suggests #' that "analysts who wish to discuss point estimates should report results #' based on weighting method A. For analysts more interested in residual #' between-group variance, method B may generally provide the least biased #' estimates". In general, it is recommended to fit a non-weighted model #' and weighted models with both scaling methods and when comparing the #' models, see whether the "inferential decisions converge", to gain #' confidence in the results. #' \cr \cr #' Though the bias of scaled weights decreases with increasing group size, #' method A is preferred when insufficient or low group size is a concern. #' \cr \cr #' The group ID and probably PSU may be used as random effects (e.g. #' nested design, or group and PSU as varying intercepts), depending #' on the survey design that should be mimicked. #' #' @references \itemize{ #' \item Carle A.C. (2009). Fitting multilevel models in complex survey data with design weights: Recommendations. BMC Medical Research Methodology 9(49): 1-13 #' \item Asparouhov T. (2006). General Multi-Level Modeling with Sampling Weights. Communications in Statistics - Theory and Methods 35: 439-460 #' } #' #' @examples #' if (require("sjstats")) { #' data(nhanes_sample, package = "sjstats") #' head(rescale_weights(nhanes_sample, "SDMVSTRA", "WTINT2YR")) #' #' # also works with multiple group-variables... #' head(rescale_weights(nhanes_sample, c("SDMVSTRA", "SDMVPSU"), "WTINT2YR")) #' #' # or nested structures. #' x <- rescale_weights( #' data = nhanes_sample, #' group = c("SDMVSTRA", "SDMVPSU"), #' probability_weights = "WTINT2YR", #' nest = TRUE #' ) #' head(x) #' } #' #' if (require("lme4") && require("sjstats")) { #' data(nhanes_sample, package = "sjstats") #' nhanes_sample <- rescale_weights(nhanes_sample, "SDMVSTRA", "WTINT2YR") #' glmer( #' total ~ factor(RIAGENDR) * (log(age) + factor(RIDRETH1)) + (1 | SDMVPSU), #' family = poisson(), #' data = nhanes_sample, #' weights = pweights_a #' ) #' } #' @export rescale_weights <- function(data, group, probability_weights, nest = FALSE) { # check if weight has missing values; we need to remove them first, # and add back weights to correct cases later weight_missings <- which(is.na(data[[probability_weights]])) weight_non_na <- which(!is.na(data[[probability_weights]])) if (length(weight_missings) > 0) { data_tmp <- data[weight_non_na, ] } else { data_tmp <- data } # sort id data_tmp$.bamboozled <- 1:nrow(data_tmp) if (nest && length(group) < 2) { warning(sprintf("Only one group variable selected, no nested structure possible. Rescaling weights for group '%s' now.", group), call.
= FALSE) nest <- FALSE } if (nest) { out <- .rescale_weights_nested(data_tmp, group, probability_weights, nrow(data), weight_non_na) } else { out <- lapply(group, function(i) { x <- .rescale_weights(data_tmp, i, probability_weights, nrow(data), weight_non_na) if (length(group) > 1) { colnames(x) <- sprintf(c("pweight_a_%s", "pweight_b_%s"), i) } x }) } do.call(cbind, list(data, out)) } # rescale weights, for one or more group variables ---------------------------- .rescale_weights <- function(x, group, probability_weights, n, weight_non_na) { # compute sum of weights per group design_weights <- data.frame( group = sort(unique(x[[group]])), sum_weights_by_group = tapply(x[[probability_weights]], as.factor(x[[group]]), sum), sum_squared_weights_by_group = tapply(x[[probability_weights]]^2, as.factor(x[[group]]), sum), n_per_group = as.vector(table(x[[group]])), stringsAsFactors = FALSE ) colnames(design_weights)[1] <- group x <- merge(x, design_weights, by = group, sort = FALSE) # restore original order x <- x[order(x$.bamboozled), ] x$.bamboozled <- NULL # multiply the original weight by the fraction of the # sampling unit total population based on Carle 2009 w_a <- x[[probability_weights]] * x$n_per_group / x$sum_weights_by_group w_b <- x[[probability_weights]] * x$sum_weights_by_group / x$sum_squared_weights_by_group out <- data.frame( pweights_a = rep(as.numeric(NA), times = n), pweights_b = rep(as.numeric(NA), times = n) ) out$pweights_a[weight_non_na] <- w_a out$pweights_b[weight_non_na] <- w_b out } # rescale weights, for nested groups ---------------------------- .rescale_weights_nested <- function(x, group, probability_weights, n, weight_non_na) { groups <- expand.grid(lapply(group, function(i) sort(unique(x[[i]])))) colnames(groups) <- group # compute sum of weights per group design_weights <- cbind( groups, data.frame( sum_weights_by_group = unlist(as.list(tapply(x[[probability_weights]], lapply(group, function(i) as.factor(x[[i]])), sum))), sum_squared_weights_by_group = unlist(as.list(tapply(x[[probability_weights]]^2, lapply(group, function(i) as.factor(x[[i]])), sum))), n_per_group = unlist(as.list(table(x[, group]))), stringsAsFactors = FALSE ) ) x <- merge(x, design_weights, by = group, sort = FALSE) # restore original order x <- x[order(x$.bamboozled), ] x$.bamboozled <- NULL # multiply the original weight by the fraction of the # sampling unit total population based on Carle 2009 w_a <- x[[probability_weights]] * x$n_per_group / x$sum_weights_by_group w_b <- x[[probability_weights]] * x$sum_weights_by_group / x$sum_squared_weights_by_group out <- data.frame( pweights_a = rep(as.numeric(NA), times = n), pweights_b = rep(as.numeric(NA), times = n) ) out$pweights_a[weight_non_na] <- w_a out$pweights_b[weight_non_na] <- w_b out } parameters/R/utils_model_parameters.R0000644000176200001440000000427513615527105017521 0ustar liggesusers#' @keywords internal .add_model_parameters_attributes <- function(parameters, model, ci, exponentiate = FALSE, ...) 
{ dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x) info <- insight::model_info(model) attr(parameters, "pretty_names") <- format_parameters(model) attr(parameters, "ci") <- ci attr(parameters, "exponentiate") <- exponentiate attr(parameters, "ordinal_model") <- info$is_ordinal | info$is_multinomial attr(parameters, "model_class") <- class(model) if (inherits(model, c("rma", "rma.uni"))) { attr(parameters, "data") <- insight::get_data(model) attr(parameters, "study_weights") <- 1 / model$vi } if ("digits" %in% names(dot.arguments)) { attr(parameters, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(parameters, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(parameters, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(parameters, "ci_digits") <- 2 } if ("p_digits" %in% names(dot.arguments)) { attr(parameters, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(parameters, "p_digits") <- 3 } parameters } #' @keywords internal .exponentiate_parameters <- function(parameters) { columns <- grepl(pattern = "^(Coefficient|Std_Coefficient|CI_)", colnames(parameters)) if (any(columns)) parameters[columns] <- exp(parameters[columns]) parameters } #' @importFrom insight clean_parameters .add_pretty_names <- function(parameters, model, effects = NULL, component = NULL) { attr(parameters, "model_class") <- class(model) clean_params <- insight::clean_parameters(model) if (is.null(effects)) { effects <- "fixed" } else if (effects == "all") { effects <- c("fixed", "random") } if (is.null(component)) { component <- "conditional" } else if (component == "all") { component <- c("conditional", "zi", "zero_inflated", "dispersion") } clean_params <- clean_params[clean_params$Component %in% component & clean_params$Effects %in% effects, ] attr(parameters, "cleaned_parameters") <- clean_params$Cleaned_Parameter parameters } parameters/R/data_partition.R0000644000176200001440000000372613610642113015750 0ustar liggesusers#' Partition data into a test and a training set #' #' Creates a training and a test set based on a dataframe. Can also be stratified (i.e., evenly spread a given factor) using the \code{group} argument. #' #' @param x A data frame, or an object that can be coerced to a data frame. #' @param training_proportion The proportion (between 0 and 1) of the training set. The remaining part will be used for the test set. #' @param group A character vector indicating the name(s) of the column(s) used for stratified partitioning. #' #' @return A list of two data frames, named \code{test} and \code{training}. 
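#'
#' @details The rows are assigned to the training and test set at random
#'   (via \code{sample()}), so the partition differs between calls; use
#'   \code{set.seed()} beforehand if a reproducible split is needed.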
#' #' @examples #' df <- iris #' df$Smell <- rep(c("Strong", "Light"), 75) #' #' head(data_partition(df)) #' head(data_partition(df, group = "Species")) #' head(data_partition(df, group = c("Species", "Smell"))) #' @export data_partition <- function(x, training_proportion = 0.7, group = NULL) { if (!is.data.frame(x)) { x <- tryCatch( expr = { as.data.frame(x) }, error = function(e) { NULL } ) if (is.null(x)) { stop("`x` needs to be a data frame, or an object that can be coerced to a data frame.") } } training <- data.frame() test <- data.frame() if (!is.null(group)) { for (i in split(x, x[group])) { out <- .data_partition(i, training_proportion) training <- rbind(training, i[out$training, ]) test <- rbind(test, i[out$test, ]) } } else { out <- .data_partition(x, training_proportion) training <- rbind(training, x[out$training, ]) test <- rbind(test, x[out$test, ]) } list( training = training, test = test ) } #' @keywords internal .data_partition <- function(x, training_proportion = 0.8) { training_indices <- sample(1:nrow(x), size = training_proportion * nrow(x)) test_indices <- (1:nrow(x))[-training_indices] list( training = training_indices, test = test_indices ) } parameters/R/format_pd.R0000644000176200001440000000157313607421321014721 0ustar liggesusers#' Probability of direction (pd) formatting #' #' @param pd Probability of direction (pd). #' @inheritParams format_p #' #' @return A formatted string. #' @examples #' format_pd(0.12) #' format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), name = NULL) #' format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), stars = TRUE) #' @importFrom insight format_value #' @export format_pd <- function(pd, stars = FALSE, stars_only = FALSE, name = "pd") { text <- ifelse(pd >= 1, "= 100%***", ifelse(pd > 0.999, paste0("= ", insight::format_value(pd * 100), "%***"), ifelse(pd > 0.99, paste0("= ", insight::format_value(pd * 100), "%**"), ifelse(pd > 0.97, paste0("= ", insight::format_value(pd * 100), "%*"), paste0("= ", insight::format_value(pd * 100), "%") ) ) ) ) .add_prefix_and_remove_stars(text, stars, stars_only, name) } parameters/R/ci_betwithin.R0000644000176200001440000000045613614067274015430 0ustar liggesusers#' @rdname p_value_betwithin #' @export ci_betwithin <- function(model, ci = .95) { out <- lapply(ci, function(i) { .ci_wald(model = model, ci = i, effects = "fixed", component = "all", dof = Inf, method = "betwithin") }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/model_parameters_mixed.R0000644000176200001440000001451113614227067017464 0ustar liggesusers#' Parameters from Mixed Models #' #' Parameters from (linear) mixed models. #' #' @param model A mixed model. #' @param effects Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated. #' @param details Logical, if \code{TRUE}, a summary of the random effects is included. See \code{\link{random_parameters}} for details. #' @inheritParams model_parameters.default #' @param df_method Method for computing degrees of freedom for p values, standard errors and confidence intervals (CI). May be \code{"wald"} (default, see \code{\link{degrees_of_freedom}}), \code{"ml1"} (see \code{\link{dof_ml1}}), \code{"betwithin"} (see \code{\link{dof_betwithin}}), \code{"satterthwaite"} (see \code{\link{dof_satterthwaite}}) or \code{"kenward"} (see \code{\link{dof_kenward}}). Note that when \code{df_method} is not \code{"wald"}, robust standard errors etc. cannot be computed. 
#' #' @seealso \code{\link[=standardize_names]{standardize_names()}} to rename #' columns into a consistent, standardized naming scheme. #' #' @examples #' library(parameters) #' if (require("lme4") && require("glmmTMB")) { #' model <- lmer(mpg ~ wt + (1 | gear), data = mtcars) #' model_parameters(model) #' #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' model_parameters(model, details = TRUE) #' } #' \donttest{ #' if (require("lme4")) { #' model <- lmer(mpg ~ wt + (1 | gear), data = mtcars) #' model_parameters(model, bootstrap = TRUE, iterations = 50) #' } #' } #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.merMod <- function(model, ci = .95, bootstrap = FALSE, df_method = "wald", iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, details = FALSE, ...) { # p-values, CI and se might be based on Wald or Kenward-Roger approximation df_method <- tolower(df_method) df_method <- match.arg(df_method, choices = c("wald", "ml1", "betwithin", "satterthwaite", "kenward")) # Processing if (bootstrap) { parameters <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) } else { parameters <- .extract_parameters_mixed(model, ci = ci, df_method = df_method, robust = robust, standardize = standardize, ...) } if (exponentiate) parameters <- .exponentiate_parameters(parameters) parameters <- .add_model_parameters_attributes(parameters, model, ci, exponentiate, ...) if (isTRUE(details)) { attr(parameters, "details") <- .randomeffects_summary(model) } attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.lme <- model_parameters.merMod # Mixed Models with zero inflation ------------------------------------ #' @inheritParams simulate_model #' @rdname model_parameters.merMod #' @export model_parameters.glmmTMB <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated"), standardize = NULL, exponentiate = FALSE, df_method = NULL, details = FALSE, ...) { component <- match.arg(component) # p-values, CI and se might be based on different df-methods if (!is.null(df_method)) { df_method <- tolower(df_method) df_method <- match.arg(df_method, choices = c("wald", "ml1", "betwithin")) } # fix argument, if model has no zi-part if (!insight::model_info(model)$is_zero_inflated && component != "conditional") { component <- "conditional" } # Processing if (bootstrap) { parameters <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) } else { parameters <- .extract_parameters_generic(model, ci = ci, component = component, standardize = standardize, robust = FALSE, df_method = df_method, ...) } if (exponentiate) parameters <- .exponentiate_parameters(parameters) parameters <- .add_model_parameters_attributes(parameters, model, ci, exponentiate, ...)
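# if requested, attach a summary of the random effect variances, which is
# then included in the printed output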
if (isTRUE(details)) { attr(parameters, "details") <- .randomeffects_summary(model) } attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.MixMod <- model_parameters.glmmTMB # other mixed models ------------------------------- #' @rdname model_parameters.merMod #' @export model_parameters.mixor <- function(model, ci = .95, effects = c("all", "fixed", "random"), bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, details = FALSE, ...) { effects <- match.arg(effects) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Effects"), standardize = standardize, exponentiate = exponentiate, effects = effects, robust = FALSE, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) if (isTRUE(details)) { attr(out, "details") <- .randomeffects_summary(model) } out } #' @rdname model_parameters.merMod #' @export model_parameters.clmm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, details = FALSE, df_method = NULL, ...) { # p-values, CI and se might be based on different df-methods if (!is.null(df_method)) { df_method <- tolower(df_method) df_method <- match.arg(df_method, choices = c("wald", "ml1", "betwithin")) } out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, effects = "fixed", robust = FALSE, df_method = df_method, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) if (isTRUE(details)) { attr(out, "details") <- .randomeffects_summary(model) } out } #' @export model_parameters.cpglmm <- model_parameters.clmm #' @export model_parameters.rlmerMod <- model_parameters.clmm parameters/R/utils_cleaners.R0000644000176200001440000000510313563573272015761 0ustar liggesusers#' @keywords internal .clean_parameter_names <- function(x, full = FALSE) { # return if x is empty if (is.null(x) || length(x) == 0) { return("") } # here we need to capture only those patterns that we do *not* want to format # in a particular style. However, these patterns will not be shown in the output # from "model_parameters()". If certain patterns contain useful information, # remove them here and clean/prepare them in ".parameters_type_basic()". # for formatting / printing, refer to ".format_parameter()".
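# examples: "as.factor(cyl)8" becomes "cyl8"; with full = TRUE, wrappers
# like "scale(wt)" or "sqrt(hp)" are stripped as well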
pattern <- if (full) { c( "as.factor", "as.numeric", "factor", "offset", "lag", "diff", "catg", "asis", "matrx", "pol", "strata", "strat", "scale", "scored", "interaction", "sqrt", "lsp", "pb", "lo", "t2", "te", "ti", "tt", "mi", "mo", "gp" ) } else { c("as.factor", "as.numeric", "factor", "catg", "asis", "interaction") } for (j in 1:length(pattern)) { # remove possible namespace x <- sub("(.*)::(.*)", "\\2", x) if (pattern[j] == "offset") { x <- trimws(sub("offset\\(([^-+ )]*)\\)(.*)", "\\1\\2", x)) } else if (pattern[j] == "I") { if (full) { x <- trimws(sub("I\\(((\\w|\\.)*).*", "\\1", x)) } else { x <- trimws(sub("I\\((.*)\\)(.*)", "\\1", x)) } } else { p <- paste0(pattern[j], "\\(((\\w|\\.)*)\\)(.*)") x <- trimws(sub(p, "\\1\\3", x)) } } gsub("`", "", x, fixed = TRUE) } #' @keywords internal .clean_confint <- function(ci) { estimate_row <- grep(pattern = "^estimate", x = rownames(ci), ignore.case = TRUE) if (length(estimate_row)) { ci <- ci[-estimate_row, ] } zi_col <- grep(pattern = "^zi\\.", x = colnames(ci), ignore.case = TRUE) if (length(zi_col)) { ci <- ci[, -zi_col, drop = FALSE] } colnames(ci) <- gsub("cond.", "", colnames(ci), fixed = TRUE) ci } #' @keywords internal .remove_backticks_from_string <- function(x) { if (is.character(x)) { x <- gsub("`", "", x, fixed = TRUE) } x } #' @keywords internal .remove_backticks_from_parameter_names <- function(x) { if (is.data.frame(x) && "Parameter" %in% colnames(x)) { x$Parameter <- gsub("`", "", x$Parameter, fixed = TRUE) } x } #' @keywords internal .intercepts <- function() { c("(intercept)_zi", "intercept (zero-inflated)", "intercept", "zi_intercept", "(intercept)", "b_intercept", "b_zi_intercept") } #' @keywords internal .in_intercepts <- function(x) { tolower(x) %in% .intercepts() | grepl("^intercept", tolower(x)) } parameters/R/simulate_parameters.R0000644000176200001440000000746313617565551017037 0ustar liggesusers#' Simulate Model Parameters #' #' Compute simulated draws of parameters and their related indices such as Confidence Intervals (CI) and p-values. Simulating parameter draws can be seen as a (computationally faster) alternative to bootstrapping. #' #' @inheritParams simulate_model #' @inheritParams bayestestR::describe_posterior #' #' @return A data frame with simulated parameters. #' #' @references Gelman A, Hill J. Data analysis using regression and multilevel/hierarchical models. Cambridge; New York: Cambridge University Press 2007: 140-143 #' #' @seealso \code{\link{bootstrap_model}}, \code{\link{bootstrap_parameters}}, \code{\link{simulate_model}} #' #' @note There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details #' \subsection{Technical Details}{ #' \code{simulate_model()} is a computationally faster alternative #' to \code{bootstrap_model()}. Simulated draws for coefficients are based #' on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean #' \code{mu = coef(model)} and variance \code{Sigma = vcov(model)}. #' } #' \subsection{Models with Zero-Inflation Component}{ #' For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and #' \pkg{countreg}, the \code{component} argument can be used to specify #' which parameters should be simulated. For all other models, parameters #' from the conditional component (fixed effects) are simulated. This may #' include smooth terms, but not random effects. 
#' } #' #' @examples #' library(parameters) #' library(glmmTMB) #' #' model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) #' simulate_parameters(model) #' #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' simulate_parameters(model, centrality = "mean") #' simulate_parameters(model, ci = c(.8, .95), component = "zero_inflated") #' @importFrom bayestestR describe_posterior #' @importFrom tools toTitleCase #' @export simulate_parameters <- function(model, ...) { UseMethod("simulate_parameters") } #' @rdname simulate_parameters #' @export parameters_simulate <- simulate_parameters #' @rdname simulate_parameters #' @export simulate_parameters.default <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap(data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ...) params <- insight::get_parameters(model) if ("Effects" %in% colnames(params) && length(unique(params$Effects)) > 1) { out$Effects <- params$Effects } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "iterations") <- iterations out } #' @export simulate_parameters.multinom <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap(data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ...) params <- insight::get_parameters(model) out$Parameter <- params$Parameter if ("Response" %in% colnames(params)) { out$Response <- params$Response } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "iterations") <- iterations out } parameters/R/model_parameters.psych.R0000644000176200001440000001635713612621074017423 0ustar liggesusers#' Parameters from Structural Models (PCA, EFA, ...) #' #' Format structural models from the \pkg{psych} or \pkg{FactoMineR} packages. #' #' @param model PCA or FA created by the \pkg{psych} or \pkg{FactoMineR} packages (e.g. through \code{psych::principal}, \code{psych::fa} or \code{psych::omega}). #' @inheritParams principal_components #' @param labels A character vector containing labels to be added to the loadings data. Usually, the question related to the item. #' @param ... Arguments passed to or from other methods. #' #' @details #' For the structural models obtained with \pkg{psych}, the following indices are present: #' \itemize{ #' \item \strong{Complexity} (\cite{Hoffman's, 1978; Pettersson and Turkheimer, 2010}) represents the number of latent components needed to account for the observed variables. Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1. #' \item \strong{Uniqueness} represents the variance that is 'unique' to the variable and not shared with other variables. It is equal to \code{1 – communality} (variance that is shared with other variables). A uniqueness of \code{0.20} suggests that 20\% of that variable's variance is not shared with other variables in the overall factor model.
The greater the 'uniqueness', the lower the relevance of the variable in the factor model. #' \item \strong{MSA} represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates whether there is enough data for each factor to give reliable results for the PCA. The value should be > 0.6, and desirable values are > 0.8 (\cite{Tabachnick and Fidell, 2013}). #' } #' #' @examples #' library(parameters) #' library(psych) #' #' # Principal Component Analysis (PCA) --------- #' pca <- psych::principal(attitude) #' model_parameters(pca) #' #' pca <- psych::principal(attitude, nfactors = 3, rotate = "none") #' model_parameters(pca, sort = TRUE, threshold = 0.2) #' #' principal_components(attitude, n = 3, sort = TRUE, threshold = 0.2) #' \donttest{ #' # Exploratory Factor Analysis (EFA) --------- #' efa <- psych::fa(attitude, nfactors = 3) #' model_parameters(efa, threshold = "max", sort = TRUE, labels = as.character(1:ncol(attitude))) #' } #' #' # Omega --------- #' omega <- psych::omega(mtcars, nfactors = 3) #' params <- model_parameters(omega) #' params #' summary(params) #' #' #' # FactoMineR --------- #' \dontrun{ #' library(FactoMineR) #' #' model <- FactoMineR::PCA(iris[, 1:4], ncp = 2) #' model_parameters(model) #' attributes(model_parameters(model))$scores #' #' model <- FactoMineR::FAMD(iris, ncp = 2) #' model_parameters(model) #' } #' @return A data frame of loadings. #' @references \itemize{ #' \item Kaiser, H.F. and Rice, J. (1974). Little jiffy, mark iv. Educational and Psychological Measurement, 34(1):111–117 #' \item Pettersson, E., \& Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420. #' \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. #' \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics (6th ed.). Boston: Pearson Education. #' } #' @export model_parameters.principal <- function(model, sort = FALSE, threshold = NULL, labels = NULL, ...) { # n n <- model$factors # Get summary variance <- as.data.frame(unclass(model$Vaccounted)) data_summary <- .data_frame( Component = names(variance), Eigenvalues = model$values[1:n], Variance = as.numeric(variance["Proportion Var", ]) ) if ("Cumulative Var" %in% row.names(variance)) { data_summary$Variance_Cumulative <- as.numeric(variance["Cumulative Var", ]) } else { if (ncol(variance) == 1) { data_summary$Variance_Cumulative <- as.numeric(variance["Proportion Var", ]) } else { data_summary$Variance_Cumulative <- NA } } data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) # Get loadings loadings <- as.data.frame(unclass(model$loadings)) # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Labels if (!is.null(labels)) { loadings$Label <- labels loadings <- loadings[c("Variable", "Label", names(loadings)[!names(loadings) %in% c("Variable", "Label")])] loading_cols <- 3:(n + 2) } else { loading_cols <- 2:(n + 1) } # Add information loadings$Complexity <- model$complexity loadings$Uniqueness <- model$uniquenesses loadings$MSA <- attributes(model)$MSA # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "model") <- model attr(loadings, "rotation") <- model$rotation attr(loadings, "scores") <- model$scores attr(loadings, "additional_arguments") <- list(...)
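# (these attributes carry meta-information that is used later, e.g. by the
# print- and plot-methods)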
attr(loadings, "n") <- n attr(loadings, "type") <- model$fn attr(loadings, "loadings_columns") <- loading_cols # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Replace by NA all cells below threshold if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold, loadings_columns = loading_cols) # here we match the original columns in the data set with the assigned components # for each variable, so we know which column in the original data set belongs # to which extracted component... attr(loadings, "closest_component") <- .closest_component(loadings, loadings_columns = loading_cols, variable_names = rownames(model$loadings)) # add class-attribute for printing if (model$fn == "principal") { class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings))) } else { class(loadings) <- unique(c("parameters_efa", "see_parameters_efa", class(loadings))) } loadings } #' @export model_parameters.fa <- model_parameters.principal #' @rdname model_parameters.principal #' @export model_parameters.omega <- function(model, ...) { # Table of omega coefficients table_om <- model$omega.group colnames(table_om) <- c("Omega_Total", "Omega_Hierarchical", "Omega_Group") table_om$Composite <- row.names(table_om) row.names(table_om) <- NULL table_om <- table_om[c("Composite", names(table_om)[names(table_om) != "Composite"])] # Get summary: Table of Variance table_var <- as.data.frame(unclass(model$omega.group)) table_var$Composite <- rownames(model$omega.group) table_var$Total <- table_var$total * 100 table_var$General <- table_var$general * 100 table_var$Group <- table_var$group * 100 table_var <- table_var[c("Composite", "Total", "General", "Group")] # colnames(table_var) <- c("Composite", "Total Variance (%)", "Variance due to General Factor (%)", "Variance due to Group Factor (%)") # cor.plot(psych::fa.sort(om), main = title) out <- table_om attr(out, "summary") <- table_var class(out) <- c("parameters_omega", class(out)) out } parameters/R/format_algorithm.R0000644000176200001440000000250313607421255016304 0ustar liggesusers#' Model Algorithm formatting #' #' @param model A statistical model. #' #' @examples #' model <- lm(Sepal.Length ~ Species, data = iris) #' format_algorithm(model) #' @importFrom insight find_algorithm #' @export format_algorithm <- function(model) { algorithm <- insight::find_algorithm(model) text <- "" if (is.null(algorithm$algorithm)) { return(text) } # Name text <- algorithm$algorithm if (text == "sampling") { text <- "MCMC sampling" } # Chains if (!is.null(algorithm$chains)) { text <- paste0( text, " with ", algorithm$chains, " chains" ) if (!is.null(algorithm$iterations)) { text <- paste0( text, " of ", algorithm$iterations, " iterations" ) } if (!is.null(algorithm$warmup)) { text <- paste0( text, " and a warmup of ", algorithm$warmup ) } # Thinning? } # Optimizer if (!is.null(algorithm$optimizer)) { optimizer <- algorithm$optimizer if (optimizer == "bobyqa") { optimizer <- "BOBYQA" } if (optimizer == "Nelder_Mead") { optimizer <- "Nelder-Mead" } text <- paste0( text, " and ", optimizer, " optimizer" ) } text } parameters/R/n_parameters.R0000644000176200001440000001043513607420051015422 0ustar liggesusers#' Count number of parameters in a model #' #' Returns the number of parameters of a model. #' #' @param x A statistical model. 
#' @param effects Should the number of parameters for fixed effects, random effects,
#'   or both be returned? Only applies to mixed models. May be abbreviated.
#' @param component Should the total number of parameters, the number of parameters for the
#'   conditional model, the zero-inflated part of the model, the dispersion
#'   term or the instrumental variables be returned? Applies to models
#'   with zero-inflated and/or dispersion formula, or to models with instrumental
#'   variable (so-called fixed-effects regressions). May be abbreviated.
#' @param ... Arguments passed to or from other methods.
#'
#' @return The number of parameters in the model.
#' @export
n_parameters <- function(x, ...) {
  UseMethod("n_parameters")
}


# Default models -------------------------------------

#' @rdname n_parameters
#' @export
n_parameters.default <- function(x, ...) {
  length(insight::find_parameters(x, effects = "fixed", flatten = TRUE, ...))
}


# Models with random effects -------------------------------------

#' @rdname n_parameters
#' @export
n_parameters.merMod <- function(x, effects = c("fixed", "random"), ...) {
  effects <- match.arg(effects)
  length(insight::find_parameters(x, effects = effects, flatten = TRUE, ...))
}

#' @export
n_parameters.BBmm <- n_parameters.merMod

#' @export
n_parameters.glimML <- n_parameters.merMod

#' @export
n_parameters.cpglmm <- n_parameters.merMod

#' @export
n_parameters.rlmerMod <- n_parameters.merMod

#' @export
n_parameters.mixed <- n_parameters.merMod

#' @export
n_parameters.coxme <- n_parameters.merMod

#' @export
n_parameters.lme <- n_parameters.merMod

#' @export
n_parameters.MCMCglmm <- n_parameters.merMod

#' @export
n_parameters.sim.merMod <- n_parameters.merMod

#' @export
n_parameters.wbm <- n_parameters.merMod


# Models with random effects and other components ----------------------------

#' @export
n_parameters.MixMod <- function(x, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), ...) {
  effects <- match.arg(effects)
  component <- match.arg(component)
  length(insight::find_parameters(x, effects = effects, component = component, flatten = TRUE, ...))
}

#' @rdname n_parameters
#' @export
n_parameters.glmmTMB <- n_parameters.MixMod


# Models with (zero-inflation) components ----------------------------

#' @rdname n_parameters
#' @export
n_parameters.zeroinfl <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) {
  component <- match.arg(component)
  length(insight::find_parameters(x, component = component, flatten = TRUE, ...))
}

#' @export
n_parameters.hurdle <- n_parameters.zeroinfl

#' @export
n_parameters.zerotrunc <- n_parameters.default


# GAMs ----------------------------

#' @rdname n_parameters
#' @export
n_parameters.gam <- function(x, component = c("all", "conditional", "smooth_terms"), ...) {
  component <- match.arg(component)
  length(insight::find_parameters(x, component = component, flatten = TRUE, ...))
}

#' @export
n_parameters.Gam <- n_parameters.gam

#' @export
n_parameters.vgam <- n_parameters.gam


# Bayesian Models ----------------------------

#' @rdname n_parameters
#' @export
n_parameters.brmsfit <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "simplex", "sigma", "smooth_terms"), ...)
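# As with the other methods, counting is delegated to insight::find_parameters();
# e.g., for a hypothetical fitted brmsfit `m`, n_parameters(m) is equivalent to
#   length(insight::find_parameters(m, effects = "all", component = "all", flatten = TRUE))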
{ effects <- match.arg(effects) component <- match.arg(component) length(insight::find_parameters(x, effects = effects, component = component, flatten = TRUE, ...)) } #' @export n_parameters.stanreg <- function(x, effects = c("all", "fixed", "random"), component = c("all", "conditional", "smooth_terms"), ...) { effects <- match.arg(effects) component <- match.arg(component) length(insight::find_parameters(x, effects = effects, component = component, flatten = TRUE, ...)) } #' @export n_parameters.stanmvreg <- n_parameters.stanreg # Other models ------------------------------------- #' @export n_parameters.multinom <- function(x, ...) { nrow(insight::get_parameters(x)) } parameters/R/smoothness.R0000644000176200001440000000217013603206134015141 0ustar liggesusers#' Quantify the smoothness of a vector #' #' @param x Numeric vector (similar to a time series). #' @param method Can be "diff" (the standard deviation of the standardized differences) or "cor" (default, lag-one autocorrelation). #' @param lag An integer indicating which lag to use. If less than 1, will be interpreted as expressed in percentage of the length of the vector. #' #' @examples #' x <- (-10:10)^3 + rnorm(21, 0, 100) #' plot(x) #' smoothness(x, method = "cor") #' smoothness(x, method = "diff") #' @return Value of smoothness. #' @references https://stats.stackexchange.com/questions/24607/how-to-measure-smoothness-of-a-time-series-in-r #' #' @importFrom stats cor sd #' @importFrom utils head tail #' @export smoothness <- function(x, method = "cor", lag = 1) { if (lag < 1) { lag <- round(lag * length(x)) } if (lag <= 0) { stop("lag cannot be that small.") } if (method == "cor") { smooth <- stats::cor(utils::head(x, length(x) - lag), utils::tail(x, length(x) - lag)) } else { smooth <- stats::sd(diff(x, lag = lag)) / abs(mean(diff(x, lag = lag))) } smooth } parameters/R/parameters_table.R0000644000176200001440000001274513617043573016275 0ustar liggesusers#' Parameter table formatting #' #' @param x A data frame of model's parameters. #' @param pretty_names Pretty parameters' names. #' @inheritParams format_p #' @param ... Arguments passed to or from other methods. #' #' @examples #' library(parameters) #' #' x <- model_parameters(lm(Sepal.Length ~ Species * Sepal.Width, data = iris)) #' as.data.frame(parameters_table(x)) #' \donttest{ #' if (require("rstanarm")) { #' model <- stan_glm(Sepal.Length ~ Species, data = iris, refresh = 0, seed = 123) #' x <- model_parameters(model, ci = c(0.69, 0.89, 0.95)) #' as.data.frame(parameters_table(x)) #' } #' } #' @return A data frame. #' #' @importFrom tools toTitleCase #' @importFrom insight format_value format_ci #' @importFrom stats na.omit #' @export parameters_table <- function(x, pretty_names = TRUE, stars = FALSE, ...) 
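# Turns a model_parameters() data frame into a display-ready table: digits are
# read from the object's attributes, and p-values, CIs and several other
# columns are collapsed into formatted strings via insight::format_value()
# and insight::format_ci().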
{ # check if user supplied digits attributes digits <- attributes(x)$digits ci_digits <- attributes(x)$ci_digits p_digits <- attributes(x)$p_digits if (is.null(digits)) digits <- 2 if (is.null(ci_digits)) ci_digits <- 2 if (is.null(p_digits)) p_digits <- 3 x <- as.data.frame(x) # Format parameters names if (pretty_names & !is.null(attributes(x)$pretty_names)) { x$Parameter <- attributes(x)$pretty_names[x$Parameter] } # Format specific columns if ("n_Obs" %in% names(x)) x$n_Obs <- insight::format_value(x$n_Obs, protect_integers = TRUE) if ("n_Missing" %in% names(x)) x$n_Missing <- insight::format_value(x$n_Missing, protect_integers = TRUE) # generic df if ("df" %in% names(x)) x$df <- insight::format_value(x$df, protect_integers = TRUE) # residual df if ("df_residual" %in% names(x)) x$df_residual <- insight::format_value(x$df_residual, protect_integers = TRUE) names(x)[names(x) == "df_residual"] <- "df" # df for errors if ("df_error" %in% names(x)) x$df_error <- insight::format_value(x$df_error, protect_integers = TRUE) names(x)[names(x) == "df_error"] <- "df" # P values if ("p" %in% names(x)) { x$p <- format_p(x$p, stars = stars, name = NULL, missing = "", digits = p_digits) x$p <- format(x$p, justify = "left") } # CI ci_low <- names(x)[grep("CI_low*", names(x))] ci_high <- names(x)[grep("CI_high*", names(x))] if (length(ci_low) >= 1 & length(ci_low) == length(ci_high)) { if (is.null(attributes(x)$ci)) { ci_colname <- "CI" } else { ci_colname <- sprintf("%i%% CI", attributes(x)$ci * 100) } # Get characters to align the CI for (i in 1:length(ci_colname)) { x[ci_colname[i]] <- insight::format_ci(x[[ci_low[i]]], x[[ci_high[i]]], ci = NULL, digits = ci_digits, width = "auto", brackets = TRUE) } # Replace at initial position ci_position <- which(names(x) == ci_low[1]) x <- x[c(names(x)[0:(ci_position - 1)][!names(x)[0:(ci_position - 1)] %in% ci_colname], ci_colname, names(x)[ci_position:(length(names(x)) - 1)][!names(x)[ci_position:(length(names(x)) - 1)] %in% ci_colname])] x <- x[!names(x) %in% c(ci_low, ci_high)] } # Misc names(x)[names(x) == "Cohens_d"] <- "Cohen's d" # Standardized std_cols <- names(x)[grepl("Std_", names(x))] x[std_cols] <- insight::format_value(x[std_cols], digits = digits) names(x)[grepl("Std_", names(x))] <- paste0(gsub("Std_", "", std_cols), " (std.)") # Partial x[names(x)[grepl("_partial", names(x))]] <- insight::format_value(x[names(x)[grepl("_partial", names(x))]]) names(x)[grepl("_partial", names(x))] <- paste0(gsub("_partial", "", names(x)[grepl("_partial", names(x))]), " (partial)") # metafor if ("Weight" %in% names(x)) x$Weight <- insight::format_value(x$Weight, protect_integers = TRUE) # Bayesian if ("Prior_Location" %in% names(x)) x$Prior_Location <- insight::format_value(x$Prior_Location, protect_integers = TRUE) if ("Prior_Scale" %in% names(x)) x$Prior_Scale <- insight::format_value(x$Prior_Scale, protect_integers = TRUE) if ("BF" %in% names(x)) x$BF <- format_bf(x$BF, name = NULL, stars = stars) if ("pd" %in% names(x)) x$pd <- format_pd(x$pd, name = NULL, stars = stars) if ("ROPE_Percentage" %in% names(x)) x$ROPE_Percentage <- format_rope(x$ROPE_Percentage, name = NULL) names(x)[names(x) == "ROPE_Percentage"] <- "% in ROPE" # Priors if (all(c("Prior_Distribution", "Prior_Location", "Prior_Scale") %in% names(x))) { x$Prior <- paste0( tools::toTitleCase(x$Prior_Distribution), " (", x$Prior_Location, " +- ", x$Prior_Scale, ")" ) col_position <- which(names(x) == "Prior_Distribution") x <- x[c(names(x)[0:(col_position - 1)], "Prior", 
names(x)[col_position:(length(names(x)) - 1)])] # Replace at initial position x$Prior_Distribution <- x$Prior_Location <- x$Prior_Scale <- NULL } if ("Rhat" %in% names(x)) x$Rhat <- insight::format_value(x$Rhat, digits = 3) if ("ESS" %in% names(x)) x$ESS <- insight::format_value(x$ESS, protect_integers = TRUE) # Format remaining columns other_cols <- names(x)[sapply(x, is.numeric)] x[other_cols[other_cols %in% names(x)]] <- insight::format_value(x[other_cols[other_cols %in% names(x)]], digits = digits) # SEM links if (all(c("To", "Operator", "From") %in% names(x))) { x$Link <- paste(x$To, x$Operator, x$From) col_position <- which(names(x) == "To") x <- x[c(names(x)[0:(col_position - 1)], "Link", names(x)[col_position:(length(names(x)) - 1)])] # Replace at initial position x$To <- x$Operator <- x$From <- NULL } x } parameters/R/p_value_wald.R0000644000176200001440000000501013611425764015410 0ustar liggesusers#' Wald-test approximation for CIs and p-values #' #' The Wald-test approximation treats t-values as Wald z. Since the t distribution converges to the z distribution as degrees of freedom increase, this is like assuming infinite degrees of freedom. While this is unambiguously anti-conservative, this approximation appears as reasonable for reasonable sample sizes (Barr et al., 2013). That is, if we take the p-value to measure the probability of a false positive, this approximation produces a higher false positive rate than the nominal 5\% at p = 0.05. #' #' @param model A statistical model. #' @param ... Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_wald(model) #' ci_wald(model, ci = c(0.90, 0.95)) #' } #' } #' @return A data frame. #' @importFrom stats coef pt #' @references Barr, D. J. (2013). Random effects structure for testing interactions in linear mixed-effects models. Frontiers in psychology, 4, 328. #' @export p_value_wald <- function(model, ...) { UseMethod("p_value_wald") } #' @rdname p_value_wald #' @export p_value_wald.merMod <- function(model, dof = Inf, ...) { params <- as.data.frame(stats::coef(summary(model))) .p_value_wald(params, dof) } #' @export p_value_wald.rlmerMod <- function(model, dof = Inf, ...) { params <- as.data.frame(stats::coef(summary(model))) .p_value_wald(params, dof) } #' @export p_value_wald.cpglmm <- function(model, dof = Inf, ...) 
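# All methods funnel into .p_value_wald(), which computes
#   p = 2 * pt(abs(statistic), df = dof, lower.tail = FALSE)
# With the default dof = Inf this reduces to the normal (z) approximation,
# i.e. 2 * pnorm(abs(z), lower.tail = FALSE).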
{ if (!requireNamespace("cplm", quietly = TRUE)) { stop("To use this function, please install package 'cplm'.") } params <- as.data.frame(cplm::summary(model)$coefs) .p_value_wald(params, dof) } .p_value_wald <- function(params, dof = NULL) { if (is.null(dof)) dof <- Inf if ("t value" %in% names(params)) { p <- 2 * stats::pt(abs(params[, "t value"]), df = dof, lower.tail = FALSE) } else if ("z value" %in% names(params)) { p <- 2 * stats::pt(abs(params[, "z value"]), df = dof, lower.tail = FALSE) } else { stop("Couldn't find any suitable statistic (t or z value) for Wald-test approximation.") } if (is.null(names(p))) { coef_names <- rownames(params) } else { coef_names <- names(p) } .data_frame( Parameter = .remove_backticks_from_string(coef_names), p = unname(p) ) } parameters/R/p_value_kenward.R0000644000176200001440000000453013611426102016105 0ustar liggesusers#' Kenward-Roger approximation for SEs, CIs and p-values #' #' An approximate F-test based on the Kenward-Roger (1997) approach. #' #' @param model A statistical model. #' @param dof Degrees of Freedom. #' @inheritParams ci.merMod #' #' @details Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics. Unlike simpler approximation heuristics #' like the "m-l-1" rule (\code{dof_ml1}), the Kenward-Roger approximation is #' also applicable in more complex multilevel designs, e.g. with cross-classified #' clusters. However, the "m-l-1" heuristic also applies to generalized #' mixed models, while approaches like Kenward-Roger or Satterthwaite are limited #' to linear mixed models only. #' #' @seealso \code{dof_kenward()} and \code{se_kenward()} are small helper-functions #' to calculate approximated degrees of freedom and standard errors for model #' parameters, based on the Kenward-Roger (1997) approach. #' \cr \cr #' \code{\link[=dof_satterthwaite]{dof_satterthwaite()}} and #' \code{\link[=dof_ml1]{dof_ml1}} approximate degrees #' of freedom based on Satterthwaite's method or the "m-l-1" rule. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_kenward(model) #' } #' } #' @return A data frame. #' @references Kenward, M. G., & Roger, J. H. (1997). Small sample inference for fixed effects from restricted maximum likelihood. Biometrics, 983-997. 
#' @importFrom stats pt coef #' @export p_value_kenward <- function(model, dof = NULL) { UseMethod("p_value_kenward") } #' @export p_value_kenward.lmerMod <- function(model, dof = NULL) { if (is.null(dof)) { dof <- dof_kenward(model) } .p_value_dof(model, dof) } # helper ------------------------------ .p_value_dof <- function(model, dof) { statistic <- insight::get_statistic(model) p <- 2 * stats::pt(abs(statistic$Statistic), df = dof, lower.tail = FALSE) data.frame( Parameter = statistic$Parameter, p = unname(p), stringsAsFactors = FALSE ) } parameters/R/se_ml1.R0000644000176200001440000000062613611005352014121 0ustar liggesusers#' @rdname p_value_ml1 #' @importFrom stats qnorm #' @importFrom insight get_parameters #' @export se_ml1 <- function(model) { params <- insight::get_parameters(model) p <- p_value_ml1(model) statistic <- stats::qnorm(p$p / 2, lower.tail = FALSE) data.frame( Parameter = params$Parameter, SE = abs(as.vector(params$Estimate / statistic)), stringsAsFactors = FALSE ) } parameters/R/bootstrap_parameters.R0000644000176200001440000000424413574224320017207 0ustar liggesusers#' Parameters bootstrapping #' #' Compute bootstrapped parameters and their related indices such as Confidence Intervals (CI) and p-values. #' #' #' @inheritParams bootstrap_model #' @inheritParams bayestestR::describe_posterior #' #' @return Bootstrapped parameters. #' #' @references Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their application (Vol. 1). Cambridge university press. #' #' @seealso \code{\link{bootstrap_model}}, \code{\link{simulate_parameters}}, \code{\link{simulate_model}} #' #' @examples #' library(parameters) #' #' model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) #' bootstrap_parameters(model) #' @importFrom tools toTitleCase #' @export bootstrap_parameters <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- bootstrap_model(model, iterations = iterations, ...) .summary_bootstrap(data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ...) } #' @rdname bootstrap_parameters #' @export parameters_bootstrap <- bootstrap_parameters #' @keywords internal .summary_bootstrap <- function(data, test, centrality, ci, ci_method, ...) { # Is the p-value requested? if (any(test %in% c("p-value", "p", "pval"))) { p_value <- TRUE test <- setdiff(test, c("p-value", "p", "pval")) if (length(test) == 0) test <- NULL } else { p_value <- FALSE } parameters <- bayestestR::describe_posterior(data, centrality = centrality, ci = ci, ci_method = ci_method, test = test, ...) # Remove unecessary columns if ("CI" %in% names(parameters) && length(unique(parameters$CI)) == 1) { parameters$CI <- NULL } # Coef if (length(c(centrality)) == 1) { names(parameters)[names(parameters) == tools::toTitleCase(centrality)] <- "Coefficient" } # p-value if (p_value) { parameters$.col_order <- 1:nrow(parameters) p <- p_value(data, ...) parameters <- merge(parameters, p, all = TRUE) parameters <- parameters[order(parameters$.col_order), ] parameters$.col_order <- NULL } rownames(parameters) <- NULL parameters } parameters/R/format_bf.R0000644000176200001440000000145613607421264014713 0ustar liggesusers#' Bayes Factor formatting #' #' @param bf Bayes Factor. #' @inheritParams format_p #' #' @return A formatted string. 
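#'   Bayes factors are labelled with asterisks according to common evidence thresholds (one star for BF > 3, two for BF > 10, three for BF > 30), and values above 999 are truncated to \code{"> 999***"}.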
#' #' @examples #' format_bf(1.20) #' format_bf(c(1.20, 1557, 3.5, 12), stars = TRUE) #' format_bf(c(1.20, 1557, 3.5, 12), name = NULL) #' @importFrom insight format_value #' @export format_bf <- function(bf, stars = FALSE, stars_only = FALSE, name = "BF") { text <- ifelse(bf > 999, "> 999***", paste0( "= ", ifelse(bf > 30, paste0(insight::format_value(bf), "***"), ifelse(bf > 10, paste0(insight::format_value(bf), "**"), ifelse(bf > 3, paste0(insight::format_value(bf), "*"), paste0(insight::format_value(bf)) ) ) ) ) ) .add_prefix_and_remove_stars(text, stars, stars_only, name) } parameters/R/n_factors.R0000644000176200001440000004774113617565573014756 0ustar liggesusers#' Number of components/factors to retain in PCA/FA #' #' This function runs many existing procedures for determining how many factors to retain for your factor analysis (FA) or dimension reduction (PCA). It returns the number of factors based on the maximum consensus between methods. In case of ties, it will keep the simplest models and select the solution with the less factors. #' #' @param x A dataframe. #' @param type Can be \code{"FA"} or \code{"PCA"}, depending on what you want to do. #' @param rotation Only used for VSS (Very Simple Structure criterion, see \code{\link[psych]{VSS}}). The rotation to apply. Can be \code{"none"}, \code{"varimax"}, \code{"quartimax"}, \code{"bentlerT"}, \code{"equamax"}, \code{"varimin"}, \code{"geominT"} and \code{"bifactor"} for orthogonal rotations, and \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, \code{"bentlerQ"}, \code{"geominQ"}, \code{"biquartimin"} and \code{"cluster"} for oblique transformations. #' @param algorithm Factoring method used by VSS. Can be \code{"pa"} for Principal Axis Factor Analysis, \code{"minres"} for minimum residual (OLS) factoring, \code{"mle"} for Maximum Likelihood FA and \code{"pc"} for Principal Components. \code{"default"} will select \code{"minres"} if \code{type = "FA"} and \code{"pc"} if \code{type = "PCA"}. #' @param package These are the packages from which methods are used. Can be \code{"all"} or a vector containing \code{"nFactors"}, \code{"psych"} and \code{"EGAnet"}. However, \code{"EGAnet"} can be very slow for bigger datasets. Thus, by default, \code{c("nFactors", "psych")} are selected. #' @param safe If \code{TRUE}, will run all the procedures in try blocks, and will only return those that work and silently skip the ones that may fail. #' @param cor An optional correlation matrix that can be used. If \code{NULL}, will compute it by running \code{cor()} on the passed data. #' @param ... Arguments passed to or from other methods. #' #' @note There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examples #' library(parameters) #' #' n_factors(mtcars, type = "PCA") #' #' result <- n_factors(mtcars[1:5], type = "FA") #' as.data.frame(result) #' summary(result) #' \donttest{ #' n_factors(mtcars, type = "PCA", package = "all") #' n_factors(mtcars, type = "FA", algorithm = "mle", package = "all") #' } #' #' @return A data frame. #' #' @references \itemize{ #' \item Bartlett, M. S. (1950). Tests of significance in factor analysis. British Journal of statistical psychology, 3(2), 77-85. #' \item Bentler, P. M., & Yuan, K. H. (1996). Test of linear trend in eigenvalues of a covariance matrix with application to data analysis. British Journal of Mathematical and Statistical Psychology, 49(2), 299-312. 
#' \item Cattell, R. B. (1966). The scree test for the number of factors. Multivariate behavioral research, 1(2), 245-276. #' \item Finch, W. H. (2019). Using Fit Statistic Differences to Determine the Optimal Number of Factors to Retain in an Exploratory Factor Analysis. Educational and Psychological Measurement. #' \item Zoski, K. W., & Jurs, S. (1996). An objective counterpart to the visual scree test for factor analysis: The standard error scree. Educational and Psychological Measurement, 56(3), 443-451. #' \item Zoski, K., & Jurs, S. (1993). Using multiple regression to determine the number of factors to retain in factor analysis. Multiple Linear Regression Viewpoints, 20(1), 5-9. #' \item Nasser, F., Benson, J., & Wisenbaker, J. (2002). The performance of regression-based variations of the visual scree for determining the number of common factors. Educational and psychological measurement, 62(3), 397-419. #' \item Golino, H., Shi, D., Garrido, L. E., Christensen, A. P., Nieto, M. D., Sadana, R., & Thiyagarajan, J. A. (2018). Investigating the performance of Exploratory Graph Analysis and traditional techniques to identify the number of latent factors: A simulation and tutorial. #' \item Golino, H. F., & Epskamp, S. (2017). Exploratory graph analysis: A new approach for estimating the number of dimensions in psychological research. PloS one, 12(6), e0174035. #' \item Revelle, W., & Rocklin, T. (1979). Very simple structure: An alternative procedure for estimating the optimal number of interpretable factors. Multivariate Behavioral Research, 14(4), 403-414. #' \item Velicer, W. F. (1976). Determining the number of components from the matrix of partial correlations. Psychometrika, 41(3), 321-327. #' } #' @importFrom stats cor #' @export n_factors <- function(x, type = "FA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), cor = NULL, safe = TRUE, ...) { if (all(package == "all")) { package <- c("nFactors", "EGAnet", "psych") } # Initialize parameters nobs <- nrow(x) # Correlation matrix if (is.null(cor)) { cor <- stats::cor(x, use = "pairwise.complete.obs") } eigen_values <- eigen(cor)$values # Initialize dataframe out <- data.frame() # nFactors ------------------------------------------- if ("nFactors" %in% c(package)) { if (!requireNamespace("nFactors", quietly = TRUE)) { stop("Package 'nFactors' required for this function to work. 
Please install it by running `install.packages('nFactors')`.") } # Model if (tolower(type) %in% c("fa", "factor", "efa")) { model <- "factors" } else { model <- "components" } # Compute all if (safe) { out <- rbind( out, tryCatch(.n_factors_bartlett(eigen_values, model, nobs), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_bentler(eigen_values, model, nobs), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_cng(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_mreg(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_scree(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_sescree(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_bartlett(eigen_values, model, nobs) ) out <- rbind( out, .n_factors_bentler(eigen_values, model, nobs) ) out <- rbind( out, .n_factors_cng(eigen_values, model) ) out <- rbind( out, .n_factors_mreg(eigen_values, model) ) out <- rbind( out, .n_factors_scree(eigen_values, model) ) out <- rbind( out, .n_factors_sescree(eigen_values, model) ) } } # EGAnet ------------------------------------------- if ("EGAnet" %in% c(package)) { if (!requireNamespace("EGAnet", quietly = TRUE)) { stop("Package 'EGAnet' required for this function to work. Please install it by running `install.packages('EGAnet')`.") } if (safe) { out <- rbind( out, tryCatch(.n_factors_ega(x, cor, nobs, eigen_values, type), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_ega(x, cor, nobs, eigen_values, type) ) } } # psych ------------------------------------------- if ("psych" %in% c(package)) { if (!requireNamespace("psych", quietly = TRUE)) { stop("Package 'psych' required for this function to work. Please install it by running `install.packages('psych')`.") } if (safe) { out <- rbind( out, tryCatch(.n_factors_vss(x, cor, nobs, type, rotation, algorithm), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_fit(x, cor, nobs, type, rotation, algorithm), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_vss(x, cor, nobs, type, rotation, algorithm) ) out <- rbind( out, .n_factors_fit(x, cor, nobs, type, rotation, algorithm) ) } } # OUTPUT ---------------------------------------------- # TODO created weighted composite score out <- out[!is.na(out$n_Factors), ] # Remove empty methods out <- out[order(out$n_Factors), ] # Arrange by n factors row.names(out) <- NULL # Reset row index # Add summary by_factors <- .data_frame( n_Factors = as.numeric(unique(out$n_Factors)), n_Methods = as.numeric(by(out, as.factor(out$n_Factors), function(out) n <- nrow(out))) ) attr(out, "summary") <- by_factors attr(out, "n") <- min(as.numeric(as.character(by_factors[by_factors$n_Methods == max(by_factors$n_Methods), c("n_Factors")]))) class(out) <- c("n_factors", "see_n_factors", class(out)) out } #' @importFrom insight print_color #' @export print.n_factors <- function(x, ...) 
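# Prints the method-agreement summary, along the lines of:
#   "The choice of 3 dimensions is supported by 6 (40.00%) methods out of 15 (...)."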
{
  results <- attributes(x)$summary

  # Extract info
  max_methods <- max(results$n_Methods)
  best_n <- attributes(x)$n

  # Extract methods
  if ("n_Factors" %in% names(x)) {
    type <- "factor"
    methods_text <- paste0(as.character(x[x$n_Factors == best_n, "Method"]), collapse = ", ")
  } else {
    type <- "cluster"
    methods_text <- paste0(as.character(x[x$n_Clusters == best_n, "Method"]), collapse = ", ")
  }

  # Text
  text <- paste0(
    "The choice of ",
    as.character(best_n),
    ifelse(type == "factor", " dimensions ", " clusters "),
    "is supported by ",
    max_methods,
    " (",
    sprintf("%.2f", max_methods / nrow(x) * 100),
    "%) methods out of ",
    nrow(x),
    " (",
    methods_text,
    ").\n"
  )

  insight::print_color("# Method Agreement Procedure:\n\n", "blue")
  cat(text)
}

#' @export
summary.n_factors <- function(object, ...) {
  attributes(object)$summary
}

#' @export
as.numeric.n_factors <- function(x, ...) {
  attributes(x)$n
}

#' @export
as.double.n_factors <- as.numeric.n_factors

#' @export
summary.n_clusters <- summary.n_factors

#' @export
as.numeric.n_clusters <- as.numeric.n_factors

#' @export
as.double.n_clusters <- as.double.n_factors

#' @export
print.n_clusters <- print.n_factors

#' Bartlett, Anderson and Lawley Procedures
#' @importFrom tools toTitleCase
#' @keywords internal
.n_factors_bartlett <- function(eigen_values = NULL, model = "factors", nobs = NULL) {
  nfac <- nFactors::nBartlett(eigen_values, N = nobs, alpha = 0.05, details = FALSE, model = model)$nFactors
  data.frame(
    n_Factors = as.numeric(nfac),
    Method = tools::toTitleCase(names(nfac)),
    Family = "Bartlett"
  )
}

#' Bentler and Yuan's Procedure
#' @keywords internal
.n_factors_bentler <- function(eigen_values = NULL, model = "factors", nobs = NULL) {
  nfac <- .nBentler(x = eigen_values, N = nobs, model = model, alpha = 0.05, details = FALSE)$nFactors
  data.frame(
    n_Factors = as.numeric(nfac),
    Method = "Bentler",
    Family = "Bentler"
  )
}

#' Cattell-Nelson-Gorsuch CNG Indices
#' @keywords internal
.n_factors_cng <- function(eigen_values = NULL, model = "factors") {
  if (length(eigen_values) < 6) {
    nfac <- NA
  } else {
    nfac <- nFactors::nCng(x = eigen_values, cor = TRUE, model = model)$nFactors
  }
  data.frame(
    n_Factors = as.numeric(nfac),
    Method = "CNG",
    Family = "CNG"
  )
}

#' Multiple Regression Procedure
#' @keywords internal
.n_factors_mreg <- function(eigen_values = NULL, model = "factors") {
  if (length(eigen_values) < 6) {
    nfac <- NA
  } else {
    nfac <- nFactors::nMreg(x = eigen_values, cor = TRUE, model = model)$nFactors
  }
  data.frame(
    n_Factors = as.numeric(nfac),
    Method = c("beta", "t", "p"),
    Family = "Multiple_regression"
  )
}

#' Non Graphical Cattell's Scree Test
#' @keywords internal
.n_factors_scree <- function(eigen_values = NULL, model = "factors") {
  nfac <- unlist(nFactors::nScree(x = eigen_values, cor = TRUE, model = model)$Components)
  data.frame(
    n_Factors = as.numeric(nfac),
    Method = c("Optimal coordinates", "Acceleration factor", "Parallel analysis", "Kaiser criterion"),
    Family = "Scree"
  )
}

#' Standard Error Scree and Coefficient of Determination Procedures
#' @keywords internal
.n_factors_sescree <- function(eigen_values = NULL, model = "factors") {
  nfac <- nFactors::nSeScree(x = eigen_values, cor = TRUE, model = model)$nFactors
  data.frame(
    n_Factors = as.numeric(nfac),
    Method = c("SE Scree", "R2"),
    Family = "Scree_SE"
  )
}


# EGAnet ------------------------

#' @importFrom utils capture.output
#' @keywords internal
.n_factors_ega <- function(x = NULL, cor = NULL, nobs = NULL, eigen_values = NULL, type = "FA") {
  # Replace with own correlation matrix
  junk <-
utils::capture.output(suppressWarnings(suppressMessages(nfac_glasso <- EGAnet::EGA(x, model = "glasso", plot.EGA = FALSE)$n.dim))) junk <- utils::capture.output(suppressWarnings(suppressMessages(nfac_TMFG <- EGAnet::EGA(x, model = "TMFG", plot.EGA = FALSE)$n.dim))) # junk <- utils::capture.output(suppressWarnings(suppressMessages(nfac_glasso_boot <- EGAnet::bootEGA(x, model = "glasso", n = 500, plot.typicalStructure = FALSE)$n.dim))) # junk <- utils::capture.output(suppressWarnings(suppressMessages(nfac_TMFG_boot <- EGAnet::bootEGA(x, model = "TMFG", n = 500, plot.typicalStructure = FALSE)$n.dim))) data.frame( n_Factors = as.numeric(c(nfac_glasso, nfac_TMFG)), Method = c("EGA (glasso)", "EGA (TMFG)"), Family = "EGA" ) } # psych ------------------------ #' @keywords internal .n_factors_vss <- function(x = NULL, cor = NULL, nobs = NULL, type = "FA", rotation = "varimax", algorithm = "default") { if (algorithm == "default") { if (tolower(type) %in% c("fa", "factor", "efa")) { algorithm <- "minres" } else { algorithm <- "pc" } } # Compute VSS vss <- psych::VSS( cor, n = ncol(x) - 1, n.obs = nobs, rotate = rotation, fm = algorithm, plot = FALSE ) # Format results stats <- vss$vss.stats stats$map <- vss$map stats$n_Factors <- seq_len(nrow(stats)) names(stats) <- gsub("cfit.", "VSS_Complexity_", names(stats)) # Indices vss_1 <- which.max(stats$VSS_Complexity_1) vss_2 <- which.max(stats$VSS_Complexity_2) velicer_MAP <- which.min(stats$map) BIC_reg <- which.min(stats$BIC) BIC_adj <- which.min(stats$SABIC) BIC_reg <- ifelse(length(BIC_reg) == 0, NA, BIC_reg) BIC_adj <- ifelse(length(BIC_adj) == 0, NA, BIC_adj) data.frame( n_Factors = as.numeric(c(vss_1, vss_2, velicer_MAP, BIC_reg, BIC_adj)), Method = c("VSS complexity 1", "VSS complexity 2", "Velicer's MAP", "BIC", "BIC (adjusted)"), Family = c("VSS", "VSS", "Velicers_MAP", "BIC", "BIC") ) } #' @keywords internal .n_factors_fit <- function(x = NULL, cor = NULL, nobs = NULL, type = "FA", rotation = "varimax", algorithm = "default") { if (algorithm == "default") { if (tolower(type) %in% c("fa", "factor", "efa")) { algorithm <- "minres" } else { algorithm <- "pc" } } rez <- data.frame() for (n in 1:(ncol(cor) - 1)) { if (tolower(type) %in% c("fa", "factor", "efa")) { factors <- tryCatch(psych::fa(cor, nfactors = n, n.obs = nobs, rotate = rotation, fm = algorithm ), warning = function(w) NA, error = function(e) NA ) } else { factors <- tryCatch(psych::pca(cor, nfactors = n, n.obs = nobs, rotate = rotation ), warning = function(w) NA, error = function(e) NA ) } if (all(is.na(factors))) { next } rmsea <- ifelse(is.null(factors$RMSEA), NA, factors$RMSEA[1]) rmsr <- ifelse(is.null(factors$rms), NA, factors$rms) crms <- ifelse(is.null(factors$crms), NA, factors$crms) bic <- ifelse(is.null(factors$BIC), NA, factors$BIC) tli <- ifelse(is.null(factors$TLI), NA, factors$TLI) rez <- rbind( rez, data.frame( n = n, TLI = tli, Fit = factors$fit.off, RMSEA = rmsea, RMSR = rmsr, CRMS = crms, BIC = bic ) ) } TLI <- ifelse(all(is.na(rez$TLI)), NA, rez[!is.na(rez$TLI) & rez$TLI == min(rez$TLI, na.rm = TRUE), "n"]) RMSEA <- ifelse(all(is.na(rez$RMSEA)), NA, rez[!is.na(rez$RMSEA) & rez$RMSEA == max(rez$RMSEA, na.rm = TRUE), "n"]) RMSR <- ifelse(all(is.na(rez$RMSR)), NA, rez[!is.na(rez$RMSR) & rez$RMSR == min(rez$RMSR, na.rm = TRUE), "n"]) CRMS <- ifelse(all(is.na(rez$CRMS)), NA, rez[!is.na(rez$CRMS) & rez$CRMS == min(rez$CRMS, na.rm = TRUE), "n"]) BIC <- ifelse(all(is.na(rez$BIC)), NA, rez[!is.na(rez$BIC) & rez$BIC == min(rez$BIC, na.rm = TRUE), "n"]) data.frame( 
    n_Factors = c(TLI, RMSEA, CRMS, BIC),
    Method = c("TLI", "RMSEA", "CRMS", "BIC"),
    Family = c("Fit", "Fit", "Fit", "Fit")
  )
}


# Re-implementation of nBentler in nFactors ------------------------

#' @importFrom stats lm
#' @keywords internal
.nBentler <- function(x, N, model = model, log = TRUE, alpha = 0.05, cor = TRUE, details = TRUE, ...) {
  if (!requireNamespace("nFactors", quietly = TRUE)) {
    stop("Package 'nFactors' required for this function to work. Please install it by running `install.packages('nFactors')`.")
  }

  lambda <- nFactors::eigenComputes(x, cor = cor, model = model, ...)
  if (length(which(lambda < 0)) > 0) {
    stop("These indices are only valid with a principal component solution. So, only positive eigenvalues are permitted.")
  }

  minPar <- c(min(lambda) - abs(min(lambda)) + .001, 0.001)
  maxPar <- c(max(lambda), stats::lm(lambda ~ I(length(lambda):1))$coef[2])

  n <- N
  significance <- alpha
  min.k <- 3
  LRT <- data.frame(
    q = numeric(length(lambda) - min.k),
    k = numeric(length(lambda) - min.k),
    LRT = numeric(length(lambda) - min.k),
    a = numeric(length(lambda) - min.k),
    b = numeric(length(lambda) - min.k),
    p = numeric(length(lambda) - min.k),
    convergence = numeric(length(lambda) - min.k)
  )
  bentler.n <- 0
  for (i in 1:(length(lambda) - min.k)) {
    temp <- nFactors::bentlerParameters(x = lambda, N = n, nFactors = i, log = log, cor = cor, minPar = minPar, maxPar = maxPar, graphic = FALSE)
    LRT[i, 3] <- temp$lrt
    LRT[i, 4] <- ifelse(is.null(temp$coef[1]), NA, temp$coef[1])
    LRT[i, 5] <- ifelse(is.null(temp$coef[2]), NA, temp$coef[2])
    LRT[i, 6] <- ifelse(is.null(temp$p.value), NA, temp$p.value)
    LRT[i, 7] <- ifelse(is.null(temp$convergence), NA, temp$convergence)
    LRT[i, 2] <- i
    LRT[i, 1] <- length(lambda) - i
  }
  # LRT <- LRT[order(LRT[,1],decreasing = TRUE),]
  for (i in 1:(length(lambda) - min.k)) {
    if (i == 1) bentler.n <- bentler.n + as.numeric(LRT$p[i] <= significance)
    if (i > 1) {
      if (LRT$p[i - 1] <= 0.05) bentler.n <- bentler.n + as.numeric(LRT$p[i] <= significance)
    }
  }
  if (bentler.n == 0) bentler.n <- length(lambda)
  if (details == TRUE) details <- LRT else details <- NULL
  res <- list(detail = details, nFactors = bentler.n)
  class(res) <- c("nFactors", "list")
  res
}
parameters/R/simulate_model.R0000644000176200001440000002466313613111101015743 0ustar liggesusers#' Simulated draws from model coefficients
#'
#' Simulate draws from a statistical model to return a data frame of estimates.
#'
#' @param model Statistical model (no Bayesian models).
#' @param component Should all parameters, parameters for the conditional model,
#'   or for the zero-inflated part of the model be returned? Applies to models
#'   with zero-inflated component. \code{component} may be one of \code{"conditional"},
#'   \code{"zi"}, \code{"zero-inflated"} or \code{"all"} (default). May be abbreviated.
#' @inheritParams bootstrap_model
#'
#' @return A data frame.
#'
#' @seealso \code{\link[=parameters_simulate]{simulate_parameters()}},
#'   \code{\link[=bootstrap_model]{bootstrap_model()}},
#'   \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}
#'
#' @details
#' \subsection{Technical Details}{
#'   \code{simulate_model()} is a computationally faster alternative
#'   to \code{bootstrap_model()}. Simulated draws for coefficients are based
#'   on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean
#'   \code{mu = coef(model)} and variance \code{Sigma = vcov(model)}.
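#'   As a rough sketch of the idea (not the package's exact internals, which
#'   also handle model components and effects), the draws for a simple linear
#'   model amount to:
#'   \preformatted{
#'   m <- lm(mpg ~ wt + cyl, data = mtcars)
#'   draws <- MASS::mvrnorm(n = 1000, mu = coef(m), Sigma = vcov(m))
#'   head(draws)
#'   }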
#' } #' \subsection{Models with Zero-Inflation Component}{ #' For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and #' \pkg{countreg}, the \code{component} argument can be used to specify #' which parameters should be simulated. For all other models, parameters #' from the conditional component (fixed effects) are simulated. This may #' include smooth terms, but not random effects. #' } #' #' @examples #' library(parameters) #' library(glmmTMB) #' #' model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) #' head(simulate_model(model)) #' #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' head(simulate_model(model)) #' head(simulate_model(model, component = "zero_inflated")) #' @export simulate_model <- function(model, iterations = 1000, ...) { UseMethod("simulate_model") } #' @rdname simulate_model #' @export model_simulate <- simulate_model # Models with single component only ----------------------------------------- #' @importFrom stats vcov setNames #' @importFrom insight get_parameters #' @export simulate_model.default <- function(model, iterations = 1000, ...) { .simulate_model(model, iterations, component = "conditional", effects = "fixed") } #' @export simulate_model.lm <- simulate_model.default #' @export simulate_model.glmmadmb <- simulate_model.default #' @export simulate_model.cglm <- simulate_model.default #' @export simulate_model.cpglm <- simulate_model.default #' @export simulate_model.cpglmm <- simulate_model.default #' @export simulate_model.feglm <- simulate_model.default #' @export simulate_model.iv_robust <- simulate_model.default #' @export simulate_model.fixest <- simulate_model.default #' @export simulate_model.rq <- simulate_model.default #' @export simulate_model.crq <- simulate_model.default #' @export simulate_model.nlrq <- simulate_model.default #' @export simulate_model.speedglm <- simulate_model.default #' @export simulate_model.speedlm <- simulate_model.default #' @export simulate_model.glm <- simulate_model.default #' @export simulate_model.glmRob <- simulate_model.default #' @export simulate_model.lmRob <- simulate_model.default #' @export simulate_model.gls <- simulate_model.default #' @export simulate_model.lme <- simulate_model.default #' @export simulate_model.crch <- simulate_model.default #' @export simulate_model.biglm <- simulate_model.default #' @export simulate_model.plm <- simulate_model.default #' @export simulate_model.flexsurvreg <- simulate_model.default #' @export simulate_model.LORgee <- simulate_model.default #' @export simulate_model.feis <- simulate_model.default #' @export simulate_model.lmrob <- simulate_model.default #' @export simulate_model.glmrob <- simulate_model.default #' @export simulate_model.merMod <- simulate_model.default #' @export simulate_model.gamlss <- simulate_model.default #' @export simulate_model.lm_robust <- simulate_model.default #' @export simulate_model.coxme <- simulate_model.default #' @export simulate_model.geeglm <- simulate_model.default #' @export simulate_model.gee <- simulate_model.default #' @export simulate_model.clm <- simulate_model.default #' @export simulate_model.polr <- simulate_model.default #' @export simulate_model.coxph <- simulate_model.default #' @export simulate_model.svyglm.nb <- simulate_model.default #' @export simulate_model.svyglm.zip <- simulate_model.default #' @export simulate_model.logistf <- simulate_model.default #' @export simulate_model.truncreg <- 
simulate_model.default #' @export simulate_model.glimML <- simulate_model.default #' @export simulate_model.ivreg <- simulate_model.default #' @export simulate_model.lrm <- simulate_model.default #' @export simulate_model.psm <- simulate_model.default #' @export simulate_model.ols <- simulate_model.default #' @export simulate_model.rms <- simulate_model.default #' @export simulate_model.vglm <- simulate_model.default #' @export simulate_model.censReg <- simulate_model.default #' @export simulate_model.tobit <- simulate_model.default #' @export simulate_model.survreg <- simulate_model.default #' @export simulate_model.multinom <- simulate_model.default #' @export simulate_model.brmultinom <- simulate_model.default #' @export simulate_model.bracl <- simulate_model.default # gam models ----------------------------------------- #' @importFrom insight get_varcov #' @export simulate_model.gam <- function(model, iterations = 1000, ...) { if (!requireNamespace("MASS", quietly = TRUE)) { stop("Package 'MASS' needed for this function to work. Please install it.", call. = FALSE) } if (is.null(iterations)) iterations <- 1000 beta <- stats::coef(model) varcov <- insight::get_varcov(model, component = "all") as.data.frame(MASS::mvrnorm(n = iterations, mu = beta, Sigma = varcov)) } #' @export simulate_model.gamm <- function(model, iterations = 1000, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") simulate_model(model, iterations = iterations, ...) } #' @export simulate_model.list <- function(model, iterations = 1000, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") simulate_model(model, iterations = iterations, ...) } } #' @export simulate_model.vgam <- function(model, iterations = 1000, ...) { .simulate_model(model, iterations, component = "all") } # Models with zero-inflation components --------------------------------------- #' @importFrom stats vcov setNames #' @importFrom insight get_parameters #' @rdname simulate_model #' @export simulate_model.glmmTMB <- function(model, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) if (component %in% c("zi", "zero_inflated", "all") && !insight::model_info(model)$is_zero_inflated) { insight::print_color("Model has no zero-inflation component. Simulating from conditional parameters.\n", "red") component <- "conditional" } if (is.null(iterations)) iterations <- 1000 if (component == "all") { d1 <- .simulate_model(model, iterations, component = "conditional") d2 <- .simulate_model(model, iterations, component = "zero_inflated") colnames(d2) <- paste0(colnames(d2), "_zi") d <- cbind(d1, d2) } else if (component == "conditional") { d <- .simulate_model(model, iterations, component = "conditional") } else { d <- .simulate_model(model, iterations, component = "zero_inflated") } d } #' @export simulate_model.MixMod <- simulate_model.glmmTMB #' @export simulate_model.zeroinfl <- simulate_model.glmmTMB #' @export simulate_model.hurdle <- simulate_model.zeroinfl #' @export simulate_model.zerocount <- simulate_model.zeroinfl # Other models --------------------------------------- #' @export simulate_model.betareg <- function(model, iterations = 1000, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) .simulate_model(model, iterations, component = component) } #' @export simulate_model.clm2 <- function(model, iterations = 1000, component = c("all", "conditional", "scale"), ...) 
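# Like the betareg method above, this just forwards the chosen component
# to the .simulate_model() helper (defined at the end of this file).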
{ component <- match.arg(component) .simulate_model(model, iterations, component = component) } #' @export simulate_model.clmm2 <- simulate_model.clm2 #' @export simulate_model.glmx <- function(model, iterations = 1000, component = c("all", "conditional", "extra"), ...) { component <- match.arg(component) .simulate_model(model, iterations, component = component) } #' @export simulate_model.mixor <- function(model, iterations = 1000, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) .simulate_model(model, iterations, component = "conditional", effects = effects) } # helper ----------------------------------------- #' @importFrom insight get_varcov .simulate_model <- function(model, iterations, component = "conditional", effects = "fixed") { if (!requireNamespace("MASS", quietly = TRUE)) { stop("Package 'MASS' needed for this function to work. Please install it.", call. = FALSE) } if (is.null(iterations)) iterations <- 1000 params <- insight::get_parameters(model, effects = effects, component = component) beta <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector varcov <- insight::get_varcov(model, component = component, effects = effects) as.data.frame(MASS::mvrnorm(n = iterations, mu = beta, Sigma = varcov)) ## Alternative approach, similar to arm::sim() # k <- length(insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE)) # n <- insight::n_obs(model) # beta.cov <- stats::vcov(model) / stats::sigma(model) # s <- vector("double", iterations) # b <- array(NA, c(100, k)) # for (i in 1:iterations) { # s[i] <- stats::sigma(model) * sqrt((n - k) / rchisq(1, n - k)) # b[i,] <- MASS::mvrnorm(n = 1, mu = beta, Sigma = beta.cov * s[i] ^ 2) # } } parameters/R/dof_satterthwaite.R0000644000176200001440000000205713612357372016476 0ustar liggesusers#' @importFrom stats setNames #' @rdname p_value_satterthwaite #' @export dof_satterthwaite <- function(model) { UseMethod("dof_satterthwaite") } #' @export dof_satterthwaite.lmerMod <- function(model) { if (!requireNamespace("lmerTest", quietly = TRUE)) { stop("Package `lmerTest` required for Satterthwaite approximation. Please install it.", call. = FALSE) } parameters <- find_parameters(model, effects = "fixed", flatten = TRUE) lmerTest_model <- lmerTest::as_lmerModLmerTest(model) s <- summary(lmerTest_model) stats::setNames(as.vector(s$coefficients[, 3]), parameters) } #' @export dof_satterthwaite.lme <- function(model) { if (!requireNamespace("lavaSearch2", quietly = TRUE)) { stop("Package `lavaSearch2` required for Satterthwaite approximation. Please install it.", call. = FALSE) } parameters <- find_parameters(model, effects = "fixed", flatten = TRUE) lavaSearch2::sCorrect(model) <- TRUE s <- lavaSearch2::summary2(model) stats::setNames(as.vector(s$tTable[, "df"]), parameters) } parameters/R/fish.R0000644000176200001440000000024013616616611013675 0ustar liggesusers#' @docType data #' @title Sample data set #' @name fish #' @keywords data #' #' @description A sample data set, used in tests and some examples. NULL parameters/R/model_parameters_default.R0000644000176200001440000001621513615557774020021 0ustar liggesusers#' Parameters from (General) Linear Models #' #' Extract and compute indices and measures to describe parameters of (general) linear models (GLMs). #' #' @param model Model object. #' @param ci Confidence Interval (CI) level. Default to 0.95 (95\%). #' @param bootstrap Should estimates be based on bootstrapped model? 
#'   If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=parameters_bootstrap]{parameters_bootstrap()}}).
#' @param iterations The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.
#' @param standardize The method used for standardizing the parameters. Can be \code{"refit"}, \code{"posthoc"}, \code{"smart"}, \code{"basic"} or \code{NULL} (default) for no standardization. See 'Details' in \code{\link[effectsize]{standardize_parameters}}. Note that robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}.
#' @param exponentiate Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for, say, logistic regressions, or more generally speaking: for models with log or logit link.
#' @param robust Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details.
#' @param component Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}) or \code{"all"}.
#' @param ... Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{ci_method} are passed down to \code{\link[bayestestR]{describe_posterior}}.
#'
#' @seealso \code{\link[=standardize_names]{standardize_names()}} to rename
#'   columns into a consistent, standardized naming scheme.
#'
#' @examples
#' library(parameters)
#' model <- lm(mpg ~ wt + cyl, data = mtcars)
#'
#' model_parameters(model)
#'
#' # bootstrapped parameters
#' model_parameters(model, bootstrap = TRUE)
#'
#' # standardized parameters
#' model_parameters(model, standardize = "refit")
#'
#' # different p-value style in output
#' model_parameters(model, p_digits = 5)
#' model_parameters(model, digits = 3, ci_digits = 4, p_digits = "scientific")
#'
#' # logistic regression model
#' model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial")
#' model_parameters(model)
#'
#' # show odds ratio / exponentiated coefficients
#' model_parameters(model, exponentiate = TRUE)
#' @return A data frame of indices related to the model's parameters.
#' @export
model_parameters.default <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, ...) {
  out <- .model_parameters_generic(
    model = model,
    ci = ci,
    bootstrap = bootstrap,
    iterations = iterations,
    merge_by = "Parameter",
    standardize = standardize,
    exponentiate = exponentiate,
    effects = "fixed",
    robust = robust,
    ...
  )
  attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  out
}


.model_parameters_generic <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, merge_by = "Parameter", standardize = NULL, exponentiate = FALSE, effects = "fixed", robust = FALSE, df_method = NULL, ...) {
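  # Flow: either bootstrap the model and summarize the draws, or extract
  # coefficients, CIs and p-values directly; afterwards, optionally
  # exponentiate the estimates and attach the attributes used for printing.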
mc <- match.call() comp_argument <- parse(text = .safe_deparse(mc))[[1]]$component # Processing if (bootstrap) { parameters <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) } else { parameters <- if (is.null(comp_argument)) { .extract_parameters_generic(model, ci = ci, component = "conditional", merge_by = merge_by, standardize = standardize, effects = effects, robust = robust, df_method = df_method, ...) } else { .extract_parameters_generic(model, ci = ci, merge_by = merge_by, standardize = standardize, effects = effects, robust = robust, df_method = df_method, ...) } } if (exponentiate) parameters <- .exponentiate_parameters(parameters) parameters <- .add_model_parameters_attributes(parameters, model, ci, exponentiate, ...) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } # other special cases ------------------------------------------------ #' @rdname model_parameters.default #' @export model_parameters.betareg <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("conditional", "precision", "all"), standardize = NULL, exponentiate = FALSE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } ## TODO check merge by out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, robust = FALSE, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @rdname model_parameters.default #' @export model_parameters.clm2 <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "scale"), standardize = NULL, exponentiate = FALSE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } ## TODO check merge by out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, robust = FALSE, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export model_parameters.clmm2 <- model_parameters.clm2 #' @rdname model_parameters.default #' @export model_parameters.glmx <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "extra"), standardize = NULL, exponentiate = FALSE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, robust = FALSE, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } parameters/R/reshape_loadings.R0000644000176200001440000000756413603206134016262 0ustar liggesusers#' Reshape loadings between wide/long formats #' #' Reshape loadings between wide/long formats. #' #' #' @examples #' library(parameters) #' library(psych) #' #' pca <- model_parameters(psych::fa(attitude, nfactors = 3)) #' loadings <- reshape_loadings(pca) #' #' loadings #' reshape_loadings(loadings) #' @export reshape_loadings <- function(x, ...) 
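# S3 generic; the parameters_efa method toggles between formats based on the
# "loadings_format" attribute, e.g. (hypothetical object `x`):
#   long <- reshape_loadings(x)   # wide -> long
#   reshape_loadings(long)        # long -> wide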
{ UseMethod("reshape_loadings") } #' @rdname reshape_loadings #' @inheritParams principal_components #' @export reshape_loadings.parameters_efa <- function(x, threshold = NULL, ...) { current_format <- attributes(x)$loadings_format if (is.null(current_format) || current_format == "wide") { .long_loadings(x, threshold = threshold) } else { .wide_loadings(x) } } #' @rdname reshape_loadings #' @param loadings_columns Vector indicating the columns corresponding to loadings. #' @export reshape_loadings.data.frame <- function(x, threshold = NULL, loadings_columns = NULL, ...) { if (is.null(loadings_columns)) loadings_columns <- 1:ncol(x) if (length(loadings_columns) > 1) { .long_loadings(x, threshold = threshold, loadings_columns = loadings_columns) } } #' @importFrom stats reshape #' @keywords internal .wide_loadings <- function(loadings, loadings_columns = "Loading", component_column = "Component", variable_column = "Variable", ...) { if (is.numeric(loadings[[component_column]])) { loadings[[component_column]] <- paste0("F", loadings[[component_column]]) } complexity_column <- if ("Complexity" %in% colnames(loadings)) "Complexity" else NULL uniqueness_column <- if ("Uniqueness" %in% colnames(loadings)) "Uniqueness" else NULL reshape_columns <- c(loadings_columns, component_column, variable_column, complexity_column, uniqueness_column) loadings <- stats::reshape( loadings[reshape_columns], idvar = variable_column, timevar = component_column, direction = "wide", v.names = c(loadings_columns), sep = "_" ) names(loadings) <- gsub(paste0(loadings_columns, "_"), "", names(loadings)) attr(loadings, "loadings_format") <- "wide" class(loadings) <- unique(c("parameters_loadings", class(loadings))) # clean-up, column-order row.names(loadings) <- NULL column_order <- c(setdiff(colnames(loadings), c("Complexity", "Uniqueness")), c("Complexity", "Uniqueness")) loadings[column_order[column_order %in% colnames(loadings)]] } #' @importFrom stats reshape #' @keywords internal .long_loadings <- function(loadings, threshold = NULL, loadings_columns = NULL) { if (is.null(loadings_columns)) { loadings_columns <- attributes(loadings)$loadings_columns } if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold, loadings_columns = loadings_columns) } # Reshape to long long <- stats::reshape(loadings, direction = "long", varying = list(names(loadings)[loadings_columns]), v.names = "Loading", timevar = "Component", idvar = "Variable" ) # Restore component names for (i in 1:length(unique(long$Component))) { component <- unique(long$Component)[[i]] name <- names(loadings)[loadings_columns][[i]] long[long$Component == component, "Component"] <- name } # Filtering long <- long[!is.na(long$Loading), ] row.names(long) <- NULL # Reorder columns loadings <- long[, c( "Component", "Variable", "Loading", names(loadings)[-loadings_columns][!names(loadings)[-loadings_columns] %in% c("Component", "Variable", "Loading")] )] attr(loadings, "loadings_format") <- "long" class(loadings) <- unique(c("parameters_loadings", class(loadings))) loadings } #' @importFrom insight format_table #' @export print.parameters_loadings <- function(x, ...) 
{
  formatted_table <- parameters_table(x)
  cat(insight::format_table(formatted_table))
}
parameters/R/get_scores.R0000644000176200001440000000416313612621074015104 0ustar liggesusers#' Get Scores from Principal Component Analysis (PCA)
#'
#' \code{get_scores()} takes the items that load on each component and computes their average, returning one sum score per component. At least \code{n_items} non-missing items are required per observation.
#'
#' @param x An object returned by \code{\link{principal_components}}.
#' @param n_items Number of required (i.e. non-missing) items to build the sum score. If \code{NULL}, the value is chosen to match half of the number of columns in a data frame.
#'
#' @details \code{get_scores()} takes the results from \code{\link{principal_components}}
#'   and extracts the variables for each component found by the PCA. Then, for
#'   each of these "subscales", row means are calculated (which equals adding
#'   up the single items and dividing by the number of items). This results in
#'   a sum score for each component from the PCA, which is on the same scale as
#'   the original, single items that were used to compute the PCA.
#'
#' @examples
#' library(parameters)
#' pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax")
#'
#' # PCA extracted two components
#' pca
#'
#' # assignment of items to each component
#' closest_component(pca)
#'
#' # now we want to have sum scores for each component
#' get_scores(pca)
#'
#' # compare to manually computed sum score for 2nd component, which
#' # consists of items "hp" and "qsec"
#' (mtcars$hp + mtcars$qsec) / 2
#' @return A data frame with subscales, which are average sum scores for all items from each component.
#' @export
get_scores <- function(x, n_items = NULL) {
  subscales <- closest_component(x)
  data_set <- attributes(x)$data_set

  out <- lapply(sort(unique(subscales)), function(.subscale) {
    columns <- names(subscales)[subscales == .subscale]
    items <- data_set[columns]

    if (is.null(n_items)) {
      .n_items <- round(ncol(items) / 2)
    } else {
      .n_items <- n_items
    }

    apply(items, 1, function(i) ifelse(sum(!is.na(i)) >= .n_items, mean(i, na.rm = TRUE), NA))
  })

  out <- as.data.frame(do.call(cbind, out))
  colnames(out) <- sprintf("Component_%i", 1:ncol(out))
  out
}
parameters/R/reduce_parameters.R0000644000176200001440000002265413607421623016440 0ustar liggesusers#' Dimensionality reduction (DR) / Features Reduction
#'
#' This function performs a reduction in the parameter space (the number of variables). It starts by creating a new set of variables, based on a given method (the default method is "PCA", but others are available via the \code{method} argument, such as "cMDS", "DRR" or "ICA"). Then, it names these new dimensions using the original variables that correlate the most with them. For instance, a variable named 'V1_0.97/V4_-0.88' means that the V1 and the V4 variables correlate maximally (with respective coefficients of .97 and -.88) with this dimension. Although this function can be useful in exploratory data analysis, it's best to perform the dimension reduction step in a separate and dedicated stage, as this is a very important process in the data analysis workflow.
#'
#' @inheritParams principal_components
#' @param method The features reduction method. Can be one of 'PCA', 'cMDS', 'DRR', 'ICA' (see the Details section).
#'
#' @details The different methods available are described below:
#'
#' \subsection{Unsupervised Methods}{
#' \itemize{
#'   \item \strong{PCA}: See \code{\link{principal_components}}.
#'   \item \strong{cMDS / PCoA}: See \code{\link{cmds}}.
#'     Classical Multidimensional Scaling (cMDS) takes a set of dissimilarities (i.e., a distance matrix) and returns a set of points such that the distances between the points are approximately equal to the dissimilarities.
#'   \item \strong{DRR}: See \code{\link{DRR}}. Dimensionality Reduction via Regression (DRR) is a very recent technique extending PCA (Laparra et al., 2015). Starting from a rotated PCA, it predicts redundant information from the remaining components using non-linear regression. Some of the most notable advantages of performing PCR are avoidance of multicollinearity between predictors and overfitting mitigation. PCR tends to perform well when the first principal components are enough to explain most of the variation in the predictors. Requires the \pkg{DRR} package to be installed.
#'   \item \strong{ICA}: See \code{\link{ICA}}. Performs an Independent Component Analysis using the FastICA algorithm. Contrary to PCA, which attempts to find uncorrelated sources (through least squares minimization), ICA attempts to find independent sources, i.e., the source space that maximizes the "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each source, which makes it a poor tool for dimensionality reduction. Requires the \pkg{fastICA} package to be installed.
#' }
#' }
#'
#' @references \itemize{
#'   \item Nguyen, L. H., \& Holmes, S. (2019). Ten quick tips for effective dimensionality reduction. PLOS Computational Biology, 15(6).
#'   \item Laparra, V., Malo, J., & Camps-Valls, G. (2015). Dimensionality reduction via regression in hyperspectral imagery. IEEE Journal of Selected Topics in Signal Processing, 9(6), 1026-1036.
#' }
#'
#' @examples
#' out <- reduce_parameters(iris, method = "PCA", n = "max")
#' head(out)
#' @importFrom stats dist
#' @export
reduce_parameters <- function(x, method = "PCA", n = "max", ...) {
  UseMethod("reduce_parameters")
}

#' @rdname reduce_parameters
#' @export
parameters_reduction <- reduce_parameters

#' @export
reduce_parameters.data.frame <- function(x, method = "PCA", n = "max", ...) {
  x <- convert_data_to_numeric(x)

  # N factors
  if (n == "max") {
    nfac <- ncol(x) - 1
  } else {
    nfac <- .get_n_factors(x, n = n, type = "PCA", rotation = "none")
  }

  # compute new features
  if (tolower(method) %in% c("pca", "principal")) {
    features <- principal_components(x, n = nfac, ...)
    features <- as.data.frame(attributes(features)$scores)
  } else if (tolower(method) %in% c("cmds", "pcoa")) {
    features <- cmds(x, n = nfac, ...)
  } else if (tolower(method) %in% c("drr")) {
    features <- DRR(x, n = nfac, ...)
  } else if (tolower(method) %in% c("ica")) {
    features <- ICA(x, n = nfac, ...)
} else { stop("'method' must be one of 'PCA', 'cMDS', 'DRR' or 'ICA'.") } # Get weights / pseudo-loadings (correlations) cormat <- as.data.frame(cor(x = x, y = features)) cormat <- cbind(data.frame(Variable = row.names(cormat)), cormat) weights <- as.data.frame(.sort_loadings(cormat, cols = 2:ncol(cormat))) if (n == "max") { weights <- .filter_loadings(weights, threshold = "max", 2:ncol(weights)) non_empty <- sapply(weights[2:ncol(weights)], function(x) !all(is.na(x))) weights <- weights[c(TRUE, non_empty)] features <- features[, non_empty] weights[is.na(weights)] <- 0 weights <- .filter_loadings(.sort_loadings(weights, cols = 2:ncol(weights)), threshold = "max", 2:ncol(weights)) } # Create varnames varnames <- sapply(weights[2:ncol(weights)], function(x) { name <- weights$Variable[!is.na(x)] weight <- insight::format_value(x[!is.na(x)]) paste0(paste(name, weight, sep = "_"), collapse = "/") }) names(features) <- as.character(varnames) # Attributes attr(features, "loadings") <- weights class(features) <- c("parameters_reduction", class(features)) # Out features } #' @export reduce_parameters.lm <- function(x, method = "PCA", n = "max", ...) { data <- reduce_parameters(convert_data_to_numeric(insight::get_predictors(x, ...), ...), method = method, n = n) y <- data.frame(.row = 1:length(insight::get_response(x))) y[insight::find_response(x)] <- insight::get_response(x) y$.row <- NULL formula <- paste(insight::find_response(x), "~", paste(paste0("`", names(data), "`"), collapse = " + ")) update(x, formula = formula, data = cbind(data, y)) } #' @export reduce_parameters.merMod <- reduce_parameters.lm #' @export principal_components.lm <- function(x, ...) { reduce_parameters(x, method = "PCA", ...) } #' @export principal_components.merMod <- principal_components.lm #' Classical Multidimensional Scaling (cMDS) #' #' Also referred to as principal Coordinates Analysis (PCoA), Classical Multidimensional Scaling (cMDS) takes a set of dissimilarities (i.e., a distance matrix) and returns a set of points such that the distances between the points are approximately equal to the dissimilarities. #' #' @inheritParams principal_components #' @param distance The distance measure to be used. This must be one of "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski". Any unambiguous substring can be given. #' #' @references \itemize{ #' \item Nguyen, L. H., \& Holmes, S. (2019). Ten quick tips for effective dimensionality reduction. PLOS Computational Biology, 15(6). #' } #' #' @examples #' cmds(iris[, 1:4]) #' @importFrom stats dist #' @export cmds <- function(x, n = "all", distance = "euclidean", ...) { n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") d <- stats::dist(x, method = distance) cmd <- stats::cmdscale(d, k = n, eig = TRUE) features <- as.data.frame(cmd$points) names(features) <- paste0("CMDS", 1:ncol(features)) features } #' Dimensionality Reduction via Regression (DRR) #' #' Dimensionality Reduction via Regression (DRR) is a very recent technique extending PCA (Laparra et al., 2015). Starting from a rotated PCA, it predicts redundant information from the remaining components using non-linear regression. Some of the most notable advantages of performing PCR are avoidance of multicollinearity between predictors and overfitting mitigation. PCR tends to perform well when the first principal components are enough to explain most of the variation in the predictors. Requires the \pkg{DRR} package to be installed. 
#'
#' @inheritParams principal_components
#'
#' @references \itemize{
#'   \item Laparra, V., Malo, J., & Camps-Valls, G. (2015). Dimensionality reduction via regression in hyperspectral imagery. IEEE Journal of Selected Topics in Signal Processing, 9(6), 1026-1036.
#' }
#'
#' @examples
#' \donttest{
#' DRR(iris[, 1:4])
#' }
#'
#' @importFrom stats dist
#' @export
DRR <- function(x, n = "all", ...) {
  n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none")
  if (!requireNamespace("DRR", quietly = TRUE)) {
    stop("Package 'DRR' required for this function to work. Please install it by running `install.packages('DRR')`.")
  }
  junk <- utils::capture.output(suppressMessages(rez <- DRR::drr(x, n)))

  features <- as.data.frame(rez$fitted.data)
  names(features) <- paste0("DRR", 1:ncol(features))
  features
}

#' Independent Component Analysis (ICA)
#'
#' Performs an Independent Component Analysis using the FastICA algorithm. Contrary to PCA, which attempts to find uncorrelated sources (through least squares minimization), ICA attempts to find independent sources, i.e., the source space that maximizes the "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each source, which makes it a poor tool for dimensionality reduction. Requires the \pkg{fastICA} package to be installed.
#'
#' @inheritParams principal_components
#'
#'
#' @examples
#' ICA(iris[, 1:4])
#' @export
ICA <- function(x, n = "all", ...) {
  n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none")
  if (!requireNamespace("fastICA", quietly = TRUE)) {
    stop("Package 'fastICA' required for this function to work. Please install it by running `install.packages('fastICA')`.")
  }
  # use the requested number of components (`n`) rather than `ncol(x) - 1`,
  # which ignored the `n` argument
  rez <- fastICA::fastICA(x, n.comp = n)

  features <- as.data.frame(rez$S)
  names(features) <- paste0("ICA", 1:ncol(features))
  features
}
parameters/R/model_parameters.rma.R0000644000176200001440000000676213617756645017065 0ustar liggesusers#' Parameters from Meta-Analysis
#'
#' Extract and compute indices and measures to describe parameters of meta-analysis models.
#'
#' @inheritParams model_parameters.default
#'
#' @examples
#' library(parameters)
#' mydat <- data.frame(
#'   effectsize = c(-0.393, 0.675, 0.282, -1.398),
#'   stderr = c(0.317, 0.317, 0.13, 0.36)
#' )
#' if (require("metafor")) {
#'   model <- rma(yi = effectsize, sei = stderr, method = "REML", data = mydat)
#'   model_parameters(model)
#' }
#'
#' # with subgroups
#' if (require("metafor")) {
#'   data(dat.bcg)
#'   dat <- escalc(
#'     measure = "RR",
#'     ai = tpos,
#'     bi = tneg,
#'     ci = cpos,
#'     di = cneg,
#'     data = dat.bcg
#'   )
#'   dat$alloc <- ifelse(dat$alloc == "random", "random", "other")
#'   model <- rma(yi, vi, mods = ~ alloc, data = dat, digits = 3, slab = author)
#'   model_parameters(model)
#' }
#' @return A data frame of indices related to the model's parameters.
#' @importFrom stats qt pt setNames
#' @export
model_parameters.rma <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, ...) {
  meta_analysis_overall <- .model_parameters_generic(
    model = model,
    ci = ci,
    bootstrap = bootstrap,
    iterations = iterations,
    merge_by = "Parameter",
    standardize = standardize,
    exponentiate = exponentiate,
    ...
  )

  subgroups <- NULL
  group_variable <- NULL

  # subgroup analyses?
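  # Example (from the docs above): for a model like
  # rma(yi, vi, mods = ~ alloc, data = dat), the moderator "alloc" is read
  # from `formula.mods` and its levels are used to label subgroups below.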
  if (!is.null(model$formula.mods)) {
    group_variable <- deparse(model$formula.mods[[2]])[1]
    model_data <- insight::get_data(model)
    if (group_variable %in% colnames(model_data)) {
      subgroups <- sort(unique(model_data[[group_variable]]))
    }
  }

  if (nrow(meta_analysis_overall) > 1 && !is.null(subgroups)) {
    meta_analysis_overall$Subgroup <- subgroups
    meta_analysis_overall$Parameter <- "(Intercept)"
  }

  alpha <- (1 + ci) / 2

  rma_parameters <- if (!is.null(model$slab) && !is.numeric(model$slab)) {
    sprintf("%s", model$slab)
  } else {
    sprintf("Study %i", 1:model[["k"]])
  }

  rma_coefficients <- as.vector(model$yi)
  rma_se <- as.vector(sqrt(model$vi))
  rma_ci_low <- rma_coefficients - rma_se * stats::qt(alpha, df = Inf)
  rma_ci_high <- rma_coefficients + rma_se * stats::qt(alpha, df = Inf)
  rma_statistic <- rma_coefficients / rma_se
  rma_ci_p <- 2 * stats::pt(abs(rma_statistic), df = Inf, lower.tail = FALSE)

  meta_analysis_studies <- data.frame(
    Parameter = rma_parameters,
    Coefficient = rma_coefficients,
    SE = rma_se,
    CI_low = rma_ci_low,
    CI_high = rma_ci_high,
    z = rma_statistic,
    df_error = NA,
    p = rma_ci_p,
    Weight = 1 / as.vector(model$vi),
    stringsAsFactors = FALSE
  )

  # subgroup analyses?
  if (!is.null(subgroups)) {
    meta_analysis_studies$Subgroup <- insight::get_data(model)[[group_variable]]
  }

  original_attributes <- attributes(meta_analysis_overall)
  out <- merge(meta_analysis_studies, meta_analysis_overall, all = TRUE, sort = FALSE)

  # fix intercept name
  out$Parameter[out$Parameter == "(Intercept)"] <- "Overall"

  original_attributes$names <- names(out)
  original_attributes$row.names <- 1:nrow(out)
  original_attributes$pretty_names <- stats::setNames(out$Parameter, out$Parameter)
  attributes(out) <- original_attributes

  # no df
  out$df_error <- NULL

  attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  attr(out, "measure") <- model$measure
  out
}
parameters/R/format_model.R0000644000176200001440000000241413607421270015414 0ustar liggesusers#' Model Name formatting
#'
#' @param model A statistical model.
#'
#' @examples
#' model <- lm(Sepal.Length ~ Species, data = iris)
#' format_model(model)
#' @importFrom insight model_info
#' @export
format_model <- function(model) {
  info <- insight::model_info(model)

  if (all(insight::find_parameters(model, flatten = FALSE) == "(Intercept)")) {
    type <- "constant (intercept-only) "
  } else {
    type <- ""
  }

  if ("Mclust" %in% class(model)) {
    return("Gaussian finite mixture fitted by EM algorithm")
  }

  if (info$is_bayesian) {
    type <- paste0(type, "Bayesian ")
  }

  if (info$is_zero_inflated) {
    type <- paste0(type, "zero-inflated ")
  }
  # TODO: hurdle?

  if (info$is_logit) {
    type <- paste0(type, "logistic ")
  } else if (info$is_probit) {
    type <- paste0(type, "probit ")
  } else if (info$is_linear) {
    type <- paste0(type, "linear ")
  } else {
    type <- paste0(type, "general linear ")
  }

  if (info$is_mixed) {
    type <- paste0(type, "mixed ")
  }

  type <- paste0(type, "model")

  if (grepl("general linear", type)) {
    type <- paste0(
      type,
      " (",
      info$family,
      " family with a ",
      info$link_function,
      " link)"
    )
  }

  type
}
parameters/R/skewness_kurtosis.R0000644000176200001440000001760613612122313016552 0ustar liggesusers#' Compute Skewness and Kurtosis
#'
#' @param x A numeric vector or data.frame.
#' @param na.rm Remove missing values.
#' @param type Type of algorithm for computing skewness. May be one of \code{1} (or \code{"1"}, \code{"I"} or \code{"classic"}), \code{2} (or \code{"2"}, \code{"II"} or \code{"SPSS"} or \code{"SAS"}) or \code{3} (or \code{"3"}, \code{"III"} or \code{"Minitab"}). See 'Details'.
#' @param ... Arguments passed to or from other methods.
#'
#' @details \subsection{Skewness}{
#' Symmetric distributions have a \code{skewness} around zero, while
#' a negative skewness value indicates a "left-skewed" distribution, and a
#' positive skewness value indicates a "right-skewed" distribution. Examples
#' of the relationship between skewness and distributions are:
#' \itemize{
#'   \item Normal distribution (and other symmetric distributions) has a skewness of 0
#'   \item Half-normal distribution has a skewness just below 1
#'   \item Exponential distribution has a skewness of 2
#'   \item Lognormal distribution can have a skewness of any positive value, depending on its parameters
#' }
#' (\cite{https://en.wikipedia.org/wiki/Skewness})
#' }
#' \subsection{Types of Skewness}{
#' \code{skewness()} supports three different methods for estimating skewness, as discussed in \cite{Joanes and Gill (1998)}:
#' \itemize{
#'   \item Type "1" is the "classical" method, which is \code{g1 = (sum((x - mean(x))^3) / n) / (sum((x - mean(x))^2) / n)^1.5}
#'   \item Type "2" first calculates the type-1 skewness, then adjusts the result: \code{G1 = g1 * sqrt(n * (n - 1)) / (n - 2)}. This is what SAS and SPSS usually return
#'   \item Type "3" first calculates the type-1 skewness, then adjusts the result: \code{b1 = g1 * ((1 - 1 / n))^1.5}. This is what Minitab usually returns.
#' }
#' }
#' \subsection{Kurtosis}{
#' The \code{kurtosis} is a measure of "tailedness" of a distribution. A distribution
#' with a kurtosis value of about zero is called "mesokurtic". A kurtosis value
#' larger than zero indicates a "leptokurtic" distribution with \emph{fatter} tails.
#' A kurtosis value below zero indicates a "platykurtic" distribution with \emph{thinner}
#' tails (\cite{https://en.wikipedia.org/wiki/Kurtosis}).
#' }
#' \subsection{Types of Kurtosis}{
#' \code{kurtosis()} supports three different methods for estimating kurtosis, as discussed in \cite{Joanes and Gill (1998)}:
#' \itemize{
#'   \item Type "1" is the "classical" method, which is \code{g2 = n * sum((x - mean(x))^4) / (sum((x - mean(x))^2)^2) - 3}.
#'   \item Type "2" first calculates the type-1 kurtosis, then adjusts the result: \code{G2 = ((n + 1) * g2 + 6) * (n - 1)/((n - 2) * (n - 3))}. This is what SAS and SPSS usually return
#'   \item Type "3" first calculates the type-1 kurtosis, then adjusts the result: \code{b2 = (g2 + 3) * (1 - 1 / n)^2 - 3}. This is what Minitab usually returns.
#' }
#' }
#'
#' @references D. N. Joanes and C. A. Gill (1998). Comparing measures of sample skewness and kurtosis. The Statistician, 47, 183–189.
#'
#' @return Values of skewness or kurtosis.
#' @examples
#' skewness(rnorm(1000))
#' kurtosis(rnorm(1000))
#' @export
skewness <- function(x, na.rm = TRUE, type = "2", ...) {
  UseMethod("skewness")
}

#' @export
skewness.numeric <- function(x, na.rm = TRUE, type = "2", ...) {
  if (na.rm) x <- x[!is.na(x)]
  n <- length(x)
  out <- (sum((x - mean(x))^3) / n) / (sum((x - mean(x))^2) / n)^1.5

  type <- .check_skewness_type(type)

  if (type == "2" && n < 3) {
    warning("Need at least 3 complete observations for type-2-skewness. Using 'type=\"1\"' now.", call. = FALSE)
    type <- "1"
  }

  .skewness <- switch(
    type,
    "1" = out,
    "2" = out * sqrt(n * (n - 1)) / (n - 2),
    "3" = out * ((1 - 1 / n))^1.5
  )

  #
  # out_se <- sqrt((6 * (n - 2)) / ((n + 1) * (n + 3)))
  #
  # .skewness_se <- switch(
  #   type,
  #   "1" = out_se,
  #   "2" = out_se * ((sqrt(n * (n - 1))) / (n - 2)),
  #   "3" = out_se * (((n - 1) / n)^1.5),
  # )
  #
  # attr(.skewness, "SE") <- .skewness_se
  # class(.skewness) <- unique(c("parameters_skewness", class(.skewness)))
  .skewness
}

#' @export
skewness.matrix <- function(x, na.rm = TRUE, type = "2", ...) {
  apply(x, 2, skewness, na.rm = na.rm, type = type)
}

#' @export
skewness.data.frame <- function(x, na.rm = TRUE, type = "2", ...) {
  sapply(x, skewness, na.rm = na.rm, type = type)
  # out <- lapply(x, skewness, na.rm = na.rm, type = type)
  # out <- data.frame(
  #   Parameter = names(out),
  #   Skewness = unname(sapply(out, as.vector)),
  #   SE = unname(sapply(out, function(i) attributes(i)$SE)),
  #   stringsAsFactors = FALSE
  # )
  #
  # class(out) <- c("parameters_skewness", "data.frame")
  # out
}

#' @export
skewness.default <- function(x, na.rm = TRUE, type = "2", ...) {
  skewness(as.vector(x), na.rm = na.rm, type = type)
}

#' @rdname skewness
#' @export
kurtosis <- function(x, na.rm = TRUE, type = "2", ...) {
  UseMethod("kurtosis")
}

#' @export
kurtosis.numeric <- function(x, na.rm = TRUE, type = "2", ...) {
  if (na.rm) x <- x[!is.na(x)]
  n <- length(x)
  out <- n * sum((x - mean(x))^4) / (sum((x - mean(x))^2)^2)

  type <- .check_skewness_type(type)

  if (type == "2" && n < 4) {
    warning("Need at least 4 complete observations for type-2-kurtosis. Using 'type=\"1\"' now.", call. = FALSE)
    type <- "1"
  }

  .kurtosis <- switch(
    type,
    "1" = out - 3,
    "2" = ((n + 1) * (out - 3) + 6) * (n - 1) / ((n - 2) * (n - 3)),
    "3" = out * (1 - 1 / n)^2 - 3
  )

  # out_se <- sqrt((24 * n * (n - 2) * (n - 3)) / (((n + 1)^2) * (n + 3) * (n + 5)))
  #
  # .kurtosis_se <- switch(
  #   type,
  #   "1" = out_se,
  #   "2" = out_se * (((n - 1) * (n + 1)) / ((n - 2) * (n - 3))),
  #   "3" = out_se * ((n - 1) / n)^2
  # )
  #
  # attr(.kurtosis, "SE") <- .kurtosis_se
  # class(.kurtosis) <- unique(c("parameters_kurtosis", class(.kurtosis)))
  .kurtosis
}

#' @export
kurtosis.matrix <- function(x, na.rm = TRUE, type = "2", ...) {
  apply(x, 2, kurtosis, na.rm = na.rm, type = type)
}

#' @export
kurtosis.data.frame <- function(x, na.rm = TRUE, type = "2", ...) {
  sapply(x, kurtosis, na.rm = na.rm, type = type)
  # out <- lapply(x, kurtosis, na.rm = na.rm, type = type)
  # out <- data.frame(
  #   Parameter = names(out),
  #   Kurtosis = unname(sapply(out, as.vector)),
  #   SE = unname(sapply(out, function(i) attributes(i)$SE)),
  #   stringsAsFactors = FALSE
  # )
  #
  # class(out) <- c("parameters_kurtosis", "data.frame")
  # out
}

#' @export
kurtosis.default <- function(x, na.rm = TRUE, type = "2", ...) {
  kurtosis(as.vector(x), na.rm = na.rm, type = type)
}

.check_skewness_type <- function(type) {
  # convenience
  if (is.numeric(type)) type <- as.character(type)

  if (is.null(type) || is.na(type) || !(type %in% c("1", "2", "3", "I", "II", "III", "classic", "SPSS", "SAS", "Minitab"))) {
    warning("'type' must be a character value from \"1\" to \"3\". Using 'type=\"2\"' now.", call. = FALSE)
    type <- "2"
  }

  switch(
    type,
    "1" = ,
    "I" = ,
    "classic" = "1",
    "2" = ,
    "II" = ,
    "SPSS" = ,
    "SAS" = "2",
    "3" = ,
    "III" = ,
    "Minitab" = "3"
  )
}

#'
#' @export
#' print.parameters_skewness <- function(x, digits = 3, ...) {
#'   if (is.numeric(x)) {
#'     cat(sprintf("Skewness (SE): %.*f (%.*f)\n", digits, as.vector(x), digits, attributes(x)$SE))
#'   } else if (is.data.frame(x)) {
#'     cat(insight::format_table(x, digits = digits))
#'   } else {
#'     NextMethod()
#'   }
#' }
#'
#'
#'
#' @export
#' print.parameters_kurtosis <- function(x, digits = 3, ...) {
#'   if (is.numeric(x)) {
#'     cat(sprintf("Kurtosis (SE): %.*f (%.*f)\n", digits, as.vector(x), digits, attributes(x)$SE))
#'   } else if (is.data.frame(x)) {
#'     cat(insight::format_table(x, digits = digits))
#'   } else {
#'     NextMethod()
#'   }
#' }
parameters/R/model_parameters.FactoMineR.R0000644000176200001440000000445113563607352020270 0ustar liggesusers#' @rdname model_parameters.principal
#' @export
model_parameters.PCA <- function(model, sort = FALSE, threshold = NULL, labels = NULL, ...) {
  loadings <- as.data.frame(model$var$coord)
  n <- model$call$ncp

  # Get summary
  eig <- as.data.frame(model$eig[1:n, ])
  data_summary <- .data_frame(
    Component = names(loadings),
    Eigenvalues = eig$eigenvalue,
    Variance = eig$`percentage of variance` / 100,
    Variance_Cumulative = eig$`cumulative percentage of variance` / 100
  )
  data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance)

  # Format
  loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings)
  row.names(loadings) <- NULL

  # Labels
  if (!is.null(labels)) {
    loadings$Label <- labels
    loadings <- loadings[c("Variable", "Label", names(loadings)[!names(loadings) %in% c("Variable", "Label")])]
    loading_cols <- 3:(n + 2)
  } else {
    loading_cols <- 2:(n + 1)
  }

  loadings$Complexity <- (apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 / apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4))

  # Add attributes
  attr(loadings, "summary") <- data_summary
  attr(loadings, "model") <- model
  attr(loadings, "rotation") <- "none"
  attr(loadings, "scores") <- as.data.frame(model$ind$coord)
  attr(loadings, "additional_arguments") <- list(...)
  attr(loadings, "n") <- n
  attr(loadings, "loadings_columns") <- loading_cols

  # Sorting
  if (isTRUE(sort)) {
    loadings <- .sort_loadings(loadings)
  }

  # Replace by NA all cells below threshold
  if (!is.null(threshold)) {
    loadings <- .filter_loadings(loadings, threshold = threshold)
  }

  # Add some more attributes
  attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold, loadings_columns = loading_cols)

  # add class-attribute for printing
  if ("PCA" %in% class(model)) {
    attr(loadings, "type") <- "pca"
    class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings)))
  } else if ("FAMD" %in% class(model)) {
    attr(loadings, "type") <- "fa"
    class(loadings) <- unique(c("parameters_efa", "see_parameters_efa", class(loadings)))
  }

  loadings
}

#' @export
model_parameters.FAMD <- model_parameters.PCA
parameters/R/equivalence_test.R0000644000176200001440000001051413541761044016307 0ustar liggesusers#' @importFrom bayestestR equivalence_test
#' @export
bayestestR::equivalence_test

#' @title Equivalence test
#'
#' @description Compute the equivalence test for frequentist models.
#'
#' @param x A statistical model.
#' @param range The range of practical equivalence of an effect. May be \code{"default"},
#'   to automatically define this range based on properties of the model's data.
#' @param ci Confidence Interval (CI) level. Defaults to 0.95 (95\%).
#' @param verbose Toggle off warnings.
#' @param ... Arguments passed to or from other methods.
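#' @details The decision rule compares the parameter's confidence interval
#'   to the ROPE: an interval completely outside the ROPE leads to
#'   "Rejected", an interval completely inside to "Accepted", and anything
#'   in between to "Undecided", with the proportion of the interval covered
#'   by the ROPE reported. A custom range may be supplied, e.g.
#'   \code{equivalence_test(m, range = c(-0.1, 0.1))}.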
#' #' @seealso For more details, see \code{\link[bayestestR:equivalence_test]{equivalence_test}}. #' #' @return A data frame. #' @examples #' m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) #' equivalence_test(m) #' @export equivalence_test.lm <- function(x, range = "default", ci = .95, verbose = TRUE, ...) { .equivalence_test_frequentist(x, range, ci, verbose, ...) } #' @export equivalence_test.glm <- equivalence_test.lm #' @export equivalence_test.merMod <- equivalence_test.lm #' @export equivalence_test.glmmTMB <- equivalence_test.lm #' @export equivalence_test.MixMod <- equivalence_test.lm #' @importFrom stats confint #' @importFrom bayestestR equivalence_test rope_range #' @keywords internal .equivalence_test_frequentist <- function(x, range = "default", ci = .95, verbose = TRUE, ...) { if (all(range == "default")) { range <- bayestestR::rope_range(x) } else if (!all(is.numeric(range)) | length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } conf_int <- .clean_confint(as.data.frame(t(stats::confint(x, level = ci, method = "Wald", estimate = FALSE)))) l <- lapply( conf_int, .equivalence_test_numeric, range_rope = range, verbose = verbose ) dat <- do.call(rbind, l) out <- data.frame( Parameter = names(l), CI = ci, dat, stringsAsFactors = FALSE ) class(out) <- c("equivalence_test_lm", class(out)) attr(out, "rope") <- range out } #' @keywords internal .equivalence_test_numeric <- function(range_ci, range_rope, verbose) { if (min(range_ci) > max(range_rope) || max(range_ci) < min(range_rope)) { decision <- "Rejected" coverage <- 0 } else if (max(range_ci) <= max(range_rope) && min(range_ci) >= min(range_rope)) { decision <- "Accepted" coverage <- 1 } else { diff_rope <- abs(diff(range_rope)) diff_ci <- abs(diff(range_ci)) decision <- "Undecided" if (min(range_rope) >= min(range_ci) && max(range_rope) <= max(range_ci)) { coverage <- diff_rope / diff_ci } else if (min(range_ci) <= min(range_rope)) { coverage <- abs(diff(c(min(range_rope), max(range_ci)))) / diff_ci } else { coverage <- abs(diff(c(min(range_ci), max(range_rope)))) / diff_ci } } data.frame( CI_low = range_ci[1], CI_high = range_ci[2], ROPE_Percentage = coverage, ROPE_Equivalence = decision, stringsAsFactors = FALSE ) } #' @importFrom insight print_color #' @export print.equivalence_test_lm <- function(x, digits = 2, ...) 
{ insight::print_color("# Test for Practical Equivalence\n\n", "blue") .rope <- attr(x, "rope", exact = TRUE) cat(sprintf(" ROPE: [%.*f %.*f]\n\n", digits, .rope[1], digits, .rope[2])) # find the longest CI-value, so we can align the brackets in the ouput x$CI_low <- sprintf("%.*f", digits, x$CI_low) x$CI_high <- sprintf("%.*f", digits, x$CI_high) maxlen_low <- max(nchar(x$CI_low)) maxlen_high <- max(nchar(x$CI_high)) x$ROPE_Percentage <- sprintf("%.*f %%", digits, 100 * x$ROPE_Percentage) x$conf.int <- sprintf("[%*s %*s]", maxlen_low, x$CI_low, maxlen_high, x$CI_high) CI <- unique(x$CI) keep.columns <- c("CI", "Parameter", "ROPE_Equivalence", "ROPE_Percentage", "conf.int") x <- x[, intersect(keep.columns, colnames(x))] colnames(x)[which(colnames(x) == "ROPE_Equivalence")] <- "H0" colnames(x)[which(colnames(x) == "ROPE_Percentage")] <- "inside ROPE" for (i in CI) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] colnames(xsub)[ncol(xsub)] <- sprintf("%i%% CI", round(100 * i)) print.data.frame(xsub, digits = digits, row.names = FALSE) cat("\n") } } parameters/R/dof_ml1.R0000644000176200001440000000426613615056553014303 0ustar liggesusers#' @rdname p_value_ml1 #' @importFrom insight get_random find_predictors find_parameters get_data has_intercept #' @export dof_ml1 <- function(model) { if (!insight::model_info(model)$is_mixed) { stop("Model must be a mixed model.") } re_groups <- insight::get_random(model) parameters <- insight::find_parameters(model, effects = "fixed")[["conditional"]] predictors <- insight::find_predictors(model, effects = "fixed", component = "conditional", flatten = TRUE) predictors <- setdiff(predictors, names(re_groups)) model_data <- insight::get_data(model)[predictors] has_intcp <- insight::has_intercept(model) term_assignment <- .find_term_assignment(model_data, predictors, parameters) ddf <- sapply(model_data, function(.x) { min(sapply(re_groups, .get_df_ml1_approx, x = .x)) }) ltab <- table(ddf) ltab <- list(m = as.integer(names(ltab)), l = as.vector(ltab)) ltab$ddf <- ltab$m - ltab$l if (has_intcp) ltab$ddf <- ltab$ddf - 1 ii <- match(ddf, ltab$m) ddf[] <- ltab$ddf[ii] out <- numeric(length = length(parameters)) ## TODO number of items to replace is not a multiple of replacement length suppressWarnings(out[which("(Intercept)" != parameters)] <- ddf[term_assignment]) if (has_intcp) out[which("(Intercept)" == parameters)] <- min(ddf) stats::setNames(out, parameters) } #' @importFrom stats ave var .get_df_ml1_approx <- function(x, g) { m <- nlevels(g) n <- length(x) x <- as.numeric(x) x.bar <- stats::ave(x, g) var.within <- stats::var(x - x.bar) var.between <- stats::var(x.bar) if (var.within >= var.between) { return(n) } else { return(m) } } #' @importFrom stats na.omit #' @importFrom insight clean_names .find_term_assignment <- function(model_data, predictors, parameters) { parms <- unlist(lapply(1:length(predictors), function(i) { p <- predictors[i] if (is.factor(model_data[[p]])) { ps <- paste0(p, levels(model_data[[p]])) names(ps)[1:length(ps)] <- i ps } else { names(p) <- i p } })) stats::na.omit(as.numeric(names(parms)[match(insight::clean_names(parameters), parms)])) } parameters/R/print.parameters_model.R0000644000176200001440000002245013617621360017427 0ustar liggesusers#' @title Print model parameters #' @name print #' #' @description A \code{print()}-method for objects from \code{\link[=model_parameters]{model_parameters()}}. #' #' @param x An object returned by \code{\link[=model_parameters]{model_parameters()}}. 
#' @param split_components Logical, if \code{TRUE} (default), for models with
#'   multiple components (zero-inflation, smooth terms, ...), each component is
#'   printed in a separate table. If \code{FALSE}, model parameters are printed
#'   in a single table and a \code{Component} column is added to the output.
#' @param select Character vector (or numeric index) of column names that should
#'   be printed. If \code{NULL} (default), all columns are printed.
#' @inheritParams parameters_table
#' @return \code{NULL}
#'
#' @examples
#' library(parameters)
#' if (require("glmmTMB")) {
#'   model <- glmmTMB(
#'     count ~ spp + mined + (1 | site),
#'     ziformula = ~mined,
#'     family = poisson(),
#'     data = Salamanders
#'   )
#'   mp <- model_parameters(model)
#'
#'   print(mp, pretty_names = FALSE)
#'
#'   print(mp, split_components = FALSE)
#'
#'   print(mp, select = c("Parameter", "Coefficient", "CI_low", "CI_high"))
#' }
#' @importFrom insight format_table
#' @export
print.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, ...) {
  res <- attributes(x)$details

  if (!is.null(select)) {
    if (is.numeric(select)) select <- colnames(x)[select]
    select <- union(select, c("Component", "Effects", "Response", "Subgroup"))
    to_remove <- setdiff(colnames(x), select)
    x[to_remove] <- NULL
  }

  if (!is.null(attributes(x)$title)) {
    insight::print_color(paste0("# ", attributes(x)$title, "\n\n"), "blue")
  } else if (!is.null(res)) {
    insight::print_color("# Fixed Effects\n\n", "blue")
  }

  # For Bayesian models, we need to prettify parameter names here...
  mc <- attributes(x)$model_class
  cp <- attributes(x)$cleaned_parameters
  if (!is.null(mc) && !is.null(cp) && mc %in% c("stanreg", "stanmvreg", "brmsfit")) {
    x$Parameter <- cp
    pretty_names <- FALSE
  }

  split_by <- ""
  split_by <- c(split_by, ifelse("Component" %in% names(x) && length(unique(x$Component)) > 1, "Component", ""))
  split_by <- c(split_by, ifelse("Effects" %in% names(x) && length(unique(x$Effects)) > 1, "Effects", ""))
  split_by <- c(split_by, ifelse("Response" %in% names(x) && length(unique(x$Response)) > 1, "Response", ""))
  split_by <- c(split_by, ifelse("Subgroup" %in% names(x) && length(unique(x$Subgroup)) > 1, "Subgroup", ""))

  split_by <- split_by[nchar(split_by) > 0]

  if (split_components && !is.null(split_by) && length(split_by)) {
    .print_model_parms_components(x, pretty_names, split_column = split_by, ...)
  } else {
    formatted_table <- parameters_table(x, pretty_names = pretty_names, ...)
    cat(insight::format_table(formatted_table))
  }

  # print summary for random effects
  if (!is.null(res)) {
    .print_random_parameters(res, digits = attributes(x)$digits)
  }
}

#' @export
print.parameters_random <- function(x, digits = 2, ...)
{
  .print_random_parameters(x, digits = digits)
}

#' @keywords internal
.print_random_parameters <- function(random_params, digits = 2) {
  insight::print_color("\n# Random Effects\n\n", "blue")

  # format values
  random_params$Value <- format(sprintf("%g", round(random_params$Value, digits = digits)), justify = "right")

  # create summary-information for each component
  random_params$Line <- ""
  random_params$Term[is.na(random_params$Term)] <- ""

  non_empty <- random_params$Term != "" & random_params$Type != ""
  random_params$Line[non_empty] <- sprintf("%s (%s)", random_params$Type[non_empty], random_params$Term[non_empty])

  non_empty <- random_params$Term != "" & random_params$Type == ""
  random_params$Line[non_empty] <- sprintf("%s", random_params$Term[non_empty])

  # final fix, indentation
  random_params$Line <- sprintf(" %s", format(random_params$Line))
  max_len <- max(nchar(random_params$Line)) + 2

  out <- split(random_params, factor(random_params$Description, levels = unique(random_params$Description)))

  for (i in out) {
    if ("Within-Group Variance" %in% i$Description) {
      insight::print_color(format("Within-Group Variance", width = max_len), color = "blue")
      cat(sprintf("%s\n", i$Value))
    } else if ("Between-Group Variance" %in% i$Description) {
      insight::print_color("Between-Group Variance\n", "blue")
      for (j in 1:nrow(i)) {
        cat(sprintf("%s %s\n", i$Line[j], i$Value[j]))
      }
    } else if ("Correlations" %in% i$Description) {
      insight::print_color("Correlations\n", "blue")
      for (j in 1:nrow(i)) {
        cat(sprintf("%s %s\n", i$Line[j], i$Value[j]))
      }
    } else if ("N" %in% i$Description) {
      insight::print_color("N (groups per factor)\n", "blue")
      for (j in 1:nrow(i)) {
        cat(sprintf(" %s%s\n", format(i$Term[j], width = max_len - 2), i$Value[j]))
      }
    } else if ("Observations" %in% i$Description) {
      insight::print_color(format("Observations", width = max_len), color = "blue")
      cat(sprintf("%s\n", i$Value))
    }
  }
}

#' @keywords internal
.print_model_parms_components <- function(x, pretty_names, split_column = "Component", ...) {

  # check if user supplied digits attributes
  digits <- attributes(x)$digits
  ci_digits <- attributes(x)$ci_digits
  p_digits <- attributes(x)$p_digits

  is_ordinal_model <- attributes(x)$ordinal_model
  if (is.null(is_ordinal_model)) is_ordinal_model <- FALSE

  # make sure we have correct order of levels from split-factor
  x[split_column] <- lapply(x[split_column], function(i) {
    if (!is.factor(i)) i <- factor(i, levels = unique(i))
    i
  })

  # set up split-factor
  if (length(split_column) > 1) {
    split_by <- lapply(split_column, function(i) x[[i]])
  } else {
    split_by <- list(x[[split_column]])
  }
  names(split_by) <- split_column

  # make sure we have correct sorting here...
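  # split into one table per combination of the split factors, e.g. one
  # table per model component ("conditional", "zero_inflated", ...), effects
  # type, or response level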
  tables <- split(x, f = split_by)

  # sanity check - only preserve tables with any data in data frames
  tables <- tables[sapply(tables, nrow) > 0]

  for (type in names(tables)) {

    # Don't print Component column
    for (i in split_column) {
      tables[[type]][[i]] <- NULL
    }

    # Smooth terms statistics
    if ("t / F" %in% names(tables[[type]])) {
      if (type == "smooth_terms") {
        names(tables[[type]])[names(tables[[type]]) == "t / F"] <- "F"
      }
      if (type == "conditional") {
        names(tables[[type]])[names(tables[[type]]) == "t / F"] <- "t"
      }
    }

    if ("z / Chisq" %in% names(tables[[type]])) {
      if (type == "smooth_terms") {
        names(tables[[type]])[names(tables[[type]]) == "z / Chisq"] <- "Chisq"
      }
      if (type == "conditional") {
        names(tables[[type]])[names(tables[[type]]) == "z / Chisq"] <- "z"
      }
    }

    # Don't print se and ci if all are missing
    if (all(is.na(tables[[type]]$SE))) tables[[type]]$SE <- NULL
    if (all(is.na(tables[[type]]$CI_low))) tables[[type]]$CI_low <- NULL
    if (all(is.na(tables[[type]]$CI_high))) tables[[type]]$CI_high <- NULL

    # Don't print if empty col
    tables[[type]][sapply(tables[[type]], function(x) {
      all(x == "") | all(is.na(x))
    })] <- NULL

    attr(tables[[type]], "digits") <- digits
    attr(tables[[type]], "ci_digits") <- ci_digits
    attr(tables[[type]], "p_digits") <- p_digits

    formatted_table <- parameters_table(tables[[type]], pretty_names = pretty_names, ...)

    component_name <- switch(
      type,
      "mu" = ,
      "fixed" = ,
      "conditional" = "Fixed Effects",
      "random" = "Random Effects",
      "conditional.fixed" = "Fixed Effects (Count Model)",
      "conditional.random" = "Random Effects (Count Model)",
      "zero_inflated" = "Zero-Inflated",
      "zero_inflated.fixed" = "Fixed Effects (Zero-Inflated Model)",
      "zero_inflated.random" = "Random Effects (Zero-Inflated Model)",
      "smooth_sd" = "Smooth Terms (SD)",
      "smooth_terms" = "Smooth Terms",
      "sigma" = "Sigma",
      "Correlation" = "Correlation",
      "Loading" = "Loading",
      "scale" = ,
      "scale.fixed" = "Scale Parameters",
      "extra" = ,
      "extra.fixed" = "Extra Parameters",
      "nu" = "Nu",
      "tau" = "Tau",
      "precision" = ,
      "precision." = "Precision",
      type
    )

    if ("DirichletRegModel" %in% attributes(x)$model_class) {
      if (grepl("^conditional\\.", component_name) || split_column == "Response") {
        s1 <- "Response level:"
        s2 <- gsub("^conditional\\.(.*)", "\\1", component_name)
      } else {
        s1 <- component_name
        s2 <- ""
      }
    } else if (length(split_column) > 1) {
      s1 <- component_name
      s2 <- ""
    } else if (split_column == "Response" && is_ordinal_model) {
      s1 <- "Response level:"
      s2 <- component_name
    } else if (split_column == "Subgroup") {
      s1 <- component_name
      s2 <- ""
    } else {
      s1 <- component_name
      s2 <- split_column
    }

    # Print
    insight::print_color(sprintf("# %s %s\n\n", s1, tolower(s2)), "blue")
    cat(insight::format_table(formatted_table))
    cat("\n")
  }
}
parameters/R/model_parameters.BayesFM.R0000644000176200001440000000743713607420417017563 0ustar liggesusers#' Parameters from Bayesian Exploratory Factor Analysis
#'
#' Format Bayesian Exploratory Factor Analysis objects from the \pkg{BayesFM} package.
#'
#' @param model Bayesian EFA created by \code{BayesFM::befa}.
#' @inheritParams principal_components
#' @inheritParams bayestestR::describe_posterior
#' @param ... Arguments passed to or from other methods.
#'
#'
#' @examples
#' library(parameters)
#' \donttest{
#' library(BayesFM)
#' efa <- BayesFM::befa(mtcars, iter = 1000)
#' results <- model_parameters(efa, sort = TRUE)
#'
#' results
#' attributes(results)$loadings_long
#' efa_to_cfa(results)
#' }
#'
#' @return A data frame of loadings.
#' #' @export model_parameters.befa <- function(model, sort = FALSE, centrality = "median", dispersion = FALSE, ci = .89, ci_method = "hdi", test = NULL, ...) { if (!attr(model, "post.column.switch") | !attr(model, "post.sign.switch")) { if (!requireNamespace("BayesFM", quietly = TRUE)) { stop("Package 'BayesFM' required for this function to work. Please install it by running `install.packages('BayesFM')`.") } if (!attr(model, "post.column.switch")) model <- BayesFM::post.column.switch(model) if (!attr(model, "post.sign.switch")) model <- BayesFM::post.sign.switch(model) } loadings <- as.data.frame(model$alpha) names(loadings) <- gsub("alpha:", "", names(loadings)) loadings <- stats::reshape( loadings, direction = "long", varying = list(names(loadings)), sep = "_", timevar = "Variable", v.names = "Loading", idvar = "Draw", times = names(loadings) ) components <- as.data.frame(model$dedic) names(components) <- gsub("dedic:", "", names(components)) components <- stats::reshape( components, direction = "long", varying = list(names(components)), sep = "_", timevar = "Variable", v.names = "Component", idvar = "Draw", times = names(components) ) loadings <- merge(components, loadings) # Compute posterior by dedic long_loadings <- data.frame() for (var in unique(loadings$Variable)) { for (comp in unique(loadings$Component)) { chunk <- loadings[loadings$Variable == var & loadings$Component == comp, ] if (nrow(chunk) == 0) { rez <- bayestestR::describe_posterior(loadings$Loading, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, ...) rez[1, ] <- NA } else { rez <- bayestestR::describe_posterior(chunk$Loading, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, ...) } long_loadings <- rbind( long_loadings, cbind(data.frame(Component = comp, Variable = var), rez) ) } } long_loadings$Component <- paste0("F", long_loadings$Component) # Clean long_loadings$Parameter <- NULL if ("CI" %in% names(long_loadings) && length(unique(na.omit(long_loadings$CI))) == 1) { long_loadings$CI <- NULL } long_loadings <- long_loadings[long_loadings$Component != 0, ] loadings <- .wide_loadings(long_loadings, loadings_columns = names(long_loadings)[3], component_column = "Component", variable_column = "Variable") # Add attributes attr(loadings, "model") <- model attr(loadings, "additional_arguments") <- list(...) attr(loadings, "n") <- length(unique(long_loadings$Component)) attr(loadings, "loadings_columns") <- names(loadings)[2:ncol(loadings)] attr(loadings, "ci") <- ci # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Add some more attributes long_loadings <- na.omit(long_loadings) row.names(long_loadings) <- NULL attr(loadings, "loadings_long") <- long_loadings # add class-attribute for printing class(loadings) <- c("parameters_efa", class(loadings)) loadings } parameters/R/plot.R0000644000176200001440000000427513616053306013732 0ustar liggesusers#' @export plot.parameters_sem <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot SEM and CFA graphs. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.parameters_model <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot model parameters. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.parameters_simulate <- function(x, ...) 
{ if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot point-estimates. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.n_factors <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot point-estimates. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.parameters_distribution <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot distributions. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.n_clusters <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot point-estimates. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.parameters_pca <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot PCA. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.parameters_efa <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot EFA Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.cluster_analysis <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot results from cluster analysis. Please install it by running `install.packages('see')`.") } NextMethod() } parameters/R/model_parameters.wbm.R0000644000176200001440000000130213614227035017047 0ustar liggesusers#' @inheritParams model_parameters.merMod #' @export model_parameters.wbm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, exponentiate = FALSE, details = FALSE, ...) { out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = NULL, exponentiate = exponentiate, robust = FALSE, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) if (isTRUE(details)) { attr(out, "details") <- .randomeffects_summary(model) } out } #' @export model_parameters.wbgee <- model_parameters.wbm parameters/R/standardize_names.R0000644000176200001440000000636313602404207016442 0ustar liggesusers#' Standardize column names #' #' Standardize column names from data frames, in particular objects returned from #' \code{\link[=model_parameters]{model_parameters()}}, so column names are #' consistent and the same for any model object. #' #' @param data A data frame. Currently, only objects from \code{\link[=model_parameters]{model_parameters()}} are accepted. #' @param style Standardization can either be based on the naming conventions from the easystats project, or on \pkg{broom}'s naming scheme. #' @param ... Currently not used. #' #' @return A data frame, with standardized column names. #' #' @details This method is in particular useful for package developers or users #' who use \code{\link[=model_parameters]{model_parameters()}} in their own #' code or functions to retrieve model parameters for further processing. As #' \code{model_parameters()} returns a data frame with varying column names #' (depending on the input), accessing the required information is probably #' not quite straightforward. In such cases, \code{standardize_names()} can #' be used to get consistent, i.e. 
always the same column names, no matter #' what kind of model was used in \code{model_parameters()}. #' \cr \cr #' For \code{style = "broom"}, column names are renamed to match \pkg{broom}'s #' naming scheme, i.e. \code{Parameter} is renamed to \code{term}, \code{Coefficient} #' becomes \code{estimate} and so on. #' #' @examples #' library(parameters) #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' mp <- model_parameters(model) #' #' as.data.frame(mp) #' standardize_names(mp) #' standardize_names(mp, style = "broom") #' @export standardize_names <- function(data, ...) { UseMethod("standardize_names") } #' @export standardize_names.default <- function(data, ...) { insight::print_color(sprintf("Objects of class '%s' are currently not supported.\n", class(data)[1]), "red") } #' @rdname standardize_names #' @export standardize_names.parameters_model <- function(data, style = c("easystats", "broom"), ...) { style <- match.arg(style) .standardize_names(data, style, ...) } .standardize_names <- function(data, style, ...) { cn <- colnames(data) if (style == "easystats") { cn[cn %in% c("t", "z", "F", "chisq", "t / F", "z / Chisq")] <- "Statistic" cn[cn %in% c("Median", "Mean", "MAP")] <- "Coefficient" cn[cn %in% c("df_residual", "df_error")] <- "df" } else { # easy replacements cn[cn == "Parameter"] <- "term" cn[cn == "SE"] <- "std.error" cn[cn == "SD"] <- "std.dev" cn[cn == "p"] <- "p.value" cn[cn == "BF"] <- "bayes.factor" cn[cn == "Component"] <- "component" cn[cn == "Effects"] <- "effects" cn[cn == "Response"] <- "response" # more sophisticated replacements cn[cn %in% c("df_residual", "df_error")] <- "df.error" cn[cn %in% c("Coefficient", "Std_Coefficient", "Median", "Mean", "MAP")] <- "estimate" cn[cn %in% c("t", "z", "F", "chisq", "t / F", "z / Chisq")] <- "statistic" # fancy regex replacements cn <- gsub("^CI_low", "conf.low", cn) cn <- gsub("^CI_high", "conf.high", cn) # lowercase for everything cn <- tolower(cn) } colnames(data) <- cn as.data.frame(data) } parameters/R/model_parameters.kmeans.R0000644000176200001440000000534713607420373017557 0ustar liggesusers#' Parameters from Cluster Models (k-means, ...) #' #' Format cluster models obtained for example by \code{\link{kmeans}}. #' #' @param model Cluster model. #' @param ... Arguments passed to or from other methods. #' #' @examples #' library(parameters) #' #' model <- kmeans(iris[1:4], centers = 3) #' model_parameters(model) #' @export model_parameters.kmeans <- function(model, ...) { params <- cbind( data.frame( Cluster = row.names(model$centers), n_Obs = model$size, Sum_Squares = model$withinss ), model$centers ) # Long means means <- .long_loadings(params, loadings_columns = 4:ncol(params)) means <- means[c("Cluster", "Loading", "Component")] names(means) <- c("Cluster", "Mean", "Variable") attr(params, "variance") <- model$betweenss / model$totss attr(params, "means") <- means attr(params, "model") <- model attr(params, "iterations") <- model$iter attr(params, "scores") <- model$cluster attr(params, "type") <- "kmeans" class(params) <- c("parameters_clusters", class(params)) params } #' @importFrom insight format_table #' @export print.parameters_clusters <- function(x, digits = 2, ...) { insight::print_color("# K-means Cluster Means", "blue") cat("\n\n") insight::print_colour(.text_components_variance(x), "yellow") cat("\n\n") cat(insight::format_table(x, digits = digits, ...)) invisible(x) } #' @export summary.parameters_clusters <- function(object, ...) 
{
  object[1:3]
}

#' @importFrom stats predict
#' @export
predict.parameters_clusters <- function(object, newdata = NULL, names = NULL, ...) {
  if (is.null(newdata)) {
    out <- attributes(object)$scores
  } else {
    out <- stats::predict(attributes(object)$model, newdata = newdata, ...)
  }

  # Add labels
  if (!is.null(names)) {

    # List
    if (is.list(names)) {
      out <- as.factor(out)
      for (i in names(names)) {
        levels(out)[levels(out) == i] <- names[[i]]
      }

      # Vector
    } else if (is.character(names)) {
      out <- names[as.numeric(out)]
    } else {
      stop("'names' must be a character vector or a list.")
    }
    out <- as.character(out)
  }
  out
}

#' @export
predict.kmeans <- function(object, newdata = NULL, ...) {
  if (is.null(newdata)) {
    return(object$cluster)
  }

  # compute squared euclidean distance from each sample to each cluster center
  centers <- object$centers
  sumsquares_by_center <- apply(centers, 1, function(x) {
    colSums((t(newdata) - x)^2)
  })
  if (is.null(nrow(sumsquares_by_center))) {
    as.vector(which.min(sumsquares_by_center))
  } else {
    as.vector(apply(as.data.frame(sumsquares_by_center), 1, which.min))
  }
}
parameters/R/format_order.R0000644000176200001440000000245213534252567015433 0ustar liggesusers#' Order (first, second, ...) formatting
#'
#' Format order.
#'
#' @param order Value or vector of orders.
#' @param textual Return number as words. If \code{FALSE}, will run \code{\link[insight]{format_value}}.
#' @inheritParams format_number
#'
#' @return A formatted string.
#' @examples
#' format_order(2)
#' format_order(8)
#' format_order(25, textual = FALSE)
#' @importFrom insight format_value
#' @importFrom utils tail
#' @export
format_order <- function(order, textual = TRUE, ...) {
  if (textual) {
    order <- format_number(order)
    parts <- unlist(strsplit(order, " ", fixed = TRUE))
    parts[length(parts)] <- switch(utils::tail(parts, 1),
      "one" = "first",
      "two" = "second",
      "three" = "third",
      "four" = "fourth",
      "five" = "fifth",
      "six" = "sixth",
      "seven" = "seventh",
      "eight" = "eighth",
      "nine" = "ninth"
    )
    out <- paste(parts, collapse = " ")
  } else {
    number <- insight::format_value(order, digits = 0, ...)
    last <- substr(number, nchar(number), nchar(number))
    out <- paste0(number, switch(last,
      "1" = "st",
      "2" = "nd",
      "3" = "rd",
      "4" = "th",
      "5" = "th",
      "6" = "th",
      "7" = "th",
      "8" = "th",
      "9" = "th"
    ))
  }
  out
}
parameters/R/check_clusterstructure.R0000644000176200001440000001115313607421047017545 0ustar liggesusers#' Check suitability of data for clustering
#'
#' This checks whether the data is appropriate for clustering using the Hopkins' H statistic of given data. If the value of Hopkins statistic is close to 0 (below 0.5), then we can reject the null hypothesis and conclude that the dataset is significantly clusterable. A value for H lower than 0.25 indicates a clustering tendency at the 90\% confidence level. The visual assessment of cluster tendency (VAT) approach (Bezdek and Hathaway, 2002) consists in investigating the heatmap of the ordered dissimilarity matrix. Following this, one can potentially detect the clustering tendency by counting the number of square shaped blocks along the diagonal.
#'
#' @param x A data frame.
#' @param standardize Standardize the dataframe before clustering (default).
#' @param distance Distance method used. Other methods than "euclidean" (default) are exploratory in the context of clustering tendency. See \code{\link{dist}} for list of available methods.
#' @param ... Arguments passed to or from other methods.
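#' @details A rough sketch of the computation (see the internal helper
#'   \code{.clusterstructure_hopkins()} below): n points are simulated
#'   uniformly within the range of the (standardized) data, and n points are
#'   sampled from the data itself; H is the sum of the nearest-neighbor
#'   distances of the sampled real points divided by the summed
#'   nearest-neighbor distances of both sets, so clustered data yield values
#'   close to 0.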
#' #' @examples #' library(parameters) #' check_clusterstructure(iris[, 1:4]) #' plot(check_clusterstructure(iris[, 1:4])) #' @return The H statistic (numeric) #' #' @seealso \code{\link{check_kmo}}, \code{\link{check_sphericity}} and \code{\link{check_factorstructure}}. #' #' @references \itemize{ #' \item Lawson, R. G., & Jurs, P. C. (1990). New index for clustering tendency and its application to chemical problems. Journal of chemical information and computer sciences, 30(1), 36-41. #' \item Bezdek, J. C., & Hathaway, R. J. (2002, May). VAT: A tool for visual assessment of (cluster) tendency. In Proceedings of the 2002 International Joint Conference on Neural Networks. IJCNN02 (3), 2225-2230. IEEE. #' } #' @export check_clusterstructure <- function(x, standardize = TRUE, distance = "euclidean", ...) { if (standardize) { x <- as.data.frame(scale(x)) } H <- .clusterstructure_hopkins(x, distance = distance) if (H < 0.5) { text <- paste0( "The dataset is suitable for clustering (Hopkins' H = ", insight::format_value(H), ").\n" ) color <- "green" } else { text <- paste0( "The dataset is not suitable for clustering (Hopkins' H = ", insight::format_value(H), ").\n" ) color <- "red" } out <- list( H = H, dissimilarity_matrix = .clusterstructure_dm(x, distance = distance, method = "ward.D2") ) attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "Clustering tendency" class(out) <- c("see_check_clusterstructure", "check_clusterstructure", "easystats_check", class(out)) out } #' @importFrom stats heatmap #' @importFrom grDevices colorRampPalette #' @export plot.check_clusterstructure <- function(x, ...) { # Can be reimplemented with ggplot in see stats::heatmap( x$dissimilarity_matrix, Rowv = NA, Colv = NA, labRow = FALSE, labCol = FALSE, col = grDevices::colorRampPalette(c("#2196F3", "#FAFAFA", "#E91E63"))(100) ) } #' @importFrom stats hclust dist #' @keywords internal .clusterstructure_dm <- function(x, distance = "euclidean", method = "ward.D2") { d <- stats::dist(x, method = distance) hc <- stats::hclust(d, method = method) as.matrix(d)[hc$order, hc$order] } #' @importFrom stats runif #' @keywords internal .clusterstructure_hopkins <- function(x, distance = "euclidean") { # This is based on the hopkins() function from the clustertend package if (is.data.frame(x)) { x <- as.matrix(x) } n <- nrow(x) - 1 c <- apply(x, 2, min) # minimum value per column d <- apply(x, 2, max) p <- matrix(0, ncol = ncol(x), nrow = n) # n vectors of space for (i in 1:ncol(x)) { p[, i] <- runif(n, min = c[i], max = d[i]) } k <- round(runif(n, 1, nrow(x))) q <- as.matrix(x[k, ]) distp <- rep(0, nrow(x)) # distq=rep(0,nrow(x)-1) distq <- 0 minp <- rep(0, n) minq <- rep(0, n) for (i in 1:n) { distp[1] <- dist(rbind(p[i, ], x[1, ]), method = distance) minqi <- dist(rbind(q[i, ], x[1, ]), method = distance) for (j in 2:nrow(x)) { distp[j] <- dist(rbind(p[i, ], x[j, ]), method = distance) error <- q[i, ] - x[j, ] if (sum(abs(error)) != 0) { # distq[j]<-dist(rbind(q[i,],x[j,])) distq <- dist(rbind(q[i, ], x[j, ]), method = distance) if (distq < minqi) { minqi <- distq } } } minp[i] <- min(distp) # minq[i]<-apply(distq,1,min) minq[i] <- minqi } H <- (sum(minq) / (sum(minp) + sum(minq))) H } parameters/R/robust_estimation.R0000644000176200001440000001267013616321014016516 0ustar liggesusers#' Robust estimation #' #' \code{standard_error_robust()}, \code{ci_robust()} and \code{p_value_robust()} #' attempt to return indices based on robust estimation of the variance-covariance #' matrix, using the packages 
\pkg{sandwich} and \pkg{clubSandwich}. #' #' @param model A model. #' @param vcov_estimation String, indicating the suffix of the \code{vcov*()}-function #' from the \pkg{sandwich}-package, e.g. \code{vcov_estimation = "CL"} (which #' calls \code{\link[sandwich]{vcovCL}} to compute clustered covariance matrix #' estimators), or \code{vcov_estimation = "HC"} (which calls #' \code{\link[sandwich:vcovHC]{vcovHC()}} to compute heteroskedasticity-consistent #' covariance matrix estimators). #' @param vcov_type Character vector, specifying the estimation type for the #' robust covariance matrix estimation (see \code{\link[sandwich:vcovHC]{vcovHC()}} #' or \code{\link[clubSandwich:vcovCR]{vcovCR()}} for details). #' @param vcov_args List of named vectors, used as additional arguments that #' are passed down to the \pkg{sandwich}-function specified in \code{vcov_estimation}. #' @param ... Arguments passed to or from other methods. For \code{standard_error()}, #' if \code{method = "robust"}, arguments \code{vcov_estimation}, \code{vcov_type} #' and \code{vcov_args} can be passed down to \code{standard_error_robust()}. #' @inheritParams ci.merMod #' #' @note These functions rely on the \pkg{sandwich} or \pkg{clubSandwich} package #' (the latter if \code{vcov_estimation = "CR"} for cluster-robust standard errors) #' and will thus only work for those models supported by those packages. #' #' @examples #' # robust standard errors, calling sandwich::vcovHC(type="HC3") by default #' model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) #' standard_error_robust(model) #' #' # cluster-robust standard errors, using clubSandwich #' iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) #' standard_error_robust( #' model, #' vcov_type = "CR2", #' vcov_args = list(cluster = iris$cluster) #' ) #' @return A data frame. #' @export standard_error_robust <- function(model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, ...) { # exceptions if (inherits(model, "gee")) { return(standard_error(model, method = "robust", ...)) } robust <- .robust_covariance_matrix( model, vcov_fun = paste0("vcov", vcov_estimation), vcov_type = vcov_type, vcov_args = vcov_args ) robust[, c("Parameter", "SE")] } #' @rdname standard_error_robust #' @export p_value_robust <- function(model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, ...) { # exceptions if (inherits(model, "gee")) { return(p_value(model, method = "robust", ...)) } robust <- .robust_covariance_matrix( model, vcov_fun = paste0("vcov", vcov_estimation), vcov_type = vcov_type, vcov_args = vcov_args ) robust[, c("Parameter", "p")] } #' @rdname standard_error_robust #' @export ci_robust <- function(model, ci = 0.95, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, ...) { ci_wald( model = model, ci = ci, component = "conditional", robust = TRUE, vcov_estimation = vcov_estimation, vcov_type = vcov_type, vcov_args = vcov_args, ... ) } #' @importFrom insight n_obs #' @importFrom stats coef pnorm pt .robust_covariance_matrix <- function(x, vcov_fun = "vcovHC", vcov_type = NULL, vcov_args = NULL) { # fix default, if necessary if (!is.null(vcov_type) && vcov_type %in% c("CR0", "CR1", "CR1p", "CR1S", "CR2", "CR3")) { vcov_fun <- "vcovCR" } # set default for clubSandwich if (vcov_fun == "vcovCR" && is.null(vcov_type)) { vcov_type <- "CR0" } # check if required package is available if (vcov_fun == "vcovCR") { if (!requireNamespace("clubSandwich", quietly = TRUE)) { stop("Package `clubSandwich` needed for this function. 
Please install and try again.") } package <- "clubSandwich" } else { if (!requireNamespace("sandwich", quietly = TRUE)) { stop("Package `sandwich` needed for this function. Please install and try again.") } package <- "sandwich" } # get coefficients params <- insight::get_parameters(x) # compute robust standard errors based on vcov if (package == "sandwich") { vcov_fun <- get(vcov_fun, asNamespace("sandwich")) .vcov <- do.call(vcov_fun, c(list(x = x, type = vcov_type), vcov_args)) } else { vcov_fun <- clubSandwich::vcovCR .vcov <- do.call(vcov_fun, c(list(obj = x, type = vcov_type), vcov_args)) } se <- sqrt(diag(.vcov)) dendf <- degrees_of_freedom(x, method = "any") t.stat <- params$Estimate / se if (is.null(dendf)) { p.value <- 2 * stats::pnorm(abs(t.stat), lower.tail = FALSE) } else { p.value <- 2 * stats::pt(abs(t.stat), df = dendf, lower.tail = FALSE) } .data_frame( Parameter = params$Parameter, Estimate = params$Estimate, SE = se, Statistic = t.stat, p = p.value ) } parameters/R/factor_analysis.R0000644000176200001440000000706513611361523016133 0ustar liggesusers#' Factor Analysis (FA) #' #' This function performs a Factor Analysis (FA). #' #' @inheritParams principal_components #' #' @details #' \subsection{Complexity}{ #' Complexity represents the number of latent components needed to account #' for the observed variables. Whereas a perfect simple structure solution #' has a complexity of 1 in that each item would only load on one factor, #' a solution with evenly distributed items has a complexity greater than 1 #' (\cite{Hofmann, 1978; Pettersson and Turkheimer, 2010}). #' } #' \subsection{FA or PCA?}{ #' There is a simplified rule of thumb that may help to decide whether to run #' a principal component analysis or a factor analysis: #' \itemize{ #' \item Run factor analysis if you assume or wish to test a theoretical model of latent factors causing observed variables. #' \item Run principal component analysis if you want to simply reduce your correlated observed variables to a smaller set of important independent composite variables. #' } #' (Source: \href{https://stats.stackexchange.com/q/1576/54740}{CrossValidated}) #' } #' #' @note There is a \code{summary()}-method that prints the Eigenvalues and (explained) variance for each extracted component. #' #' @examples #' library(parameters) #' #' factor_analysis(mtcars[, 1:7], n = "all", threshold = 0.2) #' factor_analysis(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) #' factor_analysis(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) #' #' efa <- factor_analysis(mtcars[, 1:5], n = 2) #' summary(efa) #' predict(efa) #' \donttest{ #' # Automated number of components #' factor_analysis(mtcars[, 1:4], n = "auto") #' } #' #' @return A data frame of loadings. #' @references \itemize{ #' \item Hofmann, R. (1978). Complexity and simplicity as objective indices descriptive of factor solutions. Multivariate Behavioral Research, 13:2, 247-250, \doi{10.1207/s15327906mbr1302_9} #' \item Pettersson, E., & Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} #' } #' @importFrom stats prcomp #' @export factor_analysis <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ...)
{ UseMethod("factor_analysis") } #' @importFrom stats prcomp na.omit #' @export factor_analysis.data.frame <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ...) { # Standardize if (standardize) { x <- as.data.frame(scale(x)) } # N factors n <- .get_n_factors(x, n = n, type = "FA", rotation = rotation) .FA_rotate(x, n, rotation = rotation, sort = sort, threshold = threshold, ...) } #' @keywords internal .FA_rotate <- function(x, n, rotation, sort = FALSE, threshold = NULL, ...) { if (!(rotation %in% c("varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster", "none"))) { stop("`rotation` must be one of \"varimax\", \"quartimax\", \"promax\", \"oblimin\", \"simplimax\", \"cluster\" or \"none\".") } if (!inherits(x, "data.frame")) { stop("`x` must be a data frame.", call. = FALSE) } # rotate loadings if (!requireNamespace("psych", quietly = TRUE)) { stop(sprintf("Package `psych` required for `%s`-rotation.", rotation), call. = FALSE) } out <- model_parameters(psych::fa(x, nfactors = n, rotate = rotation, ...), sort = sort, threshold = threshold) attr(out, "data_set") <- x out } parameters/R/utils_values_aov.R0000644000176200001440000000201713553147624016337 0ustar liggesusers#' @keywords internal .values_aov <- function(params) { # number of observations if ("Group" %in% names(params) && ("Within" %in% params$Group)) { lapply(split(params, params$Group), function(.i) { N <- sum(.i$df) + 1 .prepare_values_aov(.i, N) }) } else { N <- sum(params$df) + 1 .prepare_values_aov(params, N) } } #' @keywords internal .prepare_values_aov <- function(params, N) { # get mean squared of residuals Mean_Square_residuals <- sum(params[params$Parameter == "Residuals", ]$Mean_Square) # get sum of squares of residuals Sum_Squares_residuals <- sum(params[params$Parameter == "Residuals", ]$Sum_Squares) # get total sum of squares Sum_Squares_total <- sum(params$Sum_Squares) # number of terms in model N_terms <- nrow(params) - 1 list( "Mean_Square_residuals" = Mean_Square_residuals, "Sum_Squares_residuals" = Sum_Squares_residuals, "Sum_Squares_total" = Sum_Squares_total, "n_terms" = N_terms, "n" = N ) } parameters/R/model_parameters.gam.R0000644000176200001440000000612013614640330017026 0ustar liggesusers#' Parameters from Generalized Additive (Mixed) Models #' #' Extract and compute indices and measures to describe parameters of generalized additive models (GAM(M)s). #' #' @param model A gam/gamm model. #' @inheritParams model_parameters.default #' #' @seealso \code{\link[=standardize_names]{standardize_names()}} to rename #' columns into a consistent, standardized naming scheme. #' #' @return A data frame of indices related to the model's parameters. #' #' @examples #' library(parameters) #' library(mgcv) #' #' dat <- gamSim(1, n = 400, dist = "normal", scale = 2) #' model <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) #' model_parameters(model) #' @export model_parameters.gam <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, ...) { # Processing if (bootstrap) { parameters <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) } else { parameters <- .extract_parameters_generic(model, ci = ci, component = "all", merge_by = c("Parameter", "Component"), standardize = standardize, robust = robust) } if (exponentiate) parameters <- .exponentiate_parameters(parameters) parameters <- .add_model_parameters_attributes(parameters, model, ci, exponentiate, ...) 
attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.vgam <- model_parameters.gam #' @export model_parameters.gamm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") model_parameters(model, ci = ci, bootstrap = bootstrap, iterations = iterations, robust = FALSE, ...) } #' @export model_parameters.list <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, robust = FALSE, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") model_parameters(model, ci = ci, bootstrap = bootstrap, iterations = iterations, robust = robust, ...) } } #' @export model_parameters.gamlss <- model_parameters.gam #' @rdname model_parameters.gam #' @export model_parameters.rqss <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("conditional", "smooth_terms", "all"), standardize = NULL, exponentiate = FALSE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } ## TODO check merge by out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @rdname model_parameters.gam #' @export model_parameters.cgam <- model_parameters.rqss parameters/R/ci.R0000644000176200001440000003325113615056553013351 0ustar liggesusers#' Confidence Intervals (CI) #' #' Compute confidence intervals (CI) for frequentist models. #' #' @param x A statistical model. #' @param ci Confidence Interval (CI) level. Default to 0.95 (95\%). #' @param method For mixed models, can be \code{\link[=ci_wald]{"wald"}} (default), \code{\link[=ci_ml1]{"ml1"}} or \code{\link[=ci_betwithin]{"betwithin"}}. For linear mixed model, can also be \code{\link[=ci_satterthwaite]{"satterthwaite"}}, \code{\link[=ci_kenward]{"kenward"}} or \code{"boot"} and \code{lme4::confint.merMod}). For (generalized) linear models, can be \code{"robust"} to compute confidence intervals based on robust standard errors, and for generalized linear models, may also be \code{"profile"} (default) or \code{"wald"}. #' @param ... Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed. #' @inheritParams simulate_model #' @inheritParams standard_error #' #' @return A data frame containing the CI bounds. #' #' @note \code{ci_robust()} resp. \code{ci(method = "robust")} #' rely on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if #' \code{vcov_estimation = "CR"} for cluster-robust standard errors) and will #' thus only work for those models supported by those packages. #' #' @examples #' \donttest{ #' library(parameters) #' if (require("glmmTMB")) { #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' #' ci(model) #' ci(model, component = "zi") #' } #' } #' @export ci.merMod <- function(x, ci = 0.95, method = c("wald", "ml1", "betwithin", "satterthwaite", "kenward", "boot"), ...) 
{ method <- tolower(method) method <- match.arg(method) # Wald approx if (method == "wald") { out <- ci_wald(model = x, ci = ci, dof = Inf) # ml1 approx } else if (method == "ml1") { out <- ci_ml1(x, ci) # betwithin approx } else if (method == "betwithin") { out <- ci_betwithin(x, ci) # Satterthwaite } else if (method == "satterthwaite") { out <- ci_satterthwaite(x, ci) # Kenward approx } else if (method %in% c("kenward", "kr")) { out <- ci_kenward(x, ci) # bootstrapping } else if (method == "boot") { out <- lapply(ci, function(ci, x) .ci_boot_merMod(x, ci, ...), x = x) out <- do.call(rbind, out) row.names(out) <- NULL } out } #' @importFrom bayestestR ci #' @export bayestestR::ci # Default Wald CI method ------------------------------------------------------ #' @rdname ci.merMod #' @export ci.default <- function(x, ci = .95, method = NULL, ...) { if (!is.null(method)) { method <- tolower(method) } else { method <- "wald" } if (method == "robust") { ci_wald(model = x, ci = ci, dof = Inf, robust = TRUE) } else if (method == "ml1") { ci_ml1(model = x, ci = ci) } else if (method == "betwithin") { ci_betwithin(model = x, ci = ci) } else { ci_wald(model = x, ci = ci, dof = Inf, robust = FALSE) } } #' @export ci.mlm <- function(x, ci = .95, ...) { out <- lapply(ci, function(i) { .ci <- stats::confint(x, level = i, ...) rn <- rownames(.ci) .data_frame( Parameter = gsub("^(.*):(.*)", "\\2", rn), CI = i, CI_low = .ci[, 1], CI_high = .ci[, 2], Response = gsub("^(.*):(.*)", "\\1", rn) ) }) .remove_backticks_from_parameter_names(do.call(rbind, out)) } #' @method ci lm #' @export ci.lm <- function(x, ci = .95, method = NULL, ...) { robust <- !is.null(method) && method == "robust" ci_wald(model = x, ci = ci, robust = robust, ...) } #' @export ci.lm_robust <- ci.lm #' @export ci.comlmrob <- ci.lm #' @export ci.rq <- ci.lm #' @export ci.rqss <- ci.lm #' @export ci.crq <- ci.lm #' @export ci.nlrq <- ci.lm #' @export ci.BBmm <- ci.lm #' @export ci.BBreg <- ci.lm #' @export ci.gam <- function(x, ci = .95, ...) { ci_wald(model = x, ci = ci, ...) } #' @export ci.list <- function(x, ci = .95, ...) { if ("gam" %in% names(x)) { x <- x$gam class(x) <- c("gam", "lm", "glm") ci(x, ci = ci, ...) } else { return(NULL) } } # glm CI method with profiling ----------------------------------------------- #' @rdname ci.merMod #' @method ci glm #' @export ci.glm <- function(x, ci = .95, method = c("profile", "wald", "robust"), ...) { method <- match.arg(method) if (method == "profile") { out <- lapply(ci, function(i) .ci_profiled(model = x, ci = i)) out <- do.call(rbind, out) } else if (method == "robust") { out <- ci_wald(model = x, ci = ci, robust = TRUE, ...) } else { out <- ci_wald(model = x, ci = ci) } row.names(out) <- NULL out } #' @export ci.negbin <- ci.glm #' @export ci.logistf <- ci.glm #' @rdname ci.merMod #' @export ci.polr <- function(x, ci = .95, method = c("profile", "wald", "robust"), ...) { method <- match.arg(method) if (method == "profile") { out <- lapply(ci, function(i) .ci_profiled2(model = x, ci = i)) out <- do.call(rbind, out) } else if (method == "robust") { out <- ci_wald(model = x, ci = ci, robust = TRUE, ...) 
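    # robust Wald intervals: ci_wald(robust = TRUE) substitutes the
    # sandwich/clubSandwich standard errors from standard_error_robust()
    # (see robust_estimation.R) for the model-based ones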
} else { out <- ci_wald(model = x, ci = ci) } # for polr, profiled CI do not return CI for response levels # thus, we also calculate Wald CI and add missing rows to result out_missing <- ci_wald(model = x, ci = ci) missing_rows <- out_missing$Parameter %in% setdiff(out_missing$Parameter, out$Parameter) out <- rbind(out, out_missing[missing_rows, ]) # fix names, to match standard error and p_value out$Parameter <- gsub("Intercept: ", "", out$Parameter, fixed = TRUE) row.names(out) <- NULL out } # Default Wald CI method with Inf dof ----------------------------------------- #' @export ci.gamlss <- function(x, ci = .95, method = NULL, ...) { robust <- !is.null(method) && method == "robust" ci_wald(model = x, ci = ci, dof = Inf, robust = robust, ...) } #' @export ci.speedglm <- ci.gamlss #' @export ci.cpglm <- ci.gamlss #' @export ci.cpglmm <- ci.gamlss #' @export ci.glmx <- ci.gamlss #' @export ci.glmmadmb <- ci.gamlss #' @export ci.fixest <- ci.gamlss #' @export ci.feglm <- ci.gamlss #' @export ci.speedlm <- ci.gamlss #' @export ci.glmrob <- ci.gamlss #' @export ci.plm <- ci.gamlss #' @export ci.LORgee <- ci.gamlss #' @export ci.truncreg <- ci.gamlss #' @export ci.ivreg <- ci.gamlss #' @export ci.gee <- ci.gamlss #' @export ci.tobit <- ci.gamlss #' @export ci.geeglm <- ci.gamlss #' @export ci.coxph <- ci.gamlss #' @export ci.aareg <- ci.gamlss #' @export ci.clm <- ci.gamlss #' @export ci.crch <- ci.gamlss #' @export ci.feis <- ci.gamlss #' @export ci.censReg <- ci.gamlss #' @export ci.survreg <- ci.gamlss #' @export ci.flexsurvreg <- ci.gamlss #' @export ci.coxme <- ci.gamlss #' @export ci.svyglm.nb <- ci.gamlss #' @export ci.lrm <- ci.gamlss #' @export ci.psm <- ci.gamlss #' @export ci.ols <- ci.gamlss #' @export ci.rms <- ci.gamlss #' @export ci.svyglm.zip <- ci.gamlss #' @export ci.vglm <- ci.gamlss #' @export ci.svyglm.glimML <- ci.gamlss #' @rdname ci.merMod #' @export ci.mixor <- function(x, ci = .95, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) ci_wald(model = x, ci = ci, dof = Inf, effects = effects, robust = FALSE, ...) } #' @export ci.gamm <- function(x, ci = .95, ...) { x <- x$gam class(x) <- c("gam", "lm", "glm") ci(x, ci = ci, ...) } #' @export ci.gamm4 <- ci.gamm #' @export ci.multinom <- function(x, ci = .95, method = NULL, ...) { robust <- !is.null(method) && method == "robust" params <- insight::get_parameters(x) out <- ci_wald(model = x, ci = ci, dof = Inf, robust = robust, ...) if ("Response" %in% colnames(params)) { out$Response <- params$Response } out } #' @export ci.brmultinom <- ci.multinom #' @export ci.bracl <- ci.multinom #' @rdname ci.merMod #' @export ci.DirichletRegModel <- function(x, ci = .95, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) params <- insight::get_parameters(x, component = component) out <- ci_wald(model = x, ci = ci, dof = Inf, ...) if (is.null(out$Component)) { component <- "all" } if ("Response" %in% colnames(params)) { out$Response <- params$Response } if (component != "all") { out <- out[out$Component == component, ] } out } # Zero-Inflated and Mixed models ----------------------------------------- #' @rdname ci.merMod #' @export ci.glmmTMB <- function(x, ci = .95, component = c("all", "conditional", "zi", "zero_inflated"), method = c("wald", "ml1", "betwithin", "robust"), ...) 
{ method <- tolower(method) method <- match.arg(method) component <- match.arg(component) if (is.null(.check_component(x, component))) { return(NULL) } if (method == "robust") { ci_wald(model = x, ci = ci, dof = Inf, component = component, robust = TRUE) } else if (method == "wald") { ci_wald(model = x, ci = ci, dof = Inf, component = component, robust = FALSE) } else if (method == "ml1") { ci_ml1(model = x, ci = ci) } else if (method == "betwithin") { ci_betwithin(model = x, ci = ci) } } #' @rdname ci.merMod #' @export ci.zeroinfl <- ci.glmmTMB #' @rdname ci.merMod #' @export ci.hurdle <- ci.glmmTMB #' @export ci.zerocount <- ci.glmmTMB #' @rdname ci.merMod #' @export ci.MixMod <- function(x, ci = .95, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) if (is.null(.check_component(x, component))) { return(NULL) } ci_wald(model = x, ci = ci, dof = Inf, component = component) } # Special models ----------------------------------------- #' @rdname ci.merMod #' @export ci.betareg <- function(x, ci = .95, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) ci_wald(model = x, ci = ci, dof = Inf, component = component) } #' @rdname ci.merMod #' @export ci.clm2 <- function(x, ci = .95, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) ci_wald(model = x, ci = ci, dof = Inf, component = component) } #' @export ci.clmm2 <- ci.clm2 #' @export ci.biglm <- function(x, ci = .95, ...) { out <- lapply(ci, function(i) { ci_list <- stats::confint(x, level = i, ...) .data_frame( Parameter = rownames(ci_list), CI = i * 100, CI_low = as.vector(ci_list[, 1]), CI_high = as.vector(ci_list[, 2]) ) }) .remove_backticks_from_parameter_names(do.call(rbind, out)) } #' @export ci.gls <- ci.biglm #' @rdname ci.merMod #' @export ci.lme <- function(x, ci = .95, method = c("wald", "betwithin", "ml1", "satterthwaite"), ...) { method <- tolower(method) method <- match.arg(method) if (method == "wald") { if (!requireNamespace("nlme", quietly = TRUE)) { ci_wald(model = x, ci = ci) } else { out <- lapply(ci, function(i) { ci_list <- nlme::intervals(x, level = i, ...) .data_frame( Parameter = rownames(ci_list$fixed), CI = i * 100, CI_low = as.vector(ci_list$fixed[, "lower"]), CI_high = as.vector(ci_list$fixed[, "upper"]) ) }) .remove_backticks_from_parameter_names(do.call(rbind, out)) } # ml1 approx } else if (method == "ml1") { ci_ml1(x, ci) # betwithin approx } else if (method == "betwithin") { ci_betwithin(x, ci) # Satterthwaite } else if (method == "satterthwaite") { ci_satterthwaite(x, ci) } } #' @importFrom insight print_color #' @importFrom stats qnorm #' @export ci.effectsize_std_params <- function(x, ci = .95, ...) { se <- attr(x, "standard_error") if (is.null(se)) { insight::print_color("\nCould not extract standard errors of standardized coefficients.\n", "red") return(NULL) } out <- lapply(ci, function(i) { alpha <- (1 + i) / 2 fac <- stats::qnorm(alpha) data.frame( Parameter = x$Parameter, CI = i * 100, CI_low = x$Std_Coefficient - se * fac, CI_high = x$Std_Coefficient + se * fac, stringsAsFactors = FALSE ) }) .remove_backticks_from_parameter_names(do.call(rbind, out)) } #' @export ci.rma <- function(x, ci = .95, ...) 
{ params <- insight::get_parameters(x) out <- lapply(ci, function(i) { model <- stats::update(x, level = i) .data_frame( Parameter = params[[1]], CI = i * 100, CI_low = as.vector(model$ci.lb), CI_high = as.vector(model$ci.ub) ) }) .remove_backticks_from_parameter_names(do.call(rbind, out)) } # helper ----------------------------------------- #' @keywords internal .check_component <- function(m, x) { if (!insight::model_info(m)$is_zero_inflated && x %in% c("zi", "zero_inflated")) { insight::print_color("Model has no zero-inflation component!\n", "red") x <- NULL } x } #' @keywords internal .ci_from_refit <- function(std_coef, ci) { se <- attributes(std_coef)$standard_error$SE alpha <- (1 + ci) / 2 fac <- stats::qnorm(alpha) out <- data.frame( Parameter = std_coef$Parameter, CI = ci * 100, CI_low = std_coef$Std_Coefficient - se * fac, CI_high = std_coef$Std_Coefficient + se * fac, stringsAsFactors = FALSE ) .remove_backticks_from_parameter_names(out) } parameters/R/ci_wald.R0000644000176200001440000000506613614067274014364 0ustar liggesusers#' @rdname p_value_wald #' #' @param ci Confidence Interval (CI) level. Default to 0.95 (95\%). #' @param dof Degrees of Freedom. If not specified, for \code{ci_wald()}, defaults to model's residual degrees of freedom (i.e. \code{n-k}, where \code{n} is the number of observations and \code{k} is the number of parameters). For \code{p_value_wald()}, defaults to \code{Inf}. #' #' @inheritParams simulate_model #' @inheritParams standard_error #' @inheritParams model_parameters.default #' #' @importFrom stats qt coef #' @export ci_wald <- function(model, ci = .95, dof = NULL, effects = c("fixed", "random", "all"), component = c("all", "conditional", "zi", "zero_inflated", "precision", "scale", "smooth_terms"), robust = FALSE, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- lapply(ci, function(i) { .ci_wald(model = model, ci = i, dof = dof, effects = effects, component = component, robust = robust, method = "wald", ...) }) out <- do.call(rbind, out) row.names(out) <- NULL out } #' @importFrom insight get_parameters n_obs #' @importFrom stats qt #' @keywords internal .ci_wald <- function(model, ci, dof, effects, component, robust = FALSE, method = "wald", ...) { params <- insight::get_parameters(model, effects = effects, component = component) estimates <- params$Estimate method <- tolower(method) stderror <- if (isTRUE(robust)) { standard_error_robust(model, ...) 
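    # robust SEs come from a heteroskedasticity-consistent (or cluster-robust)
    # covariance matrix; either way the interval computed below is Wald-type,
    # e.g. with a (hypothetical) estimate of 0.5, SE of 0.2, ci = .95 and
    # dof = 30: 0.5 +/- qt(0.975, 30) * 0.2, i.e. roughly [0.09, 0.91]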
} else { switch( method, "wald" = standard_error(model, component = component), "kenward" = , "kr" = se_kenward(model), "ml1" = se_ml1(model), "betwithin" = se_betwithin(model), "satterthwaite" = se_satterthwaite(model), standard_error(model, component = component) ) } # filter non-matching parameters stderror <- stderror[1:nrow(params), ] se <- stderror$SE if (is.null(dof)) { # residual df dof <- degrees_of_freedom(model, method = "any") # make sure we have a value for degrees of freedom if (is.null(dof) || length(dof) == 0) dof <- Inf } alpha <- (1 + ci) / 2 fac <- stats::qt(alpha, df = dof) out <- cbind( CI_low = estimates - se * fac, CI_high = estimates + se * fac ) out <- as.data.frame(out) out$CI <- ci * 100 out$Parameter <- params$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] if ("Component" %in% names(params)) out$Component <- params$Component if ("Effects" %in% names(params) && effects != "fixed") out$Effects <- params$Effects out } parameters/R/se_betwithin.R0000644000176200001440000000065013614067274015440 0ustar liggesusers#' @rdname p_value_betwithin #' @importFrom stats qnorm #' @importFrom insight get_parameters #' @export se_betwithin <- function(model) { params <- insight::get_parameters(model) p <- p_value_betwithin(model) statistic <- stats::qnorm(p$p / 2, lower.tail = FALSE) data.frame( Parameter = params$Parameter, SE = abs(as.vector(params$Estimate / statistic)), stringsAsFactors = FALSE ) } parameters/R/demean.R0000644000176200001440000002255613603206134014202 0ustar liggesusers#' Compute group-meaned and de-meaned variables #' #' \code{demean()} computes group- and de-meaned versions of a #' variable that can be used in regression analysis to model the between- #' and within-subject effect. #' #' @param x A data frame. #' @param select Character vector with names of variables to select that should be group- and de-meaned. #' @param group Name of the variable that indicates the group- or cluster-ID. #' @param suffix_demean,suffix_groupmean String value, will be appended to the names of the #' group-meaned and de-meaned variables of \code{x}. By default, de-meaned #' variables will be suffixed with \code{"_within"} and group-meaned variables #' with \code{"_between"}. #' #' @return A data frame with the group-/de-meaned variables, which get the suffix #' \code{"_between"} (for the group-meaned variable) and \code{"_within"} (for #' the de-meaned variable) by default. #' #' @details #' \subsection{Panel data and correlating fixed and group effects}{ #' \code{demean()} is intended to create group- and de-meaned variables #' for panel regression models (fixed effects models), or for complex #' random-effect-within-between models (see \cite{Bell et al. 2018}), #' where group-effects (random effects) and fixed effects correlate (see #' \cite{Bafumi and Gelman 2006}). This violation of one of the #' \emph{Gauss-Markov-assumptions} can happen, for instance, when analysing panel #' data. To control for correlating predictors and group effects, it is #' recommended to include the group-meaned and de-meaned version of #' \emph{time-varying covariates} in the model. By this, one can fit #' complex multilevel models for panel data, including time-varying predictors, #' time-invariant predictors and random effects. This approach is superior to #' classic fixed-effects models, which lack information of variation in the #' group-effects or between-subject effects.
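#' As a simple numerical illustration of these two components: if a subject's
#' repeated measurements of a covariate are 4, 3 and 5, the group-meaned
#' (between) value is 4 on all three occasions, and the de-meaned (within)
#' values are 0, -1 and 1.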
#' } #' \subsection{Terminology}{ #' The group-meaned variable is simply the mean of an independent variable #' within each group (or id-level or cluster) represented by \code{group}. #' It represents the cluster-mean of an independent variable. The de-meaned #' variable is then the original variable with its group mean subtracted, i.e. #' the variable centered within each cluster. De-meaning #' is sometimes also called person-mean centering or centering within clusters. #' } #' \subsection{De-meaning with continuous predictors}{ #' For continuous time-varying predictors, the recommendation is to include #' both their de-meaned and group-meaned versions as fixed effects, but not #' the raw (untransformed) time-varying predictors themselves. The de-meaned #' predictor should also be included as random effect (random slope). In #' regression models, the coefficient of the de-meaned predictors indicates #' the within-subject effect, while the coefficient of the group-meaned #' predictor indicates the between-subject effect. #' } #' \subsection{De-meaning with binary predictors}{ #' For binary time-varying predictors, the recommendation is to include #' the raw (untransformed) binary predictor as fixed effect only and the #' \emph{de-meaned} variable as random effect (random slope) #' (\cite{Hoffman 2015, chapter 8-2.I}). \code{demean()} will thus coerce #' categorical time-varying predictors to numeric to compute the de- and #' group-meaned versions for these variables. #' } #' \subsection{De-meaning interaction terms}{ #' There are multiple ways to deal with interaction terms of within- and #' between-effects. A classical approach is to simply use the product #' term of the de-meaned variables (i.e. introducing the de-meaned variables #' as interaction term in the model formula, e.g. \code{y ~ x_within * time_within}). #' This approach, however, might be subject to bias (see \cite{Giesselmann & Schmidt-Catran 2018}). #' \cr \cr #' Another option is to first calculate the product term and then apply the #' de-meaning to it. This approach produces an estimator \dQuote{that reflects #' unit-level differences of interacted variables whose moderators vary #' within units}, which is desirable if \emph{no} within interaction of #' two time-dependent variables is required. \cr \cr #' A third option, when the interaction should result in a genuine within #' estimator, is to "double de-mean" the interaction terms #' (\cite{Giesselmann & Schmidt-Catran 2018}), however, this is currently #' not supported by \code{demean()}. If this is required, the \code{wbm()} #' function from the \pkg{panelr} package should be used. \cr \cr #' To de-mean interaction terms for within-between models, simply specify #' the term as interaction for the \code{select}-argument, e.g. #' \code{select = "a*b"} (see 'Examples'). #' } #' \subsection{Analysing panel data with mixed models using lme4}{ #' A description of how to translate the #' formulas described in \emph{Bell et al. 2018} into R using \code{lmer()} #' from \pkg{lme4} or \code{glmmTMB()} from \pkg{glmmTMB} can be found here: #' \href{https://strengejacke.github.io/mixed-models-snippets/random-effects-within-between-effects-model.html}{for lmer()} #' and \href{https://strengejacke.github.io/mixed-models-snippets/random-effects-within-between-effects-model-glmmtmb.html}{for glmmTMB()}. #' } #' #' @references \itemize{ #' \item Bafumi J, Gelman A. 2006. Fitting Multilevel Models When Predictors and Group Effects Correlate. In. Philadelphia, PA: Annual meeting of the American Political Science Association.
#' \item Bell A, Fairbrother M, Jones K. 2018. Fixed and Random Effects Models: Making an Informed Choice. Quality & Quantity. #' \item Giesselmann M, Schmidt-Catran A. (2018). Interactions in fixed effects regression models (Discussion Papers of DIW Berlin No. 1748). DIW Berlin, German Institute for Economic Research. Retrieved from https://ideas.repec.org/p/diw/diwwpp/dp1748.html #' \item Hoffman L. 2015. Longitudinal analysis: modeling within-person fluctuation and change. New York: Routledge #' } #' @examples #' data(iris) #' iris$ID <- sample(1:4, nrow(iris), replace = TRUE) # fake-ID #' iris$binary <- as.factor(rbinom(150, 1, .35)) # binary variable #' #' x <- demean(iris, select = c("Sepal.Length", "Petal.Length"), group = ID) #' head(x) #' #' x <- demean(iris, select = c("Sepal.Length", "binary", "Species"), group = ID) #' head(x) #' #' # demean interaction term x*y #' dat <- data.frame( #' a = c(1, 2, 3, 4, 1, 2, 3, 4), #' x = c(4, 3, 3, 4, 1, 2, 1, 2), #' y = c(1, 2, 1, 2, 4, 3, 2, 1), #' ID = c(1, 2, 3, 1, 2, 3, 1, 2) #' ) #' demean(dat, select = c("a", "x*y"), group = "ID") #' @importFrom stats ave #' @export demean <- function(x, select, group, suffix_demean = "_within", suffix_groupmean = "_between") { interactions_no <- select[!grepl("(\\*|\\:)", select)] interactions_yes <- select[grepl("(\\*|\\:)", select)] if (length(interactions_yes)) { interaction_terms <- lapply(strsplit(interactions_yes, "*", fixed = TRUE), trimws) product <- lapply(interaction_terms, function(i) do.call(`*`, x[, i])) new_dat <- as.data.frame(stats::setNames(product, gsub("\\s", "", gsub("*", "_", interactions_yes, fixed = TRUE)))) x <- cbind(x, new_dat) select <- c(interactions_no, colnames(new_dat)) } not_found <- setdiff(select, colnames(x)) if (length(not_found)) { insight::print_color(sprintf( "%i variables were not found in the dataset: %s\n", length(not_found), paste0(not_found, collapse = ", ") ), color = "red" ) } select <- intersect(colnames(x), select) # parse group-variable name to string group <- gsub("\"", "", deparse(substitute(group)), fixed = TRUE) # get data to demean... dat <- x[, c(select, group)] # find categorical predictors that are coded as factors categorical_predictors <- sapply(dat[select], is.factor) # convert binary predictors to numeric if (any(categorical_predictors)) { dat[select[categorical_predictors]] <- lapply( dat[select[categorical_predictors]], function(i) as.numeric(i) - 1 ) insight::print_color( sprintf( "Categorical predictors (%s) have been coerced to numeric values to compute de- and group-meaned variables.\n", paste0(names(categorical_predictors)[categorical_predictors], collapse = ", ") ), "yellow" ) } # group variables, then calculate the mean-value # for variables within each group (the group means).
assign # mean values to a vector of same length as the data x_gm_list <- lapply(select, function(i) { stats::ave(dat[[i]], dat[[group]], FUN = function(.gm) mean(.gm, na.rm = TRUE)) }) names(x_gm_list) <- select # create de-meaned variables by subtracting the group mean from each individual value x_dm_list <- lapply(select, function(i) dat[[i]] - x_gm_list[[i]]) names(x_dm_list) <- select # convert to data frame and add suffix to column names x_gm <- as.data.frame(x_gm_list) x_dm <- as.data.frame(x_dm_list) colnames(x_dm) <- sprintf("%s%s", colnames(x_dm), suffix_demean) colnames(x_gm) <- sprintf("%s%s", colnames(x_gm), suffix_groupmean) cbind(x_gm, x_dm) } parameters/R/p_value_betwithin.R0000644000176200001440000000532613614103362016456 0ustar liggesusers#' Between-within approximation for SEs, CIs and p-values #' #' Approximation of degrees of freedom based on a "between-within" heuristic. #' #' @param model A mixed model. #' @param dof Degrees of Freedom. #' @inheritParams ci.merMod #' #' @details \subsection{Small Sample Cluster corrected Degrees of Freedom}{ #' Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics (see \cite{Li and Redden 2015}). The #' \emph{Between-within} denominator degrees of freedom approximation is #' recommended in particular for generalized linear mixed models with repeated #' measurements (longitudinal design). \code{dof_betwithin()} implements a heuristic #' based on the between-within approach. \strong{Note} that this implementation #' does not return exactly the same results as shown in \cite{Li and Redden 2015}, #' but similar. #' } #' \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ #' In particular for repeated measure designs (longitudinal data analysis), #' the \emph{between-within} heuristic is likely to be more accurate than simply #' using the residual or infinite degrees of freedom, because \code{dof_betwithin()} #' returns different degrees of freedom for within-cluster and between-cluster effects. #' } #' @seealso \code{dof_betwithin()} and \code{se_betwithin()} are small helper-functions #' to calculate approximated degrees of freedom and standard errors of model #' parameters, based on the "between-within" heuristic. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' data(sleepstudy) #' model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' dof_betwithin(model) #' p_value_betwithin(model) #' } #' } #' @return A data frame. #' @references \itemize{ #' \item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel Analysis with Few Clusters: Improving Likelihood-based Methods to Provide Unbiased Estimates and Accurate Inference, British Journal of Political Science. #' \item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15(1), 38.
\doi{10.1186/s12874-015-0026-x} #' } #' @importFrom stats pt coef #' @export p_value_betwithin <- function(model, dof = NULL) { if (is.null(dof)) { dof <- dof_betwithin(model) } .p_value_dof(model, dof) } parameters/R/model_parameters.aov.R0000644000176200001440000000623113611425606017056 0ustar liggesusers#' Parameters from ANOVAs #' #' Parameters from ANOVAs. #' #' @param model Object of class \link{aov}, \link{anova} or \code{aovlist}. #' @param omega_squared Compute omega squared as index of effect size. Can be \code{"partial"} (adjusted for effect size) or \code{"raw"}. #' @param eta_squared Compute eta squared as index of effect size. Can be \code{"partial"} (adjusted for effect size) or \code{"raw"}. #' @param epsilon_squared Compute epsilon squared as index of effect size. #' @param ... Arguments passed to or from other methods. #' #' @return A data frame of indices related to the model's parameters. #' #' @examples #' df <- iris #' df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") #' #' model <- aov(Sepal.Length ~ Sepal.Big, data = df) #' model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE) #' #' model <- anova(lm(Sepal.Length ~ Sepal.Big, data = df)) #' model_parameters(model) #' model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE) #' #' model <- aov(Sepal.Length ~ Sepal.Big + Error(Species), data = df) #' model_parameters(model) #' #' if (require("lme4")) { #' model <- anova(lmer(Sepal.Length ~ Sepal.Big + (1 | Species), data = df)) #' model_parameters(model) #' } #' @export model_parameters.aov <- function(model, omega_squared = NULL, eta_squared = NULL, epsilon_squared = NULL, ...) { if (!requireNamespace("effectsize", quietly = TRUE)) { stop("Package 'effectsize' required for this function to work. Please install it.") } parameters <- .extract_parameters_anova(model) # Sanity checks if (!is.null(omega_squared) | !is.null(eta_squared) | !is.null(epsilon_squared)) { if (!"Residuals" %in% parameters$Parameter) { warning("No residuals data found. Omega squared can only be computed for simple `aov` models.") omega_squared <- NULL } if ("Group" %in% names(parameters) && ("Within" %in% parameters$Group)) { warning("Omega squared not implemented yet for repeated-measures ANOVAs.") omega_squared <- NULL } } # Effect sizes ------------------------------------------------------------ # Omega squared if (!is.null(omega_squared)) { if (omega_squared == "partial") { parameters$Omega_Sq_partial <- effectsize::omega_squared(model, partial = TRUE)$Omega_Sq_partial } else { parameters$Omega_Sq <- effectsize::omega_squared(model, partial = FALSE)$Omega_Sq } } # Eta squared if (!is.null(eta_squared)) { if (eta_squared == "partial") { parameters$Eta_Sq_partial <- effectsize::eta_squared(model, partial = TRUE)$Eta_Sq_partial } else { parameters$Eta_Sq <- effectsize::eta_squared(model, partial = FALSE)$Eta_Sq } } # Epsilon squared if (!is.null(epsilon_squared)) { parameters$Epsilon_sq <- effectsize::epsilon_squared(model)$Epsilon_sq } class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.anova <- model_parameters.aov #' @export model_parameters.aovlist <- model_parameters.aov parameters/R/model_parameters.R0000644000176200001440000000511413617565555016307 0ustar liggesusers#' Model Parameters #' #' Compute and extract model parameters. 
See the documentation for your object's class: #' \itemize{ #' \item{\link[=model_parameters.htest]{Correlations and t-tests}} #' \item{\link[=model_parameters.aov]{ANOVAs}} #' \item{\link[=model_parameters.default]{Regression models} (\code{lm}, \code{glm}, \pkg{survey}, ...)} #' \item{\link[=model_parameters.gam]{Additive models} (\code{gam}, \code{gamm}, ...)} #' \item{\link[=model_parameters.zeroinfl]{Zero-inflated models} (\code{hurdle}, \code{zeroinfl}, \code{zerocount})} #' \item{\link[=model_parameters.mlm]{Multinomial, ordinal and cumulative link models} (\code{bracl}, \code{multinom}, \code{mlm}, ...)} #' \item{\link[=model_parameters.merMod]{Mixed models} (\pkg{lme4}, \pkg{nlme}, \pkg{glmmTMB}, ...)} #' \item{\link[=model_parameters.BFBayesFactor]{Bayesian tests} (\pkg{BayesFactor})} #' \item{\link[=model_parameters.stanreg]{Bayesian models} (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm})} #' \item{\link[=model_parameters.principal]{PCA and FA} (\pkg{psych})} #' \item{\link[=model_parameters.lavaan]{CFA and SEM} (\pkg{lavaan}, \pkg{blavaan})} #' \item{\link[=model_parameters.kmeans]{Cluster models} (k-means, ...)} #' \item{\link[=model_parameters.rma]{Meta-Analysis via linear (mixed) models} (\code{rma})} #' } #' #' @param model Statistical Model. #' @param ... Arguments passed to or from other methods. Non-documented arguments are \code{digits}, \code{p_digits} and \code{ci_digits} to set the number of digits for the output. See 'Examples' in \code{\link{model_parameters.default}}. #' #' @seealso \code{\link[=standardize_names]{standardize_names()}} to rename #' columns into a consistent, standardized naming scheme. #' #' @note The \code{\link[=print.parameters_model]{print()}} method has several arguments to tweak the output. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details Standardization is based on \code{\link[effectsize]{standardize_parameters}}. #' In case of \code{standardize = "refit"}, the data used to fit the model #' will be standardized and the model is completely refitted. In such cases, #' standard errors and confidence intervals refer to the standardized coefficient. #' #' @return A data frame of indices related to the model's parameters. #' @export model_parameters <- function(model, ...) { UseMethod("model_parameters") } #' @rdname model_parameters #' @export parameters <- model_parameters parameters/R/convert_efa_to_cfa.R0000644000176200001440000000445113611655361016562 0ustar liggesusers#' Conversion between EFA results and CFA structure #' #' Enables a conversion between Exploratory Factor Analysis (EFA) and Confirmatory Factor Analysis (CFA) \code{lavaan}-ready structure. #' #' @param model An EFA model (e.g., a \code{psych::fa} object). #' @inheritParams principal_components #' @param names Vector containing dimension names. #' #' @examples #' \donttest{ #' library(parameters) #' if (require("psych") && require("lavaan")) { #' efa <- psych::fa(attitude, nfactors = 3) #' #' model1 <- efa_to_cfa(efa) #' model2 <- efa_to_cfa(efa, threshold = 0.3) #' #' anova( #' lavaan::cfa(model1, data = attitude), #' lavaan::cfa(model2, data = attitude) #' ) #' } #' } #' @return Converted index. #' @export convert_efa_to_cfa <- function(model, ...) 
{ UseMethod("convert_efa_to_cfa") } #' @rdname convert_efa_to_cfa #' @inheritParams model_parameters.principal #' @export convert_efa_to_cfa.fa <- function(model, threshold = "max", names = NULL, ...) { .efa_to_cfa(model_parameters(model, threshold = threshold, ...), names = names, ...) } #' @export convert_efa_to_cfa.parameters_efa <- function(model, names = NULL, ...) { .efa_to_cfa(model, names = names, ...) } #' @export convert_efa_to_cfa.parameters_pca <- convert_efa_to_cfa.parameters_efa #' @rdname convert_efa_to_cfa #' @export efa_to_cfa <- convert_efa_to_cfa #' @keywords internal .efa_to_cfa <- function(loadings, names = NULL, ...) { loadings <- attributes(loadings)$loadings_long # Get dimension names if (is.null(names)) { names <- unique(loadings$Component) } # Catch error if (length(names) != length(unique(loadings$Component))) { stop(paste("The `names` vector must be of same length as the number of dimensions, in this case", length(unique(loadings$Component)))) } cfa <- c() # Iterate over dimensions for (i in 1:length(names)) { cfa <- c( cfa, paste0(names[i], " =~ ", paste(as.character(loadings[loadings$Component == unique(loadings$Component)[i], "Variable"]), collapse = " + ")) ) } cfa <- paste0(cfa, collapse = "\n") cfa <- paste0("# Latent variables\n", cfa) class(cfa) <- c("cfa_model", class(cfa)) cfa } #' @export print.cfa_model <- function(x, ...) { cat(x) } parameters/R/extract_parameters.R0000644000176200001440000005324313617206715016655 0ustar liggesusers# generic function ------------------------------------------------------ #' @importFrom insight get_statistic get_parameters #' @importFrom stats confint #' @keywords internal .extract_parameters_generic <- function(model, ci, component, merge_by = c("Parameter", "Component"), standardize = NULL, effects = "fixed", robust = FALSE, df_method = NULL, ...) { # check if standardization is required and package available if (!is.null(standardize) && !requireNamespace("effectsize", quietly = TRUE)) { insight::print_color("Package 'effectsize' required to calculate standardized coefficients. Please install it.\n", "red") standardize <- NULL } # for refit, we completely refit the model, than extract parameters, ci etc. as usual if (!is.null(standardize) && standardize == "refit") { model <- effectsize::standardize(model, verbose = FALSE) standardize <- NULL } parameters <- insight::get_parameters(model, effects = effects, component = component) statistic <- insight::get_statistic(model, component = component) # check if we really have a component column if (!("Component" %in% names(parameters)) && "Component" %in% merge_by) { merge_by <- setdiff(merge_by, "Component") } # check Degrees of freedom if (!.dof_method_ok(model, df_method)) { df_method <- NULL } # clean parameter names if (inherits(model, "polr")) { parameters$Parameter <- gsub("Intercept: ", "", parameters$Parameter, fixed = TRUE) } original_order <- parameters$.id <- 1:nrow(parameters) # column name for coefficients, non-standardized coef_col <- "Coefficient" # Std Coefficients for other methods than "refit" if (!is.null(standardize)) { # standardize model parameters and calculate related CI and SE std_coef <- effectsize::standardize_parameters(model, method = standardize) parameters <- merge(parameters, std_coef, by = merge_by) coef_col <- "Std_Coefficient" # merge all data, including CI and SE for std. 
parameters if (inherits(std_coef, "effectsize_std_params")) { parameters <- merge(parameters, ci(std_coef, ci = ci), by = merge_by) parameters <- merge(parameters, standard_error(std_coef), by = merge_by) } # if we have CIs, remember column names to select later if (!is.null(ci)) { ci_cols <- c("CI_low", "CI_high") } else { ci_cols <- c() } } # CI - only if we don't already have CI for std. parameters if (is.null(standardize)) { if (!is.null(ci)) { if (isTRUE(robust)) { ci_df <- suppressMessages(ci_robust(model, ci = ci, ...)) } else if (!is.null(df_method)) { ci_df <- suppressMessages(ci(model, ci = ci, effects = effects, component = component, method = df_method)) } else { ci_df <- suppressMessages(ci(model, ci = ci, effects = effects, component = component)) } if (length(ci) > 1) ci_df <- bayestestR::reshape_ci(ci_df) ci_cols <- names(ci_df)[!names(ci_df) %in% c("CI", merge_by)] parameters <- merge(parameters, ci_df, by = merge_by) } else { ci_cols <- c() } } # p value if (isTRUE(robust)) { parameters <- merge(parameters, p_value_robust(model, ...), by = merge_by) } else if (!is.null(df_method)) { parameters <- merge(parameters, p_value(model, effects = effects, component = component, method = df_method), by = merge_by) } else { parameters <- merge(parameters, p_value(model, effects = effects, component = component), by = merge_by) } # standard error - only if we don't already have SE for std. parameters if (is.null(standardize)) { if (isTRUE(robust)) { parameters <- merge(parameters, standard_error_robust(model, ...), by = merge_by) } else if (!is.null(df_method)) { parameters <- merge(parameters, standard_error(model, effects = effects, component = component, method = df_method), by = merge_by) } else { parameters <- merge(parameters, standard_error(model, effects = effects, component = component), by = merge_by) } } # test statistic - fix values for robust estimation if (isTRUE(robust)) { parameters$Statistic <- parameters$Estimate / parameters$SE } else { parameters <- merge(parameters, statistic, by = merge_by) } # dof if (!is.null(df_method)) { df_error <- degrees_of_freedom(model, method = df_method) } else { df_error <- degrees_of_freedom(model, method = "any") } if (!is.null(df_error) && (length(df_error) == 1 || length(df_error) == nrow(parameters))) { parameters$df_error <- df_error } # Rematch order after merging parameters <- parameters[match(original_order, parameters$.id), ] # Renaming names(parameters) <- gsub("Statistic", gsub("-statistic", "", attr(statistic, "statistic", exact = TRUE), fixed = TRUE), names(parameters)) names(parameters) <- gsub("Estimate", "Coefficient", names(parameters)) # Reorder col_order <- c("Parameter", coef_col, "SE", ci_cols, "t", "z", "t / F", "z / Chisq", "F", "chisq", "df", "df_error", "p", "Component", "Response", "Effects") parameters <- parameters[col_order[col_order %in% names(parameters)]] # remove Component column if not needed if (length(unique(parameters$Component)) == 1) parameters$Component <- NULL if (length(unique(parameters$Effects)) == 1 || effects == "fixed") parameters$Effects <- NULL rownames(parameters) <- NULL parameters } # mixed models function ------------------------------------------------------ #' @importFrom stats confint #' @keywords internal .extract_parameters_mixed <- function(model, ci = .95, df_method = "wald", standardize = NULL, robust = FALSE, ...)
{ # check if standardization is required and package available if (!is.null(standardize) && !requireNamespace("effectsize", quietly = TRUE)) { insight::print_color("Package 'effectsize' required to calculate standardized coefficients. Please install it.\n", "red") standardize <- NULL } # for refit, we completely refit the model, then extract parameters, ci etc. as usual if (!is.null(standardize) && standardize == "refit") { model <- effectsize::standardize(model, verbose = FALSE) standardize <- NULL } parameters <- insight::get_parameters(model, effects = "fixed", component = "all") statistic <- insight::get_statistic(model, component = "all") original_order <- parameters$.id <- 1:nrow(parameters) # remove SE column parameters <- .remove_columns(parameters, c("SE", "Std. Error")) # column name for coefficients, non-standardized coef_col <- "Coefficient" # Degrees of freedom if (.dof_method_ok(model, df_method)) { df <- degrees_of_freedom(model, df_method) } else { df <- Inf } # Std Coefficients for other methods than "refit" if (!is.null(standardize)) { # standardize model parameters and calculate related CI and SE std_coef <- effectsize::standardize_parameters(model, method = standardize) parameters <- merge(parameters, std_coef, by = "Parameter") coef_col <- "Std_Coefficient" # merge all data, including CI and SE for std. parameters if (inherits(std_coef, "effectsize_std_params")) { parameters <- merge(parameters, ci(std_coef, ci = ci), by = "Parameter") parameters <- merge(parameters, standard_error(std_coef), by = "Parameter") } # if we have CIs, remember column names to select later if (!is.null(ci)) { ci_cols <- c("CI_low", "CI_high") } else { ci_cols <- c() } } # CI - only if we don't already have CI for std. parameters if (is.null(standardize)) { if (!is.null(ci)) { if (isTRUE(robust)) { ci_df <- suppressMessages(ci_robust(model, ci = ci, ...)) } else { ci_df <- ci(model, ci = ci, method = df_method, effects = "fixed") } if (length(ci) > 1) ci_df <- bayestestR::reshape_ci(ci_df) ci_cols <- names(ci_df)[!names(ci_df) %in% c("CI", "Parameter")] parameters <- merge(parameters, ci_df, by = "Parameter") } else { ci_cols <- c() } } # standard error - only if we don't already have SE for std.
if (is.null(standardize) || !("SE" %in% colnames(parameters))) { if (isTRUE(robust)) { parameters <- merge(parameters, standard_error_robust(model, ...), by = "Parameter") } else { parameters <- merge(parameters, standard_error(model, method = df_method, effects = "fixed"), by = "Parameter") } } # p value if (isTRUE(robust)) { parameters <- merge(parameters, p_value_robust(model, ...), by = "Parameter") } else { if ("Pr(>|z|)" %in% names(parameters)) { names(parameters)[grepl("Pr(>|z|)", names(parameters), fixed = TRUE)] <- "p" } else { parameters <- merge(parameters, p_value(model, dof = df, effects = "fixed"), by = "Parameter") } } # when standard errors were adjusted for specific df-methods, recompute the test-statistic as well if (!isTRUE(robust) && is.null(standardize) && df_method %in% c("betwithin", "ml1", "kenward", "kr")) { parameters$Statistic <- parameters$Estimate / parameters$SE } else { parameters <- merge(parameters, statistic, by = "Parameter") } # dof if (!"df" %in% names(parameters)) { if (df_method %in% c("betwithin", "ml1", "satterthwaite", "kenward", "kr")) df_error <- df else df_error <- degrees_of_freedom(model, method = "any") if (!is.null(df_error) && (length(df_error) == 1 || length(df_error) == nrow(parameters))) { parameters$df_error <- df_error } } # Rematch order after merging parameters <- parameters[match(original_order, parameters$.id), ] # Renaming names(parameters) <- gsub("Statistic", gsub("-statistic", "", attr(statistic, "statistic", exact = TRUE), fixed = TRUE), names(parameters)) names(parameters) <- gsub("Std. Error", "SE", names(parameters)) names(parameters) <- gsub("Estimate", "Coefficient", names(parameters)) names(parameters) <- gsub("t value", "t", names(parameters)) names(parameters) <- gsub("z value", "z", names(parameters)) # Reorder order <- c("Parameter", coef_col, "SE", ci_cols, "t", "z", "df", "df_error", "p") parameters <- parameters[order[order %in% names(parameters)]] rownames(parameters) <- NULL parameters } # Bayes function ------------------------------------------------------ #' @importFrom stats sd setNames #' @keywords internal .extract_parameters_bayesian <- function(model, centrality = "median", dispersion = FALSE, ci = .89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 1.0, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, iterations = 1000, ...) { # MCMCglmm needs special handling if (inherits(model, "MCMCglmm")) { parameters <- bayestestR::describe_posterior(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, diagnostic = "ESS", ...) # Bayesian Models } else if ((insight::is_multivariate(model) && insight::model_info(model)[[1]]$is_bayesian) || insight::model_info(model)$is_bayesian) { parameters <- bayestestR::describe_posterior(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, ...) # Bootstrapped Models } else { data <- bootstrap_model(model, iterations = iterations) parameters <- bayestestR::describe_posterior(data, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, ...)
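# Note: this last branch treats frequentist models "Bayesian-style" -- the
# bootstrapped coefficients are summarised by bayestestR::describe_posterior()
# exactly like posterior draws. A rough stand-alone sketch of the same idea:
#
#   draws <- bootstrap_model(model, iterations = 1000)
#   bayestestR::describe_posterior(draws, centrality = "median", ci = 0.89)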
} if (length(ci) > 1) { parameters <- bayestestR::reshape_ci(parameters) } # Remove unnecessary columns if ("CI" %in% names(parameters) && length(unique(parameters$CI)) == 1) { parameters$CI <- NULL } if ("ROPE_CI" %in% names(parameters) && length(unique(parameters$ROPE_CI)) == 1) { parameters$ROPE_CI <- NULL } if ("ROPE_low" %in% names(parameters) && "ROPE_high" %in% names(parameters)) { parameters$ROPE_low <- NULL parameters$ROPE_high <- NULL } parameters } # SEM function ------------------------------------------------------ #' @keywords internal .extract_parameters_lavaan <- function(model, ci = 0.95, standardize = FALSE, ...) { if (!requireNamespace("lavaan", quietly = TRUE)) { stop("Package 'lavaan' required for this function to work. Please install it by running `install.packages('lavaan')`.") } # CI if (length(ci) > 1) { ci <- ci[1] warning(paste0("lavaan models only accept one level of CI :( Keeping the first one: `ci = ", ci, "`.")) } # Get estimates if (standardize == FALSE) { data <- lavaan::parameterEstimates(model, se = TRUE, level = ci, ...) } else { data <- lavaan::standardizedsolution(model, se = TRUE, level = ci, ...) names(data)[names(data) == "est.std"] <- "est" } params <- data.frame( To = data$lhs, Operator = data$op, From = data$rhs, Coefficient = data$est, SE = data$se, CI_low = data$ci.lower, CI_high = data$ci.upper, p = data$pvalue ) params$Type <- ifelse(params$Operator == "=~", "Loading", ifelse(params$Operator == "~", "Regression", ifelse(params$Operator == "~~", "Correlation", ifelse(params$Operator == "~1", "Mean", NA) ) ) ) params$Type <- ifelse(as.character(params$From) == as.character(params$To), "Variance", params$Type) params$p <- ifelse(is.na(params$p), 0, params$p) if ("group" %in% names(data)) { params$Group <- data$group } params } #' @keywords internal .extract_parameters_blavaan <- function(model, ci = 0.95, standardize = FALSE, ...) { if (!requireNamespace("lavaan", quietly = TRUE)) { stop("Package 'lavaan' required for this function to work. Please install it by running `install.packages('lavaan')`.") } # CI if (length(ci) > 1) { ci <- ci[1] warning(paste0("blavaan models only accept one level of CI :( Keeping the first one: `ci = ", ci, "`.")) } # Get estimates if (standardize == FALSE) { data <- lavaan::parameterEstimates(model, se = TRUE, level = ci, ...) } else { data <- lavaan::standardizedsolution(model, se = TRUE, level = ci, ...)
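# lavaan::standardizedsolution() names its estimate column "est.std", while
# lavaan::parameterEstimates() uses "est"; rename it here so the code below
# can read the coefficients from a single column for both paths.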
names(data)[names(data) == "est.std"] <- "est" } params <- data.frame( To = data$lhs, Operator = data$op, From = data$rhs, Coefficient = data$est, SE = data$se, CI_low = data$ci.lower, CI_high = data$ci.upper ) params$Type <- ifelse(params$Operator == "=~", "Loading", ifelse(params$Operator == "~", "Regression", ifelse(params$Operator == "~~", "Correlation", ifelse(params$Operator == "~1", "Mean", NA) ) ) ) params$Type <- ifelse(as.character(params$From) == as.character(params$To), "Variance", params$Type) if ("group" %in% names(data)) { params$Group <- data$group } params } # Lame models ------------------------------------------------------ #' @keywords internal .extract_parameters_anova <- function(model) { # Processing if ("aov" %in% class(model)) { parameters <- as.data.frame(summary(model)[[1]]) parameters$Parameter <- trimws(row.names(parameters)) } else if ("anova" %in% class(model)) { parameters <- as.data.frame(model) parameters$Parameter <- trimws(row.names(parameters)) # Deal with anovas of models if (length(attributes(model)$heading) == 2) { info <- attributes(model)$heading[[2]] if (grepl("Model", info)) { parameters$Parameter <- unlist(strsplit(info, "\n", fixed = TRUE)) } } else if (length(attributes(model)$heading) > 2) { parameters$Parameter <- attributes(model)$heading[-1:-2] } # If mixed models... sumsq <- names(parameters)[names(parameters) %in% c("Sum Sq", "Sum of Sq")] if (length(sumsq) != 0) { parameters$Mean_Square <- parameters[[sumsq]] / parameters[["Df"]] } } else if ("aovlist" %in% class(model)) { if (names(model)[1L] == "(Intercept)") { model <- model[-1L] } parameters <- data.frame() rowmax <- 0 for (i in names(model)) { temp <- as.data.frame(summary(model[[i]])[[1]]) temp$Parameter <- trimws(row.names(temp)) temp$Group <- i row.names(temp) <- 1:nrow(temp) + rowmax rowmax <- nrow(temp) if (nrow(parameters) == 0) { parameters <- temp } else { parameters <- merge(parameters, temp, all = TRUE) } } parameters <- parameters[order(parameters$Group), ] } # Rename names(parameters) <- gsub("Pr(>F)", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Df", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi.Df", "Chisq_df", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi DoF", "Chisq_df", names(parameters), fixed = TRUE) names(parameters) <- gsub("Sum Sq", "Sum_Squares", names(parameters), fixed = TRUE) names(parameters) <- gsub("Mean Sq", "Mean_Square", names(parameters), fixed = TRUE) names(parameters) <- gsub("F value", "F", names(parameters), fixed = TRUE) names(parameters) <- gsub("Res.Df", "df_residual", names(parameters), fixed = TRUE) names(parameters) <- gsub("Res.DoF", "df_residual", names(parameters), fixed = TRUE) names(parameters) <- gsub("Sum of Sq", "Sum_Squares", names(parameters), fixed = TRUE) names(parameters) <- gsub("Pr(>Chi_Square)", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Pr(>ChiSquare)", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Pr(>Chisq)", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("P(>|Chi|)", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Pr(>Chi)", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Pr..Chisq.", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Pr..Chi.", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi.sq", "Chisq", names(parameters), fixed = TRUE) names(parameters)
<- gsub("LR.Chisq", "Chisq", names(parameters), fixed = TRUE) names(parameters) <- gsub("LR Chisq", "Chisq", names(parameters), fixed = TRUE) names(parameters) <- gsub("p.value", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("logLik", "Log_Likelihood", names(parameters), fixed = TRUE) names(parameters) <- gsub("deviance", "Deviance", names(parameters), fixed = TRUE) # Reorder row.names(parameters) <- NULL order <- c("Group", "Parameter", "AIC", "BIC", "Log_Likelihood", "Deviance", "Chisq", "Chisq_df", "RSS", "Sum_Squares", "df", "df_residual", "Mean_Square", "F", "p") parameters <- parameters[order[order %in% names(parameters)]] .remove_backticks_from_parameter_names(parameters) } #' @keywords internal .extract_parameters_htest <- function(model) { if (insight::model_info(model)$is_correlation) { names <- unlist(strsplit(model$data.name, " and ")) out <- data.frame( "Parameter1" = names[1], "Parameter2" = names[2] ) if (grepl("Pearson", model$method)) { out$r <- model$estimate out$t <- model$statistic out$df <- model$parameter out$p <- model$p.value out$CI_low <- model$conf.int[1] out$CI_high <- model$conf.int[2] out$Method <- "Pearson" } else if (grepl("Spearman", model$method)) { out$rho <- model$estimate out$S <- model$statistic out$df <- model$parameter out$p <- model$p.value out$Method <- "Spearman" } else { out$tau <- model$estimate out$z <- model$statistic out$df <- model$parameter out$p <- model$p.value out$Method <- "Kendall" } } else if (insight::model_info(model)$is_ttest) { if (grepl(" and ", model$data.name)) { names <- unlist(strsplit(model$data.name, " and ")) out <- data.frame( "Parameter1" = names[1], "Parameter2" = names[2], "Mean_Parameter1" = model$estimate[1], "Mean_Parameter2" = model$estimate[2], "Difference" = model$estimate[1] - model$estimate[2], "t" = model$statistic, "df" = model$parameter, "p" = model$p.value, "CI_low" = model$conf.int[1], "CI_high" = model$conf.int[2], "Method" = model$method ) } else if (grepl(" by ", model$data.name)) { names <- unlist(strsplit(model$data.name, " by ")) out <- data.frame( "Parameter" = names[1], "Group" = names[2], "Mean_Group1" = model$estimate[1], "Mean_Group2" = model$estimate[2], "Difference" = model$estimate[2] - model$estimate[1], "t" = model$statistic, "df" = model$parameter, "p" = model$p.value, "CI_low" = model$conf.int[1], "CI_high" = model$conf.int[2], "Method" = model$method ) } else { out <- data.frame( "Parameter" = model$data.name, "Mean" = model$estimate, "mu" = model$null.value, "Difference" = model$estimate - model$null.value, "t" = model$statistic, "df" = model$parameter, "p" = model$p.value, "CI_low" = model$conf.int[1], "CI_high" = model$conf.int[2], "Method" = model$method ) } } else { stop("model_parameters not implemented for such h-tests yet.") } row.names(out) <- NULL out } parameters/R/model_parameters.htest.R0000644000176200001440000000205613607420532017417 0ustar liggesusers#' Parameters from Correlations and t-tests #' #' Parameters of h-tests (correlations, t-tests). #' #' @param model Object of class \code{htest}. #' @param bootstrap Should estimates be bootstrapped? #' @param ... Arguments passed to or from other methods. 
#' #' @examples #' model <- cor.test(mtcars$mpg, mtcars$cyl, method = "pearson") #' model_parameters(model) #' #' model <- t.test(iris$Sepal.Width, iris$Sepal.Length) #' model_parameters(model) #' #' model <- t.test(mtcars$mpg ~ mtcars$vs) #' model_parameters(model) #' #' model <- t.test(iris$Sepal.Width, mu = 1) #' model_parameters(model) #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.htest <- function(model, bootstrap = FALSE, ...) { if (bootstrap) { stop("Bootstrapped h-tests are not yet implemented.") } else { parameters <- .extract_parameters_htest(model) } attr(parameters, "ci") <- attributes(model$conf.int)$conf.level class(parameters) <- c("parameters_model", class(parameters)) parameters } parameters/R/p_value.R0000644000176200001440000007024113617512772014403 0ustar liggesusers#' p-values #' #' This function attempts to return, or compute, p-values of a model's parameters. The nature of the p-values is different depending on the model: #' \itemize{ #' \item Mixed models (lme4): By default, p-values are based on Wald-test approximations (see \code{\link{p_value_wald}}). For certain situations, the "m-l-1" rule might be a better approximation. That is, for \code{method = "ml1"}, \code{\link{p_value_ml1}} is called. For \code{lmerMod} objects, if \code{method = "kenward"}, p-values are based on Kenward-Roger approximations, i.e. \code{\link{p_value_kenward}} is called, and \code{method = "satterthwaite"} calls \code{\link{p_value_satterthwaite}}. #' } #' #' @param model A statistical model. #' @param method For mixed models, can be \code{\link[=p_value_wald]{"wald"}} (default), \code{\link[=p_value_ml1]{"ml1"}}, \code{\link[=p_value_betwithin]{"betwithin"}}, \code{\link[=p_value_satterthwaite]{"satterthwaite"}} or \code{\link[=p_value_kenward]{"kenward"}}. For models that are supported by the \pkg{sandwich} or \pkg{clubSandwich} packages, may also be \code{method = "robust"} to compute p-values based on robust standard errors. #' @param ... Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed. #' @inheritParams simulate_model #' @inheritParams standard_error #' #' @note \code{p_value_robust()} resp. \code{p_value(method = "robust")} #' rely on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if #' \code{vcov_estimation = "CR"} for cluster-robust standard errors) and will #' thus only work for those models supported by those packages. #' #' @examples #' model <- lme4::lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value(model) #' @return The p-values. #' @importFrom bayestestR p_direction convert_pd_to_p #' @importFrom stats coef vcov pt pnorm na.omit #' @importFrom insight get_statistic get_parameters find_parameters print_color #' @importFrom methods slot #' @importFrom utils capture.output #' @export p_value <- function(model, ...) { UseMethod("p_value") } # p-Values from Standard Models ----------------------------------------------- #' @rdname p_value #' @export p_value.default <- function(model, method = NULL, ...)
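# Illustrative sketch of the default method (base R only):
#
#   model <- lm(mpg ~ wt, data = mtcars)
#   p_value(model)                     # p-values from the summary() table
#   p_value(model, method = "robust")  # based on robust standard errors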
{ if (!is.null(method)) { method <- tolower(method) } else { method <- "wald" } p <- NULL if (method == "robust") { return(p_value_robust(model, ...)) } else if (method == "ml1") { return(p_value_ml1(model)) } else if (method == "betwithin") { return(p_value_betwithin(model)) } else { # first, we need some special handling for Zelig-models p <- tryCatch( { if (grepl("^Zelig-", class(model)[1])) { if (!requireNamespace("Zelig", quietly = TRUE)) { stop("Package `Zelig` required. Please install it.", call. = FALSE) } unlist(Zelig::get_pvalue(model)) } else { # try to get p-value from classical summary for default models .get_pval_from_summary(model) } }, error = function(e) { NULL } ) } # if all fails, try to get p-value from test-statistic if (is.null(p)) { p <- tryCatch( { stat <- insight::get_statistic(model) p <- 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE) names(p) <- stat$Parameter p }, error = function(e) { NULL } ) } if (is.null(p)) { insight::print_color("\nCould not extract p-values from model object.\n", "red") } else { .data_frame( Parameter = names(p), p = as.vector(p) ) } } #' @export p_value.lm <- p_value.default #' @export p_value.LORgee <- p_value.default #' @export p_value.lm_robust <- p_value.default #' @export p_value.truncreg <- p_value.default #' @export p_value.geeglm <- p_value.default #' @export p_value.censReg <- p_value.default #' @export p_value.ivreg <- p_value.default #' @export p_value.negbin <- p_value.default #' @export p_value.mlm <- function(model, ...) { cs <- stats::coef(summary(model)) p <- lapply(names(cs), function(x) { params <- cs[[x]] .data_frame( Parameter = rownames(params), p = params[, "Pr(>|t|)"], Response = gsub("^Response (.*)", "\\1", x) ) }) .remove_backticks_from_parameter_names(do.call(rbind, p)) } #' @export p_value.tobit <- function(model, ...) { params <- insight::get_parameters(model) p <- p_value.default(model, ...) p[p$Parameter %in% params$Parameter, ] } # p-Values from Zero-Inflated Models ------------------------------------------ #' @export p_value.zeroinfl <- function(model, component = c("all", "conditional", "zi", "zero_inflated"), method = NULL, ...) { component <- match.arg(component) if (is.null(.check_component(model, component))) { return(NULL) } robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(p_value_robust(model, ...)) } cs <- .compact_list(stats::coef(summary(model))) x <- lapply(names(cs), function(i) { comp <- ifelse(i == "count", "conditional", "zi") stats <- cs[[i]] # remove log(theta) theta <- grepl("Log(theta)", rownames(stats), fixed = TRUE) if (any(theta)) { stats <- stats[!theta, ] } .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = comp, flatten = TRUE), p = as.vector(stats[, 4]), Component = comp ) }) p <- do.call(rbind, x) p$Component <- .rename_values(p$Component, "cond", "conditional") p$Component <- .rename_values(p$Component, "zi", "zero_inflated") .filter_component(p, component) } #' @export p_value.hurdle <- p_value.zeroinfl #' @export p_value.zerocount <- p_value.zeroinfl # p-Values from Mixed Models ----------------------------------------------- #' @export p_value.lme <- function(model, ...) { cs <- stats::coef(summary(model)) p <- cs[, 5] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @rdname p_value #' @export p_value.lmerMod <- function(model, method = "wald", ...)
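# Sketch of the method-specific approximations for lmerMod fits (assumes
# lme4 is installed; mirrors the example in the roxygen header above):
#
#   model <- lme4::lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris)
#   p_value(model, method = "satterthwaite")
#   p_value(model, method = "kenward")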
{ method <- tolower(method) method <- match.arg(method, c("wald", "ml1", "betwithin", "satterthwaite", "kr", "kenward")) if (method == "wald") { p_value_wald(model, ...) } else if (method == "ml1") { p_value_ml1(model, ...) } else if (method == "betwithin") { p_value_betwithin(model, ...) } else if (method == "satterthwaite") { p_value_satterthwaite(model, ...) } else if (method %in% c("kr", "kenward")) { p_value_kenward(model, ...) } } #' @rdname p_value #' @export p_value.merMod <- function(model, method = "wald", ...) { method <- tolower(method) method <- match.arg(method, c("wald", "betwithin", "ml1")) if (method == "wald") { dof <- Inf } else if (method == "ml1") { dof <- dof_ml1(model) } else { dof <- dof_betwithin(model) } p_value_wald(model, dof, ...) } #' @export p_value.cpglmm <- p_value.merMod #' @rdname p_value #' @export p_value.rlmerMod <- function(model, method = "wald", ...) { method <- match.arg(method, c("wald", "betwithin", "ml1")) if (method == "wald") { dof <- Inf } else if (method == "ml1") { dof <- dof_ml1(model) } else { dof <- dof_betwithin(model) } p_value_wald(model, dof, ...) } #' @rdname p_value #' @export p_value.glmmTMB <- function(model, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) if (is.null(.check_component(model, component))) { return(NULL) } cs <- .compact_list(stats::coef(summary(model))) x <- lapply(names(cs), function(i) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = i, flatten = TRUE), p = as.vector(cs[[i]][, 4]), Component = i ) }) p <- do.call(rbind, x) p$Component <- .rename_values(p$Component, "cond", "conditional") p$Component <- .rename_values(p$Component, "zi", "zero_inflated") .filter_component(p, component) } #' @rdname p_value #' @export p_value.MixMod <- function(model, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) if (is.null(.check_component(model, component))) { return(NULL) } s <- summary(model) cs <- list(s$coef_table, s$coef_table_zi) names(cs) <- c("conditional", "zero_inflated") cs <- .compact_list(cs) x <- lapply(names(cs), function(i) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = i, flatten = TRUE), p = as.vector(cs[[i]][, 4]), Component = i ) }) p <- do.call(rbind, x) .filter_component(p, component) } #' @rdname p_value #' @importFrom insight get_parameters #' @export p_value.mixor <- function(model, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) stats <- model$Model[, "P(>|z|)"] parms <- get_parameters(model, effects = effects) .data_frame( Parameter = parms$Parameter, p = stats[parms$Parameter], Effects = parms$Effects ) } # p-Values from Bayesian Models ----------------------------------------------- #' @export p_value.MCMCglmm <- function(model, ...) { nF <- model$Fixed$nfl p <- 1 - colSums(model$Sol[, 1:nF, drop = FALSE] > 0) / dim(model$Sol)[1] .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", flatten = TRUE), p = p ) } #' @export p_value.brmsfit <- function(model, ...) { p <- bayestestR::p_direction(model) .data_frame( Parameter = .remove_backticks_from_string(p$Parameter), p = sapply(p$pd, bayestestR::convert_pd_to_p, simplify = TRUE) ) } #' @export p_value.stanreg <- p_value.brmsfit #' @export p_value.BFBayesFactor <- p_value.brmsfit # p-Values from Survey Models ----------------------------------------------- #' @export p_value.svyglm <- function(model, ...) 
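# Illustrative sketch for survey-weighted GLMs (assumes the 'survey'
# package; `dat` and `wts` are hypothetical data and weights):
#
#   design <- survey::svydesign(ids = ~1, weights = ~wts, data = dat)
#   model <- survey::svyglm(y ~ x, design = design)
#   p_value(model)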
{ cs <- stats::coef(summary(model)) p <- cs[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @export p_value.svyolr <- function(model, ...) { cs <- stats::coef(summary(model)) p <- 2 * stats::pt(abs(cs[, 3]), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @export p_value.svyglm.nb <- function(model, ...) { if (!isNamespaceLoaded("survey")) { requireNamespace("survey", quietly = TRUE) } est <- stats::coef(model) se <- sqrt(diag(stats::vcov(model, stderr = "robust"))) p <- 2 * stats::pt(abs(est / se), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.svyglm.zip <- p_value.svyglm.nb # p-Values from ANOVA ----------------------------------------------- #' @export p_value.aov <- function(model, ...) { params <- model_parameters(model) if (nrow(params) == 0) { return(NA) } if ("Group" %in% names(params)) { params <- params[params$Group == "Within", ] } if ("Residuals" %in% params$Parameter) { params <- params[params$Parameter != "Residuals", ] } if (!"p" %in% names(params)) { return(NA) } .data_frame( Parameter = params$Parameter, p = params$p ) } #' @export p_value.anova <- p_value.aov #' @export p_value.aovlist <- p_value.aov # p-Values from Survival Models ----------------------------------------------- #' @export p_value.coxph <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(p_value_robust(model, ...)) } cs <- stats::coef(summary(model)) p <- cs[, 5] params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(p) ) } #' @export p_value.aareg <- function(model, ...) { s <- summary(model) p <- s$table[, "p"] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.coxme <- function(model, ...) { stat <- insight::get_statistic(model) if (!is.null(stat)) { .data_frame( Parameter = stat$Parameter, p = as.vector(1 - stats::pchisq(stat$Statistic^2, df = 1)) ) } } #' @export p_value.survreg <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(p_value_robust(model, ...)) } s <- summary(model) p <- s$table[, "p"] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.flexsurvreg <- function(model, ...) { params <- insight::get_parameters(model) est <- params$Estimate se <- standard_error(model)$SE p <- 2 * stats::pt(abs(est / se), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = params$Parameter, p = as.vector(p) ) } # p-Values from Special Models ----------------------------------------------- #' @rdname p_value #' @export p_value.DirichletRegModel <- function(model, component = c("all", "conditional", "precision"), ...) 
{ component <- match.arg(component) params <- insight::get_parameters(model) out <- .data_frame( Parameter = params$Parameter, Response = params$Response, p = as.vector(2 * stats::pnorm(-abs(params$Estimate / model$se))) ) if (!is.null(params$Component)) { out$Component <- params$Component } else { component <- "all" } if (component != "all") { out <- out[out$Component == component, ] } out } #' @rdname p_value #' @export p_value.clm2 <- function(model, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) params <- insight::get_parameters(model) cs <- stats::coef(summary(model)) p <- cs[, 4] out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export p_value.clmm2 <- p_value.clm2 #' @export p_value.cgam <- function(model, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = "all") cs <- summary(model) p <- as.vector(cs$coefficients[, 4]) if (!is.null(cs$coefficients2)) p <- c(p, as.vector(cs$coefficients2[, "p.value"])) out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @importFrom utils capture.output #' @export p_value.cpglm <- function(model, ...) { if (!requireNamespace("cplm", quietly = TRUE)) { stop("To use this function, please install package 'cplm'.") } junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(stats[, "Pr(>|t|)"]) ) } #' @export p_value.glmx <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = c(as.vector(stats$glm[, "Pr(>|z|)"]), as.vector(stats$extra[, "Pr(>|z|)"])), Component = params$Component ) } #' @export p_value.rq <- function(model, ...) { p <- tryCatch( { cs <- suppressWarnings(stats::coef(summary(model))) cs[, "Pr(>|t|)"] }, error = function(e) { .get_pval_from_summary( model, cs = suppressWarnings(stats::coef(summary(model, covariance = TRUE))) ) } ) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = p ) } #' @export p_value.crq <- p_value.rq #' @export p_value.nlrq <- p_value.rq #' @export p_value.rqss <- function(model, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) cs <- summary(model)$coef p_column <- intersect(c("Pr(>|t|)", "Pr(>|z|)"), colnames(cs)) p_cond <- cs[, p_column] cs <- summary(model)$qsstab p_smooth <- cs[, "Pr(>F)"] params_cond <- insight::get_parameters(model, component = "conditional") params_smooth <- insight::get_parameters(model, component = "smooth_terms") out_cond <- .data_frame( Parameter = params_cond$Parameter, p = as.vector(p_cond), Component = "conditional" ) out_smooth <- .data_frame( Parameter = params_smooth$Parameter, p = as.vector(p_smooth), Component = "smooth_terms" ) switch( component, "all" = rbind(out_cond, out_smooth), "conditional" = out_cond, "smooth_terms" = out_smooth ) } #' @export p_value.biglm <- function(model, ...) { cs <- summary(model)$mat params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(cs[, 5]) ) } #' @export p_value.complmrob <- function(model, ...) 
{ stats <- summary(model)$stats params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(stats[, "Pr(>|t|)"]) ) } #' @export p_value.fixest <- function(model, ...) { stats <- summary(model)$coeftable params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(stats[, "Pr(>|z|)"]) ) } #' @export p_value.feglm <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(stats[, 4]) ) } #' @export p_value.crch <- function(model, ...) { cs <- do.call(rbind, stats::coef(summary(model), model = "full")) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(cs[, 4]) ) } #' @rdname p_value #' @export p_value.gee <- function(model, method = NULL, ...) { cs <- stats::coef(summary(model)) if (!is.null(method) && method == "robust") { p <- 2 * stats::pt(abs(cs[, "Estimate"] / cs[, "Robust S.E."]), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) } else { p <- 2 * stats::pt(abs(cs[, "Estimate"] / cs[, "Naive S.E."]), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) } .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @export p_value.glimML <- function(model, ...) { if (!requireNamespace("aod", quietly = TRUE)) { stop("Package 'aod' required for this function to work. Please install it.") } s <- methods::slot(aod::summary(model), "Coef") p <- s[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(s)), p = as.vector(p) ) } #' @export p_value.logistf <- function(model, ...) { utils::capture.output(s <- summary(model)) .data_frame( Parameter = .remove_backticks_from_string(names(s$prob)), p = as.vector(s$prob) ) } #' @export p_value.lrm <- function(model, ...) { stat <- insight::get_statistic(model) p <- 2 * stats::pt(abs(stat$Statistic), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(stat$Parameter), p = as.vector(p) ) } #' @export p_value.ols <- p_value.lrm #' @export p_value.rms <- p_value.lrm #' @export p_value.psm <- p_value.lrm #' @export p_value.rlm <- function(model, ...) { cs <- stats::coef(summary(model)) p <- 2 * stats::pt(abs(cs[, 3]), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.betareg <- function(model, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) params <- insight::get_parameters(model) cs <- do.call(rbind, stats::coef(summary(model))) p <- cs[, 4] out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export p_value.gamlss <- function(model, ...) { parms <- insight::get_parameters(model) utils::capture.output(cs <- summary(model)) .data_frame( Parameter = parms$Parameter, p = as.vector(cs[, 4]), Component = parms$Component ) } #' @export p_value.BBmm <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE), p = as.data.frame(summary(model)$fixed.coefficients)$p.value ) } #' @export p_value.BBreg <- function(model, ...) 
{ .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE), p = as.data.frame(summary(model)$coefficients)$p.value ) } #' @export p_value.wbm <- function(model, ...) { s <- summary(model) p <- c( s$within_table[, "p"], s$between_table[, "p"], s$ints_table[, "p"] ) params <- insight::get_parameters(model, effects = "fixed") .data_frame( Parameter = params$Parameter, p = as.vector(p), Component = params$Component ) } #' @export p_value.wbgee <- p_value.wbm #' @export p_value.gam <- function(model, ...) { p.table <- summary(model)$p.table s.table <- summary(model)$s.table d1 <- .data_frame( Parameter = rownames(p.table), p = as.vector(p.table[, 4]), Component = "conditional" ) d2 <- .data_frame( Parameter = rownames(s.table), p = as.vector(s.table[, 4]), Component = "smooth_terms" ) .remove_backticks_from_parameter_names(rbind(d1, d2)) } #' @export p_value.Gam <- function(model, ...) { p.aov <- stats::na.omit(summary(model)$parametric.anova) .data_frame( Parameter = .remove_backticks_from_string(rownames(p.aov)), p = as.vector(p.aov[, 5]) ) } #' @export p_value.gamm <- function(model, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") p_value(model) } #' @export p_value.gamm4 <- p_value.gamm #' @export p_value.gls <- function(model, ...) { cs <- summary(model)$tTable p <- cs[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @export p_value.pggls <- function(model, ...) { cs <- summary(model)$CoefTable p <- cs[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @export p_value.gmnl <- function(model, ...) { cs <- summary(model)$CoefTable p <- cs[, 4] # se <- cs[, 2] pv <- .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) # rename intercepts intercepts <- grepl(":(intercept)", pv$Parameter, fixed = TRUE) pv$Parameter[intercepts] <- sprintf( "(Intercept: %s)", sub(":(intercept)", replacement = "", pv$Parameter[intercepts], fixed = TRUE) ) pv } #' @export p_value.htest <- function(model, ...) { model$p.value } #' @export p_value.multinom <- function(model, ...) { stat <- insight::get_statistic(model) p <- 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE) .data_frame( Parameter = stat$Parameter, p = as.vector(p), Response = stat$Response ) } #' @export p_value.brmultinom <- p_value.multinom #' @export p_value.bracl <- function(model, ...) { smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) p <- smry[[4]] names(p) <- rownames(smry) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(p), Response = params$Response ) } #' @export p_value.maxLik <- function(model, ...) { p <- summary(model)$estimate[, 4] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.pglm <- function(model, ...) { p <- summary(model)$estimate[, 4] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.plm <- function(model, ...) { p <- stats::coef(summary(model)) .data_frame( Parameter = .remove_backticks_from_string(names(p[, 4])), p = as.vector(p[, 4]) ) } #' @export p_value.polr <- function(model, method = NULL, ...) 
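# For proportional-odds models, the t-values from summary() are converted
# to p-values using model-based degrees of freedom; method = "robust"
# instead computes p-values from robust standard errors. Sketch (assumes
# MASS is installed):
#
#   model <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq, data = MASS::housing)
#   p_value(model)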
{ robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(p_value_robust(model, ...)) } smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) tstat <- smry[[3]] p <- 2 * stats::pt(abs(tstat), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) names(p) <- rownames(smry) .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.vglm <- function(model, ...) { if (!requireNamespace("VGAM", quietly = TRUE)) { stop("Package `VGAM` required.", call. = FALSE) } cs <- VGAM::summary(model)@coef3 p <- cs[, 4] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.vgam <- function(model, ...) { params <- insight::get_parameters(model) stat <- insight::get_statistic(model) p <- 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(stat$Parameter), p = as.vector(p), Component = params$Component ) } #' @export p_value.rma <- function(model, ...) { params <- insight::get_parameters(model) .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), p = model$pval ) } # p-Values from standard classes --------------------------------------------- #' @seealso https://blogs.sas.com/content/iml/2011/11/02/how-to-compute-p-values-for-a-bootstrap-distribution.html #' @export p_value.numeric <- function(model, ...) { 2 * (1 - max( c( (1 + length(model[model > 0])) / (1 + length(model)), (1 + length(model[model < 0])) / (1 + length(model)) ) )) } #' @export p_value.data.frame <- function(model, ...) { data <- model[sapply(model, is.numeric)] .data_frame( Parameter = names(data), p = sapply(data, p_value) ) } #' @export p_value.list <- function(model, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") p_value(model) } else { insight::print_color("\nCould not extract p-values from model object.\n", "red") } } # helper -------------------------------------------------------- .get_pval_from_summary <- function(model, cs = NULL) { if (is.null(cs)) cs <- stats::coef(summary(model)) p <- NULL if (ncol(cs) >= 4) { # do we have a p-value column based on t? pvcn <- which(colnames(cs) == "Pr(>|t|)") # if not, do we have a p-value column based on z? if (length(pvcn) == 0) { pvcn <- which(colnames(cs) == "Pr(>|z|)") } # if not, default to 4 if (length(pvcn) == 0) pvcn <- 4 p <- cs[, pvcn] if (is.null(names(p))) { coef_names <- rownames(cs) if (length(coef_names) == length(p)) names(p) <- coef_names } } if (!is.null(p)) names(p) <- .remove_backticks_from_string(names(p)) p } parameters/R/model_parameters.BFBayesFactor.R0000644000176200001440000000351213607420467020707 0ustar liggesusers#' Parameters from BayesFactor objects #' #' Parameters of BayesFactor objects. #' #' @param model Object of class \code{BFBayesFactor}. #' @inheritParams bayestestR::describe_posterior #' #' #' @examples #' \donttest{ #' library(BayesFactor) #' model <- ttestBF(x = rnorm(100, 1, 1)) #' model_parameters(model) #' } #' #' @return A data frame of indices related to the model's parameters. #' @importFrom stats na.omit #' @importFrom bayestestR bayesfactor_models #' @export model_parameters.BFBayesFactor <- function(model, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.89, priors = TRUE, ...)
{ out <- bayestestR::describe_posterior(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, priors = priors, ...) # Add components and effects columns tryCatch( { params <- insight::clean_parameters(model)[, c("Parameter", "Effects", "Component")] out <- merge(out, params, sort = FALSE) }, error = function(e) { NULL } ) # Extract BF tryCatch( { out$BF <- as.data.frame(bayestestR::bayesfactor_models(model)[-1, ])$BF }, error = function(e) { NULL } ) # Remove unnecessary columns if ("CI" %in% names(out) && length(stats::na.omit(unique(out$CI))) == 1) { out$CI <- NULL } if ("ROPE_CI" %in% names(out) && length(stats::na.omit(unique(out$ROPE_CI))) == 1) { out$ROPE_CI <- NULL } if ("ROPE_low" %in% names(out)) { out$ROPE_low <- NULL out$ROPE_high <- NULL } attr(out, "ci") <- ci attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(out) <- c("parameters_model", class(out)) out } parameters/NEWS.md0000644000176200001440000002742113617625432013526 0ustar liggesusers
# parameters 0.5.0

## Breaking changes

- `skewness()` now uses a different method to calculate the skewness by default. Different methods can be selected using the `type`-argument.
- `kurtosis()` now uses a different method to calculate the kurtosis by default. Different methods can be selected using the `type`-argument.

## New supported models

- Added support for `cglm` (*cglm*), `DirichletRegModel` (*DirichletReg*).

## General

- Added new vignettes on 'Standardized Model Parameters' and 'Robust Estimation of Standard Errors', and vignettes are now also published on CRAN.
- Improved handling of robust statistics in `model_parameters()`. This should now work for more models than before.
- Improved accuracy of `ci.merMod()` for `method = "satterthwaite"` and `method = "kenward"`.
- `select_parameters()` for *stanreg* models, which was temporarily removed due to the CRAN removal of package **projpred**, is now re-implemented.

## New functions

- `dof_betwithin()` to compute degrees of freedom based on a between-within approximation method (and related to that, `p_value_*()` and `se_*()` for this method were added as well).
- `random_parameters()` that returns information about the random effects such as variances, R2 or ICC.
- `closest_component()` as a small helper that returns the component index for each variable in a data frame that was used in `principal_components()`.
- `get_scores()` as a small helper to extract scales and calculate sum scores from a principal component analysis (PCA, `principal_components()`).

## Changes to functions

- `n_clusters()` gets the option `"M3C"` for the `package`-argument, so you can try to determine the number of clusters by using the `M3C::M3C()` function.
- The `print()`-method for `model_parameters()` gets a `select`-argument, to print only selected columns of the parameters table.
- `model_parameters()` for meta-analysis models has an improved `print()`-method for subgroups (see examples in `?model_parameters.rma`).
- `model_parameters()` for mixed models gets a `details`-argument to additionally print information about the random effects.
- `model_parameters()` now accepts the `df_method`-argument for more (mixed) models.
- The Intercept-parameter in `model_parameters()` for meta-analysis models was renamed to `"Overall"`.
- `skewness()` gets a `type`-argument, to compute different types of skewness.
- `kurtosis()` gets a `type`-argument, to compute different types of kurtosis.
- `describe_distribution()` now also works on data frames and gets a nicer print-method.

## Bug fixes

- Fixed issue in `model_parameters()` when `robust = TRUE`, which could sometimes mess up the order of the statistic column.
- Fixed issues in `model_parameters()` with wrong `df` for `lme`-models.
- Fixed issues in `model_parameters.merMod()` when `df_method` was not set to the default.
- Fixed issues in `model_parameters.merMod()` and `model_parameters.gee()` when `robust = TRUE`.
- Fixed issues with *coxph* models with only one parameter.
- Fixed issue in `format_p()` when argument `digits` was `"apa"`.
- Fixed issues in `model_parameters()` for `zeroinfl`-models.

# parameters 0.4.1

## Bug fixes

- Fix CRAN check issues, caused by removal of package 'projpred'.

# parameters 0.4.0

## Breaking changes

- The column for degrees of freedom in `model_parameters()` was renamed from `df_residuals` to `df_error` for regression model objects, because these degrees of freedom actually were not always referring to _residuals_ - we consider `df_error` a more generic name.
- `model_parameters()` for standardized parameters (i.e. `standardize` is not `NULL`) only returns standardized coefficients, CI and standard errors (and not both, unstandardized and standardized values).
- `format_ci()` was removed and re-implemented in the **insight** package.

## Renaming

- `model_bootstrap()` was renamed to `bootstrap_model()`. `model_bootstrap()` will remain as an alias.
- `parameters_bootstrap()` was renamed to `bootstrap_parameters()`. `parameters_bootstrap()` will remain as an alias.
- `model_simulate()` was renamed to `simulate_model()`. `model_simulate()` will remain as an alias.
- `parameters_simulate()` was renamed to `simulate_parameters()`. `parameters_simulate()` will remain as an alias.
- `parameters_selection()` was renamed to `select_parameters()`. `parameters_selection()` will remain as an alias.
- `parameters_reduction()` was renamed to `reduce_parameters()`. `parameters_reduction()` will remain as an alias.

## New supported models

- Added support for `vgam` (*VGAM*), `cgam`, `cgamm` (*cgam*), `complmrob` (*complmrob*), `cpglm`, `cpglmm` (*cplm*), `fixest` (*fixest*), `feglm` (*alpaca*), `glmx` (*glmx*), `glmmadmb` (*glmmADMB*), `mcmc` (*coda*), `mixor` (*mixor*).
- `model_parameters()` now supports `blavaan` models (*blavaan*).

## General

- Better handling of `clm2`, `clmm2` and `stanmvreg` models.
- Better handling of `psych::omega` models.

## New functions

- `dof_satterthwaite()` and `dof_ml1()` to compute degrees of freedom based on different approximation methods (and related to that, `p_value_*()` and `se_*()` for these methods were added as well).
- `rescale_weights()` to rescale design (probability or sampling) weights for use in multilevel-models without survey-design.

## Changes to functions

- Robust estimation (like `standard_error_robust()` or `ci_robust()`) can now also compute cluster-robust variance-covariance matrices, using the *clubSandwich* package.
- `model_parameters()` gets a `robust`-argument, to compute robust standard errors, and confidence intervals and p-values based on robust standard errors.
- Arguments `p_method` and `ci_method` in `model_parameters.merMod()` were replaced by a single argument `df_method`.
- `model_parameters.principal()` includes a `MSA` column for objects from `principal_components()`.

## Bug fixes

- Fixed issue in `model_parameters()` with non-typical ordering of coefficients for mixed models.
- Fixed issues with models of class `rlmerMod`.
- Fixed minor issues in `model_parameters.BFBayesFactor()`.

# parameters 0.3.0

## Breaking changes

Parts of the **parameters** package are restructured and functions focussing on anything related to effect sizes are now re-implemented in a new package, [**effectsize**](https://github.com/easystats/effectsize). In detail, the following breaking changes have been made:

- Functions for computing effect sizes (`cohens_f()`, `eta_squared()` etc.) have been removed and are now re-implemented in the **effectsize**-package.
- Functions for converting effect sizes (`d_to_odds()` etc.) have been removed and are now re-implemented in the **effectsize**-package.
- `standardize()` and `normalize()` (and hence, also `parameters_standardize()`) have been removed ;-( and are now re-implemented in the **effectsize**-package.

## New supported models

- Added support for `aareg` (*survival*), `bracl`, `brmultinom` (*brglm2*), `rma` (*metafor*) and `multinom` (*nnet*) to various functions.
- `model_parameters()` for `kmeans`.
- `p_value()`, `ci()`, `standard_error()` and `model_parameters()` now support *flexsurvreg* models (from package **flexsurv**).

## New functions

- `degrees_of_freedom()` to get DoFs.
- `p_value_robust()`, `ci_robust()` and `standard_error_robust()` to compute robust standard errors, and p-values or confidence intervals based on robust standard errors.
- `demean()` to calculate de-meaned and group-meaned variables (centering within groups, for panel-data regression).
- `n_parameters()` to get the number of parameters.
- `n_clusters()` to determine the number of clusters to extract.
- `cluster_analysis()` to return group indices based on cluster analysis.
- `cluster_discrimination()` to determine the goodness of classification of cluster groups.
- `check_clusterstructure()` to check the suitability of data for clustering.
- `check_multimodal()` to check if a distribution is unimodal or multimodal.
- Add `plot()`-methods for `principal_components()`.

## Changes to functions

- Added indices of model fit to `n_factors()` ([Finch, 2019](https://doi.org/10.1177/0013164419865769)).
- `standard_error()` for mixed models gets an `effects` argument, to return standard errors for random effects.
- The `method`-argument for `ci()` gets a new option, `"robust"`, to compute confidence intervals based on robust standard errors. Furthermore, `ci_wald()` gets a `robust`-argument to do the same.
- `format_p()` gets a `digits`-argument to set the amount of digits for p-values.
- `model_parameters()` now accepts (non-documented) arguments `digits`, `ci_digits` and `p_digits` to change the amount and style of formatting values. See [examples in `model_parameters.lm()`](https://easystats.github.io/parameters/reference/model_parameters.lm.html).
- Improved `print()` method for `model_parameters()` when used with Bayesian models.
- Added a further method (gap-statistic) to `n_clusters()`.

## Bug fixes

- Interaction terms in `model_parameters()` were denoted as nested interaction when one of the interaction terms was surrounded by a function, e.g. `as.factor()`, `log()` or `I()`.
- Fixed bug in `parameters_type()` when a parameter occurred multiple times in a model.
- Fixed bug with *multinom*-support.
- Fixed bug in `model_parameters()` for non-estimable GLMs.
- Fixed bug in `p_value()` for *MASS::rlm* models.
- Fixed bug in `reshape_loadings()` when converting loadings from wide to long and back again.
# parameters 0.2.0

## Breaking changes

- `format_value()` and `format_table()` have been removed and are now re-implemented in the **insight** package.

## General

- `parameters()` is an alias for `model_parameters()`.
- `p_value()`, `ci()`, `standard_error()`, `standardize()` and `model_parameters()` now support many more model objects, including mixed models from packages *nlme*, *glmmTMB* or *GLMMadaptive*, zero-inflated models from package *pscl* or other modelling packages. Along with these changes, functions for specific model objects with a zero-inflated component get a `component`-argument to return the requested values for the complete model, the conditional (count) component or the zero-inflation component from the model only.

## New functions

- `parameters_simulate()` and `model_simulate()`, as computationally faster alternatives to `parameters_bootstrap()` and `model_bootstrap()`.
- `data_partition()` to partition data into a test and a training set.
- `standardize_names()` to standardize column names from data frames, in particular objects returned from `model_parameters()`.
- `se_kenward()` to calculate approximated standard errors for model parameters, based on the Kenward-Roger (1997) approach.

## Changes to functions

- `format_value()` and `format_ci()` get a `width`-argument to set the minimum length of the returned formatted string.
- `format_ci()` gets a `bracket`-argument to include or remove brackets around the ci-values.
- `eta_squared()`, `omega_squared()`, `epsilon_squared()` and `cohens_f()` now support more model objects.
- The `print()`-method for `model_parameters()` now better aligns confidence intervals and p-values.
- `normalize()` gets an `include_bounds`-argument, to compress normalized variables so they do not contain zeros or ones.
- The `method`-argument for `ci.merMod()` can now also be `"kenward"` to compute confidence intervals with degrees of freedom based on the Kenward-Roger (1997) approach.

## Bug fixes

- Fixed issue with wrong computation of Wald-approximated confidence intervals.
- Fixed issue with wrong computation of degrees of freedom for `p_value_kenward()`.
- `parameters_standardize()` resp. `standardize()` for model objects now no longer standardizes `log()` terms, count or ratio response variables, or variables of class `Surv` and `AsIs`.
# parameters 0.1.0 - Added a `NEWS.md` file to track changes to the package parameters/MD50000644000176200001440000003711113620060023012720 0ustar liggesusersb1cc25abbe01d21ee117e4b1ab6d2004 *DESCRIPTION 502f87b359a27e0261d253a9bec3f5d8 *NAMESPACE 2d5d897ee3083923322f7799ecbc2556 *NEWS.md bb9eb6376a7631c17c85d75eeef763de *R/bootstrap_model.R 3a546822c013cbf66434c69c9eceb430 *R/bootstrap_parameters.R 25be27f673dfdacbe7c0cc8f812e583d *R/check_clusterstructure.R 128063550771ee6ffbcf122fab30f861 *R/check_factorstructure.R 6a36945e5726b570256ec33dd2043001 *R/check_multimodal.R d573c2157011a89f42fe4ddcadeaee77 *R/ci.R c4298c070e86f68079ee46836bf0de5b *R/ci_betwithin.R 9d20a107cfc61a5ff65eaa71da51075f *R/ci_kenward.R 6257a7af84cdc7f5a8978b1c0881c357 *R/ci_ml1.R e8a851dd18e4a10e55eb036593ab8b92 *R/ci_profile_boot.R d991cddf48840e4ffbe5f3715297a7db *R/ci_satterthwaite.R 964430b0df8ccc42b9d395a46e3ad0b3 *R/ci_wald.R 8a437f63f895f8d99c997a30704baefb *R/cluster_analysis.R 09983d6ac88d1c5719bbac4865a4b5d3 *R/cluster_discrimination.R f0164e2f09f01ccae82fb17a1282f450 *R/convert_data_to_numeric.R 617c172ede8515b1fe38362bf1bf570b *R/convert_efa_to_cfa.R 37f1ee09070f67a65a7f43b4f5e659d1 *R/data_partition.R f119848c6904accbb8d77e9393931ecb *R/demean.R ab14af8383a4f61ac88a89147c6655d5 *R/describe_distribution.R f7854eb6e5b943fd537672de96710431 *R/dof.R 6c0882c85827b31094528e81f3985c13 *R/dof_betwithin.R 90479c058c6fdd4a184793e0385ac350 *R/dof_kenward.R 41edb10a875bde4f1117f43878c8550e *R/dof_ml1.R 4b7b4eadb63c77de03f26faea9961f35 *R/dof_satterthwaite.R b4128aa5612e1d42d1398666646f9ffb *R/equivalence_test.R fb31062ed4470ec1ddba6dd97854af92 *R/extract_parameters.R e133c89dee0f1a6d058f7a2fa772b372 *R/factor_analysis.R 008796c2e1dd5638b0be638442523eda *R/fish.R 70f8800dd0202093bdf75393b9ca4625 *R/format_algorithm.R a8c2f62610c825283758659e5d003304 *R/format_bf.R 6a587e4b39d2e29ce3f1a0f59f330deb *R/format_model.R 9de766850a7c3c7a81da49ff11b3af62 *R/format_number.R 0e1011d3b0c62f04824f32396df2f3eb *R/format_order.R 9832c32644a1a7feb36c11f59d57da18 *R/format_p.R 71a9726a4c33b03e4fcca900fc456d71 *R/format_parameters.R b3de0e00b64456f02f3f9344052c1f13 *R/format_pd.R 24ad6da23ae5e30855034c5b6d05499b *R/format_rope.R b27f0fc3ea6760f46eb453438b41f766 *R/get_scores.R 4185001367f8df939f5c55e89466a643 *R/model_parameters.BFBayesFactor.R 9c7c3bb887f433b7fdbd8c697e5a1cb2 *R/model_parameters.BayesFM.R c00047ddd885fbfebf0fefea68a44ab6 *R/model_parameters.FactoMineR.R d831533a4d33930ce6baaf8d94be17db *R/model_parameters.Mclust.R 377bf08dcca7fb7edbb0e12cf5b93167 *R/model_parameters.R 2c9bdfbd858e5c6148cf1535d9fd7c71 *R/model_parameters.aov.R 48a1cf2b9f6a116a44d7dc4e2bfa3b5d *R/model_parameters.bayesian.R f742ccbb0ccef3b26994760d0a2a5097 *R/model_parameters.gam.R e9595672d27618e1cccf7f475ac70080 *R/model_parameters.htest.R 236b4ecb2ecdbd3eba8ef7b525225131 *R/model_parameters.kmeans.R 612f767a6a5592c887df71e5439356ad *R/model_parameters.lavaan.R eb16c9f4a6a2e79323d0a25a2fe092e7 *R/model_parameters.multinom.R 5cbcff8872e14bea06944caa7156a4c1 *R/model_parameters.psych.R 0d01a4960366b86b8d3793db76960373 *R/model_parameters.rma.R fdf5eacbef2667dfa1b4122194829570 *R/model_parameters.wbm.R 59f3de9bc622178527994b4ff667bddc *R/model_parameters.zeroinfl.R cfd226cef2f687959e12fa01f37ec527 *R/model_parameters_default.R 79b0c3be8f28e0b42b8f0f0f6b0305c7 *R/model_parameters_mixed.R 65ee3517c76004913afc11f6b22a5fb4 *R/n_clusters.R e18303f216aa01b1dab775f6d5dd692d *R/n_factors.R 660095417583466ced5428c85c4be7a5 *R/n_parameters.R 
b970f336a1ce6a1cc4340f4b41753667 *R/p_value.R 6fcbe530a6f4996610042df965e7075b *R/p_value_betwithin.R 2fa22851b95d224cbc53a3ce2c068841 *R/p_value_kenward.R 4f49b0db27246826e2f82470142cd79b *R/p_value_ml1.R c32970104760290034252c95ac065d71 *R/p_value_satterthwaite.R e6abe7544b3abd017e2778c2350b3cc0 *R/p_value_wald.R 3758e9e31846c2df1191a4ba5277b889 *R/parameters_table.R b3c9b9182ae91649a4a20c0e7b7f5ef7 *R/parameters_type.R 1a8f120a036b4ed2551d983583ed1161 *R/plot.R 7e584f4daa357d591e2aa6bb8f62042a *R/principal_components.R 288c2bc0ac4a885ae257bb96517d2580 *R/print.parameters_model.R 2b035f80df69895fd2dfc93f7db1e01f *R/random_parameters.R 5e112e142b52626d90fbc728deada7a3 *R/reduce_parameters.R bcb179c8df8ff65b46fe13e555248aaa *R/rescale_weights.R a9e78d194f7ad8f3dbf33b8fb1170b9d *R/reshape_loadings.R f44907a8940630ef4e7a9481456cbef8 *R/robust_estimation.R a2d25f22f49a485d7f243760183db930 *R/se_betwithin.R 32d04837c59953e35d4fc88e19876ba2 *R/se_kenward.R ea095a782cc918bf2a3be8a9d388f7f5 *R/se_ml1.R da542d545ce1d73f804644dd213d78fd *R/se_satterthwaite.R f8d7153e40246749dd2e56cc56c35d8f *R/select_parameters.R 61c4701c193bcda7cbcf0506d8807dc2 *R/select_parameters.stanreg.R 23e05366b08f35eb2e87ade7a0b0b14d *R/simulate_model.R 191a1dffa9a672637b8b71438a1ba6ea *R/simulate_parameters.R a0611f74b90bf29531a43070d17a2fea *R/skewness_kurtosis.R 9ae3e6809805e8b279d15c4909feb3c2 *R/smoothness.R 0f5366ab6f8c8e517779f5b9e94b2e13 *R/standard_error.R 3901d8aa0e3e76b566bc6c852067e9aa *R/standardize_names.R d4ae0398d8a1f09a346682f854c8edd9 *R/utils.R 969f41c9297f6999f03bcd6b9c37a891 *R/utils_cleaners.R 097b6d21db8ce22ffdd750503bc6c510 *R/utils_model_parameters.R 9b73a33b12cf902e2b0f66737595b6a1 *R/utils_pca_efa.R 77f0ff5fc8b81cbb7a54289bfb081efc *R/utils_values_aov.R 40703ae039acbaa4ca20ac36f7f7015f *README.md 4edc255ffe0919f41b7c3d2f07d047e3 *build/partial.rdb 473bab328e65d11b20c1c8ff48d1213a *build/vignette.rds fed293a745c7a75939e2f2156aaa9afe *data/fish.RData 8713159f16811b28fb9dab334970df1c *inst/CITATION 69d8eb7a445a384161d3f083a521bd91 *inst/WORDLIST 4e46aaeb6d487a94aed3f0ffc7c1bc8e *inst/doc/efa_cfa.R 72c509e8e136401657909875ead4a5bf *inst/doc/efa_cfa.Rmd ed2ca307cc6a485d8455f183d44ff09b *inst/doc/efa_cfa.html f5accf5676ad9f60270a9d81f0266aa2 *inst/doc/model_parameters.R ee04aebcd7dccea1fcdc7aea09b50d0c *inst/doc/model_parameters.Rmd 578c6ac6f0c04a23ccdacfa3d4cccc0b *inst/doc/model_parameters.html 7a4f7f906b6d7a4f8ad3d8c41e57a91e *inst/doc/model_parameters_robust.R 35f4893e1e03a6bac1aad99682fd5d52 *inst/doc/model_parameters_robust.Rmd d610f6a8af67e8b48be3189ec6a93d2c *inst/doc/model_parameters_robust.html 927022bb7d8e29927f5bca7096f2db00 *inst/doc/model_parameters_standardized.R 4200152aa9feb79f068942f6b5fbc304 *inst/doc/model_parameters_standardized.Rmd 5625f42dc7edc5a47e4978f786f7b81b *inst/doc/model_parameters_standardized.html 18f4c7b2f3466860a0f705e4884aa085 *inst/doc/parameters_reduction.R 135274bcfd6117ac567a0f8b5d406750 *inst/doc/parameters_reduction.Rmd 8a09e87798c00885096f526c4ce70b33 *inst/doc/parameters_reduction.html b148c90e6f2d4d0982658c014e9a5bd5 *inst/doc/parameters_selection.R 94c9743834b3f7722d09f08c2c1d4b1a *inst/doc/parameters_selection.Rmd 50aec1779fb74976d5832af1620be2f6 *inst/doc/parameters_selection.html 1cf9358169f35828f5adc543d5d546d2 *man/DRR.Rd 084bc25ebb22e724dd18624a57448787 *man/ICA.Rd 048b43ecb5d95a486b92d9b6bba14a30 *man/bootstrap_model.Rd 2a7a7a1a15bac2b97bcff9bc9b23c62b *man/bootstrap_parameters.Rd 5721d0886563c914ccd38a71c6e8e35c *man/check_clusterstructure.Rd 
30d565f870a2443f89fc29ae0aafe6e3 *man/check_factorstructure.Rd 1883f23cbc609fd025e39aa69e1786a9 *man/check_kmo.Rd f1542db62b2cc07eafc7204bc12bb4b6 *man/check_multimodal.Rd 4a3be1a2fab8edc3c30d6ad4d4d2be0e *man/check_sphericity.Rd b919b74ed90411e65a198eeabfa10f30 *man/ci.merMod.Rd 0c507b3b80a4c408bc154f231c4c3ca9 *man/cluster_analysis.Rd f33be927564843d6f3f8f3ff1f29180f *man/cluster_discrimination.Rd 466a2564538e89875ad533232ee8f122 *man/cmds.Rd eb6fb45920a2727cc9a5eb34825ae0fd *man/convert_data_to_numeric.Rd 8d420e83e136a78b4fd055e6e7bdb3a3 *man/convert_efa_to_cfa.Rd 65addcaeb1d7d20bbd60096d12bdcd66 *man/data_partition.Rd f262f9952d0a500fc17350fb9a869f96 *man/degrees_of_freedom.Rd 48b4cfc537e56bef81702edc22a2a0e5 *man/demean.Rd b3c13548899b4386dfd0156022e6d30f *man/describe_distribution.Rd 1e3149058ce82a8713750ab2e331cb3e *man/dot-compact_character.Rd 27ef65890f359e61ef542265a20b7b04 *man/dot-compact_list.Rd 498e90dd507fbb85d7a8748533ffb10e *man/dot-data_frame.Rd 471381fe4f8bbca1317376f5c2992b66 *man/dot-factor_to_dummy.Rd dd47c706147f5964328c8f208d7b8556 *man/dot-factor_to_numeric.Rd 9405e0f50e2b293721afb8e38359b6d3 *man/dot-filter_component.Rd 3442f0d2cdb4169bc3ab006cd8d22be8 *man/dot-find_most_common.Rd 7c2dd491b9e53558da75d85893c83db3 *man/dot-flatten_list.Rd 5e306e399cb093af01c57ed896c87881 *man/dot-n_factors_bartlett.Rd deecafb6c551d23a8b600aac521e323a *man/dot-n_factors_bentler.Rd 3ee8604bcf850b5e72826dc4b36aa9b4 *man/dot-n_factors_cng.Rd d921851acea7ea034f9a42664c1e8dd7 *man/dot-n_factors_mreg.Rd ff4f35c66e48baa0068428f864f9321c *man/dot-n_factors_scree.Rd bd14319fd3cb515aac76cacb89adb9c9 *man/dot-n_factors_sescree.Rd d3bf7b7e20759f6a32c80e58f449bd57 *man/dot-recode_to_zero.Rd 3e75f236094463aac776b472f2d8a1ed *man/equivalence_test.lm.Rd 3dabbef09bce9055604687fff3d876d6 *man/factor_analysis.Rd f4ad44d1a1ec50af13b95af2666a7198 *man/figures/figure1.png 933d9ce29c410b65260deeb727739108 *man/figures/figure2.png 1828a05c39e9c1efeff4a03ba2a90b30 *man/figures/logo.png bea362446046d55759c5d61ec972c149 *man/fish.Rd ff2a859fcb598f0429706fe6e62ceeba *man/format_algorithm.Rd 70a7476d8663ec0bd4e3e3e6d4f9f952 *man/format_bf.Rd d2aae891cdb2e64898c1e2541ff9c7fa *man/format_model.Rd ee15c4cbd4ba3544c411e2a81baccc17 *man/format_number.Rd ef032a962c39171ef169057965f123ea *man/format_order.Rd 49e0b5177cf3b290d5ae6cae39ccd1a1 *man/format_p.Rd 014bdb79f70f2e8e9f9e1158c24a5764 *man/format_parameters.Rd 27a4bf30962fd010c8d8b96ec7c2cbb8 *man/format_pd.Rd 077ee5ae4d3fdc9bf02d6b2f4be760bc *man/format_rope.Rd eb24575db470bbed2ce28598852a77e5 *man/get_scores.Rd 5be7b1d938395710c46844857e5f5a4d *man/model_parameters.BFBayesFactor.Rd 46c53a8fc3204435f9f5618cdf4097e7 *man/model_parameters.Mclust.Rd 29647cf1e23c733ae533e0d13d7bae19 *man/model_parameters.Rd 827c8ce8b6ea2644725560b75af56516 *man/model_parameters.aov.Rd a402773805774bdb48d0586103a6951f *man/model_parameters.befa.Rd 697142b5ac6a45154dfbeaa90bc86809 *man/model_parameters.default.Rd d160114e5391e6496c8310e955cd92c2 *man/model_parameters.gam.Rd 67eaebb8dcd72532db4439bb3fdd43c3 *man/model_parameters.htest.Rd 2501cce5c6c250d6ee2b2776bd9907d0 *man/model_parameters.kmeans.Rd 96b44ded7e6df5d95e763f828d74efd6 *man/model_parameters.lavaan.Rd ded1bc0f12bd6e1dd0dfeb0d44879028 *man/model_parameters.merMod.Rd 2c33899b6924bdc093b9537284ea2491 *man/model_parameters.mlm.Rd ea06c2b8f0024aeae5195c27c9846106 *man/model_parameters.principal.Rd e5af1066077a7032470f9a90c8478ed5 *man/model_parameters.rma.Rd 45585775b5e912b82f6f867051544f68 *man/model_parameters.stanreg.Rd 
86ea0508c07feeac5263bdc0ccf66b37 *man/model_parameters.zeroinfl.Rd d1ca709db230d0716c251e6c6b401719 *man/n_clusters.Rd 9865e8dee6e7eb184716db8d51e41a82 *man/n_factors.Rd 6e60954e700749beaf7b992dd8973a10 *man/n_parameters.Rd 2a1ea1cb8e030fa4e355c32b1d3e134f *man/p_value.Rd 149dca119cd3c9d9f68227b506834ab4 *man/p_value_betwithin.Rd a5667566637841b41e1104dec66d7308 *man/p_value_kenward.Rd 0c6a83a9afab18f23913091939f20b66 *man/p_value_ml1.Rd 3b7f917abb785f5b2f6d01d0bf7f91ff *man/p_value_satterthwaite.Rd 528b51114c8b813bff9d7cf381a25312 *man/p_value_wald.Rd 18a6460c5eb5e7594c1a2ae399a7513a *man/parameters_table.Rd c6dd5167b41e33215d5933391200a220 *man/parameters_type.Rd 50b805ac1872477a2be151ed3ec80ec8 *man/principal_components.Rd 2dd7f8f83f3ae0244aae25a9c4740e6a *man/print.Rd cbfeb736266555ec02f2d18ca4b8a0d1 *man/random_parameters.Rd e7445aacb981abd249e9fd3d4d0f5160 *man/reduce_parameters.Rd 76a70d026fd62e8dd6290d548c8edbfd *man/reexports.Rd 142f462ff39d406ff120fbe7ed57f96c *man/rescale_weights.Rd 57348feb7d8739a2f7c5c9de39ac2efd *man/reshape_loadings.Rd d10a2091fa252b18a9f1efabbe01ae23 *man/select_parameters.Rd d0bf6e83524afc0ca25e4e5192acea5d *man/simulate_model.Rd b0056f8ddbe6da078f7829d628024420 *man/simulate_parameters.Rd 3b35beb51dc35b6408c38ab97eaae048 *man/skewness.Rd ee71587c502b22e0ca8dcee2733b9c7b *man/smoothness.Rd 04b0951c08915b8f0dddccdc5cc34d47 *man/standard_error.Rd dcabadaf09cde92e898f6d2880adef64 *man/standard_error_robust.Rd 7fb4931f42ef52a683993b92d09336a5 *man/standardize_names.Rd dbd9bab057163c01c320d0b9d3bb9620 *tests/spelling.R d91515d00b068c688c6fb3eb0adc1808 *tests/testthat.R 9fa73b5fa2dd1664214c02a0396470db *tests/testthat/test-GLMMadaptive.R b40bdb2fc7fbd85f30bb94c050bd57cd *tests/testthat/test-MCMCglmm.R c8bb07dd8c5be42a3eecad58940d8808 *tests/testthat/test-backticks.R 94d96a3f5c75d5939d8727694b11d6d2 *tests/testthat/test-betareg.R 3dab0fa9bc46ad27ae63c076232efd5e *tests/testthat/test-bracl.R 73a9933ce03dea60bfd23ba9953bce2f *tests/testthat/test-checks.R 02262b8be8e053b47a7d22740151f3a7 *tests/testthat/test-ci.R eee292bc3301fd20521b96d834a90b9c *tests/testthat/test-coxph.R 7b6df18ab63ce04583380cef98dcd49c *tests/testthat/test-describe_distribution.R 11a7afad035cad5d474796f219fbd2b4 *tests/testthat/test-distributions.R a3e0a0be062ee550472bcbecbc22dcb8 *tests/testthat/test-equivalence_test.R acdabb1a86af9d7f3378543bc2325ae4 *tests/testthat/test-format.R 4a17ce8a022890ebfb5f4bb792c96f05 *tests/testthat/test-format_parameters.R bd852bbf7cc8b1a0fee2ea229d050a35 *tests/testthat/test-gam.R ad2cba56a188d1b89c995e1d21fe3ea6 *tests/testthat/test-gamm.R d598764bca9455a9f47d7afa7f531367 *tests/testthat/test-gee.R cf21f88698bb15950ff4eeef071f006d *tests/testthat/test-geeglm.R d2010d9a9439d72109fb01cae0e04927 *tests/testthat/test-glmer.R de63c654b55f5d4b7274635149d1b6be *tests/testthat/test-glmmTMB.R 27de1ac5874b1adf469e22d29c481a95 *tests/testthat/test-gls.R 32da8773408b6e682249b580cb29c0e0 *tests/testthat/test-ivreg.R 1cf1386a41e0f24f560cfa5a8bcb0f59 *tests/testthat/test-lme.R 041acd00c254073bf4e30ca20090f2f4 *tests/testthat/test-model_parameters.BFBayesFactor.R 10cafc32a8a6f71173221e3fded8c185 *tests/testthat/test-model_parameters.aov.R 748dfe6c924c624756e9f6ab321599eb *tests/testthat/test-model_parameters.cpglmm.R f6df69e2f166919dd7b60cbb97a21c33 *tests/testthat/test-model_parameters.efa_cfa.R 7ee3942cdc500aafd8d8fbf6466f2d21 *tests/testthat/test-model_parameters.glm.R 965c6e1175b9c5a96a65383f7051034a *tests/testthat/test-model_parameters.htest.R 
6ae78fa9715506b58d2cf5ec27e6667c *tests/testthat/test-model_parameters.hurdle.R 19279646363f9261a961f5a614b24f0c *tests/testthat/test-model_parameters.lme.R 5ce459150e114652ba02b5a9f90cf323 *tests/testthat/test-model_parameters.metafor.R 3466081583369ee7e62e1e754453ea40 *tests/testthat/test-model_parameters.mixed.R ad36b40f21fe7c4bc6be49c2df13b144 *tests/testthat/test-model_parameters.truncreg.R 59de919f1802e83565d3ab7e82716848 *tests/testthat/test-model_parameters_df_method.R 4e77f07cac58ec0ebc488fe5763a009e *tests/testthat/test-model_parameters_labels.R 4a981917829248c0a3ebb84af8644ccb *tests/testthat/test-model_parameters_mixed_coeforder.R b0d482200589c7c38aa676676a3f5ba0 *tests/testthat/test-model_parameters_robust.R acad80a9ddf03f61f238d9212722bf56 *tests/testthat/test-model_parameters_std.R 1cbc9708a3b7ae282055cfbb4106af8f *tests/testthat/test-model_parameters_std_mixed.R 9e980b3e58306414b7c656a6c104d5ca *tests/testthat/test-n_factors.R ef6f3ff73a31f8e6fbce174001e28246 *tests/testthat/test-p_value.R d06bded48d2ed22c022ba883d5eb8227 *tests/testthat/test-panelr.R fe9812631618e3d56ebaeec4f8bccef0 *tests/testthat/test-parameters_selection.R 4dcab9e791bf50ae4741cd217634e486 *tests/testthat/test-pca.R 5f6722541a83f7f3164c14fd0a1d0da0 *tests/testthat/test-plm.R 00e5a2660fec19a3ee4ce9df7cd4763b *tests/testthat/test-skewness.R 4e54534c52fd6112d1a9418679934799 *tests/testthat/test-tobit.R 590065af2a2564f8aa364aeccc068723 *tests/testthat/test-zeroinfl.R 5d391aad26f204031f5c09a0c933f1f1 *vignettes/bibliography.bib 72c509e8e136401657909875ead4a5bf *vignettes/efa_cfa.Rmd ee04aebcd7dccea1fcdc7aea09b50d0c *vignettes/model_parameters.Rmd 35f4893e1e03a6bac1aad99682fd5d52 *vignettes/model_parameters_robust.Rmd 4200152aa9feb79f068942f6b5fbc304 *vignettes/model_parameters_standardized.Rmd 135274bcfd6117ac567a0f8b5d406750 *vignettes/parameters_reduction.Rmd 94c9743834b3f7722d09f08c2c1d4b1a *vignettes/parameters_selection.Rmd parameters/inst/0000755000176200001440000000000013620044013013363 5ustar liggesusersparameters/inst/doc/0000755000176200001440000000000013620044013014130 5ustar liggesusersparameters/inst/doc/efa_cfa.R0000644000176200001440000000500513620043700015621 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("see", quietly = TRUE) || !requireNamespace("lavaan", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } set.seed(333) ## ----message=FALSE, warning=FALSE--------------------------------------------- library(parameters) library(dplyr) library(psych) # Load the data data <- psych::bfi[, 1:25] # Select only the 25 first columns corresponding to the items data <- na.omit(data) # remove missing values # Check factor structure check_factorstructure(data) ## ----message=FALSE, warning=FALSE--------------------------------------------- # Fit an EFA efa <- psych::fa(data, nfactors = 5) %>% model_parameters(sort = TRUE, threshold = "max") efa ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # predict(efa, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Opennness")) ## ----message=FALSE, warning=FALSE, echo=FALSE--------------------------------- head(predict(efa, names = c("Neuroticism", "Conscientiousness", 
"Extraversion", "Agreeableness", "Opennness")), 5) ## ----message=FALSE, warning=FALSE--------------------------------------------- n <- n_factors(data) n ## ----message=FALSE, warning=FALSE--------------------------------------------- as.data.frame(n) summary(n) ## ----message=FALSE, warning=FALSE--------------------------------------------- library(see) plot(n) + theme_modern() ## ----message=FALSE, warning=FALSE--------------------------------------------- partitions <- data_partition(data, training_proportion = 0.7) training <- partitions$training test <- partitions$test ## ----message=FALSE, warning=FALSE--------------------------------------------- structure_big5 <- psych::fa(training, nfactors = 5) %>% efa_to_cfa() structure_big6 <- psych::fa(training, nfactors = 6) %>% efa_to_cfa() # Investigate how a model looks structure_big5 ## ----message=FALSE, warning=FALSE--------------------------------------------- library(lavaan) library(performance) big5 <- lavaan::cfa(structure_big5, data = test) big6 <- lavaan::cfa(structure_big6, data = test) performance::compare_performance(big5, big6) parameters/inst/doc/model_parameters_robust.R0000644000176200001440000000662113620044005021202 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("sandwich", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("clubSandwich", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ## ----------------------------------------------------------------------------- data(iris) model <- lm(Petal.Length ~ Sepal.Length * Species + Sepal.Width, data = iris) # model parameters, where SE, CI and p-values are based on robust estimation mp <- model_parameters(model, robust = TRUE) mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovHC(model)))) ## ----------------------------------------------------------------------------- # change estimation-type mp <- model_parameters(model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1") mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovCL(model)))) ## ----------------------------------------------------------------------------- iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) # change estimation-type, defining additional arguments mp <- model_parameters( model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1", vcov_args = list(cluster = iris$cluster) ) mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovCL(model, cluster = iris$cluster)))) ## ----------------------------------------------------------------------------- # create fake-cluster-variable, to demonstrate cluster robust standard errors iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) # cluster-robust estimation mp <- model_parameters( model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$cluster) ) mp # compare standard errors to result from clubSsandwich-package mp$SE unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster)))) ## ----------------------------------------------------------------------------- # model parameters, robust estimation on standardized 
model model_parameters(model, standardize = "refit", robust = TRUE) ## ----------------------------------------------------------------------------- library(lme4) data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) # normal model parameters, like from 'summary()' model_parameters(model) # model parameters, cluster robust estimation for mixed models model_parameters( model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp) ) ## ----------------------------------------------------------------------------- # model parameters, cluster robust estimation on standardized mixed model model_parameters( model, standardize = "refit", robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp) ) parameters/inst/doc/model_parameters_robust.html0000644000176200001440000011074213620044005021745 0ustar liggesusers

Robust Estimation of Standard Errors, Confidence Intervals and p-values

The model_parameters() function also allows the computation of standard errors, confidence intervals and p-values based on robust covariance matrix estimation. Robust estimation relies on the packages sandwich and clubSandwich, so all models supported by either of these packages work with model_parameters() when robust = TRUE.

Classical Regression Models

Robust Covariance Matrix Estimation from Model Parameters

By default, when robust = TRUE, model_parameters() internally calls sandwich::vcovHC(type = "HC3"). However, three arguments allow choosing different methods and options of robust estimation: vcov_estimation, vcov_type and vcov_args (see ?standard_error_robust for further details).

Let us start with a simple example, which uses a heteroskedasticity-consistent covariance matrix estimation with estimation-type “HC3” (i.e. sandwich::vcovHC(type = "HC3") is called):
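For reference, the code chunk producing this output (as in this vignette's R script, where the example model is a linear model fitted to iris):

library(parameters)
library(sandwich)

data(iris)
model <- lm(Petal.Length ~ Sepal.Length * Species + Sepal.Width, data = iris)

# model parameters, where SE, CI and p-values are based on robust estimation
mp <- model_parameters(model, robust = TRUE)
mp

# compare standard errors to the result from the sandwich package
mp$SE
unname(sqrt(diag(sandwich::vcovHC(model))))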

> Parameter                           | Coefficient |   SE |        95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                         |        0.87 | 0.45 | [-0.03, 1.76] |  1.91 | 143 | 0.059 
> Sepal.Length                        |        0.04 | 0.12 | [-0.19, 0.28] |  0.37 | 143 | 0.711 
> Species [versicolor]                |       -0.78 | 0.69 | [-2.15, 0.59] | -1.12 | 143 | 0.265 
> Species [virginica]                 |       -0.41 | 0.63 | [-1.66, 0.83] | -0.66 | 143 | 0.513 
> Sepal.Width                         |        0.11 | 0.08 | [-0.05, 0.27] |  1.32 | 143 | 0.190 
> Sepal.Length * Species [versicolor] |        0.61 | 0.13 | [ 0.35, 0.87] |  4.65 | 143 | < .001
> Sepal.Length * Species [virginica]  |        0.68 | 0.12 | [ 0.45, 0.91] |  5.75 | 143 | < .001
> [1] 0.454 0.119 0.693 0.630 0.083 0.130 0.118
> [1] 0.454 0.119 0.693 0.630 0.083 0.130 0.118

Cluster-Robust Covariance Matrix Estimation (sandwich)

If another covariance matrix estimation is required, use the vcov_estimation-argument. This argument takes the suffix of the related vcov*()-function as its value, i.e. vcov_estimation = "CL" would call sandwich::vcovCL(), and vcov_estimation = "HAC" would call sandwich::vcovHAC().

The specific estimation type can be changed with vcov_type. For example, sandwich::vcovCL() accepts estimation types HC0 to HC3. In the next example, we use a clustered covariance matrix estimation with the HC1 estimation type.
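The corresponding call (same model as above, as in this vignette's R script):

# change estimation-type
mp <- model_parameters(model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1")
mp

# compare standard errors to the result from the sandwich package
mp$SE
unname(sqrt(diag(sandwich::vcovCL(model))))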

> Parameter                           | Coefficient |   SE |        95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                         |        0.87 | 0.42 | [ 0.03, 1.70] |  2.05 | 143 | 0.042 
> Sepal.Length                        |        0.04 | 0.11 | [-0.18, 0.26] |  0.40 | 143 | 0.692 
> Species [versicolor]                |       -0.78 | 0.65 | [-2.07, 0.51] | -1.19 | 143 | 0.237 
> Species [virginica]                 |       -0.41 | 0.59 | [-1.57, 0.75] | -0.70 | 143 | 0.483 
> Sepal.Width                         |        0.11 | 0.08 | [-0.05, 0.27] |  1.38 | 143 | 0.170 
> Sepal.Length * Species [versicolor] |        0.61 | 0.12 | [ 0.37, 0.85] |  4.96 | 143 | < .001
> Sepal.Length * Species [virginica]  |        0.68 | 0.11 | [ 0.46, 0.90] |  6.15 | 143 | < .001
> [1] 0.422 0.111 0.653 0.587 0.079 0.123 0.111
> [1] 0.422 0.111 0.653 0.587 0.079 0.123 0.111

Usually, clustered covariance matrix estimation is used when there is a cluster-structure in the data. The variable indicating the cluster-structure can be defined in sandwich::vcovCL() with the cluster-argument. In model_parameters(), additional arguments that should be passed down to functions from the sandwich package can be specified in vcov_args:
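The corresponding code (as in this vignette's R script; a fake cluster variable is created for demonstration):

# create fake cluster variable, to demonstrate clustered standard errors
iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris)))

# change estimation-type, defining additional arguments
mp <- model_parameters(
  model,
  robust = TRUE,
  vcov_estimation = "CL",
  vcov_type = "HC1",
  vcov_args = list(cluster = iris$cluster)
)
mp

# compare standard errors to the result from the sandwich package
mp$SE
unname(sqrt(diag(sandwich::vcovCL(model, cluster = iris$cluster))))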

> Parameter                           | Coefficient |   SE |        95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                         |        0.87 | 0.34 | [ 0.20, 1.53] |  2.57 | 143 | 0.011 
> Sepal.Length                        |        0.04 | 0.07 | [-0.10, 0.19] |  0.61 | 143 | 0.540 
> Species [versicolor]                |       -0.78 | 0.52 | [-1.80, 0.25] | -1.49 | 143 | 0.137 
> Species [virginica]                 |       -0.41 | 0.26 | [-0.94, 0.11] | -1.56 | 143 | 0.120 
> Sepal.Width                         |        0.11 | 0.07 | [-0.03, 0.25] |  1.52 | 143 | 0.131 
> Sepal.Length * Species [versicolor] |        0.61 | 0.10 | [ 0.42, 0.80] |  6.29 | 143 | < .001
> Sepal.Length * Species [virginica]  |        0.68 | 0.05 | [ 0.58, 0.78] | 13.28 | 143 | < .001
> [1] 0.337 0.072 0.519 0.264 0.072 0.097 0.051
> [1] 0.337 0.072 0.519 0.264 0.072 0.097 0.051

Cluster-Robust Covariance Matrix Estimation (clubSandwich)

Cluster-robust estimation of the variance-covariance matrix can also be achieved using clubSandwich::vcovCR(). Thus, when vcov_estimation = "CR", the related function from the clubSandwich package is called. Note that this function requires the specification of the cluster-argument.
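The corresponding code (as in this vignette's R script; the cluster variable is again passed via vcov_args):

# cluster-robust estimation via clubSandwich
mp <- model_parameters(
  model,
  robust = TRUE,
  vcov_estimation = "CR",
  vcov_type = "CR1",
  vcov_args = list(cluster = iris$cluster)
)
mp

# compare standard errors to the result from the clubSandwich package
mp$SE
unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster))))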

> Parameter                           | Coefficient |   SE |        95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                         |        0.87 | 0.33 | [ 0.21, 1.52] |  2.62 | 143 | 0.010 
> Sepal.Length                        |        0.04 | 0.07 | [-0.10, 0.18] |  0.63 | 143 | 0.531 
> Species [versicolor]                |       -0.78 | 0.51 | [-1.78, 0.23] | -1.53 | 143 | 0.129 
> Species [virginica]                 |       -0.41 | 0.26 | [-0.92, 0.10] | -1.60 | 143 | 0.112 
> Sepal.Width                         |        0.11 | 0.07 | [-0.03, 0.25] |  1.55 | 143 | 0.123 
> Sepal.Length * Species [versicolor] |        0.61 | 0.09 | [ 0.42, 0.79] |  6.42 | 143 | < .001
> Sepal.Length * Species [virginica]  |        0.68 | 0.05 | [ 0.58, 0.78] | 13.56 | 143 | < .001
> [1] 0.330 0.070 0.508 0.259 0.071 0.095 0.050
> [1] 0.330 0.070 0.508 0.259 0.071 0.095 0.050

Robust Covariance Matrix Estimation on Standardized Model Parameters

Finally, robust estimation can be combined with standardization. However, robust covariance matrix estimation only works for standardize = "refit".
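The corresponding call (as in this vignette's R script):

# model parameters, robust estimation on the standardized model
model_parameters(model, standardize = "refit", robust = TRUE)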

> Parameter                           | Coefficient |   SE |         95% CI |      t |  df |      p
> -------------------------------------------------------------------------------------------------
> (Intercept)                         |       -1.30 | 0.07 | [-1.44, -1.16] | -18.70 | 143 | < .001
> Sepal.Length                        |        0.02 | 0.06 | [-0.09,  0.13] |   0.37 | 143 | 0.711 
> Species [versicolor]                |        1.57 | 0.09 | [ 1.40,  1.74] |  17.84 | 143 | < .001
> Species [virginica]                 |        2.02 | 0.09 | [ 1.84,  2.20] |  22.49 | 143 | < .001
> Sepal.Width                         |        0.03 | 0.02 | [-0.01,  0.07] |   1.32 | 143 | 0.190 
> Sepal.Length * Species [versicolor] |        0.28 | 0.06 | [ 0.16,  0.41] |   4.65 | 143 | < .001
> Sepal.Length * Species [virginica]  |        0.32 | 0.06 | [ 0.21,  0.43] |   5.75 | 143 | < .001

Mixed Models

Robust Covariance Matrix Estimation for Mixed Models

For linear mixed models, which by definition have a clustered (“hierarchical” or multilevel) structure in the data, it is also possible to estimate a cluster-robust covariance matrix. This again relies on the clubSandwich package, so we need to define the same arguments as in the example above.
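The corresponding code (as in this vignette's R script; a random grouping factor grp is added to iris):

library(lme4)

data(iris)
set.seed(1234)
iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE))

# fit example model
model <- lme4::lmer(
  Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp),
  data = iris
)

# normal model parameters, like from 'summary()'
model_parameters(model)

# model parameters, cluster robust estimation for mixed models
model_parameters(
  model,
  robust = TRUE,
  vcov_estimation = "CR",
  vcov_type = "CR1",
  vcov_args = list(cluster = iris$grp)
)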

> Parameter                          | Coefficient |   SE |         95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                        |        1.55 | 0.40 | [ 0.77,  2.34] |  3.87 | 141 | < .001
> Species [versicolor]               |        0.41 | 0.55 | [-0.66,  1.49] |  0.75 | 141 | 0.453 
> Species [virginica]                |       -0.41 | 0.58 | [-1.55,  0.73] | -0.70 | 141 | 0.482 
> Sepal.Width                        |        0.66 | 0.11 | [ 0.44,  0.88] |  5.83 | 141 | < .001
> Petal.Length                       |        0.82 | 0.07 | [ 0.69,  0.95] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |       -0.48 | 0.19 | [-0.85, -0.12] | -2.60 | 141 | 0.009 
> Species [virginica] * Sepal.Width  |       -0.36 | 0.18 | [-0.71, -0.01] | -1.99 | 141 | 0.046
> Parameter                          | Coefficient |   SE |         95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                        |        1.55 | 0.40 | [ 0.76,  2.35] |  3.87 | 141 | < .001
> Species [versicolor]               |        0.41 | 0.80 | [-1.17,  1.99] |  0.75 | 141 | 0.608 
> Species [virginica]                |       -0.41 | 0.19 | [-0.78, -0.03] | -0.70 | 141 | 0.033 
> Sepal.Width                        |        0.66 | 0.10 | [ 0.46,  0.86] |  5.83 | 141 | < .001
> Petal.Length                       |        0.82 | 0.05 | [ 0.72,  0.91] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |       -0.48 | 0.35 | [-1.18,  0.21] | -2.60 | 141 | 0.172 
> Species [virginica] * Sepal.Width  |       -0.36 | 0.11 | [-0.57, -0.15] | -1.99 | 141 | < .001

Robust Covariance Matrix Estimation on Standardized Mixed Model Parameters

Robust estimation can be combined with standardization for linear mixed models as well; in this case, too, it only works for standardize = "refit".
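The corresponding call (as in this vignette's R script):

# model parameters, cluster robust estimation on the standardized mixed model
model_parameters(
  model,
  standardize = "refit",
  robust = TRUE,
  vcov_estimation = "CR",
  vcov_type = "CR1",
  vcov_args = list(cluster = iris$grp)
)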

> Parameter                          | Coefficient |   SE |         95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                        |        0.97 | 0.08 | [ 0.82,  1.12] |  4.74 | 141 | < .001
> Species [versicolor]               |       -1.29 | 0.33 | [-1.95, -0.63] | -4.91 | 141 | < .001
> Species [virginica]                |       -1.81 | 0.23 | [-2.26, -1.37] | -5.33 | 141 | < .001
> Sepal.Width                        |        0.35 | 0.05 | [ 0.24,  0.45] |  5.83 | 141 | < .001
> Petal.Length                       |        1.74 | 0.10 | [ 1.54,  1.94] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |       -0.25 | 0.19 | [-0.62,  0.11] | -2.60 | 141 | 0.172 
> Species [virginica] * Sepal.Width  |       -0.19 | 0.06 | [-0.30, -0.08] | -1.99 | 141 | < .001
parameters/inst/doc/model_parameters_standardized.R0000644000176200001440000000301113620044010022322 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ## ----------------------------------------------------------------------------- library(lme4) data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) # classic model parameters model_parameters(model) # standardized model parameters model_parameters(model, standardize = "refit") ## ----------------------------------------------------------------------------- # standardize continuous variables manually model2 <- lme4::lmer( scale(Sepal.Length) ~ Species * scale(Sepal.Width) + scale(Petal.Length) + (1 | grp), data = iris ) model_parameters(model2) ## ----------------------------------------------------------------------------- model_parameters(model, standardize = "posthoc") ## ----------------------------------------------------------------------------- model_parameters(model, standardize = "basic") ## ----------------------------------------------------------------------------- model_parameters(model, standardize = "smart") parameters/inst/doc/parameters_reduction.html0000644000176200001440000006230313620044012021240 0ustar liggesusers

Feature Reduction (PCA, cMDS, ICA…)

Also known as feature extraction or dimension reduction in machine learning, the goal of variable reduction is to reduce the number of predictors by deriving, from a set of measured data, new variables intended to be informative and non-redundant. This method can be used to simplify models, which can benefit model interpretation, shorten fitting time, and improve generalization (by reducing overfitting).

Quick and Exploratory Method

Let’s start by fitting a multiple regression with the attitude dataset, available in base R, to predict the overall rating by employees of their organization with the remaining variables (handling of employee complaints, special privileges, opportunity of learning, raises, feedback considered too critical, and opportunity of advancement).
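The corresponding code (as in this vignette's R script):

library(parameters)

model <- lm(rating ~ ., data = attitude)
parameters(model)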

> Parameter   | Coefficient |    SE |          95% CI |     t | df |      p
> -------------------------------------------------------------------------
> (Intercept) |       10.79 | 11.59 | [-13.19, 34.76] |  0.93 | 23 | 0.362 
> complaints  |        0.61 |  0.16 | [  0.28,  0.95] |  3.81 | 23 | < .001
> privileges  |       -0.07 |  0.14 | [ -0.35,  0.21] | -0.54 | 23 | 0.596 
> learning    |        0.32 |  0.17 | [ -0.03,  0.67] |  1.90 | 23 | 0.070 
> raises      |        0.08 |  0.22 | [ -0.38,  0.54] |  0.37 | 23 | 0.715 
> critical    |        0.04 |  0.15 | [ -0.27,  0.34] |  0.26 | 23 | 0.796 
> advance     |       -0.22 |  0.18 | [ -0.59,  0.15] | -1.22 | 23 | 0.236

We can explore a reduction of the number of parameters with the reduce_parameters() function.
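The corresponding code (as in this vignette's R script):

newmodel <- reduce_parameters(model)
parameters(newmodel)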

> Parameter                                                              | Coefficient |   SE |         95% CI |     t | df |      p
> ----------------------------------------------------------------------------------------------------------------------------------
> (Intercept)                                                            |       64.63 | 1.57 | [61.41, 67.85] | 41.19 | 27 | < .001
> raises_0.88/learning_0.82/complaints_0.78/privileges_0.70/advance_0.68 |        4.62 | 0.90 | [ 2.78,  6.46] |  5.16 | 27 | < .001
> critical_0.80                                                          |       -3.41 | 1.59 | [-6.67, -0.14] | -2.14 | 27 | 0.041

This quickly hints that the model could be represented by two “latent” dimensions, one correlated with all the positive things that a company has to offer, and the other related to the amount of negative critiques received by the employees. These two dimensions have a positive and a negative relationship with the company rating, respectively.

What exactly does reduce_parameters() do?

This function performs a reduction in the parameter space (the number of variables). It starts by creating a new set of variables based on a given method (the default method is “PCA”, but others are available via the method argument, such as “cMDS”, “DRR” or “ICA”). Then, it names these new dimensions using the original variables that correlate the most with them. For instance, a variable named ‘V1_0.97/V4_-0.88’ means that the V1 and V4 variables correlate maximally (with respective coefficients of .97 and -.88) with this dimension.
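The cMDS-based output below is produced as follows (as in this vignette's R script):

library(dplyr)

reduce_parameters(model, method = "cMDS") %>%
  parameters()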

> Parameter                                                 | Coefficient |   SE |         95% CI |     t | df |      p
> ---------------------------------------------------------------------------------------------------------------------
> (Intercept)                                               |       64.63 | 1.41 | [61.73, 67.53] | 45.80 | 26 | < .001
> raises_0.85/complaints_0.84/learning_0.83/privileges_0.74 |        0.43 | 0.07 | [ 0.28,  0.57] |  6.14 | 26 | < .001
> advance_-0.60                                             |        0.32 | 0.13 | [ 0.04,  0.59] |  2.36 | 26 | 0.026 
> critical_-0.65                                            |       -0.24 | 0.15 | [-0.56,  0.07] | -1.61 | 26 | 0.120

A different method (Classical Multidimensional Scaling - cMDS) suggests that negative critiques do not have a significant impact on the rating, and that the lack of opportunity for career advancement is a separate dimension that matters in its own right.

Although this function can be useful in exploratory data analysis, it’s best to perform the dimension reduction step in a separate and dedicated stage, as this is a very important process in the data analysis workflow.

Principal Component Analysis (PCA)

PCA is a widely used procedure that lies in-between dimension reduction and structural modelling. Indeed, one way of reducing the number of predictors is to extract a new set of uncorrelated variables that represent the variance of your initial dataset. But how the original variables relate among themselves can also be a question of its own.

We can apply the principal_components() function to the predictors of the model:
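As in this vignette's R script:

pca <- principal_components(insight::get_predictors(model), n = "auto")
pca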

> # Loadings from Principal Component Analysis (no rotation)
> 
> Variable   |  PC1 | Complexity
> ------------------------------
> complaints | 0.78 |       1.00
> privileges | 0.70 |       1.00
> learning   | 0.82 |       1.00
> raises     | 0.88 |       1.00
> critical   | 0.40 |       1.00
> advance    | 0.68 |       1.00
> 
> The unique principal component accounted for 52.82% of the total variance of the original data.

The principal_components() function automatically selected one component (if the number of components is not specified, this function uses n_factors() to estimate the optimal number to keep) and returned the loadings, i.e., the relationships with all of the original variables.

As we can see here, it seems that our new component captured the essence (more than half of the total variance present in the original dataset) of all our other variables together. We can extract the values of this component for each of our observations using the predict() method, and add back the response variable of our initial dataset.

We can now update the model with this new component:
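The corresponding code (as in this vignette's R script; predict() extracts the component scores, to which we add back the response):

newdata <- predict(pca)
newdata$rating <- attitude$rating

update(model, rating ~ PC1, data = newdata) %>%
  parameters()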

> Parameter   | Coefficient |   SE |         95% CI |     t | df |      p
> -----------------------------------------------------------------------
> (Intercept) |       64.63 | 1.67 | [61.22, 68.05] | 38.78 | 28 | < .001
> PC1         |        4.62 | 0.95 | [ 2.67,  6.57] |  4.86 | 28 | < .001

Using the psych package for PCA

You can also use different packages for such models, e.g. psych (Revelle 2018) or FactoMineR for PCA or Exploratory Factor Analysis (EFA), as they allow for more flexibility, control and detail when running such procedures. The functions from these packages are fully supported by parameters through the model_parameters() function.

As such, the above analysis can be fully reproduced as follows:
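As in this vignette's R script:

library(psych)

# fit the PCA
pca <- psych::principal(attitude, nfactors = 1) %>%
  model_parameters()
pca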

> # Rotated loadings from Principal Component Analysis (varimax-rotation)
> 
> Variable   |  PC1 | Complexity | Uniqueness
> -------------------------------------------
> rating     | 0.80 |       1.00 |       0.37
> complaints | 0.85 |       1.00 |       0.28
> privileges | 0.68 |       1.00 |       0.53
> learning   | 0.83 |       1.00 |       0.32
> raises     | 0.86 |       1.00 |       0.26
> critical   | 0.36 |       1.00 |       0.87
> advance    | 0.58 |       1.00 |       0.66
> 
> The unique principal component (varimax rotation) accounted for 53.09% of the total variance of the original data.

Note: By default, psych::principal() uses a varimax rotation to extract rotated components, possibly leading to discrepancies in the results.

Finally, refit the model:
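The corresponding code (as in this vignette's R script):

df <- cbind(attitude, predict(pca))

update(model, rating ~ PC1, data = df) %>%
  model_parameters()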

> Parameter   | Coefficient |   SE |         95% CI |     t | df |      p
> -----------------------------------------------------------------------
> (Intercept) |       64.63 | 1.37 | [61.83, 67.44] | 47.23 | 28 | < .001
> PC1         |        9.69 | 1.39 | [ 6.84, 12.54] |  6.96 | 28 | < .001

References

Revelle, William. 2018. Psych: Procedures for Psychological, Psychometric, and Personality Research. Evanston, Illinois: Northwestern University. https://CRAN.R-project.org/package=psych.

parameters/inst/doc/model_parameters.Rmd0000644000176200001440000001530513620043433020130 0ustar liggesusers--- title: "Summary of Model Parameters" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable selection, feature selection] vignette: > %\VignetteIndexEntry{Summary of Model Parameters} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("metafor", quietly = TRUE) || !requireNamespace("lavaan", quietly = TRUE) || !requireNamespace("brms", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE) || !requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("FactoMineR", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` The `model_parameters()` function (also accessible via the shortcut `parameters()`) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered as a lightweight alternative to [`broom::tidy()`](https://github.com/tidymodels/broom), with some notable differences: - The names of the returned data frame are **specific** to their content. For instance, the column containing the statistic is named following the statistic name, i.e., *t*, *z*, etc., instead of a generic name such as *statistic* (**however**, you can get standardized (generic) column names using [`standardize_names()`](https://easystats.github.io/parameters/reference/standardize_names.html)). - It is able to compute or extract indices not available by default, such as ***p*-values**, **CIs**, etc. - It includes **feature engineering** capabilities, including parameters [**bootstrapping**](https://easystats.github.io/parameters/articles/bootstrapping.html). ## Correlations and *t*-tests ### Frequentist ```{r, warning=FALSE, message=FALSE} cor.test(iris$Sepal.Length, iris$Sepal.Width) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE} t.test(mpg ~ vs, data = mtcars) %>% parameters() ``` ### Bayesian ```{r, warning=FALSE, message=FALSE} library(BayesFactor) BayesFactor::correlationBF(iris$Sepal.Length, iris$Sepal.Width) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE} BayesFactor::ttestBF(formula = mpg ~ vs, data = mtcars) %>% parameters() ``` ## ANOVAs Indices of effect size for ANOVAs, such as partial and non-partial versions of `eta_squared()`, `epsilon_sqared()` or `omega_squared()`, were moved to the [**effectsize**-package](https://easystats.github.io/effectsize/). However, **parameters** uses these function to compute such indices for parameters summaries. ### Simple ```{r, warning=FALSE, message=FALSE} aov(Sepal.Length ~ Species, data = iris) %>% parameters(omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE) ``` ### Repeated measures `parameters()` (resp. its alias `model_parameters()`) also works on repeated measures ANOVAs, whether computed from `aov()` or from a mixed model. 
```{r, warning=FALSE, message=FALSE} aov(mpg ~ am + Error(gear), data = mtcars) %>% parameters() ``` ## Regressions (GLMs, Mixed Models, GAMs, ...) `parameters()` (resp. its alias `model_parameters()`) was mainly built with regression models in mind. It works for many types of models and packages, including mixed models and Bayesian models. ### GLMs ```{r, warning=FALSE, message=FALSE} glm(vs ~ poly(mpg, 2) + cyl, data = mtcars) %>% parameters() ``` ### Mixed Models ```{r, warning=FALSE, message=FALSE} library(lme4) lmer(Sepal.Width ~ Petal.Length + (1|Species), data = iris) %>% parameters() ``` ### Bayesian Models `model_parameters()` works fine with Bayesian models from the **rstanarm** package... ```{r, warning=FALSE, message=FALSE, eval = FALSE} library(rstanarm) stan_glm(mpg ~ wt * cyl, data = mtcars) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE, echo = FALSE} library(rstanarm) stan_glm(mpg ~ wt * cyl, data = mtcars, iter = 500, chains = 2, refresh = 0) %>% parameters() ``` ... as well as for (more complex) models from the **brms** package. For more complex models, other model components can be printed using the arguments `effects` and `component` arguments. ```{r, warning=FALSE, message=FALSE} library(brms) data(fish) set.seed(123) model <- brm(bf( count ~ persons + child + camper + (1 | persons), zi ~ child + camper + (1 | persons) ), data = fish, family = zero_inflated_poisson(), iter = 500, chains = 1, refresh = 0 ) parameters(model, component = "conditional") parameters(model, effects = "all", component = "all") ``` ## Structural Models (PCA, EFA, CFA, SEM...) The **parameters** package extends the support to structural models. ### Principal Component Analysis (PCA) and Exploratory Factor Analysis (EFA) ```{r, warning=FALSE, message=FALSE} library(psych) psych::pca(mtcars, nfactors = 3) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE, eval = FALSE} library(FactoMineR) FactoMineR::FAMD(iris, ncp = 3) %>% parameters() ``` ```{r, warning=FALSE, message=FALSE, echo = FALSE} library(FactoMineR) FactoMineR::FAMD(iris, ncp = 3, graph = FALSE) %>% parameters() ``` ### Confirmatory Factor Analysis (CFA) and Structural Equation Models (SEM) #### Frequentist ```{r, warning=FALSE, message=FALSE} library(lavaan) model <- lavaan::cfa(' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ', data = HolzingerSwineford1939) model_parameters(model) ``` #### Bayesian `blavaan` to be done. ## Meta-Analysis `parameters()` also works for `rma`-objects from the **metafor** package. ```{r, warning=FALSE, message=FALSE} library(metafor) mydat <- data.frame( effectsize = c(-0.393, 0.675, 0.282, -1.398), standarderror = c(0.317, 0.317, 0.13, 0.36) ) rma(yi = effectsize, sei = standarderror, method = "REML", data = mydat) %>% model_parameters() ``` ## Plotting Model Parameters There is a `plot()`-method implemented in the [**see**-package](https://easystats.github.io/see/). Several examples are shown [in this vignette](https://easystats.github.io/see/articles/parameters.html). 
parameters/inst/doc/parameters_reduction.R0000644000176200001440000000322413620044011020471 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ## ----message=FALSE, warning=FALSE--------------------------------------------- model <- lm(rating ~ ., data = attitude) parameters(model) ## ----message=FALSE, warning=FALSE--------------------------------------------- newmodel <- reduce_parameters(model) parameters(newmodel) ## ----message=FALSE, warning=FALSE--------------------------------------------- reduce_parameters(model, method = "cMDS") %>% parameters() ## ----message=FALSE, warning=FALSE--------------------------------------------- pca <- principal_components(insight::get_predictors(model), n = "auto") pca ## ----message=FALSE, warning=FALSE--------------------------------------------- newdata <- predict(pca) newdata$rating <- attitude$rating ## ----message=FALSE, warning=FALSE--------------------------------------------- update(model, rating ~ PC1, data = newdata) %>% parameters() ## ----message=FALSE, warning=FALSE--------------------------------------------- library(psych) # Fit the PCA pca <- psych::principal(attitude, nfactors = 1) %>% model_parameters() pca ## ----message=FALSE, warning=FALSE--------------------------------------------- df <- cbind(attitude, predict(pca)) update(model, rating ~ PC1, data = df) %>% model_parameters() parameters/inst/doc/model_parameters_standardized.html0000644000176200001440000006355313620044010023106 0ustar liggesusers

Standardized Model Parameters

The model_parameters() function (also accessible via the shortcut parameters()) can be used to calculate standardized model parameters, too, via the standardize-argument. There are different methods of standardizing model parameters: "refit", "posthoc", "smart" and "basic" (see ?effectsize::standardize_parameters for further details).

Standardization by re-fitting the model

standardize = "refit" is based on a complete model re-fit with a standardized version of data. Hence, this method is equal to standardizing the variables before fitting the model. It is the most accurate (Neter et al., 1989), but it is also the most computationally costly and long (especially for heavy models such as, for instance, for Bayesian models). This method is particularly recommended for complex models that include interactions or transformations (e.g., polynomial or spline terms).

When standardize = "refit", model_parameters() internally calls effectsize::standardize() to standardize the data that was used to fit the model and updates the model with the standardized data. Note that effectsize::standardize() tries to detect which variables should be standardized and which not. For instance, having a log(x) in the model formula would exclude x from being standardized, because x might get negative values, and thus log(x) would no longer be defined. Factors will also be not standardized. Response variables will be standardized, if appropriate.

> Parameter                          | Coefficient |   SE |         95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                        |        1.55 | 0.40 | [ 0.77,  2.34] |  3.87 | 141 | < .001
> Species [versicolor]               |        0.41 | 0.55 | [-0.66,  1.49] |  0.75 | 141 | 0.453 
> Species [virginica]                |       -0.41 | 0.58 | [-1.55,  0.73] | -0.70 | 141 | 0.482 
> Sepal.Width                        |        0.66 | 0.11 | [ 0.44,  0.88] |  5.83 | 141 | < .001
> Petal.Length                       |        0.82 | 0.07 | [ 0.69,  0.95] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |       -0.48 | 0.19 | [-0.85, -0.12] | -2.60 | 141 | 0.009 
> Species [virginica] * Sepal.Width  |       -0.36 | 0.18 | [-0.71, -0.01] | -1.99 | 141 | 0.046
> Parameter                          | Coefficient |   SE |         95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                        |        0.97 | 0.20 | [ 0.57,  1.37] |  4.74 | 141 | < .001
> Species [versicolor]               |       -1.29 | 0.26 | [-1.80, -0.77] | -4.91 | 141 | < .001
> Species [virginica]                |       -1.81 | 0.34 | [-2.48, -1.15] | -5.33 | 141 | < .001
> Sepal.Width                        |        0.35 | 0.06 | [ 0.23,  0.46] |  5.83 | 141 | < .001
> Petal.Length                       |        1.74 | 0.14 | [ 1.47,  2.02] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |       -0.25 | 0.10 | [-0.45, -0.06] | -2.60 | 141 | 0.009 
> Species [virginica] * Sepal.Width  |       -0.19 | 0.09 | [-0.37,  0.00] | -1.99 | 141 | 0.046

The second output is identical to the following:
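That is, standardizing the continuous variables manually before fitting (as in this vignette's R script):

# standardize continuous variables manually
model2 <- lme4::lmer(
  scale(Sepal.Length) ~ Species * scale(Sepal.Width) + scale(Petal.Length) + (1 | grp),
  data = iris
)
model_parameters(model2)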

> Parameter                          | Coefficient |   SE |         95% CI |     t |  df |      p
> -----------------------------------------------------------------------------------------------
> (Intercept)                        |        0.97 | 0.20 | [ 0.57,  1.37] |  4.74 | 141 | < .001
> Species [versicolor]               |       -1.29 | 0.26 | [-1.80, -0.77] | -4.91 | 141 | < .001
> Species [virginica]                |       -1.81 | 0.34 | [-2.48, -1.15] | -5.33 | 141 | < .001
> Sepal.Width                        |        0.35 | 0.06 | [ 0.23,  0.46] |  5.83 | 141 | < .001
> Petal.Length                       |        1.74 | 0.14 | [ 1.47,  2.02] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |       -0.25 | 0.10 | [-0.45, -0.06] | -2.60 | 141 | 0.009 
> Species [virginica] * Sepal.Width  |       -0.19 | 0.09 | [-0.37,  0.00] | -1.99 | 141 | 0.046

Post-hoc standardization

standardize = "posthoc" aims at emulating the results obtained by "refit" without refitting the model. The coefficients are divided by the standard deviation of the outcome (which becomes their expression ‘unit’). Then, the coefficients related to numeric variables are additionally multiplied by the standard deviation of the related terms, so that they correspond to changes of 1 SD of the predictor (e.g., “a change in 1 SD of x is related to a change of 0.24 of the SD of y”). This does not apply to binary variables or factors, so the coefficients are still related to changes in levels.

This method is not accurate and tends to give aberrant results when interactions are specified. However, this method of standardization is the “classic” result obtained by many statistical packages when standardized coefficients are requested.

When standardize = "posthoc", model_parameters() internally calls effectsize::standardize_parameters(method = "posthoc"). Test statistic and p-values are not affected, i.e. they are the same as if no standardization would be applied.

> Parameter                          | Coefficient (std.) |   SE |         95% CI |     t |  df |      p
> ------------------------------------------------------------------------------------------------------
> (Intercept)                        |               0.00 | 0.00 | [ 0.00,  0.00] |  3.87 | 141 | < .001
> Species [versicolor]               |               0.50 | 0.66 | [-0.80,  1.79] |  0.75 | 141 | 0.453 
> Species [virginica]                |              -0.49 | 0.70 | [-1.87,  0.88] | -0.70 | 141 | 0.482 
> Sepal.Width                        |               0.35 | 0.06 | [ 0.23,  0.46] |  5.83 | 141 | < .001
> Petal.Length                       |               1.74 | 0.14 | [ 1.47,  2.02] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |              -0.25 | 0.10 | [-0.45, -0.06] | -2.60 | 141 | 0.009 
> Species [virginica] * Sepal.Width  |              -0.19 | 0.09 | [-0.37,  0.00] | -1.99 | 141 | 0.046

standardize = "basic" also applies post-hoc standardization, however, factors are converted to numeric, which means that it also scales the coefficient by the standard deviation of model’s matrix’ parameter of factor levels (transformed to integers) or binary predictors.

> Parameter                          | Coefficient (std.) |   SE |         95% CI |     t |  df |      p
> ------------------------------------------------------------------------------------------------------
> (Intercept)                        |               0.00 | 0.00 | [ 0.00,  0.00] |  3.87 | 141 | < .001
> Species [versicolor]               |               0.23 | 0.31 | [-0.38,  0.85] |  0.75 | 141 | 0.453 
> Species [virginica]                |              -0.23 | 0.33 | [-0.88,  0.42] | -0.70 | 141 | 0.482 
> Sepal.Width                        |               0.35 | 0.06 | [ 0.23,  0.46] |  5.83 | 141 | < .001
> Petal.Length                       |               1.74 | 0.14 | [ 1.47,  2.02] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |              -0.77 | 0.30 | [-1.35, -0.19] | -2.60 | 141 | 0.009 
> Species [virginica] * Sepal.Width  |              -0.61 | 0.31 | [-1.22, -0.01] | -1.99 | 141 | 0.046

Smart standardization

standardize = "smart" is similar to standardize = "posthoc" in that it does not involve model re-fitting. The difference is that the SD of the response is computed on the relevant section of the data. For instance, if a factor with 3 levels A (the intercept), B and C is entered as a predictor, the effect corresponding to B vs. A will be scaled by the variance of the response at the intercept only. As a results, the coefficients for effects of factors are similar to a Glass’ delta.

> Parameter                          | Coefficient (std.) |   SE |         95% CI |     t |  df |      p
> ------------------------------------------------------------------------------------------------------
> (Intercept)                        |               0.00 | 0.00 | [ 0.00,  0.00] |  3.87 | 141 | < .001
> Species [versicolor]               |               1.17 | 1.56 | [-1.88,  4.22] |  0.75 | 141 | 0.453 
> Species [virginica]                |              -1.16 | 1.65 | [-4.39,  2.07] | -0.70 | 141 | 0.482 
> Sepal.Width                        |               0.35 | 0.06 | [ 0.23,  0.46] |  5.83 | 141 | < .001
> Petal.Length                       |               1.74 | 0.14 | [ 1.47,  2.02] | 12.52 | 141 | < .001
> Species [versicolor] * Sepal.Width |              -1.13 | 0.43 | [-1.97, -0.28] | -2.60 | 141 | 0.009 
> Species [virginica] * Sepal.Width  |              -0.83 | 0.42 | [-1.65, -0.01] | -1.99 | 141 | 0.046
parameters/inst/doc/parameters_selection.html0000644000176200001440000006567013620044013021244 0ustar liggesusers

Parameters Selection

Also known as feature selection in machine learning, the goal of variable selection is to identify a subset of predictors to simplify models. This can benefit model interpretation, shorten fitting time, and improve generalization (by reducing overfitting).

There are many different methods. The one that is appropriate for a given problem depends on the model type, the data, the objective and the theoretical rationale.

The parameters package implements a helper that will automatically pick a method deemed appropriate for the provided model, run the variable selection and return the optimal formula, which you can then re-use to update the model.

Simple linear regression

Fit a powerful model

If you are familiar with R and the formula interface, you know of the possibility of including a dot (.) in the formula, signifying “all the remaining variables”. Curiously, few are aware of the possibility of easily adding all the interaction terms as well. This can be achieved using the .*. notation.

Let’s try that with the linear regression predicting Sepal.Length with the iris dataset, included by default in R.
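A sketch of the corresponding code. The vignette's R script is not included in this extract, and the output below contains a grp factor, so we assume iris has been augmented with such a grouping variable beforehand (as in the other vignettes):

# assumed setup (not shown here): add a 3-level grouping factor to iris
iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE))

# '.*.' expands to all remaining variables plus all their two-way interactions
model <- lm(Sepal.Length ~ .*., data = iris)
summary(model)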

> 
> Call:
> lm(formula = Sepal.Length ~ . * ., data = iris)
> 
> Residuals:
>     Min      1Q  Median      3Q     Max 
> -0.5730 -0.2034  0.0035  0.1997  0.5702 
> 
> Coefficients:
>                                Estimate Std. Error t value Pr(>|t|)  
> (Intercept)                     3.14635    1.40272    2.24    0.027 *
> Sepal.Width                     0.48464    0.41852    1.16    0.249  
> Petal.Length                   -0.50461    0.87589   -0.58    0.566  
> Petal.Width                     3.53596    1.63832    2.16    0.033 *
> Speciesversicolor              -2.81723    1.84173   -1.53    0.129  
> Speciesvirginica               -6.98585    3.56973   -1.96    0.053 .
> grp2                           -0.97614    0.78226   -1.25    0.214  
> grp3                           -0.82032    0.75569   -1.09    0.280  
> Sepal.Width:Petal.Length        0.16021    0.23508    0.68    0.497  
> Sepal.Width:Petal.Width        -0.73885    0.46392   -1.59    0.114  
> Sepal.Width:Speciesversicolor  -0.06240    0.69994   -0.09    0.929  
> Sepal.Width:Speciesvirginica    0.15652    1.06683    0.15    0.884  
> Sepal.Width:grp2                0.21824    0.24099    0.91    0.367  
> Sepal.Width:grp3                0.28766    0.23779    1.21    0.229  
> Petal.Length:Petal.Width       -0.29280    0.36826   -0.80    0.428  
> Petal.Length:Speciesversicolor  1.07990    0.54672    1.98    0.050 .
> Petal.Length:Speciesvirginica   1.50030    0.74199    2.02    0.045 *
> Petal.Length:grp2               0.23747    0.19687    1.21    0.230  
> Petal.Length:grp3              -0.04170    0.17893   -0.23    0.816  
> Petal.Width:Speciesversicolor  -0.14775    1.40148   -0.11    0.916  
> Petal.Width:Speciesvirginica    0.48417    1.67788    0.29    0.773  
> Petal.Width:grp2               -0.76517    0.40421   -1.89    0.061 .
> Petal.Width:grp3                0.00258    0.39882    0.01    0.995  
> Speciesversicolor:grp2          0.24501    0.67784    0.36    0.718  
> Speciesvirginica:grp2           0.48118    0.94497    0.51    0.612  
> Speciesversicolor:grp3          0.41806    0.62824    0.67    0.507  
> Speciesvirginica:grp3           0.23647    0.87030    0.27    0.786  
> ---
> Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> 
> Residual standard error: 0.28 on 123 degrees of freedom
> Multiple R-squared:  0.903,   Adjusted R-squared:  0.882 
> F-statistic: 43.9 on 26 and 123 DF,  p-value: <2e-16

Wow, that’s a lot of parameters! And almost none of them is significant…

Which is weird, considering that gorgeous R2! 0.882! I wish I had that in my research…

Too many parameters?

As you might know, having a model that is too performant is not always a good thing. For instance, it can be a marker of overfitting: the model corresponds too closely to a particular set of data, and may therefore fail to predict future observations reliably. In multiple regression, it can also fall under Freedman’s paradox: some predictors that actually have no relation to the dependent variable being predicted will be spuriously found to be statistically significant.

Let’s run a few checks using the performance package:
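
These checks correspond to calls of the following kind (as in the vignette source):

```r
library(performance)

check_normality(model)
check_heteroscedasticity(model)
check_autocorrelation(model)
check_collinearity(model)
```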

> OK: Residuals appear as normally distributed (p = 0.115).
> OK: Error variance appears to be homoscedastic (p = 0.348).
> OK: Residuals appear to be independent and not autocorrelated (p = 0.446).
> # Check for Multicollinearity
> 
> High Correlation
> 
>                 Parameter        VIF Increased SE
>               Sepal.Width      61.39         7.84
>              Petal.Length    4410.44        66.41
>               Petal.Width    2876.94        53.64
>                   Species  611985.94       782.30
>                       grp   23782.23       154.21
>  Sepal.Width:Petal.Length    2796.78        52.88
>   Sepal.Width:Petal.Width    2159.27        46.47
>       Sepal.Width:Species  461389.69       679.26
>           Sepal.Width:grp   22277.15       149.26
>  Petal.Length:Petal.Width    5555.71        74.54
>      Petal.Length:Species 1872684.46      1368.46
>          Petal.Length:grp   40317.76       200.79
>       Petal.Width:Species  616688.46       785.30
>           Petal.Width:grp   12613.55       112.31
>               Species:grp  282568.74       531.57

The main issue with this model seems to be the high multicollinearity. This suggests that our model might not be able to give valid results about any individual predictor, nor tell us which predictors are redundant with respect to the others.

Parameters selection

Time to do some variable selection. This can easily be done using the select_parameters() function, which will automatically select the best variables and update the model accordingly. One way of using it is in a tidy pipeline (using %>%), using its output to update the model.
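
A sketch of such a pipeline, following the vignette source:

```r
library(parameters)
library(dplyr)  # for the pipe

lm(Sepal.Length ~ . * ., data = iris) %>%
  select_parameters() %>%
  summary()
```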

> 
> Call:
> lm(formula = Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width + 
>     Species + grp + Sepal.Width:Petal.Width + Petal.Length:Species + 
>     Petal.Length:grp + Petal.Width:grp, data = iris)
> 
> Residuals:
>     Min      1Q  Median      3Q     Max 
> -0.6912 -0.1841 -0.0084  0.2045  0.6284 
> 
> Coefficients:
>                                Estimate Std. Error t value Pr(>|t|)    
> (Intercept)                     2.20278    0.54238    4.06  8.2e-05 ***
> Sepal.Width                     0.83841    0.12868    6.52  1.3e-09 ***
> Petal.Length                   -0.02234    0.28169   -0.08  0.93690    
> Petal.Width                     1.18753    0.43904    2.70  0.00772 ** 
> Speciesversicolor              -1.22571    0.53650   -2.28  0.02389 *  
> Speciesvirginica               -3.30198    0.64766   -5.10  1.1e-06 ***
> grp2                           -0.35680    0.18083   -1.97  0.05053 .  
> grp3                            0.07139    0.18136    0.39  0.69446    
> Sepal.Width:Petal.Width        -0.34861    0.10225   -3.41  0.00086 ***
> Petal.Length:Speciesversicolor  0.57694    0.26857    2.15  0.03348 *  
> Petal.Length:Speciesvirginica   0.90715    0.27149    3.34  0.00108 ** 
> Petal.Length:grp2               0.24474    0.12851    1.90  0.05899 .  
> Petal.Length:grp3               0.00486    0.12514    0.04  0.96909    
> Petal.Width:grp2               -0.56926    0.29398   -1.94  0.05490 .  
> Petal.Width:grp3                0.01301    0.29155    0.04  0.96448    
> ---
> Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> 
> Residual standard error: 0.28 on 135 degrees of freedom
> Multiple R-squared:  0.893,   Adjusted R-squared:  0.881 
> F-statistic: 80.1 on 14 and 135 DF,  p-value: <2e-16

That’s still a lot of parameters but, as you can see, almost all of them are now significant, and the R2 did not change much.

Although appealing, please note that these automated selection methods are heavily criticized, and should not be used in place of theoretical or hypothesis-driven reasons (i.e., you should have justified hypotheses about the parameters of your model).

Mixed and Bayesian models

For simple linear regressions as above, the selection is made using the step() function (available in base R), which performs a stepwise selection. However, this procedure is not available for other types of models, such as mixed or Bayesian models.

Mixed model
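
The output below corresponds to a call along these lines (per the vignette source, which pipes the mixed model through select_parameters() before summarizing):

```r
library(lme4)

lmer(Sepal.Length ~ Sepal.Width * Petal.Length * Petal.Width + (1 | Species),
     data = iris) %>%
  select_parameters() %>%
  summary()
```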

> Linear mixed model fit by REML ['lmerMod']
> Formula: Sepal.Length ~ Sepal.Width * Petal.Length * Petal.Width + (1 |  
>     Species)
>    Data: iris
> 
> REML criterion at convergence: 95
> 
> Scaled residuals: 
>     Min      1Q  Median      3Q     Max 
> -2.4935 -0.8045  0.0176  0.6593  2.3356 
> 
> Random effects:
>  Groups   Name        Variance Std.Dev.
>  Species  (Intercept) 0.0352   0.188   
>  Residual             0.0932   0.305   
> Number of obs: 150, groups:  Species, 3
> 
> Fixed effects:
>                                      Estimate Std. Error t value
> (Intercept)                            1.7768     0.9128    1.95
> Sepal.Width                            0.7501     0.2681    2.80
> Petal.Length                           0.8793     0.4971    1.77
> Petal.Width                           -2.0380     1.5196   -1.34
> Sepal.Width:Petal.Length              -0.1012     0.1611   -0.63
> Sepal.Width:Petal.Width                0.3435     0.4894    0.70
> Petal.Length:Petal.Width               0.2805     0.2428    1.16
> Sepal.Width:Petal.Length:Petal.Width  -0.0467     0.0769   -0.61
> 
> Correlation of Fixed Effects:
>             (Intr) Spl.Wd Ptl.Ln Ptl.Wd Sp.W:P.L S.W:P.W P.L:P.
> Sepal.Width -0.948                                             
> Petal.Lngth -0.746  0.757                                      
> Petal.Width -0.289  0.222 -0.306                               
> Spl.Wdt:P.L  0.705 -0.762 -0.983  0.338                        
> Spl.Wdt:P.W  0.250 -0.214  0.336 -0.986 -0.356                 
> Ptl.Lng:P.W  0.754 -0.705 -0.459 -0.679  0.416    0.642        
> S.W:P.L:P.W -0.732  0.730  0.442  0.675 -0.428   -0.670  -0.976
parameters/inst/doc/model_parameters_robust.Rmd0000644000176200001440000001524213611655362021540 0ustar liggesusers--- title: "Robust Estimation of Standard Errors, Confidence Intervals and p-values" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable selection, feature selection] vignette: > %\VignetteIndexEntry{Robust Estimation of Standard Errors} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("sandwich", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("clubSandwich", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function also allows the computation of standard errors, confidence intervals and p-values based on robust covariance matrix estimation from model parameters. Robust estimation is based on the packages **sandwich** and **clubSandwich**, so all models supported by either of these packages work with `model_parameters()` when `robust = TRUE`. ## Classical Regression Models ### Robust Covariance Matrix Estimation from Model Parameters By default, when `model_parameters(robust = TRUE)`, it internally calls `sandwich::vcovHC(type = "HC3")`. However, there are three arguments that allow for choosing different methods and options of robust estimation: `vcov_estimation`, `vcov_type` and `vcov_args` (see [`?standard_error_robust`](https://easystats.github.io/parameters/reference/standard_error_robust.html) for further details). Let us start with a simple example, which uses a heteroskedasticity-consistent covariance matrix estimation with estimation-type "HC3" (i.e. `sandwich::vcovHC(type = "HC3")` is called): ```{r} data(iris) model <- lm(Petal.Length ~ Sepal.Length * Species + Sepal.Width, data = iris) # model parameters, where SE, CI and p-values are based on robust estimation mp <- model_parameters(model, robust = TRUE) mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovHC(model)))) ``` ### Cluster-Robust Covariance Matrix Estimation (sandwich) If another covariance matrix estimation is required, use the `vcov_estimation`-argument. This argument needs the suffix for the related `vcov*()`-functions as value, i.e. `vcov_estimation = "CL"` would call `sandwich::vcovCL()`, or `vcov_estimation = "HAC"` would call `sandwich::vcovHAC()`. The specific estimation type can be changed with `vcov_type`. E.g., `sandwich::vcovCL()` accepts estimation types HC0 to HC3. In the next example, we use a clustered covariance matrix estimation with HC1-estimation type. ```{r} # change estimation-type mp <- model_parameters(model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1") mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovCL(model)))) ``` Usually, clustered covariance matrix estimation is used when there is a cluster-structure in the data. The variable indicating the cluster-structure can be defined in `sandwich::vcovCL()` with the `cluster`-argument. 
In `model_parameters()`, additional arguments that should be passed down to functions from the **sandwich** package can be specified in `vcov_args`: ```{r} iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) # change estimation-type, defining additional arguments mp <- model_parameters( model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1", vcov_args = list(cluster = iris$cluster) ) mp # compare standard errors to result from sandwich-package mp$SE unname(sqrt(diag(sandwich::vcovCL(model, cluster = iris$cluster)))) ``` ### Cluster-Robust Covariance Matrix Estimation (clubSandwich) Cluster-robust estimation of the variance-covariance matrix can also be achieved using `clubSandwich::vcovCR()`. Thus, when `vcov_estimation = "CR"`, the related function from the **clubSandwich** package is called. Note that this function _requires_ the specification of the `cluster`-argument. ```{r} # create fake-cluster-variable, to demonstrate cluster robust standard errors iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) # cluster-robust estimation mp <- model_parameters( model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$cluster) ) mp # compare standard errors to result from clubSsandwich-package mp$SE unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster)))) ``` ### Robust Covariance Matrix Estimation on Standardized Model Parameters Finally, robust estimation can be combined with standardization. However, robust covariance matrix estimation only works for `standardize = "refit"`. ```{r} # model parameters, robust estimation on standardized model model_parameters(model, standardize = "refit", robust = TRUE) ``` ## Mixed Models ### Robust Covariance Matrix Estimation for Mixed Models For linear mixed models, that by definition have a clustered ("hierarchical" or multilevel) structure in the data, it is also possible to estimate a cluster-robust covariance matrix. This is possible due to the **clubSandwich** package, thus we need to define the same arguments as in the above example. ```{r} library(lme4) data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) # normal model parameters, like from 'summary()' model_parameters(model) # model parameters, cluster robust estimation for mixed models model_parameters( model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp) ) ``` ### Robust Covariance Matrix Estimation on Standardized Mixed Model Parameters Again, robust estimation can be combined with standardization for linear mixed models as well, which in such cases also only works for `standardize = "refit"`. 
```{r} # model parameters, cluster robust estimation on standardized mixed model model_parameters( model, standardize = "refit", robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp) ) ``` parameters/inst/doc/parameters_reduction.Rmd0000644000176200001440000001430013611655362021030 0ustar liggesusers--- title: "Feature Reduction (PCA, cMDS, ICA...)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable extraction, feature extraction, dimension extraction] vignette: > %\VignetteIndexEntry{Feature Reduction (PCA, cMDS, ICA, ...)} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` Also known as [**feature extraction** or **dimension reduction**](https://en.wikipedia.org/wiki/Feature_extraction) in machine learning, the goal of variable reduction is to **reduce the number of predictors** by derivating, from a set of measured data, new variables intended to be informative and non-redundant. This method can be used to **simplify models**, which can benefit model interpretation, shorten fitting time, and improve generalization (by reducing overfitting). ## Quick and Exploratory Method Let's start by fitting a multiple regression with the `attitude` dataset, available is base R, to predict the overall **rating** by employees of their organization with the remaining variables (handling of employee **complaints**, special **privileges**, opportunity of **learning**, **raises**, a feedback considered too **critical** and opportunity of **advancement**). ```{r message=FALSE, warning=FALSE} model <- lm(rating ~ ., data = attitude) parameters(model) ``` We can explore a reduction of the number of parameters with the `reduce_parameters()` function. ```{r message=FALSE, warning=FALSE} newmodel <- reduce_parameters(model) parameters(newmodel) ``` This quickly *hints* toward the fact that the model could be represented via **two "latent" dimensions**, one correlated with all the positive things that a company has to offer, and the other one related to the amount of negative critiques received by the employees. These two dimensions have a positive and negative relationship with the company rating, respectively. > What does `reduce_parameters()` exactly do? This function performs a reduction in the parameters space (the number of variables). It starts by creating a new set of variables, based on a given method (the default method is "**PCA**", but other are available via the `method` argument, such as "**cMDS**", "**DRR**" or "**ICA**"). Then, it names this new dimensions using the original variables that *correlate* the most with it. For instance, a variable named 'V1_0.97/V4_-0.88' means that the V1 and the V4 variables correlate maximally (with respective coefficients of .97 and -.88) with this dimension. 
```{r message=FALSE, warning=FALSE} reduce_parameters(model, method = "cMDS") %>% parameters() ``` A different method (**Classical Multidimensional Scaling - cMDS**) suggests that negative critiques do not have a significant impact on the rating, and that the lack of opportunities of career advancement is a separate dimension with an importance on its own. Although this function can be useful in exploratory data analysis, it's best to perform the dimension reduction step in a **separate and dedicated stage**, as this is a very important process in the data analysis workflow. ## Principal Component Analysis (PCA) PCA is a widely used procedure that lies in-between dimension reduction and structural modelling. Indeed, one of the way of reducing the number of predictors is to extract a new set of uncorrelated variables that will *represent* variance of your initial dataset. But how the original variables relate between themselves can also be a question on its own. We can apply the `principal_components()` function to do the the predictors of the model: ```{r message=FALSE, warning=FALSE} pca <- principal_components(insight::get_predictors(model), n = "auto") pca ``` The `principal_component()` function automatically selected one component (if the number of components is not specified, this function uses [`n_factors()`](https://easystats.github.io/parameters/articles/n_factors.html) to estimate the optimal number to keep) and returned the **loadings**, i.e., the relationship with all of the original variables. As we can see here, it seems that our new component captured the essence (more than half of the total variance present in the original dataset) of all our other variables together. We can **extract** the values of this component for each of our observation using the `predict()` method and add in the response variable of our initial dataset. ```{r message=FALSE, warning=FALSE} newdata <- predict(pca) newdata$rating <- attitude$rating ``` We can know update the model with this new component: ```{r message=FALSE, warning=FALSE} update(model, rating ~ PC1, data = newdata) %>% parameters() ``` ### Using the `psych` package for PCA You can also use different packages for models, such as [`psych`](https://cran.r-project.org/package=psych) [@revelle2018] or [`FactoMineR`](http://factominer.free.fr/) for PCA or Exploratory Factor Analysis (EFA), as it allows for more flexibility, control and details when running such procedures. Thus, the functions from this package are **fully supported** by `parameters` through the `model_parameters()` function. As such, the above analysis can be fully reproduced as follows: ```{r message=FALSE, warning=FALSE} library(psych) # Fit the PCA pca <- psych::principal(attitude, nfactors = 1) %>% model_parameters() pca ``` *Note:* By default, `psych::principal()` uses a **varimax** rotation to extract rotated components, possibly leading to discrepancies in the results. Finally, refit the model: ```{r message=FALSE, warning=FALSE} df <- cbind(attitude, predict(pca)) update(model, rating ~ PC1, data = df) %>% model_parameters() ``` # Referencesparameters/inst/doc/efa_cfa.html0000644000176200001440000013130213620043700016364 0ustar liggesusers

Structural Models (EFA, CFA, SEM…)

How to perform a Factor Analysis (FA)

The difference between PCA and EFA can be quite hard to grasp intuitively, as their outputs look very similar. The idea is that PCA aims at extracting the most variance possible from all variables of the dataset, whereas EFA aims at creating consistent factors from the dataset without desperately trying to represent all the variables.

This is why PCA is popular for feature reduction, as it will try to best represent the variance contained in the original data, minimizing the loss of information. On the other hand, EFA is usually used in the context of exploring the latent dimensions that might be hidden in the observed variables, without necessarily striving to represent the whole dataset.

To illustrate EFA, let us use the International Personality Item Pool data available in the psych package. It includes 25 personality self-report items. The authors built these items following the big 5 personality structure.

Factor Structure (Sphericity and KMO)

The first step is to test the dataset for its suitability for factor analysis. Two existing methods are Bartlett’s Test of Sphericity and the Kaiser-Meyer-Olkin (KMO) Measure of Sampling Adequacy (MSA). The former tests whether the correlation matrix is significantly different from an identity matrix; that is, it statistically tests for the presence of correlations among the variables, providing the probability that the correlation matrix has significant correlations among at least some of the variables. Since factor analysis requires some relationships between variables to work, a significant Bartlett’s test of sphericity is required, say p < .001. The latter was introduced by Kaiser (1970) as the Measure of Sampling Adequacy (MSA), later modified by Kaiser and Rice (1974). The KMO statistic, which can vary from 0 to 1, indicates the degree to which each variable in a set is predicted without error by the other variables. A value of 0 indicates that the sum of partial correlations is large relative to the sum of correlations, indicating that factor analysis is likely to be inappropriate. A KMO value close to 1 indicates that the sum of partial correlations is not large relative to the sum of correlations, and so factor analysis should yield distinct and reliable factors.

Both tests can be performed by using the check_factorstructure() function.
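
A sketch of the data preparation and the check, following the vignette source included in this package:

```r
library(parameters)
library(dplyr)
library(psych)

# Select only the 25 first columns corresponding to the items,
# and remove missing values
data <- na.omit(psych::bfi[, 1:25])

check_factorstructure(data)
```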

> # Is the data suitable for Factor Analysis?
> 
>   - KMO: The Kaiser, Meyer, Olkin (KMO) measure of sampling adequacy suggests that data seems appropriate for factor analysis (KMO = 0.85).
>   - Sphericity: Bartlett's test of sphericity suggests that there is sufficient significant correlation in the data for factor analysis (Chisq(300) = 18146.07, p < .001).

Exploratory Factor Analysis (EFA)

Now that we are confident that our dataset is appropriate, we will explore a factor structure made of 5 latent variables, corresponding to the item authors’ theory of personality.
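
Per the vignette source (the pipe assumes dplyr is loaded):

```r
# Fit an EFA with 5 factors and summarize its parameters
efa <- psych::fa(data, nfactors = 5) %>%
  model_parameters(sort = TRUE, threshold = "max")
efa
```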

> # Rotated loadings from Principal Component Analysis (oblimin-rotation)
> 
> Variable |  MR2 |   MR3 |   MR1 |   MR5 |   MR4 | Complexity | Uniqueness
> -------------------------------------------------------------------------
> N1       | 0.83 |       |       |       |       |       1.07 |       0.32
> N2       | 0.78 |       |       |       |       |       1.03 |       0.39
> N3       | 0.70 |       |       |       |       |       1.08 |       0.46
> N5       | 0.48 |       |       |       |       |       2.00 |       0.65
> N4       | 0.47 |       |       |       |       |       2.33 |       0.49
> C2       |      |  0.67 |       |       |       |       1.18 |       0.55
> C4       |      | -0.64 |       |       |       |       1.13 |       0.52
> C3       |      |  0.57 |       |       |       |       1.10 |       0.68
> C5       |      | -0.56 |       |       |       |       1.41 |       0.56
> C1       |      |  0.55 |       |       |       |       1.20 |       0.65
> E2       |      |       |  0.67 |       |       |       1.08 |       0.45
> E4       |      |       | -0.59 |       |       |       1.52 |       0.46
> E1       |      |       |  0.55 |       |       |       1.22 |       0.65
> E5       |      |       | -0.42 |       |       |       2.68 |       0.59
> E3       |      |       | -0.41 |       |       |       2.65 |       0.56
> A3       |      |       |       |  0.68 |       |       1.06 |       0.46
> A2       |      |       |       |  0.66 |       |       1.03 |       0.54
> A5       |      |       |       |  0.54 |       |       1.48 |       0.53
> A4       |      |       |       |  0.45 |       |       1.74 |       0.70
> A1       |      |       |       | -0.44 |       |       1.88 |       0.80
> O3       |      |       |       |       |  0.62 |       1.16 |       0.53
> O5       |      |       |       |       | -0.54 |       1.21 |       0.70
> O1       |      |       |       |       |  0.52 |       1.10 |       0.68
> O2       |      |       |       |       | -0.47 |       1.68 |       0.73
> O4       |      |       |       |       |  0.36 |       2.65 |       0.75
> 
> The 5 latent factors (oblimin rotation) accounted for 42.36% of the total variance of the original data (MR2 = 10.31%, MR3 = 8.39%, MR1 = 8.83%, MR5 = 8.29%, MR4 = 6.55%).

As we can see, the 25 items spread nicely over the 5 latent factors, the famous big 5. Based on this model, we can now predict back the scores for each individual for these new variables:
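
The scores below can be obtained along these lines (the names, including the spelling “Opennness”, are taken verbatim from the vignette source):

```r
predict(
  efa,
  names = c("Neuroticism", "Conscientiousness", "Extraversion",
            "Agreeableness", "Opennness")
)
```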

>   Neuroticism Conscientiousness Extraversion Agreeableness Opennness
> 1       -0.22            -1.327       -0.128        -0.855     -1.61
> 2        0.16            -0.572       -0.466        -0.072     -0.17
> 3        0.62            -0.043       -0.141        -0.552      0.23
> 4       -0.12            -1.063       -0.058        -0.091     -1.06
> 5       -0.17            -0.099       -0.460        -0.712     -0.66

How many factors to retain in Factor Analysis (FA)

When running a factor analysis (FA), one often needs to specify how many components (or latent variables) to retain or to extract. This decision is often motivated or supported by some statistical indices and procedures aiming at finding the optimal number of factors.

Interestingly, a huge number of methods exists to statistically address this issue, sometimes giving very different results… Unfortunately, there is no consensus on which method to use, or which is the best.

The Method Agreement procedure

The Method Agreement procedure, first implemented in the psycho package (Makowski 2018), proposes to rely on the consensus of methods, rather than on one method in particular.

This procedure can be easily used via the n_factors() function, re-implemented and improved in the parameters package. One can provide a dataframe, and the function will run a large number of routines and return the optimal number of factors based on the highest consensus.
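
As in the vignette source:

```r
n <- n_factors(data)
n
```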

> # Method Agreement Procedure:
> 
> The choice of 1 dimensions is supported by 3 (13.04%) methods out of 23 (Acceleration factor, TLI, RMSEA).

Interestingly, the smallest number of factors that most methods suggest is 6… which is consistent with the newer models of personality (e.g., HEXACO).

More details, as well as a summary table can be obtained as follows:
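
Per the vignette source:

```r
as.data.frame(n)
summary(n)
```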

>    n_Factors              Method              Family
> 1          1 Acceleration factor               Scree
> 2          1                 TLI                 Fit
> 3          1               RMSEA                 Fit
> 4          3                 CNG                 CNG
> 5          4                beta Multiple_regression
> 6          4    VSS complexity 1                 VSS
> 7          5    VSS complexity 2                 VSS
> 8          5       Velicer's MAP        Velicers_MAP
> 9          6 Optimal coordinates               Scree
> 10         6   Parallel analysis               Scree
> 11         6    Kaiser criterion               Scree
> 12         7                   t Multiple_regression
> 13         7                   p Multiple_regression
> 14         7                  R2            Scree_SE
> 15         8            SE Scree            Scree_SE
> 16         8                 BIC                 BIC
> 17         8                 BIC                 Fit
> 18        11      BIC (adjusted)                 BIC
> 19        18                CRMS                 Fit
> 20        22             Bentler             Bentler
> 21        24            Bartlett             Barlett
> 22        24            Anderson             Barlett
> 23        24              Lawley             Barlett
>    n_Factors n_Methods
> 1          1         3
> 2          3         1
> 3          4         2
> 4          5         2
> 5          6         3
> 6          7         3
> 7          8         3
> 8         11         1
> 9         18         1
> 10        22         1
> 11        24         3

A plot can also be obtained (the see package must be loaded):
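
A sketch, following the vignette source:

```r
library(see)

plot(n) + theme_modern()
```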

Confirmatory Factor Analysis (CFA)

We’ve seen above that while an EFA with 5 latent variables works great on our dataset, a structure with 6 latent factors might in fact be more appropriate. How can we statistically test if that’s actually the case? This can be done using Confirmatory Factor Analysis (CFA), which bridges factor analysis with Structural Equation Modelling (SEM).

However, in order to do that cleanly, EFA should be independent of CFA, in the sense that the factor structure should be explored on a “training” set, and then tested (or “confirmed”) on a test set. In other words, the datasets used for exploration and confirmation are not the same. Note that this procedure is also standard in the field of machine learning.

Partition the data

The data can be easily split into two sets with the data_partition() function, through which we will use 70% of the sample as training and the rest as test.
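
As in the vignette source:

```r
partitions <- data_partition(data, training_proportion = 0.7)
training <- partitions$training
test <- partitions$test
```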

Create CFA structures out of EFA models

In the next step, we will run two EFA models on the training set, specifying 5 and 6 latent factors respectively, that we will then transform into CFA structures.
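
Following the vignette source:

```r
structure_big5 <- psych::fa(training, nfactors = 5) %>%
  efa_to_cfa()
structure_big6 <- psych::fa(training, nfactors = 6) %>%
  efa_to_cfa()

# Investigate how a structure looks
structure_big5
```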

> # Latent variables
> MR2 =~ N1 + N2 + N3 + N4 + N5
> MR1 =~ E1 + E2 + E3 + E4 + E5
> MR3 =~ C1 + C2 + C3 + C4 + C5
> MR5 =~ A1 + A2 + A3 + A4 + A5
> MR4 =~ O1 + O2 + O3 + O4 + O5

As we can see, a structure is just a string encoding how the manifest variables (the observed variables) are integrated into latent variables.

Fit and Compare models

We can finally fit these structures to the test set using the lavaan package, and compare the resulting models:
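
A sketch of the fitting and comparison, per the vignette source:

```r
library(lavaan)
library(performance)

big5 <- lavaan::cfa(structure_big5, data = test)
big6 <- lavaan::cfa(structure_big6, data = test)

performance::compare_performance(big5, big6)
```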

> # Comparison of Model Performance Indices
> 
> Model |   Type |   Chisq | Chisq_df | Chisq_p | Baseline | Baseline_df | Baseline_p |  GFI | AGFI |  NFI | NNFI |  CFI | RMSEA | RMSEA_CI_low | RMSEA_CI_high | RMSEA_p |  RMR | SRMR |  RFI | PNFI |  IFI |  RNI | Loglikelihood |      AIC |      BIC | BIC_adjusted | BF
> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
> big5  | lavaan | 1357.27 |      265 |       0 |  5821.53 |         300 |          0 | 0.86 | 0.82 | 0.77 | 0.78 | 0.80 |  0.07 |         0.07 |          0.08 |       0 | 0.16 | 0.08 | 0.74 | 0.68 | 0.80 | 0.80 |     -29897.05 | 59914.10 | 60189.76 |     59999.24 |   
> big6  | lavaan | 1529.93 |      265 |       0 |  5821.53 |         300 |          0 | 0.84 | 0.81 | 0.74 | 0.74 | 0.77 |  0.08 |         0.08 |          0.08 |       0 | 0.18 | 0.08 | 0.70 | 0.65 | 0.77 | 0.77 |     -29983.38 | 60086.76 | 60362.43 |     60171.91 |  0

All in all, it seems that the big 5 structure remains quite reliable.

References

Makowski, Dominique. 2018. “The Psycho Package: An Efficient and Publishing-Oriented Workflow for Psychological Science.” Journal of Open Source Software 3 (22): 470.

parameters/inst/doc/parameters_selection.Rmd0000644000176200001440000001227213620032437021017 0ustar liggesusers--- title: "Parameters Selection" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, variable selection, feature selection] vignette: > %\VignetteIndexEntry{Parameters Selection} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ``` Also known as [**feature selection**](https://en.wikipedia.org/wiki/Feature_selection) in machine learning, the goal of variable selection is to **identify a subset of predictors** to **simplify models**. This can benefit model interpretation, shorten fitting time, and improve generalization (by reducing overfitting). There are many different methods. The one that is appropriate for a given problem depends on the model type, the data, the objective and the theoretical rationale. The `parameters` package implements a helper that will **automatically pick a method deemed appropriate for the provided model**, run the variables selection and return the **optimal formula**, which you can then re-use to update the model. ## Simple linear regression ### Fit a powerful model If you are familiar with R and the formula interface, you know of the possibility of including a dot (`.`) in the formula, signifying **"all the remaining variables"**. Curiously, few are aware of the possibility of additionally easily adding **all the interaction terms**. This can be achieved using the `*.*` notation. Let's try that with the linear regression predicting **Sepal.Length** with the [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset, included by default in R. ```{r message=FALSE, warning=FALSE} model <- lm(Sepal.Length ~ .*., data=iris) summary(model) ``` ***Wow, that's a lot of parameters! And almost none of them is significant...*** Which is ***weird***, considering that **gorgeous R2! 0.882!** *I wish I had that in my research...* ### Too many parameters? As you might know, having a **model that is too performant is not always a good thing**. For instance, it can be a marker of [**overfitting**](https://en.wikipedia.org/wiki/Overfitting): the model corresponds too closely to a particular set of data, and may therefore fail to predict future observations reliably. In multiple regressions, in can also fall under the [**Freedman's paradox**](https://en.wikipedia.org/wiki/Freedman%27s_paradox): some predictors that have actually no relation to the dependent variable being predicted will be **spuriously found to be statistically significant**. Let's run a few checks using the [**performance**](https://github.com/easystats/performance) package: ```{r message=FALSE, warning=FALSE} library(performance) check_normality(model) check_heteroscedasticity(model) check_autocorrelation(model) check_collinearity(model) ``` The main issue of the model seems to be the high [multicollinearity](https://en.wikipedia.org/wiki/Multicollinearity). 
This suggests that our model might not be able to give valid results about any individual predictor, nor tell which predictors are redundant with respect to others. ### Parameters selection Time to do some variables selection. This can be easily done using the `select_parameters()` function, that will **automatically select the best variables** and update the model accordingly. One way of using that is in a tidy pipeline (using [`%>%`](https://cran.r-project.org/package=magrittr/README.html)), using this output to update a new model. ```{r message=FALSE, warning=FALSE} lm(Sepal.Length ~ .*., data = iris) %>% select_parameters() %>% summary() ``` That's still a lot of parameters, but as you can see, but almost all of them are now significant, and the R2 did not change much. Although appealing, please note that these automated selection methods are [**quite criticized**](https://towardsdatascience.com/stopping-stepwise-why-stepwise-selection-is-bad-and-what-you-should-use-instead-90818b3f52df), and should not be used in place of **theoretical** or **hypothetical** reasons (*i.e.*, you should have justified hypotheses about the parameters of your model). ## Mixed and Bayesian models For simple linear regressions as above, the selection is made using the `step()` function (available in base R). This performs a [**stepwise**](https://en.wikipedia.org/wiki/Stepwise_regression) selection. However, this procedures is not available for other types of models, such as **mixed** or **Bayesian** models. ### Mixed model ```{r message=FALSE, warning=FALSE} library(lme4) lmer(Sepal.Length ~ Sepal.Width * Petal.Length * Petal.Width + (1|Species), data = iris) %>% select_parameters() %>% summary() ```parameters/inst/doc/parameters_selection.R0000644000176200001440000000231313620044012020461 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ## ----message=FALSE, warning=FALSE--------------------------------------------- model <- lm(Sepal.Length ~ .*., data=iris) summary(model) ## ----message=FALSE, warning=FALSE--------------------------------------------- library(performance) check_normality(model) check_heteroscedasticity(model) check_autocorrelation(model) check_collinearity(model) ## ----message=FALSE, warning=FALSE--------------------------------------------- lm(Sepal.Length ~ .*., data = iris) %>% select_parameters() %>% summary() ## ----message=FALSE, warning=FALSE--------------------------------------------- library(lme4) lmer(Sepal.Length ~ Sepal.Width * Petal.Length * Petal.Width + (1|Species), data = iris) %>% select_parameters() %>% summary() parameters/inst/doc/model_parameters.html0000644000176200001440000011216013620044003020341 0ustar liggesusers

Summary of Model Parameters

The model_parameters() function (also accessible via the shortcut parameters()) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered as a lightweight alternative to broom::tidy(), with some notable differences:

  • The names of the returned data frame are specific to their content. For instance, the column containing the statistic is named following the statistic name, i.e., t, z, etc., instead of a generic name such as statistic (however, you can get standardized (generic) column names using standardize_names()).
  • It is able to compute or extract indices not available by default, such as p-values, CIs, etc.
  • It includes feature engineering capabilities, such as parameter bootstrapping.

Correlations and t-tests

Frequentist
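
The two outputs below correspond to calls along these lines (as in the vignette source; parameters and dplyr are loaded here and assumed available in the later examples):

```r
library(parameters)
library(dplyr)

cor.test(iris$Sepal.Length, iris$Sepal.Width) %>%
  parameters()

t.test(mpg ~ vs, data = mtcars) %>%
  parameters()
```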

> Parameter1        |       Parameter2 |     r |     t |  df |     p |        95% CI |  Method
> --------------------------------------------------------------------------------------------
> iris$Sepal.Length | iris$Sepal.Width | -0.12 | -1.44 | 148 | 0.152 | [-0.27, 0.04] | Pearson
> Parameter | Group | Mean_Group1 | Mean_Group2 | Difference |     t |    df |      p |          95% CI |                  Method
> -------------------------------------------------------------------------------------------------------------------------------
> mpg       |    vs |       16.62 |       24.56 |       7.94 | -4.67 | 22.72 | < .001 | [-11.46, -4.42] | Welch Two Sample t-test

Bayesian
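
Per the vignette source:

```r
library(BayesFactor)

BayesFactor::correlationBF(iris$Sepal.Length, iris$Sepal.Width) %>%
  parameters()

BayesFactor::ttestBF(formula = mpg ~ vs, data = mtcars) %>%
  parameters()
```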

> Parameter | Median |        89% CI |     pd | % in ROPE |              Prior | Effects |   Component |   BF
> -----------------------------------------------------------------------------------------------------------
> rho       |  -0.11 | [-0.23, 0.02] | 92.90% |    43.13% | Cauchy (0 +- 0.33) |   fixed | conditional | 0.51
> Parameter  | Median |          89% CI |     pd | % in ROPE |              Prior | Effects |   Component |     BF
> ----------------------------------------------------------------------------------------------------------------
> Difference |  -7.30 | [-10.15, -4.58] | 99.98% |        0% | Cauchy (0 +- 0.71) |   fixed | conditional | 529.27

ANOVAs

Indices of effect size for ANOVAs, such as partial and non-partial versions of eta_squared(), epsilon_squared() or omega_squared(), were moved to the effectsize-package. However, parameters uses these functions to compute such indices for parameter summaries.

Simple
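
As in the vignette source:

```r
aov(Sepal.Length ~ Species, data = iris) %>%
  parameters(
    omega_squared = "partial",
    eta_squared = "partial",
    epsilon_squared = TRUE
  )
```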

> Parameter | Sum_Squares |  df | Mean_Square |      F |      p | Omega_Sq (partial) | Eta_Sq (partial) | Epsilon_sq
> ------------------------------------------------------------------------------------------------------------------
> Species   |       63.21 |   2 |       31.61 | 119.26 | < .001 |               0.61 |             0.62 |       0.61
> Residuals |       38.96 | 147 |        0.27 |        |        |                    |                  |

Repeated measures

parameters() (resp. its alias model_parameters()) also works on repeated measures ANOVAs, whether computed from aov() or from a mixed model.
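
Per the vignette source:

```r
aov(mpg ~ am + Error(gear), data = mtcars) %>%
  parameters()
```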

> Group  | Parameter | Sum_Squares | df | Mean_Square |    F |     p
> ------------------------------------------------------------------
> Within |        am |      145.45 |  1 |      145.45 | 5.85 | 0.022
> Within | Residuals |      720.85 | 29 |       24.86 |      |      
> gear   |        am |      259.75 |  1 |      259.75 |      |

Regressions (GLMs, Mixed Models, GAMs, …)

parameters() (resp. its alias model_parameters()) was mainly built with regression models in mind. It works for many types of models and packages, including mixed models and Bayesian models.

GLMs
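
As in the vignette source:

```r
glm(vs ~ poly(mpg, 2) + cyl, data = mtcars) %>%
  parameters()
```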

> Parameter        | Coefficient |   SE |         95% CI |     t | df |      p
> ----------------------------------------------------------------------------
> (Intercept)      |        2.04 | 0.39 | [ 1.27,  2.80] |  5.22 | 28 | < .001
> mpg [1st degree] |       -0.33 | 0.61 | [-1.53,  0.87] | -0.53 | 28 | 0.599 
> mpg [2nd degree] |        0.10 | 0.32 | [-0.54,  0.74] |  0.31 | 28 | 0.762 
> cyl              |       -0.26 | 0.06 | [-0.38, -0.14] | -4.14 | 28 | < .001

Mixed Models
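
Per the vignette source:

```r
library(lme4)

lmer(Sepal.Width ~ Petal.Length + (1 | Species), data = iris) %>%
  parameters()
```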

> Parameter    | Coefficient |   SE |       95% CI |    t |  df |      p
> ----------------------------------------------------------------------
> (Intercept)  |        2.00 | 0.56 | [0.90, 3.10] | 3.56 | 146 | < .001
> Petal.Length |        0.28 | 0.06 | [0.17, 0.40] | 4.75 | 146 | < .001

Bayesian Models

model_parameters() works fine with Bayesian models from the rstanarm package…
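
A sketch of the call behind this output, per the vignette source (the vignette itself uses a reduced number of iterations and chains for speed):

```r
library(rstanarm)

stan_glm(mpg ~ wt * cyl, data = mtcars) %>%
  parameters()
```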

> Parameter   | Median |          89% CI |     pd | % in ROPE |  Rhat | ESS |               Prior
> -----------------------------------------------------------------------------------------------
> (Intercept) |  53.16 | [ 42.36, 61.52] |   100% |        0% | 1.002 | 188 | Normal (0 +- 60.27)
> wt          |  -8.19 | [-11.59, -4.40] |   100% |     0.20% | 1.006 | 184 | Normal (0 +- 15.40)
> cyl         |  -3.71 | [ -4.88, -1.77] |   100% |     0.20% | 1.000 | 206 |  Normal (0 +- 8.44)
> wt * cyl    |   0.76 | [  0.19,  1.25] | 98.40% |    32.00% | 1.004 | 179 |  Normal (0 +- 1.36)

… as well as for (more complex) models from the brms package. For more complex models, other model components can be printed using the effects and component arguments.
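
The model behind these outputs is fit along these lines (reproduced from the vignette source; the fish dataset is assumed to be available as in that source):

```r
library(brms)

data(fish)  # as referenced in the vignette source
set.seed(123)

model <- brm(
  bf(
    count ~ persons + child + camper + (1 | persons),
    zi ~ child + camper + (1 | persons)
  ),
  data = fish,
  family = zero_inflated_poisson(),
  iter = 500, chains = 1, refresh = 0
)

parameters(model, component = "conditional")
parameters(model, effects = "all", component = "all")
```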

> Parameter   | Median |         89% CI |   pd | % in ROPE | ESS |  Rhat
> ----------------------------------------------------------------------
> b_Intercept |  -0.82 | [-1.32, -0.43] | 100% |        0% |  75 | 1.012
> b_persons   |   0.83 | [ 0.70,  0.99] | 100% |        0% |  66 | 1.003
> b_child     |  -1.14 | [-1.30, -0.99] | 100% |        0% | 159 | 1.001
> b_camper1   |   0.75 | [ 0.59,  0.86] | 100% |        0% | 269 | 0.999
> # Fixed Effects (Count Model) 
> 
> Parameter   | Median |         89% CI |   pd | % in ROPE | ESS |  Rhat
> ----------------------------------------------------------------------
> (Intercept) |  -0.82 | [-1.32, -0.43] | 100% |        0% |  75 | 1.012
> persons     |   0.83 | [ 0.70,  0.99] | 100% |        0% |  66 | 1.003
> child       |  -1.14 | [-1.30, -0.99] | 100% |        0% | 159 | 1.001
> camper1     |   0.75 | [ 0.59,  0.86] | 100% |        0% | 269 | 0.999
> 
> # Fixed Effects (Zero-Inflated Model) 
> 
> Parameter   | Median |         89% CI |     pd | % in ROPE | ESS |  Rhat
> ------------------------------------------------------------------------
> (Intercept) |  -0.63 | [-1.84,  0.56] | 76.00% |     8.00% |  84 | 1.067
> child       |   1.81 | [ 1.27,  2.35] |   100% |        0% | 139 | 0.996
> camper1     |  -0.83 | [-1.61, -0.38] | 98.40% |     1.20% | 187 | 0.996
> 
> # Random Effects (Count Model) 
> 
> Parameter | Median |        89% CI |     pd | % in ROPE | ESS |  Rhat
> ---------------------------------------------------------------------
> persons.1 |   0.00 | [-0.26, 0.23] | 54.40% |    67.20% |  52 | 1.000
> persons.2 |   0.02 | [-0.13, 0.18] | 64.80% |    75.20% | 145 | 1.020
> persons.3 |   0.00 | [-0.15, 0.11] | 52.40% |    78.00% | 144 | 0.997
> persons.4 |   0.00 | [-0.18, 0.26] | 51.20% |    67.60% |  76 | 0.996
> 
> # Random Effects (Zero-Inflated Model) 
> 
> Parameter | Median |        89% CI |     pd | % in ROPE | ESS |  Rhat
> ---------------------------------------------------------------------
> persons.1 |   1.22 | [ 0.12, 2.62] | 95.60% |     1.60% |  56 | 1.093
> persons.2 |   0.23 | [-0.79, 1.61] | 64.00% |    10.00% |  56 | 1.097
> persons.3 |  -0.17 | [-1.13, 1.19] | 58.40% |     6.80% | 107 | 1.079
> persons.4 |  -1.34 | [-2.37, 0.07] | 96.00% |     3.20% | 107 | 1.058

Structural Models (PCA, EFA, CFA, SEM…)

The parameters package extends its support to structural models.

Principal Component Analysis (PCA) and Exploratory Factor Analysis (EFA)
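
The two outputs below correspond to calls along these lines (per the vignette source; graph = FALSE suppresses FactoMineR's plotting):

```r
library(psych)

psych::pca(mtcars, nfactors = 3) %>%
  parameters()

library(FactoMineR)

FactoMineR::FAMD(iris, ncp = 3, graph = FALSE) %>%
  parameters()
```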

> # Rotated loadings from Principal Component Analysis (varimax-rotation)
> 
> Variable |   RC2 |   RC3 |   RC1 | Complexity | Uniqueness
> ----------------------------------------------------------
> mpg      |  0.66 | -0.41 | -0.54 |       2.63 |       0.10
> cyl      | -0.62 |  0.67 |  0.34 |       2.49 |       0.05
> disp     | -0.72 |  0.52 |  0.35 |       2.33 |       0.10
> hp       | -0.30 |  0.64 |  0.63 |       2.40 |       0.10
> drat     |  0.85 | -0.26 | -0.05 |       1.19 |       0.21
> wt       | -0.78 |  0.21 |  0.51 |       1.90 |       0.08
> qsec     | -0.18 | -0.91 | -0.28 |       1.28 |       0.06
> vs       |  0.28 | -0.86 | -0.23 |       1.36 |       0.12
> am       |  0.92 |  0.14 | -0.11 |       1.08 |       0.12
> gear     |  0.91 | -0.02 |  0.26 |       1.16 |       0.10
> carb     |  0.11 |  0.44 |  0.85 |       1.53 |       0.07
> 
> The 3 principal components (varimax rotation) accounted for 89.87% of the total variance of the original data (RC2 = 41.43%, RC3 = 29.06%, RC1 = 19.39%).
> # Loadings from Principal Component Analysis (no rotation)
> 
> Variable     | Dim.1 | Dim.2 | Dim.3 | Complexity
> -------------------------------------------------
> Sepal.Length |  0.75 |  0.07 |  0.10 |       1.05
> Sepal.Width  |  0.23 |  0.51 |  0.23 |       1.86
> Petal.Length |  0.98 |  0.00 |  0.00 |       1.00
> Petal.Width  |  0.94 |  0.01 |  0.00 |       1.00
> Species      |  0.96 |  0.75 |  0.26 |       2.05
> 
> The 3 latent factors accounted for 96.73% of the total variance of the original data (Dim.1 = 64.50%, Dim.2 = 22.37%, Dim.3 = 9.86%).

Confirmatory Factor Analysis (CFA) and Structural Equation Models (SEM)

Frequentist
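
As in the vignette source:

```r
library(lavaan)

model <- lavaan::cfa(
  'visual  =~ x1 + x2 + x3
   textual =~ x4 + x5 + x6
   speed   =~ x7 + x8 + x9',
  data = HolzingerSwineford1939
)

model_parameters(model)
```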

> # Loading type
> 
> Link          | Coefficient |   SE |       95% CI |      p
> ----------------------------------------------------------
> visual =~ x1  |        1.00 | 0.00 | [1.00, 1.00] | < .001
> visual =~ x2  |        0.55 | 0.10 | [0.36, 0.75] | < .001
> visual =~ x3  |        0.73 | 0.11 | [0.52, 0.94] | < .001
> textual =~ x4 |        1.00 | 0.00 | [1.00, 1.00] | < .001
> textual =~ x5 |        1.11 | 0.07 | [0.98, 1.24] | < .001
> textual =~ x6 |        0.93 | 0.06 | [0.82, 1.03] | < .001
> speed =~ x7   |        1.00 | 0.00 | [1.00, 1.00] | < .001
> speed =~ x8   |        1.18 | 0.16 | [0.86, 1.50] | < .001
> speed =~ x9   |        1.08 | 0.15 | [0.79, 1.38] | < .001
> 
> # Correlation type
> 
> Link              | Coefficient |   SE |       95% CI |      p
> --------------------------------------------------------------
> visual ~~ textual |        0.41 | 0.07 | [0.26, 0.55] | < .001
> visual ~~ speed   |        0.26 | 0.06 | [0.15, 0.37] | < .001
> textual ~~ speed  |        0.17 | 0.05 | [0.08, 0.27] | < .001

Bayesian

blavaan to be done.

Meta-Analysis

parameters() also works for rma-objects from the metafor package.
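
Per the vignette source:

```r
library(metafor)

mydat <- data.frame(
  effectsize = c(-0.393, 0.675, 0.282, -1.398),
  standarderror = c(0.317, 0.317, 0.13, 0.36)
)

rma(yi = effectsize, sei = standarderror, method = "REML", data = mydat) %>%
  model_parameters()
```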

> Parameter | Coefficient |   SE |         95% CI |     z |      p | Weight
> -------------------------------------------------------------------------
> Study 1   |       -0.39 | 0.32 | [-1.01,  0.23] | -1.24 | 0.215  |   9.95
> Study 2   |        0.68 | 0.32 | [ 0.05,  1.30] |  2.13 | 0.033  |   9.95
> Study 3   |        0.28 | 0.13 | [ 0.03,  0.54] |  2.17 | 0.030  |  59.17
> Study 4   |       -1.40 | 0.36 | [-2.10, -0.69] | -3.88 | < .001 |   7.72
> Overall   |       -0.18 | 0.44 | [-1.05,  0.68] | -0.42 | 0.676  |

Plotting Model Parameters

There is a plot()-method implemented in the see-package. Several examples are shown in this vignette.

parameters/inst/doc/efa_cfa.Rmd0000644000176200001440000002072313617043573016164 0ustar liggesusers--- title: "Structural Models (EFA, CFA, SEM...)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, parameters, efa, cfa, factor analysis, sem, fa, pca, how many factors, n factors] vignette: > %\VignetteIndexEntry{Structural Models (EFA, CFA, SEM, ...)} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("see", quietly = TRUE) || !requireNamespace("lavaan", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } set.seed(333) ``` # How to perform a Factor Analysis (FA) The difference between PCA and EFA can be quite hard to intuitively grasp as their output is very familiar. The idea is that PCA aims at extracting the most variance possible from all variables of the dataset, whereas EFA aims at creating consistent factors from the dataset without desperately trying to represent all the variables. This is why PCA is popular for feature reduction, as it will try to best represent the variance contained in the original data, minimizing the loss of information. On the other hand, EFA is usually in the context of exploring the latent dimensions that might be hidden in the observed variables, without necessary striving at representing the whole dataset. To illustrate EFA, let us use the [International Personality Item Pool](https://ipip.ori.org/) data available in the [`psych`](https://www.personality-project.org/r/html/bfi.html) package. It includes 25 personality self report items. The authors built these items following the **big 5** personality structure. ## Factor Structure (Sphericity and KMO) The first step is to test the dataset for factor analysis suitability. Two existing methods are the **Bartlett's Test of Sphericity** and the **Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA)**. The former tests whether a matrix is significantly different from an identity matrix. This statistical test for the presence of correlations among variables, providing the statistical probability that the correlation matrix has significant correlations among at least some of variables. As for factor analysis to work, some relationships between variables are needed, thus, a significant Bartlett's test of sphericity is required, say *p* < .001. The latter was introduced by Kaiser (1970) as the Measure of Sampling Adequacy (MSA), later modified by Kaiser and Rice (1974). The Kaiser-Meyer-Olkin (KMO) statistic, which can vary from 0 to 1, indicates the degree to which each variable in a set is predicted without error by the other variables. A value of 0 indicates that the sum of partial correlations is large relative to the sum correlations, indicating factor analysis is likely to be inappropriate. A KMO value close to 1 indicates that the sum of partial correlations is not large relative to the sum of correlations and so factor analysis should yield distinct and reliable factors. Both tests can be performed by using the `check_factorstructure()` function. 
```{r message=FALSE, warning=FALSE} library(parameters) library(dplyr) library(psych) # Load the data data <- psych::bfi[, 1:25] # Select only the 25 first columns corresponding to the items data <- na.omit(data) # remove missing values # Check factor structure check_factorstructure(data) ``` ## Exploratory Factor Analysis (EFA) Now that we are confident that our dataset is appropriate, we will explore a factor structure made of 5 latent variables, corresponding to the items' authors theory of personality. ```{r message=FALSE, warning=FALSE} # Fit an EFA efa <- psych::fa(data, nfactors = 5) %>% model_parameters(sort = TRUE, threshold = "max") efa ``` As we can see, the 25 items nicely spread on the 5 latent factors, the famous **big 5**. Based on this model, we can now predict back the scores for each individual for these new variables: ```{r message=FALSE, warning=FALSE, eval=FALSE} predict(efa, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Opennness")) ``` ```{r message=FALSE, warning=FALSE, echo=FALSE} head(predict(efa, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Opennness")), 5) ``` ## How many factors to retain in Factor Analysis (FA) When running a **factor analysis (FA)**, one often needs to specify **how many components** (or latent variables) to retain or to extract. This decision is often motivated or supported by some statistical indices and procedures aiming at finding the optimal number of factors. Interestingly, a huge amount of methods exist to statistically address this issue, giving sometimes very different results... Unfortunately, there is no consensus on **which method to use**, or which is the best. ### The Method Agreement procedure The Method Agreement procedure, first implemented in the [`psycho`](https://neuropsychology.github.io/psycho.R/2018/05/24/n_factors.html) package [@makowski2018psycho], proposes to rely on the consensus of methods, rather than on one method in particular. This procedure can be easily used via the `n_factors()` function, re-implemented and improved in the [**parameters**](https://github.com/easystats/parameters) package. One can provide a dataframe, and the function will run a large number of routines and return the optimal number of factors based on the higher consensus. ```{r message=FALSE, warning=FALSE} n <- n_factors(data) n ``` Interestingly, the smallest nubmer of factors that most methods suggest is 6... Which is consistent whith the newer models of personality (e.g., HEXACO). More details, as well as a summary table can be obtained as follows: ```{r message=FALSE, warning=FALSE} as.data.frame(n) summary(n) ``` A plot can also be obtained (the `see` package must be loaded): ```{r message=FALSE, warning=FALSE} library(see) plot(n) + theme_modern() ``` ## Confirmatory Factor Analysis (CFA) We've seen above that while an EFA with 5 latent variables works great on our dataset, a structure with 6 latent factors might in fact be more appropriate. How can we **statistically test** if that's actually the case? This can be done using Confirmatory Factor Analysis (CFA), that bridges factor analysis with Structural Equation Modelling (SEM). However, in order to do that cleanly, EFA should be independent from CFA, in the sense that the factor structure should be explored on a **"training" set**, and then tested (or "confirmed") on a **test set**. In other words, the dataset used for exploration and confirmation is not the same. 
Note that this procedure is also standard in the field of machine learning. ### Partition the data The data can be easily split into two sets with the `data_partition()` function, through which we will use 70\% of the sample as training and the rest as test. ```{r message=FALSE, warning=FALSE} partitions <- data_partition(data, training_proportion = 0.7) training <- partitions$training test <- partitions$test ``` ### Create CFA structures out of EFA models In the next step, we will run two EFA models on the training set, specifying 5 and 6 latent factors respectively, that we will then transform into CFA structures. ```{r message=FALSE, warning=FALSE} structure_big5 <- psych::fa(training, nfactors = 5) %>% efa_to_cfa() structure_big6 <- psych::fa(training, nfactors = 6) %>% efa_to_cfa() # Investigate how a model looks structure_big5 ``` As we can see, a structure is just a string encoding how the **manifest variables** (the observed variables) are integrated into latent variables. ### Fit and Compare models We can finally with that structure to the test set using the `lavaan` package, and compare these models together: ```{r message=FALSE, warning=FALSE} library(lavaan) library(performance) big5 <- lavaan::cfa(structure_big5, data = test) big6 <- lavaan::cfa(structure_big6, data = test) performance::compare_performance(big5, big6) ``` All in all, it seems that the big 5 structure remains quite reliable. # References parameters/inst/doc/model_parameters.R0000644000176200001440000001002213620044002017567 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') options(digits = 2) knitr::opts_chunk$set(comment = ">") if (!requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("metafor", quietly = TRUE) || !requireNamespace("lavaan", quietly = TRUE) || !requireNamespace("brms", quietly = TRUE) || !requireNamespace("psych", quietly = TRUE) || !requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("FactoMineR", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(parameters) library(dplyr) } set.seed(333) ## ---- warning=FALSE, message=FALSE-------------------------------------------- cor.test(iris$Sepal.Length, iris$Sepal.Width) %>% parameters() ## ---- warning=FALSE, message=FALSE-------------------------------------------- t.test(mpg ~ vs, data = mtcars) %>% parameters() ## ---- warning=FALSE, message=FALSE-------------------------------------------- library(BayesFactor) BayesFactor::correlationBF(iris$Sepal.Length, iris$Sepal.Width) %>% parameters() ## ---- warning=FALSE, message=FALSE-------------------------------------------- BayesFactor::ttestBF(formula = mpg ~ vs, data = mtcars) %>% parameters() ## ---- warning=FALSE, message=FALSE-------------------------------------------- aov(Sepal.Length ~ Species, data = iris) %>% parameters(omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE) ## ---- warning=FALSE, message=FALSE-------------------------------------------- aov(mpg ~ am + Error(gear), data = mtcars) %>% parameters() ## ---- warning=FALSE, message=FALSE-------------------------------------------- glm(vs ~ poly(mpg, 2) + cyl, data = mtcars) %>% parameters() ## ---- warning=FALSE, message=FALSE-------------------------------------------- library(lme4) lmer(Sepal.Width ~ Petal.Length + (1|Species), data = iris) %>% parameters() ## ---- 
## ---- warning=FALSE, message=FALSE, eval = FALSE------------------------------
#  library(rstanarm)
#
#  stan_glm(mpg ~ wt * cyl, data = mtcars) %>%
#    parameters()

## ---- warning=FALSE, message=FALSE, echo = FALSE------------------------------
library(rstanarm)

stan_glm(mpg ~ wt * cyl, data = mtcars, iter = 500, chains = 2, refresh = 0) %>%
  parameters()

## ---- warning=FALSE, message=FALSE--------------------------------------------
library(brms)

data(fish)

set.seed(123)
model <- brm(bf(
    count ~ persons + child + camper + (1 | persons),
    zi ~ child + camper + (1 | persons)
  ),
  data = fish,
  family = zero_inflated_poisson(),
  iter = 500,
  chains = 1,
  refresh = 0
)

parameters(model, component = "conditional")

parameters(model, effects = "all", component = "all")

## ---- warning=FALSE, message=FALSE--------------------------------------------
library(psych)

psych::pca(mtcars, nfactors = 3) %>%
  parameters()

## ---- warning=FALSE, message=FALSE, eval = FALSE------------------------------
#  library(FactoMineR)
#
#  FactoMineR::FAMD(iris, ncp = 3) %>%
#    parameters()

## ---- warning=FALSE, message=FALSE, echo = FALSE------------------------------
library(FactoMineR)

FactoMineR::FAMD(iris, ncp = 3, graph = FALSE) %>%
  parameters()

## ---- warning=FALSE, message=FALSE--------------------------------------------
library(lavaan)

model <- lavaan::cfa('
  visual =~ x1 + x2 + x3
  textual =~ x4 + x5 + x6
  speed =~ x7 + x8 + x9
', data = HolzingerSwineford1939)

model_parameters(model)

## ---- warning=FALSE, message=FALSE--------------------------------------------
library(metafor)

mydat <- data.frame(
  effectsize = c(-0.393, 0.675, 0.282, -1.398),
  standarderror = c(0.317, 0.317, 0.13, 0.36)
)

rma(yi = effectsize, sei = standarderror, method = "REML", data = mydat) %>%
  model_parameters()
parameters/inst/doc/model_parameters_standardized.Rmd0000644000176200001440000001247413611655362022672 0ustar liggesusers---
title: "Standardized Model Parameters"
output:
  github_document:
    toc: true
    fig_width: 10.08
    fig_height: 6
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, parameters, variable selection, feature selection]
vignette: >
  %\VignetteIndexEntry{Standardized Model Parameters}
  \usepackage[utf8]{inputenc}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
options(digits = 2)
knitr::opts_chunk$set(comment = ">")

if (!requireNamespace("dplyr", quietly = TRUE) ||
    !requireNamespace("lme4", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
} else {
  library(parameters)
  library(dplyr)
}

set.seed(333)
```

The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function (also accessible via the shortcut `parameters()`) can also be used to calculate standardized model parameters, via the `standardize`-argument. There are different methods of standardizing model parameters: `"refit"`, `"posthoc"`, `"smart"` and `"basic"` (see [`?effectsize::standardize_parameters`](https://easystats.github.io/effectsize/reference/standardize_parameters.html) for further details).

## Standardization by re-fitting the model

`standardize = "refit"` is based on a complete model re-fit with a standardized version of the data. Hence, this method is equal to standardizing the variables before fitting the model. It is the most accurate method (Neter et al., 1989), but also the most computationally costly and slow (especially for heavy models, such as Bayesian models). This method is particularly recommended for complex models that include interactions or transformations (e.g., polynomial or spline terms).

When `standardize = "refit"`, `model_parameters()` internally calls [`effectsize::standardize()`](https://easystats.github.io/effectsize/reference/standardize.html) to standardize the data that was used to fit the model, and updates the model with the standardized data. Note that `effectsize::standardize()` tries to detect which variables should be standardized and which should not. For instance, having a `log(x)` term in the model formula excludes `x` from standardization, because a standardized `x` would take negative values, for which `log(x)` is not defined. Factors will also not be standardized. Response variables will be standardized, if appropriate.
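As a small illustration of this detection heuristic, consider the following sketch (a hypothetical model, output omitted): `wt` appears inside `log()`, and should therefore be left on its raw scale when the model is re-fit.

```{r eval=FALSE}
# Hypothetical example: 'wt' appears inside log(), so it should be detected
# as non-standardizable and kept on its raw scale, while 'cyl' and the
# response 'mpg' are standardized
m <- lm(mpg ~ log(wt) + cyl, data = mtcars)
model_parameters(m, standardize = "refit")
```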
```{r}
library(lme4)

data(iris)
set.seed(1234)
iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE))

# fit example model
model <- lme4::lmer(
  Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp),
  data = iris
)

# classic model parameters
model_parameters(model)

# standardized model parameters
model_parameters(model, standardize = "refit")
```

The second output is identical to the following:

```{r}
# standardize continuous variables manually
model2 <- lme4::lmer(
  scale(Sepal.Length) ~ Species * scale(Sepal.Width) + scale(Petal.Length) + (1 | grp),
  data = iris
)
model_parameters(model2)
```

## Post-hoc standardization

`standardize = "posthoc"` aims at emulating the results obtained by `"refit"` without refitting the model. The coefficients are divided by the standard deviation of the outcome (which becomes the 'unit' in which they are expressed). Then, the coefficients related to numeric variables are additionally multiplied by the standard deviation of the related terms, so that they correspond to changes of 1 SD of the predictor (e.g., "a change of 1 SD in x is related to a change of 0.24 SD in y"). This does not apply to binary variables or factors, whose coefficients remain related to changes in levels.

This method is not accurate and tends to give aberrant results when interactions are specified. However, this method of standardization is the "classic" result obtained by many statistical packages when standardized coefficients are requested.

When `standardize = "posthoc"`, `model_parameters()` internally calls [`effectsize::standardize_parameters(method = "posthoc")`](https://easystats.github.io/effectsize/reference/standardize_parameters.html). Test statistics and p-values are not affected, i.e., they are the same as if no standardization were applied.

```{r}
model_parameters(model, standardize = "posthoc")
```
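To make the arithmetic concrete, here is a minimal sketch reproducing post-hoc standardization by hand, for a simple (hypothetical) linear model with numeric predictors only:

```{r eval=FALSE}
# Hypothetical example: post-hoc standardization "by hand"
m <- lm(mpg ~ wt + hp, data = mtcars)

# raw coefficients, dropping the intercept
b <- coef(m)[-1]

# multiply by the SD of each predictor, divide by the SD of the outcome
b * sapply(mtcars[c("wt", "hp")], sd) / sd(mtcars$mpg)

# this should match (up to rounding):
model_parameters(m, standardize = "posthoc")
```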
`standardize = "basic"` also applies post-hoc standardization; however, factors are converted to numeric, which means that the coefficients of factor levels (converted to integers) and of binary predictors are also scaled by the standard deviation of the corresponding column of the model matrix.

```{r}
model_parameters(model, standardize = "basic")
```

## Smart standardization

`standardize = "smart"` is similar to `standardize = "posthoc"` in that it does not involve model re-fitting. The difference is that the SD of the response is computed on the relevant section of the data. For instance, if a factor with 3 levels A (the intercept), B and C is entered as a predictor, the effect corresponding to B vs. A will be scaled by the standard deviation of the response at the intercept only. As a result, the coefficients for effects of factors are similar to Glass' delta.

```{r}
model_parameters(model, standardize = "smart")
```
parameters/inst/CITATION0000644000176200001440000000105613600122561014525 0ustar liggesusersbibentry(
  bibtype = "Article",
  title = "Describe and understand your model’s parameters",
  author = c(
    person("Dominique", "Makowski"),
    person("Mattan S.", "Ben-Shachar"),
    person("Daniel", "Lüdecke")
  ),
  journal = "CRAN",
  year = "2019",
  note = "R package",
  url = "https://github.com/easystats/parameters",
  textVersion = paste(
    "Makowski, Ben-Shachar & Lüdecke (2019). Describe and understand your model’s parameters. CRAN.",
    "Available from https://github.com/easystats/parameters."
  ),
  mheader = "To cite parameters in publications use:"
)
parameters/inst/WORDLIST0000644000176200001440000000355213616322142014567 0ustar liggesusers’ Asparouhov al Alonso Ameijeiras analysing Analysing anova aov APA ateucher Bafumi BayesFactor Bentler Bezdek Biometrics Biometrika blavaan BMC boostrapped brglm brms canberra Carle Casal Catran Cattell Cattell's centred CFA cgam cglm ci clubSandwich clusterable clusterstructure cMDS CNG codecov complmrob computationaly condifence coxph cplm CrossValidated Crujeiras Davison de De derivating DirichletReg df diw DIW diwwpp DoF DoFs doi dp DRR easystats EFA effectsize Elff Epskamp et exponentiate Fairbrother FastICA Fidell fixest flexsurv flexsurvreg focussing gam gamm Garrido gaussianity Gelman Giesselmann github githuband GLMMadaptive glmmADMB glmmTMB glmx Golino Gorsuch HC HDI Heisig heteroskedasticity HEXACO Hinkley Hoffmann Hofman Hofmann Hornik http https hyperspectral ICA ifelse IJCNN interpretable iteratively Joanes joss jstatsoft Jurs Kenward kmeans kmo KMO Laparra lavaan Lawley leptokurtic lm lme lmer loadings Maechler Malo meaned meritourious Merkle mesokurtic metafor Minitab minkowski mixor modelled modelling Modelling MSA multicollinearity multinom Neter Nieto nlme nnet nubmer occured occurence Olkin PCoA perfoms performant Pettersson platykurtic PloS PLOS projpred pscl PSU Psychometrika Recode repec rescale rescaled Rescale rescaling Rescaling Revelle rlm Rocklin Rodríguez ROPE's Rosseel Rousseeuw Routledge rstanarm Sadana Satterthwaite Satterthwaite's SBC Schaeffer SEM SEs Shachar Shi Shikano sphericity Sphericity stackexchange stanreg statitics Struyf substracted subscales Tabachnick tailedness th theorethical Thiyagarajan Turkheimer untransformed Valls varimax Velicer VGAM Visualisation VSS Waggoner wald whith wikipedia Wisenbaker www Zoski