effectsize/NAMESPACE
# Generated by roxygen2: do not edit by hand
S3method(change_scale,data.frame)
S3method(change_scale,factor)
S3method(change_scale,grouped_df)
S3method(change_scale,numeric)
S3method(cohens_f,anova)
S3method(cohens_f,aov)
S3method(cohens_f,aovlist)
S3method(cohens_f,glm)
S3method(cohens_f,lm)
S3method(epsilon_squared,anova)
S3method(epsilon_squared,aov)
S3method(epsilon_squared,aovlist)
S3method(epsilon_squared,glm)
S3method(epsilon_squared,lm)
S3method(epsilon_squared,merMod)
S3method(eta_squared,anova)
S3method(eta_squared,aov)
S3method(eta_squared,aovlist)
S3method(eta_squared,glm)
S3method(eta_squared,lm)
S3method(eta_squared,merMod)
S3method(interpret_parameters,lm)
S3method(normalize,data.frame)
S3method(normalize,factor)
S3method(normalize,grouped_df)
S3method(normalize,numeric)
S3method(odds_to_probs,data.frame)
S3method(odds_to_probs,double)
S3method(odds_to_probs,numeric)
S3method(omega_squared,anova)
S3method(omega_squared,aov)
S3method(omega_squared,aovlist)
S3method(omega_squared,glm)
S3method(omega_squared,lm)
S3method(omega_squared,merMod)
S3method(print,rules)
S3method(probs_to_odds,data.frame)
S3method(probs_to_odds,double)
S3method(probs_to_odds,numeric)
S3method(ranktransform,data.frame)
S3method(ranktransform,factor)
S3method(ranktransform,grouped_df)
S3method(ranktransform,numeric)
S3method(standardize,AsIs)
S3method(standardize,LORgee)
S3method(standardize,MixMod)
S3method(standardize,Surv)
S3method(standardize,betareg)
S3method(standardize,biglm)
S3method(standardize,bracl)
S3method(standardize,brmsfit)
S3method(standardize,brmultinom)
S3method(standardize,censReg)
S3method(standardize,cgam)
S3method(standardize,cglm)
S3method(standardize,character)
S3method(standardize,clm)
S3method(standardize,clm2)
S3method(standardize,complmrob)
S3method(standardize,coxme)
S3method(standardize,coxph)
S3method(standardize,cpglm)
S3method(standardize,cpglmm)
S3method(standardize,crch)
S3method(standardize,crq)
S3method(standardize,data.frame)
S3method(standardize,factor)
S3method(standardize,feis)
S3method(standardize,fixest)
S3method(standardize,flexsurvreg)
S3method(standardize,gee)
S3method(standardize,geeglm)
S3method(standardize,glmRob)
S3method(standardize,glmmTMB)
S3method(standardize,glmmadmb)
S3method(standardize,glmrob)
S3method(standardize,gls)
S3method(standardize,grouped_df)
S3method(standardize,hurdle)
S3method(standardize,iv_robust)
S3method(standardize,ivreg)
S3method(standardize,lm)
S3method(standardize,lmRob)
S3method(standardize,lm_robust)
S3method(standardize,lme)
S3method(standardize,lmrob)
S3method(standardize,logical)
S3method(standardize,logistf)
S3method(standardize,lrm)
S3method(standardize,merMod)
S3method(standardize,mixor)
S3method(standardize,mlm)
S3method(standardize,negbin)
S3method(standardize,nlrq)
S3method(standardize,numeric)
S3method(standardize,ols)
S3method(standardize,plm)
S3method(standardize,psm)
S3method(standardize,rms)
S3method(standardize,rq)
S3method(standardize,speedglm)
S3method(standardize,speedlm)
S3method(standardize,stanreg)
S3method(standardize,tobit)
S3method(standardize,truncreg)
S3method(standardize,vglm)
S3method(standardize,wbgee)
S3method(standardize,wbm)
S3method(standardize,zerocount)
S3method(standardize,zeroinfl)
export(F_to_d)
export(F_to_epsilon2)
export(F_to_eta2)
export(F_to_eta2_adj)
export(F_to_omega2)
export(F_to_r)
export(adjust)
export(change_scale)
export(chisq_to_cramers_v)
export(chisq_to_phi)
export(cohens_d)
export(cohens_f)
export(convert_F_to_d)
export(convert_F_to_r)
export(convert_chisq_to_cramers_v)
export(convert_chisq_to_phi)
export(convert_d_to_odds)
export(convert_d_to_percentage)
export(convert_d_to_r)
export(convert_odds_to_d)
export(convert_odds_to_probs)
export(convert_odds_to_r)
export(convert_percentage_to_d)
export(convert_percentile_to_z)
export(convert_phi_to_chisq)
export(convert_posteriors_to_r)
export(convert_posteriors_to_t)
export(convert_probs_to_odds)
export(convert_r_to_d)
export(convert_r_to_odds)
export(convert_r_to_t)
export(convert_r_to_z)
export(convert_t_to_d)
export(convert_t_to_r)
export(convert_z_to_d)
export(convert_z_to_percentile)
export(convert_z_to_r)
export(d_to_odds)
export(d_to_percentage)
export(d_to_r)
export(epsilon_squared)
export(eta_squared)
export(eta_squared_adj)
export(format_standardize)
export(glass_delta)
export(hedges_g)
export(interpret)
export(interpret_agfi)
export(interpret_bf)
export(interpret_cfi)
export(interpret_d)
export(interpret_delta)
export(interpret_direction)
export(interpret_ess)
export(interpret_g)
export(interpret_gfi)
export(interpret_ifi)
export(interpret_nfi)
export(interpret_nnfi)
export(interpret_odds)
export(interpret_omega_squared)
export(interpret_p)
export(interpret_parameters)
export(interpret_pnfi)
export(interpret_r)
export(interpret_r2)
export(interpret_rfi)
export(interpret_rhat)
export(interpret_rmsea)
export(interpret_rope)
export(interpret_srmr)
export(is.rules)
export(mad_pooled)
export(normalize)
export(odds_to_d)
export(odds_to_probs)
export(odds_to_r)
export(omega_squared)
export(percentage_to_d)
export(percentile_to_z)
export(phi_to_chisq)
export(posteriors_to_r)
export(posteriors_to_t)
export(probs_to_odds)
export(r_to_d)
export(r_to_odds)
export(r_to_t)
export(r_to_z)
export(ranktransform)
export(rules)
export(sd_pooled)
export(standardize)
export(standardize_info)
export(standardize_parameters)
export(standardize_posteriors)
export(t_to_d)
export(t_to_epsilon2)
export(t_to_eta2)
export(t_to_eta2_adj)
export(t_to_omega2)
export(t_to_r)
export(z_to_d)
export(z_to_percentile)
export(z_to_r)
importFrom(bayestestR,describe_posterior)
importFrom(insight,find_formula)
importFrom(insight,find_response)
importFrom(insight,find_terms)
importFrom(insight,find_weights)
importFrom(insight,format_value)
importFrom(insight,get_data)
importFrom(insight,get_parameters)
importFrom(insight,get_response)
importFrom(insight,model_info)
importFrom(parameters,ci)
importFrom(parameters,model_parameters)
importFrom(parameters,parameters_type)
importFrom(parameters,standard_error)
importFrom(stats,anova)
importFrom(stats,aov)
importFrom(stats,as.formula)
importFrom(stats,complete.cases)
importFrom(stats,cor)
importFrom(stats,lm)
importFrom(stats,mad)
importFrom(stats,median)
importFrom(stats,model.frame)
importFrom(stats,model.matrix)
importFrom(stats,na.omit)
importFrom(stats,pf)
importFrom(stats,pnorm)
importFrom(stats,predict)
importFrom(stats,qf)
importFrom(stats,qnorm)
importFrom(stats,quantile)
importFrom(stats,residuals)
importFrom(stats,sd)
importFrom(stats,update)
importFrom(stats,var)
importFrom(utils,capture.output)
importFrom(utils,head)
importFrom(utils,tail)
effectsize/README.md
# effectsize
[](https://cran.r-project.org/package=effectsize)
[](https://cran.r-project.org/package=effectsize)
[](https://travis-ci.org/easystats/effectsize)
[](https://codecov.io/gh/easystats/effectsize)
***Size does matter***
The goal of this package is to provide utilities to work with indices of
effect size and standardized parameters, allowing computation and
conversion of indices such as Cohen’s *d*, *r*, odds-ratios, etc.
## Installation
Run the following to install the latest development version from GitHub:
``` r
install.packages("devtools")
devtools::install_github("easystats/effectsize")
```
``` r
library("effectsize")
```
## Documentation
[](https://easystats.github.io/effectsize/)
[](https://easystats.github.io/blog/posts/)
[](https://easystats.github.io/effectsize/reference/index.html)
Click on the buttons above to access the package
[**documentation**](https://easystats.github.io/effectsize/) and the
[**easystats blog**](https://easystats.github.io/blog/posts/), and
check out these vignettes:
- [**Data
Standardization**](https://easystats.github.io/effectsize/articles/standardize_data.html)
- [**Parameters
Standardization**](https://easystats.github.io/effectsize/articles/standardize_parameters.html)
- [**Automated Interpretation of Indices of Effect
Size**](https://easystats.github.io/effectsize/articles/interpret.html)
- [**Effect size
conversion**](https://easystats.github.io/effectsize/articles/convert.html)
# Features
This package is focused on indices of effect size. But **there are
hundreds of them\! Thus, *everybody* is welcome to contribute** by
adding support for the interpretation of new indices. If you’re not sure
how to code it, that’s okay: just open an issue to discuss it and we’ll
help :)
## Effect Size Computation
### Basic Indices (Cohen’s *d*, Hedges’ *g*, Glass’ *delta*)
The package provides functions to compute indices of effect size.
``` r
cohens_d(iris$Sepal.Length, iris$Sepal.Width)
## [1] -4.21
hedges_g(iris$Sepal.Length, iris$Sepal.Width)
## [1] -4.2
glass_delta(iris$Sepal.Length, iris$Sepal.Width)
## [1] -3.36
```
### ANOVAs (Eta2, Omega2, …)
``` r
model <- aov(Sepal.Length ~ Species, data = iris)
omega_squared(model, partial = TRUE)
## Parameter Omega_Sq_partial
## 1 Species 0.612
## 2 Residuals NA
eta_squared(model, partial = TRUE)
## Parameter Eta_Sq_partial
## 1 Species 0.619
## 2 Residuals NA
epsilon_squared(model)
## Parameter Epsilon_sq
## 1 Species 0.614
## 2 Residuals NA
cohens_f(model)
## Parameter Cohens_f
## 1 Species 1.27
## 2 Residuals NA
```
### Regression Models
Importantly, `effectsize` also provides [advanced
methods](https://easystats.github.io/effectsize/articles/standardize_parameters.html)
to compute standardized parameters for regression models.
``` r
model <- lm(Sepal.Length ~ Species, data = iris)
standardize_parameters(model)
```
| Parameter | Std\_Coefficient |
| :---------------- | ---------------: |
| (Intercept) | \-1.01 |
| Speciesversicolor | 1.12 |
| Speciesvirginica | 1.91 |
## Effect Size Interpretation
The package allows for an automated interpretation of different indices.
``` r
interpret_r(r = 0.3)
## [1] "large"
```
Different sets of “rules of thumb” are implemented ([**guidelines are
detailed
here**](https://easystats.github.io/effectsize/articles/interpret.html))
and can be easily changed.
``` r
interpret_d(d = 0.45, rules = "cohen1988")
## [1] "small"
interpret_d(d = 0.45, rules = "funder2019")
## [1] "medium"
```
## Effect Size Conversion
The package also provides ways of converting between different effect
sizes.
``` r
convert_d_to_r(d = 1)
## [1] 0.447
```
## Standardization
Many indices of effect size stem from, or are related to,
[*standardization*](https://easystats.github.io/effectsize/articles/standardize_parameters.html).
Accordingly, `effectsize` provides functions to standardize both data and
models.
### Data standardization, normalization and rank-transformation
A standardization sets the mean and SD to 0 and 1:
``` r
library(parameters)
df <- standardize(iris)
describe_distribution(df$Sepal.Length)
```
| Mean | SD | Min | Max | Skewness | Kurtosis | n | n\_Missing |
| ---: | -: | ----: | --: | -------: | -------: | --: | ---------: |
| 0 | 1 | \-1.9 | 2.5 | 0.3 | \-0.6 | 150 | 0 |
This can be also applied to statistical models:
``` r
std_model <- standardize(lm(Sepal.Length ~ Species, data = iris))
coef(std_model)
## (Intercept) Speciesversicolor Speciesvirginica
## -1.01 1.12 1.91
```
Alternatively, normalization is similar to standardization in that it is
a linear translation of the parameter space (i.e., it does not change
the shape of the data distribution). However, it puts the values within
a 0 - 1 range, which can be useful in cases where you want to compare or
visualise data on the same scale.
``` r
df <- normalize(iris)
describe_distribution(df$Sepal.Length)
```
| Mean | SD | Min | Max | Skewness | Kurtosis | n | n\_Missing |
| ---: | --: | --: | --: | -------: | -------: | --: | ---------: |
| 0.4 | 0.2 | 0 | 1 | 0.3 | \-0.6 | 150 | 0 |
This is a special case of a rescaling function, which can be used to
rescale the data to an arbitrary new scale. Let’s change all numeric
variables to “percentages”:
``` r
df <- change_scale(iris, to = c(0, 100))
describe_distribution(df$Sepal.Length)
```
| Mean | SD | Min | Max | Skewness | Kurtosis | n | n\_Missing |
| ---: | -: | --: | --: | -------: | -------: | --: | ---------: |
| 42.9 | 23 | 0 | 100 | 0.3 | \-0.6 | 150 | 0 |
For some robust statistics, one might also want to transform the numeric
values into *ranks* (or signed-ranks), which can be performed using the
`ranktransform()` function.
``` r
ranktransform(c(1, 3, -2, 6, 6, 0))
## [1] 3.0 4.0 1.0 5.5 5.5 2.0
```
effectsize/man/t_to_r.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_tFz_to_d.R, R/convert_tFz_to_r.R
\name{t_to_d}
\alias{t_to_d}
\alias{convert_t_to_d}
\alias{z_to_d}
\alias{convert_z_to_d}
\alias{F_to_d}
\alias{convert_F_to_d}
\alias{t_to_r}
\alias{r_to_t}
\alias{z_to_r}
\alias{r_to_z}
\alias{F_to_r}
\alias{convert_t_to_r}
\alias{convert_r_to_t}
\alias{convert_z_to_r}
\alias{convert_r_to_z}
\alias{convert_F_to_r}
\title{Convert test statistics (t, z, F) to effect sizes of differences (Cohen's d) or association (\strong{partial} r)}
\usage{
t_to_d(t, df_error, pooled = FALSE, ...)
convert_t_to_d(t, df_error, pooled = FALSE, ...)
z_to_d(z, n, ...)
convert_z_to_d(z, n, ...)
F_to_d(f, df, df_error, pooled = FALSE, ...)
convert_F_to_d(f, df, df_error, pooled = FALSE, ...)
t_to_r(t, n = NULL, df_error = NULL, ...)
r_to_t(r, n = NULL, df_error = NULL, ...)
z_to_r(z, n = NULL, df_error = NULL, ...)
r_to_z(r, n = NULL, df_error = NULL, ...)
F_to_r(f, df, df_error = NULL, n = NULL, ...)
convert_t_to_r(t, n = NULL, df_error = NULL, ...)
convert_r_to_t(r, n = NULL, df_error = NULL, ...)
convert_z_to_r(z, n = NULL, df_error = NULL, ...)
convert_r_to_z(r, n = NULL, df_error = NULL, ...)
convert_F_to_r(f, df, df_error = NULL, n = NULL, ...)
}
\arguments{
\item{t, f, z}{The t, the F or the z statistics.}
\item{pooled}{Should the estimate account for the t-value being based on a repeated-measures design, or not (the default)?}
\item{...}{Arguments passed to or from other methods.}
\item{n}{The number of observations (the sample size).}
\item{df, df_error}{Degrees of freedom of the numerator or of the error estimate (i.e., the residuals).}
\item{r}{The correlation coefficient r.}
}
\value{
A numeric value of the requested effect size.
}
\description{
These functions are convenience functions to convert t, z and F test statistics to Cohen's d and
\strong{partial} r. These are useful in cases where the data required to compute these are not easily
available or their computation is not straightforward (e.g., in linear mixed models, contrasts, etc.).
}
\details{
These functions use the following formulae:
\cr\cr
\deqn{r_{partial} = t / \sqrt{t^2 + df_{error}}}
\cr\cr
\deqn{r_{partial} = z / \sqrt{z^2 + N}}
\cr\cr
\deqn{Cohen's d = 2 * t / \sqrt{df_{error}}}
\cr\cr
\deqn{Cohen's d_z = t / \sqrt{df_{error}}}
\cr\cr
\deqn{Cohen's d = 2 * z / \sqrt{N}}
}
\examples{
## t Tests
res <- t.test(1:10, y = c(7:20), var.equal = TRUE)
t_to_d(t = res$statistic, res$parameter)
t_to_r(t = res$statistic, res$parameter)
res <- with(sleep, t.test(extra[group == 1], extra[group == 2], paired = TRUE))
t_to_d(t = res$statistic, res$parameter, pooled = TRUE)
t_to_r(t = res$statistic, res$parameter)
res <- cor.test(iris$Sepal.Width, iris$Petal.Width)
t_to_r(t = res$statistic, n = 150)
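# A manual check of the partial-r formula given in 'Details'
# (r = t / sqrt(t^2 + df_error)); 'res' here is the correlation test above:
unname(res$statistic / sqrt(res$statistic^2 + res$parameter))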
\donttest{
## Linear Regression
model <- lm(Sepal.Length ~ Sepal.Width + Petal.Length, data = iris)
library(parameters)
(param_tab <- parameters(model))
# > Parameter | Coefficient | SE | 95\% CI | t | df | p
# > -----------------------------------------------------------------------
# > (Intercept) | 2.25 | 0.25 | [1.76, 2.74] | 9.07 | 147 | < .001
# > Sepal.Width | 0.60 | 0.07 | [0.46, 0.73] | 8.59 | 147 | < .001
# > Petal.Length | 0.47 | 0.02 | [0.44, 0.51] | 27.57 | 147 | < .001
t_to_r(param_tab$t[2:3], param_tab$df_error[2:3])
# > [1] 0.5781005 0.9153894
}
# How does this compare to actual partial correlations?
if (require("ppcor")) {
pcor(iris[1:3])$estimate[1, -1]
}
}
\references{
\itemize{
\item Friedman, H. (1982). Simplified determinations of statistical power, magnitude of effect and research sample sizes. Educational and Psychological Measurement, 42(2), 521-526. \doi{10.1177/001316448204200214}
\item Wolf, F. M. (1986). Meta-analysis: Quantitative methods for research synthesis (Vol. 59). Sage.
\item Rosenthal, R. (1991). Meta-analytic procedures for social research. Newbury Park, CA: SAGE Publications, Incorporated.
}
}
effectsize/man/standardize.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize.R, R/standardize.data.frame.R,
% R/standardize.models.R
\name{standardize}
\alias{standardize}
\alias{standardize.data.frame}
\alias{standardize.lm}
\title{Standardization (Z-scoring)}
\usage{
standardize(x, ...)
\method{standardize}{data.frame}(
x,
robust = FALSE,
two_sd = FALSE,
select = NULL,
exclude = NULL,
verbose = TRUE,
force = FALSE,
...
)
\method{standardize}{lm}(
x,
robust = FALSE,
two_sd = FALSE,
include_response = TRUE,
verbose = TRUE,
...
)
}
\arguments{
\item{x}{A dataframe, a vector or a statistical model.}
\item{...}{Arguments passed to or from other methods.}
\item{robust}{Logical, if \code{TRUE}, centering is done by subtracting the
median from the variables and dividing by the median absolute deviation
(MAD). If \code{FALSE}, variables are standardized by subtracting the
mean and dividing by the standard deviation (SD).}
\item{two_sd}{If \code{TRUE}, the variables are scaled by two times the deviation (SD or MAD depending on \code{robust}). This method can be useful to obtain model coefficients of continuous parameters comparable to coefficients related to binary predictors (Gelman, 2008).}
\item{select}{Character vector of column names. If \code{NULL} (the default), all variables will be selected.}
\item{exclude}{Character vector of column names to be excluded from selection.}
\item{verbose}{Toggle warnings on or off.}
\item{force}{Logical, if \code{TRUE}, forces standardization of factors as
well. Factors are converted to numerical values, with the lowest level
being the value \code{1} (unless the factor has numeric levels, which are
converted to the corresponding numeric value).}
\item{include_response}{For a model, if \code{TRUE} (default), the response value
will also be standardized. If \code{FALSE}, only the predictors will be standardized.
Note that for certain models (logistic regression, count models, ...), the
response value will never be standardized, to make re-fitting the model work.}
}
\value{
The standardized object (either a standardized data frame or a statistical model fitted on standardized data).
}
\description{
Performs a standardization of data (Z-scoring), i.e., centering and scaling, so that the data is expressed in terms of standard deviation (i.e., mean = 0, SD = 1) or Median Absolute Deviation (median = 0, MAD = 1). When applied to a statistical model, this function extracts the dataset, standardizes it, and refits the model with this standardized version of the dataset. The \code{\link{normalize}} function can also be used to scale all numeric variables within the 0 - 1 range.
}
\examples{
# Dataframes
summary(standardize(iris))
# Models
model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris)
coef(standardize(model))
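# Robust standardization, using the median and MAD as described above:
summary(standardize(iris, robust = TRUE))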
}
\seealso{
\code{\link{normalize}} \code{\link{standardize_parameters}}
}
effectsize/man/interpret_ess.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpret_bayesian_indices.R
\name{interpret_ess}
\alias{interpret_ess}
\alias{interpret_rhat}
\alias{interpret_rope}
\title{Bayesian indices interpretation}
\usage{
interpret_ess(ess, rules = "burkner2017")
interpret_rhat(rhat, rules = "vehtari2019")
interpret_rope(rope, ci = 0.9, rules = "default")
}
\arguments{
\item{ess}{Value or vector of Effective Sample Size (ESS) values.}
\item{rules}{A character string (see details) or a custom set of \code{\link{rules}}.}
\item{rhat}{Value or vector of Rhat values.}
\item{rope}{Value or vector of percentages in ROPE.}
\item{ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, that was used. Can be \code{1} in the case of "full ROPE".}
}
\description{
Interpretation of Bayesian indices, such as Effective Sample Size (ESS), Rhat, or percentage in ROPE.
}
\details{
\subsection{Rule sets:}{
\itemize{
\item \strong{ESS}: Can be "burkner2017" (default).
\item \strong{Rhat}: Can be "vehtari2019" (default) or "gelman1992".
\item \strong{ROPE}: Can be \href{https://easystats.github.io/bayestestR/articles/guidelines.html}{"default"}.
}}
}
\examples{
interpret_ess(1001)
interpret_ess(c(852, 1200))
interpret_rhat(1.00)
interpret_rhat(c(1.5, 0.9))
interpret_rope(0, ci = 0.9)
interpret_rope(c(0.005, 0.99), ci = 1)
}
\references{
\itemize{
\item Bürkner, P. C. (2017). brms: An R package for Bayesian multilevel models using Stan. Journal of Statistical Software, 80(1), 1-28.
\item Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. Statistical science, 7(4), 457-472.
\item Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., & Bürkner, P. C. (2019). Rank-normalization, folding, and localization: An improved Rhat for assessing convergence of MCMC. arXiv preprint arXiv:1903.08008.
\item \href{https://easystats.github.io/bayestestR/articles/guidelines.html}{BayestestR's reporting guidelines}
}
}
effectsize/man/interpret_gfi.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpret_fit.R
\name{interpret_gfi}
\alias{interpret_gfi}
\alias{interpret_agfi}
\alias{interpret_nfi}
\alias{interpret_nnfi}
\alias{interpret_cfi}
\alias{interpret_rmsea}
\alias{interpret_srmr}
\alias{interpret_rfi}
\alias{interpret_ifi}
\alias{interpret_pnfi}
\title{Interpretation of indices of fit}
\usage{
interpret_gfi(x, rules = "default")
interpret_agfi(x, rules = "default")
interpret_nfi(x, rules = "byrne1994")
interpret_nnfi(x, rules = "byrne1994")
interpret_cfi(x, rules = "default")
interpret_rmsea(x, rules = "default")
interpret_srmr(x, rules = "default")
interpret_rfi(x, rules = "default")
interpret_ifi(x, rules = "default")
interpret_pnfi(x, rules = "default")
}
\arguments{
\item{x}{vector of values.}
\item{rules}{Can be "default" or custom set of rules.}
}
\description{
Interpretation of indices of fit found in confirmatory analysis or structural equation modelling, such as RMSEA, CFI, NFI, IFI, etc.
}
\details{
\subsection{Indices of fit}{
\itemize{
\item \strong{Chisq}: The model Chi-squared assesses overall fit and the discrepancy between the sample and fitted covariance matrices. Its p-value should be > .05 (i.e., the hypothesis of a perfect fit cannot be rejected). However, it is quite sensitive to sample size.
\item \strong{GFI/AGFI}: The (Adjusted) Goodness of Fit is the proportion of variance accounted for by the estimated population covariance. Analogous to R2. The GFI and the AGFI should be > .95 and > .90, respectively.
\item \strong{NFI/NNFI/TLI}: The (Non) Normed Fit Index. An NFI of 0.95 indicates that the model of interest improves the fit by 95\% relative to the null model. The NNFI (also called the Tucker Lewis index; TLI) is preferable for smaller samples. They should be > .90 (Byrne, 1994) or > .95 (Schumacker & Lomax, 2004).
\item \strong{CFI}: The Comparative Fit Index is a revised form of NFI. Not very sensitive to sample size (Fan, Thompson, & Wang, 1999). Compares the fit of a target model to the fit of an independent, or null, model. It should be > .90.
\item \strong{RMSEA}: The Root Mean Square Error of Approximation is a parsimony-adjusted index. Values closer to 0 represent a good fit. It should be < .08 or < .05. The p-value printed with it tests the hypothesis that RMSEA is less than or equal to .05 (a cutoff sometimes used for good fit), and thus should be not significant.
\item \strong{RMR/SRMR}: the (Standardized) Root Mean Square Residual represents the square-root of the difference between the residuals of the sample covariance matrix and the hypothesized model. As the RMR can sometimes be hard to interpret, it is better to use the SRMR. Should be < .08.
\item \strong{RFI}: the Relative Fit Index, also known as RHO1, is not guaranteed to vary from 0 to 1. However, RFI close to 1 indicates a good fit.
\item \strong{IFI}: the Incremental Fit Index (IFI) adjusts the Normed Fit Index (NFI) for sample size and degrees of freedom (Bollen, 1989). Over 0.90 is a good fit, but the index can exceed 1.
\item \strong{PNFI}: the Parsimony-Adjusted Measures Index. There is no commonly agreed-upon cutoff value for an acceptable model for this index. Should be > 0.50.
}
See the documentation for \code{lavaan::fitmeasures}.
}
\subsection{What to report}{
For structural equation models (SEM), Kline (2015) suggests that at a minimum the following indices should be reported: The model \strong{chi-square}, the \strong{RMSEA}, the \strong{CFI} and the \strong{SRMR}.
}
}
\examples{
interpret_gfi(c(.5, .99))
interpret_agfi(c(.5, .99))
interpret_nfi(c(.5, .99))
interpret_nnfi(c(.5, .99))
interpret_cfi(c(.5, .99))
interpret_rmsea(c(.5, .99))
interpret_srmr(c(.5, .99))
interpret_rfi(c(.5, .99))
interpret_ifi(c(.5, .99))
interpret_pnfi(c(.5, .99))
}
\references{
\itemize{
\item Awang, Z. (2012). A handbook on SEM. Structural equation modeling.
\item Byrne, B. M. (1994). Structural equation modeling with EQS and EQS/Windows. Thousand Oaks, CA: Sage Publications.
\item Tucker, L. R., \& Lewis, C. (1973). The reliability coefficient for maximum likelihood factor analysis. Psychometrika, 38, 1-10.
\item Schumacker, R. E., \& Lomax, R. G. (2004). A beginner's guide to structural equation modeling, Second edition. Mahwah, NJ: Lawrence Erlbaum Associates.
\item Fan, X., B. Thompson, \& L. Wang (1999). Effects of sample size, estimation method, and model specification on structural equation modeling fit indexes. Structural Equation Modeling, 6, 56-83.
\item Kline, R. B. (2015). Principles and practice of structural equation modeling. Guilford publications.
}
}
effectsize/man/adjust.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adjust.R
\name{adjust}
\alias{adjust}
\title{Adjust data for the effect of other variable(s)}
\usage{
adjust(
data,
effect = NULL,
select = NULL,
exclude = NULL,
multilevel = FALSE,
additive = FALSE,
bayesian = FALSE
)
}
\arguments{
\item{data}{A dataframe.}
\item{effect}{Character vector of column names to be adjusted for (regressed out). If \code{NULL} (the default), all variables will be selected.}
\item{select}{Character vector of column names. If \code{NULL} (the default), all variables will be selected.}
\item{exclude}{Character vector of column names to be excluded from selection.}
\item{multilevel}{If \code{TRUE}, the factors are included as random factors. Else, if \code{FALSE} (default), they are included as fixed effects in the simple regression model.}
\item{additive}{If \code{TRUE}, continuous variables are included as smooth terms in additive models. The goal is to regress out potential non-linear effects.}
\item{bayesian}{If \code{TRUE}, the models are fitted under the Bayesian framework using \code{rstanarm}.}
}
\description{
This function can be used to adjust the data for the effect of other variables present in the dataset. It is based on the underlying fitting of regression models, allowing for considerable flexibility, such as including factors as random effects in mixed models (multilevel partialization), continuous variables as smooth terms in generalized additive models (non-linear partialization) and/or fitting these models under a Bayesian framework. The values returned by this function are the residuals of the regression models. Note that a regular correlation between two "adjusted" variables is equivalent to the partial correlation between them.
}
\examples{
adjust(iris, effect = "Species", select = "Sepal.Length")
\donttest{
adjust(iris, effect = "Species", select = "Sepal.Length", multilevel = TRUE)
adjust(iris, effect = "Species", select = "Sepal.Length", bayesian = TRUE)
adjust(iris, effect = "Petal.Width", select = "Sepal.Length", additive = TRUE)
adjust(iris, effect = "Petal.Width", select = "Sepal.Length",
additive = TRUE, bayesian = TRUE)
adjust(iris, effect = c("Petal.Width", "Species"), select = "Sepal.Length",
multilevel = TRUE, additive = TRUE)
adjust(iris)
}
}
effectsize/man/cohens_d.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cohens_d.R
\name{cohens_d}
\alias{cohens_d}
\alias{hedges_g}
\alias{glass_delta}
\title{Effect size for differences}
\usage{
cohens_d(
x,
y = NULL,
data = NULL,
correction = FALSE,
pooled_sd = TRUE,
paired = FALSE
)
hedges_g(
x,
y = NULL,
data = NULL,
correction = FALSE,
pooled_sd = TRUE,
paired = FALSE
)
glass_delta(x, y = NULL, data = NULL, correction = FALSE)
}
\arguments{
\item{x}{A continuous variable or a formula.}
\item{y}{A continuous variable, a factor with two groups or a formula.}
\item{data}{An optional data frame containing the variables.}
\item{correction}{If \code{TRUE}, applies a correction to the formula to make it less biased for small samples (McGrath & Meyer, 2006).}
\item{pooled_sd}{If \code{FALSE}, the regular SD from both combined groups is used instead of the \code{\link{sd_pooled}}.}
\item{paired}{If \code{TRUE}, the values of \code{x} and \code{y} are considered as paired.}
}
\description{
Compute different indices of effect size. For very small sample sizes (n < 20), Hedges' g is considered less biased than Cohen's d. For sample sizes > 20, the results for both statistics are roughly equivalent. Glass's delta is appropriate when standard deviations differ substantially between groups, as it uses only the control group's (\code{x}) standard deviation.
}
\examples{
cohens_d(iris$Sepal.Length, iris$Sepal.Width)
hedges_g("Sepal.Length", "Sepal.Width", data = iris)
glass_delta(Sepal.Length ~ Sepal.Width, data = iris)
cohens_d(iris$Sepal.Length, iris$Sepal.Width, correction = TRUE, pooled_sd = FALSE)
cohens_d(Sepal.Length ~ Species, data = iris[iris$Species \%in\% c("versicolor", "setosa"), ])
}
\references{
\itemize{
\item Cohen, J. (2013). Statistical power analysis for the behavioral sciences. Routledge.
\item McGrath, R. E., & Meyer, G. J. (2006). When effect sizes disagree: the case of r and d. Psychological methods, 11(4), 386.
\item Hedges, L. V. & Olkin, I. (1985). Statistical methods for meta-analysis. Orlando, FL: Academic Press.
}
}
effectsize/man/convert_z_to_percentile.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_percentile_to_z.R
\name{convert_z_to_percentile}
\alias{convert_z_to_percentile}
\alias{convert_percentile_to_z}
\alias{z_to_percentile}
\alias{percentile_to_z}
\title{Z score to Percentile}
\usage{
convert_z_to_percentile(z)
convert_percentile_to_z(percentile)
z_to_percentile(z)
percentile_to_z(percentile)
}
\arguments{
\item{z, percentile}{Z score or percentile.}
}
\description{
Convert between Z scores (values expressed in terms of standard deviations from the mean) and percentiles (the proportion of a normal distribution below a given value).
}
\examples{
z_to_percentile(1.96)
percentile_to_z(0.975)
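# These conversions correspond to the normal CDF and its inverse (a sketch):
pnorm(1.96)  # proportion of the normal distribution below z = 1.96
qnorm(0.975) # z score below which 97.5\% of the distribution lies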
}
effectsize/man/interpret_d.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpret_d.R
\name{interpret_d}
\alias{interpret_d}
\alias{interpret_g}
\alias{interpret_delta}
\title{Standardized difference interpretation}
\usage{
interpret_d(d, rules = "funder2019")
interpret_g(g, rules = "funder2019")
interpret_delta(delta, rules = "funder2019")
}
\arguments{
\item{d, g, delta}{Value or vector of effect size values.}
\item{rules}{Can be "funder2019" (default), "gignac2016", "cohen1988", "sawilowsky2009" or custom set of \code{\link{rules}}.}
}
\description{
Interpretation of indices using different sets of rules of thumb.
\href{https://easystats.github.io/report/articles/interpret_metrics.html#standardized-difference-d-cohens-d}{Click here} for details.
}
\examples{
interpret_d(.02)
interpret_d(c(.5, .02))
}
\references{
\itemize{
\item Funder, D. C., & Ozer, D. J. (2019). Evaluating effect size in psychological research: sense and nonsense. Advances in Methods and Practices in Psychological Science.
\item Gignac, G. E., & Szodorai, E. T. (2016). Effect size guidelines for individual differences researchers. Personality and individual differences, 102, 74-78.
\item Cohen, J. (1988). Statistical power analysis for the behavioural sciences.
\item Sawilowsky, S. S. (2009). New effect size rules of thumb.
}
}
effectsize/man/chisq_to_phi.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_chisq.R
\name{chisq_to_phi}
\alias{chisq_to_phi}
\alias{convert_chisq_to_phi}
\alias{phi_to_chisq}
\alias{convert_phi_to_chisq}
\alias{chisq_to_cramers_v}
\alias{convert_chisq_to_cramers_v}
\title{Conversion between Effect sizes for Contingency Tables (Chi2, Phi, Cramer's V...)}
\usage{
chisq_to_phi(chisq, n, ...)
convert_chisq_to_phi(chisq, n, ...)
phi_to_chisq(phi, n, ...)
convert_phi_to_chisq(phi, n, ...)
chisq_to_cramers_v(chisq, n, nrow, ncol, ...)
convert_chisq_to_cramers_v(chisq, n, nrow, ncol, ...)
}
\arguments{
\item{chisq}{The Chi2 statistic.}
\item{n}{Sample size.}
\item{...}{Arguments passed to or from other methods.}
\item{phi}{The Phi statistic.}
\item{nrow}{The number of rows in the contingency table.}
\item{ncol}{The number of columns in the contingency tables.}
}
\value{
A numeric value between 0-1.
}
\description{
Convert between the chi square statistic (\eqn{\chi^2}), phi (\eqn{\phi}) and Cramer's V.
}
\details{
These functions use the following formulae:
\cr\cr
\deqn{Cramer's V = \sqrt{\chi^2 / (n * (min(nrow,ncol)-1))}}
\cr\cr
\deqn{\phi = \sqrt{\chi^2 / n}}
}
\examples{
contingency_table <- as.table(rbind(c(762, 327, 468), c(484, 239, 477), c(484, 239, 477)))
chisq.test(contingency_table)
#
# Pearson's Chi-squared test
#
# data: contingency_table
# X-squared = 41.234, df = 4, p-value = 2.405e-08
chisq_to_phi(41.234, n = sum(contingency_table))
chisq_to_cramers_v(41.234,
n = sum(contingency_table),
nrow = nrow(contingency_table),
ncol = ncol(contingency_table)
)
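# Manual check against the formulae in 'Details':
sqrt(41.234 / sum(contingency_table))                      # phi
sqrt(41.234 / (sum(contingency_table) * (min(3, 3) - 1)))  # Cramer's V (3x3 table)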
}
effectsize/man/format_standardize.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_standardize.R
\name{format_standardize}
\alias{format_standardize}
\title{Transform a standardized vector into character}
\usage{
format_standardize(x, reference = x, robust = FALSE, digits = NULL, ...)
}
\arguments{
\item{x}{A standardized numeric vector.}
\item{reference}{The reference vector from which to compute the mean and SD.}
\item{robust}{Logical, if \code{TRUE}, centering is done by subtracting the
median from the variables and dividing by the median absolute deviation
(MAD). If \code{FALSE}, variables are standardized by subtracting the
mean and dividing by the standard deviation (SD).}
\item{digits}{Number of significant digits.}
\item{...}{Arguments passed to or from other methods.}
}
\description{
Transform a standardized vector into character, e.g., \code{c("-1 SD", "Mean", "+1 SD")}.
}
\examples{
format_standardize(c(-1, 0, 1))
format_standardize(c(-1, 0, 1, 2), reference = rnorm(1000))
format_standardize(c(-1, 0, 1, 2), reference = rnorm(1000), robust = TRUE)
}
effectsize/man/F_to_eta2.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_tF_to_pve.R
\name{F_to_eta2}
\alias{F_to_eta2}
\alias{t_to_eta2}
\alias{F_to_epsilon2}
\alias{t_to_epsilon2}
\alias{F_to_eta2_adj}
\alias{t_to_eta2_adj}
\alias{F_to_omega2}
\alias{t_to_omega2}
\title{Convert test statistics (F, t) to indices of \strong{partial} variance explained (\strong{partial} Eta / Omega / Epsilon squared)}
\usage{
F_to_eta2(f, df, df_error, ...)
t_to_eta2(t, df_error, ...)
F_to_epsilon2(f, df, df_error, ...)
t_to_epsilon2(t, df_error, ...)
F_to_eta2_adj(f, df, df_error, ...)
t_to_eta2_adj(t, df_error, ...)
F_to_omega2(f, df, df_error, ...)
t_to_omega2(t, df_error, ...)
}
\arguments{
\item{df, df_error}{Degrees of freedom of numerator or of the error estimate (i.e., the residuals).}
\item{...}{Arguments passed to or from other methods.}
\item{t, f}{The t or the F statistics.}
}
\value{
A numeric value between 0-1 (Note that for \eqn{\omega_p^2} and \eqn{\epsilon_p^2}
it is possible to compute a negative number; even though this doesn't make any practical sense,
it is recommended to report the negative number and not a 0).
}
\description{
These functions are convenience functions to convert F and t test statistics to \strong{partial} Eta squared (\eqn{\eta{_p}^2}), Omega squared (\eqn{\omega{_p}^2}) and Epsilon squared (\eqn{\epsilon{_p}^2}; an alias for the adjusted Eta squared). These are useful in cases where the various Sum of Squares and Mean Squares are not easily available or their computation is not straightforward (e.g., in linear mixed models, contrasts, etc.). For test statistics derived from \code{lm} and \code{aov} models, these functions give exact results. For all other cases, they return close approximations.
}
\details{
These functions use the following formulae:
\cr\cr
\deqn{\eta_p^2 = \frac{F \times df_{num}}{F \times df_{num} + df_{den}}}
\cr\cr
\deqn{\epsilon_p^2 = \frac{(F - 1) \times df_{num}}{F \times df_{num} + df_{den}}}
\cr\cr
\deqn{\omega_p^2 = \frac{(F - 1) \times df_{num}}{F \times df_{num} + df_{den} + 1}}
\cr\cr\cr
For \eqn{t}, the conversion is based on the equality of \eqn{t^2 = F} when \eqn{df_{num}=1}.
}
\note{
\eqn{Adj. \eta_p^2} is an alias for \eqn{\epsilon_p^2}.
}
\examples{
\donttest{
if (require("afex")) {
data(md_12.1)
aov_ez("id", "rt", md_12.1,
within = c("angle", "noise"),
anova_table = list(correction = "none", es = "pes")
)
}
# compare to:
F_to_eta2(40.72, 2, 18)
F_to_eta2(33.77, 1, 9)
F_to_eta2(45.31, 2, 18)
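# A manual check of the partial eta squared formula from 'Details':
# eta^2_p = F * df / (F * df + df_error)
(40.72 * 2) / (40.72 * 2 + 18)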
if (require("lmerTest")) { # for the df_error
fit <- lmer(extra ~ group + (1 | ID), sleep)
anova(fit)
# Type III Analysis of Variance Table with Satterthwaite's method
# Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
# group 12.482 12.482 1 9 16.501 0.002833 **
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
F_to_eta2(16.501, 1, 9)
F_to_omega2(16.501, 1, 9)
F_to_epsilon2(16.501, 1, 9)
}
}
}
\references{
\itemize{
\item Friedman, H. (1982). Simplified determinations of statistical power, magnitude of effect and research sample sizes. Educational and Psychological Measurement, 42(2), 521-526. \doi{10.1177/001316448204200214}
\item Mordkoff, J. T. (2019). A Simple Method for Removing Bias From a Popular Measure of Standardized Effect Size: Adjusted Partial Eta Squared. Advances in Methods and Practices in Psychological Science, 2(3), 228-232. \doi{10.1177/2515245919855053}
\item Albers, C., & Lakens, D. (2018). When power analyses based on pilot data are biased: Inaccurate effect size estimators and follow-up bias. Journal of experimental social psychology, 74, 187-195. \doi{10.31234/osf.io/b7z4q}
}
}
effectsize/man/interpret_parameters.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpret_parameters.R
\name{interpret_parameters}
\alias{interpret_parameters}
\alias{interpret_parameters.lm}
\title{Automated Interpretation of Effect Sizes}
\usage{
interpret_parameters(model, ...)
\method{interpret_parameters}{lm}(
model,
parameters = NULL,
interpretation = "funder2019",
standardize_method = "refit",
standardize_robust = FALSE,
...
)
}
\arguments{
\item{model}{A statistical model.}
\item{...}{Arguments passed to or from other methods.}
\item{parameters}{A custom parameters table. If \code{NULL}, will use \code{\link{standardize_parameters}} to get it.}
\item{interpretation}{Interpretation grid (i.e., the set of rules of thumb) used to interpret the effects.}
\item{standardize_method}{See \code{\link{standardize_parameters}}.}
\item{standardize_robust}{See \code{\link{standardize_parameters}}.}
}
\description{
Automated interpretation of effect sizes.
}
\examples{
model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris)
interpret_parameters(model)
}
effectsize/man/interpret.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpret.R
\name{interpret}
\alias{interpret}
\title{Generic function for interpretation}
\usage{
interpret(x, rules)
}
\arguments{
\item{x}{Value or vector of values to interpret.}
\item{rules}{Set of \link{rules}.}
}
\description{
Interpret a value based on a set of rules. See \link{rules}.
}
\examples{
rules_grid <- rules(c(0.01, 0.05), c("very significant", "significant", "not significant"))
interpret(0.001, rules_grid)
interpret(0.021, rules_grid)
interpret(0.08, rules_grid)
interpret(c(0.01, 0.005, 0.08), rules_grid)
interpret(c(0.35, 0.15), c("small" = 0.2, "large" = 0.4))
interpret(c(0.35, 0.15), rules(c(0.2, 0.4), c("small", "medium", "large")))
}
\seealso{
rules
}
effectsize/man/sd_pooled.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sd_pooled.R
\name{sd_pooled}
\alias{sd_pooled}
\alias{mad_pooled}
\title{Pooled Standard Deviation}
\usage{
sd_pooled(x, y = NULL, data = NULL)
mad_pooled(x, y = NULL, data = NULL)
}
\arguments{
\item{x}{A continuous variable or a formula.}
\item{y}{A continuous variable, a factor with two groups or a formula.}
\item{data}{An optional data frame containing the variables.}
}
\value{
Numeric, the pooled standard deviation.
}
\description{
The Pooled Standard Deviation is a weighted average of standard deviations for two or more groups, with more "weight" given to larger sample sizes.
}
\examples{
sd_pooled(Sepal.Length ~ Petal.Width, data = iris)
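# A sketch of the textbook two-group pooled-SD formula this is based on
# (not necessarily the exact internal implementation):
x <- iris$Sepal.Length[iris$Species == "setosa"]
y <- iris$Sepal.Length[iris$Species == "versicolor"]
sqrt(((length(x) - 1) * sd(x)^2 + (length(y) - 1) * sd(y)^2) /
  (length(x) + length(y) - 2))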
}
effectsize/man/dot-factor_to_numeric.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_factor_to_numeric.R
\name{.factor_to_numeric}
\alias{.factor_to_numeric}
\title{Safe transformation from factor/character to numeric}
\usage{
.factor_to_numeric(x)
}
\description{
Safe transformation from factor/character to numeric
}
\keyword{internal}
effectsize/man/standardize_info.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize_info.R
\name{standardize_info}
\alias{standardize_info}
\title{Get Standardization Information}
\usage{
standardize_info(model, robust = FALSE, ...)
}
\arguments{
\item{model}{A statistical model.}
\item{robust}{Logical, if \code{TRUE}, centering is done by subtracting the
median from the variables and dividing by the median absolute deviation
(MAD). If \code{FALSE}, variables are standardized by subtracting the
mean and dividing by the standard deviation (SD).}
\item{...}{Arguments passed to or from other methods.}
}
\description{
This function extracts information, such as the deviations (SD or MAD) of the parent variables, that is necessary for post-hoc standardization of parameters. It gives a window into how standardized parameters are obtained, i.e., by what quantities they are divided.
}
\examples{
model <- lm(Sepal.Width ~ Sepal.Length * Species, data = iris)
standardize_info(model)
}
effectsize/man/interpret_r.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpret_r.R
\name{interpret_r}
\alias{interpret_r}
\title{Correlation interpretation}
\usage{
interpret_r(r, rules = "funder2019")
}
\arguments{
\item{r}{Value or vector of correlation coefficient.}
\item{rules}{Can be "funder2019" (default), "gignac2016", cohen1988", "evans1996" or custom set of rules.}
}
\description{
Correlation interpretation
}
\examples{
interpret_r(r = .015)
interpret_r(r = c(.5, -.02))
}
\references{
\itemize{
\item Funder, D. C., & Ozer, D. J. (2019). Evaluating effect size in psychological research: sense and nonsense. Advances in Methods and Practices in Psychological Science.
\item Gignac, G. E., & Szodorai, E. T. (2016). Effect size guidelines for individual differences researchers. Personality and individual differences, 102, 74-78.
\item Cohen, J. (1988). Statistical power analysis for the behavioural sciences.
\item Evans, J. D. (1996). Straightforward statistics for the behavioral sciences. Thomson Brooks/Cole Publishing Co.
}
}
\seealso{
Page 88 of APA's 6th Edition.
}
effectsize/man/standardize_parameters.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize_parameters.R
\name{standardize_parameters}
\alias{standardize_parameters}
\alias{standardize_posteriors}
\title{Parameters standardization}
\usage{
standardize_parameters(
model,
parameters = NULL,
method = "refit",
robust = FALSE,
two_sd = FALSE,
verbose = TRUE,
centrality = "median",
...
)
standardize_posteriors(
model,
method = "refit",
robust = FALSE,
two_sd = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{model}{A statistical model.}
\item{parameters}{An optional table containing the parameters to standardize. If \code{NULL}, will automatically retrieve it from the model.}
\item{method}{The method used for standardizing the parameters. Can be \code{"refit"} (default), \code{"posthoc"}, \code{"smart"} or \code{"basic"}. See 'Details'.}
\item{robust}{Logical, if \code{TRUE}, centering is done by subtracting the
median from the variables and dividing by the median absolute deviation
(MAD). If \code{FALSE}, variables are standardized by subtracting the
mean and dividing by the standard deviation (SD).}
\item{two_sd}{If \code{TRUE}, the variables are scaled by two times the deviation (SD or MAD depending on \code{robust}). This method can be useful to obtain model coefficients of continuous parameters comparable to coefficients related to binary predictors (Gelman, 2008).}
\item{verbose}{Toggle warnings on or off.}
\item{centrality}{For Bayesian models, which point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: "median", "mean", "MAP" or "all".}
\item{...}{Arguments passed to or from other methods.}
}
\value{
Standardized parameters.
}
\description{
Compute standardized model parameters (coefficients).
}
\details{
\subsection{Methods:}{
\itemize{
\item \strong{refit}: This method is based on a complete model re-fit with a standardized version of the data. Hence, it is equivalent to standardizing the variables before fitting the model. It is the "purest" and most accurate method (Neter et al., 1989), but also the most computationally costly and slow (especially for heavy models such as Bayesian models). This method is particularly recommended for complex models that include interactions or transformations (e.g., polynomial or spline terms). The \code{robust} argument (defaulting to \code{FALSE}) enables a robust standardization of the data, i.e., based on the \code{median} and \code{MAD} instead of the \code{mean} and \code{SD}.
\item \strong{posthoc}: Post-hoc standardization of the parameters, aiming at emulating the results obtained by "refit" without refitting the model. The coefficients are divided by the standard deviation (or MAD if \code{robust}) of the outcome (which becomes their expression 'unit'). Then, the coefficients related to numeric variables are additionally multiplied by the standard deviation (or MAD if \code{robust}) of the related terms, so that they correspond to changes of 1 SD of the predictor (e.g., "a change of 1 SD in \code{x} is related to a change of 0.24 SD in \code{y}"). This does not apply to binary variables or factors, whose coefficients remain related to changes in levels. This method is not accurate and tends to give aberrant results when interactions are specified.
\item \strong{smart} (Standardization of Model's parameters with Adjustment, Reconnaissance and Transformation): Similar to \code{method = "posthoc"} in that it does not involve model refitting. The difference is that the SD of the response is computed on the relevant section of the data. For instance, if a factor with 3 levels A (the intercept), B and C is entered as a predictor, the effect corresponding to B vs. A will be scaled by the variance of the response at the intercept only. As a result, the coefficients for effects of factors are similar to a Glass' delta.
\item \strong{basic}: This method is similar to \code{method = "posthoc"}, but treats all variables as continuous: it also scales the coefficients of factor levels (transformed to integers) and binary predictors by the standard deviations of the corresponding model-matrix columns. Although this is inappropriate for such cases, this method is the one implemented by default in other software packages, such as \code{lm.beta::lm.beta()}.
}
When \code{method = "smart"} or \code{method = "classic"}, \code{standardize_parameters()}
also returns the standard errors for the standardized coefficients. Then, \code{ci()} can be
used to calculate confidence intervals for the standardized coefficients. See 'Examples'.
}
}
\examples{
library(effectsize)
data(iris)
model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris)
standardize_parameters(model, method = "refit")
\donttest{
standardize_parameters(model, method = "posthoc")
standardize_parameters(model, method = "smart")
standardize_parameters(model, method = "basic")
# Robust and 2 SD
standardize_parameters(model, robust = TRUE)
standardize_parameters(model, two_sd = TRUE)
# show CI
library(parameters)
params <- standardize_parameters(model, method = "smart", robust = TRUE)
ci(params)
iris$binary <- ifelse(iris$Sepal.Width > 3, 1, 0)
model <- glm(binary ~ Species * Sepal.Length, data = iris, family = "binomial")
standardize_parameters(model, method = "refit")
standardize_parameters(model, method = "posthoc")
standardize_parameters(model, method = "smart")
standardize_parameters(model, method = "basic")
}
\donttest{
if (require("rstanarm")) {
model <- stan_glm(Sepal.Length ~ Species * Petal.Width, data = iris, iter = 500, refresh = 0)
standardize_posteriors(model, method = "refit")
standardize_posteriors(model, method = "posthoc")
standardize_posteriors(model, method = "smart")
standardize_posteriors(model, method = "basic")
standardize_parameters(model, method = "refit")
standardize_parameters(model, method = "posthoc")
standardize_parameters(model, method = "smart")
standardize_parameters(model, method = "basic")
}
}
}
\references{
\itemize{
\item Neter, J., Wasserman, W., & Kutner, M. H. (1989). Applied linear regression models.
\item Gelman, A. (2008). Scaling regression inputs by dividing by two standard deviations. Statistics in medicine, 27(15), 2865-2873.
}
}
\seealso{
standardize_info
}
effectsize/man/interpret_odds.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpret_odds.R
\name{interpret_odds}
\alias{interpret_odds}
\title{(Log) Odds ratio interpretation}
\usage{
interpret_odds(odds, rules = "chen2010", log = FALSE)
}
\arguments{
\item{odds}{Value or vector of (log) odds ratio values.}
\item{rules}{Can be "chen2010" (default), "cohen1988" (through transformation to standardized difference, see \code{\link{odds_to_d}}) or custom set of rules.}
\item{log}{Logical, are the provided values log odds ratios?}
}
\description{
(Log) Odds ratio interpretation
}
\examples{
interpret_odds(1)
interpret_odds(c(5, 2))
}
\references{
\itemize{
\item Cohen, J. (1988). Statistical power analysis for the behavioural sciences.
\item Chen, H., Cohen, P., & Chen, S. (2010). How big is a big odds ratio? Interpreting the magnitudes of odds ratios in epidemiological studies. Communications in Statistics—Simulation and Computation, 39(4), 860-864.
\item Sánchez-Meca, J., Marín-Martínez, F., & Chacón-Moscoso, S. (2003). Effect-size indices for dichotomized outcomes in meta-analysis. Psychological methods, 8(4), 448.
}
}
effectsize/man/figures/logo.png (binary PNG image omitted)