marginaleffects/0000755000176200001440000000000014560154575013420 5ustar liggesusersmarginaleffects/NAMESPACE0000644000176200001440000001254414560035476014643 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(coef,comparisons) S3method(coef,hypotheses) S3method(coef,marginalmeans) S3method(coef,predictions) S3method(coef,slopes) S3method(get_coef,afex_aov) S3method(get_coef,betareg) S3method(get_coef,bracl) S3method(get_coef,brmsfit) S3method(get_coef,brmultinom) S3method(get_coef,data.frame) S3method(get_coef,default) S3method(get_coef,gam) S3method(get_coef,gamlss) S3method(get_coef,glmmTMB) S3method(get_coef,lmerMod) S3method(get_coef,lmerModLmerTest) S3method(get_coef,mblogit) S3method(get_coef,merMod) S3method(get_coef,mlm) S3method(get_coef,multinom) S3method(get_coef,nls) S3method(get_coef,polr) S3method(get_coef,scam) S3method(get_coef,selection) S3method(get_coef,svyolr) S3method(get_coef,workflow) S3method(get_group_names,bracl) S3method(get_group_names,brmsfit) S3method(get_group_names,clm) S3method(get_group_names,default) S3method(get_group_names,hurdle) S3method(get_group_names,mblogit) S3method(get_group_names,mlm) S3method(get_group_names,multinom) S3method(get_group_names,polr) S3method(get_group_names,svyolr) S3method(get_mean_or_mode,character) S3method(get_mean_or_mode,data.frame) S3method(get_mean_or_mode,default) S3method(get_mean_or_mode,factor) S3method(get_mean_or_mode,logical) S3method(get_model_matrix,default) S3method(get_predict,Learner) S3method(get_predict,MCMCglmm) S3method(get_predict,afex_aov) S3method(get_predict,bart) S3method(get_predict,betareg) S3method(get_predict,bife) S3method(get_predict,biglm) S3method(get_predict,brmsfit) S3method(get_predict,brmultinom) S3method(get_predict,clm) S3method(get_predict,coxph) S3method(get_predict,crch) S3method(get_predict,default) S3method(get_predict,fixest) S3method(get_predict,gamlss) S3method(get_predict,glimML) S3method(get_predict,glm) 
S3method(get_predict,glmmPQL) S3method(get_predict,glmmTMB) S3method(get_predict,hxlr) S3method(get_predict,inferences_simulation) S3method(get_predict,lm) S3method(get_predict,lmerMod) S3method(get_predict,lmerModLmerTest) S3method(get_predict,lrm) S3method(get_predict,mblogit) S3method(get_predict,merMod) S3method(get_predict,mhurdle) S3method(get_predict,mlogit) S3method(get_predict,model_fit) S3method(get_predict,multinom) S3method(get_predict,ols) S3method(get_predict,orm) S3method(get_predict,polr) S3method(get_predict,rlmerMod) S3method(get_predict,rms) S3method(get_predict,rq) S3method(get_predict,stanreg) S3method(get_predict,svyolr) S3method(get_predict,tobit1) S3method(get_predict,workflow) S3method(get_vcov,Learner) S3method(get_vcov,MCMCglmm) S3method(get_vcov,afex_aov) S3method(get_vcov,bart) S3method(get_vcov,biglm) S3method(get_vcov,brmsfit) S3method(get_vcov,default) S3method(get_vcov,gamlss) S3method(get_vcov,glimML) S3method(get_vcov,glmmTMB) S3method(get_vcov,inferences_simulation) S3method(get_vcov,mhurdle) S3method(get_vcov,model_fit) S3method(get_vcov,orm) S3method(get_vcov,scam) S3method(get_vcov,workflow) S3method(glance,comparisons) S3method(glance,hypotheses) S3method(glance,marginaleffects_mids) S3method(glance,marginalmeans) S3method(glance,predictions) S3method(glance,slopes) S3method(plot,comparisons) S3method(plot,predictions) S3method(plot,slopes) S3method(print,comparisons) S3method(print,hypotheses) S3method(print,marginaleffects) S3method(print,predictions) S3method(print,slopes) S3method(sanitize_model_specific,bart) S3method(sanitize_model_specific,brmsfit) S3method(sanitize_model_specific,inferences_simulation) S3method(sanitize_model_specific,mblogit) S3method(sanitize_model_specific,svyglm) S3method(sanitize_model_specific,svyolr) S3method(set_coef,afex_aov) S3method(set_coef,betareg) S3method(set_coef,crch) S3method(set_coef,data.frame) S3method(set_coef,default) S3method(set_coef,gamlss) S3method(set_coef,glimML) 
S3method(set_coef,glm) S3method(set_coef,glmmPQL) S3method(set_coef,glmmTMB) S3method(set_coef,glmx) S3method(set_coef,hetprob) S3method(set_coef,hurdle) S3method(set_coef,hxlr) S3method(set_coef,ivpml) S3method(set_coef,lm) S3method(set_coef,lme) S3method(set_coef,lmerMod) S3method(set_coef,lmerModLmerTest) S3method(set_coef,merMod) S3method(set_coef,mlm) S3method(set_coef,model_fit) S3method(set_coef,multinom) S3method(set_coef,nls) S3method(set_coef,polr) S3method(set_coef,rlmerMod) S3method(set_coef,scam) S3method(set_coef,selection) S3method(set_coef,svyolr) S3method(set_coef,workflow) S3method(set_coef,zeroinfl) S3method(tidy,comparisons) S3method(tidy,hypotheses) S3method(tidy,marginaleffects_mids) S3method(tidy,marginalmeans) S3method(tidy,predictions) S3method(tidy,slopes) S3method(vcov,comparisons) S3method(vcov,hypotheses) S3method(vcov,marginalmeans) S3method(vcov,predictions) S3method(vcov,slopes) export(avg_comparisons) export(avg_predictions) export(avg_slopes) export(comparisons) export(datagrid) export(datagridcf) export(deltamethod) export(expect_margins) export(expect_predictions) export(expect_slopes) export(get_coef) export(get_group_names) export(get_model_matrix) export(get_predict) export(get_vcov) export(glance) export(hypotheses) export(inferences) export(marginal_means) export(marginaleffects) export(marginalmeans) export(meffects) export(plot_comparisons) export(plot_predictions) export(plot_slopes) export(posterior_draws) export(posteriordraws) export(predictions) export(set_coef) export(slopes) export(tidy) import(data.table) importFrom(Rcpp,evalCpp) importFrom(generics,glance) importFrom(generics,tidy) useDynLib(marginaleffects) marginaleffects/README.md0000644000176200001440000001456514557317544014715 0ustar liggesusers ![](man/figures/zoo_banner.png)


The parameters of a statistical model can sometimes be difficult to interpret substantively, especially when that model includes non-linear components, interactions, or transformations. Analysts who fit such complex models often seek to transform raw parameter estimates into quantities that are easier for domain experts and stakeholders to understand, such as predictions, contrasts, risk differences, ratios, odds, lift, slopes, and so on. Unfortunately, computing these quantities—along with associated standard errors—can be a tedious and error-prone task. This problem is compounded by the fact that modeling packages in `R` and `Python` produce objects with varied structures, which hold different information. This means that end-users often have to write customized code to interpret the estimates obtained by fitting Linear, GLM, GAM, Bayesian, Mixed Effects, and other model types. This can lead to wasted effort, confusion, and mistakes, and it can hinder the implementation of best practices. ## Marginal Effects Zoo: The Book [This free online book](https://marginaleffects.com/) introduces a conceptual framework to clearly define statistical quantities of interest, and shows how to estimate those quantities using the `marginaleffects` package for `R` and `Python`. The techniques introduced herein can enhance the interpretability of [over 100 classes of statistical and machine learning models](https://marginaleffects.com/vignettes/supported_models.html), including linear, GLM, GAM, mixed-effects, bayesian, categorical outcomes, XGBoost, and more. With a single unified interface, users can compute and plot many estimands, including: - Predictions (aka fitted values or adjusted predictions) - Comparisons such as contrasts, risk differences, risk ratios, odds, etc. 
- Slopes (aka marginal effects or partial derivatives) - Marginal means - Linear and non-linear hypothesis tests - Equivalence tests - Uncertainty estimates using the delta method, bootstrapping, simulation, or conformal inference. - Much more! [The Marginal Effects Zoo](https://marginaleffects.com/) book includes over 30 chapters of tutorials, case studies, and technical notes. It covers a wide range of topics, including how the `marginaleffects` package can facilitate the analysis of: - Experiments - Observational data - Causal inference with G-Computation - Machine learning models - Bayesian modeling - Multilevel regression with post-stratification (MRP) - Missing data - Matching - Inverse probability weighting - Conformal prediction [Get started by clicking here!](https://marginaleffects.com/vignettes/get_started.html) ## `marginaleffects`: The Package The `marginaleffects` package for `R` and `Python` offers a single point of entry to easily interpret the results of [over 100 classes of models,](https://marginaleffects.com/vignettes/supported_models.html) using a simple and consistent user interface. Its benefits include: - *Powerful:* It can compute and plot predictions; comparisons (contrasts, risk ratios, etc.); slopes; and conduct hypothesis and equivalence tests for over 100 different classes of models in `R`. - *Simple:* All functions share a simple and unified interface. - *Documented*: Each function is thoroughly documented with abundant examples. The Marginal Effects Zoo website includes 20,000+ words of vignettes and case studies. - *Efficient:* [Some operations](https://marginaleffects.com/vignettes/performance.html) can be up to 1000 times faster and use 30 times less memory than with the `margins` package. - *Valid:* When possible, [numerical results are checked](https://marginaleffects.com/vignettes/supported_models.html) against alternative software like `Stata` or other `R` packages. 
- *Thin:* The `R` package requires relatively few dependencies. - *Standards-compliant:* `marginaleffects` follows “tidy” principles and returns objects that work with standard functions like `summary()`, `tidy()`, and `glance()`. These objects are easy to program with and feed to other packages like [`ggplot2`](https://marginaleffects.com/vignettes/plot.html) or [`modelsummary`.](https://marginaleffects.com/vignettes/tables.html) - *Extensible:* Adding support for new models is very easy, often requiring less than 10 lines of new code. Please submit [feature requests on Github.](https://github.com/vincentarelbundock/marginaleffects/issues) - *Active development*: Bugs are fixed promptly. ## How to help The `marginaleffects` package and the Marginal Effects Zoo book will always be free. If you like this project, you can contribute in four ways: 1. Make a donation to the [Native Women’s Shelter of Montreal](https://www.nwsm.info/) or to [Give Directly](https://www.givedirectly.org/), and send me (Vincent) a quick note. You’ll make my day. 2. Submit bug reports, documentation improvements, or code contributions to the Github repositories of the [R version](https://github.com/vincentarelbundock/marginaleffects) or the [Python version](https://github.com/vincentarelbundock/pymarginaleffects) of the package. 3. [Cite the `marginaleffects` package](https://marginaleffects.com/CITATION.html) in your work and tell your friends about it. 4. Create a new entry [for the Meme Gallery!](https://marginaleffects.com/vignettes/meme.html) marginaleffects logo



marginaleffects/man/0000755000176200001440000000000014560042044014156 5ustar liggesusersmarginaleffects/man/get_averages.Rd0000644000176200001440000000441214554076657017126 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_averages.R \name{get_averages} \alias{get_averages} \title{Average Estimates (aka "Margins")} \usage{ get_averages(x, by = TRUE, ...) } \arguments{ \item{x}{Object produced by the \code{predictions()}, \code{comparisons()}, or \code{slopes()} functions.} \item{by}{Character vector of variable names over which to compute group-wise average estimates. When \code{by=NULL}, the global average (per term) is reported.} \item{...}{All additional arguments are passed to the original fitting function to override the original call options: \code{conf_level}, \code{transform}, etc. See \code{?predictions}, \code{?comparisons}, \code{?slopes}.} } \value{ A \code{data.frame} of estimates and uncertainty estimates } \description{ Calculate average estimates by taking the (group-wise) mean of all the unit-level estimates computed by the \code{predictions()}, \code{comparisons()}, or \code{slopes()} functions. Warning: It is generally faster and safer to use the \code{by} argument of one of the three functions listed above. Alternatively, one can call it in one step: \code{avg_slopes(model)} \code{slopes(model, by = TRUE)} Proceeding in two steps by assigning the unit-level estimates is typically slower, because all estimates must be computed twice. Note that the \code{tidy()} and \code{summary()} methods are slower wrappers around \verb{avg_*()} functions. } \details{ Standard errors are estimated using the delta method. See the \code{marginaleffects} website for details. In Bayesian models (e.g., \code{brms}), estimates are aggregated applying the median (or mean) function twice. 
First, we apply it to all marginal effects for each posterior draw, thereby estimating one Average (or Median) Marginal Effect per iteration of the MCMC chain. Second, we calculate the mean and the \code{quantile} function to the results of Step 1 to obtain the Average Marginal Effect and its associated interval. } \examples{ \dontshow{if (interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \dontshow{\}) # examplesIf} mod <- lm(mpg ~ factor(gear), data = mtcars) avg_comparisons(mod, variables = list(gear = "sequential")) } \keyword{internal} marginaleffects/man/complete_levels.Rd0000644000176200001440000000104414541720224017630 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/complete_levels.R \name{complete_levels} \alias{complete_levels} \title{Create a data.frame with all factor or character levels} \usage{ complete_levels(x, character_levels = NULL) } \description{ \code{model.matrix} breaks when \code{newdata} includes a factor variable, but not all levels are present in the data. This is bad for us because we often want to get predictions with one (or few) rows, where some factor levels are inevitably missing. } \keyword{internal} marginaleffects/man/hypotheses.Rd0000644000176200001440000003163114557277362016667 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hypotheses.R \name{hypotheses} \alias{hypotheses} \title{(Non-)Linear Tests for Null Hypotheses, Joint Hypotheses, Equivalence, Non Superiority, and Non Inferiority} \usage{ hypotheses( model, hypothesis = NULL, vcov = NULL, conf_level = 0.95, df = Inf, equivalence = NULL, joint = FALSE, joint_test = "f", FUN = NULL, numderiv = "fdforward", ... 
) } \arguments{ \item{model}{Model object or object generated by the \code{comparisons()}, \code{slopes()}, or \code{predictions()} functions.} \item{hypothesis}{specify a hypothesis test or custom contrast using a numeric value, vector, or matrix, a string, or a string formula. \itemize{ \item Numeric: \itemize{ \item Single value: the null hypothesis used in the computation of Z and p (before applying \code{transform}). \item Vector: Weights to compute a linear combination of (custom contrast between) estimates. Length equal to the number of rows generated by the same function call, but without the \code{hypothesis} argument. \item Matrix: Each column is a vector of weights, as describe above, used to compute a distinct linear combination of (contrast between) estimates. The column names of the matrix are used as labels in the output. } \item String formula to specify linear or non-linear hypothesis tests. If the \code{term} column uniquely identifies rows, terms can be used in the formula. Otherwise, use \code{b1}, \code{b2}, etc. to identify the position of each parameter. The \verb{b*} wildcard can be used to test hypotheses on all estimates. Examples: \itemize{ \item \code{hp = drat} \item \code{hp + drat = 12} \item \code{b1 + b2 + b3 = 0} \item \verb{b* / b1 = 1} } \item String: \itemize{ \item "pairwise": pairwise differences between estimates in each row. \item "reference": differences between the estimates in each row and the estimate in the first row. \item "sequential": difference between an estimate and the estimate in the next row. \item "revpairwise", "revreference", "revsequential": inverse of the corresponding hypotheses, as described above. } \item See the Examples section below and the vignette: https://marginaleffects.com/vignettes/hypothesis.html }} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. 
This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. } \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{conf_level}{numeric value between 0 and 1. Confidence level to use to build a confidence interval.} \item{df}{Degrees of freedom used to compute p values and confidence intervals. A single numeric value between 1 and \code{Inf}. When \code{df} is \code{Inf}, the normal distribution is used. When \code{df} is finite, the \code{t} distribution is used. See \link[insight:get_df]{insight::get_df} for a convenient function to extract degrees of freedom. Ex: \code{slopes(model, df = insight::get_df(model))}} \item{equivalence}{Numeric vector of length 2: bounds used for the two-one-sided test (TOST) of equivalence, and for the non-inferiority and non-superiority tests. See Details section below.} \item{joint}{Joint test of statistical significance. The null hypothesis value can be set using the \code{hypothesis} argument. \itemize{ \item FALSE: Hypotheses are not tested jointly. \item TRUE: All parameters are tested jointly. \item String: A regular expression to match parameters to be tested jointly. 
\code{grep(joint, perl = TRUE)} \item Character vector of parameter names to be tested. Characters refer to the names of the vector returned by \code{coef(object)}. \item Integer vector of indices. Which parameter positions to test jointly. }} \item{joint_test}{A character string specifying the type of test, either "f" or "chisq". The null hypothesis is set by the \code{hypothesis} argument, with default null equal to 0 for all parameters.} \item{FUN}{\code{NULL} or function. \itemize{ \item \code{NULL} (default): hypothesis test on a model's coefficients, or on the quantities estimated by one of the \code{marginaleffects} package functions. \item Function which accepts a model object and returns a numeric vector or a data.frame with two columns called \code{term} and \code{estimate}. This argument can be useful when users want to conduct a hypothesis test on an arbitrary function of quantities held in a model object. See examples below. }} \item{numderiv}{string or list of strings indicating the method to use for the numeric differentiation used to compute delta method standard errors. \itemize{ \item "fdforward": finite difference method with forward differences (default) \item "fdcenter": finite difference method with central differences \item "richardson": Richardson extrapolation method \item Extra arguments can be specified by passing a list to the \code{numDeriv} argument, with the name of the method first and named arguments following, ex: \code{numderiv=list("fdcenter", eps = 1e-5)}. When an unknown argument is used, \code{marginaleffects} prints the list of valid arguments for each method. }} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package. These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). 
Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \description{ Uncertainty estimates are calculated as first-order approximate standard errors for linear or non-linear functions of a vector of random variables with known or estimated covariance matrix. In that sense, \code{\link{hypotheses}} emulates the behavior of the excellent and well-established \link[car:deltaMethod]{car::deltaMethod} and \link[car:linearHypothesis]{car::linearHypothesis} functions, but it supports more models; requires fewer dependencies; expands the range of tests to equivalence and superiority/inferiority; and offers convenience features like robust standard errors. To learn more, read the hypothesis tests vignette, visit the package website, or scroll down this page for a full list of vignettes: \itemize{ \item \url{https://marginaleffects.com/vignettes/hypothesis.html} \item \url{https://marginaleffects.com/} } Warning #1: Tests are conducted directly on the scale defined by the \code{type} argument. For some models, it can make sense to conduct hypothesis or equivalence tests on the \code{"link"} scale instead of the \code{"response"} scale which is often the default. Warning #2: For hypothesis tests on objects produced by the \code{marginaleffects} package, it is safer to use the \code{hypothesis} argument of the original function. Using \code{hypotheses()} may not work in certain environments, in lists, or when working programmatically with *apply style functions. Warning #3: The tests assume that the \code{hypothesis} expression is (approximately) normally distributed, which for non-linear functions of the parameters may not be realistic. More reliable confidence intervals can be obtained using the \code{inferences()} function with \code{method = "boot"}. 
} \section{Joint hypothesis tests}{ The test statistic for the joint Wald test is calculated as (R * theta_hat - r)' * inv(R * V_hat * R') * (R * theta_hat - r) / Q, where theta_hat is the vector of estimated parameters, V_hat is the estimated covariance matrix, R is a Q x P matrix for testing Q hypotheses on P parameters, r is a Q x 1 vector for the null hypothesis, and Q is the number of rows in R. If the test is a Chi-squared test, the test statistic is not normalized. The p-value is then calculated based on either the F-distribution (for F-test) or the Chi-squared distribution (for Chi-squared test). For the F-test, the degrees of freedom are Q and (n - P), where n is the sample size and P is the number of parameters. For the Chi-squared test, the degrees of freedom are Q. } \section{Equivalence, Inferiority, Superiority}{ \eqn{\theta} is an estimate, \eqn{\sigma_\theta} its estimated standard error, and \eqn{[a, b]} are the bounds of the interval supplied to the \code{equivalence} argument. Non-inferiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \leq a}{\theta <= a} \item \eqn{H_1}{H1}: \eqn{\theta > a} \item \eqn{t=(\theta - a)/\sigma_\theta}{t=(\theta - a)/\sigma_\theta} \item p: Upper-tail probability } Non-superiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \geq b}{\theta >= b} \item \eqn{H_1}{H1}: \eqn{\theta < b} \item \eqn{t=(\theta - b)/\sigma_\theta}{t=(\theta - b)/\sigma_\theta} \item p: Lower-tail probability } Equivalence: Two One-Sided Tests (TOST) \itemize{ \item p: Maximum of the non-inferiority and non-superiority p values. } Thanks to Russell V. Lenth for the excellent \code{emmeans} package and documentation which inspired this feature. 
} \examples{ library(marginaleffects) mod <- lm(mpg ~ hp + wt + factor(cyl), data = mtcars) # When `FUN` and `hypotheses` are `NULL`, `hypotheses()` returns a data.frame of parameters hypotheses(mod) # Test of equality between coefficients hypotheses(mod, hypothesis = "hp = wt") # Non-linear function hypotheses(mod, hypothesis = "exp(hp + wt) = 0.1") # Robust standard errors hypotheses(mod, hypothesis = "hp = wt", vcov = "HC3") # b1, b2, ... shortcuts can be used to identify the position of the # parameters of interest in the output of FUN hypotheses(mod, hypothesis = "b2 = b3") # wildcard hypotheses(mod, hypothesis = "b* / b2 = 1") # term names with special characters have to be enclosed in backticks hypotheses(mod, hypothesis = "`factor(cyl)6` = `factor(cyl)8`") mod2 <- lm(mpg ~ hp * drat, data = mtcars) hypotheses(mod2, hypothesis = "`hp:drat` = drat") # predictions(), comparisons(), and slopes() mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) cmp <- comparisons(mod, newdata = "mean") hypotheses(cmp, hypothesis = "b1 = b2") mfx <- slopes(mod, newdata = "mean") hypotheses(cmp, hypothesis = "b2 = 0.2") pre <- predictions(mod, newdata = datagrid(hp = 110, mpg = c(30, 35))) hypotheses(pre, hypothesis = "b1 = b2") # The `FUN` argument can be used to compute standard errors for fitted values mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) f <- function(x) predict(x, type = "link", newdata = mtcars) p <- hypotheses(mod, FUN = f) head(p) f <- function(x) predict(x, type = "response", newdata = mtcars) p <- hypotheses(mod, FUN = f) head(p) # Complex aggregation # Step 1: Collapse predicted probabilities by outcome level, for each individual # Step 2: Take the mean of the collapsed probabilities by group and `cyl` library(dplyr) library(MASS) library(dplyr) dat <- transform(mtcars, gear = factor(gear)) mod <- polr(gear ~ factor(cyl) + hp, dat) aggregation_fun <- function(model) { predictions(model, vcov = FALSE) |> mutate(group = ifelse(group 
\%in\% c("3", "4"), "3 & 4", "5")) |> summarize(estimate = sum(estimate), .by = c("rowid", "cyl", "group")) |> summarize(estimate = mean(estimate), .by = c("cyl", "group")) |> rename(term = cyl) } hypotheses(mod, FUN = aggregation_fun) # Equivalence, non-inferiority, and non-superiority tests mod <- lm(mpg ~ hp + factor(gear), data = mtcars) p <- predictions(mod, newdata = "median") hypotheses(p, equivalence = c(17, 18)) mfx <- avg_slopes(mod, variables = "hp") hypotheses(mfx, equivalence = c(-.1, .1)) cmp <- avg_comparisons(mod, variables = "gear", hypothesis = "pairwise") hypotheses(cmp, equivalence = c(0, 10)) # joint hypotheses: character vector model <- lm(mpg ~ as.factor(cyl) * hp, data = mtcars) hypotheses(model, joint = c("as.factor(cyl)6:hp", "as.factor(cyl)8:hp")) # joint hypotheses: regular expression hypotheses(model, joint = "cyl") # joint hypotheses: integer indices hypotheses(model, joint = 2:3) # joint hypotheses: different null hypotheses hypotheses(model, joint = 2:3, hypothesis = 1) hypotheses(model, joint = 2:3, hypothesis = 1:2) # joint hypotheses: marginaleffects object cmp <- avg_comparisons(model) hypotheses(cmp, joint = "cyl") } marginaleffects/man/print.marginaleffects.Rd0000644000176200001440000000505214560035476020747 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.R \name{print.marginaleffects} \alias{print.marginaleffects} \title{Print \code{marginaleffects} objects} \usage{ \method{print}{marginaleffects}( x, digits = getOption("marginaleffects_print_digits", default = 3), p_eps = getOption("marginaleffects_print_p_eps", default = 0.001), topn = getOption("marginaleffects_print_topn", default = 5), nrows = getOption("marginaleffects_print_nrows", default = 30), ncols = getOption("marginaleffects_print_ncols", default = 30), style = getOption("marginaleffects_print_style", default = "summary"), type = getOption("marginaleffects_print_type", default = TRUE), column_names = 
getOption("marginaleffects_print_column_names", default = TRUE), ... ) } \arguments{ \item{x}{An object produced by one of the \code{marginaleffects} package functions.} \item{digits}{The number of digits to display.} \item{p_eps}{p values smaller than this number are printed in "<0.001" style.} \item{topn}{The number of rows to be printed from the beginning and end of tables with more than \code{nrows} rows.} \item{nrows}{The number of rows which will be printed before truncation.} \item{ncols}{The maximum number of column names to display at the bottom of the printed output.} \item{style}{"summary" or "data.frame"} \item{type}{boolean: should the type be printed?} \item{column_names}{boolean: should the column names be printed?} \item{...}{Other arguments are currently ignored.} } \description{ This function controls the text which is printed to the console when one of the core \code{marginaleffects} functions is called and the object is returned: \code{predictions()}, \code{comparisons()}, \code{slopes()}, \code{hypotheses()}, \code{avg_predictions()}, \code{avg_comparisons()}, \code{avg_slopes()}. All of those functions return standard data frames. Columns can be extracted by name, \code{predictions(model)$estimate}, and all the usual data manipulation functions work out-of-the-box: \code{colnames()}, \code{head()}, \code{subset()}, \code{dplyr::filter()}, \code{dplyr::arrange()}, etc. Some of the data columns are not printed by default. You can disable pretty printing and print the full results as a standard data frame using the \code{style} argument or by applying \code{as.data.frame()} on the object. See examples below. 
} \examples{ library(marginaleffects) mod <- lm(mpg ~ hp + am + factor(gear), data = mtcars) p <- predictions(mod, by = c("am", "gear")) p subset(p, am == 1) print(p, style = "data.frame") data.frame(p) } marginaleffects/man/get_model_matrix.Rd0000644000176200001440000000350414541720224017774 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_model_matrix.R \name{get_model_matrix} \alias{get_model_matrix} \alias{get_model_matrix.default} \title{Get a named model matrix} \usage{ get_model_matrix(model, newdata) \method{get_model_matrix}{default}(model, newdata) } \arguments{ \item{model}{Model object} \item{newdata}{Grid of predictor values at which we evaluate the slopes. \itemize{ \item Warning: Please avoid modifying your dataset between fitting the model and calling a \code{marginaleffects} function. This can sometimes lead to unexpected results. \item \code{NULL} (default): Unit-level slopes for each observed value in the dataset (empirical distribution). The dataset is retrieved using \code{\link[insight:get_data]{insight::get_data()}}, which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. \item \code{\link[=datagrid]{datagrid()}} call to specify a custom grid of regressors. For example: \itemize{ \item \code{newdata = datagrid(cyl = c(4, 6))}: \code{cyl} variable equal to 4 and 6 and other regressors fixed at their means or modes. \item See the Examples section and the \code{\link[=datagrid]{datagrid()}} documentation. } \item string: \itemize{ \item "mean": Marginal Effects at the Mean. Slopes when each predictor is held at its mean or mode. \item "median": Marginal Effects at the Median. Slopes when each predictor is held at its median or mode. \item "marginalmeans": Marginal Effects at Marginal Means. See Details section below. \item "tukey": Marginal Effects at Tukey's 5 numbers. 
\item "grid": Marginal Effects on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). } }} } \description{ Get a named model matrix } \keyword{internal} marginaleffects/man/plot_slopes.Rd0000644000176200001440000002126014557277362017034 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_slopes.R \name{plot_slopes} \alias{plot_slopes} \title{Plot Conditional or Marginal Slopes} \usage{ plot_slopes( model, variables = NULL, condition = NULL, by = NULL, newdata = NULL, type = "response", vcov = NULL, conf_level = 0.95, wts = NULL, slope = "dydx", rug = FALSE, gray = FALSE, draw = TRUE, ... ) } \arguments{ \item{model}{Model object} \item{variables}{Name of the variable whose marginal effect (slope) we want to plot on the y-axis.} \item{condition}{Conditional slopes \itemize{ \item Character vector (max length 4): Names of the predictors to display. \item Named list (max length 4): List names correspond to predictors. List elements can be: \itemize{ \item Numeric vector \item Function which returns a numeric vector or a set of unique categorical values \item Shortcut strings for common reference values: "minmax", "quartile", "threenum" } \item 1: x-axis. 2: color/shape. 3: facet (wrap if no fourth variable, otherwise cols of grid). 4: facet (rows of grid). \item Numeric variables in positions 2 and 3 are summarized by Tukey's five numbers \code{?stats::fivenum}. }} \item{by}{Aggregate unit-level estimates (aka, marginalize, average over). Valid inputs: \itemize{ \item \code{FALSE}: return the original unit-level estimates. \item \code{TRUE}: aggregate estimates for each term. \item Character vector of column names in \code{newdata} or in the data frame produced by calling the function without the \code{by} argument. 
\item Data frame with a \code{by} column of group labels, and merging columns shared by \code{newdata} or the data frame produced by calling the same function without the \code{by} argument. \item See examples below. \item For more complex aggregations, you can use the \code{FUN} argument of the \code{hypotheses()} function. See that function's documentation and the Hypothesis Test vignettes on the \code{marginaleffects} website. }} \item{newdata}{When \code{newdata} is \code{NULL}, the grid is determined by the \code{condition} argument. When \code{newdata} is not \code{NULL}, the argument behaves in the same way as in the \code{slopes()} function.} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. 
} \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{conf_level}{numeric value between 0 and 1. Confidence level to use to build a confidence interval.} \item{wts}{string or numeric: weights to use when computing average contrasts or slopes. These weights only affect the averaging in \verb{avg_*()} or with the \code{by} argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the \code{weighted.mean()} function. \itemize{ \item string: column name of the weights variable in \code{newdata}. When supplying a column name to \code{wts}, it is recommended to supply the original data (including the weights variable) explicitly to \code{newdata}. \item numeric: vector of length equal to the number of rows in the original data or in \code{newdata} (if supplied). }} \item{slope}{string indicates the type of slope or (semi-)elasticity to compute: \itemize{ \item "dydx": dY/dX \item "eyex": dY/dX * X / Y \item "eydx": dY/dX / Y \item "dyex": dY/dX * X \item Y is the predicted value of the outcome; X is the observed value of the predictor. }} \item{rug}{TRUE displays tick marks on the axes to mark the distribution of raw data.} \item{gray}{\code{TRUE}: grayscale plot. \code{FALSE} (default): color plot.} \item{draw}{\code{TRUE} returns a \code{ggplot2} plot. \code{FALSE} returns a \code{data.frame} of the underlying data.} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package. These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. 
See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A \code{ggplot2} object } \description{ Plot slopes on the y-axis against values of one or more predictors (x-axis, colors/shapes, and facets). The \code{by} argument is used to plot marginal slopes, that is, slopes made on the original data, but averaged by subgroups. This is analogous to using the \code{by} argument in the \code{slopes()} function. The \code{condition} argument is used to plot conditional slopes, that is, slopes computed on a user-specified grid. This is analogous to using the \code{newdata} argument and \code{datagrid()} function in a \code{slopes()} call. All variables whose values are not specified explicitly are treated as usual by \code{datagrid()}, that is, they are held at their mean or mode (or rounded mean for integers). This includes grouping variables in mixed-effects models, so analysts who fit such models may want to specify the groups of interest using the \code{condition} argument, or supply model-specific arguments to compute population-level estimates. See details below. See the "Plots" vignette and website for tutorials and information on how to customize plots: \itemize{ \item https://marginaleffects.com/vignettes/plot.html \item https://marginaleffects.com } } \section{Model-Specific Arguments}{ Some model types allow model-specific arguments to modify the nature of marginal effects, predictions, marginal means, and contrasts. Please report other package-specific \code{predict()} arguments on Github so we can add them to the table below. 
https://github.com/vincentarelbundock/marginaleffects/issues\tabular{llll}{ Package \tab Class \tab Argument \tab Documentation \cr \code{brms} \tab \code{brmsfit} \tab \code{ndraws} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \tab \tab \code{re_formula} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \code{lme4} \tab \code{merMod} \tab \code{re.form} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \tab \tab \code{allow.new.levels} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \code{glmmTMB} \tab \code{glmmTMB} \tab \code{re.form} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{allow.new.levels} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{zitype} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \code{mgcv} \tab \code{bam} \tab \code{exclude} \tab \link[mgcv:predict.bam]{mgcv::predict.bam} \cr \code{robustlmm} \tab \code{rlmerMod} \tab \code{re.form} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \tab \tab \code{allow.new.levels} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \code{MCMCglmm} \tab \code{MCMCglmm} \tab \code{ndraws} \tab \cr } } \examples{ library(marginaleffects) mod <- lm(mpg ~ hp * drat * factor(am), data = mtcars) plot_slopes(mod, variables = "hp", condition = "drat") plot_slopes(mod, variables = "hp", condition = c("drat", "am")) plot_slopes(mod, variables = "hp", condition = list("am", "drat" = 3:5)) plot_slopes(mod, variables = "am", condition = list("hp", "drat" = range)) plot_slopes(mod, variables = "am", condition = list("hp", "drat" = "threenum")) } marginaleffects/man/expect_margins.Rd0000644000176200001440000000051414541720224017457 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tinytest.R \name{expect_margins} \alias{expect_margins} \title{\code{tinytest} helper} \usage{ 
expect_margins( results, margins_object, se = TRUE, tolerance = 1e-05, verbose = FALSE ) } \description{ \code{tinytest} helper } \keyword{internal} marginaleffects/man/marginal_means.Rd0000644000176200001440000000100614560035476017432 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprecated.R \name{marginal_means} \alias{marginal_means} \title{Deprecated function} \usage{ marginal_means( model, variables = NULL, newdata = NULL, vcov = TRUE, conf_level = 0.95, type = NULL, transform = NULL, cross = FALSE, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, wts = "equal", by = NULL, numderiv = "fdforward", ... ) } \description{ Deprecated function } \keyword{internal} marginaleffects/man/expect_slopes.Rd0000644000176200001440000000045214541720224017325 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tinytest.R \name{expect_slopes} \alias{expect_slopes} \title{\code{tinytest} helper} \usage{ expect_slopes(object, n_unique = NULL, pct_na = 5, se = TRUE, ...) } \description{ \code{tinytest} helper } \keyword{internal} marginaleffects/man/deltamethod.Rd0000644000176200001440000000036014560035476016751 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprecated.R \name{deltamethod} \alias{deltamethod} \title{Deprecated function} \usage{ deltamethod(...) 
} \description{ Deprecated function } \keyword{internal} marginaleffects/man/posterior_draws.Rd0000644000176200001440000000165414541720224017703 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_draws.R \name{posterior_draws} \alias{posterior_draws} \title{Extract Posterior Draws or Bootstrap Resamples from \code{marginaleffects} Objects} \usage{ posterior_draws(x, shape = "long") } \arguments{ \item{x}{An object produced by a \code{marginaleffects} package function, such as \code{predictions()}, \code{avg_slopes()}, \code{hypotheses()}, etc.} \item{shape}{string indicating the shape of the output format: \itemize{ \item "long": long format data frame \item "DxP": Matrix with draws as rows and parameters as columns \item "PxD": Matrix with parameters as rows and draws as columns \item "rvar": Random variable datatype (see \code{posterior} package documentation). }} } \value{ A data.frame with \code{drawid} and \code{draw} columns. } \description{ Extract Posterior Draws or Bootstrap Resamples from \code{marginaleffects} Objects } marginaleffects/man/marginalmeans.Rd0000644000176200001440000000100314560035476017270 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprecated.R \name{marginalmeans} \alias{marginalmeans} \title{Deprecated function} \usage{ marginalmeans( model, variables = NULL, newdata = NULL, vcov = TRUE, conf_level = 0.95, type = NULL, transform = NULL, cross = FALSE, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, wts = "equal", by = NULL, numderiv = "fdforward", ... 
) } \description{ Deprecated function } \keyword{internal} marginaleffects/man/comparisons.Rd0000644000176200001440000007134714554104515017023 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/comparisons.R \name{comparisons} \alias{comparisons} \alias{avg_comparisons} \title{Comparisons Between Predictions Made With Different Regressor Values} \usage{ comparisons( model, newdata = NULL, variables = NULL, comparison = "difference", type = NULL, vcov = TRUE, by = FALSE, conf_level = 0.95, transform = NULL, cross = FALSE, wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ... ) avg_comparisons( model, newdata = NULL, variables = NULL, type = NULL, vcov = TRUE, by = TRUE, conf_level = 0.95, comparison = "difference", transform = NULL, cross = FALSE, wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ... ) } \arguments{ \item{model}{Model object} \item{newdata}{Grid of predictor values at which we evaluate the comparisons. \itemize{ \item Warning: Please avoid modifying your dataset between fitting the model and calling a \code{marginaleffects} function. This can sometimes lead to unexpected results. \item \code{NULL} (default): Unit-level contrasts for each observed value in the dataset (empirical distribution). The dataset is retrieved using \code{\link[insight:get_data]{insight::get_data()}}, which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. \item data frame: Unit-level contrasts for each row of the \code{newdata} data frame. \item string: \itemize{ \item "mean": Contrasts at the Mean. Contrasts when each predictor is held at its mean or mode. \item "median": Contrasts at the Median. Contrasts when each predictor is held at its median or mode. \item "marginalmeans": Contrasts at Marginal Means. 
\item "tukey": Contrasts at Tukey's 5 numbers. \item "grid": Contrasts on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). } \item \code{\link[=datagrid]{datagrid()}} call to specify a custom grid of regressors. For example: \itemize{ \item \code{newdata = datagrid(cyl = c(4, 6))}: \code{cyl} variable equal to 4 and 6 and other regressors fixed at their means or modes. \item \code{newdata = datagrid(mpg = fivenum)}: \code{mpg} variable held at Tukey's five numbers (using the \code{fivenum} function), and other regressors fixed at their means or modes. \item See the Examples section and the \link{datagrid} documentation. } }} \item{variables}{Focal variables \itemize{ \item \code{NULL}: compute comparisons for all the variables in the model object (can be slow). \item Character vector: subset of variables (usually faster). \item Named list: names identify the subset of variables of interest, and values define the type of contrast to compute. Acceptable values depend on the variable type: \itemize{ \item Factor or character variables: \itemize{ \item "reference": Each factor level is compared to the factor reference (base) level \item "all": All combinations of observed levels \item "sequential": Each factor level is compared to the previous factor level \item "pairwise": Each factor level is compared to all other levels \item "minmax": The highest and lowest levels of a factor. \item "revpairwise", "revreference", "revsequential": inverse of the corresponding hypotheses. \item Vector of length 2 with the two values to compare. \item Data frame with the same number of rows as \code{newdata}, with two columns of "lo" and "hi" values to compare. \item Function that accepts a vector and returns a data frame with two columns of "lo" and "hi" values to compare. See examples below. 
} \item Logical variables: \itemize{ \item NULL: contrast between TRUE and FALSE \item Data frame with the same number of rows as \code{newdata}, with two columns of "lo" and "hi" values to compare. \item Function that accepts a vector and returns a data frame with two columns of "lo" and "hi" values to compare. See examples below. } \item Numeric variables: \itemize{ \item Numeric of length 1: Forward contrast for a gap of \code{x}, computed between the observed value and the observed value plus \code{x}. Users can set a global option to get a "center" or "backward" contrast instead: \code{options(marginaleffects_contrast_direction="center")} \item Numeric vector of length 2: Contrast between the largest and the smallest elements of the \code{x} vector. \item Data frame with the same number of rows as \code{newdata}, with two columns of "lo" and "hi" values to compare. \item Function that accepts a vector and returns a data frame with two columns of "lo" and "hi" values to compare. See examples below. \item "iqr": Contrast across the interquartile range of the regressor. \item "sd": Contrast across one standard deviation around the regressor mean. \item "2sd": Contrast across two standard deviations around the regressor mean. \item "minmax": Contrast between the maximum and the minimum values of the regressor. } \item Examples: \itemize{ \item \code{variables = list(gear = "pairwise", hp = 10)} \item \code{variables = list(gear = "sequential", hp = c(100, 120))} \item \verb{variables = list(hp = \\(x) data.frame(low = x - 5, high = x + 10))} \item See the Examples section below for more. } } }} \item{comparison}{How should pairs of predictions be compared? Difference, ratio, odds ratio, or user-defined functions. \itemize{ \item string: shortcuts to common contrast functions. 
\itemize{ \item Supported shortcuts strings: difference, differenceavg, differenceavgwts, dydx, eyex, eydx, dyex, dydxavg, eyexavg, eydxavg, dyexavg, dydxavgwts, eyexavgwts, eydxavgwts, dyexavgwts, ratio, ratioavg, ratioavgwts, lnratio, lnratioavg, lnratioavgwts, lnor, lnoravg, lnoravgwts, lift, liftavg, expdydx, expdydxavg, expdydxavgwts \item See the Comparisons section below for definitions of each transformation. } \item function: accept two equal-length numeric vectors of adjusted predictions (\code{hi} and \code{lo}) and returns a vector of contrasts of the same length, or a unique numeric value. \itemize{ \item See the Transformations section below for examples of valid functions. } }} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. 
} \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{by}{Aggregate unit-level estimates (aka, marginalize, average over). Valid inputs: \itemize{ \item \code{FALSE}: return the original unit-level estimates. \item \code{TRUE}: aggregate estimates for each term. \item Character vector of column names in \code{newdata} or in the data frame produced by calling the function without the \code{by} argument. \item Data frame with a \code{by} column of group labels, and merging columns shared by \code{newdata} or the data frame produced by calling the same function without the \code{by} argument. \item See examples below. \item For more complex aggregations, you can use the \code{FUN} argument of the \code{hypotheses()} function. See that function's documentation and the Hypothesis Test vignettes on the \code{marginaleffects} website. }} \item{conf_level}{numeric value between 0 and 1. Confidence level to use to build a confidence interval.} \item{transform}{string or function. Transformation applied to unit-level estimates and confidence intervals just before the function returns results. Functions must accept a vector and return a vector of the same length. Support string shortcuts: "exp", "ln"} \item{cross}{\itemize{ \item \code{FALSE}: Contrasts represent the change in adjusted predictions when one predictor changes and all other variables are held constant. \item \code{TRUE}: Contrasts represent the changes in adjusted predictions when all the predictors specified in the \code{variables} argument are manipulated simultaneously (a "cross-contrast"). }} \item{wts}{string or numeric: weights to use when computing average contrasts or slopes. 
These weights only affect the averaging in \verb{avg_*()} or with the \code{by} argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the \code{weighted.mean()} function. \itemize{ \item string: column name of the weights variable in \code{newdata}. When supplying a column name to \code{wts}, it is recommended to supply the original data (including the weights variable) explicitly to \code{newdata}. \item numeric: vector of length equal to the number of rows in the original data or in \code{newdata} (if supplied). }} \item{hypothesis}{specify a hypothesis test or custom contrast using a numeric value, vector, or matrix, a string, or a string formula. \itemize{ \item Numeric: \itemize{ \item Single value: the null hypothesis used in the computation of Z and p (before applying \code{transform}). \item Vector: Weights to compute a linear combination of (custom contrast between) estimates. Length equal to the number of rows generated by the same function call, but without the \code{hypothesis} argument. \item Matrix: Each column is a vector of weights, as describe above, used to compute a distinct linear combination of (contrast between) estimates. The column names of the matrix are used as labels in the output. } \item String formula to specify linear or non-linear hypothesis tests. If the \code{term} column uniquely identifies rows, terms can be used in the formula. Otherwise, use \code{b1}, \code{b2}, etc. to identify the position of each parameter. The \verb{b*} wildcard can be used to test hypotheses on all estimates. Examples: \itemize{ \item \code{hp = drat} \item \code{hp + drat = 12} \item \code{b1 + b2 + b3 = 0} \item \verb{b* / b1 = 1} } \item String: \itemize{ \item "pairwise": pairwise differences between estimates in each row. \item "reference": differences between the estimates in each row and the estimate in the first row. 
\item "sequential": difference between an estimate and the estimate in the next row. \item "revpairwise", "revreference", "revsequential": inverse of the corresponding hypotheses, as described above. } \item See the Examples section below and the vignette: https://marginaleffects.com/vignettes/hypothesis.html }} \item{equivalence}{Numeric vector of length 2: bounds used for the two-one-sided test (TOST) of equivalence, and for the non-inferiority and non-superiority tests. See Details section below.} \item{p_adjust}{Adjust p-values for multiple comparisons: "holm", "hochberg", "hommel", "bonferroni", "BH", "BY", or "fdr". See \link[stats:p.adjust]{stats::p.adjust}} \item{df}{Degrees of freedom used to compute p values and confidence intervals. A single numeric value between 1 and \code{Inf}. When \code{df} is \code{Inf}, the normal distribution is used. When \code{df} is finite, the \code{t} distribution is used. See \link[insight:get_df]{insight::get_df} for a convenient function to extract degrees of freedom. Ex: \code{slopes(model, df = insight::get_df(model))}} \item{eps}{NULL or numeric value which determines the step size to use when calculating numerical derivatives: (f(x+eps)-f(x))/eps. When \code{eps} is \code{NULL}, the step size is 0.0001 multiplied by the difference between the maximum and minimum values of the variable with respect to which we are taking the derivative. Changing \code{eps} may be necessary to avoid numerical problems in certain models.} \item{numderiv}{string or list of strings indicating the method to use for the numeric differentiation used to compute delta method standard errors. 
\itemize{ \item "fdforward": finite difference method with forward differences (default) \item "fdcenter": finite difference method with central differences \item "richardson": Richardson extrapolation method \item Extra arguments can be specified by passing a list to the \code{numDeriv} argument, with the name of the method first and named arguments following, ex: \code{numderiv=list("fdcenter", eps = 1e-5)}. When an unknown argument is used, \code{marginaleffects} prints the list of valid arguments for each method. }} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package. These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A \code{data.frame} with one row per observation (per term/group) and several columns: \itemize{ \item \code{rowid}: row number of the \code{newdata} data frame \item \code{type}: prediction type, as defined by the \code{type} argument \item \code{group}: (optional) value of the grouped outcome (e.g., categorical outcome models) \item \code{term}: the variable whose marginal effect is computed \item \code{dydx}: slope of the outcome with respect to the term, for a given combination of predictor values \item \code{std.error}: standard errors computed via the delta method. \item \code{p.value}: p value associated with the \code{estimate} column. The null is determined by the \code{hypothesis} argument (0 by default), and p values are computed before applying the \code{transform} argument. \item \code{s.value}: Shannon information transforms of p values. 
How many consecutive "heads" tosses would provide the same amount of evidence (or "surprise") against the null hypothesis that the coin is fair? The purpose of S is to calibrate the analyst's intuition about the strength of evidence encoded in p against a well-known physical phenomenon. See Greenland (2019) and Cole et al. (2020). \item \code{conf.low}: lower bound of the confidence interval (or equal-tailed interval for bayesian models) \item \code{conf.high}: upper bound of the confidence interval (or equal-tailed interval for bayesian models) } See \code{?print.marginaleffects} for printing options. } \description{ Predict the outcome variable at different regressor values (e.g., college graduates vs. others), and compare those predictions by computing a difference, ratio, or some other function. \code{comparisons()} can return many quantities of interest, such as contrasts, differences, risk ratios, changes in log odds, lift, slopes, elasticities, etc. \itemize{ \item \code{comparisons()}: unit-level (conditional) estimates. \item \code{avg_comparisons()}: average (marginal) estimates. } \code{variables} identifies the focal regressors whose "effect" we are interested in. \code{comparison} determines how predictions with different regressor values are compared (difference, ratio, odds, etc.). The \code{newdata} argument and the \code{datagrid()} function control where statistics are evaluated in the predictor space: "at observed values", "at the mean", "at representative values", etc. See the comparisons vignette and package website for worked examples and case studies: \itemize{ \item \url{https://marginaleffects.com/vignettes/comparisons.html} \item \url{https://marginaleffects.com/} } } \section{Functions}{ \itemize{ \item \code{avg_comparisons()}: Average comparisons }} \section{Standard errors using the delta method}{ Standard errors for all quantities estimated by \code{marginaleffects} can be obtained via the delta method. 
This requires differentiating a function with respect to the coefficients in the model using a finite difference approach. In some models, the delta method standard errors can be sensitive to various aspects of the numeric differentiation strategy, including the step size. By default, the step size is set to \code{1e-8}, or to \code{1e-4} times the smallest absolute model coefficient, whichever is largest. \code{marginaleffects} can delegate numeric differentiation to the \code{numDeriv} package, which allows more flexibility. To do this, users can pass arguments to the \code{numDeriv::jacobian} function through a global option. For example: \itemize{ \item \code{options(marginaleffects_numDeriv = list(method = "simple", method.args = list(eps = 1e-6)))} \item \code{options(marginaleffects_numDeriv = list(method = "Richardson", method.args = list(eps = 1e-5)))} \item \code{options(marginaleffects_numDeriv = NULL)} } See the "Standard Errors and Confidence Intervals" vignette on the \code{marginaleffects} website for more details on the computation of standard errors: https://marginaleffects.com/vignettes/uncertainty.html Note that the \code{inferences()} function can be used to compute uncertainty estimates using a bootstrap or simulation-based inference. See the vignette: https://marginaleffects.com/vignettes/bootstrap.html } \section{Model-Specific Arguments}{ Some model types allow model-specific arguments to modify the nature of marginal effects, predictions, marginal means, and contrasts. Please report other package-specific \code{predict()} arguments on Github so we can add them to the table below. 
https://github.com/vincentarelbundock/marginaleffects/issues\tabular{llll}{ Package \tab Class \tab Argument \tab Documentation \cr \code{brms} \tab \code{brmsfit} \tab \code{ndraws} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \tab \tab \code{re_formula} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \code{lme4} \tab \code{merMod} \tab \code{re.form} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \tab \tab \code{allow.new.levels} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \code{glmmTMB} \tab \code{glmmTMB} \tab \code{re.form} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{allow.new.levels} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{zitype} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \code{mgcv} \tab \code{bam} \tab \code{exclude} \tab \link[mgcv:predict.bam]{mgcv::predict.bam} \cr \code{robustlmm} \tab \code{rlmerMod} \tab \code{re.form} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \tab \tab \code{allow.new.levels} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \code{MCMCglmm} \tab \code{MCMCglmm} \tab \code{ndraws} \tab \cr } } \section{comparison argument functions}{ The following transformations can be applied by supplying one of the shortcut strings to the \code{comparison} argument. \code{hi} is a vector of adjusted predictions for the "high" side of the contrast. \code{lo} is a vector of adjusted predictions for the "low" side of the contrast. \code{y} is a vector of adjusted predictions for the original data. \code{x} is the predictor in the original data. 
\code{eps} is the step size to use to compute derivatives and elasticities.\tabular{ll}{ Shortcut \tab Function \cr difference \tab \(hi, lo) hi - lo \cr differenceavg \tab \(hi, lo) mean(hi - lo) \cr dydx \tab \(hi, lo, eps) (hi - lo)/eps \cr eyex \tab \(hi, lo, eps, y, x) (hi - lo)/eps * (x/y) \cr eydx \tab \(hi, lo, eps, y, x) ((hi - lo)/eps)/y \cr dyex \tab \(hi, lo, eps, x) ((hi - lo)/eps) * x \cr dydxavg \tab \(hi, lo, eps) mean((hi - lo)/eps) \cr eyexavg \tab \(hi, lo, eps, y, x) mean((hi - lo)/eps * (x/y)) \cr eydxavg \tab \(hi, lo, eps, y, x) mean(((hi - lo)/eps)/y) \cr dyexavg \tab \(hi, lo, eps, x) mean(((hi - lo)/eps) * x) \cr ratio \tab \(hi, lo) hi/lo \cr ratioavg \tab \(hi, lo) mean(hi)/mean(lo) \cr lnratio \tab \(hi, lo) log(hi/lo) \cr lnratioavg \tab \(hi, lo) log(mean(hi)/mean(lo)) \cr lnor \tab \(hi, lo) log((hi/(1 - hi))/(lo/(1 - lo))) \cr lnoravg \tab \(hi, lo) log((mean(hi)/(1 - mean(hi)))/(mean(lo)/(1 - mean(lo)))) \cr lift \tab \(hi, lo) (hi - lo)/lo \cr liftavg \tab \(hi, lo) (mean(hi - lo))/mean(lo) \cr expdydx \tab \(hi, lo, eps) ((exp(hi) - exp(lo))/exp(eps))/eps \cr expdydxavg \tab \(hi, lo, eps) mean(((exp(hi) - exp(lo))/exp(eps))/eps) \cr } } \section{Bayesian posterior summaries}{ By default, credible intervals in bayesian models are built as equal-tailed intervals. This can be changed to a highest density interval by setting a global option: \code{options("marginaleffects_posterior_interval" = "eti")} \code{options("marginaleffects_posterior_interval" = "hdi")} By default, the center of the posterior distribution in bayesian models is identified by the median. Users can use a different summary function by setting a global option: \code{options("marginaleffects_posterior_center" = "mean")} \code{options("marginaleffects_posterior_center" = "median")} When estimates are averaged using the \code{by} argument, the \code{tidy()} function, or the \code{summary()} function, the posterior distribution is marginalized twice over. 
First, we take the average \emph{across} units but \emph{within} each iteration of the MCMC chain, according to what the user requested in \code{by} argument or \code{tidy()/summary()} functions. Then, we identify the center of the resulting posterior using the function supplied to the \code{"marginaleffects_posterior_center"} option (the median by default). } \section{Equivalence, Inferiority, Superiority}{ \eqn{\theta} is an estimate, \eqn{\sigma_\theta} its estimated standard error, and \eqn{[a, b]} are the bounds of the interval supplied to the \code{equivalence} argument. Non-inferiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \leq a}{\theta <= a} \item \eqn{H_1}{H1}: \eqn{\theta > a} \item \eqn{t=(\theta - a)/\sigma_\theta}{t=(\theta - a)/\sigma_\theta} \item p: Upper-tail probability } Non-superiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \geq b}{\theta >= b} \item \eqn{H_1}{H1}: \eqn{\theta < b} \item \eqn{t=(\theta - b)/\sigma_\theta}{t=(\theta - b)/\sigma_\theta} \item p: Lower-tail probability } Equivalence: Two One-Sided Tests (TOST) \itemize{ \item p: Maximum of the non-inferiority and non-superiority p values. } Thanks to Russell V. Lenth for the excellent \code{emmeans} package and documentation which inspired this feature. } \section{Prediction types}{ The \code{type} argument determines the scale of the predictions used to compute quantities of interest with functions from the \code{marginaleffects} package. Admissible values for \code{type} depend on the model object. When users specify an incorrect value for \code{type}, \code{marginaleffects} will raise an informative error with a list of valid \code{type} values for the specific model object. The first entry in the list in that error message is the default type. The \code{invlink(link)} is a special type defined by \code{marginaleffects}. It is available for some (but not all) models and functions. 
With this link type, we first compute predictions on the link scale, then we use the inverse link function to backtransform the predictions to the response scale. This is useful for models with non-linear link functions as it can ensure that confidence intervals stay within desirable bounds, ex: 0 to 1 for a logit model. Note that an average of estimates with \code{type="invlink(link)"} will not always be equivalent to the average of estimates with \code{type="response"}. Some of the most common \code{type} values are: response, link, E, Ep, average, class, conditional, count, cum.prob, cumprob, density, detection, disp, ev, expected, expvalue, fitted, invlink(link), latent, latent_N, linear.predictor, linpred, location, lp, mean, numeric, p, ppd, pr, precision, prediction, prob, probability, probs, quantile, risk, scale, survival, unconditional, utility, variance, xb, zero, zlink, zprob } \examples{ \dontshow{if (interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \dontshow{\}) # examplesIf} library(marginaleffects) # Linear model tmp <- mtcars tmp$am <- as.logical(tmp$am) mod <- lm(mpg ~ am + factor(cyl), tmp) avg_comparisons(mod, variables = list(cyl = "reference")) avg_comparisons(mod, variables = list(cyl = "sequential")) avg_comparisons(mod, variables = list(cyl = "pairwise")) # GLM with different scale types mod <- glm(am ~ factor(gear), data = mtcars) avg_comparisons(mod, type = "response") avg_comparisons(mod, type = "link") # Contrasts at the mean comparisons(mod, newdata = "mean") # Contrasts between marginal means comparisons(mod, newdata = "marginalmeans") # Contrasts at user-specified values comparisons(mod, newdata = datagrid(am = 0, gear = tmp$gear)) comparisons(mod, newdata = datagrid(am = unique, gear = max)) m <- lm(mpg ~ hp + drat + factor(cyl) + factor(am), data = mtcars) comparisons(m, variables = "hp", newdata = datagrid(FUN_factor = unique, FUN_numeric = 
median)) # Numeric contrasts mod <- lm(mpg ~ hp, data = mtcars) avg_comparisons(mod, variables = list(hp = 1)) avg_comparisons(mod, variables = list(hp = 5)) avg_comparisons(mod, variables = list(hp = c(90, 100))) avg_comparisons(mod, variables = list(hp = "iqr")) avg_comparisons(mod, variables = list(hp = "sd")) avg_comparisons(mod, variables = list(hp = "minmax")) # using a function to specify a custom difference in one regressor dat <- mtcars dat$new_hp <- 49 * (dat$hp - min(dat$hp)) / (max(dat$hp) - min(dat$hp)) + 1 modlog <- lm(mpg ~ log(new_hp) + factor(cyl), data = dat) fdiff <- \(x) data.frame(x, x + 10) avg_comparisons(modlog, variables = list(new_hp = fdiff)) # Adjusted Risk Ratio: see the contrasts vignette mod <- glm(vs ~ mpg, data = mtcars, family = binomial) avg_comparisons(mod, comparison = "lnratioavg", transform = exp) # Adjusted Risk Ratio: Manual specification of the `comparison` avg_comparisons( mod, comparison = function(hi, lo) log(mean(hi) / mean(lo)), transform = exp) # cross contrasts mod <- lm(mpg ~ factor(cyl) * factor(gear) + hp, data = mtcars) avg_comparisons(mod, variables = c("cyl", "gear"), cross = TRUE) # variable-specific contrasts avg_comparisons(mod, variables = list(gear = "sequential", hp = 10)) # hypothesis test: is the `wt` marginal effect at the mean equal to the `drat` marginal effect mod <- lm(mpg ~ wt + drat, data = mtcars) comparisons( mod, newdata = "mean", hypothesis = "wt = drat") # same hypothesis test using row indices comparisons( mod, newdata = "mean", hypothesis = "b1 - b2 = 0") # same hypothesis test using numeric vector of weights comparisons( mod, newdata = "mean", hypothesis = c(1, -1)) # two custom contrasts using a matrix of weights lc <- matrix(c( 1, -1, 2, 3), ncol = 2) comparisons( mod, newdata = "mean", hypothesis = lc) # Effect of a 1 group-wise standard deviation change # First we calculate the SD in each group of `cyl` # Second, we use that SD as the treatment size in the `variables` argument 
library(dplyr) mod <- lm(mpg ~ hp + factor(cyl), mtcars) tmp <- mtcars \%>\% group_by(cyl) \%>\% mutate(hp_sd = sd(hp)) avg_comparisons(mod, variables = list(hp = function(x) data.frame(x, x + tmp$hp_sd)), by = "cyl") # `by` argument mod <- lm(mpg ~ hp * am * vs, data = mtcars) comparisons(mod, by = TRUE) mod <- lm(mpg ~ hp * am * vs, data = mtcars) avg_comparisons(mod, variables = "hp", by = c("vs", "am")) library(nnet) mod <- multinom(factor(gear) ~ mpg + am * vs, data = mtcars, trace = FALSE) by <- data.frame( group = c("3", "4", "5"), by = c("3,4", "3,4", "5")) comparisons(mod, type = "probs", by = by) } \references{ \itemize{ \item Greenland S. 2019. "Valid P-Values Behave Exactly as They Should: Some Misleading Criticisms of P-Values and Their Resolution With S-Values." The American Statistician. 73(S1): 106–114. \item Cole, Stephen R, Jessie K Edwards, and Sander Greenland. 2020. "Surprise!" American Journal of Epidemiology 190 (2): 191–93. https://doi.org/10.1093/aje/kwaa136 } } marginaleffects/man/get_varcov_args.Rd0000644000176200001440000000063614541720224017627 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_vcov.R \name{get_varcov_args} \alias{get_varcov_args} \title{Take a \code{summary()} style \code{vcov} argument and convert it to \code{insight::get_varcov()}} \usage{ get_varcov_args(model, vcov) } \description{ Take a \code{summary()} style \code{vcov} argument and convert it to \code{insight::get_varcov()} } \keyword{internal} marginaleffects/man/inferences.Rd0000644000176200001440000001243114560042044016567 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/inferences.R \name{inferences} \alias{inferences} \title{(EXPERIMENTAL) Bootstrap, Conformal, and Simulation-Based Inference} \usage{ inferences( x, method, R = 1000, conf_type = "perc", conformal_test = NULL, conformal_calibration = NULL, conformal_score = "residual_abs", ... 
) } \arguments{ \item{x}{Object produced by one of the core \code{marginaleffects} functions.} \item{method}{String \itemize{ \item "delta": delta method standard errors \item "boot": bootstrap using the \code{boot} package \item "fwb": fractional weighted bootstrap \item "rsample": bootstrap using the \code{rsample} package \item "simulation": simulation-based inference with draws from a multivariate normal distribution (Krinsky & Robb, 1986) \item "mi": multiple imputation for missing data \item "conformal_split": prediction intervals using split conformal prediction (see Angelopoulos & Bates, 2022) \item "conformal_cv+": prediction intervals using cross-validation+ conformal prediction (see Barber et al., 2020) }} \item{R}{Number of resamples, simulations, or cross-validation folds.} \item{conf_type}{String: type of bootstrap interval to construct. \itemize{ \item \code{boot}: "perc", "norm", "basic", or "bca" \item \code{fwb}: "perc", "norm", "basic", "bc", or "bca" \item \code{rsample}: "perc" or "bca" \item \code{simulation}: argument ignored. }} \item{conformal_test}{Data frame of test data for conformal prediction.} \item{conformal_calibration}{Data frame of calibration data for split conformal prediction (\verb{method="conformal_split"}).} \item{conformal_score}{String. Warning: The \code{type} argument in \code{predictions()} must generate predictions which are on the same scale as the outcome variable. Typically, this means that \code{type} must be "response" or "probs". \itemize{ \item "residual_abs" or "residual_sq" for regression tasks (numeric outcome) \item "softmax" for classification tasks (when \code{predictions()} returns a \code{group} column, such as multinomial or ordinal logit models). }} \item{...}{\itemize{ \item If \code{method="boot"}, additional arguments are passed to \code{boot::boot()}. \item If \code{method="fwb"}, additional arguments are passed to \code{fwb::fwb()}. \item If \code{method="rsample"}, additional arguments are passed to \code{rsample::bootstraps()}. \item Additional arguments are ignored for all other methods. 
}} } \value{ A \code{marginaleffects} object with simulation or bootstrap resamples and objects attached. } \description{ Warning: This function is experimental. It may be renamed, the user interface may change, or the functionality may migrate to arguments in other \code{marginaleffects} functions. Apply this function to a \code{marginaleffects} object to change the inferential method used to compute uncertainty estimates. } \details{ When \code{method="simulation"}, we conduct simulation-based inference following the method discussed in Krinsky & Robb (1986): \enumerate{ \item Draw \code{R} sets of simulated coefficients from a multivariate normal distribution with mean equal to the original model's estimated coefficients and variance equal to the model's variance-covariance matrix (classical, "HC3", or other). \item Use the \code{R} sets of coefficients to compute \code{R} sets of estimands: predictions, comparisons, slopes, or hypotheses. \item Take quantiles of the resulting distribution of estimands to obtain a confidence interval and the standard deviation of simulated estimates to estimate the standard error. } When \code{method="fwb"}, drawn weights are supplied to the model fitting function's \code{weights} argument; if the model doesn't accept non-integer weights, this method should not be used. If weights were included in the original model fit, they are extracted by \code{\link[=weights]{weights()}} and multiplied by the drawn weights. These weights are supplied to the \code{wts} argument of the estimation function (e.g., \code{comparisons()}). } \section{References}{ Krinsky, I., and A. L. Robb. 1986. “On Approximating the Statistical Properties of Elasticities.” Review of Economics and Statistics 68 (4): 715–9. King, Gary, Michael Tomz, and Jason Wittenberg. "Making the most of statistical analyses: Improving interpretation and presentation." American journal of political science (2000): 347-361 Dowd, Bryan E., William H. Greene, and Edward C. 
Norton. "Computation of standard errors." Health services research 49.2 (2014): 731-750. Angelopoulos, Anastasios N., and Stephen Bates. 2022. "A Gentle Introduction to Conformal Prediction and Distribution-Free Uncertainty Quantification." arXiv. https://doi.org/10.48550/arXiv.2107.07511. Barber, Rina Foygel, Emmanuel J. Candes, Aaditya Ramdas, and Ryan J. Tibshirani. 2020. “Predictive Inference with the Jackknife+.” arXiv. http://arxiv.org/abs/1905.02928. } \examples{ \dontrun{ library(marginaleffects) library(magrittr) set.seed(1024) mod <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) # bootstrap avg_predictions(mod, by = "Species") \%>\% inferences(method = "boot") avg_predictions(mod, by = "Species") \%>\% inferences(method = "rsample") # Fractional (bayesian) bootstrap avg_slopes(mod, by = "Species") \%>\% inferences(method = "fwb") \%>\% posterior_draws("rvar") \%>\% data.frame() # Simulation-based inference slopes(mod) \%>\% inferences(method = "simulation") \%>\% head() } } marginaleffects/man/get_vcov.Rd0000644000176200001440000000776114543163156016305 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_vcov.R, R/methods_MCMCglmm.R, % R/methods_afex.R, R/methods_aod.R, R/methods_biglm.R, R/methods_brms.R, % R/methods_dbarts.R, R/methods_gamlss.R, R/methods_glmmTMB.R, % R/methods_inferences_simulation.R, R/methods_mhurdle.R, R/methods_mlr3.R, % R/methods_rms.R, R/methods_scam.R, R/methods_tidymodels.R \name{get_vcov} \alias{get_vcov} \alias{get_vcov.default} \alias{get_vcov.MCMCglmm} \alias{get_vcov.afex_aov} \alias{get_vcov.glimML} \alias{get_vcov.biglm} \alias{get_vcov.brmsfit} \alias{get_vcov.bart} \alias{get_vcov.gamlss} \alias{get_vcov.glmmTMB} \alias{get_vcov.inferences_simulation} \alias{get_vcov.mhurdle} \alias{get_vcov.Learner} \alias{get_vcov.orm} \alias{get_vcov.scam} \alias{get_vcov.model_fit} \alias{get_vcov.workflow} \title{Get a named variance-covariance matrix from a model object 
(internal function)} \usage{ get_vcov(model, ...) \method{get_vcov}{default}(model, vcov = NULL, ...) \method{get_vcov}{MCMCglmm}(model, vcov = NULL, ...) \method{get_vcov}{afex_aov}(model, vcov = NULL, ...) \method{get_vcov}{glimML}(model, vcov = NULL, ...) \method{get_vcov}{biglm}(model, vcov = NULL, ...) \method{get_vcov}{brmsfit}(model, vcov = NULL, ...) \method{get_vcov}{bart}(model, vcov = NULL, ...) \method{get_vcov}{gamlss}(model, ...) \method{get_vcov}{glmmTMB}(model, ...) \method{get_vcov}{inferences_simulation}(model, ...) \method{get_vcov}{mhurdle}(model, vcov = NULL, ...) \method{get_vcov}{Learner}(model, ...) \method{get_vcov}{orm}(model, vcov = NULL, ...) \method{get_vcov}{scam}(model, vcov = NULL, ...) \method{get_vcov}{model_fit}(model, type = NULL, ...) \method{get_vcov}{workflow}(model, type = NULL, ...) } \arguments{ \item{model}{Model object} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package.These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. 
See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. } \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} } \value{ A named square matrix of variance and covariances. The names must match the coefficient names. } \description{ Get a named variance-covariance matrix from a model object (internal function) } \keyword{internal} marginaleffects/man/plot_comparisons.Rd0000644000176200001440000002314314557277362020066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_comparisons.R \name{plot_comparisons} \alias{plot_comparisons} \title{Plot Conditional or Marginal Comparisons} \usage{ plot_comparisons( model, variables = NULL, condition = NULL, by = NULL, newdata = NULL, type = "response", vcov = NULL, conf_level = 0.95, wts = NULL, comparison = "difference", transform = NULL, rug = FALSE, gray = FALSE, draw = TRUE, ... 
) } \arguments{ \item{model}{Model object} \item{variables}{Name of the variable whose contrast we want to plot on the y-axis.} \item{condition}{Conditional slopes \itemize{ \item Character vector (max length 4): Names of the predictors to display. \item Named list (max length 4): List names correspond to predictors. List elements can be: \itemize{ \item Numeric vector \item Function which returns a numeric vector or a set of unique categorical values \item Shortcut strings for common reference values: "minmax", "quartile", "threenum" } \item 1: x-axis. 2: color/shape. 3: facet (wrap if no fourth variable, otherwise cols of grid). 4: facet (rows of grid). \item Numeric variables in positions 2 and 3 are summarized by Tukey's five numbers \code{?stats::fivenum}. }} \item{by}{Aggregate unit-level estimates (aka, marginalize, average over). Valid inputs: \itemize{ \item \code{FALSE}: return the original unit-level estimates. \item \code{TRUE}: aggregate estimates for each term. \item Character vector of column names in \code{newdata} or in the data frame produced by calling the function without the \code{by} argument. \item Data frame with a \code{by} column of group labels, and merging columns shared by \code{newdata} or the data frame produced by calling the same function without the \code{by} argument. \item See examples below. \item For more complex aggregations, you can use the \code{FUN} argument of the \code{hypotheses()} function. See that function's documentation and the Hypothesis Test vignettes on the \code{marginaleffects} website. }} \item{newdata}{When \code{newdata} is \code{NULL}, the grid is determined by the \code{condition} argument. When \code{newdata} is not \code{NULL}, the argument behaves in the same way as in the \code{comparisons()} function.} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. 
This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. } \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{conf_level}{numeric value between 0 and 1. Confidence level to use to build a confidence interval.} \item{wts}{string or numeric: weights to use when computing average contrasts or slopes. These weights only affect the averaging in \verb{avg_*()} or with the \code{by} argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the \code{weighted.mean()} function. \itemize{ \item string: column name of the weights variable in \code{newdata}. 
When supplying a column name to \code{wts}, it is recommended to supply the original data (including the weights variable) explicitly to \code{newdata}. \item numeric: vector of length equal to the number of rows in the original data or in \code{newdata} (if supplied). }} \item{comparison}{How should pairs of predictions be compared? Difference, ratio, odds ratio, or user-defined functions. \itemize{ \item string: shortcuts to common contrast functions. \itemize{ \item Supported shortcuts strings: difference, differenceavg, differenceavgwts, dydx, eyex, eydx, dyex, dydxavg, eyexavg, eydxavg, dyexavg, dydxavgwts, eyexavgwts, eydxavgwts, dyexavgwts, ratio, ratioavg, ratioavgwts, lnratio, lnratioavg, lnratioavgwts, lnor, lnoravg, lnoravgwts, lift, liftavg, expdydx, expdydxavg, expdydxavgwts \item See the Comparisons section below for definitions of each transformation. } \item function: accept two equal-length numeric vectors of adjusted predictions (\code{hi} and \code{lo}) and returns a vector of contrasts of the same length, or a unique numeric value. \itemize{ \item See the Transformations section below for examples of valid functions. } }} \item{transform}{string or function. Transformation applied to unit-level estimates and confidence intervals just before the function returns results. Functions must accept a vector and return a vector of the same length. Support string shortcuts: "exp", "ln"} \item{rug}{TRUE displays tick marks on the axes to mark the distribution of raw data.} \item{gray}{FALSE grayscale or color plot} \item{draw}{\code{TRUE} returns a \code{ggplot2} plot. \code{FALSE} returns a \code{data.frame} of the underlying data.} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package.These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). 
Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A \code{ggplot2} object } \description{ Plot comparisons on the y-axis against values of one or more predictors (x-axis, colors/shapes, and facets). The \code{by} argument is used to plot marginal comparisons, that is, comparisons made on the original data, but averaged by subgroups. This is analogous to using the \code{by} argument in the \code{comparisons()} function. The \code{condition} argument is used to plot conditional comparisons, that is, comparisons made on a user-specified grid. This is analogous to using the \code{newdata} argument and \code{datagrid()} function in a \code{comparisons()} call. All variables whose values are not specified explicitly are treated as usual by \code{datagrid()}, that is, they are held at their mean or mode (or rounded mean for integers). This includes grouping variables in mixed-effects models, so analysts who fit such models may want to specify the groups of interest using the \code{condition} argument, or supply model-specific arguments to compute population-level estimates. See details below. See the "Plots" vignette and website for tutorials and information on how to customize plots: \itemize{ \item https://marginaleffects.com/vignettes/plot.html \item https://marginaleffects.com } } \section{Model-Specific Arguments}{ Some model types allow model-specific arguments to modify the nature of marginal effects, predictions, marginal means, and contrasts. Please report other package-specific \code{predict()} arguments on Github so we can add them to the table below. 
https://github.com/vincentarelbundock/marginaleffects/issues\tabular{llll}{ Package \tab Class \tab Argument \tab Documentation \cr \code{brms} \tab \code{brmsfit} \tab \code{ndraws} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \tab \tab \code{re_formula} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \code{lme4} \tab \code{merMod} \tab \code{re.form} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \tab \tab \code{allow.new.levels} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \code{glmmTMB} \tab \code{glmmTMB} \tab \code{re.form} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{allow.new.levels} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{zitype} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \code{mgcv} \tab \code{bam} \tab \code{exclude} \tab \link[mgcv:predict.bam]{mgcv::predict.bam} \cr \code{robustlmm} \tab \code{rlmerMod} \tab \code{re.form} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \tab \tab \code{allow.new.levels} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \code{MCMCglmm} \tab \code{MCMCglmm} \tab \code{ndraws} \tab \cr } } \examples{ mod <- lm(mpg ~ hp * drat * factor(am), data = mtcars) plot_comparisons(mod, variables = "hp", condition = "drat") plot_comparisons(mod, variables = "hp", condition = c("drat", "am")) plot_comparisons(mod, variables = "hp", condition = list("am", "drat" = 3:5)) plot_comparisons(mod, variables = "am", condition = list("hp", "drat" = range)) plot_comparisons(mod, variables = "am", condition = list("hp", "drat" = "threenum")) } marginaleffects/man/sanitize_model_specific.Rd0000644000176200001440000001362514554070071021333 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_aod.R, R/methods_betareg.R, % R/sanity_model.R, R/methods_brms.R, R/methods_dbarts.R, 
R/methods_glmmTMB.R, % R/methods_inferences_simulation.R, R/methods_mclogit.R, R/methods_mlogit.R, % R/methods_ordinal.R, R/methods_plm.R, R/methods_quantreg.R, % R/methods_survey.R \name{sanitize_model_specific.glimML} \alias{sanitize_model_specific.glimML} \alias{sanitize_model_specific.betareg} \alias{sanitize_model_specific} \alias{sanitize_model_specific.default} \alias{sanitize_model_specific.brmsfit} \alias{sanitize_model_specific.bart} \alias{sanitize_model_specific.glmmTMB} \alias{sanitize_model_specific.inferences_simulation} \alias{sanitize_model_specific.mblogit} \alias{sanitize_model_specific.mlogit} \alias{sanitize_model_specific.clm} \alias{sanitize_model_specific.plm} \alias{sanitize_model_specific.rqs} \alias{sanitize_model_specific.svyolr} \alias{sanitize_model_specific.svyglm} \title{Method to raise model-specific warnings and errors} \usage{ \method{sanitize_model_specific}{glimML}(model, ...) \method{sanitize_model_specific}{betareg}(model, ...) sanitize_model_specific(model, ...) \method{sanitize_model_specific}{default}( model, vcov = NULL, calling_function = "marginaleffects", ... ) \method{sanitize_model_specific}{brmsfit}(model, ...) \method{sanitize_model_specific}{bart}(model, ...) \method{sanitize_model_specific}{glmmTMB}( model, vcov = NULL, calling_function = "marginaleffects", ... ) \method{sanitize_model_specific}{inferences_simulation}(model, vcov = FALSE, ...) \method{sanitize_model_specific}{mblogit}(model, calling_function = "marginaleffects", ...) \method{sanitize_model_specific}{mlogit}(model, newdata, ...) \method{sanitize_model_specific}{clm}(model, ...) \method{sanitize_model_specific}{plm}(model, ...) \method{sanitize_model_specific}{rqs}(model, ...) \method{sanitize_model_specific}{svyolr}(model, wts = NULL, ...) \method{sanitize_model_specific}{svyglm}(model, wts = NULL, ...) 
} \arguments{ \item{model}{Model object} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package.These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. } \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{newdata}{Grid of predictor values at which we evaluate the slopes. \itemize{ \item Warning: Please avoid modifying your dataset between fitting the model and calling a \code{marginaleffects} function. This can sometimes lead to unexpected results. 
\item \code{NULL} (default): Unit-level slopes for each observed value in the dataset (empirical distribution). The dataset is retrieved using \code{\link[insight:get_data]{insight::get_data()}}, which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. \item \code{\link[=datagrid]{datagrid()}} call to specify a custom grid of regressors. For example: \itemize{ \item \code{newdata = datagrid(cyl = c(4, 6))}: \code{cyl} variable equal to 4 and 6 and other regressors fixed at their means or modes. \item See the Examples section and the \code{\link[=datagrid]{datagrid()}} documentation. } \item string: \itemize{ \item "mean": Marginal Effects at the Mean. Slopes when each predictor is held at its mean or mode. \item "median": Marginal Effects at the Median. Slopes when each predictor is held at its median or mode. \item "marginalmeans": Marginal Effects at Marginal Means. See Details section below. \item "tukey": Marginal Effects at Tukey's 5 numbers. \item "grid": Marginal Effects on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). } }} \item{wts}{string or numeric: weights to use when computing average contrasts or slopes. These weights only affect the averaging in \verb{avg_*()} or with the \code{by} argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the \code{weighted.mean()} function. \itemize{ \item string: column name of the weights variable in \code{newdata}. When supplying a column name to \code{wts}, it is recommended to supply the original data (including the weights variable) explicitly to \code{newdata}. \item numeric: vector of length equal to the number of rows in the original data or in \code{newdata} (if supplied). 
}} } \value{ A warning, an error, or nothing } \description{ Method to raise model-specific warnings and errors } \keyword{internal} marginaleffects/man/get_predict.Rd0000644000176200001440000002011514543163156016746 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_predict.R, R/methods_MASS.R, % R/methods_MCMCglmm.R, R/methods_afex.R, R/methods_aod.R, % R/methods_betareg.R, R/methods_bife.R, R/methods_biglm.R, R/methods_nnet.R, % R/methods_brglm2.R, R/methods_brms.R, R/methods_crch.R, R/methods_dbarts.R, % R/methods_fixest.R, R/methods_gamlss.R, R/methods_glmmTMB.R, % R/methods_inferences_simulation.R, R/methods_lme4.R, R/methods_mclogit.R, % R/methods_mhurdle.R, R/methods_mlogit.R, R/methods_mlr3.R, % R/methods_ordinal.R, R/methods_quantreg.R, R/methods_rms.R, % R/methods_robustlmm.R, R/methods_rstanarm.R, R/methods_stats.R, % R/methods_survey.R, R/methods_survival.R, R/methods_tidymodels.R, % R/methods_tobit1.R \name{get_predict} \alias{get_predict} \alias{get_predict.default} \alias{get_predict.polr} \alias{get_predict.glmmPQL} \alias{get_predict.MCMCglmm} \alias{get_predict.afex_aov} \alias{get_predict.glimML} \alias{get_predict.betareg} \alias{get_predict.bife} \alias{get_predict.biglm} \alias{get_predict.multinom} \alias{get_predict.brmultinom} \alias{get_predict.brmsfit} \alias{get_predict.crch} \alias{get_predict.bart} \alias{get_predict.fixest} \alias{get_predict.gamlss} \alias{get_predict.glmmTMB} \alias{get_predict.inferences_simulation} \alias{get_predict.merMod} \alias{get_predict.lmerModLmerTest} \alias{get_predict.lmerMod} \alias{get_predict.mblogit} \alias{get_predict.mhurdle} \alias{get_predict.mlogit} \alias{get_predict.Learner} \alias{get_predict.clm} \alias{get_predict.rq} \alias{get_predict.rms} \alias{get_predict.orm} \alias{get_predict.lrm} \alias{get_predict.ols} \alias{get_predict.rlmerMod} \alias{get_predict.stanreg} \alias{get_predict.lm} \alias{get_predict.glm} \alias{get_predict.svyolr} 
\alias{get_predict.coxph} \alias{get_predict.model_fit} \alias{get_predict.workflow} \alias{get_predict.tobit1} \title{Get predicted values from a model object (internal function)} \usage{ get_predict(model, newdata, type, ...) \method{get_predict}{default}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{polr}(model, newdata = insight::get_data(model), type = "probs", ...) \method{get_predict}{glmmPQL}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{MCMCglmm}(model, newdata, type = "response", ndraws = 1000, ...) \method{get_predict}{afex_aov}(model, newdata = NULL, ...) \method{get_predict}{glimML}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{betareg}(model, newdata, ...) \method{get_predict}{bife}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{biglm}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{multinom}(model, newdata = insight::get_data(model), type = "probs", ...) \method{get_predict}{brmultinom}(model, newdata = insight::get_data(model), type = "probs", ...) \method{get_predict}{brmsfit}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{crch}(model, newdata = NULL, type = "location", ...) \method{get_predict}{bart}(model, newdata = NULL, ...) \method{get_predict}{fixest}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{gamlss}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{glmmTMB}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{inferences_simulation}(model, newdata, ...) \method{get_predict}{merMod}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{lmerModLmerTest}(model, newdata = insight::get_data(model), type = "response", ...) 
\method{get_predict}{lmerMod}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{mblogit}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{mhurdle}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{mlogit}(model, newdata, ...) \method{get_predict}{Learner}(model, newdata, type = NULL, ...) \method{get_predict}{clm}(model, newdata = insight::get_data(model), type = "prob", ...) \method{get_predict}{rq}(model, newdata = insight::get_data(model), type = NULL, ...) \method{get_predict}{rms}(model, newdata = insight::get_data(model), type = NULL, ...) \method{get_predict}{orm}(model, newdata = insight::get_data(model), type = NULL, ...) \method{get_predict}{lrm}(model, newdata = insight::get_data(model), type = NULL, ...) \method{get_predict}{ols}(model, newdata = insight::get_data(model), type = NULL, ...) \method{get_predict}{rlmerMod}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{stanreg}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{lm}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{glm}(model, newdata = insight::get_data(model), type = "response", ...) \method{get_predict}{svyolr}(model, newdata = insight::get_data(model), type = "probs", ...) \method{get_predict}{coxph}(model, newdata = insight::get_data(model), type = "lp", ...) \method{get_predict}{model_fit}(model, newdata, type = NULL, ...) \method{get_predict}{workflow}(model, newdata, type = NULL, ...) \method{get_predict}{tobit1}(model, newdata = insight::get_data(model), type = "response", ...) } \arguments{ \item{model}{Model object} \item{newdata}{Grid of predictor values at which we evaluate the slopes. \itemize{ \item Warning: Please avoid modifying your dataset between fitting the model and calling a \code{marginaleffects} function. 
This can sometimes lead to unexpected results. \item \code{NULL} (default): Unit-level slopes for each observed value in the dataset (empirical distribution). The dataset is retrieved using \code{\link[insight:get_data]{insight::get_data()}}, which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. \item \code{\link[=datagrid]{datagrid()}} call to specify a custom grid of regressors. For example: \itemize{ \item \code{newdata = datagrid(cyl = c(4, 6))}: \code{cyl} variable equal to 4 and 6 and other regressors fixed at their means or modes. \item See the Examples section and the \code{\link[=datagrid]{datagrid()}} documentation. } \item string: \itemize{ \item "mean": Marginal Effects at the Mean. Slopes when each predictor is held at its mean or mode. \item "median": Marginal Effects at the Median. Slopes when each predictor is held at its median or mode. \item "marginalmeans": Marginal Effects at Marginal Means. See Details section below. \item "tukey": Marginal Effects at Tukey's 5 numbers. \item "grid": Marginal Effects on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). } }} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package. These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). 
Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A data.frame of predicted values with a number of rows equal to the number of rows in \code{newdata} and columns "rowid" and "estimate". A "group" column is added for multivariate models or models with categorical outcomes. } \description{ Get predicted values from a model object (internal function) } \keyword{internal} marginaleffects/man/slopes.Rd0000644000176200001440000005334614554076657016011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/slopes.R \name{slopes} \alias{slopes} \alias{avg_slopes} \title{Slopes (aka Partial derivatives, Marginal Effects, or Trends)} \usage{ slopes( model, newdata = NULL, variables = NULL, type = NULL, by = FALSE, vcov = TRUE, conf_level = 0.95, slope = "dydx", wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ... ) avg_slopes( model, newdata = NULL, variables = NULL, type = NULL, by = TRUE, vcov = TRUE, conf_level = 0.95, slope = "dydx", wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ... ) } \arguments{ \item{model}{Model object} \item{newdata}{Grid of predictor values at which we evaluate the slopes. \itemize{ \item Warning: Please avoid modifying your dataset between fitting the model and calling a \code{marginaleffects} function. This can sometimes lead to unexpected results. \item \code{NULL} (default): Unit-level slopes for each observed value in the dataset (empirical distribution). The dataset is retrieved using \code{\link[insight:get_data]{insight::get_data()}}, which tries to extract data from the environment. 
This may produce unexpected results if the original data frame has been altered since fitting the model. \item \code{\link[=datagrid]{datagrid()}} call to specify a custom grid of regressors. For example: \itemize{ \item \code{newdata = datagrid(cyl = c(4, 6))}: \code{cyl} variable equal to 4 and 6 and other regressors fixed at their means or modes. \item See the Examples section and the \code{\link[=datagrid]{datagrid()}} documentation. } \item string: \itemize{ \item "mean": Marginal Effects at the Mean. Slopes when each predictor is held at its mean or mode. \item "median": Marginal Effects at the Median. Slopes when each predictor is held at its median or mode. \item "marginalmeans": Marginal Effects at Marginal Means. See Details section below. \item "tukey": Marginal Effects at Tukey's 5 numbers. \item "grid": Marginal Effects on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). } }} \item{variables}{Focal variables \itemize{ \item \code{NULL}: compute slopes or comparisons for all the variables in the model object (can be slow). \item Character vector: subset of variables (usually faster). }} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} \item{by}{Aggregate unit-level estimates (aka, marginalize, average over). Valid inputs: \itemize{ \item \code{FALSE}: return the original unit-level estimates. \item \code{TRUE}: aggregate estimates for each term. \item Character vector of column names in \code{newdata} or in the data frame produced by calling the function without the \code{by} argument. 
\item Data frame with a \code{by} column of group labels, and merging columns shared by \code{newdata} or the data frame produced by calling the same function without the \code{by} argument. \item See examples below. \item For more complex aggregations, you can use the \code{FUN} argument of the \code{hypotheses()} function. See that function's documentation and the Hypothesis Test vignettes on the \code{marginaleffects} website. }} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. } \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{conf_level}{numeric value between 0 and 1. Confidence level to use to build a confidence interval.} \item{slope}{string indicates the type of slope or (semi-)elasticity to compute: \itemize{ \item "dydx": dY/dX \item "eyex": dY/dX * Y / X \item "eydx": dY/dX * Y \item "dyex": dY/dX / X \item Y is the predicted value of the outcome; X is the observed value of the predictor. 
}} \item{wts}{string or numeric: weights to use when computing average contrasts or slopes. These weights only affect the averaging in \verb{avg_*()} or with the \code{by} argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the \code{weighted.mean()} function. \itemize{ \item string: column name of the weights variable in \code{newdata}. When supplying a column name to \code{wts}, it is recommended to supply the original data (including the weights variable) explicitly to \code{newdata}. \item numeric: vector of length equal to the number of rows in the original data or in \code{newdata} (if supplied). }} \item{hypothesis}{specify a hypothesis test or custom contrast using a numeric value, vector, or matrix, a string, or a string formula. \itemize{ \item Numeric: \itemize{ \item Single value: the null hypothesis used in the computation of Z and p (before applying \code{transform}). \item Vector: Weights to compute a linear combination of (custom contrast between) estimates. Length equal to the number of rows generated by the same function call, but without the \code{hypothesis} argument. \item Matrix: Each column is a vector of weights, as described above, used to compute a distinct linear combination of (contrast between) estimates. The column names of the matrix are used as labels in the output. } \item String formula to specify linear or non-linear hypothesis tests. If the \code{term} column uniquely identifies rows, terms can be used in the formula. Otherwise, use \code{b1}, \code{b2}, etc. to identify the position of each parameter. The \verb{b*} wildcard can be used to test hypotheses on all estimates. Examples: \itemize{ \item \code{hp = drat} \item \code{hp + drat = 12} \item \code{b1 + b2 + b3 = 0} \item \verb{b* / b1 = 1} } \item String: \itemize{ \item "pairwise": pairwise differences between estimates in each row. 
\item "reference": differences between the estimates in each row and the estimate in the first row. \item "sequential": difference between an estimate and the estimate in the next row. \item "revpairwise", "revreference", "revsequential": inverse of the corresponding hypotheses, as described above. } \item See the Examples section below and the vignette: https://marginaleffects.com/vignettes/hypothesis.html }} \item{equivalence}{Numeric vector of length 2: bounds used for the two-one-sided test (TOST) of equivalence, and for the non-inferiority and non-superiority tests. See Details section below.} \item{p_adjust}{Adjust p-values for multiple comparisons: "holm", "hochberg", "hommel", "bonferroni", "BH", "BY", or "fdr". See \link[stats:p.adjust]{stats::p.adjust}} \item{df}{Degrees of freedom used to compute p values and confidence intervals. A single numeric value between 1 and \code{Inf}. When \code{df} is \code{Inf}, the normal distribution is used. When \code{df} is finite, the \code{t} distribution is used. See \link[insight:get_df]{insight::get_df} for a convenient function to extract degrees of freedom. Ex: \code{slopes(model, df = insight::get_df(model))}} \item{eps}{NULL or numeric value which determines the step size to use when calculating numerical derivatives: (f(x+eps)-f(x))/eps. When \code{eps} is \code{NULL}, the step size is 0.0001 multiplied by the difference between the maximum and minimum values of the variable with respect to which we are taking the derivative. Changing \code{eps} may be necessary to avoid numerical problems in certain models.} \item{numderiv}{string or list of strings indicating the method to use for the numeric differentiation used to compute delta method standard errors. 
\itemize{ \item "fdforward": finite difference method with forward differences (default) \item "fdcenter": finite difference method with central differences \item "richardson": Richardson extrapolation method \item Extra arguments can be specified by passing a list to the \code{numderiv} argument, with the name of the method first and named arguments following, ex: \code{numderiv=list("fdcenter", eps = 1e-5)}. When an unknown argument is used, \code{marginaleffects} prints the list of valid arguments for each method. }} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package. These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A \code{data.frame} with one row per observation (per term/group) and several columns: \itemize{ \item \code{rowid}: row number of the \code{newdata} data frame \item \code{type}: prediction type, as defined by the \code{type} argument \item \code{group}: (optional) value of the grouped outcome (e.g., categorical outcome models) \item \code{term}: the variable whose marginal effect is computed \item \code{dydx}: slope of the outcome with respect to the term, for a given combination of predictor values \item \code{std.error}: standard errors computed via the delta method. \item \code{p.value}: p value associated to the \code{estimate} column. The null is determined by the \code{hypothesis} argument (0 by default), and p values are computed before applying the \code{transform} argument. 
For models of class \code{feglm}, \code{Gam}, \code{glm} and \code{negbin}, p values are computed on the link scale by default unless the \code{type} argument is specified explicitly. \item \code{s.value}: Shannon information transforms of p values. How many consecutive "heads" tosses would provide the same amount of evidence (or "surprise") against the null hypothesis that the coin is fair? The purpose of S is to calibrate the analyst's intuition about the strength of evidence encoded in p against a well-known physical phenomenon. See Greenland (2019) and Cole et al. (2020). \item \code{conf.low}: lower bound of the confidence interval (or equal-tailed interval for bayesian models) \item \code{conf.high}: upper bound of the confidence interval (or equal-tailed interval for bayesian models) } See \code{?print.marginaleffects} for printing options. } \description{ Partial derivative of the regression equation with respect to a regressor of interest. \itemize{ \item \code{slopes()}: unit-level (conditional) estimates. \item \code{avg_slopes()}: average (marginal) estimates. } The \code{newdata} argument and the \code{datagrid()} function can be used to control where statistics are evaluated in the predictor space: "at observed values", "at the mean", "at representative values", etc. See the slopes vignette and package website for worked examples and case studies: \itemize{ \item \url{https://marginaleffects.com/vignettes/slopes.html} \item \url{https://marginaleffects.com/} } } \details{ A "slope" or "marginal effect" is the partial derivative of the regression equation with respect to a variable in the model. This function uses automatic differentiation to compute slopes for a vast array of models, including non-linear models with transformations (e.g., polynomials). Uncertainty estimates are computed using the delta method. 
Numerical derivatives for the \code{slopes} function are calculated using a simple epsilon difference approach: \eqn{\partial Y / \partial X = (f(X + \varepsilon/2) - f(X-\varepsilon/2)) / \varepsilon}{dY/dX = (f(X + e/2) - f(X-e/2)) / e}, where f is the \code{predict()} method associated with the model class, and \eqn{\varepsilon}{e} is determined by the \code{eps} argument. } \section{Functions}{ \itemize{ \item \code{avg_slopes()}: Average slopes }} \section{Standard errors using the delta method}{ Standard errors for all quantities estimated by \code{marginaleffects} can be obtained via the delta method. This requires differentiating a function with respect to the coefficients in the model using a finite difference approach. In some models, the delta method standard errors can be sensitive to various aspects of the numeric differentiation strategy, including the step size. By default, the step size is set to \code{1e-8}, or to \code{1e-4} times the smallest absolute model coefficient, whichever is largest. \code{marginaleffects} can delegate numeric differentiation to the \code{numDeriv} package, which allows more flexibility. To do this, users can pass arguments to the \code{numDeriv::jacobian} function through a global option. For example: \itemize{ \item \code{options(marginaleffects_numDeriv = list(method = "simple", method.args = list(eps = 1e-6)))} \item \code{options(marginaleffects_numDeriv = list(method = "Richardson", method.args = list(eps = 1e-5)))} \item \code{options(marginaleffects_numDeriv = NULL)} } See the "Standard Errors and Confidence Intervals" vignette on the \code{marginaleffects} website for more details on the computation of standard errors: https://marginaleffects.com/vignettes/uncertainty.html Note that the \code{inferences()} function can be used to compute uncertainty estimates using a bootstrap or simulation-based inference. 
See the vignette: https://marginaleffects.com/vignettes/bootstrap.html } \section{Model-Specific Arguments}{ Some model types allow model-specific arguments to modify the nature of marginal effects, predictions, marginal means, and contrasts. Please report other package-specific \code{predict()} arguments on Github so we can add them to the table below. https://github.com/vincentarelbundock/marginaleffects/issues\tabular{llll}{ Package \tab Class \tab Argument \tab Documentation \cr \code{brms} \tab \code{brmsfit} \tab \code{ndraws} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \tab \tab \code{re_formula} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \code{lme4} \tab \code{merMod} \tab \code{re.form} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \tab \tab \code{allow.new.levels} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \code{glmmTMB} \tab \code{glmmTMB} \tab \code{re.form} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{allow.new.levels} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{zitype} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \code{mgcv} \tab \code{bam} \tab \code{exclude} \tab \link[mgcv:predict.bam]{mgcv::predict.bam} \cr \code{robustlmm} \tab \code{rlmerMod} \tab \code{re.form} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \tab \tab \code{allow.new.levels} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \code{MCMCglmm} \tab \code{MCMCglmm} \tab \code{ndraws} \tab \cr } } \section{Bayesian posterior summaries}{ By default, credible intervals in bayesian models are built as equal-tailed intervals. 
This can be changed to a highest density interval by setting a global option: \code{options("marginaleffects_posterior_interval" = "eti")} \code{options("marginaleffects_posterior_interval" = "hdi")} By default, the center of the posterior distribution in bayesian models is identified by the median. Users can use a different summary function by setting a global option: \code{options("marginaleffects_posterior_center" = "mean")} \code{options("marginaleffects_posterior_center" = "median")} When estimates are averaged using the \code{by} argument, the \code{tidy()} function, or the \code{summary()} function, the posterior distribution is marginalized twice over. First, we take the average \emph{across} units but \emph{within} each iteration of the MCMC chain, according to what the user requested in \code{by} argument or \code{tidy()/summary()} functions. Then, we identify the center of the resulting posterior using the function supplied to the \code{"marginaleffects_posterior_center"} option (the median by default). } \section{Equivalence, Inferiority, Superiority}{ \eqn{\theta} is an estimate, \eqn{\sigma_\theta} its estimated standard error, and \eqn{[a, b]} are the bounds of the interval supplied to the \code{equivalence} argument. Non-inferiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \leq a}{\theta <= a} \item \eqn{H_1}{H1}: \eqn{\theta > a} \item \eqn{t=(\theta - a)/\sigma_\theta}{t=(\theta - a)/\sigma_\theta} \item p: Upper-tail probability } Non-superiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \geq b}{\theta >= b} \item \eqn{H_1}{H1}: \eqn{\theta < b} \item \eqn{t=(\theta - b)/\sigma_\theta}{t=(\theta - b)/\sigma_\theta} \item p: Lower-tail probability } Equivalence: Two One-Sided Tests (TOST) \itemize{ \item p: Maximum of the non-inferiority and non-superiority p values. } Thanks to Russell V. Lenth for the excellent \code{emmeans} package and documentation which inspired this feature. 
} \section{Prediction types}{ The \code{type} argument determines the scale of the predictions used to compute quantities of interest with functions from the \code{marginaleffects} package. Admissible values for \code{type} depend on the model object. When users specify an incorrect value for \code{type}, \code{marginaleffects} will raise an informative error with a list of valid \code{type} values for the specific model object. The first entry in the list in that error message is the default type. The \code{invlink(link)} is a special type defined by \code{marginaleffects}. It is available for some (but not all) models and functions. With this link type, we first compute predictions on the link scale, then we use the inverse link function to backtransform the predictions to the response scale. This is useful for models with non-linear link functions as it can ensure that confidence intervals stay within desirable bounds, ex: 0 to 1 for a logit model. Note that an average of estimates with \code{type="invlink(link)"} will not always be equivalent to the average of estimates with \code{type="response"}. 
Some of the most common \code{type} values are: response, link, E, Ep, average, class, conditional, count, cum.prob, cumprob, density, detection, disp, ev, expected, expvalue, fitted, invlink(link), latent, latent_N, linear.predictor, linpred, location, lp, mean, numeric, p, ppd, pr, precision, prediction, prob, probability, probs, quantile, risk, scale, survival, unconditional, utility, variance, xb, zero, zlink, zprob } \examples{ \dontshow{if (interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \dontshow{\}) # examplesIf} # Unit-level (conditional) Marginal Effects mod <- glm(am ~ hp * wt, data = mtcars, family = binomial) mfx <- slopes(mod) head(mfx) # Average Marginal Effect (AME) avg_slopes(mod, by = TRUE) # Marginal Effect at the Mean (MEM) slopes(mod, newdata = datagrid()) # Marginal Effect at User-Specified Values # Variables not explicitly included in `datagrid()` are held at their means slopes(mod, newdata = datagrid(hp = c(100, 110))) # Group-Average Marginal Effects (G-AME) # Calculate marginal effects for each observation, and then take the average # marginal effect within each subset of observations with different observed # values for the `cyl` variable: mod2 <- lm(mpg ~ hp * cyl, data = mtcars) avg_slopes(mod2, variables = "hp", by = "cyl") # Marginal Effects at User-Specified Values (counterfactual) # Variables not explicitly included in `datagrid()` are held at their # original values, and the whole dataset is duplicated once for each # combination of the values in `datagrid()` mfx <- slopes(mod, newdata = datagrid(hp = c(100, 110), grid_type = "counterfactual")) head(mfx) # Heteroskedasticity robust standard errors mfx <- slopes(mod, vcov = sandwich::vcovHC(mod)) head(mfx) # hypothesis test: is the `hp` marginal effect at the mean equal to the `drat` marginal effect mod <- lm(mpg ~ wt + drat, data = mtcars) slopes( mod, newdata = "mean", hypothesis = "wt = drat") # 
same hypothesis test using row indices slopes( mod, newdata = "mean", hypothesis = "b1 - b2 = 0") # same hypothesis test using numeric vector of weights slopes( mod, newdata = "mean", hypothesis = c(1, -1)) # two custom contrasts using a matrix of weights lc <- matrix(c( 1, -1, 2, 3), ncol = 2) colnames(lc) <- c("Contrast A", "Contrast B") slopes( mod, newdata = "mean", hypothesis = lc) } \references{ \itemize{ \item Greenland S. 2019. "Valid P-Values Behave Exactly as They Should: Some Misleading Criticisms of P-Values and Their Resolution With S-Values." The American Statistician. 73(S1): 106–114. \item Cole, Stephen R, Jessie K Edwards, and Sander Greenland. 2020. "Surprise!" American Journal of Epidemiology 190 (2): 191–93. https://doi.org/10.1093/aje/kwaa136 } } marginaleffects/man/get_group_names.Rd0000644000176200001440000000443214543163156017637 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_group_names.R, R/methods_MASS.R, % R/methods_nnet.R, R/methods_brglm2.R, R/methods_brms.R, R/methods_mclogit.R, % R/methods_mlm.R, R/methods_ordinal.R, R/methods_pscl.R, R/methods_survey.R \name{get_group_names} \alias{get_group_names} \alias{get_group_names.default} \alias{get_group_names.polr} \alias{get_group_names.multinom} \alias{get_group_names.bracl} \alias{get_group_names.brmsfit} \alias{get_group_names.mblogit} \alias{get_group_names.mlm} \alias{get_group_names.clm} \alias{get_group_names.hurdle} \alias{get_group_names.svyolr} \title{Get levels of the outcome variable in grouped or multivariate models} \usage{ get_group_names(model, ...) \method{get_group_names}{default}(model, ...) \method{get_group_names}{polr}(model, ...) \method{get_group_names}{multinom}(model, ...) \method{get_group_names}{bracl}(model, ...) \method{get_group_names}{brmsfit}(model, ...) \method{get_group_names}{mblogit}(model, type, ...) \method{get_group_names}{mlm}(model, ...) \method{get_group_names}{clm}(model, ...) 
\method{get_group_names}{hurdle}(model, type = "count", ...) \method{get_group_names}{svyolr}(model, ...) } \arguments{ \item{model}{Model object} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package.These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} } \value{ A character vector } \description{ Get levels of the outcome variable in grouped or multivariate models } \keyword{internal} marginaleffects/man/figures/0000755000176200001440000000000014541720224015624 5ustar liggesusersmarginaleffects/man/figures/zoo_banner.png0000755000176200001440000312243214541720224020500 0ustar liggesusersPNG  IHDR;~F,zTXtRaw profile type exifxڥY丮d57QZ5~MUqH`е )?Rs柷޿=|_~q?o_%5 }g-_ߟO7A+֟7j(>o?ouV?ؿ?oX~כ[/1ěcs;x)?W‚u^w}^_/mg}K-r++goϿ~} \ѿ}ߪ߹gn[<uG׋Kw[bՓ][fha'z>0S'/X Sb}|7_ߧ+Zs]x\, ߟEMgL.X{? 0]BYހ%";Bwy Y٠Υ?};Rz1f>~4?Ry|uH1޴٬MOy+6SLoJ)jjߜr% ^%\J^c}kZk=-VZmgv޹ӝ>;5(6|;̳:Yq+j1.}/}+_޵?/v-ٵ/,_-寷Ir0H`Nj[A?]>{"{;;< vԝڷo]ns][yv.g\Hy}}oyWgŽ^ƷS_z;㎼2gFXJ+PGfo[w.{Ln\aO(M c8f3^ ҸcbvnϺ"6w޹n.*>seyb}޺{Թ5>9ڭJ zb+wO_~`0D p'︞c/i.o|K.;d%~' ~0wzYŪދoY[sD"E7e9:NhG9owau\+oФr9VŴ'?ߔ6pO |m㍹q+x玹޹Fnn5Y ~n8 D0ܯֱ˲|M@L՞ L|-,yQ^j $ Xxޡ%J7ǐzXg hDL,U~>pÎKaג6aΗ{KĂOƢЌxx4 ׃! X=_55U*u5 n~="+x؀*s%|o6^߳@{M97X,? 
dJ>ycv[z=;v[ :O _`+R*'x'dn ;a[9``M!¢;SC:J_ Y?c|/ ː40$x0D9aec/~qI'ca)/HF`[/_/z6V]īG~JXO`{ Y݆Z#Ƶ1cu$]G6 HXED_hN0^V C 7ȰqW;"\Zu˶`a&~Eؿ[4 b'*O'TUЅ#@" @*!x1?mm7`ąin rؤJȸQ&D< kMn D,3: uRb޸A C0{08d<7;R?v4Wr &pdq"o%h Vڮ _ P02C}ᗟaz0:i_d`юzvwPdCcp`/_@7 k߅ vX9j*˦`G  $B0%2p n7,U"օH. ZݰHwk -U|;2+"A"H%۾?*vΟ-&:sm(o  2x%ȇ "/2tt g:ST`m59& MH*y[%Z&žS(ʈ̝E+!]^\YA" n8tȀx%bv`,/9̀Ђ" Yb U{KFMv[{t" x"1uB*o QC;ȵT*Tx@/dqmX-2J 8RDىoݽA^Y;;]$6C퉜+HXWŷ>pz/ 'Z񳁼x>t ;"T…A(A% Xu끲qX k\,) q$C w+@{iX$*>0Sp揷<@m4=ќ$hpgz > x V XDsDV}D, YJ4]Wpݏ +rPBp5x: 4́9Y:P_+6q8gᾡC1 TAk"J<,jǎ$LOEw2KuEۇ KVlx{7Џ% B'o=*\WDŽ9sO_X$Y z ;XB[: ׫G}&E--d b#͈J7 xthр<f@[G*O"( ^"ǒQWb*-~֑j%o@F`,áƛMbhAX9A27zӖ~ȯHycd𸬮x*I4?Dߖ Aa!0[`iU|-ic$>|tCo^0]~$ن30TlLxA4q!CIl }wJv=`ADy9fI2rJ;଄rXSFo3p&PGg'h '5\'(U A ќ<øN*$ﻣ*S1}W 5o(F_ Y>." !%P,|V  .LA7 >h$2YttXhmbߊ*k]q of}ੈ W)E`A)sϬkAnk'>{Jt%=+as`P'3~\÷ /6%|M˫HVq$bmC}=^8WT]f@*DR}+7H +fLFKjE"Ǡ?non헟Ek>zD-ӠJ䑷z䝈 r1`91,;sӤ<۳;aْ ^3OV \nF$C -,R?0_7N&+koe Y1!f~'x+9`_0N lǞCB_\X O I؇K:J!|W.@7B/n#ر.ƄM5ﻰkPwm }k^51b}B'HZj?=Q*W~X];|aAخ;Aja}BbM҄6a2Ŵߑq ڏ6y0m,},7WCK}{Ѓv 1;s]=|{&70WOPA'"߶GfMLi(aChWFBXrhOnpmJ  rUkȐj1 tܩW".MGSH=jMhtW4'\oP9 x7K@3%c[wk;z.UvPb DH܄޸݃Ï'f z0C唼'/!7ć͆ AI`lO d ‰V_^f WM# 1/ ϔya >H5,u‚&S|yg>fVh%_sl`&v2@ PN~tgچ|_5Ҁh$SSfF#7/8\}FDH3x2& a+ l =3>="'w v Xy}FCnG(k?G"-Z5S %WEstt{vDW=J|ZX@>?*==K T .h0 (C* >իQG5nؒ [=BH^ɾu0x8$a¨p?@ԏq'Rs-Θ2̗IhGp6|@Vi5BءjLqUPqڃ!Rqء ikUFpg7%i)+h\S=~a(ɵl).MX.Im=s7(;AF+_v]Æi7d DX9 KziL?$2\x62pQ(8X0:QUB2n+rpDoWa`.aXQ,K=of>/` ꞁ6{|,3k8H$BH>?tN!MJAf89{Qѣi G ` ).Nge@ދ>LyT<'|&HF ?,㦄#L݈'U3=Ħ|dY!JYeG]Vv)$emFsX&w,:ݎX) 7TF 7E.SaEqJ5dIWS6>_~b&>mDwF", bcx)x,\G6Bc=0_6M_=% %HCމjeq\σGݦ|6tTxLAp" |Y,?4"IT":brM>"=/9:lsZ؀ENaSRz~ k|܃LLWDIjl5$ϛkekA.$DjqX&A/*f0q{!\Bs7R= xH𙷃+b=E.sS,!"<:\zxY=ncż}cDP<9Ccj]:u!EcԠ?/kx1N (%h]!7 ]2?cH[xz fc3"pNBUKEOy~D\6aEz+<3Z:X&cܞgz*bmSw 0:;cv'0' ![\M'P2EpY@k}B6!xA\%*t8I Rn B,BAo{5LD $HGKZ̿E@TO没5dgZ`ZL0& Sy%.,Gۿ& p%|B_BwaOeIXqaxZ`PlJ={cRd%#>B!3^A~RcD)SXqS \=KOczbU;^d,8(5[p ػE YR^FղF x fv- N I/0-,-B[E~]-<21VK`4Fo!#\֙\3V(FEZ(G< wee(ְdTƊ@s_>7fߐ'4qɖ_rg0Rx ްWdp6sQB? 
zBw 7iX`(eU74)B`F+wBJSHWуFHh ZbzjC(DY(JM+^h@S-'gaL68e FDh{8r3L+I &14񸱨-Y4 &; @ Ӵ9NcO\l f]^2(`}}`I ǀUŶ&"~)*-+fl@53;͂XjР3&]OU)481^8g FGYˉu\* f `s?FƪkLIa0` p 7bX4L6"5ϻ瓌%mЛPP<qXɀwwP $J,q~StgKog~z`T*WaP7;>/3 2a"LƢ}Y #B-9cZfgޮ7796Z6mb- Ԇ)~#lB&OJm׃!"vpH+<w0j-p`jX"GZi)iʹDasԝ+;ޕX*z 2A,["}Yhq>3'4`(`yiAe!q_x|v7k {{} یCl;ە\([qXffo(r9rB)֮۰D, .L{<ߺe*H u3&i + ,=MvWWn/ԻJ7Z>p(l8ļּɣ,žV\u|: qx t_UmFW==о \ `W6%`Z Gİ| Z16P"qbYlK<Q8[tpFs J`K"P|0lv9"5|g@9,i3jv6Q 49wܭVC[V+AA#4|8y^-Ws}SVC/A{9t~]˸Fŷ{0o ZC-H8FU,^_# (Qy~F,yLR tTUtO,\PDӿM.o˧GGG^9@B le6rNًpƾLސ~")Ca*O5@;M\Cn+WHB3 C=?;]]})#rbEEn^4Kv~%jaffykE[ \kyh]yLphajq#5F|::ZBcAc؟j7MсhzvgA4@mVɯk!D֔ȫFqǰ (I`!y &>m|k+ȇ2ɞ dv,ycȸ#.E. ;\޻|M(Gz<-d}X/jḏnT.yƁt$4X)"ByE0`N'6. NG2KKQ!vË<xgK^tĨ,},vd[ :jv x ^o97E8VKbj/v=`j\H3AWB\WFabCJ`-0ŵW\ FvȮAKmތ(cRdeAnfR >[׼زj[RtL+WwRėheEo?"WYH{r*N^{_sY^`/_1ә .};Tn_̎2R qlGF6u"amF/ yXʥ?>g6MOi75 2Xwc.ǞB$~ZŮr8sBVuGB=coۓn&S#O6Y`2` !Wca9c"v9˗zwJ?vޭDAG"m6;g'&жubeHUBYWC:V5!ӳhJjpL0 @׼6t& @ѲP d!SlhyϳRp ٵ j6~t(9/B۠[Z՜ЫFKܧ;"٣ 7fƊzئZ!䛵IXw<;mmLF6y4yr,ܳ6ln"*U>=x5ְ<8GV߮gɠzXek-;=Mnu JkD)\:X$ЇUr]UqYٰ RǶPֱcxѶYd6x%ZbB@ ;3 5xU8Ďɴ,Ҫb 5@gBp:tN'F!`=Xѥq c_dg!_ohW=cqkC񦓥۶x~5f V58M~[h/ 1@ c\πnv=Y]ቯOl ?"<ì{gq1֧;zݽ)p,͌V'DX5z31zK⳽v^'S {P=.l#]xbp \FsM;Z26ݲ sĞ9p2l=X}ct=dɪ"ϯ랞4tq{I1~Z@7Ck_Dg xp ]9GBuHh-יP@{NA~)[>YFh zi|&z҉CMOe">:,wj4GV7xvhZji)n"?~ tG3R6U{2D8 "RMa 1۱zGM> pGMYЊSx Nc7\~&Frlp6L %Um _+v,ߙ4z9čW+ -A;egۖ$$OK fgvz&"6,4# ]C*n(L70w<=+&3%x[FK[& hLT,.崅Gpݳ{;W6[gf^;ZYW5ڡFֳ V'5 R< ¿l\a.GA:eWn6ύ>Ӿd3A$|+PujΞ$=mfkMJL_PPa_t++Ac_'yN530Т'UdG$@g8q EgwVSZo>0`Fs:ۄf3z RpJu-Ꚅ]$#C˼ypEӱ֪AL<)2q9isZ#wD-^xk ;BO+( hXQV [4d2')%ء 2ZOSŝg ;UْB ӊRˁ]3&ñɴYL! ũ!;Ⱦqm[6ES !2T}1wjk=j)L3,־sRҜQp:9֌ x/_5_Slv nc ;=f`cP!N NX$}ߍ<"<3ؿ@؄uQZ"qGt F3b v`Zhu/{hD*  4e+ ;#mk_vcOtW` ;vm(Xms|uzUK9IT&X.!I&K?>r ¸<0- *NlhɲYTy7ē~me6^p15t>K˜X<#o+\ k+]%JG,*iDl,rIҕ'L;6 !C [ x}کVYgN:0v u) (nOnŅ6K;UcӜV>yi&"ڥZXjl'GΣ` {pڱKa7g? 
C5Z_VZ>T3R_!~b3^%_a77ë vIܚM}S:G/Ֆz⟵!?=DxǺݪ60!t6Bip`@FƎkOĸb{6#"Ly'!h9j`Yf_q/J%9^1mݐ'0Sm=#w-IJ\>lҞIƚZu#0~{`8xYl E6N^=Gq^(Z4|J&s ÂnIv0:yRvm+saCa0_'ZSq^7S2UW>UܖFT )?B9Rz}̙b,ĩfkAH&XvX8`0[/b|* ,m_>niACxB 3F*ے~or9Lѩ0DxJЉ}j:Ąexh{uڻM,.ʢjqF[qm؎ ;!'A8q>žϳa,nEC)견m*93{\nF3=pyF Y'!4DԃC:&;8|R\a}[˞9fUvW8=̙p`J|VpmY@Ӈ}d u-j>A<M$g*js:olKw`h<ςW M3Ɖp:qhqn)gλ<>>[;qCP8͟^ 5Cb'ԞGĮP8pg{UYBc6< qTc# d1) vXDu@\iceIgk[~>+m)SnLw™3Ⱥj >(Z@ =lU;i';e+\㽝1aQ&06"qVogj38+iV(bnFH2bc)ΫV-[Op"'lu 3bs4C0= 1yO>qg6ă >Sؠ\G7ƪt5!79nףĠpj}5{m*pP< BbubOrhWw"L™ ldV9N'eP# ?Rj ѳۉLDʼF${B*G,91=OG,lt#_*"T">n%8A;N}z1+?-?>JRxY=Oh \G/$iVL'1 sY4&ìRCDy@sIA݉NDٟ.sgFL{l =QͺMvOXR<=UۖTلJ&u`Ȝut|_yHqAhgG>w9;N˨xWo8w3a7nmvT}"u>_v|d tK>0:Pߟ7hMѵo[@sMrsiCCPICC profilex}=H@_S*vqP,8J`Zu0 4$).kŪ "%/)=BTkP5Hcb6*^DBObH/f9]gys (y>xEAj88Ɗ=rx,]v iTXtXML:com.adobe.xmp bKGD pHYs  tIME   IDATxڬY$%[h:$Ui^@(e8}\gD ?OD42&"a?LDdFDjDDLLfDjJCLF̘%LŸ뽉m~W9β5~%u]B:?Ѻߟh { 3ͯTi?ќ:ٟj ^d_<~ ET݇=#ո2nKLsvوۈH6W1mٮG'ޏ|mW ,_(ˊ%*=½ȏug=Uc5+? ,QZK~B^{X5:ZLLJ`zx>w 7DNsğ;b\:1sxQ\7pagj}Ɲu~XƬ}~V:TC3}&ig>FuD|[?@}g1:9_>C<4_n}y5{!p\{ݷh qFhK0bq oUu388Wm y `?>u挃[Sy&npEy[XoALL]ު689.7:dp1GTg$pۿsf ឧաj4"֚3_:W:eMsS#2)9̏+1UeF/2$F8K^=0!nD# cʱq:scȊ?5'jt {εgy<~a\u1F$[>wzObM Ek|_bO4'3%H 0"`+T-61V[Fٜ>C搐ΩygQqƅJf-njCmpcH+*8WVLh\B:8,9&dX@>(_/z`XIAaI$bS_f"Ğ7g='8ɠ>K^j JTu+B[a..mEvx4:ITzE^l& ѐcDw%s7~mXگ:4Z\VM%;tsN_Z  "=Y-5iȱ`_FsݷZe[ogSaSz["PZRka:/bܫxy3g2Iш1XoE-^+nsQAě o}hm׏{=/޼ϤxmVMGU3XփU,DҠ-DC0݈hfrI}W,ƹO{ܱ,[|lT4u gcpG~-м}?Ea+[ϐ3ggi_ou̹p9zb#se}|g`ǸkEq=_5\UwfX X3)j> *&ӛmF,dbFHM 6fpʾ-gsHmX >80ElUYiKKjJ̡uf?iy?ǀE 3 \aG~Pzq"zT kjv{S5Y|P38l!W0F9'b691u}khĄWV">j[*V O%u1 vͻN["k52lX$ټDhNfBJWSd{_DʞҦFh*L7vWdA+,2@7Ĕ)LS$2% *05,%d/;G:`苿"\#XoԻQ8D51d.DڧHBSNb>65Lm ~ފ =,/MV:TOMֹ5*)2x}k<*uoLp-ʤ uKh h3ca f"f%HHnAU5dH@1 _yj!_%SיuϛLH3qnFry8`”7f6Tu5ͼyflW|PhUc⥷t}TxꌨF?cY ZVGY*7yog ΔͼHb|MCzc3@ڌQC8 ;{~kTc89'R9cjޛk1|okm<\k/G3§WS+iKW~vmLlX"% !Pj3Na;ZJZXF7#*ILIo]ӫ"-$4qSs&$[rANAP0 1&uNuԳ׋Q\ZL<μ?L3 9)MT 5HuHxм|^BtI"~7q˿\J5} Zs"Nڰ/7,'lhvژhCp()A3Ƃ9eg5;~aܜ|aا"|ȱ:`yjDŽ_7s]&XDj?*J;L!gg̜6cǔ=0j5c|GHX|.H@=WEպ$^+ڸQ4 :pLȢy6g%Uyޤl1}nӮ[[w5 
InLڮN鄈$|Hka3/| 6 "0w$JxE3Dz2?3ѰlVHEH5>dgʗAj량~~$'T5`1OS/aq*%9l'm+$( -R9U55}2>C2[׊Z1Wzo()7?k^L`\ 1Ϡ䤣MkM8?b72aQ0o{A Ĵ~CPֹ­D||ij"q{%5bZP0Bpε>5@\oͽ-sޫxlf'n0^9vjS+gh!]Ş{g'?㵞kRkNܿctԜ3: ?TH+X6$Î [HfLJ[AL4cЕRz,B.?{!ܸDg~'nNr~J@a-|zBVU>#Ä'43 !9ѡo~#@FIKF )nROңY7v5nJ&%qp=*jź y[$5&9Id@4w4@(4e&xJdQH>Րg4HX M'i 8)i);j\=aS٩̀jܸd<̊vZ6[QL.mSlq[x6O|l53pfׇj:,;;+a5H6 k#9&P5MZ%(-8K $$lq)5YE ńi>\MGaayAڥwjqU 5ךyN)jHVh(Iʁ0MB.ll;I#q-nL7_\dQ t EwL-!MpWU^: sc# {5iE.OM׸A,"Q kB# 6( o< wɯ{}&Rs7WU ظ 1I~ j$.i59eCEi wˣJB4qgqIц ^0du$Έ5I]0if{ lԘsJF%!MlqOyYsq$E&HYBg(F O,^ɛCFEucA!#ܓGdNMړXVPz~[ӣ^ Fq6Y8$ "ˌkz%fAu"jec$Y%lt(\ Ag595~J̫9YM{Ş=M'HuQzRh֙mlc{k*C3o?\,&uVl~Uߋ d&-/y/dUQXq"Cc 8m΢o 3y ܞW=[̻g!$"ϵjNZ1ՇgP4 yV12vn 헮JBMxl4{^(9'qhS]%yx $&){c$]_oi"+=-l뽕Ƹ 쉛61vȹ+>zaqr5y|gV":y.S֘`P%J 68 p&cAE(̀XvK :OUlX.c9ɒP p!0bsޚ='*CG;=BZ̵%ZÖ]WGӮ](iNyχ\ԛv C3U; nqhfc`I-P!ϸ31}R`!s7guOb /4$93fRw/P1Wq>͂{%?0O ݎykj,(~q1*Ma_aE(֒|Hy; ;+~~{"klGE v] TBB%DbIDqbSn/f v! ;•{\1B:Å;z>GJ;X} H9P t'H$48NFdjMaHt%ۋ@7}}K}k# ܾ\:-(beĀ݌HhG{h] ;N43rj df {6xP+ѹ\Z3뜖RwT%~Y̾D\w1)><5'd1SW#gQF w:K#@ٶ {4&X{C|s=3)`Ȝj SDdI;T5"Z ??r#kS} Sn/j!(_ORjAsI 4%E$rD峈1j1РH( 9'\I'jA?7j.['ysPO: - +9fKZ3c GXHps70dNY)R0 fP"YûำOPuoNǷMe ~AXɌ>5*$ms0Tp.t7H VaHZ2{tVH>Y罦6v Uz(yϏ/A6Wd ͊ŷbL8{Ї"6ak |>ٝ&nx衈ʚ 1@ua|O6k-Hw(g% sXZO6&B\g> ~8mB,PZlA&lu`jIiVI& " J7.޷A+L˕]8900'uZQQNpg#;Z0Upkl0Vފ2"!$ɦdI)soC>a}(6kHjh݋442}D{Қ4uz :3@H8ŽJ490K3pȕ~bHX0 )gY-`scʽxG *r[!g_NXPR M$׹ D$v RɷAEtFy5Zdk  łOby 7p5nEg+VǠF37[(cc 8J 1Jh>=;Z@nG# KGCOz"a, f3 9`}(\M0pjCmEᱼv5,V|]eP Ш5;HDzGV^Tf4b 8FPި͏8' pX7]ו5ϋԭrsӐ_f":'pү`дHAhv(HP7,.}n=k#!/8>H@j~> 4BLO%j 4 IDAT-cv0mA{}"9 @o #џ'= &~x.QxW' Mh).h[4 E[#9;,?wՕ|Qr&_P6j[M @=& ש4A:Kt ! 
aUqHT@YVP|GC[bIV`dSCPC'q V#uȘ,3v,5T]hh q% ]K5iwfdo9<uPfg7XZf,y+!fWPɑOLV; Im㫆Z$nxt>_ڃN#Ks^;Ҵ5ޏ`%#9 95Nf;֣GjІ`'!1wU|)ִ7dih$^̤z\PBTO xFbq[9u2\op>&5)(5."kNMjၗCxGݛmO8J9J%%F &w/fi $li'zf@}OF2ox.rcD#,^n{<:·҅M(gZ0JMw56AWp%*J$[ʽ-@"6xU.g .0'3̧顽º Tx[)2 kԏMpBA0m4)Dfj"9c!ʒZ^keKK\KDI?\uSv{γ*$GV\IĔK-& %(&%Dv;jX֔ !EUss 1(ś5}mlv3éQM5ߚMt~hIlhdɀ- @dK4/epWL]FGu>!ʳakns WG{ =h&)ᄭQoz%#0+B TOEm9wM7[@z@%2 5BCb)433TEhO$ `PihJe gЖw8YI B(Cȶ0^4#fg&V{k,&ZϢ B`/Lu62,Ҫ )@٤ !^ *.(g\AbLYm‘t@`[C*AKa.Ѩ:w3tikyoNly ;J}h4mB́ 0}c=1a">+SFPt]r=؄*9t-9g1 .bhl3C;{5uDidV:c՞`}ZyMLbR6KyXGmZZhg b -:}=k[rlC4KEaF aumuxv+!O:O W_M[̵ƪRuo Z9ΉZy+Q\Q|UvT QvX왅aʦ( g@L,_O|H7"<7 Ş<-R{]Ӂb/T :C:"2xfP+О7 Kp:?6 մd G*5kQtӐiNaϔ5ʐVRX k@nP,#$eIH=N E ]ӴzdG >ڡ7tw _:6k'¡`-?t1TQ}Z=ۻ^55_01R4 p-Ԛ;^^?qx&|!㪅ĦR I'/xe}J4Μ-K(MAWXC:Z3-!AVMAB%{@32g}{vhLuS"/vLOt'M-)CЍ" @ꃂp kxb#8>LPj+|)'t{62??VuS={جA,lؗ1JօTܢXOIFJa`HuYvkI]lE,^rNxX ^c՘;rE͡{,4KJ\oNX'j5MEZSw EX@G'Z ^$u,{ bFD!.Wm'{ơDv9uH4)yjXs[m -i)TE~}֢ZQM?)/ x!.J77AGĜ}jYTR8ktDdkALH[&".WiK"uxo4`=SHUsuhͰ7/IDB }UЖN%nf}Jms\]]Ϫ!Sȩ\~  x p]BDI6Jkb=-P .f Œ6x{9hT[g AC4,zZ@!l.qXmQ9x Tbh< "kk{ 2ćeO&D{,Qjp(f6tX6rۭjVb{iJƇa ?`S6+ Psjz'˞M]}nM` D'Px*2iHJBRTal85ɺ `> :ɑpPN7%wZM%P*эǾi{s*CP69ߝq-CR)jg7ԽĹ/ IO? /r.$pB]+@ /ҾKlP3޼x dq ˬєY5k!/8IxPjFnvHyMǚ?ӖwJ;#ɲt5P;j]ҐoVOMm##I^>nc5bo5R]b0x̠)P !סdɺpՐ۝Z"V% UזּhOl h"0$M¨ʀS.á'(Jb{`3iޠ(>(C<mMӱ=dט@:hZ"F)i9 \@]g` Z*.uu -bޯ%⧘UlԻ5oFI&-]?QTS#&ᜦ`"h5 S5i װ‰xz B%:ZvV. 
S߸,v4X@{cZ %"xfANkhIi%Tq&+Jܮl^B_ֽ% Zcl$vL KrBIKuܛC瀗A?h7]h6p]Io8 [:RR%jhքz )ʝ#y>l87Cg`B{#mzXZشK[s:ǵQqV|ea84WUQà#ܼ%ej _5^ =m Ͷ} :=lMęFOvJ5*,'{:PU+mf;_ o;9)jq91yS*Y^ R1ä]ւGe-VJTNDQ vnf?Y1-l'r&4-Օ345 #f,u>_kpyxӟ EE ۞|$!vAzzH#PodOURe >ئ+Uo9|3ج&U@3BxG@Gm~H6!Mh3:@((1,(iMbj޳K@w՛ O ӣ6;񼺨JJ~0v"rM%Unc(2^f(?i;& I$B&v%~s !/$ޡ;]lD%\:tok%`Mc(TوX  j 8j\륹>f0=UG,ՈHkT ؘJ s=W&]qgn%/^$v39q VG=mߞgPQfBBDJ=],ǀݟUPg[6_(J%H*Im=XF |)l2^x(} X n!7\rc|`!K*Zռq8݇bT ;1 *9L 2\ Od<'hFoćNcMfOwQ#"ߥtyG( -_-`86o(la_P1ۋQ XZ-%᭄&)@mܗ5jT()ȯ%UT0ql-Xz!Lq ?թ; (VCK(sG=D'`[q.&,kmڊP..J1- )JJ-Uegl2$ov53K >$>ՈI',<->&!k|@/T,fF} )nNwkp˵)g(G30vC rY" =xt+,ikDUbvȝB^}kjtC?˺[5Xl-RJU]gt؅S+ !96Г/JKT<-6JwhI}Fe@3gl\xVhAS"=D9qc7hFsب$B7ʸ_I-QQ N_c&ǐ| CMڄ.C U ĽVbS*[r5{2v.{-Cs4]#w)OgkS`>u٬w_/eb`ȋ GV J_uh+,ό tEnSS4s6JYK-¢n52&vEZϮ9I,/4Oܯ׵։J;RD%HxڔBqiR޼05,*@Rnjd|jp݂]!=4^ q[ҠJxߓ("RHYgԔȜ{7Ή-އZ) 9J1ij1L]0ڠ6wE,#x\9 f`,PrN}spY=}`7j(_ymr!t53p<ڡ\˳{z[v@Nu 1v->P-^OI p`ɹn'᡾|QkRn|1l>#Iڽ8{UP13.:78ۘ`)aV_!bFtTS7XN5il4#D"EhX?eQ[$Q- :@00j!Jؕ[1'vw:yKuC% ]Po P(X_ƒE(/wC? @_+ERb[9%_\YC! d^C},4Z2iM$=G{,`I %k}(\B(Bdj"!LΞ*!ƛ QFQMAR3@iɬ .~4ǙYl gRò(*TAMz~L_f `~ȯVBLM81~o"aMHo1 ȫqgp51 Yf`s>`sV;Qn"{o|m8 ~IK~>5357W=b2P]} 4 ڔjyΙ?6 o Y $MzL,|o U\B?\S~S1+EIq Ϛ IDATZRU\[=CKLy))Jj4 h38~.cxQÑۙM+ϨL`Ĺ8[pa1 `lc[l| AoB]@4ceİk>\ KpP^6[@qP8P9m-F`Q"+wb 2v Uj8Rm߂' n:3K#+mAOxQx^<(T!Avҏx!Yb F!Yݮ`:om}a c.O~!iϸҭ6"2ǁ:pш `s>AxLde& )ѡM0Jg\iAr, Pk1Ey3! 
4뉟m<>!Y K;3S"*RZQ⃁%I#lц) `wNHXmSF\mVbCs p0$feB:]t9 pg} ޥU USɞ2cEBZ"Zڦ'OXfMM<"\y*&6Fc[fcA>Ftρ4^%g-dW`2֠`'e>Ź=IEɓ' ,Unf[W wtD\@񃯴AmC&B>E>^QY7TB+ۚ+C ûahG;a0M}W>;rzZ}SBeYX!J9Aܸx[RkrHe[1?AŎ5x֘H{H(j}@WT]gY2fЮxґ/KYqf11'j1,un f屧:3 [5!uKSRpE.[hR`g}/W/?5 (}ڈ% |: }+%[]n؊j k*Lڅ3wn'-->/>!9A\h]~$P>ꐓ7|딟f߾.?9{9DDGv@v8#nMkf]?)x9!QXnP<ɝvܧ"5^`iq"'uQaȑ,vu]*HcP7ke{/OwON;硱>2ieXDc_1ۜmҰn\/6]p@S_!g?X7D#l kMkL4!~C[:gTU$T\yȭ_o>#'I gTSMP̟:@.{5=(U+y -td6ydHrIj_=NM&\:@?5DL#'A@rb:ւygdoS>h$"J~OtaMJp_POlСd,!56A{J̇洗CڨoPrX"|c=c8?s%| Ժ6WNVLiWg9]:bi91 r+ !yW &QGy)c=s.iQaw  42ra#c\76 V!ݹ'dqo،6\ÊZ:r qPԙS^18)BY Oc-Eei^DipiH\tOI-jNڴ7*BxEN ՙwl9!7A l!n䂷'܊Yӆ;2@$9'-uYC6/dDʾҺJjquG;<:w!\k}ut:up.HR=/i0Цodf,[0$=D>)ou`S)(878mTlQrL}RMg;+<Qjup]1;/9xdC x>9( 7SNVwq7~k($Pp#Fx3~@ Q4jChk!~w*A9!jӾAЃɂ>bkЄ94N}xiZjϨQtC3iNDŽ ӥ Ccδvy !}d`O;ɛ/mO%:%ۆ4U]:ھ59'ah;N]zfvm0.JdOڤ2N! >_RB8] &1Nno~g`.u}O딨XF~+8tyV̻7QY2d@+>0N;vh܋2& ٌ {nJi&Ѱ Hf~oТUlQdp!TiC#L)bjh23v}xd\h/L Va(b8$jONG| h,~CHs(0Crh#"X;ࢆ,5vew{ô6^,&Z;2`eGkl> ބj,gz&A592:XqNB*6Їf6F=l?#/1s7޻^+k6pLфA8kPPfR JkC6lb]tD sƺD=b~&۾EJwxx?N/%f9etR43ؿ#Dx s$ʉ7v(*թN4NȼNޝ`^̈́ ў[̮RSKWs}DiH?3yƞkt@ǧmڐ3|ч33U=#vRD!FLhX?q.K’5ڑ F=trFMp'h/AK}G $nuǭ.gel\ޟ<)4QV_Ú>\Q5(rxbo0R#ϩ=D^XSɛDFfnTBi݁(}ÞiC!k%pF%Vtbɩ֘C';reqjۦ[`'z\~Z< w0u/vht_G5Ӄ.>%jGxj `"CLJFALjC*@^q&T8%<ʵԷbIkRRjIq}^c,V cqO))ӝ/K/ VTj(|s%[VC 'K"C-~f1;<8'WF$hg,a+}_ ׄ_jPKrLwaGN}hf$=' nvfрP?(!,j cWx8͑8=^ROeUsNtC+sk\.u*@&bq.&n eq$RxV^02ZR.4@(ˬ_L? '{#QXZg@@y Wޠ]ޛmͼ5jְΚ&9]{D~o;6å~g,tl[bفZǿyğm]s)f B%3L ^(@a_yn@IGu\WA|۸8mӞ9e -ܮSkyY*P%˦E Nt7bCчy+ˆNq&ab_5A "g3}h'ҌI "ZL}DI' [w4gkqȂ%r^8 }SIuvp0|>/5Y,ۼ#{ [p I 1z9Ę]h*3WpBXp{鋆%aY8DŋiH63}Qޅ/**E.֚@r6*pj69fgh:->I?0HwQ9,2ŹZ?+iKAJkX}&HG4D~yL.WBLJB6 F:oh,uz]yO9;Hiϕ3Nyu3:{qO@>n + -P B].Γp☿%$z4Y:Nw) a˾ ˮ4gΊϻOk͢?Ή]g3NT^o &lq0&_=zXS-j/&"Ng b&zXxKT_)${z=@k,Ct,}om0>p۞K:D<>,OQg7PR9Ia+f=(˫t5N?[QqMeP|KC(yk'&!+qUb{I8gHS`2AyƓOprBfG {UElSp`LB,Xlci8{Ch`3O764+&bN^ߐRf|zejEf#[[ooqdn MJ5'W bߊ>it5oba5tiϘO~F^ſdLQ-ՅSVx&׮=^o0!XOk 1{e)>a--}?l)DmfEQ͘M3!5 @$Uk|sY\ 'ܒOnG폷5 &sh4[Sh݂s_yA~=ntAA5?Qa_ .ӅMC f:kEӠ<1>wgs,pw|l1T|-F`. 
9k Z_=/y5sEn/]˪P9ڝHdd4>)4NKL͌~\$-:2(<6a?=9+(Fo??7ZjxAl.VҢt-Id\#?Ow{6XB;!JXVIɿĵmpɒ IDATxpemS"m8sk*JA)&WKŦƕ)%bT?#2;'(p(o-ZřD jULX q6}TeXHS8}i,<k{'p 1K^uz TWTl%vP3 n*dFjj No{|W+؄"kƳv*Șk=ܞ!jpTjU-=jL,$BX:{2PPZlMd>s(cT'QIfe+ rlDR[SZ!T#~ac;rJ 8m9śaw0ԃaUb5-#VI_r%CqF~G|oUP9fMSl`"#juO MHLh'pFP86 In/Bi ( ϫ~ByuTf|8k <,~NDk/zmȼ?z_`~КяC#)CdH!"oŸ?zݟRsK4AՉ}/yn;ňm6U+<~&5B1.zc8XV] Ì5{Ou,3jaܫ#hK!o#.3-8^pOTǚY([@B CyG '~uƥ Ʉ7DXL5~c!0=N+0٬[qc:h너&ޯ("ɛo7K߶$Z#R_s ȦڜKB65˦,GgawLDJl4u$d?ҵntУxuAJ%f"hjsMЬ^B!wV6 ^c]sl3KPP|gh*`)h`DK3 sw@ 2p;:f " TpPk烚 EGt? ~@ډ(խNh6xJOA;gDCAA /A]D5[%C32]S=ySpQ?mva='z04'#5LpIZRc`kEw_&VqHH4/&uRk|;X"^'6!"'\v$L۽zr*ڭT5@$+TY Ꭹ~J~Pb,| Ay&l{)v/ƃ_4BZ{Wf )Jջl,Q.brL_]ቈubRt[KtHYn#5u%ȇV@ͳ[a\I''uG 0 Q'壴9Cyf)o|^_Z;3l[YW9 H&kg5eqf8Cп/}۵|Ǔ?zZO]5*sȦ67Gv^(=L؈3beXϳ3_q6N ޯL4p&$|sө@9Uӷ0\{so%:9AILJoұ=)e XC/d ]H#n5գ7.8C5Rq? 8cWEUO<ьrPBB"d}1gk` ` l}1d arPPΣ&i4UQiU}Hp;9O޵X597Hb-2L+)q5DL̉\)lJ[k !#}(8'?HjG өRRt483k3kjD(F49ͼjw*Y5LC?Fm\X#>Br dPrɣ]3+$<~J%]"YOVLssE>sטlf^BCA"jaw "٘EN8)~;Rd{P"%n?P+ warZ JT"lk.QPP:t L-YDdB'+҇aVs=JFhGmޔ;Z#gw4 %9sd"Jms:6- kEB,OltyPa$?{aUFf#0FtR6grԂ#zp\LdҒ$U )+nsK pnmy4 j9`@*Jk1פkс&ϡ TjKkԭ!& *+O}_q]ޣICD\4 T83P4Q>%ֲdjyV% oG feH8R +#?;+8lB:[Fx'UvW=߫Y퇖3FWh# u< %=`A볏d9ؚv\f!q+.FR4 Zͭ&=i㦍J-N.T́$ų&QC }Ni1Ug4ٌ !s,/! ]  'BlOyѸreDLIGox@F~Xv?"9QGmQ]m#=6"yK9TLÐ+UJ]r*V8n3܊xSĚ|"OBް*cR{\ uf#$ ۀ gE-?k*YJEm,%y`B(޷,8mc5y5{5g.׋pd| $5>h= WcL9wgTDl{3oRZİNԺ6tĄ)5#N<7-sd{J;Ie M@hb\!k~r[O/ms? `{p.ŁFL|_)N}l8 gZ7d@Jm$q5}9LcŮIY'7\NB31Z܄NёyX-Z ul`E2 Zs'#@zݠ7015XL2/D Q^9,>k] ۦt*)DшiDM,:9bb\rTAk#PHR:Bd"2;>CJ&غƸ|䔦F|t0IDAOqkɌT8CQs 84܋ֵ1ia":d9_8 Y&B,7p?g@K4b*&\ܣ{f[ngOO$ٜ7Y`Q3XԵGeKX}J[viØ2dsfoEd U'&l(jD&M$:IOӊ9ZD9[!FCQPhFP˼!<<cUȨPe1}Q!/&S`4 )j`GOlG]TF3֭ayNZ.@#1͈u VQ . 
%WZo>4kQJ,0LHɌr׬Ȓ #p8D S;Y'JHUs:)N kgn5{?'I^\imrV{){ҟfU|յ4&y|p dc- Z~[R'A$Lh؍๵Ƨbٜ7EQ'?@ڴ~$+pƛɱ^@C̘_HU,@ ma0ynU%P (Jf!Ě~"AmkxESyݥ+C otI&eQjbiEFv`::ذٗ<<1MJ&f Yp8m%_M64)%WJAkj&h dž H\ȁϑn||PAy:=y!~A9NvcHSoFNvJy_ HA!As JR]'lf!e(h3F$srҮ=T̀Z2U9ȶۖ°(-Jr8^lTePP/sMA@``Јɠ<͒vG 0E8C` aqyw>T7EFP>[\iʖSMߔ4Ե{Fklkt!"1h)ВE!JUY (.5ðhO6 c:Dp$q#pLOܓj|dl[1) Rb4(L~%xQxd>\' CqF)hsl@'|ZJ258l ,6lƂEB$~AʁKQn, /-, 98qYǒ\U"J˒Nq1_0c7~م Zu0D FiT-Gb^0gEa˷i"aMd=-o s0?lZԤ+X Sy~9)6q&f&D_JyMz2ء>"AwZSv]MidL[N>R=z$`GnDңt0ݥVy#ifm4 ?+ Þ9Ҁ%,n{@drаaI]SnoWG˩wyL](Ce)k%m:SM!.*"-_ Jy f=0w{NtFXxXsLFH@N]'4$Pk!G& Ca P8(zZFM6DcHci&*Q4:)h4̙m jx~S$]1b2M+ٟ3ZVUaaHd[{@GD됌=yԴkh񌺶#'Zy{<*m$(וg=\Y vRB1}igPB)O/#u"pO+,읮!LZ3*{.k" zsv_ffm@c*%u*!fF¯2:]37H1U~7f%%^&pZR z ?H!EEH.Yr)Z&CAk@E _Y!܁#I$#i<〖 :Pӳ朗z*k`;dbWzp]w<:)BC)&~Ympgd4 6+Ul̐bvxVEC& 54N4ĴS"DVIIТ&T1PVW eY]N!-h'f w~ʦ1a2&e͟i##2q0#%AY~ ѣ|ZF%Y)2CtW 2~݄#ą-cDP]†N# kIS'clQ28qic@ ';JlҰ>*3.ϑZ(0kW GT!`f`*XZ$ hp lluD(R9dx%l!)?czgK5?% IDAT#>Z#0 aNX?{L5g Qf@2fI n+O28*R).UFr3SwJ.1U8@+GI8DQ7jvVm F hcij}/Ms׫`%{{ecw%|NQ'&:N]aKmR!|TìOAy&kU{Z>y\$Aj@;9{8~SeTi(mmD5Eb3r5LZOte&..ޕ@VFwθ٨4:"RF`M&"'& }&]W z<Aq F,qC9zu jnH""EG`6}!1%jQk"7Z>=kAX4 W˶Ӑ7h?1 fU (!ŢpENQ2kZ|3n,鞵!l}Y8 R]Z͗̋u2mv+x; D#>cg&dhUBњl'/EdMpx zA/3G4#}AkW[N J"p鴡JӲ[4p)68XbUqagMFs#p9CF[K@@֨tu%C1GWB蘲A@)BӁ77DGt@6Lkm% B!mkcugftJo1IГz`ij^V7"d% ZY.j`erseqXPXۜ*[A,~J,3nr> ֘Ը`5W+=D, m cj`jJWs"`KS)]Nw Rny-*k0N;v+>or30jAJdz0@p '`)bs&3&7^ %:jD:6/R#H et4D==P[ !k6ߴl֢+Ɔy0OW"9V\EXH3dO=?e3P! swl_6%f Ӷ.. lJ߃4(IS5f}ÒjCQN|~}A]8ע踽lbf%2-piho43s.1f;0gmD;I|| "G7؈( Zm/*:FAqxk6L-:>>t ZEDrZiE*OE GCaAYHTd@F c0jLvo6FedAz"xu_w@qݠKJtNIOD͂~賸e,P][^5ukzVgzWcMb Qֈ2Z:_ڠљiJ涁9?!#V<[iZiK@+)[w$:,Ļ&MNfWА( A^ 0rUY)ul8$nr=uRQlU`m?i>Z:9 PgŠ_Zdv*WT yûbq[s#عm7EЪU`@kc,PWo3oo~7\%إ@\Sajl9z ܲ =n@wGpS ӌ5s;?Ԋk{zt3^.Q}oWP3d,Рb<@CN"a pCchm9]hsۦH5 FW:jSrLT&Q1~GKƌH~ISl5 >'^;U ;dQmN*o0]P{. 
c4q|mpq5MJ'V4;*/{A Nj٬ o[(@vd͗ 3ݚ :(գD98O1AYfO1S^Ak!]MZU6Aޝ1eZ>QD +=[bvƑ6?]59Ͱ}kmmJ" aaz :+jAo)U~C‰櫘9{ DnjQ){o`Qx+_'6| 8ȵ0VAk &Cmؾs7~pí5jl<$< G\d vv/>-`#Ǟc٧(NLN8 N|W:e{'q ~1y;!}1ԌۗFOWgLU˄`BAO"Vm z0aL"qwmЃ1,][.%2m x=0G5aL^f}ܧN}_q)x}4C~O:bD4<K]:Cї?#bSr :M|FX)I1iFgmgQ<Ƥ!hFcrjxEGGWj8H(NfԊk0̈́?)\ww6_jW^[a;Kt+X XMLضm/>qw=5 gx nw3ԏn/<﹗_Ljht*غ MNvVv{Ko⿰4o3񒷿O9}b ,A [f=i[N/dV9#?Ê|N|i s}~D4q uQL0N ):/gBK#'PQws )ބD P05^cV d چ CRj6M usViBhX=DPu{b@3Ð@[(42j0 ` ?,9mu徘PW45@? \66㉇Z,Q9p%(6=[{ B6n!ek%aoAUi# /G1TNVӺ1fEeNH)՚Uf'nGOvZr7ʰvccOaQMȣ:n26P`؂ A|7x*s"*L@5M[SzU/䄛ikʊFYX?Q%֮)fsUJں>NX7s;Чɘ$?.)n*l>Rl[OxپhSq5 =aHшDy9]!7ϳeßM:9l_YTKfP4dPr=I(7}jkY2d&-h`Ph"X2IKU) !3HsZ\eCwV:,8n-+X6AiRQӍڜYIġ2c0+B$YI!eR*gJF14Z.xOkЃ>@-=%0Ii˼E54J6 jt"O$id>\5XD iRWL|_v#Gqc͌vg q<5n..@u4c3t£ssK|CQpg[o?|X5xhvG1yƹB 75na\O|?hr6_z)^?átx0{ /]_i:X`R}0 M'xۊ~SR'"u.,چ9Zet\G88vFBF1z+P+`Xnۍw+#O;vLgJq1s!=# w]+"#?h˜gr 1P+v;=frV/穐I(HT00(HL zyʔ"Cv٠ }0%iX^ ƘYz:4CC0xD˱2~7CΕNx,e"j|u@jMi:^">!>:`{;~l8κ[#@8!`uߡ S7O` km uCs嵑ƍn2TNdgb-hd2$RґɛV[`(B ^x8(pFr"t< K"$ PPzVyZs '4$f-σrg򚞐Q;M5Ӫ$)2̕ɮ9O`\sLcoIesQ.%JUKOASVYؖqr3'{UʔzA:ZEKg!Ƣa«AIVdEdJzYxdgѩD;u6bBSo[r0JS0}tJ+$XiBdtRqoڌFf,ځ6>>??{=?˰z &1c1~׍X_tIz^ oa~ N;_;&< [㝘pxO;7 W?WcB#X (wV5Xu_ 0 $jf?|泸 WAMUɌv zLLkLi >sapƑ4*#w~O)z݋ X'g$ Yp\:%%uH4c5@~2p=m04 ̙QU" ,:45Rqvj;|/?l LO`34nތqCʀ?ɧhv"ϺҰ)NYq6BNmIF\T6UV+Wûp˗-:-wu}Qx=KƎ%jSK7퍦5UTnnBxVp4!i5@ @H{S)g 5`jF\c^B  c3jV4 CbJT0@j FU04*s.6`ڊ c{+Yh"t5y*YL}Lu bIw`j?E.+@)4zBi6x#; n1U H`Y@e엂YQpܥpd\= D/*(kL8u*d+P$WdNRJV< J1HZe@fLDO|1h>lXN4i4M,I'9K8`kˍxs5W% zGx.ȋ ҐuTG|eIy,wv 44i#0[ǙƊe&+0 bOlKldɥF9L)GEAZ".C{h xT-6 eMŽ ӓFAڨCVmB6Ss(EVטzt?pC*K_m8b:l2 wGfLeҨƒҘ?xl {9/^~_Ytn>OyT> JbD&އ v?] }s,v*NyeXsYXuIX<N`4(^Jdl ͝( c>qNbh2ݩmzXhYWI9/eJ"Fjc@:XYv4Ѥ5l#4䖙ylm74B՞7퀙oޛ~oxt9x|?@g_r1_Ǒ\^,x 2+10Lj/8Wx{`] ͟7q9w4u,MƩpYgr,SqoWa,YC&%<"+SPF /IvΗK1B .1: |u]TX {vtMo=*rhЭ z +Taa{s4 ? ]D"imO D@|bՌ4a Eܭ[SSZBj 0U/9xqPԇMڟ&,j(,`Ǖ;N’ťA6N-)`)LOhT/wuh1b+w0y}k'=jøArj'''̠U\ : P¤w,f"NY^K8yus6w ܱ=RcV@%}Sq IDATrF)V ϨIk>)u7pN5pֵDN>D@U?SKŴ=~L̴nJI0yh[E>HZI tPeBKY7Y=53d. 
Y2QL4A)Ǡk( O cBoS BTE7=W6M s" 4 @j03w`0ڵA۰ SgPMN` jqpM/^/AU9[ozVW4-̽͘ \J%i4sּPs͡/b+Ū]A4J숇3GOa:\̦xލǀ X2xϼz+3pbt04,h~3`i~݇ffYc }O i#G儎vp!"u>wDl3EǰZ׺֝,rfe`a(I᜙uevU 4S|ƩRP8UqR؋}ꈭi ͭp4J <V`VԄ- c_;T9i,j2ҙHO9߇g8$eFQx>>7n;2LI^/~8/x[qs^5#H4Qkj%VfQI)Fjm`':d {uwwحz=_C@[[\gهp_^AW!9I';7 7 d/Z&vR:RJ8`)eD rW[ #eÙ7=n? t: M6P#rK6 P)gu]Cr{RR"Efty O>j 0.*kpѓ8~TkzbpT1S9!/:>UWc\'C8bb [40<LNd^katЂ%|y±qI!rvX(RK [': rZ1 Mla2d(8L逸SJF!C6ÍMJ p:8+DX A YxǦ Ӳ@ɸM32MyP& amػa(qVl:hqqZc௥`)g ,@arƏ^jOƍeEg9L,TH&`^Z]ܾ} 8 0v-gEKyG|oy˟?{8o'~7lU*֙vb|ZT6cRQȎB+>z%f$)%"wm/Pv?rOl] ߃C^zƉO]}Fcո =h"` h8h0O w3xxn]{v?[9ʊ=nCy 9J輪,<䥔X:#GK\dg")Ti'#`?wd:cڎy[1{GM* `646NX< aR+쮝xx>WsK^l%tq8^t [5ǝS89/cV,!Qxpbmf6`,4s/Y),Ѥw3ʀMfYBUTa7A&>,q; Ҟqdp _=q-7) Pơ'C,čL;ipҌƅ1SzX:ظnGTz쨳Wm]{;SxWvzt:C߃'K#_JwjLLV`PX-8(βl(/[z{Y9]vP6hGkˊ15GӸi|i[o/9n9}\e&ǣފ5A/SW<{,)7!H;g~9X4bMsc91,`UkȊV޸- ġhQ e50[g2c(nqIlEtscZtڷDGE+GQ6XͨQXLQ M[c\FϢՇ;?j11KJh^K9X6-4LRk*$àY62L~ǘvu7Z,m[ο'=% %cљ?ų~qOug< )!zeں=͆iIX>LEwhL 8jg0QO ZCTQd$n?C&Q35*jUDۣ >;(f⢳'pvܸuxQGvX_OqHS!{?܃;q/.6>4ဴxVZ(KsaESA0n v}\W V p~0Xa޷pǽ_2*=8>X5k0fja axmرc7/%Y'2q^Ycc==Y_ozkoN_Ɓ۾ko|q/Cv2 xq>uJߙ@ Fg7 >q&dKw.A'Q6 1; zjc&&Ν "7IeMq+^;/~v# v̝eAMqcE+_mѫ*l*@}=_go \ oO}6f!+i+(eb:jA.a5?vz؇gs}{?Q?l6 WX(T3[E4Հ5l7"zƪͰ)ab.pC9̯B#WOH%B7u rg `x [TWwdk߹Ys- Ͻ<9 )vE 1{pw63sgA`j$Qx~cƯ|Z4r F5%Ɓpd8Y+2>׌ca sƗoÛ?q:xXV{։O} (Cq3Uoηqg=F#L63V7$貮@.,:ugq;u*sXa+q j?o=PSǫ^*_N:7xPi<5ܻ0vJy5̶9⡛auO?6]_ca;vl.x~~g>l{flXoK=VK^r߁{ۆ>OW 7+??ĦsDY\X^ҨY~f]a "zZR/b;MG\ Ks8pdQ,y9yg1?5bBNյ=] Suz{'Wa_w }3jjdSCqݣ֨F\t37M`Oƀ'4:]00 {$n{d/8f 'VZwGZ 3}L貛j4-/䋺͠j}qЫ@͢?ww@?uⓗ+?~۱*li*=c=&kfԷ_;V n/Gs?8}FjzN\#ٰR @*zN0x[`ӛKPv!T;^" -+H!PYdm&J.\_c0XYhċ"U.; F?acAНJ0FͮB[7"Zc4?97`Z_ 00,N\gWaNßl3Ś }9\pd; "Z(QMΝjuXUkwGZ{WZ ݖW,H޿LVvEP9r|5߿q;GxFe+Jwb:Xo n> WP(bW?{ [ndۣc\|Jșzd0I&M'14)n,EG?s߆+\7#/1xj<F58t7h|#gUSED ;vk{|λog[߆oJ1~|SǢX#GǒdEWM]na4kS H)FJ’&ϞIe,O>/*}O+ D"DP@:|A IDATb~J 0~}/a߈)2̮qqds,ŢmźzhTk*GKnQOFOhP[R1QB êAz Ht݂KzKy_|8f570$Qly66.[.~\}l n&ORٻE< P11o>:Wls6r{|oŗ|9z51xnhm_zq=]"Dh(l#< eNQ$˞adNJ._>\};f(!*, sl[wҋ.~x(:'O=q7295Nʥ9IGt 
}'Xyfh#/}+o{ۯ&ԧq~wx7\8CE$ɥLb!!6n1یIؼ5P`:,`4̱;_ nO\O||;JwKߩxW+/@XK=y;`假詌໠d*]}_y1Ͻ]W3YJ 椲Y35'I+fQ`rnY+8b}@ hq$B<ĒeKAUWHHh-Uh#4a9S9F&XeIhu CXMlxF:$0'::4!~b%H Ozݨ$yGٳ  שn5S)hp|q{D,gmYWqpkiiBXIEh9̎8Oy?~c܍t#-Hq)Kc*#]lsTs٫eM&/Y=&mQY3{׶$Mof ˪9-i6tEk&&AIPJdb(f)(ƤQ1H"[i2IKj$Uz\DA(_ѰbGbTU=;Omf"j|f5=>KvDt|4+r :KڿHL-AeW_20l.DO[sgQO0)Qhc;7MoRp*OTB]$W>|75( L[34'k& 7H1pqY Z [ 3Z]ӆ`^!R пljٺSń`TS]?a`̛9+7(?M:)>u JJyV,0B D )[@П-.C52ƙY{Vj/)L8-}4Dŷ bC^za \F4, ciHIjC`@ Yޑa-"7WZUq2< ;Uйl=!X<9H|Kӻyn "(kf6)~@~v,.29wwj/Ě\Bo85#:x- 1PjJgg%#X3>YcdLA|Ų^K-iˡ=!CEz68v?b|?Ϣ3AlXQ7o\L<+^͏>7qoz#:[ɥɚo&]̄ҷn-g`U,殦шfF 7!2Ƶ"qeH$5ʨeY%:3DJL( qVɳ*S6֚r~SGOm1ȏ=uB!HB%9A''|^ض +$`:7o&x)_~~'-!_@A/Tϡ4PBy!} :]y$gۋb.\cDz|솫[ᡟ0̣06%_'ʱNLshTѿcO<]+@v}GVvV^.,dyYbD z9WTL 2gh*.A`=B P#^G i|%fXyiHE1䟹@O8k=Ȋ~FG) !T>/x1#>{拦 S$5>y)-˺ ˨ h$, ˷ΰyIUnlŝ9V,@/=MO>͗ }1]f <$ؙ9D29;n3\:;`YXǧAN($]E ff܂ӈv֮Z.~5\+Fup+oMVsB$c E5" abfm)x8%5| ls~߃'&z;<_`pe%E1ZcnoBf9_D*qĽG {;sLc\Rfcn3n 5Zzʃ a=],Rsw&P; PYi !Q <5xX,53DJNT[Vok $/Ly~$m?oQvHg-SBl}(b==/}qF'D&imЌU(ƸL[&ڤt$;U:RĈ#V k|z:ԬfHhxp³3p}St&:𳣋1`QTOpF$ݖ1);%dƵ4͢o-}(Oi7]S*i2kנNC U$XkU Lis|9-=B F-6`UL[5m3Y1K_y~D@D^HobHO--l^rah%jгCm&Yh?J~&bX<]89#O>˴ˡ sfv rǃ{ؠ;G30ϕW_d$aqpM'$Zacp쬯b4zcȬRJ)raD/xnJM˻PE0P d95jxhi<%zˊNJȽocru!a,|sb?S)Kaf7Bv °w _O";r@$&}K95W1,]Gɟ|x"c+TP望oG/5,)?yt>sbr.N4"t8ce EV$qI_zsЖm0*P*芖cuF6b݇KX}(N~c9<h|Kf/s#,Ġ,t2Y_\˴),yniSà>+9 *(:NVKA @ϭ-%cV*& ܰ~:0hiPiФs-ËGXMirwb/|6y֋yaZ't^&5՚>J||p0Fx^E\Jc#JVX]@<{ڵ^#Q̉98C;k9gӞ?ivoR$(8jTQ:v"]⚗mīLPynu Nؑ}Q'} N=ݎrݵLdu,:R@фf>c/`B{ ߛ .c2l/С7_["9Dp˰9?җy߾@i=ot6v1W8<׿8<; +Pyf~Oc ,]t1Hz$W& |ϭʼYLi8^ 0 z,JuLNY\c6!:xRQkY MvsvG[gb̑.H$չ2Ӎ.A`LP?U*v3+u %c4}5` Z(w}LQ^qBqevXͩ2t܈B/] u:\V⾣3윩A5ONjvNXiD)gG{[Jƹ[Iw\t GDﻛ,^1EE %_AM{O>K!)wQ=weϰj4f\G{}E m1$.nh%VyǢ \aLi]TYcYVAt E;%mlgd:)OirسEZ{\~ g?3U.uֿn¿\Wg>{(S^;'Ȟ]c#ԏZ^/^Orǚ9XywZ -,].+6_ڋ .rۧ=_Ж}+ki}RwC\#HO˄ sƴivE=1#4ͦOeHW^jJb3W>~Qi fEΙ*8orƘf vm0G 6C\!'CSu5Z=q)dm6 l8޼LpyY|04~W _Bqf2-ṋۢB8+4]d4R!uB}B71!32a ($ٝ"c2ZzuC=V'(e4Alg;Wkw( kx6KILTDŒ4`tzmkhT2iJ*%f˘$FA Ha5y#"E:e#5rj\:JDBRסKJ Dz1<<8.NZ$"c|kRo:BdMp.2V1#IQ&`_YS!Zz.bCJ $J;|X%"QkJ 
%DFjAZx\ywӧy{dռs zQ|@iRg5_7\dR&vg VR:)l3:#ho;J'Mcd7 DXLm|eAYE\+=լPh)8 >3|HXBC\b&39C{Xff&gB&#~"EDfE<8Qgw2+N,'&6T9vcwի'<(tWsM'v_Byxesn'Hq"~>6zK7,,q}E6d 5}m_Ym~Fa>?e + \.|7xbAvoJv}8,+!Q Z:&aDsOҡ;]'[jr7m^-fbP_=S킁okEfRzZ; df`P)\d.y:$BՊn<|,Kכ@CH[)Y+~Z(HMݐ.1Vi[) iꓼrHKֲlH#do w0KVi~1;m5:K:=lOGo](P]V =*"Pc U(vN:mRS)Ybg9Ap)F&t'#7up05CE0QT\4>cjkD2a^pyJ&B7糔6CIK^S&|6Z$!v1o ̟cy 1^^72aMc-D^ ^`-Rfd5K+$k,3{9;D[r1˧NrqI %uY`Bb1YS1,QSrdyjpxABW7=uVi' _R`&+0Fkg <{ Uԥ+$tdN&5m_QoXs IDATpQ@pZ㙑 }*A'Vg{tf}l|k])Y[dWQX!{ X ٱ MҚ2bU;<(@܈5h p&o XV XM_[ 0<9HaydOmkI  ,_$ɪ!RC9WzjŞe&:JMN0KoR4ͬᡣϮ!pP hDE& L,e 'X`4֬Q왴L=s7!zsN"y[N\ ,orY6 O?sE >a 0:î" oā1k/t΋~"h!*`M}LiՒ? qRcQ簗{o04r97so(N4dx`\K DB*ƭG˼zY+sEB|""(C3 NhR(<~1sZNq<ª=4{!gKĉ[p,E&.h콗Bh蜚&oęM Ş&4tz)2msn%أjaiQpG'"j"Ez &b]G}S1SzF3?>sbƵeiIJW{cbs7I2h[xI$YLZ 9]*a`Ȟ@1u}^ʐX:J9OdrAhr+ C.2b(NMaY5͆.;s3(c[Pxh81=AgfjՄBkF:6 VHS`B@*W4<3MtŸ,G13BWw/)uZTG"9}:s*bA[8M<Q@3و!?kITb WMk:*+䋰6_9wM+m(x$KQ&unJImXZP0R@Fe֥K;ű? SK40Bqߵ"&M}0x>׾}\j-LiXO)63!\bK1.rfE4@FZj"-9aRJ/h4L?AEנ'g鳦jute()uJf\ͷ5VEOr]eqk MC2~EW%"Bs>F;5]i )<_E PkA)R ){=]y|{>@gᄑ5,~ok/Mox#Ḟ|E^ƒRMɕG"HZt|u[QQ$_or˷oNx/c8al ?NB[e mBz*w%q!fM yu5i.rew)"k.gtٵ2jQZ[BR05b%V {/w4sKEZt+ɨhپ)@&5 Lan3>Z$쾴6nqOZ 4cXvRՋk,x2txtSgH֚K:H6oV; 0xF&r :*O%< Yqw.tg$*8JNFuV0ïslWy捬 %1Q1~p}QPBTMT_>C98by!A.%zkc0!<~̑QsZV]"z;G"Q6&P(1uA4F?\A)&qjl:Z2(~6vI{B,hu&YͥMv㯨<m6t&pf Ҳm&Hi\*W#_b_ :-ìVG鴛bi?`UI1À x8]xo~>z-!n #'ǹ$M?cj l^ql È/-΂:vZ}(x4X9@`CGSŃ݌GV[@84b1`BZr>wXAi&_b[)mYKqz6^XHjbMŎ Ϧ_O~ٚ8t>DFfH+y"V"*XP9@ӣ5GC~b_`1kB!\ RJh5l||2IVw2⏏Cx+I-cqgRP)dK.͸;(u`<ͼNg{dqQ^eDz;G8԰2}3¹.V^KLaHyq?/>9Ҭj,sg.>lmq< Ol{oQꤣ] nZIGJ-紀s 1tuZ1LaD`Htو+HqljUYzQbU& nz7ucp#Ǯbh^UOɺI1RFD]l}G{Yn 'v5Ƌ1g 99Kamkd#{ʢnd K5LD_ڎړl;Tx#Vh^'tGceޘ ac1 gRhTD," ?g8&ZB*Eu&Z.@5_G'KqT |rçxW5>D<7P1/{am#(Yiܘ4B`(QE2JG @"AbcIPXƲk7]ws93n~ VB)8~?|OLߚ) xogŁBaǐeE#{qtj0&myDz}Mj|e<6b0Òpq;IQ:{6ײo<_>vs*?g]y]FNhrVB)sfr~5ft>~ACH&"C/>r0MO!z:NfFeہp-}\X[@*NLVEkZ8S%Vf5J9O ۖ@ݶߚئ\kՊ;bt $;6mµiX1V8Shi^EI.žF:t EGxi]TX: +!:9~R6 n=\@ =wUMxLwպ4kJ 9D :IӾ(mw#;t"[|u_,r.ldX˦%+zn-#2MB]MsHsx759j4zId(R:}K9՚HqfPq̬f/-2tFY)[dXt1 
3ʥذJ4}[b^GY|$L5<mצY91 ANZV [Wyt{ $N P"5=sT>j0e,V%zfߢ1 $XL~o?<3q'?fz7 I'< ۿu;rKGX )31 &g9^k\pSw͕$2J'=4^dgk!;/՝x*f&98ջ;"ϋ!7l Y: E9&Ulkmo~xJ䓤C[AJt/Xo?DxXWn`垟b"r_o~RB ¨*uk1(Cyz=uDIVg QL\@HAl_t]=7m"s )P~#rr0}f;OMsNr񤋪8Bz|.iS 1ɵ4i-{1V縺p ˏAx|E9f=*DŽG5{k13y?_fQRT(qv3nyLLqZN(j+ $2M"5_Z tu):!0Lu:30B<ì[!a6| aVhFMcLg3t MO lVM mҸ&OlޜIF.`tӐm0UQ9:#<L} /_+h%NJ a[s"(J u~rL&;u*V*tFCj|ce}lz)kng>.C_)P&J[`a2/h+!Y qzs9вn> +hF6vޔ9CS-mE]۬m%,ʅߜ-푙 @uڧFw2D!LT2Zv4-Z?gbKe6)U@D1γ~.ۂKDG5M$zh̯T4i &\rwٹ{;QeKoEpl]zC/b {P~يHnTy͚.֪̊5Ħ&V"8˖X3WKrMn6TQEIK8~ 3jV4fHb,+<4 FLKTOs=Ǵpqs]-%z tb)N`M; $)!غ Z۴h #1VUdY6;d(a$rSg%9QeTda"A n g6⠀51p&}!]hTXZ_wmN:6R$v]WMC IDAT{܌?\WJ8mK+7ny ΪM,;>'~ϒh %GV`U+a}ڑp ,T䔠xf2^U *>qI[J'9nH"*d̥֠ S!s'ؼz3P-T3OD?H*lJm}+#ehm P!EMVgրmײ"Af8k^- !({>#lXhy,M:ζ/AHST1 GkؙiهØ@K:&Qk[s>Cd*m`93]7& mǑW'vN )r£$~ɒ2(: @ Iv͛ȏ׾j2jtOfӓLT 0qzV;4dSdzڨۉzȴKp4)XFKx'GIYȡq2`q1z?oY i]@$ЈxNl@B jV:لi i%մ6 vdp2-Lg;90m H4X+=& ,eזI`@\mb^^7e+8Q4._a6ohݞ`XAVɚ  :;ȑGbjObHQb] ^=Se**a06Vb3JnёAgm@]\iPvEF2yd8s{C!lȥ lDMtwkmؼMn1AGJ>nug^n$%KR8]eFP`_ʹSN'"1BBEPpGH jFn*$sT?/r>ƶދA O4fI}\ӔjޫqyflFVL,j_w)jWڜRTcځhMQyEYh@[ě;hg1gRXC5s% FENB ƫEbMBhaf˫3m<}x^z_6\ N0;M ZM+)rBҩ*,O( &m$ t\fqdDȚ߂vHm>G̞8ɮ?)bMlUXpu+DN ea]`/BP1`1/~NղC* \ eM>M[Tri>GQJ O.5iq !'ٱ2 Z%Ȅ!۪c?uT=VP*L``/ scƠ³Uf>Lۉ57?qW'n;QOӭ+hj:Ĝu+>km+=F֘8shY~$yO DLr9 yU[y>݇ʝ-y[>R&Zw9A*/ $aBsg ,] u0ei)l̦\|K)3K@`84K>CHQl$w 'BR`71{1Ù>*Jz"bv8#Y,/%Ő((jE 7CɁbāLEdmfOD@S_$:93U/ۆadJv3E@Y63FJIJN$C? 
!P $ݞ冁/l;ck%GNfM!Q:9=db- _3ˑ[qBOMnqrnd`8}$3j;geD.wVy߇8 <{d'Xp;;t-IOnYJUEyH|1I}Y9 ,ZX"뱙*tn.bIE/!'ggIt$p_e{j|PceKHE4e| qyJ"aw~> LFHs1Ƭh"mMYeðKX#+yE`F:*Rlm,[=T4nY甸Wӟ D.fjgM|0sqƳN-YL5j g6Vk46\˰`yA#vgIP6x&4 -[jB8GE"t{;AO9$(rAkne0!Yal`%߿dݝ~dnԑzEEC Ct9Pu<͗g׋X5R~S&BJaTNoi% y Ʃ}FXԄֱm}_mbeF]Ș>/8e?5sM&" 2vPta m2LXcH.Ljر(iq\IR A5bm݌MA9)#IZMʊ\b RW)|)pBHW >K#pt/O&1b((ypJÞIFE1*|YA%$׮ JV:dHi`O7"&7a u,N[7gi\1d:dY70c 2ɷu+lg%!D@517WaeX (~!9JS_|W8$Dt>6oNg&&h FPJeaMc5C?.O#&FYoxۛ@.-af}N k-/)GRE=xhKI3jy3"˼fMz ƝA{Ȅ1BjSرV8HGCo%qY9~C}vO68@gYR4E2 pK۸!+*$2$ X|'KhƤ@HV+tL"+JNA;t),F9L;9~;^u]%SL*$7:l1L^"S{ DLZ"eD&^@ZYAoz,042`R.s2t}BL#eWQqG8ER %CLKH4Ʃ3[OߒrRgv0z] X$ގ-:|7ۏQ 0½$rt_aȹKcdC8"HS/NKH[zȳ,˜7[Ǟ4N⧷EKsx wy=(d|KgbimyiXIy#?$Cg* d2Qe͜u!Td#,Vx E-]ZK֯~ƜZq,d !^_6~> ]٪z,ؓ7.j/\H,y,,x=#Բ+9W:E{҉)2WQ' \@3bc,c@X88U9ⰄE(Ir-QF7 m̉yk/E~o F :E+E`FPAG)++~=8YS+-kh2oihJCV<_;q7^A2O\y/%\҉`Xu3[DaqGGLz m.$%vWV+ܔuH5ͬv9wGw8BZ6\k8gv!:3/|ROQ=XwNN~;E"->x)hcHtwУNc%Ҧ츏Kj1Ibx/,s=|>Kg77vN呥bX$Mc;0&UWFz6=3GgyϾxď1tJ9(';x<>i51@ 3+I]N X/)M=BX(MԌi@HrHY<[S<4i[^}XwV̰v1N.g$y)_ܖ[s=Up`U9蓒n%6`Շ",Btث/¡ pqMЯ P5Qoz>.c\CtBXj-2*h"xF&p72]߂dḄS6}@\?9ˡT5AFjhv\Q{żZcWQ!.Ķ]DJM,F7BQP0R6vT0}i2M4-5-[[HV|Jgqe)g%+7s[?_\3?M%T% S"h6_FK>gRY7 "2CB>awo(Y2%֨Q*c$M u1H m|UK*%ui!x6i(ݨ˹j*P֒=[n6hJL@)WVH*Xw„9߂*_?FO2x3s aIg;?qdsݬ{˻Щ`_PU1ݬÄʼn2i%R"h -g3ZιM:xT eW+ ~-i{XEyd 4jMV[Vyw?F)AG\/D$Hy>r8[П~-ox;}Bzr2~ _ zn>M,IGv91|1zވ e&6F !߷:xD wE%R!ţ" *Hh'&-uqBՀFۄ4bhMd<(e .b=N67oNb6gT#cQT8{0ry%D[Jӳjk@TpA G̑ mLtaY>.[n[Fq \qD2`AEjMj&~i6qm* }DG@[-Nᴷ %lD…J#@F$ɬG`N$B`";Yp'ICgT>`|ˇIsD?1gj+}9F!z o@jV$OXj%RoLtZq o%x*~K߂5 %ٍK_IGmճH!(~Nf.gZuP ,[`h9% !B=<ݞ׹z2N%W*2gheiERXF8%JK xi-T}_(@yv,A%cHwMdܞWhmL[9]ν^ԩ̂MEi7/bk1hkѺilV Qkr4^Sml -h/̢~Ӵ37N=63M?AϢ e#Sφ=UY7JquLtYױk)p1R^n]״Y84oй'o!}'܃l"2ClK^6m&};8v={h[쵐ㅯ> '^;ټrűx,qv@^>ǧ@4Nr&3lilnݵǧ; OkOpR'( 7=~EXcSzm<ӧ -VMhDe$TZkͤڵ`N (][uYH%b N=WUjU'뎉& n6ҁG/⦫.c#鐚vPikm1=?i0}{'>Iu`o 'UH\4†pIU** "h$.Bt2lйjO>.f<șsYC2i%wrfl^\mȗd)R,:BŚ>[DIںBp>?J.\9.c秘;$FFI0 dQfO&D*I+NHMx(*ZtmXHrޟ(}|U[6Z>VTXkC=f3-ix-h&,ϹL/~3c!V:Lq}ǖmx2fmhj 8{{a^3 ?&YsǞû`ŕ7pmcEUz{<•H6o ~jQJa2w9&JR>Se 
+Kq(Ǐe ̌g'q7~ o|'#"S(1,G`4HaBTV)8@qem%#֙B֫xaAkE@3 F7*=WPi. ֯ZEgO;'O;_|{sp~l6 z1 Kr,jKˆb?'ѡ,IaiDfڳIo& c/R"KV o|;F _&*0zv0UN-ݒ?q|8q/ݿ7齷rudV?sd 5>g@[qv4Ʉr7Up\lCGZNXC b†5~l Jķ\Me,(xiB'hݤHaEؑP 4$HV $h#j4٨M2Ixj!acUX3#Zɦb':dKfPG(T~=_JLW y_I;MkG )뇖'ZU:2c'9Z-I%Ohϲ5=x6UQΤ995M=C8I<yb"xd:pJ2t$V O ^ (dtRN"NΦqxtI ,tuS*I$jo?&<5+73 Tײ$%yp{;=C# H !v 4f5X cWIrɇ~N^"q7ޟa3mۉHT73-VQ@)B[RE6V<Š cX%sd@a<ػ2TTV aCOUMkpcndlw-(g0WEVVIQ 4 ,?AZh:3<3/It!I]4zH$miU*s k'<ltu5݇ prF $wV=W4Rwdwp;qa޾| b(ڵ8$I^@zV?++I{ftU;q)ɃXi)I.gy4ܵICS_Af[#)g(a-[@OՊrO[3)SY"\ЫNk8 l,}㈏Aqַq䩗dCY,gl%gItdFV(8'9ǖCp1©4/n&ufə8T)nuʩr.Dumc\Tx$tk*u cB!|aE8/mMGnK0l1S v*$fsLGhŵ=Y͂'@`/)4{ϰaC(Djs@,٢0 ,~D% B<;L-Ą{1zJz,k.Le O7 I5GQj+d@Ǜd)Z竿D ʼnnb QL4Q1GSjR-+ \_UeV&+ J tpm 3uWXKـl?Z" 5+5"_`?a~Y۟B K`ELSbc1 lE,I* )EsZHpԊ1k]?dKe_@O7ś︃_;iϺH=yg?cν<3܅ !UL{J:ݐ]B;gtM?iSTc?k{=0,{ûx CA2cTynvg~}sa\ygȮ^N')&̢_>.Z9ИO3F%d#yV{xEr'sbA+I?JlVQ٧J֢ ŋƝ#挍gv㣥i͌uu[3 !pq$a4./1_ei7F;,ˤ}d/.3$;8].{F'zjO|['|I Ō[g5pG_wnk;pl!BOTHkcRL$ɼd{'??zc?Ğkc;d+LwF` Ws=;XV>@LKL =/$de|\r^cFݴ rXh̝.脵Ǎ=p]Tq6y88s NL^밾M2Õ j6 H3{YhJAp$#ϱuشA]ZX2_zOUot^ɹӤ+4{vxǖ)Mr ѻCymncltv {Z׀ c>x3<-klݔYRk0'uu%T|Cu-ZõZ85:]ײt\3,"jd 5NĮzP >[Yyx E .9DbLƏh`CEL tD7qfWUZk̙I&I BA@ +x-WDYA("% ғI$^N?{~}ڔ$3d>[.dy7?&͏~5^̚$쾈: tߏMcܰrR~[hhg˶ iÁ-;Xr>JD^͊{_t !&DwEΝt>^W!{watѻc :G(00HoFIɷDx^x @*$y>w~twW׾,o{Cwh<Omo}J 6>$twv0wչX o1nU[t l;]*@$*RJ0y0b|fΝϥ_Y,`ߞٽ9X+, n kB`K6ɪ3yqk;>ݎkYopğVH?]SIYUƈU1Ĩ趾h0j; _ TML1RJkcJ9?׵҅I!N{;;q|4ޛҺUeT,2>%8@)kwwltL8Fq.*G @̋K뉆` <ۗVb'XJ@'\w[xގt {?йԑcRUSM$#*Ϯ̓C"MMsGo/:s0iПN13=ώT aރ;y1kvc4Ju<74bp=_@$25@ XP"NU=t9}{5lbz$ qP71xzj$@57p^7}8UL9:؝)(V,Βx[zgt,C T~dM 5ɭ4e:^DɬAPN)@<-^'X5ԊbP!bC`B{m̥0Z 3dٴҚ'F'(~,*xE&Dgj ]Ԙp LGͺ !-C:7mC%rW{ VƢ+.c+.@D|Y篈A?IHYswzS_YVO,+̜e&Ec?gTb҄Q@qkRdvojEijcJqh?CBh~I3t;m,}U, !{Tߛ~BKNBkb82䁟nkECxV&Tmft0-(ZYPԹ>C[)'IrF+?z. 
;9wH Kd(X׳kdBMHRY^]'=#"E5ג2yf[̌Xd -# Q/$9f(xÞIf|L0xːcň_ϊ[g%u9qLP:UVz>Q65e,RݥdpZex& IDATWSe"'X*J7GɅHݓeWJ}1/@2?wc .RA:* wiZ,'h>i #zOH{9v&t K5໒[*QUmǰ~'?v"^}y7KTz91S7I+Wbѻi9#ɏwm!_–F $޽ב:I=rYr2)A|qwiD$ XbFbڂ%\v ӧqd$81(F<> lNpcZ|}!<@ NjsgεEU1EK|!SuAsEScZ;7 2y9;Zy+Z\02 xt}d/We<,Y46H#LGgԈrK-FQڪYW$Tv!#btQ/ 㘄_GCzcg9 `MH"'2ѩ /?~ `ЮhY0\T ϔZB103ܓM9K! <ݝDZ}ԐR>=_h?& NaI|egI{!b %2K}*/ۻ8C>,>ry6L#acl7un~pLqd[%Kyb{; a` #;iibK ZFr!)O"#)`sK=74:Va\a}F-:Xxưc(҃ lM$`NE 5 ٴ6Աag;a|yMwZn-kDB҉po|˛?]wo}__߰U˸5LcrC=ʬ@aP#r\qѝU"x@v}FVi2䃏f(LNƃv- su fL[64MV>$rAQ"NWO,OxԚd`)Y :fx C3m+GHp<6_TJ~TJplx*P>}ۗ*K =p2{}I#:҉i,*Hyz g9ZRZ֯# gNjBPpK ҏ56.6g/e$H k-tp fPԣ bSèlbU|^`6>RQ;*)\U1Jd.:m/7B]z¯ Di%s­paeJݴ+ CL9!m|EUrtsn& Iy(!5&lK M)ǸmϟmQ*Ա$j%4=bPNta:[P.3Zyi!46 Eͼ=u/+/ƍ%Fr1ZQKԤis}4b3ɗw#V\<nf[c`Ck?}i45A8`¹8F`I Cp*C_:b[I+>(k[i{IHC#Gq *ʉzX /e7*q*\XAah\!aLgU[\kӛ*A-S`PU5ǤD"@SΏ.Ƙ" 'ߴn|, bF3U1Zk*X[J8*z1"(hT $2blr" ,26S)J^"h(bNI,hȤ[4V^se|g<^ O=ªEבaIEب¢j&%Õ<h4Wqcac'uϛ)^ S NR78ۏ8A^{&]S ^y&1#Q2cy8 BفUNXƗ? Rbx/JgL;-a6c7ϞEH!J yh3ΎL>%>5̠=lO&3L&8z#Z+i{Ro61 Vj̞kA\ }qo]M`) p!2(GWFŎF*0 MD7F!-#Da0s2=^bA"!4Šʮ NCkY*~րFۢd2Пq6>#Ex_ *.@snQQS1/Ʈ7xc* qJ>F8mF-/)L9x[Kr SCc] [`8ű я>sF$-m@v1k:>A1j 'vY#M_.}|]׸x%w8 Ϡ2\sťԶcz؅dߦwHp]p1(ozmX,A !֫sg#7c!)؟b_}{|pa\Xy .]̲ hl .V%gjrֱ1H{ˇnxT`wл6ЗPm\Yp4ΛtR<~/p̙-+m#xNmj/p~2`A''MaJ2F][GoC/<3؇%d|Dj,ᥘBC -eQ)V6%׳ß/~=gu@W;71|5]RiL@#}6Rj.% 29Iv!FnⅩ1)5>8"o̱DBh,ŽC sฅ"Ud(hf#d>O "[ N@p~b>G Q]!(A 8S JʩE6!*&5`V?Xܰ]0RUnF(Kb*%*E͚T55jMS 8U]f؛rJĦ969RJܕ(Lῠ'F? }{+AF)r"f ql!kj4gq ,_bL!4FY65d[%}~vO;\ zTt#L͒% x+.7M;?&y T hw\yŅXf}3Bf$S@*QDiVw<х3uΕ|ww1v6 V-MTYȩ0iDFjXZ09_庛yOgUE%'.H l"KCI?'x "H<4 9mqd o q\a8+x>ץ˰EC0I{! 
?0 \+ZbWYJKecƧL{:ڞ`#zY"JkX>=MNr)hJNkm![Xudp4# XY'Kzt=Gx>v B͜|~CWgerC#MWx|}5L)KNh14LJylk~r;= L X1A*+GeɀfJ):1y ,bԓcV-QO@8ciBfǘlaFij }d(B xR0 7hY굑!jfZDmACZ (^=S;؃TH,,ؘ'a an;P <=/FjJ1t?0+8ؼbL \Ȕ_Yg*p?VT]khL/eR >h(%=ceLBoO9?V!O|F_ #Hd\d!BXh?o2 ^"I.wVa7bhPu:i_ҋ+7ŒnW͙K~$KD+ ,V%ZFsh7]SCAt/z9:9ęs Drq`9Q=9g4Ȑ+AfKT݊v.|3nrEW&ߒ?3w^yzky+k=.463p2)nȥDG&ӵ1ǿ4*@Ifp˭r3wPXh65I_?{leI\K =JwW2444mn'HE-.N"e{Zs sU#OLm-,k>q˘۳[gyqݳ>dB2 +F)NZ˦|_W[T0; ^#Ҿw{DZ p q\VaA$dwƞCSc1=j"NsM*kTwn&hIE"d%QENJD9WvtV.ohLV5fsU "'L^qu8^ :zmȌM|<BiXrW}Zӵ n&5%!.Y-93ҧ oI NUj}  iW ̬s`#,64oO; Γ n/X9O+PҰQ+Y${{~#8˗4ۮyx;hhl !0]`RB? "v9]ս948 >))/0y NDI@ BC$^~9>Lk{o~ Hmw7%i0^L\줰{7ϊWO<3թ6N!P,00{6x~ v u( (m,%5}l9:KO^;;/m&ȥoMeQ*~s2ǛkW?g[`$T60UљBB 6Yqd8Hl2Ԓf6ہ4< GZDieru14u7('25-ٺ5k{P}vY`f:V-fOg} ]w #|_-G4aR<|d%+"sjvdێ"O-FJUhڧ4 IDATx|#>,t cLAƧX(6͝tCԭ\+Rb)8uE?tڿH롢ϗk"4"T&@'0j^ `{Đ]vv?qN7 c)/05#{ZsHڊI`*jaÚ=xb}Ĕ141ANCSd0U#5B*`# +mmM۷vgW(ЏXaGdfBœS鎹 =)фؙʳO5. wOchL?n Au}ynwU#$/қ'8ynI$B#`ҘN %"!,oɣ,":_=<-qu 76  CLdY';ۚYgbժU'o+$_tb?~`O*ON$H7`'B'n#6i\ph7 k$X^Ue]DJ4R*:!jhK5[{0]/Y0^yȀf ~ /]XAg,ܛ"Wo0(]MPK )layH8̙TӃi\a<BL).FE+XP?۩"Սf KE|]I/*F=u 0٧0Xk#|Iznr)pBy3u2I%yeShUg1uoZDHehk4Jǣlb bqM|N@L0*=h\/ &$mAu*1pxiXE":HM7P=g%3z.#j2>jvF0sד?8JZ㒈Y 2ly\b$'‚ O2KG2̫4ƔB&H1iE@/ET"Gf청,L.OKXfz,J5y P. ise2 t߶K3:r,;CxI7c+c. ϲ`r 0;9(x,:WOx5z`Bkg\|zr4b_ E D1sQ6(l#ڛf/̜{x-C{e)pu.b̓?w>o7p-1)k]G04mYb#KӇhi$KIg_2C& ,7{fF^>) #Y^}|Cri 1nR~=~~w/| /lHHuwcy`?m, +ή6I9B$ ,ÏmuQ; YD8, iQQz zWXOM2"Z M {cIXyb]wfS̖Z>j :8ncϜd?^y?G\7o \|mg? 
솧:˗%Nײ}LRo'ӆ 1\Y#l5@ڎG摩^8?cy-_:j뙗nEhJb@% #Q8SJ."&ɚ-U,s{Trbh B<@\m>xh_Iy@ )]tj߸."*JqO\X fҝC0BcQ"`kt*@39-fdY't>c,K-f-Ʈ}2H%1o~׾>>~SuTV^yxŅdrxkq 4xsMGRkq5ŧkl['θMFDAܴ dE_1Zc , \oF <Łz)nBxBV2x fAOѺmc/gc͹t&MH.C>1Y\B:#Xna xPIu!zWI1VeI,y4@[wA]o f+xV4MݜbiE52%)՘b QAHH>Gb;qi 5Ĭ e-R) 8~>1oQm0lP4 bLU.+@,+GϏddBםy# d۟.\?ose|~ !8Jz ґYjS1@iל^Jw-؃ۘO`m<ڜY[Kf"XM5nͱ/|ilُ!xضM^{1ovH* }4q-BTO\Dyd|jɵk6l㮝{ Oj幵台`z*p^|e3 Rf~~l"A^Y1\GFK4.`99g/`mw8H&+,f|t}qa__ i4!Pb mo~f J&S[<W橗.6w :@AJHG7%y઒Ȅ fKwt>&˧DAq(4`ÔMt%֧*1ΉBI?w4lؠMfVz4OGH5Z).ǦO%ʆ ;Y ^54/} !>q$94lY["A')\KpK&V^CAx EV<3ū`,GEqnzymQȥ!^;gh31;E~hHĐO~*`VL#о$Ĉ.Q0t/H #B l40Z({~ ps#Xt">u*';вis}Lڤ A *CPWZEA:'dH$L2}ܹ^k}ʭ3 O2k=I>gggfBɷNrͭ ^}s&FOqn?P[*ot_T΅xR?aLgDhrlZKpd ңHg;HC)FD/57bæMb)z0'ҫ47ݳ|}iV͞ _~;ٵ)6yU 9Z3<7|+ټ3`:(ј92)zy\h*̓5cb޼?A"ЍĐsZdd=i*MfU5Y0]u&d94a(t,NNGFlӶ v̌ؽs3E5ʮ-=`'N:kCd3z 3< |0s%S_OhO410׊ "D8wm Lo˂><؃\w0y| 7m.n%l0aSeG6?Z\=bf88[D0'ৱ"&Jy%IJqDE$$>o9HVVŠLZqz,v|6y!)[`yDwnE&SƂ~t':ٛ,$B:s"` l]5#5Sm)j;v,[7='WAp$ tن(YwPFrMm/|je vgx_*gZVx uϛB6c~]H^βgw?S񯿂/FֺnҦ3E]r]; -:aH8ޗS{1jYM5GWPz<#u/65xC3L%mM#& 1[N(+/YM{,^H V>ՅZs:K6Ze1nΛ. 9a@xaQ7d!h%m&q2^0sϠ9GU jl"EDE/.9R} ݌qgnf|lG; 08'cyP=~8Yv3^gpr bqc/?nwqK_Fm.#I{\uOXLA RT fH@.# Qf aQ ~Ó#s*R0V{pչ=!z0N<ɭBiLxcȔ&-$SGoDF,l1H_cנta:X3jdU@uiDb,]]zRz"INhr?G~jz\u#13sCDg(t1>Ua}JR*E\sa/yQ/}1])Ld2N\T +PEaG˹ v1NUyLibCtB2D}.kw2!9{nKO=ěd|{8qj!##r8 ꗽ~[n4t-p?AW^q Ï09 bl'Aw~({sonlƄx("[qr XCr/"yǗRVŖk "+EC88 YT"mR'~}|#Fu޻gW^02+m+]c ӁH0350 =UDIՕ(#+☟~xۻ=Gl IDATr1BЗ0BT&ba}&>NX zQ%A{Kt?)pS= gFz_{/N/| !D/Au@(!qp.bUc!&:ńŷU|\\?wN7}|w}>^s D=\jY,K{KrU璵yvȉ IIX8LF-7L7PNc-+߆1ГҳaE@aD ="3pYaI$ٗS$,ل/$8͈i G: ?z)]izSg, .:ka/<ma8r;/$?QkE-sl9͡\~n_}>x6{46Q^|# uװa fLC; d ~7`*g~Z3{hɾ3d8}B:Hju ~ 7r"uC}Ld_Fj edhɬ"DuE[,".u`d֊3p]^Y+F+ǥwk2kJ]ڃjc7f+cL#|Y,&pRSߖM06FՖN ϚbAc9qم4EAGkothXOx,259RySlѡSh9/Dk:nՍYV:BOjƇ %M]v8Կz& qjld=Ilp j/)y8d7nMn ƷF ǏOU8\TdX/dc9)ŚaX! ] qs _?Zz5ěD]G-=Azc//?wh46o' #nq.a{o\G1ZQNoWGr0c!kEK G| =eT(|z]asu`I# }AuSE./d}*#5[1'hL$QB9ȭBWz!۷S$,f"?ʱ{AŒ h)i@>u`}<檋}y5?[fT%8 6xgovlRkQK/x))Ejfv\zV+`y]bXa&Xe&z@aQ:\I:ANW|{)? 
7Mpv_mQEX{|x[Izvz}_ΫRU?ywɞ8'SyAyC§iz{Åt*0cq)+Nn Z2Q ݋ E`HTPKw9Tݽ[~7o~:yf2_;)Ę7;{%Wr)'6-M+L(|U_G#Z.Ac+O˟vHAnhVpA!o\NHq"$ϋ&x vsQZ2X [ʸdKɄkYz N,afP 9<+2!DЌ(!ywƻB>@aO|KiB,6s"njq"riP6aIRNޡLlxnW$~yx)d՟/τA< I4qNputyLK~Idm@PjN8 vY4mrϖQ~yN/pNNC?qhcwyZԅBIh|ޅ_9_`O(kùGUᱶ L<=ZI.&HaX?'r5'*%r1)u>-,JVtIL2c|%4,,c`k%}u>Szsێ&$'i)I$t1hi*$jMqD#:b:c4Y3/;F\';-}dgj'[uk>r2`del2޺88~z`d[wۓ!;Oh4KKǺ[) 1GͦU@ D^HD:-Y=ifU)jS'"(hgy+"ń!orY\y 0!L!)  FÏNTI~&@؅{̝xOgX'#h:jtt$:y?V%;UN{]{գ~)QG#{ꓤVm&$")/f|_jTd*A ^N[Sޘ.JAMqֆ7q{Xa㟏"w+<Ir˙]8VD#jtU$D,\w&[.'[)G>;WP+0ӓ|`m#;Fy99_QT]/$uK**U2Ix.˷Qu?tw~纛Y/y)g ـ) SkCq5qge"D;'hJC^3{F+D|2eLͧcU̡%3?.\'YV`򞢠 MV\|Q'\-oMdغ4Hao"?@U~A)55F^~M;ÿ9#ڍf 8A0:`baq 9A$w~(לGT-qoo5#{7ep% i;p:eb.U?TLb{Q)/x`hktgiN`Y8jpsw O I5DN]h/G X ߚ+Ka"~qʕ4g 8O "p.Os :͉n{{;»38F,hڍPJ[k% Z GݜЎgt/EurD5]IXJ~[cRC&oW3 2 b'Zp,{ 8M\FR ? }RRsJ eA": ,s}`ِs^o5Cd09*=cObV_ji@*>q^1{ȟuMZ0JQzٹ^eneS\qlxJSgWB`EXs%wq^i#Y{ #jhSF9fd?n%%uX"J8Y$=2шp)]އWAwwкFm!kLJ&h;vCW!$(Ba7$]Pc&aYthb?~6CcӤr`^a0#0ҢdD6 1>ufxlޘB:kfQ~,),r~(uͮ+ۦ m(`c WI鰛r+$-')đK,FqE]}?=AX9f)"=/>'䭗l^m/7̉^D:Kdb-y5gJm?QD3m^]'ϴO33cI٬8~YA0qHP" pƴ &Ցڤ !G2Q58%."[7|f{*DoU*hS{6rYZ7*޲ \vb&}>D }_>?Äb eЇd281Qo##LS 2j<THKh)3}VѳDߠvi/|ydV&pÕ)9NT#҂LL-əYߣĻj8U٭$ox+w<h2q8w^ky>d?S̶~p_QwLPgW媭=I*h1$^4ǓGlCPcLsiYt%sx"5k7JKeZel' >+ lAP jr;>ALBJY/pJ[4~n>}2=Eb x8zbB:8tC_Ƀ|;yS<C&xٞ \uŴ[Lٳ'㭹K(V6Aoffʤz5!$s%St]کQMqݤ 薬uXẍ́h,˦hžaǯS]6o(3ǜ_GPhOCJF-'7}kEQJ%-^hK+%Lmn}۶&.X;d¯7Ez{$F4piAFJA]z;ʓ}N9Ak!m8opNͣɋtg!~m|Dbck!}ɪ W&]hƗR4݊}3OҺi`ց*Hv5lzT'NalQcI^|ɮ_Qnʫ.ǯwuVAP0%>QL%8h0%xѸuCU2ήF nfnt )iQ[S>t-(>Inj/P~ a$aMXAXC=He,?J:?@ cvQTk'^n;щ-R=$E{twm$Q4YhP׮#c3G9NMM0`8uZ/́W!ew í]ʉ#̜rS-1 NN{0Fht6+/[bֶَ'VggSiD@ǦZZX+\׼>jUt:ַ碍[J{X{ehi jEi~Pఄiξz}(8ݴHN+"MHG1QDv-4q[x l.y8}ι(IQʧ!62k6Ն'=d}9|783P8!ɹ>>i09'/?%,R[ITGZG.l>+LRIic3C#'l~6r8L(wT2#/(NV0R}.>'Bǥ_AR!ChŷZ˧_?7(O%ڀRb+dBV8Y."e\ݝgcNQKu"ZℇH} [fu?͍[x6vG?NtԀKm>g"x/àa\a6=/ϳ^$IOk{{itph;4c:qF:G tgpƼmṩ:u-6=^j=d7ݓ'|l#c&i0v3ND_u>rWTv5[ț{̎% EXs^:YWs ܴ*SIbӺu\|Go0M[e[uWA"L DT\'PRo !R-E+N8/Ǡ]HSDIB>5MXj=ϢM&+;ƀxX{kWg?מ:M]kM-YRF>R/-@.'\!z )rmF@!r[WEG8o<\{oCk4.jE-"ˎbCFVioӑ1v:(։+-B 
y+ɝ}fNaJ\k]v#AY xV UtD՗&!3 \cKtĈKަlƹN4KˆJԭd@ w=܁Bi88eC(QҐl aۺ)(Ѻ"aP%2՗9Bbq`7[_wlgWW`T[-.8!|5Ken_^~yraBgQ2&v++b@WG4f&צDd HGKѕ*:թ A"ED)C$3x)P+ؓ|mKHmQ;.bu< +iW\ l8k; cǺ=F6pyE sy}ܾ/vs-hlD ]{oZ_WnV*aR  k_|2;~oc|o~3W z鲇 | h葆k8T}dDi\&EƓ-ہH RFe~vz>2?6cL6+!#??wp[Q>WD IDAT6͛EvGIfi"k6uExN'waI-!h,J\aM%ީ; ?i,ph4(qP..[N5qa,CX|Ōq 7\ѝϒMq"{AJO|.qr0#E7ìNc ģ* PB2QQ0 |6U4M'+q!x8\3;j[$^%17m4ה&CkC7l,M̞B1-[얀yڸߏPC)u.Rx8n=T7m3@sB+$ }l^{/h.EPXM$$j6J$N Ifov%$:vho뤈ʔ謥\]Rk>sB_xY,-xfv9`n^筹lK:cǗ;u3~5h%1I+e_A$ZRak]FN ĢLX)xd}as/+Q2>"䥛 BXrUK۝lMBʘk\że&Kt+fi.&IJ@Z)jS>lP&KȘ›w<$ 쫤8K4l) 5d|8/ȆQ@.6ܪEOVX6#."ՐO%Gfԯt q⢤l2 AG6"va=+}'C~/ڑ2sX$J* &<W"^~}w< &Pw,*/u\CnD1,,٠?=)vr|?K^F6?gOOm>*n".*2(v吩 ˟FzDgjz;w2;:FKI!`5gЍ:O&VźN{&';ihf5"^;Xj9߬u_-x}ws}'6riL$Q sӶA6i Es:chD=LR*`ko7_Cӌ8~NwoTLun";YJ\~:*5XJw8QOhF s;ׁ窘x'/g_wǥPpe9gZ'&ٖOeUI6>.rZcNth$Q$J2d mg6ۛ,J :#G{~-\?GEf`lU>: Nj?Βk2l :6 ϗ>MbdSv\k Knwƀܲ3 &z uVP(#9[{g wը8'u$QajynrY*z'j(ϥҏ0fXʥW.Jj2V4G4lYpNaknUz&>UKWF|[N_7Mi-2|-<\ih0;3סp"kXD@$+FB F🈾^oٌtiXKWH .ܬ`F86K7" ί%=._޶B_m'y[ U;7*1shkHEUVYy[ՐϮDTe1q[n8HiX'\J+0NoCC,G^D &#M6ɺ+PS?o3C!8OlΝhtm.ٌƆx@&PԔ`eI%e,f,yHo+?꯿ 8F^4E-:I`T`<o5tN%Q) 1P@x hk>/21NAҀ:N Qm[!.!ЄC1Peppͬ\ Q'8҆H-'qơł,iiS\nF3 Uc%T?~z. _@a,In28[t&/8EVMbtS)5!e(2Y6iL>~ΙMPℚ%c Ci1+D8aBIF~oCXR҈ "Q+S8AvhgBqDY"ሢB%u ,GG-x'Erԭ!B˭BYwFԪ%ǿO}$mMPxVsN*%d_U})S!fU#"u/K809@{C)oWvkO}Tʰ ON$^_H9* bT?\{|OQ}Sh8tbgaB0%P9L'3ΞWj9Q3  ֢f\0B`^>:wlL:΂fg^"E< )2& Wx=ͷ@g-JsJ%eI҉$Lewc|̅R4j!H|дSyݤ/+%" KhRx]$LЦ}scsЛnMȣsơne3*?[*(ecHڂrq >8΄H!Nhz6&" HS)fᗼe$3}x 鵳nM:o;(!P&~9 D2ZGLXps4liV͂ls=xOXƢb`8n;Q'i\#IN:/{OLI\{6.nZf`e_'?9o\[brd~{M_~,Y,K5gLbZz/CIpI\A˗3aHQ5Da8Gz6t[-@)NF"W YI` ,b|Kfꈕ!^4_ /Go_4 )OM;iI60QWcN:CҔ+U|g2-?ɖX2Kat?0̧ܵH1=GJ8 WG?J;nzy;ɯ[ fЄtyKm`Cme# $"l0V2 \Wl[í{ǙYH p 47 WV3ldϷ .ϒg},, XE >T _KE]j}? AӽK, 'Sɉֈq8<, 4Bj` #J%VpWr+:akqT^CF듼z^W4.QcڲNfO.F` q6״`#p$:?<=12+|icRF$), CO TQHZM=@?GàS5P]O'LΝz-b^9@58q,]ٮ'DQ-Jbyx3( Ls(8UDyU7cAegM? 
'OK v.*{D`R~_峟W{06pq0Pec u¾.fu0ݨRR0mfDJ5BI;.e}tZiA] _E:hE%,/8VrK0tts)nR+@L lV?9L>m'KJ4$4Hj3L90r53n!?Ch,j(l`gɓGX:#iNQ\)Bf?nf33u S˯!)]Biˡw=N ,i]DaҞ?ŧP8RS5ݬ46LSV6"9X"^ Hw$aXa8/_[.M$f$"q$]ay [Th)ʭROARN'k3vͷV͌X5M1u+A1A_-e]s(RA00Yzk}ײ$5M̴R8_ExRm)S}oWtGvNU Q՘JQ+-jN_0iU4EKyD#`"֩;+ҤM3Rqֵ7.k8T/4 fQV'x+J BE`aˏ%頩_ЮUC y*EǦXeV (wry^fUp,=Tg*(&QTgzNArB58$sWvʵ]i)$H 7ڏ#w}sM<2v[a_2bEU{^G:FvGX-_RT&}RRoɨ7 TV'Pa|_zܦw͍e⍈\38y5mŎuJ(<)ʨBšpވ-yb]DilJ= jg7ufa)qtr!|C ,Sjkii CY[0dC:FmS`) v[wv7̘eFӊ;͞i>48s5nÅuzoʁǸ2_ Ɬ RzJ]%䃐bsT&ciE`SS"A&DС!0] 4]{U "}521N0[ gpO06V8'XbybWInϛP«SJ?I,$@hNh"QB Ŷ Ds9/E])^E)y/oZƦF'ae`xet3N ! REYТ~[{77l7ƁXsժ:/Pmx`'?~Y(tDY NȔA $&P?cƵBY初违5;Jmgix!P,=:͝,6ȡB+rTw=tȕ) CbBW qOP(7v1@;TذV*<+F )'^kttM!ZPfK787q([Fdӽ²#Au*0he$DTu!8͞+-Ht. !:V:'_#A϶ \ 8D \vIZ*+=A)oB#Z g>cvM߬ggImseAZNkWrFX%G@1_G:{C5U"טernFpK[LS1by`H$ʧ[ k3os z?3|_.iLrx m . 2=R5 -k6NYԚ@nV2:5K8BtmQ"FEBt+o~G5@f yL͘f&T?Vkvި-;6VYߣ+OOF}ZӵkoT |TDV/ jQ1j 5h88L cۭ ]E}]hG&]Ѫ3NO)v{P+>:esYK# HP/կ/=*)֭x"vާV{:FuDd&@٬Xm/8KdGFkߑKȲ"F3QT !R:Qr_/n?.}V_ ˨!A HP Ae$/r1s8R(`Ĭc,R[ZC$O"SIANS~vFK&`4eqSv: elv3|:i6$ @l]PҼΡU;Up IDATBUrUHuxPRhY.[*5hi-GiN#:G:hjִ q3@7t $\D0BnROw_JEZKyqmR )#S[KnXҏXv"$uヒ>'ēT%Ij\6쉹6ଠ+Dbdg.e ;Z<ɞ5STTk_ EDST3̳iv 09R[/D5#A% '@QqՎ5GQ&w~sף8k*(pAxE}Gx:8O\{-t:d?ϴs WwU6.#(Ut4zؕFƀr{'>5aLTJhGh+,Ѫ.w~b 'Vyϟst>4lvꦸ.!%v 9aZjTC%Ґpֲ{TG @O:2]a}amLmCI,auȒ=umY K|͌5!r44Tq׳.&Pz*戂n)c+PhE@5D$će"G"V96YggxK8MH) o4$AIc@;B%N qhђYH#kG!U`U@` . HȥXjD|=:ͮOT\7;69m= !r^saaτ7˂:z6_Aj\>Xd2fx9YPq3}b)59ըѪC%Wl4UU)4MTdl/tԌG=i֭js`/ש~Fiӽ l# KXצӯyMT3O[n鶑|~"T2lZ$ S(4:ܗ:Gs~:RsVm8 잲l,/jcu*rґ"{b._@xhN9sZ,gOjr+MW ϗQAbZ&Wʃ!(V_=?Yb! bhJ͜I>Q(A@'pi4p! 
PM)Ď9:/O m4P(m|dWlQ8ID缉dHjuDo 8N\M` ~u!7>^VIWz$rDhyRy퇾Mߑ7}]'8g6`yW͢+1&/}SHbKo/v}5qoFEuNyj 7Vۇ(*s#Lh^ryxkv:f jGǯ"^Ƣ~;ɛ~͈Q@؀M]ncax&䨖~ ȃ9q,*%okK [^B7Ja+3yXP4n[6gNV7evrp,,":uۓuWX.A&0kmK)Cq>;5(w(?ʦ [_gm[,ArtO^q?l 2Яtqf*z #D_!׿L5pՔeʃɃ@Y)HeŪt֔hwNq}[`#~FƋpB4ٽ%^ h|z<&Aa (gP?c4{<as(pju47I4AlO=B`Ĝ3P0s`U(;r"ȱ`,)1L| phLsxHAed0ϮӒefki|RY+6| 6Y])i/`Ȼ00=>-~&3@:gW7K5j<MFF?:yohKG|ۅ8vQJߢ!}}9g3t-]A=FJpmhi{]fӛT<I5%dM@BØfLX]4Xѕ' wl}[}^Sd`qAs&QT1tWu6|fqXUsKl=֠Fmrs`vFOdV^KO0]pjuX'q.8u ھ _Q.sh}#1\Ѭqw6 Z)r^h*ϧƞ{;0]8 -q96)>[`xG^=vv]:lY8okxOˏ>b6RKR )rMS*8NN0cyήKϰ=K" Ug'vZ:1K*ADQEp!v@`P}ZkUλA25nǞ`ڕG' c{T+Ys(r_  g3`},ݴ]O`a2r <˅\{'o#`#&8Ib˴/g/aCg8y2v8]]%\-$.ށEۻ]LRrZs F!=.KkPϰ̵yz}8k/k֝ޣYr#)JQ 04C1s0`$-gzf1;%W(3 UcIA<4+ONQXtU*G1Fge:[GhB{4&eĐaDRWmY+18k:B?8D>OUgϋ<۟1].̟>bs)ZchatSkP(`]c 2N4@Y̡>NmM=/@t7\x.}=t)eJԙU7n$hc A- Z6p5_K2  y ?ۧ4 <;P Wro6հDlcHbgTiR՜ ;Y\cWtF[ud&}a0#Y?] 6u"4ҮR!Js~_ 6/B^TT2yo3hR~W(ey B84ϹI5=Gpm6y7@0r%A5FiĠLKϭڨVs)gg~(Cz>|BXf,|]l/^67l$-P麪h2*Uz\:dxI!Gt!IZ4,ӕ6(rL4 **>w*4ƀ:EtP.Hj}@5Y?I~"˪X*GLSkS&'gCUXjjhI PҾD扯Sǭ ՗@73g4n~2v [3;C=HkMXdp96J%4shpI:IV- ]6* l^/=NҲ> Ǧ7 ;Y}ElO:ƊU+ص}}]E~֟{>:3r0A_O>7ncML>LM̐DD /gwbjKӬY`|bH5AGIKk/Fs}Bft9eV˲rô?XU NM gSJSԪӜQ.LI; z3%eZW)q\y#*Yr啿 %Q=͠ho|T]41fMq0SRhr?3Xmq؎*^O(A3,"doq`CE+Y| S##Jw2W-Zk+c^XcmB.2LŖc$"2+z ,R%t+D Ԅ̚5h44x_d5|+7MUpۭ5ǒgqDV| yv\54*+'T\Ṣ ?IޞF PCzG"8oǓ՜WX 5L؄g D}S~a6\q XqO#~B߻M?w+ PPk$Ԫ3v*iM:KyiF8 O Wob36|F~w*a!uFIYg?) !N[^Zx isu<ƨg\/@. r?Ô׽IK1!er:%R @tyh*AD{K.xJ]mDs/qj`j"U { "DLq!S:OL*r.ae)"Hz"tpO b-%Ӡ'HuB0ʯK?xl"zcI׷XjZus&2cqJ>󤨴ት> M֦mYzZ{:mus 7'P:ERhĩNcs)SJO*0{ٹ'> n\V}Z0A;;vF t!bE,_]I8U|#ΕW\Χ0?>v cŵܾm/'X' Ó;((`HTUGwɑ/νS *0xn)!H̴u|:>]1Rk3j%inۿ{ g,26F)l䭵iJLIj|QEFa1Q383x~3p1V-E^漩(,S.5ZS)[ܑXA9vZ1*I\#&4 sH@(lK#jRm4q6j3d4Qc|;hiџCϨSnNA/Y4bFG0A@}lgBHWƁQ40?(Ͷ[o`2]Pq rtP+qX^m#>@n Zmw "l6BcGEetxQ'T{n^ϡzH)fYl&vBwi*8Lq`V֘0p,[~HFE-Q+ z"L\ʀ2k1_ۏ@u8FB!7*ä1]G;O1rLR*%~zoXýQI{wXyI*eW1~} *@IAnbQ7XWJr! )IUk9F w.:8b,j5u(H|w9NW^<͚"cq~jn2W()%i:wzgjXRq(.xYߦr! 
k`e<3n-(֕B?{]>+.KmO|vƛk^k\vy> @iZHS69:oWQQM0L6os_p\2V uauUW]I#q<6۾ TW]t-.3[r=t=|ukrLڈM;X:wJ?|ws{֟\yo.$E#nJ_uVZTJD9qkh&Bڎ4ˆGFl/ПWR-!Ab#DŽQDA $q%-՜ ;Gy@Ñb07lJ EBC ,F <2pG6%m4,9Q/6gx{gYubk Vk]fQ|2]GzBǷ{~|ad IDAT3y)OHdܯĿ'D!$> {Eve7BSIwz=$z5=+P5KK)F<(N[d~6{WV;kr KoX\4E8Fa ueUMWNZ767oeAVɉVj8i6h ,6YGt4GjV2?9$I8sF김\DK2Qvmu&I=C%cpβ|"&ƨtV)k2WmUI.EI֚tYTb8`4Gz*HXUmaeobmgl['27BSYMM T9Ef=[ gqE)LȎI-sӳ;#&L (KpnX'טD4nQ{b! /^ LGͫ_ȟS6n6]@"iV`ynt /_6,T,TXi eh 则)|Nt- 7d.lXy_9o_;L9:h *uV"_ߊ;jlog;ygxEEcbbwq@7bU۸wpyho>$W.-ŋLj;aq!ϽjͻG}>K/Φ}&=d~;6f$͢e:ɮcWN%p.:9'y\kNv8-Hu;Y.((|y`Նd%( ɌM" \.`{]Cl< ̌K |ƛ>h'~͕߾PCӧ?gx1GRj8]6FљM@cuGE_r}^MױdJ>'_`zYvq @+\~bL[pѳ-PN8 <ꀈ1a~R@aqpHz v4ŠEi5y%h aP*gE!P5oy E!k㡱-fHr%lXLS.gSȑk&|ohr+8 N+X$yV0~ҁҒ֝Kiihmf[luPrKlbш.d.rP㒬$+-5W3ZH(u?ھ=|]6S^ؤmҦukǐs˿| L\v|J.;o%9.>b_?.;k_w/v'x~ꯨS!oۨZWsͦHiTsa]z:)x4m{2klEN>W-^QҎP"S(puJ.y},Уkk-VD!+,huCCtjs&pM*J+Y m:&. I6 ŲQ *#PɜdZpBpPD7o挵Kyow,j6h45N)>79g.wkE/3M7ssQP.|/ӽtSʡĵߥ{F"1sMm-FB䘉UW txX<s2U0nE*9!=Kz B%}W1H!0ܶw:DŞ2:AG>"_xˇ/? \;o+/sl(Lp!>>v ǫ37022; .32|죘Z }Is<; NI3(&P)a;7M'LfNxU!ւ)ҪS>u lJE욪h؛e@,KD* zyrJqe=:]8ͷ_/ć ʒɂ{}6I5f}Wwr3zHFFjIGDނ,9gy@7eA'M8kJ֯?3}r )-Ǩ%9 B0_qc,]9*q͹mk׭eҢJ]sl^?ɱml\ס2#2wM]PAbr:oנWҕjDfj`c9YD^FϢvo7iIAGd+xᢀ.1w{ǹq4hBŠBUT))1QN5&맫?ܩ)/!"T->2"qL1} '󇜵'\ _z[kX5C;!W?/?J4ɥK0=]OWx!MFf|yhCn J=uG3dG{uws-Ӷiջdٲ% cb 01qHBHB/ )OH$ &1eܫ,۲}wv{ywfgH+[7~]K);SS4V&Ydg6z-hOWZ̹Dh]PyuP)ǐtZ-qj& UljF Nd7:ekKt:$HJsXknW^a::-|kDclw 9|%t3dj3I4t{4BE eA nb*I̟#40o+5V3}GD (7RN 58P&L@ tPD2p+,poxټR1^z("È%y ktӷqޅ<̳wp._~Z/آ!MQi :HA08W!J $\zeuo/?x{2Dt_NtDM(Z5H}ZUH+chlA] 4<(A(D]Du~ik1mNX|Bk0A #T=0hĎV5]RB1(CSkzH,%bYpα6T$o}ºd$vv͹UvTU.׭vY% jPo{7ZdsD8ob#ؖܞdmeƏ}gcYnIې(b9ľq.@hA*O߾λUKGiN|ͥ\_Ώ#7&#^&P#t~)O߽+w^.>ןn߼;LM'L5ɇ?u{M|؟P;<>O%}͜@ژ!*V-3fMqX 7Y {`_q O7' 8^uKaB\@)B& ׭F/ie4\?Sy4WD'#?#{ክ9i}c>v}|磿;"I&D0ZXp^pAaVeyBkWB<O&:/$tL J92@Vǂx&څ)cP Bʡ\D0 /O}>څ+,J'~r־gbhxRS>cO=F~,,x׷XJb{q4{x ܸ[[{B:F2~gGmF"bIT\-5Ai v?Q$O;d#mSo%]-r^o3[,&J RI_f_b֘Z{ڥ)3WȂ)DHQEtcPsD$[XFo檈2DD2Y"5S竱wJ|g KDJ8:wՎ14C֨07i1X FWY/MׯFju^>Vp$ <{ǭ?`= 2BS ,xFpV kM;xo"RRNryyr~Ƌx 
hxXQF7PXDYk_DW8\FWۧ8֋U3hFi>`K(vB"B"~!~~z٩赵k$> Dgz߈ثUɡ|#dc?;~뫼)l=VaK('Ob=&KI/p\jBl_'*XYSDOA HR[!{c >\&fE+Ѹ"$T[(Z;  EjCS\T%#ALDJhT 2RM5Ѽg޲^U"9Y< .G%j/%jSB+6\V%UB7I†"ac,!@ aG [D H+bt6%n uᣣQ2#<$FG":DZ53#; G%;2BwW;+pw>/s#Os  U.aE:JI\<%lY֔RqUp]I^v+x[䡛>c:;:eK༭ηo+ƒ7~mG\pf9[0Oa/[x9g.X6(뵼!#ábTȶ6Z`u82%#j@8QLՠJiA {x"96{ /?-gPu'nܑqk+PVPn" ҷ\7 D EcTUUiB,"QHₛĖZAJTw :4u)-6xy@)M88 ˖]y;G'aojjYs_a3bюl̀6FI,re d7vJ#Wӳ54JY@#"jo?u≛>L-CJuy j2.xh0T 9cR*j@"LV-aP%W\eIe}i&ILm>]kaMk V4PVX?!,q 㐛ֹ)'>+!9HړȂ_Koj _*&K>VpTc~㾥xQ %գ#K_fS1_գ"~r?{r%~p ,/yk_nGSi8*^YV3U yzljȚ ](-RWcxt }1hU;G>2DP]}DH.Zez]hn !X5*IV纩p$߆%>~v:veFPSN'R*fmli0ڦZe:^2rX2h&j' u;8Wvm(5i柝*?5l,VLX$*(qM#9MUHb#8n&oBr;>Tn= 7ˇ?6~'9#cwqݻ88\ {4%Q%h+I)k#43UfB~bn1$d <*A2$mAH6rE#dRpOwrpx‘ŝ~Yl>p+lzHqCFjĢ@ []wH]jpiT2SkZYq1Ğڦ5rGvHIl$vęE'fE/b'Үp%Ap&9}fBjD :DJ92ߝuQ"9Ě/WWo,Ҭu0u1+ IDATF #9% 85y6!׿^5^Iʡ@JzQH[ _6,- L‘!}fzw'ѷ\g~ҽ-,c,Orůg9w{L:Ϊ5څGG2LXC=Ie>XqXIҹ?9lh[[9c E7u!a_ xa̠CC]mP0]ԙt\w9_'h)=#ncQ[.uضa~A?·2pto/֊;+: ;׼HT^,|E$f%**,(@XAF 8JV@w")U8m!Ԇ֮\C.p[ݝ4LL8,n&]28qw^u ユuW\LԭՅ@0 glNpSLhe }ޒU=2+DHø:~g U󇢭Rs0umNC0QWB9uY]˥_g XJL&N搊Xh.$0 JT5}{E&GOO횬{Gk-DPoNa-fz+Nx scM 1>quQtPjӼiQ >} "B?@ xN:q4'H?j.k${W *bdR TG\L>d%zdcR D-w{:FcCnZ!n Qzaȸ+h]%c٢H9aj^y5LR!PF 6>sS(eiY#bBytE <&8]Lz-q)A ev>,zq<}qFV]Z* {ړ*3Ƣȟ֬ t+bQ0Jf¸qWr0Mzӛض}3U?|oG̗{b go7_w-6m$]ed SP!6,[o ,H͸21 *_3Р-F|9$&{p86O=M=Öm8?ȑW0>8 O?7>w x`%Ch%ch;#7~|~tX5"ʼa=JcARE SWqfWóM -Vx½AѓT#az}͐3kK͔d:DͥX2-p 8sE5Ptj6fkB{U]rF(dH4/v1:k_N è#%sk/U#"u  #Ttr#hKHRۉs%7o# IP~D`ϏEİJu&Oe(";DZURJ K%[~݄=tKh톶FWvwvs^ޘ[P5xf%z.$U<:TwQ+Ǭ ʪ"` &e/x}Hl[@xbʰp]AYjmpÐnWa?_rwNK8J" &QeI8.6\og&?[eMfyG(͗,?HU"a%rE\v-sk Z6.VJ9Zd-ɓ$ \=w/xab[레%>qqm ;su_Uo'S!#/>TvQ k"*V._M&gZ7oqAЦ 8&O'zgX fosaw+,2sӵ i2Ϯ4[E3BPҗ%/K#]ͮI3E}%vX!xfJAstqiFQXс~S1˞xN(D2I5 |0,NsX4ⱟ% /'<#.ɵgP>Tkg U:7YWcx!v߯ڢ9%#Ge+5ʄGxˢ4paݬZ}*`\T(ͺN^X -c(\b=.i)(v5jDŋ6k mwŪyaGn ьpdhȹ]utcBcFTش #"TU 0r;w1޸y1m2S C{S$kó1e*1B% PZq-4SN{hY8Q+׶ H1 _8z0Do3O\p5"*]y!ŬJp\vC#|ɣ%xd~xX_y5Z.ځsYdS+iؙ"rruC΅was0ʊ}L/Iz<4u"L}Om66BrUYiOg %F(͚%X,>_d€mI2LE$u0~ӿ-o65MX"AK{y"e,^RaCa[Ot6'eVLe@̵Z↴=VW684/5[cvT;UHA>k6[ 
Q6#8ʼn۬ͷE S׿k?Ūd|k*}dwu~Q1m=vK EQ[(CPk۸I~Dc`-pZO?v ~1H#bM,Zaj`'7Cv:JuP2:$4]m4z׋6*D^jN:W@$B-)'O:ZA|8%dND-A\[`JUԾb e'.rm嗗f13fmS)Lpth<\ײC 6ϔ0Zี.RV9g`:}/W D- F@G4gam!C@C9ya/4&X)aHb/υV!33Pk4VmW֔nWRZ&xH,l=s3JikSPQK߲E?pxÇ=:gw?}&-fsosCuDc9!4}޶e}}gs(II@PDdlj+tzx}fmQ/nOh$ZJ:s+yo?s.bǻNщ-Evk}%x`8ѣYuRkKOt`,K=! '.U┅GUx߯G/l\0sO%ҬD5E9:a-ny,cf:If suL'uAe#|tqYSShɽcjcfFx>cU[)W:.hqqc#3xG&#=2",0E:[,F׉8P>*(cJRFs`t&k/N _oYd3Zf@yc (PJ6kX;ms+f@  n,:58C`khٰ׬~l8mԀc ,U&Q`AvI6XƱMZ1MWk ZӔ'Kj\W;2}_4 }k.:I`JT$:~ټ\i5 /TrT?︅-W/*a|ݢ]!DvA&؏arZ}q&z 6M;Ѥ"]EPa?Ň¯]BU-![dZ, :^5WЖ(v}YS ior9FB+i=2(@KC!_ee<80Ƞ2f j>H Rz[0AMhԠ FAZ\'E]<ڶC鸠 #C m:$!㳰\x OZ%5K0X6<"H]t4>o38EVc'ּK$F<= izUMsS6lUN&(,II޵T?yi5ccrX2ko^[/k;pƦvv>E@K[mDZelyomO29xG u7Lu-t4Dk!յ&:`eGJc-ģ˦4Dxrl'z DSo:c[{ͭV nȮW4h4_K\htiZ'um1)f'uO/%Z4]r1Xک2N(XGj|ÄD5N ~k< _Ix@c|'t6Be,%xy|T !Z[ʃaY2E;Y&"Z,W08Z`hd>u҅Jӱ|5pg_v6D>-N5w4ظYΑW 1R,ҏBqov k#3 lm"9]_rې-] vPfM/bG~QJ*Q޽I ³jVn`ͲVG[G<3fI*'Xpii>$ObمS*Vy* ]p&A8?i( &VϝFl4WkM/0wj2b'raeB-cJFV B0gf>ֽv,)%Em)(.F2!) 'e*!2.=I*EJ2)6h]U <ٱe!{z*9Tu$]sVX؜ɤhcjljߟ7,LT2II@?5" jV9Xl*HPWu2g[:C]=xvkI NZΒL&@X]s=Opݘ0I %SE  \YsWpםRꓨVLxj-QE BH0Zm4; IBzJuNMGXLmQe߮Yw[1bΑ85?.U "7HQO5[ZkmNa¡HQD174ˀIIB3 $D3Er<:4O.+ ~H(<P)_D"&p0$LpĪ4-3#I[2{S'AR`U; L_D߆#t ) yc,"4=-9t Kgb&tI Y6)LdrDn~K9mx 5}ݟG<YrZAU15AN;qj~IВKAb= V,{^3J /ՂN!RJ2hv2.~QA9 ͼQ(E1Ⱥ}JtŮ( gYG#*_n)bKsBCha y嶛xFK&RiInR`]m"Ef5w}gMTS 0N[&*Bҽ]u]`qk^;%J.gY$>i^aݻ>sV:Y_AYH,UQcFCő<X>iCp4y8֊4x2ȕCX+ē"ōe"x &݆&D9~*18m}UE2aio塀n@ٴxvQK.P{kH8IQeۚ4"wkk;'$y'6EQ.e}CRTPg%zSX*2cDkA)RFs)vt(1I*krJ5$ Q5{:nV<,KsncxU@[0LsewǏWH1$ [/^<6t(jjUm<+gZ$ HT."B_@4Ha ,61$ټ0،(⸡:2S)U#-6DmCXCP.pmyZ[[ H 'LN cZ*SB M-\n 7ܟFA֩$ )q&8S\P~eYDž IDAT&KxE$Y%Na3T Jg Ƅ( Q!6)Z2i[VoxaƋzZټ}Iyu͸NrN'vsV?C>{\>oe]/O񑷯MɃZU.&aYp0a[u1ȣtu,jr%$s-dFyӒEFaIiAv`=(**L[!JlZ VTr#rݽ՚BEZFr%p؀}L)L5+!-*/UBG6xc28Rp9R͍/VV;mj63^a=q96˻I5gm;+ZۥB!,xJgҐq\B`[,V,nKP ҞTV<6gD=EjZ%Sz1.oAnyqM=:)&wsc%z0.[yϘN Ȩ,rVj4qi 1=Ag.|\?QAf(`'A{ )#bN׺wΕ( hz\qr95Q|1Vv4`ݰ8_:vZf8]<c"71WwɚJ$5L<.btl)MdVkgZ Bm {z,)b38Yں;$X+ f,Dߧ+$ʡ<#4)SeZ;4{MҞvd DeڜA||^r1):RBR1f\砜e2X,X ht"t 
mMНѐ|Q"GF^'+Y/*|UaI_+CGHw&,# qQLE9h!5JװNlKV"Kl_(rM )H5`%vU19z[)I-Bu(2k^l 4̨ŝyq\Y(rW3Uʖ|  '<=VKzO"pRYK-71vYXiu.*7*w#ȓmag))rDX ܤfֽŜKHD…0tIŔKX)p\O _|F"kETd^Xd:{-8(lM7I>5"bS#tvҞia`|Q+YqȈ"f*KWS 2%QM=5<VhG9m6N`x~ By@ kYrydm3Eg!끒JM˹W? |D3-haMBbc؈ЏmhqL|B )3@ ~R!I}Rf%KU+"=.]!JJp}ct{.JrUpeSPb%$;I.{$գ6%k]dū| 6VS ټA+2.yLbv}ٚy"e a eɜٞ{QL:'USxGXA1qּVtጤx2HtvnF@X1swkPj.JXBs.t^wahBSu\ Y^"B Go%dB)㤝nH`p-f300m\ɎՊ X׏>K*Lr]NT]P2aPoeSil`V%T'I<SS%/ѽr%/dŚpA:)?y#0o&;|ZR8{wΙ#_{6wmcrɯ}-[$BjPo>|%HqDX~8W[d2s锤wσG&XӑaUGR?ZҝMR]R;Xce@j^GF^:/6vKKZӠ2BxXZU.и̥ [w4?;ͯ{ pd %, 2ABْI xis!i6 TbEx)F1;)bpX)F JGZݭ||b:S(ih6vх[$b CuNH *҉d8٩d§FbbW*mF\QM3'7yG /LX)8ᲦU$eGaMd(L$ Pfmf}{4+wOI "Ӻgm5td:Q4IF*C2pO(! F\@HzWcOsTxX\$tϋNa6&/FpS*'D rD"`-T+eŠĂ>T?L\кa@u=-Jjѡ;h!BdhquK[!,~'ь{^H;)Xe!Ţ&ݒ"Xc${9:ٜ$V+iE c_kc}mdlc0I@Y4f'vtsGUݝ{yv{9 ^ k51k,j4p]EQ|!>yoFF 4ִZ Xg?ͷ:˶'_2Ha0&6#2vo +$6J/CҽMTXň Ci*CQ,]Ă/ 8͕)z*:c&X0ѠM*Ǟ} /$c`)-BHF /#^ر2^Li<:AP/D$2F!R($,l3\:l^ֵLfo QX,£6%+gʖ'Ȳ{RL"+@i'&tcKhLO1\iX+/OV&LG5GJ<|w@[^}ˋg"0@KRФE4!hq=AA6dTM攚\J&c9|!/{ &I ۃg!}O5ض4^!չCñl1钏F ër<ȡub UjLgM*>KߚqDcxjM[yb+:a铆ua̒ļc̲ >aJJYiM2>U#m5"G&Y;TD=H.hS[_qΫZZ@JX",ҠkmXF"(?HE'8Aix}irOBl*vq<%Mǜ$) F*ZPQM6], j4i4ю$奙 0@;f)d <hz6]Vz)dz(m[Ȧ Ÿ6ӏw% z\o[;vn{Ǭj^%~ ehk] X MrЏ޾mKY,oг#eX ~q}/MbЌ2j,K=u|!]s,@ G{V9x(xEpc%پ!҃k:̮}R|}\YҒ8̧B:7]a$M7 h,\64_-7TXcI @X2X!!sՈ=K2;&g60!;Gzgqדevʁ"֜zKњuz +98>xK67Z+/-(,xm2q.XHh69/kc@IX nJ`gCjSTa !VGNѨҩĊE4e|?GsK+G!.=u?Wzric3ǁѓ3if3t72<@![}ʩ{_L35/U81rY\)m<β 2L&gDL[;OVx"x %;}h^[)Bƭ}GHD8v4$d jsɘ(ri$׌9%1#I9o!n灀vIV&ſÍk!GIҝ&+bNI@jD~ bv/@⫲TfY)(ybh+/ʃq`%!50HĉXi, lZŴICB)HymKgߪu;Y-,yFvsĺc[v$3hsmGM'vA ~tfkǾe>[(zKNp\BO#Y}V>yh2`BeQYv]'V W}59'R跄U/aBwTMƻ%_f7{M\s^,i2\Fl-/ahW1TZpjs&M#z ,ᜦdM.:e!D-)D&iƛ[{n k%%j9ww2I294ͥXMG{}~ѧb7`d6b^-:mkriѶ^H* iGMɂ8Slwhi0R0K/#r r5bq;st.جeag-Q/eZ{904ZԠ'sE+&5pIY8stӸ\rvxߍ%E ɐ![B.*mԪ5kXa=JJ,aioR#MP*8Wk4j؍xJݰH:,Z?i銶v.Q6,o~eW ]ʦl9Q}6ֹ8IzD%B襛`=]#P.a8 :PgR,j{ N-VGHY,Bэ،bj2E`"NNh$=h)>~ (Tj.fxx86K.&ӣOLEPݜ/Ӥ5t5P+u|]$Ph25|$x7~H %=t8#9VbhiKPH9R eߥX9"iqIi|-o7lLbԘČ;N4Tٙ&4S=fRippGU&bE| +{9Rop!M_S!cvfUABVV.Y2YX L1#JEػYUax ;ǿ}":??;#g cDXEtj 
^ՏnHC(`W)CAqMIh8iB堥iraZdks<ٯL9%f{rLK@ V|}5'gII'כ $+TѥW_?~?˵/܆sT$猵,>HE6+*A ryTX3Zj!Qaz>`n~}+lh}bs+qEWZCȺ7~38t :OGllF"":hҨYm:^h6c'ɠ*CA6y.zwD'JТEH-cI X^΀]+dҠ[)8EԹT'} ǞÉh VjhB ɭg#xLYt*tA :Da=~'CH#K†qBjH{)5Aw R1Sّ82&9zm^LɃă='_OF< iɃy<^1˱C{c\pY=\f(^!ñ9@OpW'OB3Y O?KM*v::ddQVҲfwG+2IJGpY3cdXEFxӡz,@ڃa5oYKwp=3` QbS(mQ/ld] Vm."C$BzWW9eTH'X1T`L,2ڋ/e|֦cJZGl{6΅Cm2f(ڡSj[ /6N26YsZs|nX I#!-BX!LhIT@d( IDATMBss)ey]$tkbsYv jCGgCogI p_ƅ7Lh%͆~|z _\!)7BtTR*Xi411ĄR# pG#PxhĴ;t_G'Vq5|uҨ(ExS , *f5hPuMXtG63? O4 @֬c~WIӜS~aT3k2pZJcMVPؑ`/@,,^#LrD hKbVK ꦜvDI Mܫ = 4b$ ALGwpb<22@ͮ8.ifipqKOugƃ^_M ,SZ#K=bBKM~Im t&(\'h\l1DE.<ɭ߷a' !{ad3"vթAY09aML.)-`#ɦ)4~#6S 7r]_5ox d+s✠&nbTр3;ZI D)CH@&;gГ,]6InSr:Kw(l҈ۤsm;ӊ-=}j3M%RFiS$sQI;B+('osU\ZXnV]E96 4>fgrIP nw1.Hۉl'>^:@y,aba+;^/s@}Nngy%P{ ȧ@*͵*dn=q0 &Zdbjn/HC3 AJFFKPJQt^a 4RlrH\W8?q][Ⱥ+1h~0;ʾdqd V8H֯w8MV#CI*AS!|_1(NjpYI2i bG ϑN R=CLRkJ!tm "(xLטvX Y!NpkBkw #?;Oj;q`7/| 8Sm?C_Ze_SBTg~ҿiGUgHو۶HP3]+̣y9?]a >'JAD6d<8*#ViN-Z ,*Y#$͠rRʶޕ344wq<`0qPңěb~-V>Z g"܋׹W|vI=:pAz?iOAJ X71c:oWjNI^֕E5$mv`!al`F'G4f(&'=96Ɩ>6<;5KM2g,G'm5[i2n1W 7ׅt`DQGEjajb^jnx1w _7:6׹ 36s|g48^O"%u*W93hhdl,1SI! Mt+5p$pa-= s6I`›_pG?=ooZB,m|nXEdsQjG Vr4*lKT-Eg (rV8*'LrhZ`ǡ30">񩏰m*<ɣǘ)5Io=ƕ7nceeX#A=B ZDLI;P9t Y+jy's5wb .cqM.#tܨF4!RG4PpPF$/th PrF5>ptԴQIW uǰz7iIaM ih$j4Md|vSvV.~,Hqrj.v8 !:bMW" oHEK$i"r ^$faXKd Ӛ^m\WZ̔a!w(gP@ JJőaK`45:ǧ9I3W3D_K+*~,/)EJܳ]!p @" _koOW [f˸3ac |u$Nd#ӄ@BOj&'45ߢB`W/aLPr:-nɂ#@mIW a¤+sNK.!X>*!r:JFܸ{96=F>(^F}f'WJ{TVbͶ8izEz&`{Q Vѷv 2%DSc}bSi%GjaZ̭눝jHQt #^e5_>Xf:U8b3L-Y#@! 
k>Ia^V48(-TNbGAXk/|OAj5Ĩx5lTBztqR.n5JH D(y?2FD){gJ\dCm 6ҫbm7UKAg1G:lObk ΓBvݢh68j-ga߹4nej+ @$Ej~ZӖXGڥs:Jl)Œ:NУ;fZd#ͽ0Ik0b` 9ocY|5~5MLG)MT[C Ö$R[ƒMޑ17_Nn;9(9?׋^Qg`7f&dkw C]=z97aSo3uHxL c Vr(159ի7q#`2}*Zz2`ͅ:]+5\["Vf(tgHr0ďXq@eAml߼/ۿ|_+|ʋnmS̔R_\KUL6)UXWX,:s=y $7\9y]K8^kZ0&M/>ʱ㯿{o?q7rCy|Jrnz_1te4]2\IAVqY!Ey]OTyZluMWx#@ShΞZH +ӬH9M\7$iL%M)2=f8T"bރz-rz+B[/߃븜wˈ<9gOȬel +,Ԥ`WϏU"tǴ@H,Uלq6tSk0q&vd >w4F\4i\J5L@PJh6j 0n5*)Zd"Y2OalSdv0Ӝ9kl{Ync.U XہQZT`"+U<}0⏕<[!FFFeSdp  _۱TP 9Z ?w=&1y^`eK6~)ⶭƞa*bOuJ*:$C Iu#|nv\[LI*ӡ:tkHtsEè ,#ڬ!滷WnpJPs5"C\R H55HD$ҚpZbN#I1#(x Gҋ]KgIgrwW/dVɊaĭ[ıR3SX&lQlTj՘]XM43K$Oy("(fFSß/IjG3+St9i6oC~׾`fz'+^rKy޻5&K%H5xXJte"::rw^n C OO XS@@w8l㲏 dj-8w\b3\SCӬ,S]/xŋK5ePp.!J!^ߴcQ+cF#XBlh=Q'+tD2S5-繜Ѭh=#ɭO =E!(KLbɟ͝b.pI/g:nZ'cgTZh)E:/;ӯ՜inVA&Ҭ ʄ2ñLbsh1،m+Tbdr~I|lAx(q̡N8}M9K;&"i P5.§?*#E_}{9_:o_tF\I³OoJ `EIՙP'OͰmOOVX6OSJ .uq}쟞+d88z{M#\%-iLdr@lB:2 5?9̩QC578~82!\>ĺyw3|յJ ~i9!ulUkOA;:( ,L?-ʊYZ^+mJ3:z9qw9y^+D&vZ²4C8`+ 9=<G#3Mwlz٦(0%zt"B.< p ةoD"zB7pU ]?Tx/dMwWPDDß?_vaB7i)WNafOruyـA cf!^b6g8&U5w<䗮+m w4ȦF)$6+0c&Qg`Zٺ{S"PC^rБ]kox 5V4LYXQtX"G8 Tz)_r\25F7 g<\2F&5krva͖1}yDwo"/ TƓ6${wU;KЖ+6=vnEW2STffbxwX0{whLP Mc+%Ba $,"8P/:rKS.@=]m8NAFD8a>w~'r^ D!iPs>9)r(8^87P3.mGQ;^#+ύ͢]Kn(_{vf2KoMbp^/z? ;_nXֈ(!v\|>7|+Aö #G5A$! Uz~rʸL RЗUt2O{i)^}!gs_V87 \/es9<+U! A` K! jʳ8u020/z ҃'Nr虝̞܈|{F GEpǿ}J̎+/cd3T'99Ȧx4cku6ʼngڔXc-DWiJ 5ްm 08!Ƌ5rD"vgUK ~/gR)hCN=C0[#"3 'v)?ͷ=C }،5- L˳L32bN!QB4brѶgcmz/W˷^CWЍ&(2V{Yy6r_D<m)Zn\-v)>.ykݻ8N>Ovz7GcFN{b,Qul[;ƞz5c CG-~#"-s۟{\>#|S<e-7mS5&>ck/;RrO0_=cG88^lW8\nt 0vEHVHE8(A4%C~0-uXwŋHiM滩Mkfxqpv D7w3Bid6 rrj YQ B`UWdbjs|]O똜dՌ\䑓ة1JD rFO2S1d)s@KPM>i j4&Fb(Ttc̏7՞tq,FVȘn"a*q['!DǬ8l+E,E8`qzBwĵ7:YrZDghDNGYo?X.L t"R,e!n=>C5unCNkA9wq7E[-yt]'\fsdz'rBnE.Db. 
YL3qbٻPO)dXZH5n,Bk!0qч @A"IƉyFn6~p痦 0}D`?WefSHaulIؐ CDlΥYoPrxvƲ萑Al.^삪X͠ W3- >==‹xP?^*F 96; uH= sZ]\a=O/Xvsqb 9=ɮ1i9Hת"^z*2vbթqׯ'1w7y=d=CٵM#<:DN>glUM2&ԱfQ9uH; D9.DS9A \E7 +3kY":-y+{ys2}gHZuYVlȽJ'8܄@!n 嗄HHh vm-ٖ%YK[Ny9gfvv_K򔧝Oj밼eU!,MwrPclC1YkϡkQ( a 2ERPɪ1!fY):Ш GLj6`>b#,zM ؃E#te2/OS'3C F ہ lgvXnCHL)nEs}|gxjNcDg]{)q}C `(7ժsrCuZV*z{C Čp?_7es fN@?e66H>3'3hAGȤ: 1U5Lav]=2hD)W%Llx&ŴHf ab዗.2302!oJW 1}4tM'2(0'QQ_qv9X Z%kARAZ8 J)^7]h^N |_n)W̒O Œ xu7|jLDM])EF} T`44ѹ|V.ZVQ kRH) TXzۀ&GCetNl8("1H xfO>m_?Nбܻu {MH X⊋7'_ "K'}:a k$#Oj|S(& \;~4?Ҥ& Ͽ9." IH\rWͽG_ ~~p_?Jq7%iZAhIy!b2#!8r(Eߤ;Do"Ђ+Y}մwAA|=1 Nez*%1&yWw!WLз{cGlwɌ}¼h#HϾW (bߋH=NGꤹ–&@T<4)aR ͠#PfgbNlUB *_hXLŴ-] R O ydzӈA(+Ǝ (A戏N'ٞ*qv;ah@rGoE2S)}7J(T$[i>U@ VJYk.1о $AJeW^^"pJ:T2f͑( LDf?Eun:ڧg/a}DS6x0w*0izE Y1ԛ+^գD:a-A Lt&G˲ghГ :Dj%:б|9 ֬f]?Cet>O8Z11(rw{^Ů>Tvp'^wMt4EXq%V/ndIAzFwo7hνJ%R)&w0D!11gV4O|ۜ/DIbOwPO@@Wgt?9p&^s|yXeHZ~#od[| ć>322&td CBzTLNS*h`͒3}WtkvksϿOTo׾^[ tߌz}69\9WRʙޙ b_v5]B Ȕ`hC%Sv:MXEַ$W>#,.lΦXXn0ɲY9ttt}ıdal^UkHD1478Iwa #HZ"t=vaAlqRJlƌGЎc1 ;G)} ^|ex:/7 Mr B H95Z, ^|/q74mtѱ8l%^G|X Z,UFU#SelM^)k-c0- +M\{ i7rhQ*A4F}tY`c#. ''h1ٴ20 X>3o.Õ lFa Źֲ|j S9EN9Ή}HЊ'-F $eݓl+8a!/g,D@_X͆?'_Pl/⃗[,o#Oe]2>oc MvL*0E֝Ǻwf6k]LMZ34f*O<H H-%]J6[o^6QVw-"{9 9gZ=Dkk Xb*Gijf?¶6/^S>͵W^;r%󿿉ЌaDbQ5hM&A R H5Ö %гm+p~k~Rh2XH ?~d1H-R|.JY[)nt&Yф:XHS='7y."Ў+󨷾!l΄r5κ&>2@% v<.|mH9G]μ@YzyJN8:Xo4! 
rWt'E|9ZM/WqYF`o(LJd8CbF.Nj.6dpHIXi;n B6vm8Erȇ*|qa.M!LU)$hUqA#WZb3ZhMb'Z>HԮf.tgw=@%ٲX-Ɍ)O$x9K,&q> 6xYű~xvIfu#@%zL H~WCOkncGz͙3J0>?56Fvrb.GT?)LDLOp5FB1K44+V21#ZT<|)ʔ8UëP14jC*7CN;}l|iʛ+)EA'W&CMx26jq.^Rb 8y>e-募$q*qo>iyt?y] R YTHCr͐u9A (aMĻWwlɣ58QHW*AS3;yri$,vMfZt&b' eF.hct'85yDôΤyiO?O`hNJiKJąGj!ȥK.O$Q>vҥPv| ߡ,z aM~:C8 aG9 yԁ$.cZ|#HfuŬ>ER?<='׽OcOw]=ϑita^zyZZ7}M}57܈+@N"ݔu էʗzfUC[ ՋhQ" >zf~ͫI[)ʄ"& 0iH %Bآ.K:!^:"1# |KKcAE5_(uie͊l Z B6},M,04q0xdnoOŸ`\4P; |\.WP]d%qg_'."Y0ARhS&|!WXHRJO'2hZV_=0*KZ ͠>gc5f5QchӜ^>Zuѻm]DHc,_rIR:3E~9[VY}/=؁3Iۛ-mEZVe:.8Qgo&3%ᖋtzS8t43yd"B<1>]JaEcifL~;z';\`2]"R O3\]rJ9n_ YjiD dk%ל|v=yp04)3YrVƅkA./*g4{}tڣ$l `5hWJi"AYaX1G.W..O n?܀ n$bR͓ bG橭;X:ȇ-'-%Zswsw+71TL0m3iTÊ5|xĬ4H= !hjY&qmhC@ P!GشE TQ35d{}޳4N\*0D#_|&7@hZ1\i(G >/!k"#3){ Vh* G8dEczYF{Nռ3,ޓ) tu1p#4-h1 <4Yi2V 8~(%$ ,iմNWs>ooZ93\dWEy—&@Dj>Slݷ^ \ x"v' s?N8Ot烢 F ԁ#O,n}K(diZ‘>0.Xx{q'qdOL%瞏gY,ei4SL\Q"|F^)v^+wn5y~[ɉYv?>VE Pާ~/?i8u U4}vӼbKjTڳ8du'C b/b^x|NܘCdcA{7}kIq4z>u:zMS峊s**z2M<4+:(s>OZU#ytPuy` ܌7lR; Uy3_/L\w3Woॻ~_a7Ǔ}lK,L4GNira͊5l?y;Yi;]ɉQDGh7l5C*)wSZ#U>k,=ǐ1X&W%¾~I_ƌ0B`&)mdd݄mmfʕ|/> 3:֬%vuPUUOUPK׊\ {0m4ClGp7⫮%˰_??<͗_–y:!B ̣bV :M5G/R "T"d 5? MVQ9ʣC{  L EdҢ4砥K$q8tv4#Y:%>-Ja"nZWO X'SĄÈ;SܛR h3ɧ?J(c"&$bQ>|ۭ\~xoh4et̡! dn#nP(fC|1JiPGo㑟GYԄ',LH6570՛5%,&He;m{}y;! n)^Ks )##C4H_Fy^q2ܺz!d15B3A6ʪĖ ~󜖸?:b0W(M<]9BUe( ÒtYxu;l{^)ND"htv0:hln%34JE|(- %>DkUA;p-Uk3wNVOqLo&>paCSNB9ť[A>=ԛ,O[Tl( @CzS;G(tB@ rUB;H& XhlNXЂB](,ZNqr/BױrY7? n5gwRg{ڽA 0^OkXT&XUOefgY4η oz6w?_sS}ǸS_ܺ&Z*qN&VE,a!Ɋ^H:s0Bq3ОdnUW_K&>${y^"#s4=j;vs\"3h(+) X e [`v6RJioE`b)J; <>SOq/p߇HCmf $~Άd&AY4cxf<; Rpӹ8؝lzIO0ptfn`6Z3T){=e TZuS7N t8–|v=h;n"Bj IXe&bیLA4Tus t.m(itp"/IXhթK4+%[$_y.??qrӸq7wh/dyd0%aXz!pWc>P XibVK)]?v~D@(Oh F!AU6 %K7C&Z{>$4S(psnЕ0r)Z,MQZbV,S$mk%|݊VXn@_A _GJ. ?8F ք-yHmgѴ ֍*kܲ\!Ͽ? 
ba( Ybe_UDY[ ͬմ~+CtkDj [.lX(=a3y͹ZfV4--(NqIb.ۼ8xTonc$ X Kr$W')>!yq゜>}'j3 CEP6;\:KQi$]# O(){2FuU@JVX{{+wg<6 o/if E2:8iXؤ1%3VƲt #Zyoq- P*b@VbZ:۞'u$笹ugD!ytn\DE.f;wߊOp67|CׂekdbòPBd}M_+\qWASS+Ƕm06ʑclZfDh"̡91HNeC)V^L<;u7;TDPáy6G4\Y[nAEZ~a36:M6{& 0]˿#_tB 괛Bk 3D M,#ʱG=g2mbdU`y08+IMNҰ| .9uY{e 8FyYqý=; _kqK]w|6Wn7Ɉ08]&شvg)\?ΊsX2 adTG^kC˂u6!hR.+ҵӎQa"EJwPޱ(Ej &l9.FN=A[{'pg*5Lb 뮿3lٱI1ֽJJ@@gw͉]Q+nVqiS(!iI50 >,2<9x'1g7=~Q#9FpVa aGiS, L0")q~4;n&>O#(jShaٲ2[$ M4'i5 7m9HS4I[C+ͱ&ÍtD<3Vhnԝ/楝8xM K"Y02V8.N9hHiP'FzD[C5qqcVzZ9qolq읤| h$Bt$)+)20mnB1{s 3'4Has"39aX}v;wp#Ȼ.kW A-Ѓ k+B=/5g1ZNsH|O>I" f\5z` 8G쇄Kb0< |ZWP aB` (-EclI.v/$F4l)>EHĔeR6Wedd^agVׯu fЂPBw4lfo[nb AH⧾И޻b n?mEAL%"VشQ`ϓϒ)O?KqG]S- p4v&h eD%ioBEdJA>Lѭ?AWnV>G*/,Td /*,2TTҶ5K>E[0OBᇘܱYek&|8j#x(~C g^mF4(zm)W^4DU'x}T2s>8Le-T`(aBv}G:dѺa-g}OԱ =3> <\BB(B-$p7кdw |`-fi889ƸE1UGMR 0.# <!?1>yr_,?_7H!b[NB_[)\.hDJjV8_x94O[-MQ53@,ȹ>ƪA5 pÖ]6P*J;Ss,Zw1J9܄D)I:g͊&<+bkWY,Ww>Ȯ;5glڰ㻄B&UeSU(s޺N~_~Щ ³=iZ|s= +$d1zkYR >7W_|)" b$X#A![ew2;B^N[Bxq r%}sǎ FtuC3ӄ*E˶%,)JgٴƏğ GSSW`(~znƋ/ c8yF}_v0_y'B9A ~R,ݬ@9eG=#F_@&=M/~B)qW"%wx?+HgSm¾#m;})Kҹx1Ұ0, 4 <5fUuǠ*GllA!5LeGz#p3~6ipD;0E+zSNД"QFze%Qv}$( V)LK>TX%PXZ|eX69@ފ4ťFB1" icFc -|n^L9L>|6 R K{ h? }+9'jR(O`?WVt/_??AoO!{2yBVoބVB1Dž46Lڧ+oPFgf1~ɁBtut"-J#ئjL08V}V Hb`6f$lEih r0Caz('pz6exŃ0o#Hկ}m/mݿ`Yg÷kvBu.L0AmR.i#C%jbLu#Wy hE~`.D ?W;s? \w8{r{e4d9>,TsݧJAGE:EZ,IiRG&z `7#8VGhPYj t" :ji&k t=( iEG$'L32D2&\W]ϸC datX*DqJF6G)-g0C"k!!YjkP: YY В.λ㬹3-g|yba9!Y EOL1bH 4hYf`'Wvnz6G^z ;wyE8\cNU W"|? fؤuJ.q.ʥx57s3wĿ|[-nT"VJ]zv#~y9#t_zcU}YC!nJ*{LN"8bh|D#5hw<ʫD 篸;jR2.ڲ7eY{rEF\p9vJ^Mv$f)(614Orp[كPC`iQ,hҞE+kڹjIUmgqCe (-Q>!x~~dc465嬹X=b VRϓbuiWe|p]yod] ;HsC(0NHA( 2\T4” X N8A}%c=='N}P`pCJ&a/"`hEI+BM.Sń_b$RȮ#'hO18ˆC{qsNU<ir$[8ؖ`.faa%GI6&-,cKlF3عQ==A뽷hZ]}Z\E IDATɸHw7uS#,!wr ˧IFj,u,r*>ޫFqœFӴb7M6W7G"pJsFxh",-FGu,^x¦fİN^c5й"QŽG88Jr_8Sb.Ŧ@0 mߟ*`)]AJϸȈv=?S}a5_܁Q6J','yʦ'];{%% U[̣1𤢐g\cKG.Su뒼cA:϶|~b_|5btYUv?SwʾT;DEct;BǁckhkH~DϱẫM1:2D]̻" k-Z8SQO΀xSOeݺKPBWE7e`l/'@DZ^!-ς iP k,IjX >5Zjp]"+,t׾[! 
YH\8f ZAK,}:G4YKՊuhPt&N-*!;F&!lH;- a~Ch|GjGX"u=\e, |oV{P"FmCo/ApUOGTFYv%mNIg^ԥWedZ^K7[<5!@:ilbخNJ1nE+|Tv zf*. VC< ɓ`:JZBQAx,.duϘ~idE&% d(ЦoDѦ؜,BP3/)-.fߕ-f|9U:RDXMO ),A$8mM~ "3)s \)Iż^/s?2=' "ûWǩz]A6n6ǨYK$6{rXu2bͳHh/Fl#ݽ؜E;׽j|h3^@A %J>IDY€OkTD5D`3oBiBkVp5骿-[_Qxڔ[c脜2%.;Se@K/_Hn\!^w#&|668IIbIeCyo,exB3k6V5ime Y<| ?t'׿E%8^5X ] 7=mES!k6y dy|;_gnBP(!=V$,~7B&q\S,[so|ӻX3klexW7Fg @2"f1QTׇq8OoJ]Mˢ}p:|{C=̝FWn"3eh_f*V̼㛲I SHy 'R#XpbvƺOQ;ykcΝ?܆F< ֬( RT}8K/XMH\ h›+_.i0EI@(B!q@?I+Fxuu=[F3+lV=8B.aKQ,OIe*tEnѓCK=p+dF1f{ףI .:dТ0 KF&D : R!SMO""i0CRGLVxA* '+H ^!_o E8Bb*VDb`oi/h|?AߌhDt=PCNG&aCCR厃ғ,zB-@&gu"_͏ݿiqIqr)Lr'{$D|c&t^mpOzҞJ0BHl4`:P.Z1&5W^wb1.^Qbpq= 5gG%smc*DLLKfJ AH!^;z- +aY>Q=EΌLpG8J!fZ%F}>G͸ q$*QXT2Ŭ!sV{_J%`*羄44j1@\FQJ5i- |vgKWJjf"s稓?#bT+ $pG@' OJ˧ۖB c PYhȢ`{iAbΡ"EWD|x?˝s㗾K+ -f$H#A)hI_ I!Rx%38e60k tOH}qiOw\`I_uLƊf>/v="'R3.y ~/2kݕ J;L:^L.ʛӢ0qfW<ƮXJGczI1TMe>kWdU(Y@wNZ#U ,M;S^b=miyIz#8 I[j<Ϯtè帪X-,(=2fZ/; l(1HO=oz&?oObeK~ Z=gvW(ᐗ~?pi  ׬O|?s;V-@H$ǿD`2{&Jǎ{$w(FBuuxD7)8Æ=cN.Wa6_^wo%|Nbl@gg?=,S[#rU&J"LS} KS W142NC]gy\~kc6&lC8\d'ѐEZ! 
"c 1~ :sDy.^\,>sƐ#s#INˆkx>Sw2Ǒ#]#.}@G{;N.nj%8wQa!I0mcV"5H&zEm/˨f}dVM Gɫ;X~FQΊ3239L y8þB3:\cu̝69BrUM hm6֡6X79fþݻڊRd⪑x(pQA?K13$ fH/fb  Mɥv4"<1 `0d5$ҳ7dsc/T6 No4MPu5~"#IqE XFO:S,0Jg)U\U-u\>ϊ868lKbKЮ;Qy W]y%;5RQQB+ɧdN<BP[_I28J*mp_݂[`$ўqB+k,?9€0 0% sCL@ˌHPU[pE p瘿|]]tNm8đ!ȉd{)t*2!8[c,]yDZF Zld^$KWpg̦~2R?χЗo΢+7g_?|>/h*4lkѫ'|%EyRuEJuR%)( T)fBСlOLg >o&(+6^Ph\ynfAL)%qQ6rPeuk cfM_&}so۟%nxjQL ,;ECdFd ъ$a"f0sZPV;sZVX::h߶EiJd,BFVzYqɇ"7/$GWw\}5z'Oy'5+ϧn:eT$QB_lŨˣ<by+JNOA LrM1[ d/ O}=|,[ETeT$1my JWR<{Lw 3ytu?`4S"X:uNS?Gd֬涵|b^}FfrtPK6 ΈMRbs,[h7/'ȋ9lg:an- #K?.%T+b|0Y|~,?/.kwS],=|*baysT|S"^%% E];j'qVimC1/jc h<0)2)m:M,-?*MuX\Zb%X镍e\RI%1 /ch A ”89ci Œ0(\ahXgkdBMAQx33ڸxîd;K6TG]k#OUM+1Da0^n(;0Bgӗ.$I<+PBZȅ)c-PsSNNyfjFH`&>'УAg\~=qP "tY|ktœGwode p0P:Td3lFYZˣD`>$ /k5q߉.0f}( Z-/ʽ$Ɏ/lbJɰhN3=,Xw>qnX{x !6 ءR (- ⧛Mq r2`5bI ;,%pRIFlK` Y[4RYTqn?V_wmaS_r_bi?̈e;b&&GuZ~&ʌ 犢3&}s6bf8+?*d,=)HSRu*N< ^ШJ "e:>aͲ)r*6(yӣaRCHIDqF1nd6VGXr^'Dfw³H+J8^pdNږvQ+N<n\mO?AUe֮f}==8C#,Xpikiigm'{x~ny//b̝ڬ9MDb :;ho?O߻;pr $S;B~t^@84b/;G} 9飲qpOh͛O8#^YACwm>]]<âVzN洶qa.UYhI #Dĥgo`ӛQm3Eg M>U  G =džqx(o>I]lY-l?` 9 e\6,tظ\b_`0R3ɽϦvtD1$k6vL%=2BSM֗X)㲶BqUs |u .zXFi%CQaZ^P(LgO=s?X W4T90攅|Ϭy!"Q20&=Ax,DEѴ5kXZr ̲5B;8vvqԋ7f49&HLAx̞y0> N*t,M\w_ dMA4(*8|$>yqåKvWן?ͽ=rV Wqi rzdSPi((5O 6 kǶH4'fP w,/ϥkp'~y" ^1IǍ@ Y$3Ț.I-ZX!U̳ip}'wo4.$[?8=Xz[擟vR)flf\ v .# j" K&Pw7Cs0ظ&)DS'N`zIrtkdSzE,ǘq8١;u* W%tS=K99{FI)ӌmgRLn'7tX%3o3#Ae'tܲMty'ڟZ% 7Mf9] C aiIʉ\@] wQڧH&8 "n5 w8x8dhKū3;HEod}rMt4h+ބlXȏO/s$W^s3a C@i:vkyӇ>Ekbl{~;U򍴪-~?1EE.5Wwԣ|jWc*Zh IDATlʝ>yL_)LAyfxAIJ_0Zȷ!*ǟ9NJ ~=Ţz[!Daz; :ЛͶhR!֮Mj&"B&~)hF~Do"_ pFB/'Q^O$C-7JgeɍxWoC ɒzúQF<4]J.*B6SU &К9B(BxRd=2G׿~;.ZrO۶mSPz9w2K8L0+ISs|ݯO l +QeaLH%@m>(M=$INzOȎO 6t?l2=8Kg7"9r/ iAOu%T_?Ӑ<-(BH\,)\ds@dfB\kds"@8.2ós8a2~&"skҏRvC{t?FXKŠE_+JZ$¨=Rj&DK`''q (tO; ˹ض!̀S@Y o 3R|K4P2`:% #09U q* $B@l`/ ckgMX&V&Lkqr!xW@xjҀ(0| >M*0?| M@4 .EeZG,8Hc?ՋS<;!M`t{wqXb;<%sͯ塻fT74c۳-XHjhqdxy.&$ɁPOKq1ҚdV&zT9]%`4T$&/ncf`h 1yM"c:Q|в6H)E)FLْ8]MKQl0{0MDb|cOrYrd|'SC$[EfS"+tS6o3w~}r> [o,7X!A1~__X ƑFzэdATT`I3#6%" 'Uq0¦?ҠiǍvŅXGN1:VEK,I ijpk8u qSD`^8^ 
hK#QZ 96LbYz'BNCTT>ؕ/v¾9-p,{k/XDs(RM ..h簦fA$ObE: tʈRAeM4|Ÿ$`xv$S뀎5Xyyχ5ٌMpkxQB!('"GLő(,&T$P#QR__M($ w#bi˧)g?؉*kjD|^zv$udzXv=e^.=2+Z-пRM8 l0T M/~gvHtXJ1E&vFhXPV]8CT>y6dNScE˗߶z/&‘Te'?xz- 'qE+g7]%m m fvSQBPB>$xl X>ƿ|_vC;)DL0;nbh9b< 5MUT%=ogӦX[9]l}wR'm(?ɀ= ו,҈}N @i4a%8q`_Ƌ/cWmLvX[Go_G)!(0EVge ZGNxrլ~ו$/hd& ?^205&gEa }=&3C}\pl޲h 6nx-ͳ7tubchL`K?uz*r`FYxR)-A܈ j+,Fy EcP94C}ر0Y# ~ݗJj7.ºuA#b]#L(piRT)-M >TiQr-J0MŔDPr9 lJFJU Lц`[BPq~#*|)M)7L"K s)Q~#亼[{.&/aedziam(IH{\jQwHheRժ;硍M3u6wh?mYh1#? u5ZE50F0su<>wcK7P״hX?g|Tñ΀AB1F$!" /ʓW]k/aW\zR r]@3|c[fz=98WIdF񍵧E:mlx^ϒ5 h X 9<| ) ʒ~l[ٴVFnRoC@`y.i<"xS6?ZeD(#Gnモ~+oލHQ{W2կ~y+#qx_e]iM,iȏdшDPl;U!9MYlX'e$uV`{rEAm6jz}laN⅋9|b!$dQ#`R}8Bk8ssp jt!52ҵX F*;O qMz%B$s_w~{@Y6x-__Iǐ l&$8gR)k8z=O?W5QH?h5<1f7aV0 bXO<AS@q񿓫5r˖~kggbq^zJjcW5EXQ&A{q[RNDh7{lϡֶ*"=# `px  l|6C(B(8˲@pZB"dE<$> V. bs:H#(x,?}xD:37?>Ah<4BMU52WP #-}ݝ{@Hp_O6v> zK\[k6._tif׿zA~v dm~xĦ+VaX*lj2cs(7fNubzNa"GcR3v_~RhTYؖnVe%E$O94CDļ{Y5KڜapcM}  cC +twolK&[(PeI*f%1M^«/wr?}R*?! kcfsR` js@EE&~vw9Թo^/#3ʢ׿c)\da04p]d1[ pgbҖ")9M]e*g\S*}]n~'}|3OO$H4X ox3ڮ^gWd1q٘/ ẊP2R =2'7:D;`!0ddS60p6IsHǁ̙ϦJ6jsIr>&ۻ= U֮aJbb7dQR9AU^@HMʚzPS9gIޔc@>6^~1z?` #~g^ ^c4U P߻#7Ǩ]k{;%8ؓvpAw BO#m ɴ~\dn&"s1~秹|-#^Wſyoe-λƷq.$EG;K <Km6\;7NE65Mw =֓<R5(0AnY(xAQaLɂ &2Okzxci;DSGM$;ɮN.;S`$C#}~顑kf%fw\aͻnb&eZPp d".-Ͽ%>O*GϜB hԩ. 
)Z}޿yc+` C]c|oߞv!f cf?7x4ɢ'Ȏ1l¢eKP^c:PW7-bvu.?nao?̡G`nc 5qj{h  ]YɈu1F^ K#VQ tMCWwm{uޟ.Nͨw˒eKn؀q!@ IHBޛ@H &bB(΅K 6j!^pem43z{{33H}yxGs朽^_\]a̺ܶ(BGSvFbE@k^f~zozY+ 3AaX?Ñ];8O* q~'7?YJι \pklewО}=2P 4UYҿy:cͻDJU o?sIȻV5(4zdA%k.̣ݤ26pxhJYx&jtc,{Bj5LY4K{IܱaD(\q|3gSS)fx4 upK:q/D`uC_' tDAֱhGѲa1+HEii??‘\7}ڟ|`Rqq86q 1o/_Ck{K/c8Bm+>XWJ6\x!5-IiLr',]  8NO『zJjOrqp(JF2݇ Z&p/^FDBMt˙{ligzIڈi4M'ɄRP-ɷwqCf\C=(d2DQvfǑZH',"̋bL̞D`kL\ K,2.2ju Χ񱧞Gab=Yz;6{v IDATI* m1Rh0tSL 5!l1TʥjLZ&J%i՝ |He8 `n#͙) !4Fۀi(ýi믢%gϺ}˜uXd'cOc#Pc 1.R"ӭ `]Sq}[qQ(0QDv|-ZAmȮ{Th,k42Y*#t/Vwa7AkVmm=@ &|e\bʕ?k-/>^Sl2$g\u[xeW|;7gXG_{#S*/'*sTJN#Də)!ߌ+*NkH 1FDqUaO SgL<SpD'Q_z.&M/-&%x3Uy:Hfœq 3~3grrP8)EO`ꜵ yjyyZ܀R JU՞ΈkErfz29?%=6\K {E6-;;vmq8k3;#H/^:7^׭ҾϒEk9T,p﷿Ƶ.[=ު)FGdǴ/Qn_-DdOͼRgژD6Y0-%lJDb&O4^T4Ij{M (fL>llЭy:(! pp[&#,E޶,KD1!J8~T씞ི AHκl^>g L _&~q]l=.k,t)T +]T:H"R&% 2:D-lAD5FTl&XN%dѬJCUVD]A=! Q澇ocKOsX|V._Ͳ^g l/z;~ Cc tOR z7nbM:CrSᕕ'eL* .2ϕ{?={:k%[`Ȉ{KxA֞Vv=]a3ȇ7cc<",tpN+`mL9Eg.l>pJjJ͸;|D5?fyF?'-Ws_򕛹֟\K@SR:H!lP8R)i!ʡGuk(8|qjL.$|1}#,eܿHXDfmEc Y#y?rEhw]~eag[ÓfVWcoaV(z ΅%R$!1({[n6 6mi/"lR2 oOȯ5ȅ9xz 8"gVzy6|T?|6o~ϓL؇{bu"L b`X }yÃ)UrllbAy:CZ¢p]X\#6KЙfdrҺ21+FE?p7i=}[aqMAhg)= =.>:lГOaaQ$%aWpty,Xh :#вZ;S7xXõ! 
J6Xj03{ZqTHP3QL_Fo|ACgN ukO4ے]EZpe !Z/b@fG"D(\|'ygX2S_/uOT┃Db0<Nid$g,J!tV9a5m5+Fxi0!)A萕!a0Y ԩEUrdFNހ4SuIFǵi!Bl QY%?&ۊ9fn6'W ".oOSm>Rc,]֬cD#?AO>c1ebd,ZQJqAǿ}_|(??S5_/#yyez,e:Ş:,;<:)q,0&-m Q #y?έah8>).ҩ:L@.|ʟ}ҍ~o#×?Oen+d`~/<+ROqp>ᅤ"(֘ 8 - NqEw +Ǹ4`jc jH95R 1!g;[7ЇpڿK/G~v7˗ӚS tGQV.jdI\@ 4g\2iYp0Qor6DU$O: ")ݴ,k˳3ٿrZLɑȰ پK3:2)K,YJ$XJ@mJlvFb_rfGn,[l,>,&QnAMCIx`)5R1(sH(IԘ=%a1ʣȑqpddR.9Lb#Ĝ_m$ rKi Bp=&'fHIZ%hf0Z=TT*U>]VoNs"ddlnxCʼnne4i+oC L.ji]<(dn6nt"lļ|)a!q$b6R ->/k )DBQDPfaⱢ,S-Wڂpqq_6'x&B B98YycoaAFsVo tre#[(^AMn9SW)&S\8 V"eQf96J1M[kp|]#]!GBYTІFhhc+cH)0A㸜ׇ3| "uzJd-/M/A3s&y&wƉ4N'CT*9( B@qp"⾩5IbJq:rz#r1UyI@uT* #eGK8/ |DuG=IܸTDQr$q`V5:!V,)N dK7fro!S/\no8f)'M#CD~|&;JmJxdE]翯%>wHICwPK|7ow~5FUFN?v+ΰŻz -GhWi4P@ƍx?M~rŏ_\"Z;ѿ``hBaAM D+z4+iigdm_} 8bax&FFo^VXI}x6]jZ<{۷joM;chZ1ͽpo=CxZflmxF }{ҙ)]LG'vBk( pm 8pІr YJDJ "G>{|]e [XzR~#\+x9FbXz7zh_ѹGIz ?Ka?CGoZSۓƼL~ ((]G*G$e&fjΈILnUTP9#$ dB DZm)|bNT -TsZa qVǦeQ#ӝ ^PǥblREH.*4`&ZKxc>28z +׮gbtDNN^,ts޵1T)T tvp[*P*4>؆IRO=EEkD6C4 kiz^B_D*B/l&ֱ&6j±}d[Q} yeTvaF#dy'w=ă7 oR'gLMaH6̦jsJnH$صfqtH\G$G uyk$?|4U7@hɩڞ{q}UWŗ_Bznb*706nJtRmHR au<Ep\tu[ұ]pZr,GkfGÝhq |߿\z6&IʑC J[?O 0DK<Q+j"ZRpIySh, GӇ9* $!&`4ֶz@&Ml{_ o%FHIpV,QhP+c(aЎ׭e7nصmO<{z{::<`$bvP(1tz56,sF ,-zC*\ye߄N0E)TO--\B$-N&C^|k8-?I(PR sG)8S(Bp:jG$ZW7ļ,^#mCvmގ- cZ`,Z0,"u2UĴTڣеp>CG oq L=Fw"^^w%smnyg< :HSqABP(XXG!D80yRLҤ[ǥ么l2Nv%)R\16֛ޛZiŚqi'c1)f37Mq`8 񑶎.:Y`!nqi&)gplm|'-8Fqh ꬉ#xNNڙr <7p`BdL~ 807a8a(tD{S119E9u k"Ri/Yo*64\5r$a.ezedUšcEVuGyMZ!d$WfӔJeK\:&86%XR *e'1Q0Zn~J-@*IR&ֆ9Bp=VnmXŒJIF'ihURQF#L L2xpp7GXJS# +%Bb }:T$TZIp5;{uF`(}Ke.V?G(NٵܒmRqadq ɇT1UBJ|/Mt:4(@- ( 'KUXH#IiM( IDAT"0tF"U+2@ρ$̴\dFꜜOHC84A녵 ^KPhAH=g=(и)k5Z8ZoJK*߾Lu2~=&mklEiyv񆡭3M 9s8vKǡӴ251;5EzN'1rw>Qa>jc^u=}Kho1~`'Zj1iE S\s֛YheӢ) DOH]ףTpck-)8J'ɲL&V+X64l])L]JSut)M>N?73]m!d i7s4Ḯ qr:lt3 '&{Тˤ8mhqqd"a"x"U^uO.+C)tIJ/!7"{'zp6h1? 
}lx1 ޹a7f9i*ȌA:4Zkb m]/ n;:A&qּ,"bRLJ-Irȡ"~*TkJ ,F M=k'l#x3賘ZPK;'/2q&4a3W?x af~ &Sc8Nhp)' )yW&Tºaƫ'/8k*]lu-]]d7^kvp1:z}-ѷ>OAYW+}xﻗkVsf|lɑ!&FHg8w_A2h2R ^cekn;R8?z.LHzADZoRť;"2 pTd!Y iF-CijmD[Xz*~v ϓqx&, qZ< AD*EP.!(KG{RDGk2` رm[|r$X1RW$,a!2!%pXr:vE9xML{ۋW03O/5 xSi4ylt H3蠄G8K4EhXb %xJ"m,U2vDZ'ٷ=i63U 㗘lbd`M#&ɤ=4^J-*!cG'ʧp^ @"ͰᖝGxht'ocm^x<ݔ":at6 8\ڻrKS`pnSK/zYR&69L3yXdbJ5a}|K__(u0y3#Ɲ?>K7nD$Q b(-(ztZRӪͧ3(7}WB 2j爂#%n: Fc Z<:8rf{P˶ehjvxr3Vybnsa-n=Rf=FϸE)z3i*A?\IX"yo& cO݂!}jBE+{_iMc+g\l\h݇GlSN!MA2E&0tL0 @yjT 'gs<ԓ^8Rg[Y|);ﻛ^& "{Zb`jB@};o xe(rرs.Z a*&ꖾB\gsĬ %1AD4>uЕ)ȼGRm8CeiڗnjA!ypm%t,_Q!'b߾!r\D)œA{P T(`qȤҔy^9º+ڨp'zNB#:`'axᑧYjU[ѸtI^6dj|+L%Eytד4؍cf]"NVn< p.d͜7b#s$ٰ ($un|X;C"{=}^q'Ei剡V$ !erXfV|O>1~im4XvVTeA$ʓ֐.X/VC\/( r36E}\n?z( h/O3tS.h70H{iԥ\КRSkR)mu Du,WP kNNC QK[w2U )or>$V16:H$ܜKd;yQ}q :f1 'GŁ0$]0u ] mƅ61w7Ã,^ #Zrdb 8 y78UlAH;5ҰsE> +<jtڡ85E>%G'N3mYp&)ľE:)b`&q,$`4NOg]Xaנ}U.MkR(%~.;G'g7L 8KT-#Ҹ*6ZHi Ubt9!(+8C)"dfj.8G yn\@#T:Q,J-1L:O`]9|3?O}Slr|s_;~ w~&g[ORDVSj4X*a:+U- M8G@W;mF>:6Nwg+n*MijܼNr*M\F1 aQ52KPO=s-4ô5ȸd[\"iiFq&Ό, elXt9 !;6 t;9J)zk! ~C"Hm9yw<ꮍA/43 ˘dZ7tJK*07mA 4~JJ%ձaz֟Al]P(ӿp)\=Չ 浧)Ƣ%gļvx$ǷwRHo"cR gDk ޤ_״JaFdsP Q"n&ԈeDH{/~e,-}?3}vf{g^@Ph0hb4jј_obI$Dk&ʲi;~v߿?93 슦vfΜy}_ia,@mc .&I|䯸7M_e/'3",0m e%yXIj ;nY5h ך68p0ÚUgF:tޞ$mcbǮqnr#."2]'=RNNTfUעBAzZj| "b;LAI* 3}*$Dk}MbAWz~g7g CD̬EKFwT qe foZm[K# Ԋ;kivzxFߔ H#.@e m CC+,ˈ ͘@ZƐFd  #"OufP* /FDy"C' b ҶƟ9̳ Wc8Q>ߦM ɒ" k6ײ{Tޕ+x;E%ֹW}44'*T`( fũ1z;`ɧ4!nWsk_ǫ^s%֯0{08L0JcC0eXfH8Pw!fΣ}[ɧ*dH>džlBҖOD + =Nl겹gi;sm{L׊4N*={ܶ? (ɷu016'8hwRiIϘ$MJIZT2֑R^1Mr,u0,,fdsP!L%^KpU@խL$1$*h$ j>>%ɯ~~7|>Cqs}֛n;_U I9PH'AwVpyh?~H6™1M{B3W˦(MD2}"$mjE,+Z+`*mYJДs9+Cqv+II,`rb!/u8l2#y~ c$I:s( +6R)lt>ҌC?rg#LyNf#UoxXY䲞S]J/gkȼu\p"x]A8 CAOm }t珏:@d"B֭\wƙ3|;ڍ5;UTfK؉\ƖFu6^k [0$m9y8/:7$ޟ_ş Q/Gw|;>9jJ@]$֯!!-eHteY;lC[=f;xKy[Ưۿ+nG^h_/=]k%s5??\_cüWHᱧzqQ|<+ S2y&ȵ! 
kׯczz.zN BI\)lYM̀>c!Vˆ'b5,@iÎRVp|BCHt.kχ~rH0F$MR 2rm@λZ懣ɟS?r=DZ;(ON %WV5Ҧ쳶ro N?}B( )Y%?g (ҫtDiuBL"R`0Am /v@HqInkd/ A \?N٭[IwMtsN}=e=5<@Zi+PC*0#Z{2mRW>OdmV=|#js|#{5R[i9gRkKqq;m$ҚiRÀlע^lC̥0)q~aݶK)#l޲n+_{% ɝߣ#ݜ{i(\}ïs睵RQ,\: IX(b8n#d=ڀ|IL&I$ffleO%H1E 0{R;Yvc E9PaH\lܻ yD9F-qE^Z|>d&nu< y-?N##FŒh2X( Eұr=\6W/4A W,W0VR_[^2R) ykM-ӑQׂ92+Vph?:ˈQۓmI ʥ"Lٙyr0@"073GGWiQTf3H!r&ۖVX__ IDATIsZfP{_&'3(ar`&Ҝ!0q^xռwU$>aaHO6e\K+X 2l2uO(\FNÇf5^!H u\%p N4\C[ئ[!@6JV(Βk# lO c;6tpYoRUwIU%8Ts\X/-z:gm8)|juv*ЄEPΤ1D!a<4,3ڤ%Ք~_0jA-;?띏"*R!JV\}2پ{w!BL8Z,^KuKVdzMB@)W|{d2B^!!L.fxjW984DNJ>t2!Ց7a~&zݹz5dAgƕd Bh0{Q=E )"Ə2n@ɡ>壻pԦG|!~x*6o3tLӈYNc`5E#vMDa8.t!uќ],Dxnr2P-8-Eф3⬳c>@xpv±QG#'>T}0^P70DfhHnNе$if Skj^ m)TH8.X:n>w;9Am|Os}ݫy(AhSä)dBd2lBh9dBd-|]:Z~ιc$U20QdP %:WBmȜN JThU^pك?߭On:\x#:BΝ֘iz#lRy[Oa^j%| {L("/I*v-[Lӻm %oo=EJŁ9t /}ͫOY~5%sB-33zD}z{saF84;g eQ݉8X -`hS8 YPB)MRczz䩲eM-0y^m,z9n^Ϧsl[̔^0jk""IQ9"n#2.f")EP!±-L)Pf~!jUкAcm;ߎH5Z#- X!=|?59WXD$PM;Z uF믌"'a߶#X8^\ℂLN`3>N și~:Łv |eykz"?%0К\0BիH B.|Eʕ "A[ѓ#-"Ը7OB03#?δsbL"@)ʞϱIǎO%7v3֞eqV!s bFML͍o[#VQvx!L䘬`e}h0cU4>itrņռ,ebdN2+KRVG "N2CL r %Аj|M$HR3R<4{5x$T cY0Jl[$К9XaQ=e3fV>TkL$y0mK5480U7fyϖUnn`a,@ g8EK,sj҃EGCz,>)DP$5hb"Q{Hl8H%Z# *̋2 vRP`b$ɏ-Jlݲ'.]d 8!FDOJ|!QmD֭=G@Ym+|)6ۍM׊谥!X ܇J"4n'cF$JcdXF8T' $,vΥפ\1-̂טD3%\MCLXh-`;tqfv~ 1fjdEijۭ0?=ź9vHyz<ڬh@kɎ;~=Jφ/u6DYg_3r196 9tEi^R-WEWʼW2<2HRm8~` _viTj%|exB}XBGjS,E͈Bg\$EY}u1F4P,A:{WPch직Z|K|3^RV/)SyofNȻԋHYvtvѩ;׎i)D&@82FȤQa$TTǦ@:KgҠP @kiiO}~mS juؤgQq}t BRC3jLu:)]{Oqᖕ}L}#rdIݫP}W+z#\P̓OeI0 !ȷJ06j"Z0 l \dvaE#IJ`1Ar޿eNWi#LX䴴/`-wrL69%'+\w%L4!ŐXX]4 PdL 072 I=j$ű[Jip{3hl+5{'wgWAZQwe{XM( #3{.Zzr+Wٳs+7oczbkfozs_{- ''yUo+ӽOCu;l04Lrm]i#ͮr믊m 0%@(GKQu:!02`u4Fڣ:uTyd?MH*8n}-xi2[L:fɇXUG{q,g-44/73{ Rp3o}}Iy$F_'h5?el5w?V|1u臾#F;Br y^K [|X랅$ch"Xr)jєk). :!گ!*w^eݴr*ƞE2a174JʶkXv3v;W\pfp^Fyt+a[>˶+!#1abd t@G‹N0b#eY7)TXΨUd>16&jCfx*ˏ|2I.0$ ۻhIk(V!PN3ouiJddŝbTe|#E,*|? 
s3=\|iLc4a"KogssdݸAHwpbtrѹL3=Zǜ}|(m/s&HeLzRLϖ`[a#.|r+-֘цiJ|!ġ=s۷i[] JP>6cttRFZ62cd3'LƎ'CA:%L)К{5I-kmYI?[bط6s_ᴷgRZ173CVCuzC4vGT 3`xQ2҄BQp]\CR5mj2؉)(XEY2y >IJDQv R6v` Sx_TVSO?(bR+`ň2ME+pa$xA!s]jAZ Xm0 fTRGQ4=.:Kqj zkF4) &ufL))9fTRdU3P bRuzL=5R uI%J2u9I10g9+T>Z۷g'&$݀}SL63t4抔Bp9fDC#h഍+9:]sBtw7Fa^.}?ξ=O.g6:45##LB)O5uϧRDy|}lیУ^"&H3?9Mc*a`H6v0ifk`zl*{arG_6I]cXI&&le$|&suP3].c mxR.k::ydD[#':tѺ2%?DPs>G*I@&dW"fk\Ww7p]l9d 9z:x:7mΟH0>3ϕo|# ~oㄚ)2Hx)5(ˠ뜳!_Of qs_kbIH\dTޞnCC>!FVC%-dA{ɶ4 N?BuAPPal[HG}l4I,3D@Fly_a!:y/7OJH{nߌe.|{8of6`Yg+Ϳ_u5ި9o͍CƠsBtltf ,Ө0$a' t['~-5 qT|T|)hb6XKwkh#)h$ҏ0THӈ3>dCװ'ДBˉNV!271(7nbp>,\ F~׬fGGHXA{?{} ߠw&z g)O04GR3zp'6 Ä>Ōa|G:r a8&YD;Q!{՘j"B.h#e#!amL󼝆R,"6BjS-Zܛ[/ٛ9颡nV#b!maQL)*A2[e4 &p}հSIB,wsj澩EǗGM*[Zm ɚS#|D_c96F* +Vj&G Bm9B?`zzl6MR#mI?g/1>'Qɴ;X$'kVt16W nFKm,qm$tU( eM7F¡=a0㳬Y$#n*2ԜՙmVЉyN|Cww'jqH&Q($uU!$ȗ0ɧ3lI#YäYzeaYj X4Ca&ISI9I"CżiCŪ>6텸hmOG0MM:BUDj'Z̫Mkω66HdTc.}_c$e)~+1˂F~u^tA \a`emM?4\niűGDU\~p޹ۅ~-g^2& qͷ vu/]w%22Lqk=-_ȓkrb!~yeZ&n6%]B İhXQ ,VE( oz;$(CW/݉gYH`!U$Nn}߸;?k>QD:0n8Ϲ5҈Xv"I({g?}.^orK!,)”;ρo_{@΋` tu:0U%lW],V7G*f7TB]b49bQe$==hyNm۔eT_WiE&K|GbFBX\ƪi_#F#Al4}XDn-Ĵ y=-\t ["À@f?3=K*i{gpPI̲뮣0q}8祗pthMڴٰ 'tf]нi#9Ə_"ӿ>N?L=Gs<|=5DGX3{pDL4~(,4&y{h}gf)L'|GG{:h^fj<6ufCdk2"!r̄B8QCp: TέhhO2 0ͯ*( )|@( !%)iХ) kן寿y߯?0;Eַ;wf6q&^t56N{2Y[ӏpnb|(FXJ6ۯy /xEr2S.Fu'Lӕb`gd)N聩MzN+ ӊ*aHݜKh $tĈr_&L;{w'v̏95/}T<ZzH^7gs,>aRS,AP HGC)1L0 Rk{B6wZkiHA&tOpqZ5męZ#cMjgDEXӔM$2Ҳal?e&Ncxu0jENFh֪ Iu ӂ0 KE3Mi4LZHX'"wm)Cu~TfY^bOlQhEĚQ&"J)M i LNz7J;\t @HEg{RL6iQ)LpƕL qD8, |"|3–.+ut*LDOG5Osbb5+{ 188Y70:_A.T]yVn@:ǧI23iƧYH=y627)LxYw7쭝 `e>tqaX$sڐ2rqwk.iF(?q Q}d`[\ϢLRhm|MhB0='NX׷㣔8G)dP Wdv5eb)Kwwrm8IG2Oy80rqd>{r+5[YE ͺq*_?{ moood8v~)描!V2k^Ά7rѯE2[*Z Ķ8aYξ]`xʫbX^IEPaFeN\ Eǣ=ɕybxLvjx&lɿa97I I1,Tf懏\1;ԛQ;VPHl,hTVQ:zɘrJI`|nM ?x?4 4et0L ҋ@X$?Nkc`a׏[ z|kFV& <]<EXX׹RXO5Uc-l'ej- ylW=[+_|Ww?7> 2{NymIoXG6jҊ9L"΄m..r}PPћ3Ncz(s`$眛7=!h *L +%5z̾FHT0 3`8u~~:8ȽTfQ0e~VyOj%i# jc.U5'BX%ZLh)Q$O#W)B!|NL  n5 %mgAX ݃$fffeV.kk0V,%AA@ұ4}+Mlw`頍|6J;#i'c_FFwuPbxuF3+jMK Nr)c335N{OM>jXf!%4\DnVf'&%aJ%t{[T4ꅻ]I6$عu|L4D M>džL8'fWLbRD2D3`pN 
IX??ɮImZlpP&?{*bPXŲCdMq7yy=V2_,c&)ڍhiYoZK+BOqb'oi.bRkk"XY؛R9F5O l%f]91~eLVnN>g'o*pPuNG)!Z&dbDJJr1viTT*UIb!IƆi\~wyށ<5==cS>V^ʁOe`罷a'QCCÔ{U-W1up7s0*w1w"֔skXaEGLJz<:9q!:[hkJ]\ L}e NL{1F8I&:]DRlZ(]vP֣j,r(n"hR<[G ,rŕ/o> |&-9>-4 _@\b0@ԋ8qhd e2 &g 9Z2nPz^Ib<*,b1ӳ@# :xHS6d \M{KLXs3TvPj鲥V]o~;orwBp`LqHuHih=SI1&HtS r.5\뙘dv%\@{{=wqg3w=9t@*Є/'A%vlcPct5:QD? ,ՠL \ѡA̱AlXB֜)a0 ] O͹I2QIW q}蔅=uytC\bjJ%+*Tf1Z"dP8>=M[GoKѣu2~GF\vŕ~ij~bs[­^s:㣣lW> ҙSJ eyO)r:N:>D,.X)%Щ¹X|#l! K[-=Eak ]"jLĦķ>)|szbt6nsKmRaʞ/l],ҹj57F')X{v+!5e22"sf}i.c<B8* az$#'C1;8pjCKTW y ,Y>CbHx+\x}l}oбi#dr&Tnpi.O2q?3<qnx͍wz5C$D0 zj'au"܇E1Q+T>LT;NG 2@Z+\lm: <ɲD_i46j:x.PpJVDkSEQbBtekLio-g_=<84~SJ~ 1fnKc \~5{l㻬i\ gׅ}fF8^s2xIV^N̑mSȼ C&+7 G]:ܿ!aM6Ui&nRh`IEc!Nr_TRk(B( űqn\pnt(OMM,\JHi,meNhiކ,T*'#m.m\!^W|pMS8 `qc ]aLHі #  @l>(L/ɾ%PڄRFQZi 5$z GIÑJx]˙ ezۙE1 B.㑱I!āD 2hg2KwK+cSa*婹| #U^ 6rtd kVphd::.ՠfp)̲kg|BGW=}U(BʚNm0yebl0E^maswqX"ATVq|O?j.C8k:T$rXm{0q-6fÈsj0re@jB12+03=Ŧ]]ҧzhQ#\dE#=u%}&d;ݿ/̌蘪˽wƚdLqֆD_!>#} _~cuЭ6r.5}y]VCL(/C\/x r !bJf#DC4 ȇ*}W5()%!:>cO=R%g1S|?W_˨ /=X n3Q[w8ﲋZw Gy(!q%cJY2z{䪫*u6uw~\u+Rly ֟}q3Q85yQwx{ݷCk@x=oeeuW'Fk_/]xOO'N+A{82 d?7-5I'q)BQ868?]+!0sѝ'@ٿ86ڠ7t)OHIF41ofƄ"H`60[L Scvt͛Ϧ#3? 6&G}ۜf֟w~21cѣX ("V@8 wRĘ8!S9O֓`jr-`Ο#XE86V.X;+=ڋ.$6il e Eb@zHGil&ڂ"?e-_86?׼mϥYTf1sS{U;Fwl I 'ے'Au > mgPZd24 &%*Ҋ-,M#$lv'$'iWJeXPpkS5xAVtP2z(VP9s͕X׼ɨ br8c AZtir!CNʥi#ʻxBӖiJ@g7 &[ l|ĺ0: SR!Rcq:X|#C E:IEHo&„*h[4H}hYޙԎ]p)D!UH$#C,_OLOփӿm40F[KUTu-3Ss D Hل%Ypmz֦<%Y "t2q32o[%q#Y@5pbAg!sZ\K:iU&$=%KVwSuMg|}½ӄ$NԅM$5KXeUq%,`f|ŮZC!ΓS -Ͼc#XӣioSla_(pprR&jT&n9a 7N~}řWcHYpbOFomOjKxox_ ~}$ig IDAT VHdhp d aMMÿ8\xR"ѻpflh5ãmβ͗Րł4 ?߼}t&!Git\k+Z㴵z:lbT-/a˖K$.\t N )2c^ײs.:x77ٷ1<~.r<|r<\t&33t.]2>眳{z_rA Rb Lb\JP]Icl9O][A<]KP[/a&D:nP](R FyI:,:R<'$e)KEH4RVν|K(SO0}t7\Az6Ȏ휳C?ϔpRF 07:(F)M8X'4Z<:It}:D8_MGa2$+ 5O$]{=ly7b[n#q$Dz2҄L*R6MnגpM% j ۏ2]܈WW ,IZ)]p(DU6fY$1J!6nJE8],ԥ]dVֈW-IQuLELE7 GM?XtDrPR?)Z=ӳ9&(Fu {OqQ^P7uL"ˁVDkNgpb˅f]U׈" _?||W+׼tus˷O[2kz5ۙ'w݌ 0k…Wv8BGVhZW*r8",0]'>_Uؚ;ҕc)$Bj'YFMOM ^(,`j@+#󌛥nd#C! 
b 4՘GT\a`*~#aHLڔM׬Y(s$h6 .9\,SIrNɐu5o _ElY')hϳH_rm!Hŋ-P} &/.MEz.(Y6B |k1ƚYq;B\Ogw.\_ X^s0Ƚ|SOWG  f:@f"}I#k., s5D,Rq/Db @vƵ'5d "C=f^?ޟO슭TBR 6qq ǖ- 94;-?{P;gDXu+Vr>õ\ktSv qD#.0\uӍ{}\2#0vmڱq:=ɱ}{rՕ|'kobgYϑyھgI-Sе|$wp<4 맇oរ4M1^k"D4]6 gL!/LL@>tๆU()T8M =8[9."wٸjr. qy'8-1@c ¤w/Vh j\v0lxgF6z@cZ"G1x?mN~jFNQZgml&1 mcg?=@_[/\sUx%3}7zaD(Nn8QExy;=>ɯ)/۾ EAVV])mi%.O#ӂ $PIDu dbw03DM6n5TغC4¦>ta4tqz"ٖG&>:7p=;GY9sYLV[+D(blڸcGٰ~%ٰjmy29f9Z"kH5GYCIr1j`l/a%@c,{Z4iNBhFp_[a-:}g&')ؼ;a ]ggZD'AZ6xXf$װS Brh@g&RYxx1Te|OR,lCow^aPekTgeW;5B2ALL0;:ɗ+pO!æMɷѷ|JzL{k+#C2G&<!,<Ҭ33]8^r>q)$)CftAvw:|p܊/HrZ-1JIjVzWÓ~|m Y[h8~VPekzmzL;;9M_C2oK/dˈW䝬 g|蹈2)ج|SDZ:E?MkXD"Z>mq:׬aSOp s]SQlܷs;;?GY4}j P/WdVxje/]tw|뜻z-+_}?!r;(yO<>+~tWXj ձt::¶;ߴ9;_O֝O>~;{v.~ay?/{ w}f^wO5{bht "(} ,7#l,&Re Y5BiR,j2G9'tci 9l<$!x m "Z7MDݦw旟DoLh!cc75o{zƆz'\rVgX}T%#\x5:gyqԇcN iǩ2K± jIR?66ួsvC\#Tϟ7՚clLk NW\gϟg%yy+zAf9וּUٸ\~ 3ljenzUmx |Z$Hk VzW2I88du~j -Uâ:HP&CTe-m9_Q"H롃8g>+=]1Z7xR8V犳0& QPۗ)щ2z;I MG:j$8'^`9_')L9dEZ(Z`-S|jsd*2f Z,QY&EADuHq\&CPW1L^f HMs9Bn'i84M+UCbWQu…O;BjJt>ԓ"y~!9iv6щYzqe?ŀ=3)ҸqF9 t,\Y_**2LB!CEJa=J:Dhv)ϏhR! Z`xbپ6y`isj}6BC[X>.qM[u5SWpVۧ,sLCL' eP,j?©D29orLw;&8LQ!&6=g)ird +կwo#lC)b|`,jy_q6{ya& VE}!S0> D!BX+SS!Vx@j4g/㇎ѳlš1}߻ZOUk7s a"N/rԦ~)]Uoo~+m _#+{ "I\:TH|cL>oٿw/>-2=oZWvsEtLёv)q}`˜zL}rX[rA7E)\s?qao?[5V]cַQ.8<>Egm?fa`LevL䒋Uk0>rm{w/3>:JϪutycSSӓPH{FCV-1[_~^>%J/(7 7W,xܛmnD3P5YR,9[$kd*_+Qo%f^)rBR;KXAjTҭ eℼDS\-B)Bf1n{28秳nJc|o\AQK_/Nsx7( 4%VGG:Iit9 !O\=FXx+LCjb ԉ] Ga(. ;_1D!\2l!wIXUdCa&=y@R0",UȷPVl'S6Oލ&(Ush,cq&xkĄa8nu-A-DG1k 8bٛx?[A.rJZX"e ǏfB|Y/S3e&g()QvEfbM5&քArnzFGRx '矵oѽ&9o V^ɹ/xy7 ~L\{լ\뷾Ͽ  K`݉Ǣ']d1J B5'6(mqB Dj9Pw0:a<K%0R[u_~W߬7uHǺCkщx1 ^dBAg{{*^Z:-ɤ1ݝkL h:whXYIy݁NLRh VO-SHϑ E -r:K IDAT<,$AiE1πcbKЊI:5֐s\+u*f.Yރ.W0ڤάlbheWCWB[?ODX$WdU" ȯ"0>֪&~"'8LU=JVs|:"6lDBR:n1t~o"-p!D' :B U m(R(qjу#2y+\qsþkͺt".X֏| 9}6`[_A4v>u6xl#IT"ڎ7M. 
'Se\'9: SGXyF~vLz3V.c=?f{uvJ;&Gۿ*U"23 ` ׉el0q|:SGbe *#>3' }=߸{n>Cڲ'F8G^Aeh?q28/}*Gq c#"o ݛY gDS.(DRצ1x wX4NK > H!+q[:QhI_b#L@< XYqz+x/b4DVB;.4~/h<' tS/QXllPʥ GTbu<:s5`++!#Z5sS(DJź8(Z 9,*ۗ@$ ^D.ywG.9"s3'4p.dk=xu7^..t!P&*s;Ad{kyVgC,M$H+h=yky[o"c1+83̽weG}n{;AF9$H ƶ0Yw`?Ƌ׀ c0AhH P49Ox'T9'jFH~~LLsϩ_D#=|rmܺi%n3z-r%t]߼%p -:N ұN'ѩۃcƎczq. tWrmD>K)˝ʬ=cmhI܁@,50?_!1(c6rqԁL˜ۨ16OS!"?>t\8B(I;hcP(d:{㸬^!3q=|?R*Q=CL(arQ8"OjVnQvp=L*Lȑcǿ}~)ḛSۏq\369ü(bp0b`֤Q t$狸K&37WL(2%YݛG5.놋3.#`rI7.JVd,o_X?3xӠ]Ƽi::pqMDNdBw|y;䁩Gan׼Ͽ@?sƟ{T: o;xǿ!|?߃*,Ǹoeu\O~ ^EnҨ0[Igۙ d#n}Ee9 WгfMavg>Ys$YcxRVY[4^1#(F[GfMI l4)2oPUgq+6}rZO=%A9BphSs뭿 ߓk֐^G(p&Ɓ61^2zæ_x˼eZt.B9I LjgEG 5&ȶYDG@"fwfCÃw;wޱ}hMqp.t‰hPrٰ$sHb u ۟&bcnTr͵ײb*T=qbRg=|_eUdycr 7Peh(|IZ X .Ϙ\ {OD `Rucm$tM:n)t}9fz TF|sk-h,kW;L,ppЮUο~<@.K{(=YBN8-V*LSBzu&IV?X^qE.Xq!cϺ.49e-qL DI3-M,btgʗo ƏsbR,x7s ܰFnͻcÉ '80#5/ZUHZs?͛y3-^w|_w}._׿o \Nzı՞ߴɄ5E4u(P"[( I8R:JP.$IRN`F$YG:){XөCbq2PN.t4mʵIk:G1<2{Dʐ$QDQ%&AXXZ̃={CbA8.q,`1xe(J$B0fLO8N6J8j56e(B |DqDZu]”xTAW"BGI=8əv!)aBBBIߦ:?r%q_bD8Z鐠C'8┰M+ 8B ?]VX^`+bVO(bӠ01 bS.@k7\\pH/32}YgV!kyך"$Wt3]W9R[rL>(J]]^"%Hp]V<1eUp&"JT0(=_'?zn5ikFK!$kksCxNKYs;'鹜:5~135oeΈ+ؿƁmstdk%>q 7-- &+~|_z+32-'w.oAF#pO@x%6h!+x'w_?BiBav!b)"6ۿb7n)px' s/G$A5=K[eu/#F˸4Dfgc$Z4%+Z\EJAr?i&bC1/h hrX%tzkphP,i=qQ /WXy m0jrƦDa\un6}8 ?K, S$ӓi,R.bc0`1J0ԧlj+e6m\Cr)W_u Gy-Vr]ߡxa=nA"spe~uV4#enzU25A\k'y?_9wNwR'Wgxt['ZVt-wǝ|IJyjG(d<6^YmO* HqKL1%HAϥXtǤ͔3uNu"ڤ 88ӒnHAA.Bvl nϾb㕗̣Æ+P^8sWqb^JJb4 6§+$Q}FyɊu<45Qs%˅ oV Q=W_:`Ra.,Y+ {ExhhzF2o'ifؼm&eSWOZ9 Dp{ħ?t QmEFOC}=thFL%ZV < 33UF90H7&)=.ycL5>O9. R4IA2Pm*5 sPqhBE _HڠZdѕ( nXEnjxfYf*zs]+"N)s*lbDП"MD@D!n,DԺlFz 8~FX 2zGCw>=nFwQ o鱇 |nz'8y~{K^5\qUa&(sw㰡4 {Br9sU}w>[u~A"W BHei,Tiv,nnkd1bL+Ih Q!qҞIj2$6S'XD-68IDLn.޳z1hulʉ#]LqlۮdZKxX#[8JKes,g8!rQaC܌ȕrT<Ԟ1(anfL&NH.A6qJ? 
À .% bF'M&A%&~&KPZp$1\ǙxrM!xϺKu=Q8ž#EQ#ifO,]Ċِ9UJUF)&U[!šshj&BGtgQqb H5/O7PYJ=+r?v7^,?E]\yVRM8(s$O4cXIMW1!X(Hr ju Xy@ KRS (J6?V{[{~OOʶ=o2ܿXkRQke)?Hl /^L=u5=k(#7qte=3uD\ !(`S%̷g\8BP ZIj,|Dq Bpʔ"22'Pcmځ!O6K&R bY tdP^r.g&KePD3?Q(5'?On&@2D`+Mib BbR48*GUKbp(c f={)*Evݏ=NOwGN,^{hڇ0XQ9g?X':yXsӸ%tЃ2D"hU:?O>)=̫r3?+ՄMWш,}A2,yQ,~ |n^+'K`:'Ljgr:U?.#F?e~`J% K˕;GB Rt-"gBue;|8b.ʈG.b~jq&e;ƏM@Qʅ'鰛bY#-]=(b(w8XpAUQȫKpQǞ;-zsq0DJ?q)l`cݤ:Ksz5NgAcpH|U[gy;_g)f$9#8b4T!W瑮B%.I+i5ICǬ*eha _ьb>EW!€UH== 䑶|k=)Ӱtpl;#kA0ZhE_}G" ыQvDU;s_snPuڣhf=ϓGw'qnJZ$0ap @IWLat4F%Ez,$;ŶnFt"UD!@=RR)%X,bApܤTuesysWUXJpu/Wgtdjޠqcb#cff>XJ3͠EPq@6#p3\ȓel(O;2b}0KRdCrzJ<(|Z#ew,=+{T~Ro%B y:6yBߊ(mM ,R8O>RSSqxZj϶FD<}e YC@'aEB2mXsYXjM.i,PF)d0A7UAߧU RJ zմɼ6Qrsؒv5AHMo?{E% lӠ"`eG4:]Ktk,JΗP\"x /}+TM_f!r ht&"?<% E/%e5VMBQ+q|1/ k6_خD UzVnwp-7 IDATkVqxýn\϶{9};\Z={X}x]1N˕8-#vkti#yMP~JeZҸ>&}87&iA9ӄFJZPZ Jm\ʫ}^ U`vvW-p[}݌M %(*t\\)b6ijȤj'M/,ݐVP45._Ld B{.RF6GYZݵi\Z!!'x>Oipo<|?lgT_ [i-x|R6O A=T[ ەza4)^fkYn[F4,Yd/F܌Kwo?s  ]MƏsA5.w-xuu%Gc*\(B`D.)W&'_%q'ͣt(!q=Ii;K=IXށz VJϑ5!~&uZKOơA*BvJ^rtA'(f| "q>HV&C- d"CŅYJ`hb8m=ʟ'Ï `e|!sQ%f?:g-d6ǿW{ -r9~\{g0u#`X)O=e (O։=m\i%rƢ 5ZbRQהw.%\1=Xȥfq챱E[^adK$5QPyXvG]P.5N2=uksR&)Lzg<i{ `EcBx=>õ3dCf!nrqu0cK.Q6s6Y*-b%m^c LNrD-Fٿ7lf>ljḵgF Eya+qiA`\uS0jM.شAyzF(Ã3^1i$d\̮/=B3%åk\ QXPlP1$^s-Α^w:>b1 dh"5AuE= blƠߏ;u P9ÊYG-3k@s%l`wF#zr^r)VvE[V9+.g JkD+\̑#t Dvldkb9ՉU֑ uOUim |!O˚33&i7Ccp/F( )tS}.fp!È2LjjuA-y#',Tm:絮ԸZ"26]Ff%L/2듢! o_gIn0QG0JnYC)^v42OP,L#GXJNm2^7hRr-sSSaSs5V!fsOFkddHVjce~Y?Ro16^e4D>'<Ҥ'_5bIYE0[ǭE(WR KdZ'ώOҨuaM̱Ѩ"/o_rZf ۿTme,d|0gt5+.4J>ênW#D}=J^*㘺t%lXDpr{~ Y??wc݆!J &I2^$ BVv>&^Mҝ nnz P";IgfxKRuJd(KkȸH dU2m5jUU-=d4< Z@4U` B^i͓$ceh*'\tXfs9Cr]> Ҭ7&)]!r.T*ZrBV pRjJOw7AA<%q]Y|ߧjzRwbn- ;1m)exű9"M/^f]ǛZXcx˖X4TH%upplX~"⾹d%K+hq'c-92|ɠoι 9d΅Vj CEÉ%%]P54xϏxd_(R~@xHSx{4M%=?r(%-.Y끪0! 
Ñ(9&졝2+99#P5fjp7yK6qٝ9jU\M`As]4A!ͷ)p oW,w׹?z5'E[iD>ؕh^kGn4БE&7C ˶\gg&MU8wo96x2Dz Fi smjHȑ)md\3 KT19{+XyGw݁_Wټe &!4Q>zY+Pa)B "T?x =|WXrA+CS(;nj7&QT /z%Ȯ yWモooߙZB4jLAๆ L !,dboRL.J'v,ܜk ALPhWGQ%bKQ'_B3vMw֣?GH6#n O E2ADf~O}R B4\AUC-7:W'r忼y9zU7z/KZ~#9ܟe*е6PEṟ$|+sVfan}xwy7oy͠$2R)wpwSjAX`lbw1ftҚO2ځ~P?seLteH/d&7pq;ɮ[I}aVXi ز;omޱ~=eq=FMk9Ƚyq7:ꒋoY9xjŢ(2ہ۔^(J')*Foo jOj,!2Qm4b,Jdzopאbm"a]=e8w/AF6ȑq:Gi=ʼn={ؾ{$2V&B$*R%ՏpuL&i18x$SƘ6$J@ <&ri\e[Ƀ:zg>_41`ķ:QZڳ ߗRDb%%&e*n"6Gz?Qx+nccMy1c8A+|]Tt|3'{^~;sg?};>Dлq s`&/4z?&j;Xyƽ?Tq_(&v LT4ds6ԟHFW8ZUXl4J- j`h' PdTe(%4P|Zu8$GbVI\ Q$#k"rfBbmI8flIneC>UL,hNF,*7_d[3ٷ_㯾4t,}*kz+5)*fBjO{Է5} p lN)< K-lLNCǃ {Wf~HQ8g, 0{W%380PL!RId|cMEJ=Y, 3ŠaS3 d9ɠeѰy7'^R$:ɣIDM<} ʖXg: z#ǛkvCp\ns6O &y{& 3KXRA-/ܶ4R&( f&+K_zzf[TGqlvzcG!ddlL :nu+B'%( |eTg^5BEo̻,B1~?af oEaӦ |ʦWOqtA 3RM;D"ij6ə :D%6Ie")A]0S*B.qP$n*=xՙ(pql t1,)K6)1)FRAnZES1K c0$~*|XAJښY.Xh[ X݉ӻP^u>B.eY ekDF&x3(µ:3 *O909F[HtVЙwH8%Esi6X ʂ^~),3: }0R}ʗz>'^]} <%^![g0}V3g/?): 4T:ؿi֬[iir7͌sեpuד6LDZ(!P`b150d'Y']@ރGB\E(Է#ز| VXZDL;kSkd:lluYvVg']<zVd> L_IS5 bՋk-y%[jsof{Q#[XD}]λluխe.ۜ~L;hp92|pD 38gYDOmrD5+5|#jY~مlHypf)<4XjBx@#ͫ%goZCCݜ \LeV{h;Q8քaIB5j\u3O`PEiT j?%<2ӸAnzS蠄!4Eo-F"_`ISAJ{M'MXEtB!nQ.i1H ygKF>g>1wG{W?bu(WS860&X`3@AB0-6%4RR8MZ`W[L*g "9g$G)Fk&Ѧ~B6-Jc CBĜd6RNs wTLk!~pW3t1&>tXl=߸+2՜ )6.T-{(9:uz8CژŋBܒ\}kEgfz?O=ǩL 3p_{x/+/^ v/v%ҳr]64 lemF %Q qr`a&EJjLQ/i2NS-2%k524PβtUfLNL )Jb,"#,lcb`n3CL"-KSN MG~m&5j4<|ǭ+EӖ8c8Wv\#V69ôi.Te&y~>"Dkuvuؼe ?@Ow7FM(E_bz)bsA LF*ea:{z>Wt"csx?O%r^ء4Zhc3|2(4q$Eӏnm]ͪ(EE("uR9[ 3KG6mLbi.!$IQrCzzL>,:4^# up_UpI-0[GGƸeQ8]MLG35]5~VRgLSĆ/ˀ[ EQ}j9޴S4HTùyT>q{1S t8tpaLH>s|d Vdhf?wn JDDs `L?X *~<Žp2dJe#d,At,)"Cӗ+QZs.n\x;~tw`8if6]́ݻ[)x(z_:YnQfbJG]xNA,ʟ  :kuQOs -4a3;[ mE$B\퇦<:0Ɣݤ)@ptr1\waV,s6XUKyӕǓk:62MJHњ2n͛斻a #ޗk.v|#2=7p{Q*q?y[x絯晻{i?=}0X ߇^Ev>`5mg[cӨVd^;*-_|~ky.O0?lLDziJO"nFF5{!Tyft[5d557B`Yyݬz65+5Y咫Ν,_F6baj.{+sl2BXR1Toُ')em t)rH(9oe$bXrA3.Hz2y )`<|=F nѴ`8D:5aR74iL -Lʑk:F%IWQAx.)#vqltc0L4SJ;$5ad@Y2(c:׿J1E%:&c='t- `-g" I&1؂b6: "jZ& !" 
CϖP$N, d }`0Yx)』JtwZ|pEH?oH!i#CI\Oa;q4%)'B8 U&; YP"I lszn˴#!-lb6iv\gSKџe'h;nc4Ⱦ33N4.H8 9/ b4V~(xٌJZ@oz5#kVZ5OyBrrſ rx`}3##`= t%vsnBk}PRil$܈v))JEB),3q-GIZ\. +Vd|d)"Xf0AE@aHNVȥRLNrz{?t#*eTG1>QfŊT 0.t*2ebP+mv*X| zAutP"eDqtw?=2MD,0MRi^JKTeGؼy IE^ɓ!]^"7n0W"edBb6FL''*T~O'e {KnllKEfAhb7r\7cĽ{yӦ>^b $G`&e/.,9iR% L{2Qr1SQ]|h;X0D2YEv^bL ,3K!7=z獫x)D&yb:69PltAtRy{;; $6n0y픽e;Ô ;: VݴUg`B燾(c'evA%"s x64PQ@_" !īXD tRQY\H*zN#欗^αKd.8DYOo_jd3KĢgݚ*th?We#Ø+eSX7iݤe>$\sWwc:רO8mgMkce)f:-5bh-T4JrJKWV!CkBbx%zټ"^|먻#xul ` Q/XZ'^G -QKƎuK,E %.CTC53չk TjF6lTSLe`Ų0(ۂy8[y!f.E4v}biӋTF['A)f ?22F#C5fJ z{ⱬ%j9e `* /.8Zxsya1=1JOOcSةBThiqԤIӣ!2Y[rwMqG0$±yfnRrWQ+Y?ZWX-kyŎU*k"oRiO~3ɦĭE.^wE/ 1B5  \h%Fӈ@nJ͡c 2Vqm|&0S ouu[҄մz{:l=l7<_ͭS|)}J%,k(VF"88ޑoٺn"Zej3f`fq;H/餘S>LկpUW5*w }<{k7nF4|J{sϡ8fAҙ(*tY?MЌ05DKܳ71T"BK:%rj^Hڍ;=8Vk;o3x =5=&?0 FۤG }B: ؒy(hK!B_ ҧ"bYPP-!B$f< SD9B ل{uB DV߸B Ŗ#g1l,^@TA{ n$C4ƒ gZ{Mwh*` AEbDs^bAf*D29)K?[6߸um`U4$ jRYˇ8fO 锅_ q I k̔Ij IDAT TbcWqRܡ?''$` wB}b,O|dd<7 : HsxJPGkMj5h%-~ZNh\ɯҟp|rKֺ|Yl~0p`B) P61k#HUq\EB12/QW)ytu aɍ>{FAMn띌&NsX7:9x*ڋIU5 `amtb @d拄PYBB{ح5$$*R)rb M ёi\骔Qw9wmmy15eْ!0i _TBg'RqDI@1=H:f ZF>gϾl^zrE,2;ιLMNBHHBt-4%ЏqL" E,b?htSf.fZ /NFӋS"%hOxTwӕalN/GIc' 'Zf/@͠3! vѧ@(2dC7tww/8N:YeA bd%NvHi1tQaӺ9ՙYdQpSڦ+. ҘAHIC<3ӤeKx2Li<|_4Q[_CsaP25|Ihᴝan'#tr]R(U9 OƔλg>UQDHjPA)PGYvZ=~Aq2q SjP3l>J#hWBu;־?xcD p` = [a<4,JCc`{BR="V6ý?wο21{e<}tྻ_Fuv6V! -IB!u"tv+ GQD)'4"i\s˙v`*jQZcTCfϒuh'mqNv*DQ{d,٣Cxr^4-[(:)4Ǐt4\AiX}0_[@n_sR-{{oHZHvDT/Dڿۜiq]6ue*,<Xf2aW)&g6cSh\ױp 05PFfBC-,VK1F#6>f KY'&eCo!vΙC#g_= D( Ļ2)qɛĹ>mDMc:Q4 fHTB<2t54QQ >}no)lsHg^'?qx!}pϾo=CIѶNĂD  YNз&KXB_bJVk2n6R#iJ,H&D&GDUf6Nkh:F#h&xDKknjUضCƁCSZ׃e͟:+WaH(\ .5Xq)*Qֲw3RlVϩ =},P (f]i23eI[hSSEn;tD*2;dҙzhc4EvinNcx B~IOECm5xffʧ7OTI3 ;`yۄMggǏV!REOb2M1S (!Vl\ɩ%=8vܛG(bzv5똜]FhR reI&SY\A&g4W: ) ٶ͡6eA߸u7y HGgy׹+,36y%'o! ҚfOU%RՌYi3 C|hSUQ )5LsIc"ô,t!l6bRF*)4&EG|e' 9\lIPV6ɋ"H St kmRXLteۏ qyۢU(q2TF3.ǭXЬ NRM&T! 
۶{Mb#rNDH_so?Xis KȘriʕQ'i)Q=j$H%P>]Y8kB^_cӦ  iQ?|={qngǓ/mN(f>MO (LC[19IVB\n tTWe9{52hj~7 1* {I+Q19$l$MCGeIW(SMF!_'=iĈAfvH[Aٻj }6fg|S+.C 8>=7?-[cvӕQ0gnpe/0)`[ж&Yk i)0zz$q5rϿ+9zV`*Ŗuk{ EN89A\%f~كmS6OZc,sEc5Ns $ʻ_-<4?_w/@{)ۇѮfdC!0m$ :nŢ3;W``~Y{_xg,[#"b%s?_|YkXb<ԼH\x*Hs,΅Yz{q.>k6}2Oc CAB\Gw݂`Y'``S λ` zXyґI|TF0u#1TH6ќV7( jQD`C'B_AL7 DUTdREьgLiI{Ybn`Ed2N>M85ы4ltuQ\ 3UJT^a)\L1MACYl岠`ꈜ4ޮo7ׯw{# G<} I;Q,Zɵ5 XĭWd gT(s|LZ?0lu nm_Pk V&>%bkK*TEqs-;0+,lP,ԑbo/)DA{!]43S`y_7JELNVZ (%ƲLr9߇"z@DF<{VK矜>\jڜlzjMaL)XE 'Q0[s)Q`"w]PbtJ81E! "lAhI\im'li,aT)4F:A=\d"dI1#mE0 4:2IYPLNY@0fv!30]DPHk 9zXrp>6nmV kТdaU ]E5fO?22cF8<طU3{K`t0" QB(6tA̪)Ϳoy ֜w;y,CPZUo׿kʟ^͕=>aưKodz$ǮAx4\ D[i ia NSPZٗ\BVLgᕯ|;gs,ٴtvs\[Uȅv[V>Z?O9>9Ilk&bzteҨy|^nl& ۥČ=R(`6U0~2BǺ)|$[ž:cg<i! 0A `8qXBk0Z?zGHM(@|!N8B) a2ֿ=~7fڱ޻L|n)!l"щzݐDװB&p8 -!1Lb21`\]0jZ cm\!sP5:N&`QFEe^g7]W08̿|<1!n'NQml~-K< mk703]fNLBH.Yj)ƭtuC1F4; )M4DR9\3ss(e1u֔zY/R)_ (P^mɇbFU$Z#31G4( I4DPtI3DAHf2x`j qx=0M~y|ͿiQxNj1Sd(<\j6??_}v.b1}x0sm ɟ ƧS"*d!V1t;9ѠaQB͔}w֭>vNsVD1 T+9&tB2)M~u*Ԟ+Ĩ"U~  BW RB>9y-kWo㷹W召#3?%<seQ{j}=9I0 cccƾ}}}1N.. ʣhF{b'TU{w AvKk4=gW>R^2|+EK"Bi\ &&FB-g&iȑ#4SE0'MXSNok(-3>I2Yt 1ڻV1=5l]IJ?Gͨyҟ5x|s#LbVe/?Lﺘ}Oϡxm`:`U AQgvjr|6mus|.:6m_,^Ɲ'. &u+g04֢RQlrjfQe(hRsnqSh1<>H8G\1;4o%﹄ dx2aaN_9Q+?p'guKdC ? 
s nUb0 Hʞ!Ϧ Hk*@+5[\nȹ<TUҜPKh54:D/G^k_C1w0W o2r ȣg}561B@ʢNJ,[gޣAEJǎpsݫ4+8v5E՛FEX2v3 L\H5ND\ҕ셀{EҘ%)?_@欄7wEqu'v~7f?e78Vya?#M?΃ٿDwpŖaV+IQRS^3]{{]}Pxr)J,.e}͓1 n\o{#{v>[fsc;&9?S䒕E0a^Nڐemln?z̢P22͚[\}qޘX`4;p*k" IDATGteb}AӴ8vp/{:q c'KǕ]<1 7t_]pPq֢Nv"??z͜maݣ؞%j"[1BB&"LJKB$\[Yv>,) ^WX=$<ypQ}- +I<+塃:uA<~Ll ixm/gيjb QiCcJ Q@{6K_G5dͦmXr9)]2^ Wx-) qbNN=;9j+&c09_គP"L#DttQ̺vuvH[֨ɑc3OP,LJfw`ɓɗpT3]1Dpz(M)0"ٖb!M2}i͹I9 >iVCZ3uC%rq2Y%ϖ5|6KPkPferZ.eI+:F8J*DF' G&OO Jyjӳa56X  B UꌦD9o:ai"uUoss24>Ɋkmx^rguaiwQn:S:2,b?YѷZAiǓe$j^6mQh!1(Uhݞ|g4 eRj[S*?F8xs6 qI?4 ;2)Hap@7KD=foڞ^ YE>X7K~ay*3*cRKGp$2 0A,c 塀`Rv`d TT*m*[󁋊F؜!熔99-yz68E)?f&Zfa*Ubx("PcT3'Hyp#2LU a-S/Έ(tq!pcn^U/Wx4nY@I-P[6.:vi ŶZ2J\"&fe|>i&uIeE a 3 '}!33%}~;w}_VX8sšHza-ҽEYvR ,"sUE ?-ҍA"D03K|saJU{^D~ 5rəI?|+ٸuH-g=ɽO9rHRߗwWct8ƢaʵX&`Y,q}bnJ٩)R8^SD\x{?᩻364@֕!$Bj3dB{='NՍ)ܴ@C)fH=`S4V4[ I-'S:JҌƙŊ(^ZV#`!1Y׭O^3xeDQV7Ո#4w]fEˆNdcm]嗻D)N_ʞ&6&Zt*ܶݰU:7g6x<v;a;@e/ :FJ<|&ۓN&ZR[6|hMo\ƫW*L+:pajW0\{kMYFg90—sa~Jk|+V#9#Z9c$)2<$2i(">5_tj9tE|Wm ]8 9֤g>~O%|4!-sy'w:!9%" X b'fuC321}[_~3_gi=#qمY,Ȑff W(fݐ5y'M;xHsѣ!!DI:2wu4êUvS FDhx[b~m};Ts_|kv/RXQu+?CN/+L8n68ZDFݟ$8hQI&{*K7q! 
1ȥڷ!8)-}V+#ʫ5h.qt`# nF%rڀQz%2EB5qc Y&ףRT &d+Jãߤl\'} v}̹Tgў=ZJ%^XmQf^xQ;8˭iSc@ @Cclta}]>xojJm mЊ#-n62x[0#;)Z[Bq"Z:<|+bI- Y?*-Cuw7x_恧~Z]o"6oj($@c=cW*ؙ#1yCyM_zJl=ܺۿ<@e*gl3sX)`g=C4OxG'XCcjNܰ^18BRid dY5"s@DcH1+p} s !37n]OWn䨥R3^Oƒ8mK6ieQ2§IX֠i$,hB"Y8FrsE۷[6ӲS?622;4@__C:NZ`Oq{W_J.2=Ӣ2jQش*]e {xqA MbżlL\ *5لf#8LNfX,{t, !ep&L >ޞE{D6k5BA&acF&IҌv Ĵ lθ=?_?'͚_p /݌V`KKS&l͇b:V $ ŶX-iQeD8~L&dszg60Y[ nFj^D1"2ɥf_ R*Dr9 DX$+4 "0XaZTz|kGrI!CP`d7`&lutM?*xu/cKCBǯP;x9G,Q̀"vYg;.z-:(9_'1-^I[{5}+͐JGt˽ݎ(EUcb%i!0!eArI.L?y4,ְku*^ؔL§)gAPn0 h|w~| ]?b 5Ndʳjq6ڡEZ-q>K}l8@Itd4u9lߪa0|bRrFɻ eH4V󤮞9T'Fl^ a6o@+C.klLÕWsVYѩe!:Iߺe;ɘqq#As`h- ~75kX'Lb?eihRT- QIJ[}6mmcզH1䧁S'[v7]?57:>;`;glcH?s2 c9ptRrV dZtuar"`6m\QdN#}ͷlz֚O,1^>ZjJ/$Ԣz 5l&13J "фSu'sLYB.zΐ<:;|['䂓4?M W;m||'y)tdqj]_sIO.glrvh{kdYZxG!R@BGez(lv`ED޼OF3{=i:BYIu/^tWqpW'҂Ya&XTKWpXc:Wlcxl+zWW2GcÝ}} k:f L=q+ug8u ҾlyqIfE21?џ0I.B w.)0qA!p+;Y*^{o78o/ym[nƓ|U_0-ty/?+˷E5`*-ůW|>̴?5p5iNxǟrt{4'd۰B>PXD)H"E ]%%-c""霾!<ǥ!tK'7x Bװ3O~ 6&ђj aPlTd&M~[g3,y%q۸+Ьo յ(!kh_) Ѭ<" U":zM_;D RZlJLzOM^tktD8xRG?le$\4W"" B:xA\"&^bfTF_@/Gz#86)\dS[7 lTa16fkoa_&lJ\yIّX,?%WBP&:lS3qf}.0664Ns('FyBi(+0FbeYv%9/ǡ})ۑAk{Auk-] ePBRV XGg"΀+O5k+ə@gldE&5)V#Lj%N;} a7_ee,0FYN.// AY=;gf>;..2::ېƣZo~qǷٽgІsFgc֭,@?#!SwvѤrMG\cdө"؂!S"w[/߼n&f}b)S9I# &lUBf2:by5 ٶr\giոUtZ>Q-Ӕf̠:ͭY'7J% ڷQ+Ÿcj%V#~NdAf8n"]یm| NY6 g<+M|S`%Ry7սDyGW^jcUvwD(9cNm]ǧSӺ缡gpel|VQI,5l>ot*_KpQewd[_Hg{X.j_la;nYF32sp(`$4]cWL^Qi,`+<[62<4Jef IDATCVe/{9fL!Ku4i*rKqMrIDž8=yn{߳<vYr&{>f-~<;J4r!bf~zHF5'?1o},&F6f;eh_}+X.<]Xj!Y,Du:婍4)A#FPtPYAmVρʨrA+!t,.>8ꪫA|O#OkbO$:MຠcLts8^2˦hfɀQ)1&zM4"%.QN16mKƼ װrcq~|XI'8(&#l0Z'MRz,LrUǹ7:\\A116c\͖7n1Fm>h}©az45L9W`6:Wҋ=opӞe{(4;9Agb"Bi;ya&n&miLNe(b-2x.g'eo=aR扑8h$yktf4drxX :zsAEMxz2D&(,4/rd5¸ ,Rt?)e ٱ4h\~tw91җ8qJ]e6#ufdYw;Sd Q 6J5[r`lK9\*pD {M; HR4X=[Hd8t`()ܟuc5FXVH._ 5#:6`9K~!< nIbt d-7:=D*:˙{6@Acbr^%U5 ݪ'>0DKYKشxE@=?H]" hZ$]ZvmƠE"5 M*ɍ/kw6:bQka6+0:A:2/wi|Z>.y|Iy:I MjZ߾}gwI%azja Ǐ ev>BFem>3KС8~_ DVЖo'wiYs%eڤy]yaiEYk'90};Orda /]WhNN2:3M"{/~eڥ .r/ع',ƾ w}6zZzo24{;'fD;DvNf|Oqd',b zxɋ/f#X!I,vEW2.hH o`2"a{Eʢ0GϛsdJ 
/ՙNJ)\q_O{xp}_/pɦnLWn,BZX2?Sf\dhrz77BGw~.FPA=\6N h[{Ҷ"rD-B1i2j#J9Jm9p_!??6ipM1\#A[f-ֳ6f1y7CcCa2Qā7Wl|߹>n|ϲl zdY`6!6\E9z+GK S<`2Pïh\eM2#S{XDӛ3 %SƭHw9@0ILbj=?x?|AP9h=D$gp5lXҋhj¦M+GYшB`xXhH.EçAG+/ cb>c\MAP:Mvz`'fA7z#.=p:,jykM̳[e9l/C}(14 a̦ a_c.x5ÉE8``zR6_JGG4B!B,شX9=9͗C8+ RV9"kȟ%V(|9m;)|Zf1VE KKo8'}̉ľyA'qhN͘X~y=|LO7Glaկoڜ⦷N#HgjmlM%b{3%=S~ \Jv200|LPGzcs-o`rm[X!%2YKz#7Eː8sqZѿß/2=.c2 ̜XY yǫTr9Ӡ╙;꛲9Q8x.q\ y:H؁ǩ<M9NXUž]!:46pMJhk7YQnc}egE0M^xS>{A9yt $“!J!ހD:f(`9r8o\;C:;Oe>}s>@j 2*Vd>nhZD:j:SgfhO8!?8\8SÙ-%%!t"?N\gCpeH^EJk5TO&)ltIqS37n" R~)qQ9os7/LCdDb"bMlCO~[.x.JC[p溍׶2nA*o&ef`0Zjn9DGKĂvìB2-[oa(I&4Cy{;^O¸h|cAD ˿x?_I#bpSAFz;lк| MH_Nrd>kA=Y^P~.3pF! * Y<ؿE:Z:7g)j28ڨ\[O/r6ԑFrn-'NIGRA d`9ITl7Ia*8g$.c's^B,3 "u?=!|wbmU'Xf#(":A[kr%ՅGJWL̹2833 T_Z.O?iʆN^g~sݱf z~Y^U~BC\JYWR愊OएFbM̆+dž)(A#Y2b‹ͬHB KQ!18IZuxRA] k5"ftf3Aq5K D9S@-בL*\|if*c68'pv_gv^{+lS3dLrGPY A6V1y =JҪz$V2ڴsҡ ю|>I΅HD\KxASSb!XDs:-^^w?Dn55H|'j'>eqI0lL'BqA2E\sTD j Tj{mz N4Y1UvG]a38#}#d^2%Ij@Ղ&sJR2=2F8 ӴT+DLm.[ŋDMTX*OmheWpx=x=u~ǿ?B"M?Pȼ=1BdAT+9(qY*8kSttA4VLZh! p=Tpwr,]%U~smyv[.5P  }U nBu,.!@\\{h^"4d+#GyTZ{/=qԔ^C#+KMw,a~='֍.E8Tci;"[~t ?W%ՙ{A) Eo ]bTc)}B2qꃬ^+yK))n_gEk {o>3>v9?oG]Mm4KN9{ $ wg e). Xާ8;΀6Ytmr*t |GPٞd`t O݊ @gh7=VE5XԗZk$Si0:2S}#{3GR՝[XמX獾'%Pja?nMg!qJD}sSH:7J8^[ΟG!̓ X]\8;t[?>NXjbv x j쀶N6H8%nֺGTU)|["tVk1y vb mbgH%66N^I!bLx|9=c- NuH f. (ap"9|? A-%:EYdm(Rj(NࢤOHCGġ3XtđYMK9_:͉ " )iti %" qK#RHr9B/#$GfcLR'2 PMv ѱ`)伿'I7'[8ss{ f܂:4>x-($|h {$S4o` IDATGFێ)=Z̗ՠ{aX'_ߦ8env"X4Ҕl ktoy-;̯ϲa"u< & -|V=&@FL@/OH@gw@9o34x~+}ضs;׽6ƫ V{EKYqf"}!z-]ɞ}95Z PE(tE瀢DH,ɺ dHfmSdYb'xwq+YlTÆ5Ne2 4BOe+EIRN%pt 8+Hldq5?JbV4]؁3#ڬ\8 9"%^J--dt%)Ů+ԋ1ONQotQ7b |K_k_HBAIXϓOu5LZuxe>ybf|Ӝ)V@"x{]w?cf `YwF`!ʼn:L o;B>=]ԧLNMY{zZStXk]]G&Hd]$P89PW+tLslEJš$YRˁ|KK:N.t^ۘ8]ų_b; Ey0Z]U+1Fi"T>CakOy5–%Ox"q ~ 0VGl*XRc$qv]Ǹ\CX:Z'a֣Zd󖞁<8Ma2IQ,g@r?({*VtbaavlFdz H!J[QjȖ߉!1x>ߗV`F`)<\.OY6̖hAi:TgEOҠ9 ÁgGb-`RG|&:wꥼl_ڛoW~9g!J6~s1F|v1^&Mtt. 
Z#J}ړ8"mUB{d 1m8ӹ>w#~M>˵w{:>OpѕW#%kpô5`GGPBau&>H+ѽ$KV K7͖l Z*{ヒ$ӗzS<̓=?~s!TfސAع~iaQ:=cyRgPA0e-~zH(^J¥V t3>ɯDpOڠnbT58[Չ>U+xFyV'V11/y{]dŗ r|jQL0;rna~fEݴ*xȜd*l0=&W#bK9Dc AVca1,cG#j &FKJyPܽ;m7vORKW s RAEix=:Rq1q \:qP?vhc=SZF#KR'w13r R^;7<۞:Nz1tDttѼLS9Ioeq@,\WxS<<W:jc9艈P O%099T}};Kz9vm$=ν:c0I^!f֖!:QfQkB`M"7vo_"WdR&9ڞ >yg8\|lt1+q>JU ҅*B"-VƟ==w( Ol{ac5A6Jz6rӵ/FN/UؖtLH9:)L 2Y&iPۚxJrtOD< & &Mf e NK.'ۮbMr:>3,f͕q@cSJ-s(!Nu?4,&6q; JYc\m. JJM&bnq& vʡ*F$:HghS8%n^nCPL/U5"` a) z)@)I>ӿ1 c ZyGVz@$ /35jB$E P3BϔTY%8qyy:ݦiq/{C|1vK wcVHaZh) lj(P. Kb<{ ag;,gἾXxTST9ͮ܄#D fhxp܅OSp_2;̐E4?o˧>1:5v2~B*9uN-7^>VF&7XwyjPc}źd)s.]DN"DsYn2݊8/Ug-P=ƑѭDDhzu8֒1|8G[^h^^w:֭˲jEC Y-!#)xx WQ8}V{*Hmb&")ByJڄJ'W#LUc%%|iazzbݣdx~dhoE'qspңi4!VGI\d> ' pH!K[+fDu: i;ĔZeN392tgNCI?ǵSÎhQ8G `"絈qih[⸃'El/`Ik-ڤƀB,8^쳑^B3`_4Op"YR2)x֞k>JqP [y>A\hI27d 0 ũMqQ#K{,Oh"Mr,^mj>@;Ef _8O-tG f VDƬݡH 8j6{DӊVQbX yp5,[ʿ5l6U<فnHX44m۹kX{=BײAϰuTxN$GI'Z=A-9m*%3+%uڸ.@?nD:^܆{Z8M"X@ɺS^rX>Ҥ#x ?&'IbR9!0=Pϊs:qPG܊ r0vMߺ<hl%hB1^ :)deEl1 {*, Wq5L$x`pFzH6Ap,`Coc9vh]!7)v$!/B)y*yDYJY6[mGNN`@^!]KB9d (2Z1DQQwA)@fY1o1^H !NLjhhO@;8t0&2Q7)lj: 0|1f<~Gcnc`x_Hzd /qZ49&iNY>_?w9SMו%qJ LiKOl!lޫ.w`vl0<ˀ_P06/`a4}=YƏF*_ҜY5hXcm+igN8\}zr՗yuۂ~h<_@gG<_-ޒ f~O3:JSry!pvULx~kv gr ggfPv g= )$xwS݇,dAP9!( tutXv "ʒP 5V' *#hjTӪjDNqdIq/s9aס!p!kD-آxOy9ۯ  {}dZS1/Ej6`RٰG?yobn?j-%<v#69&KKgGn#kGBd2E2]u☱vV[H3nAڥ+c$ÇPXF(&P1Lh6|wWIMot`]QOIՓqiM :: LMݸJBp٧9w//bXhZ%K%e bgr 'yycO0D(K ?D;Xa>Vm'8k'=hawzxͻpr/Yۮeec#BL^a& WG'gB9s݄;\u$wRƹl%X2r/=PY)ի&OR&`~@HGB|KahM6jk1ZKWJp" t". 
aL#20O{AWOIi\TGB-ZNi!K6=yġ EQulp2xr#cb"\3y]DbK>5crhξ,D8m!sAq` e d:Q<}}ƆDh%KYsXJ<ūrsdm'pL\^gBxlo7~^|ӷۀʌ%!/ ;>:IlZr  Zm J~EoxSO||onA堞[6}Xxywޜ'Hٶ%&P>;|)i8ZDLϚ㐩l\!E:A|4*@97O}[ߣh<~Ȼ6P9Ӕs9'x6} |P9S1!AgsX)<tǙ mRe`pSLᦛ-0WYn:߶)JEYD@3C J#| a4uH-JE+Wd#K9kK(j<)'q6]|_gY1CV9ܗS"h 7ʃ,@$vɳ$?RY¸&j ӿǩ\Qh";ϻAIC8K|[96\,x/pY˜\9xl~s8c\*.pT(P/>~ g;}/y[i6護@g)~*iթTrEXi1Ea> IDAT=D+șD9~>\tE/BE}:#m3ӏDإԞCs((fM"<ݽI|?ʈݴ\DѠ#6Fsu!F̀d||9Kiz+`rL̮14ս%Gǩի0٨C}rpcRHh:N7s>'BLhJ^a41;) B'gSPBqwRWD[0йq-Qwɚ8wzS8C= tjq`h ,ppGDPB&'"R c}ZѹfÊr:9DvܳSވ+*Q&H>BK(։UKȳRRGw\ؑUt #q޽.6)$+Xb)ccl߱z^r5f<ϸv+Xx4[4Wahe /ȱivo.73n!,T;\G>r+Y|F-=Ah8yӝKn 𹿛ą] Y,-rqDߺt;٨  ,8g sC8ߋqZ<>7QqzFDg&-uoI}>$ɤXV8Wg9]eWIj4(k srSRwދ"\s2ľӜ44B;w,^Фk+AupFTU"2J`GA?:ţ#' ݜ|Z9ԾBϒ рN)[)vrɺAтc1Xy$ Ȓ4Ke-Z!bKW"T4:ny|?享f}q-OmK_wGsRm4t5nz,DH9+%P䍡h[w/+ %}쬜Sd#ci_׸l%d0 c_[s7x睳O_sov`gNTcmc+ǫq}ey<ô*3皬ZG&#UvS05VZ[28{}Mԁ;2ŗ7CJ'%ly6w䏦契 Nk/PoHw9{OĴ'cv\7~wRX?xi:򯿄^{ G6ǖ=~Q~W--l*J,kEl[fHLfhc욠s t-jIt ~r%fb:̜owjçd@e!;a Y R454BJuks!$a;&1x{(KtRAYQ@Dcs &k/c06``l.9@HB#FvZg kVڵ>|iItFHPPLFVo"L ݰD2J%`z1r\vac5t\G ɮpXJ `xtٶ8r؜IpG6ج F6`\߭HgP<7:#dQidCyr,ؾ%0 /nh(GdƬZ;cTGn7E UіU(0#4ˢo<8>%^y9T 1pq>YWό doY8<[n ^rw7PO&mO ضyv6_w֊G5km׳sG|kyꤥPPr{02snz1??zQ}!t]GL J:uKK:"2ʁWhu > CZN/aE?RNx1VCTF8I(V ey,j^ 43w+~+NR]޶y&lyDJn}HSG=ߣگJ_^Jy`j:y%V$DB| mt(PQ8iR$\#R1\օcŦˣMdt85*o̤`/;yMȴo-^zWZaYLiQI"C B%V9: jbyJSb"G?1ẔUuzFU1V (dF t jY.&"v5n=p"ҢHg4:G>RK59p`sM糧M>Muge:TF"}a_$(/ꈸ5_bVJBTsS~];XjS3㴕&D:Pzg=g?m"Uu/-[_R x3pó= G3YsO7r6 Gd膝*XoH-2^qh޿~.ֿ\8#>X~)/(VLD#WadU^(?P'/KiqՓWPa118lD0Sw6epu'\OU4R n\yA IkmgњZ:њeJJ}!=d";ȩK@:g{~. 
nK~j2&`)k@,mpn-`O }dꋐ({Xt<['Yw1b @)w=rXW/B4r)XHg`xޥls͞3JjOs:?a*!۱ޜN䀣)Ne[j}*UfWm|_l7ىkb(W!pbMZVEN] L2⚎dKLha'@ KhdGui-YҚ C>sMVH|^x?nAehsfi+ކ˹7jIæ /oC!`-2W=qSˇ:tEĢ(Ֆy,&;aN?T[HcG@E4`=A_V C}@(:N@*nF !5s8aMJlo  ̮@yhuBB8I)N"Ђ Qਰ@2{`1CiŬ6_#e+1-L2}PT謇 pXNC4x_3X\D8(?^.Jsm䫹o\@0 fν{[Fȴߖt8wW‹_Gud2;KP=3_ޏM489B/X4RY%~ugQrb.PA~Ⱥ}v]f ׿ iQ e~Z›޳Y~fƦqQ 8{WK1G2pIɁ%;*o;\/cpE>|t!ՄcrgzČBc$F+g9Ǎ"J|.<@`%RxZFds2a$84!6zoNNtyKGF d&3h};MZ7o`i<:K8m,-2CC;[8: 4>sS$~fz} 4Jπ̍ UA4:D̈́xgqd^jJ>[&8KϕuN`AU:\yV.!&?%[¯ 4,Ү ܰpm|i֨XctkW|š>ľXR<,ͳk^ks/Ǹ]=`_ys&۶nmN{z)c4MwĄqtKQ9lju͛1Fa!n4ErUR>:)0W&"ƹͱ.bO{A)MVB,r|C=VGm]Me$.# mTK YL yXPWԏ@rT%ܨ2hrwqGPGB\9dr6%=_dXG.?S AI븓f0d? s2IqZIzYweOT*kB0Vtuvm\?O5-p7E/{>>LZǩ\ FְFRfwZ4Ȩ_YN^ cGЦ[̠K78K.JZHN{^Gx!Հ34!ο,8N`ZƩhBM.XGuSSe/C7N "ڧ=e?(gȆ1&s]%B'sF4:@T+r[~hG7ȝ_zyA6^Іkih$&a<ŷ &, :(qREpQYtܣGd&s o ħ2!BO 4:ш!`a:ۈ&JҜ% #iܣhN^" #!Iv;7 ǐC;Gt+xꓮ rVv3 SXDW6qe5nV':o*X BL=Oůb]J \G8Qc|]s#93pF]ά3XdE5" SRؤs;Wعc#d&RkJO4Z3T*5v8՛ش;z7q{+ʟOz|B.9s޶@)xU/dtpG9:{iɥ?w5$ z*]:aqx+`fynS) wsNpï;2'?8Ic~SQ`9$\zu7%h?iY:XPXR=/\ cUy?Lr]qƈ8cKXZr ˴_6PݳUK+uD^qC ^>NL M-c܇{!Z1\KиS#s!OE5~ZPz`ea|; s-B8]LNyf"afx^ܢ#Prhn .7SK數}a/Lta1tW_{ W_;U/|￁W 3X&3 b=ʴLdYW }o|lj];uvҥZy>I:dh:#CXB רlkw8TYϤ3@'ɖzL)NԪK{ĽF 6loI1G%\޸hW~;F"c4omynjZmS J-'2" 36iԳʘ:4:>yL$VdH@gHQ?Zag X!C=|Jdi4/ҊoPUiPG֨@Y6ٰI5QWY}B"SV2)yO"}0I谂^(e $qź} IDAT̝K^¡󲋐3}<_eSQ~+RB~"V3W@!Vaƺ|^ 4'ahvy i/ٹ*'&=6`ea,X|,}"}({s7QqŹ]Ŷ+/ApήsV/B|x޹!#Ydm&IKnO S&4g9:A%:0N3z%dV匠8#i xLR:fiSc Y`bm:RIcaF~)\)3tHjە2$}cdf=Mֆ4[R]2stёjK}Gy\"w/'<xeT Qyƫ/8!&)KuTLMN/:h]G.htP&I,R2T"Ilf=YPQy  JD%A2@?Gjӓr咏)dRnUshm)J;qRghTgKr͂dY˧_ؖAlڣkTYQ_jmii@ءumagi&s h(ZY̗?{~+bCT5#GT\z&>?ŋ{ON|w ίqs !3fgml?W;2+ @3/q.OWisޣx֋8[S osӇpr8vdb:m[ujƦm%Xo_fM\L?;F6ϱsǁ7}DC4zGAŴ L:d92ɹ{Y$BkoV#P\i<I4@` ^l=3g5;,j[7@i 8Uaeٺg&^ ~b-N4ůἣ<йl=sМb+c`"7b D06nDsM80mEnij֗ԃߞ,j$e?A?Ǫ|AF|၀G#}-ߌ&iYXL+(apѮ esSI+{!-i il;²lR,,,066'=q V?epp b4 j$i!Ӣ6N{pa[gtS{$c{Uv^Pgys汉6XHåfߡy7mj hQU?'+ Uvش?]Z~ im2!dd,B)# `i9+'&t=<0%?G}+񱏽pjnNLE\/Zр:rTkі8/ƖIAҕ#sɼfX^pt*rbSKGWöcP)U8>N Gr^"4yrj IgH:q✕ vG۴f-\UbtV9Uԗx8}FO|y7c}@M37cI2ް BpRaC[^55Q5D!exؤC,Pie̬pg ^c` 
DHkbOH*qO2&s9%t+g~*%mf,2Cen,4Ye6N1iX8ĵn=SQ*%#pάDW1wCv2FkRS.e^V|jpX`1cd!'>x]۠7敼.B258c=1sI;EB3 7p2t P 2h2Ap#I8].O\fif*8o#H6Bv}b!_2}@ 4_~M<FuZ[|_aؓ`NN\ʽ.\2Р5RJPɓT[W>m[&7~u͈4Edu$[CJjM8V˩X+K$1)a|21Cgw9^X86gWE ša>g^KXu̦]N]঎fPkhxc8n #P1}hk,qlri0Z}‹ʄV :z=CJT#N.n_Z 5Mu"Ci )2,RD%s>ˋ rTjt 'D))UiNAYyRB sOHBTs'MX$)|%V8XQ9&}(7@k*iPF|m,SH @d='4 Er]$:dd'4[6xAiA Nklb4!AC>CN "k#gA +~G7N9=P kt imA#ɐ 1=KIl%sAZ׎LHep!KlM|#ۇ6]*aRC3>#;0 Nmrrd9pW2}Y1^uԛ}/eeaH&d 2Lq1eg^4]P`:PR^`q5MK' ſU :!lvS ByHA7(jmgX և,Øӈi^\piBނHeAt3.QFvHhYA]C&󏡐a*EK rOm7g?̳Cr阽tYͨ/.efZ+NuyS„;I7Bw]Wuڹ.M[QBF>B),6QOŞ:I#'[HA EDyvqEGo5N!N7f/hw'ٰqHauͿMqb_2ٗ$k%0tx;C9HQєdoPBYN92K?Uߚev!9m {r?3( FKyU2U"DNw]ŊˀuW]z"00/6@ RI3gd-Mdv@jd8 0ΝEjmQx:KX!Kز h/5عq3JdFtZm.0r!ʞ$":RYeyݲ:yV\dRx\tv۸̲8eR B)A  [' esxsOo'ėh6ev;ehq F_Ǣu7}cق4[&@ q,)rm;Kjif3:I? ,| o|^:AX/wNjB[oºu56C Ԃͤz'?{xoe|QAWL8b|2IS h,;Dg* CKB!Å#ãN )Hя B a1IYR`TncdZa3ݷϣ$\{69K7%w8|,NqZx^~9F(k<8)9}Đ% e4_bbt/gnTkXz>ӂt`cXp-u%]qt[S4"ke{@ 0d -(dZr-WJE=| ꊉʹws8-V&++0I.7 7-'=|Oy;x|C<ǁUf7_Euf+cj0h&oxWŵl#fNGNsa 7/N+hUZwB=" wC)%աat&s)83ˏop`P*B+vO=U0zSCcUxʌ)*=q5 kQkR(%{v z.6!R׮{ BPCH ! 3 z-3ӳbHzF]# X:ʏh,6i6$t +/'f $Zp`U8 v(<6nb_!}T7̧$(>!T4Rfx>(p,t4c1-Cc';,g q> Q0 ( 0J2VxH =C OGU#BU"@(MҾ[鞸e> Eel)!b^g*m=?i/հʣX*ێe3}|S,No#`#O֥JIzqK !+ 86߸'}8EOfyB_4ąq/He-^3iӌwC DSEN8jlP fWNՊŚ2 ()B+;epr~#7Y6.hXj1J ULkFU|?rθaq27vsF+^$I0ʑhД$:ͭ\C -_85PZ l߶F l8Sܾf3F LpuҚbTu#;}yXhL%,e=ͱ(Ne'?% 9x9Օ f> pB 5A9'=3D+;9+&:QU-yX`-^Aln`[-MY@'Vl6ͩJ*38(HIfnaӹDLI$b'RT,gk%GG uc  z 9| .ӴMlcF&1#gq.RyIvZCDFStٰfA"$$>m-Sel]0ГaSSSE ߞ.߆<麋*Ss O\^fʹguW>2 .&,/9XZj08$r )>B)/Eseh5D{to}M|#o-oEzHhI!a |f>~2{{?]d5;+gPν@Ib 5A\8c2f'c^-$:YONxtՐJ~z[}{IVR;H,d6g8` %qy /OfN:kئ:} ]>ʎe7 r{<}Cjk=z\Ûvzӥ\|Fزw7{I?*A8%)7,sx͑C0§#kƸsi;63|l %I\.<>uOTJ6104mLJViS]kc0֐ Pdւ6GQ2dYggtiO!p:æGZ&J:`Yn+o،HcC1d=KHl BD& T}ضĵ"unLc>#?Ŷ! #ifDvV:Dr-v32m0NՒ:Hq3s+Ync e'hMRQ%T!B_q L"W*s ׃ 6gÛѺn@E!iRM8]"kv}̗? 
^L2\L,54TYO flɳn*q`?f27Zm9EUeK84q8?'vAm_{mGp!ws*qVk\楿[y6{ae<o8ww'9Ը PE#+|/&໖i,yEnyf_a7~9 :GZ+%2[_"MT@.OR^"w|7o`Uh܊\ i cx,8zxƏgpB{ڳ[3/9}N0r #YBx0i;̲UJsz~N笵{!tx~s[C s9_VW[8FbH sGHoe@V p8 +󼗗A&EA_r ÊX;n6;y[ *[gS_~GƿQݿ;zÇ?2&gN }|Os@J4DP0Ѩ|k%mOZt%_R)iAdV4А'TI-aRT&4e`Čn  . c'zؼ|qO=Y$N:0: par@H \>d1;Β^ )- ud0rQX?hg 3%=K2dtl@b〨x %Z';\w a pa<3}=S?`)W;7IƗ4s`sMVgy$(kMQ"K fBn6+1#4͐}uS?ET(okx[^ &AوOJyG1wgG߳=O?I^Zw iwIo}댡B(B`$GZ26]ed\DY_3XjA'a)˫,9Ŷq¥;/_2 |Owu'I$3<8{sFR IDATNmp<v^Q;C ﵉5@r2 2nͮ-hI 2ÎMqfKqέ>X]4o\q f)UPưq y{08tfYI"U]\7eu5'QվEF+`i$KMVlYktJ%3 I[g BJSPM.ū6]/`>lkW.s:Hc0jRb(qw8guNjشk87[ Ju:&5'{ o]wp7o]!)缰^H?~|3q;rmzO.PQԀFi%H*GnbtVV{V-ֲMW]֟x%V[L_cB3X:BxQȬ+UMXdbB D%|=eB!,Q:l ^snrq[OGJN6{w0X1,;V,ӳ5Vv6 QZ%ݵ JT}PSKo!S-$m}CXtճ1Fz)B%51EA-r(_WK0TwV[Q6U|&x UH;}VO!0mc5dpKR-7X[4/}6/kTu?6oi6Q$Σ)je?XiOBZvr=7l*;Of# («RtC s)QXb,$%LdtRAhw[],򀎋XlвJLHVEX䪂Kz@$H,I7'299|JEJQs'H?JC9ӳ5@ =Hc/Vy*YB5YgcJ(# ev ϤYɰB#d X_ *"/As,.5s9LZωd9\ _[K8?9 ?2.7z ycW_-Kj]>;,oy زuxGGǞӯOu!4!I@=m;GW/*'N:)<6 ϊB^B;HVdTj8/A}6@'}ΉWQ맘"aldT˩W`q}&G&UBe%ȨU.31V%P'ɻ-Ny_9vp.yк FcXEBഄ^%M|vxX J+.w:Hm&"?]e[ؙO2-%XwD?$K6 lG8>*;4FZ'O.sc5I:  ?t9ӥQ9Kp@ %֋9۾GD}gx+a=V=a14jx㋟\—|o}Cͷn}߾/CuQCb& NI8%Y1^@,dLH*i i :ncs#ǩHK8!jAu\%/;~7|-g8\p<{w6 ^?+FŴM!# ٹZ8qzM@rTL<߿@T i-7wQΜCE!2-c] gr/$ ytHaљÓ>NC c tt8'[AY,dRv!`o زGۜ΀c%WşTSjЦm,uHVϾky[O_%WO pO()U,_|/_5oozӛQ{W:wWd Rm0:X 4%Xaie;ˣosM|='O^>QFy‰ !RRLꗇ˯Xz+,.:~xvQ.E61ڠZbrzJO:/Mfh UR ){aۘo|/uWqޮQu)rcY[Zy/4\oig~8[n.ksxxD% +#];{?ik(#_:Q(Σ[C6Rgr'u4Ȇ4WUq@M`qֲrȏ8pC(掵щis}&& ]uչg ߮R-KYAId%+Tdd,˩HAk~EcA\K#rzk9A* @>nA^Z0j!R(f O>O-iVq*n"S\q2B!p#vGgelQqڡdy^x2Ζ[8~f~}yZ&A uѩqSlޢ\3X8!KRj IbCONwX,I+^rCR |^Nt{g=JO42fJcN811>ƹN,Y8Ŕ=ym,4vG$J e45Vٴ{ ,lb/,=]J bgG(=};Wgp%w >_zz q,ifq/p&CDuΜnޯI(Uy[_ɶWD`AXr /w\A[fJ}&+ a|L8V:Düp[@F{wqITK>@DZ b&KuL2 5i+KAckaP ict# cF-AJ# g5/;2Pt+W5:=-+~vERI=D=ft7$.(rmY\119F]R.8 K5 OX^~1 ~0PDU|6_tY.B3',;-_ `h΂o3q^p/w| ?>_fznL>Fs47FGHb5YhTF8\cwrZPm 8w49ø5иJ{񯽎쾚ocD k^'UL]Qcmyۿͻ<. 
d`&̍sm,6~Oal|fyMc%mA+fm,FHl]59<:4%_*G:}B?A!XRIràcɠsnq_H\1:>Ku$q~Xa%;)[387hw?X2,z/=KrKJIJ$DHYTbu3Fv,lAXAKF@!3bs_"X6 l}x^c8 'O Mo=NZ OU{>ۏSchCc>UNtXR Ée;CS\>:&s^Ď^Gy^S(~+GQznڃp4%\e{;?~Adkg,&Ev]BŏBn7x⛷F5VŐ sׁpxmϹ x.!lh(va!@yg'[^Ȳ&b*^}Պ7TZ [:SL A|A)V>N}= .dN[=G/&($x12``ɰďMӜZ]`cwsj>9 ^c㻮<{;{\w֚$8xl봙pcuteUbrR S^ĥY |]Y>[*SQtPE4_5vY-q#/5nTa-ߚԠ㬠7x+ R?q-;D*VC(?mFm'AVq9#|:`8!^(J -|eo&o]*Jef*9z'IH 3sfN>3x˧z~g#0B4('9j>4OGL*d?Ve8'cap /M%8t.R&T D+ "$gl4yKLݣN0,4si}ʀk+f8 L2JDATG"l!S.1KdR>AZsjFF"^z&ɘf1)+dnIQpdWT{`L9HHʤP*%q?[A K?x BHFAQ= 'KNP =p*Z<'Ѓ96\ann,N9 gtA',Vyk+.//] 'jۿGq9i# L5v4z<qy;<A*$H΃QٷҖmgZ {;ޭ_ J;wy|TL(*aor̠e`A8 ʬj|e̱t3˟=Bcl^U#Μ]!Ghͥsڶ#X8|;èAaYS@9Ey"Ic~pFP(O̐B T0w\y.nQN/efJo+GNryn(7CIiwjuAc]48KPTH~A U OMXc*A`aTwń =\f2[ı!G%I Pr(`;N7KEscPxٱxJa 82:+ (49R *OǺ9C:̌6Xh鋐7t֖(Hپg_NF &G+ǴejʺrP1.mC5Te Ӈ]Ql)3Srk7o%Y;n?*(!3,F %HP,06g4Gz74k5=>ǹ慒M[V*Z=cv?߷Ƚ_ӈF畔J(+$Azn.9p C c9+Hv J""x_`L7w@pàslQ#@)4bU1EV  IDAT5LVFyA ?fqN <:Ar1W[T=B\ BVX&zQ$}sy^^bHa#8H?!U΁d|GX%쭒szqN,QGe˴:A]P󣳍vǟ>s'jw$Kپe!{i?x.8#yL(ȏHҌ$ H>F;ldJٳok[>sc8o~ïhIزǦp˦=26s7_!v>S<|o R/5|Co =#J!*g[\}|ؓ8 AY&?VLy@P0Ttcv/)"II]D9%]^J\!H b6o+mUbâY ޏ:!DJ YE9K(A)<%=,RQJi5Y ʾ W@8qlj5uDFg4[wX1q()EGӢ|AjJiJ ژ#E!): )rNT-O*)1]\QRU[vGrN˙(RZ=TxͣKf9.]mN>S9RJ#IV(̓9L=wr; k5:ݘ[o9qeU9gPxC!m7pSirQۮsW/0 f/شy+bu6_mx tgOLd!wB[.u;*ĕ/\a?sދ{,[0?~gPD{>|!2_ʳ Ȇ042 *tG&F0Q9ĠWHO<4Gog\C>09}uaxx6 EmN"\\ #9xKo\yj7C/M Y;?Ya7o=T|'*ءZg2]$>c{/gtF?7xD)KTť ENz>K+_ @m1-=oz!YKi1 *& +: `*i .Ģ80[x^_rhs{6[.09’dr[ k2Dj0%qI]]4պGҪ U&I4=ѦMS#8ºG:atNaRygXEL!=ӐDg|+Z |=鬦 4yR,ff6ZG9!z4;s>AB*=bo@RjPJORTkA&6)P*{ش/`#`%=jMuDOHQ3oy㇛ȆU~,iS)Edy:~ m{NbwJ-B*1qw 1E f'')Qw0ysgrrgA͕E^r/-טR)3a@11^bmm 05UBc2g\k$€?>jgdvOSR>þ׿CGN#g .s͝|n}hYs]`Au`[An9a}.&dlkUJ9$JDS$qJa-V}Dէcˌ*a3ҝCl!#*zQo}{ UÈr|_ۧK^*.tJVG?ħR[xїB>#EșZ Np]AeO0hg]K_HF .Vc[4GD=Idm?=/Gɢo5 0R G6 a dDN1sMEP |z%B8rst {,i3Sq.WRJ;J$̈́*V֤'#ҞCSdrmRadPs>W=78ޝ1S ק^aդTf}fVvr]?[y|Sg{9nd#>,,,<Ϝo;?uד+w7\e%ﵩ4^]f%#ҴJ11R"%KfFt{vm5m?^j#룈4ƒ:= ަYtwy,|F*S*<5}]Q!X3'ĩYv^c>3XF^믁m~ϞgWN{=nz3iPQit}F!)RTd,V 
.w8ٯpǭ'G8#I3G||)hr)eQ1yNcc1vN<[1C[lb#,muzGic<$r%$x>/}˹JTv8JXL~Av)N_gb|!C@>Ap?:y_QK3xa~5V`MXmp"N4qUcM XtI2M,@4gr[HHLa" 2 /zu_ d6Th29֠s ).b=gH#CF@{Ow25h53†92-Ðhg ck. e>e`hNEGʬ: +H4AGRO !SU"*~ yKe̅|ncL`/xI+AJNc38)גKWHˢZmQ*T* p݈$ R1ͱ-(bTZ0 "q=jh;M66c~:b|KCͳ:Vj+\pUW6'~T`d-^E+V~`/q-'s3uA:"m gj qIV Z$&6aj,a;;k"ְw#IJB,gcѝ*"YˈAY4!fi6'ےWL}ui l[ĕHj1XBCw JH`RHXTl='yP0tK{+.i6f YVI0}h$Fplu)|fin2SֲF4#5tBNC^ #VX\PtF Tm,OR2~QzƸwqoXc,nA;\6P,Y,-r\%B m6(\Ϟ`|} A(MH~s>w#\c܅cUc28ȸ6p5=,({2hD; *'6C>o'v'ޓp($C# to NLwDMɔ-v<{-tlIPf$}:Oe>á= 9AJF:y-9|9b.)oO//|!cL'l 晚oRW!wU9)hޖ1.}54ʃL8>'+k8.+̀"i cH]I{CyANBjGWX3I#'hl!" P=mZ*k,8:_s(vS4$qBs&4bQ_I(.q(ϛ'`'Hmo)5Ql|vKgFوq)Rdiet,2Y'5!nݣ1G$62=!E[Y$&;L—?^GDhegoBJ9U;_ˡC1dνx;K|峟k7'oyis vOv-?>w+Y:qJf,iC;R&m#\!mjSضК9Ρ}5)u'2 bc, &>+>?F;*a Ψ NW2>A"[v3j _;0LEȌk*\) NRS( (Sz:1OB08vfOOFVXR ;n6^rC5BB)R:!Ѧ (3HI;K2JH""\tscSr0ԇU(cZ!zv}';+^Lԉd}ZA!$&bJRpm<&OS5㌬]QO8FbDHB 'EJ34c.2tyuF/% e 25bcQ-IT4NmEψ@I]q,F&E I=1,z'ra3-إSg nS!\H|dA6X$Hc"F E@YȰ @E iX4N&H8Ʀ27tˇ3yú0bKlK/<\቎룰<.L/X3V[cO-lTb49c~J`PJMqF-:7&&>\bU9Lby.N>+u2F DSdဉ! "EXF%nV2U,A%V+/B~ۻ}zۏٽTaꦓL=wr%UV R,x$I\kO'i|Ђ:P,wjt{au.x0D :ݦ2h]Ě"-BhEQ8mrld::s+lH4n diV] gx^ih`-9w.@sزs3͕9Ɔ9t1n{$s8Ub!,]/.cmtTڏZxKcdvBKu#5wݟ?w>B6P(H4v|:_N#@HNQ1~en>fZOp~nm  I67S2Hc4a0g?rVMM<7dª, ǏٸCSM"e<չ*zKz6P5$+ 4;M취 cOH.zV AK2qp=;7={5'9kχ_z^RXc#MfFZSMT͒B qO[gee-:DWƸ2oi?I;-?h,Kđ’R*O7NIAb.af]lNE"IXXb[ 㳴gmZo9Aҿ3L HBg\Lb{G(! 
5±Q+xG5q>Q80XqlYAfS;w׿"o^ H(M@I7у.DE>SKiCӠhPn1y#;H\'i-g#mߊ2Q0@Dt@TrP@D[W |6y*>Q2]D9^1nZ{z5$A/Gdì^3bv ֬FlfZÎYٽ='Wn JfYSTL&㓄6Uzؿ &CY'<~nyo#}v eUK/L4v w}˼3|V`t/k/} YpO.⛵4 V ,w0+s ^uln"4'0W^o}> .^7̅{%5놫9GP}Iۃ_zU7Xbt8%~5"-#uq0ճˉmEJߛ #a+Sei4ŒYlmG\@SV.c=oxqGcb,K;{bɽ[y*dk,QAi&j&BP*8hmAgfRd˂9Cϐ"p\:̈́|Fj!IT[ae| vI)Ʋ<|iH*8 qEs ]vPAg~Zuo!0B',ū=>ZXeλ$G KՈ{Ksi-5Uv:cĘLJQayq z{{4ꔆפmXZXoVw~Gx8e#OcÌMsft*]Jyfg,B I4Zk< N@}~eJ-fʤy.·1^V-޳Xn䊌]XDq'R(jwBQh$5oy⡟|?trǩ]Mπw<"ZSKyr8@ Hz gY^_]*GߗJE}J,aS*T"2rk,,$Q:6}m`cqtpANev@FZ%XIgŇFZ 0mITbY؞OЈ"&qԄZ0lCc>X;$-Iq';> %H0D" IJ-[` # jff8$>sӿTmo\XYnDaa,#}̠u`e1 ru.BrzG_8I'a[DŪOgAJ Qlþ^k<,v8ѢקB080=?϶װjMGVXm ~T;ef{tNL8pdq/ɀe~CuV(J&Z'ǖ$"NVضMLsu Q ן!p.6LUۑkffZ>9J6nQ_y&!)BkKuzjh-pkl#30v$BQEY$ c^:~Vq6MRu\u8(S61i_&$R<ʇ.hM`g$mc @^_j# Q͚V *2<=%6}7fgfJ?ea~C@!T+ aRŞ_W:eX%L͇Xa@4ǎ<#8$Pqd@phχ V2h9mPzQĥ\r~ص>E0#d指7xR1_k= z{ :`03Uհ8ztlA^dJ'k щKeY  >4N2sYڵ E `vß.s{[mxRbEߦ(gm #),o9{{x [z{7LLrvLth$j?kW0!pCG$27zoř :ͯx17>Zn~ |p?{nd;8G4]ywwϽ4-/ 86Wq^.}<̎7ip^~~9y039"B[LGBKR*PSEXPt-JSt3:Rɡ34:5Ɉ|1OXKh5Ä$<ZJ,cZ`,VsB:S+4fYh~rLXTf&#JB0Dd|lCuWE{QbI3 ڭ ;WU*c9<}y$Y^z9jlKd nl΂ġg/q2}"i|V$[:oJwlv*'#  &ƒ'n/bIAf\ؖGNX UIpQ;& lla[Je*G넱lmsK'ܳՇ:"rssv^CTZa#l)27s Q6i,!',\{ ,..[wFƇy[F2: ]ɃʅWDb} XcX)0y,G&dIn7|c/aepU/?䢝y3OCĺ%)uo]ߺEl97K+:@ixXEtC 4i%ë 9X?8A[yX _/k7-1GHeSt%st}Br͖mݓ=a}Ջy+9^N-Pۢ=#D*{[vvdN~mDJ qך^!޻;ZfIdqbwܼOȁG"H2eHEc_c^Fɳ bAExqw|t63VJԇyCi.p镛8rlrc\ledU)%cbxu&lݾR&;4fMZv'$y9qhHGg\uF6e5W0e3Q$ UFG r%kvC= pʳg+kRoM/'e5u# avj3þ#6+dS{aCu>zKI3ݰcs/\py5b^86ʚ `$21ztF&' F~0%}+ċU^&d8y$j1q:1S' ]t3 kװQzXӛ+p" aac,u0Z"K b˟áޣVzږXHdsB⻂ڒwlZU%ᱪgUĖ!`ߑ \y.bv{cH+»޿Qj)&HJ(h!e:瘭6,# hs`ַ{[ˑeM"QqG\z YlS S$?ey[rOJEYщzk3'hlq45"VMC(Fr%$AEO EdK(I%nWS) I=D88 vD;Nq˴V"zz:ӏѮe(5gb# "<ڎ}ohǞ_|^v0k7줽fU;6s|Bkyѡý;rqb16F~W*9AϢR $*TD+zvL(80&(fCO~lZf Yy.fOcIG|`r9T;$Q- iOTթH4 S(<) BUo&<wkw-"}=M\Wc`{eh7; I}2Ia #Zv( MRjd\RYid4n/q6 !-$YZZ`txYzG@q`Ʌ B&ݽfcyG=oUWl2k{Z͠ɉGt^$чֲjsM'y?Qv^>K)k] _fmpqxR ֜VJ0&eָ\Rʗ`q*eZ+i; |$5D&<%SX~ś^˥^*_GɅ6Ͻ\n[lԥo0'gUXOO/~]/k7dr6քJ 
Fz(SUzxgǨOm.zjl8|cH̱B0R|ϨAhn'kXo?w`K~)[ٰtDgq;7lj9$]f"F`KvMb]os&ȼ9>gw">"nXQHXC(alMscEKU׆zB̯h:ha(d{}!w=.)IߩQ2KyS*$O 4Ʉ#sdvJZINO?ΟjIڬPdnv52_ՃSl:_ģ| qRLhm)HfvqP!x [!r#[(`|*Agj W_·ߨX[ߋmI&!Rlb2ug+sl+%clb?~ ڋUeA@H܋3<& Jt#EQwY8ޠdz083.s8g,2W79Iw DPRTY'7|ao]>! Tm9jKmkJWZ I☌+tS42y FDЮkgO7I*N}c#=_lI,L!u~.O<:?\ň'.͖CCˡ:2qʔP >en~B򵟴dO{ިJc?ROl<`#}מ[oo~?|]o6=re(b6fr_sUlKE89ȻG$"v8 N.:Qh'qGq`j܋zvy8^$n$YjxTpDDQgg;[ޒXV @f IDAT?>HcѳY`I敼}療o9Q{EU$.B4[F\XX`Br#P{ȎbeW,#:1}a0v 8ʁsq[>SAG 2䶌ϧx8A&DiQ]YılܸG17ȏ Gx6q+gja,fX%G \a? ^vSPob ɭ?eHI򭤦 ru[Y5Gwx.O!}4f4<1^v9%xs1c3y߻Xa iV y6y_~hĴ77R DAA?1Dg˞W" X \ߧ0X,W P20C Xv0=wbApjէ4(aN~ڵ{j?^KEש#e HQIOxDh%Rl8Kef/}$oZ\ldXU{af$L),N5[(FvȮ{Յ9%}H;SA6\~?=ʸ#Ply \woGYH֠.1{2w`BzFDW\eoqyy?x[$:5g STWNSdMX!VyKTjn$5i5B'#$hDS/DL}:g990 ӌg,S\)*jm yͧ~@]N,(L*99]4\X*ӬYJ LQ,RYP*]!XEX|( >>u/q:TakW]崫lY߁l#T ͙ئ4xS=58PӁ"q]6^ YT}+r PHZQ-dU  }4:Q}eeui>o* L"%[-ʲ-vaqOqjV;%dK@ɲ(DR$D.P9N>{Ϗs$ǽVխ*Խ콿{* Iw fsP"nd!V>[sl'{X=sa [dUe0v9j*C8 = QLKhSa;dHMvoXtΉ-TS*e.l񑫶q|Fi6"F t;ll4 l'iԛ+Aƅ.z!$yšDu2"NOn-,Dy+k!۱t$}C%A ܍L~%Re~7v;?<2(`f}Zם*b-Y6l^^d=?3/` Jf 0 F<_!p*:`nŭuN/\b96W+4.>[ e 6.w]8MIDynkHScHE$2!NbT' *i-4DAȵ[Aj='V&[M@<+qFꘚTa:s^Dv++D(=tĴkauP 񳟘p8j! \b%y\^fxl^޲^t({`NF˙Lz2R#7d"Y}Zȧnᅪn=?S.r0Ӹ!6`|1ؖIH#eкLӽ [|ZoO!4NMMoL]Ib֪AON(n] DK"¹-`x>Ft6[]g3hZHůh:BKضgzNm;xTۨ5yk#um'kYWmTLyr1M Zߤ8qMqPcM|D1 =7"GxBcB- a3WAk;v;xq IT8t4go/r/㒨Et\&I~()&?\ZTb C;W k/E-m6?ofjBv$BKjٿzɫ<bRfL\;Zb87ȇ 'VS F%IV3ЅLW8ˍitLV b6o/P mٓUnH8wD*p5osS7G,/$vJd&ooK{<TMxI?,qRB"<֗V9sTgJM?Qwr,(c2JR>⣼4Y(V- E M z{D%?p5y OqϚ-At UoV(b~Ã&?"1I/+V,sg~+%&k_0n֢ j>a}EomA$Ɏ!Nv_)41N [kβ1o!72F۠vIi/Fj f-N4ƕ&6[H܊S*#jB؂LdA.UɗlaXԻV(cjpzAc Roɷ;}`9GhmXY\ch4ixU?Hf#ae%%CO Vec5RI4LG0bl6HIn0w:Cgeh2Ri6ʀVJh x$"A)Fm,\f#2\C+LV1כB !i\5xf@Y"ҥ z/H""_%v g@d I<; q t}<[oWvJc,-5huY\`bbq 1;Ɂkn%\,!6yC LDK7ƫ?.h_w6:f@`FX^zf*-i&!~O*:qDQ޿f'ms/2qn{)<@ـiCR !ХF4]GZw/.sk3>!jܔc7sīnOszn;~U450̹3g3f;q4aN[4[A%b,G ٬A [nH$ȐڥKsf3t>٬M 0L4ӠP, d7(ej.*&^=꼺DV̎}y4CQrL~~7/qd ]7;4:|$u1V|;-]?eoN^]>WݔC 8uI_ &erGv! 
6 %CW@1AZ{ }t4LM 7NP!*JVwRh&[b~VN7!L0O"Gj3<&5n͜_myrCghnףФ{؎KWa8KH!b$Mgm#D6QVuͼ 8Wd'R˱庞ś%*-֓^:ZH^2>H@*^7@z_S` r_]G >Nad! fR ў 6Lm=/zڅ {PEE,SyjmR r>tbH ТמP6O? @ ta&r/]bs 8I77 Ϋ8ro|igl !~i'^>OXXs|*"7Yu4(8q=e^yeX",V™uM8Ki0Gu@]?&nB*3i|bEb|G?+G7 K1Q0i])hDFh bbcLa$섔&t(ce* 5#}$F$$a{Fh'J&X6T:tM $RF٬X]'5ssyTIEϡ.u/gdOicBvH ZL XLȍNH4[ÔIS]I.lA8D&Qz w'KCɔf?%kuy $Nw_2ˑƗ[&C趑JCØN#/2zuL S_"_3{00n/i&J,;4Mvp 8w;e9 "S۶ax1Ϝ0, 6Љ 6BO . rJWlg={8[ln5d%_]-Gx/*au{YP >U%7x nzUok DWIhnqv,-$ǎ?q%s/𕇿Kçǩgф$#aN-#MTMxh>4ʝ7#,Փ'e._/˗Zġk񴈿&OԸ'00J>󭿬b1W{)),W4œMe%ķO#t-wƷ{nC#"&wf} | =bQG W_{,: UgMvovp8~̈Hh2"u%<+]Ʒ 3An1t +.՘+^QI탬0P+ 4mj AO^#Q, }K4tW\Z0|Cz@rh#{K-Ft2ػHu-nŹ6|A*Qt=It3^R Dl1hk;Fbh2O s!M4-ӋW{帗w~>|SW2["C38t\ՄťuyW|3kYIL&KijW5˷O'Zkp[OpncacdVVIzqMӹ;Y>%*N.^0JT)0!td/$Ldo>F@~D3C犫=r;˫d0I,qR 7L2+ NCN ;<؆MҪm !l$Gf8\w cϣ:k4tr](_A:;n+w¦9TכinQk 5&/\ΰG $ mOGL b,ۢӌܟsC.oŨDYdljgKcs\| ?qeyj:/ ԹآFC0ʄOfP] $qbV05kBc AdsY.8ۊEPX_12CvČseVN8!l7q=kael&J݈(rׇ{VPq?8KS NY+_ sF,r>_|w~|'hȈ[rcD.vAt B'p>Ω6N@84>Lk%;IR.8$f~BH4%5!c_ *ihdCAKTNu-!vݷJ^ 0~PwxcJKC [g P<0(Զܲjrg|ZYpnbN`o`iy//*hW*Ȍ@y Naj "IgptgR` (O.= n *3[{V(`Lnc?wJ[( ~X_*|qc*k/ t/.TX]w%|2rY|wcUb]]8.qX[ȁ[1e[+`g zE*<#7x$~xG3go d|6^B[*RClj`363WḺsn~ت5bO0wf2m^x]ݷ^n=A`})d}ėt֚;:͎Θ$C!ͲXex@"PPRl9^~TJ Pa:V=W!* QJvQz MD xg3ˇ;/p=}~ i6\ 0GD0- r IDAT8Y̝- ܉m>q$( ٬lPGjm} ?G&Ù$*i1;injKv-N}!)޳.<>k.7n:Иu0&I|N/'\8Zc蚏?l9n,NZ _ZBuz"#pIM@u$4:|_jLu0=739ca#t1^B}vUW]MGn Qr} t2&*QoLMf2mhJn6ČS-N\{0ss5 % K;4 `[:k4$d nHoDi`;bh7ZGifaBG]mLsð]6j-FZxk8+k{sc<^Uìm18,d9wIBBLx?!1OؘAcҥ`;HT ,==̐x//s3Gqx BM"xAc䝐_ԨN z?B̋M~"%Wٿ'v6B!x.i"L&20!lBaZ)C0M/ۊ.RE)u w ~[n (os;6ΟEhwWkVV#FCc6.xBm˧>~`l&;̭LҨUY>EsliRH&Av0_wd`eakƼU]=Hupn"(9ӏor\ȍ)U ~R ?p }VqIdBu#Wb Nwrj8Ghcr9jG#m 0vt$!LMq%XY ) <]O<6%-Cc d""b*ibe,Z34\ aV8L _g,i]ӐQaD^S2P$nI"pJĮ5 8B7CB?p" ЬaqJˊCAF"B !҂@C%>%_25qm2+1mRNbYZ;,FJ M\QϦ3S 6\ ib["^L71 JhB54 D"ҨnnD@Ʉ(^V޻L~{0՛o~H-XkJ d4DHZAOefA8 ſ{ga(Jy'Bt3h"d-FX:}`V)O($Cu7FGSW)$CHdX?P?Rsr";v$|o?9QyS4Ώw7s-'߬q nÀJ;2QH%W_`j:.|=!\O!ʛop/g+D䱿YnUCč$(Fft/hƹ&ԇnN9dpƮ}2=KXkaz3-3=aAU'/BÉ?ce{K/qDLF+kEC(MJCC,_E:q- B XB#0 3YN]: *ȣdIXlGfLL 
cϏF8^$vsQG&n ;A>(ZxJP&2e7o< PIlC^e'{&g0t )B44GU6 b?F޾r 1-@?t7l>y%ok?U/ N+˃,\clv_@"q@q`5 +!N0" I)67Dܡ 3lj8BAэ NiTCJD2:[N#nMJaV Nɏ{N{``*Ky`E^]l fbrL :2 K֫R!< oK%ddI! }d|3sh} Y<E\s%! Iu'N3CY0LA,b\WqD=YnL^ٌx8TVL?LvN,fgKwM %cj&C9{J琚diJ=Gn80h R{CX4(~\vSjtGyO})z]YB@Ӫ_J#ۀ=9w;Cv+n.#lyNS5f}}ıGarv~F" 6AWGoJ171Ԯ/>yz/{{kW$0$OLAΰ)r<?G9eګ:W^c`" ȱ8̿2{ɶkm bcsXPC's̝[m|]癳kt4R"t832ڟ [A;c~ɣZXt/&5:;w扣Lyi,f%l#Ij &4:E /1kԗ%<:v6GnLT *GEc'[L)á! !JN*Ĵ N T`9:$IJ3nnk(MIΉ" leYn=4Hmܢ(z`g죵 '4^?RLn]^9vcu}n,k]RGϷYsL2 5p=2Ehl .1hYxavtx>kK MN έ:wwΎW+-{Q:IU΅5?$ 4W!>ś_{?{ƽLw#^KsoVbaduċҽxi>v#Z.ڷ.4m0'_9`n7g߉_\XJʅo0y9 dGQV63c +6B<7`m}K$mڹM$L<[q EP²4dХOHb{Z5vlΝ|6X0 I'*M-OЯ sBPʌxi?: g[j8 MS(h`1c'8<D[K Ž~Yt :4*V}?v{\֪ W݊)LPg˫s|Xq)("^&8LLmı}@ B qdoa 3'y'w16=в4L06[|'N3q,aAܩ=v s*|ahWChi,i,MhU }vxLJx{{n 4ᱲ Zk(43`hf>%͎jv]`]rd͐c A:Cm< %D1B1rXls~9Fd!bf|L\&H:`!Hz2W% ?0 AHZ53CÊ%7ҽGONR(a& pP ϞP|m"%=$3\7]eVeyw6H )):I䈜P;CmȍJ;ҬF8!KrmhSmM4U3?wdIhGFFTDU'9{1K$mzj-D('76j 躂\6FMdYkm}zuB߲fY1N24}0@w|~aQ ![ ?h?ZC[UcNjc7q^qK R94dd[Rh{1=N1)-ט>I"oQtب~sO ܠY Nm0K] BxKW)]I5ʧqcX' C ؇(XlѦ$n[/61rx#5MTpHQ&{ ?t/}PD2$1n$ 2/ |c1IIJ~N&MUgAOgk|Xf@/Dpr 0c+^6npgϠ 2tVHbi8JnWٱg/үepK !; B 5j&塇1R1xB(:mPS(i9=ݑۖf߈Mrr=b5a0M|Cjj4m4*ԗ%ңLu[K9Ν'_BE` mw Z}tJu_%ϩ!1HG>zES 06:…*S8?ZW5j92ȱ_HnORu"7O\>}y6Fg15rI&x}7g)-rJLZCN@(biMpy4yO8yԻr 0w>x̹-oG4 {%u`3{KwH& t\MLzR%1lS䮇1k{OaI:\{0Q~nēL~H?ɡ)*n ^ecY-[7Z>R u,/1sI@fڻ)SR}Nq*m-BM:=+n4Ҡ&[Dc)hC,4rykH/&tFT\pWK`"AV=Iu(qjO y˜ZPnvE~%y3.X FMS#V([@ =I}5bA']BB! 
/~GT<-YǓ!!*1U(t ع J'j5,9Է#w6i+C;PhvJ OJh: qO)-\I IDAT:}e⢤05> #l2<ȶףIKtͱ[l4"Y1k.{&htBRـTƢv'St[-۶Tr8J!U )blu{)E*=cZEZlK'U~_35XEަPE7Q2+?DǽIDbu60mDʠi>v*Ԟ FOV 4[̞[di6ۛۏeFtqla˳eBQXc6R,u0,GXms!||N\ahJad{+"6oxLחX:Imgj^Bwe a|M=@zz8VGt${- !jSC5Iez3ê1ϵu^FfB"a2dAJ$k֛ Ӫ Ʃx7ʌZD3H7dr|;^^lDu ☽O4CKdJC }򵅘KU M4Zgzr Eq!AC)^0h6$mʪ>JXl޲Q]x6ƈMTu #i-T&MB7h ޹{V˩N.+nd6K>1%fweR[u N+T{eEN(m]ta^W ib_RtRmD@pF|4C#ì^j1 x^vAݬ4+56n%0lm.nX7%f[yh;j'"4Fim?|R:ߝCGyз!=bEfHKsl,r1YߗXv1~7<M47znnTtnkLȒLlV\t}_x'W:~n[Ҭ̞ xfJcek/P$V2Fܹ$\x+1oل4UҬ.@ߨ]7|K(.9&wC+[őd۞mqa46i,@dl:-$M C07+x:?8j*"equV6T-Q0 L4Fx&HQ%7سo3'o{sSG} B1h"TaXdKMJk.O+ [glhdӥZNRLfPhlL ir.}vf G1HcQqR19:ְL|*ō C֔TJ 6ReY_]'U* >s!hyf{P<h[ ج/{dIF5m2g*^ç%; x*@l\!䐂tN!7އ_k]ŀDBōBLMP&3 aAgi ]TFIt Հ8"J/-4GjO$4H̠*mD[}6IU#98t^\U\A1T50 QEDN 0`%ub*kqK",j9NDJ477*o\YT@7@[:X8l#i6__u;F r5MGx}1|S]Z046NTAuJ0mӷ{n<׮2=jyK}{cqM|5Iwad`~:Y^g]FjcVϾ_O>xށۧ*>zF 1XG,8`9ȫSaa\Z@ꠍck+jBfPH0"mfҭt}/ر>!>.T K-^\fd~8W *aҔ!o׮^dI3XCj5"BY枇!rmI$b&ƠxrO>Fvjs3<0?BVи\f|$K!09cw,3쿧ЋVEU446R_^XTA2b  FMUzHv'Br|6`WDZ6ǘMr] t|EacG4| s^Z_~}~vۏNU]kt8$Z!cADŽqeR\Ys}t6Ԩ/Q_  ALh 3?œk ޷Hu09}aCQ@ $?e}}t:Iȳ^葃T/~}s/4x<?/ ti; $~3t+JvWv ~XVk.t p%NPM gUTㆰk.J&#DJ?K'WX…dΉp=8|h?]Om~\ε^5uDEx{BEo?fBn@FR%Fb.@X xpl:= t"j t+1R`=R{|/qq:c4}I.X!L Z~EZ܈ipsI'Q A"z !"bG.$OC#1}<|BJ+Hwo[.~On2['V~FLfvH%脡iXEW7Ib#%Q#$rMI8.16>F1:>Ac&stfV^^bUAOd{ǘ9^9kahӮj}V_]Yl;g>Ycx][>n8=#Y;q+%?6T1w; %1~;jDh Dh]*IA~faH$Y.7Y v*|C|lfOcyZTuZED>MwB m##GCHRÔn.&QGFHCH}b ]n"0!Unc/w T!6Y.JN݈x\a4^yA1=td_0ђ&mlY7iocs-vE71یQupn-UϞ8towfŹW4sD02`h&g_c~@{!t҂o}* v6Y\Bp@ nA,lFRDjL2e~=Ye=EQ(ZyZ刅G ;\@?If7Hz~vtjHCa]%֖%AlPLά,I ͖936fqcM4̀=3Y|"Nvaf6OiA_΢gjvL=ҹ"Ik*ץ/kmtl8ة$Ã:ߋljˇ $M.C [!4#RgXA';o8$2iZt>PttKAU B/BN.;F+(BE1Q/O@ HA؍P4 Day?d_y`;.Y.GB&_AEH0400 F aE-DӨxR:QPq!>B |7&!{=Z+[B6OYގeuM?n/)&a<9ȵ \xW<)Ć$Qgd|J6(Mb@i6Ϝehbuٶs72pk 4/$C5x~W؅l jD.M29]};h5\~X䛯-sbrvIgs&mrkLSڸ⫝̸*/]24b2 aMn]}wnqmcnr;/1n^MCXr|dnb,d2, νeB * *1|7vsE/x@ B7L"`gI[z TP:Sy-'&@MBuR+B!ӓT="z/ pH5|* !kBvͨqa9AI= `2]E&1k2‹}}Q>( v ?DOk'*BV̞tY#Cxf)L8)Kb\n#Jq"H_s 
|/<[',hhaD1":7֙z`AqĚN,uuN6T*1=5ɵjk|£Y&!XWF"q=n)x~P"%hAHe|w*躆e(j*Hk6p,vRKvp K%*QjlAnW*T]qjW;tdsǽPD4pcD{UFu}F~>ӟd_\b0_-#cݸJ*UyEsL ,=ՉzPƄ Hi{}s5JJPVҐLg a)NQ,^Yu\Ss>PFD}UvZl[Hٕi'|O$g?1#dUTQEHАBA!Ϟ'AO!߀ƽUdkkF 5<<~x1V ֊uF'QkQLi\:q:v;VD3oBIZL$u6ҍU@'VS\i' KkkN *zβٯ"o~~~ﺇ'?!~c׶)*op1J,Ur#H" IJOmy8m]W  c:Wy:K>EW񯬁 vmd\æK. 4oۤMD߾hRaR]&t؝Iphm)EjRIh^]eդt[WH%H24t*+s4o*l56 o ^}m4C|;dzWLT^&ivOC<#1}+)W\^&?ΈGBPr$Wa-IBǞ[ɸ1"ҐR\&T8]t9pVX՘c`8mndwVǫ;\j)jD; :.3IB Eld2=AD*i`iF@G'FG U0}t㏎7,u$0@icuAcär5 R nk>%ÈXD*O}]lF >dH-n01lW!ihY9uu윢(QLq1BA}Jd#i\]B6G݉!~I3|]?aP"U A۴.^ :6t@N$I28#y702&QαPD&U0v`J{b;3-_Et#n/G*V'a[D u֗'!nںaY=٦^9uC2l;t3k$z-QU[]BQ#ZoEM7?tH?|rI{R̗ytm.>}=,c "!CHiUɤȤN>a@Ns Fa?de{NV |/[rme!#هhMd$\:A+i*)Ik4K9fOh5: rhY62 4JKWyϓ;zlf'{'e"zWU~rǛWc g[\^k33Ga~n$"OwN`uZHY Hu.`D:$n 8BQF>{,o!&O0Y <˹N?E.yC "P=\S(\op1V>fx0N#T_BQBVH$t7"+ĝ )4C zm~-޺ZGOGj6K$P­HR_}5 8tm-/?g;biϭVT< -pdoJ m .8{" HAoŻJDS*lBcoIT?2a`'M:UzNGSURXQi o:xLMq0Rx}CPqRXR] mCoe}Q4!zzYڃ D !:4Tb`X~vlg\bldnq}̎t\'\yTOG(+E&G5_F $F%_exC̷+7$5Aj)Ď|ZhDUn1]FEQ>QE(jL’ Z r׉*?Q${t U R߂ /-9ǿs>5̾LʳT0U&Ƨ99<̅+IZlZZ6{.(^k`}JOb<4#5T@v<8ĕ4]gd{' # _H,d8&{1DfH)64**D̿W~ ؊F،QBa>ć9<>oubc婗hWC4'"b-B1"PnUPi uV;Cի 0MA:* Qݹag#7q_G!c2y K ( e0qmLCsm ð٘q:O<B0֗s8IsP}gRA1+^ZJB#~ۏ#JF]_`"ŋ :@{}Nqb%09}OlrpN1**O&0qZ.n!5C)(c?Jlt`vC^<_u>`"ao"? 
QT8Jv ʟrX_]ƴ{ɵn%Gl+."2q,pcul 3^h:i QbQV(UX(1$nece2H NzjT7-F` R M]ξĮ/DЃ{ jF ̸Fm_%70HqcWY@!ÞM Ma$2:W@N2~$qNya1 - X:R^QNKb}"QuG'A`͌ TGSs}Tr.e9oӾA)"i 77 =T@lm҆fIЃ8dq֋W(:8b\>JmՀXՑ kE&F8zdƍYFt> |Q|N2t )F +Qn \7Ȼwrl&u+q:CͨD_^7BtB:^aǭû M5l[IP 's(|Dիx 0X3?7 6aE>^ZWMM&y{lߝҁo] T3$ӧCӷDO<8`yQG]ب40;@@<6+,`j[Xo9'5!ҩ.4+{4K}J`10 >kUR=ѷ=luL #5fd U*F;# )G1?j?BaxnL&Ϸ:cʥ2O7 0& uߵv\t<͌F983\c{cl_LI Da9W];u?TKoU]{οvok\\hQ>AǴkoz9g],.,Е\|E6~qw`̞>ͮgui5|uT(D&_(rSlܷk J9Sg N_Wyꐸ5`@a a4aY^\|0\0 \x.`n0$=89Ð(2%Z/kJ\{ F=y˭πCs:A+ĖH0m\e:u$M m TB -M#L+M8`_{ذw d!KjHmAў j!:qݽ9:hB Aج86co>blmoAs `y#|Eΰv O:D (젅EƜ^0j>!MyGHҤ=GxSX)9 ,iT_F\M a ;~LC0 kDAUs f#duFh3-&nϐ.|=_{7B_%_sGyO ډA=5qt` AJFL-!1B$DZdUdUJtw;1sHAIcץwp]]WGy Yba+Q6CC1vJjyIG`;޲ {ahfo>&\͋/R*"ĴR}'Ed -sy\AvvcqBqc9EVO5a"5%XLV||i)0Ef &زm/3//CT5t'$ԥ:q`&~'"1[O?|#)N 6:a IR Xx 8Dj<[If8Xp NU#I2 . 3<9L,Q2ϽeAn+m[:"YWTu:mƢ iL5ֈ|vMka :s>\y%zeBi~W'|{|?'*!?~F1'РD 0mo(C=~+bJȍ l'&Qd**Ez7ρ}{"Bq! Oo0;] +6k!o5f90iߵ}K CAw(gC¶qPER,ش!}D% bhea' /Tؼ1?m7c;~-w_ r==-aK6h:>Y<0-[J 2]Tg;#?8J$D74; 3Ta;6 L8I n) 2X,ȕ݌ZA>Ҁ8@$ k')B"10 h !^^;5%Fk >1зxe.<(1ve#O/YlbŽxt 7 z8j|Dd4Hg$sTjSع,-6ͱC`ښTTVa:.Zk7!zŤt:UH!~S1 ׭#8|nfqS0 'vh )~հ{@2{2צ7G5*/_ 04D(s⹈Yš Q`&e*KOv"v9ťK3d q>SQd c{{! sz)@!lQ拘dLnvrC&Rl! 
Ѩ \ Dl*ƸNţ:De O)r9X=D&2lm/hqdQ`zۮ HB8}{ Ǩ8a_A03S\Ǥ% [Yv"J<A-"SpHIPI LJ^Mi-)D\A2RLPOH]}~j_}Sls> b)rFyf2?`rwʦuy20/<cEb 6D8,Q piEZ>6iEcWo(`IBO˳ i<Zmb)dHS:6jJEhGo0I"6F[ڤO<,eݿny*]bc#|/o!6~Sȏ4.`f \?cYDe|v`̟9M "{MX$ 5:I7ba_ƧCKߦ,[kOǼ_kwr۸jmd"/{T- ?}O5M5n=MHSx&BAFt2%k'γ<7 w7> ^UV_Wdy=x ?[~Ɇ vȰ3w.b̡ &hlȅeo 6y{6dgNM6Jʲ1tO@i@kҙKKLlqQGCZ)JH ﮠꊻM8*Νɛ.}+}fL즜/hpk؀-j{pBD@ ޔr9f[u8jYDMز|9ar]nŵ sl!d6 i"HIr9﹑w_M#р8Xf[G}8jB_MM֨Vp$XDkb r;JU1O&J$3D@Fj%D!zYo[h"/4UX)d߆wk~f[euH`umNhJWƽɊnC~_ѻuzHA87fdm?1S~s[J~&Aڭ:JҔSk2aΰ갰ܠQIR)j&Rm(Phr2|lO>y4t|E')|l+% 4Z<l΢t4ۀnZԎ0}P,q~AZciZ3- : #Gٳ&VL͞A BsN2%OM1<6+9:翍^XCn(p_-W49XF](T;-:jXqQK-a2v* <Ȏ #YI\ۥ/tz|o ڥQm)f.GЉ"+ss|_t7Ss@F>-}ۍ<{mIdc׼}D&cb!R&*Qdr˘Ʋʝ6lf: 9֡)TB H1Dz>/fW RIOYqw}Qɸ4>i FAz:U 9$R8CΑ]UcpL^c)yS9ܲĝ] 'zfICpڧPf%DH( >oR[iŰ]¦bwO#f[H#Cuu ,/R-6\5QJB% 0ț+'^,iuQBe; Ed~Kt ѿ4JR#m39*g'_$$;&\C(M:<4Ud R9C3i ,ΦwqqhIP.tX>@&s"[2,k8)/?դl)MR)3?X!jgqkYiLYϒ21s􏭼22ٴ IDATxkQ90:XI4_x-5sǸq B3uV+Bj)Z=Uٽľ$v4S81I!^hJD&[&LBGck[Ay~~7AU4~c.Q166D Q>|Ō a w ?[Wuyk >k}MwS39qq E??`$ُW!_3̭W&d_E&\U)$.40WoQdq{}4>KkMGOo~2zy B'XqLt-|Qſ/2;̶1:9?(N7GAXvr_o<rV᱈=#m[ncf!,' /rc8B?':RHҴ6o x.R5G?fy0$yFI3$fD*5wһ*4LljD܏dٴe+M(K&c`0mB"۴0 *Mtg*  V vp6aSBQ-ș }+s w&v~3Vʙrn#v2BL~.vP+r4 Aj (rm HNg_:fk6S0Mt$QX44i)B$ B_qeTI]峟cc#q/s}8TI| q,C,|l87^EG|?ر!~ϑ$"a&hضDZ&ͺTv Rmd-!mT&j),:k`;&ZHШ$dm a\uRAFd ƹ/Ay =`껈=Bx;,.Ci&1R$Ԗڬ**˗:6:qQEﶭB"]֠8BәBmFEkHI~TѸ2i\G8Ap AІ2/<x͑0=D*:\/<}6 HBPF?g)YM |GBk$<4]{$K5g4HˢgdDYfeͥleF6oQ3d*+3Y:gޛFykjW/Mmfz_Om2#J\O݈32OB}N`;6R4(dۼ֭0OaH'exF6PY]e8V@K]fq3\~aUy;"Q5Yl1'ʫN"c}I3׷2J+ۄI`-2켡*MBĎ> `eҧYq26l#NZ]o6|Ro>YӶ3w_MY Z69'wۗ<\(u~Mc~&cg492ablvv(VıB5=JRI܏94j.aX&1:4N=RMYHj:WPB&CmqI]@yuC,og>gO#pE!jZ!hg Х}v)$"9:,O5IDL6OJD#aă_\Fwk^%oc9.4m ٍ|c'|<\&>}7i&&blc*K1΅"<|c+)>]ʶ-#?xS/ͳ\-qÿ=\;Gcyxu+dU>$?Cf트h$MZ&>+ uKMz 54_- `d)|ߤvOV9d>DRx72QAwxvﰸouXj3%]NčބT'HD]Iȩy(3HDNBR=$$BPh#\}(6"S!OQ}0Y~HqHCM$Q%c2"Z]fmA(NךDkiڄ+U:~\ I'yq!%~>HJԼ6ʖDQZ+^w'* Y9vCٳ6F\iaͫn,ծ,%oepxsS'TY#l8 ,ztv߅5܏J@b iY]S8=KT "揜n/X-|oN|LM3wHKwr+3 8RpfbM,^6;O'[H;Z62{GoX yOfH:Ǿb3Z#Wh,r3J_}']~w?,~tq}$e9БcXǦPG0OOR͐b,m>|d#Q-nVX) x/s'' wrcSl\By*ͥ\֡ 
-(HU7]%VS,"F KDK]KeImAoC(,vMFck [1L &ZHnT dLjKTq "S'ax.]OH HhR;2,VLխ!] n5HVy[,a(MH01BadCHs+ k[ N!]F2 Щ%"5pi,̟Y摖HAt"秨9E2bZ JЋT-H _Zy+3(m1G'z.eTٯ'RE/ U+@Ni^^aX~D{ lA&}c"y; pn&?';4Vltm lrp-L'ǩ]0d^mjr]uviB.-rc.$09U5>O|xSع"ds:ߏHvR.g!hW fglO^dbbM_`|| u3YN\W?QJmZ>秩u1pߨq(!0q(pLi ~'$Qr! ݕ_hUb&jQڜu2n̳_*7,ٞ#I#,ư!L)\)ϕw|JXTĥiHVj 65d,R!Y7K=pPULr:BLA/pn?p˄JzU18GkE08Xfv^:€$l}_٨I֎iW2$&a=EJhTbzMP ,beA'8h%@HSmIDaf;@t4/118qzߜ$ ,57!3RSr2Q_˚X( Kw%H8N"SAZH5(E8iw"1P°5zaBx'h1Ġ%4BheQ *Q~an?   WJ3xuRgY DOz;edM5˟N|։$exVI)ڤՀɣg7)!݉s&]9T62iQ삑R,YtAHL4ڬqeg]&% -W"MG[1Muo؂U\WV׏4a%B/R|/تoܶɓz+7]xZZOO yQ'#xK\|%&^{#wR|ThZzz|Dc{@4q\z;f``+uIZб@&9cHEB.ǯkdhaSKVNiis~Z U, mЬ7ɗJ䣈4i$X} Ye*l"33EXa7dZOsD㺐0Xhe"4[9$uU4ɈۼGfD5Jm &5oߩY6ɍ7I yJ!:Z=^֤@0Si\3-P icnғ/p#|?/މdwaLJ&:ҤR MP͈8D-hQ`[h6y]bnL3P.r~zTmGfH@Q0kIXe/rE>;B|xHoOc_s<]TZ$a!,(ry|+T.J^wZk J0M^ BjVrkኛ6rD9s9 n^k`Jk5V0m~dmdC1ˤGZEWQ4S:@cmU{90KrpL+Y[]c5<iʃ"A" [B,bN(FЉ`ox>_D .S[hU*g@FȌ[ݸ%BDJFo%њB"8w<`p{q;8y>pt?n7MpzM#Y/{odu}9w}kfu$DE")R[dJD9J*lJJ?\I*vIRNSX%(HA@>l`go9'_w?0}߽ۗKk X/A,\> k|i>a% 2)~^}i>~W\c^_{9x5+_[.Ea"iCOI,]zo2:,dUH>to(4 XMzX {H4=w߉&p +?+C-jx!z¼1)n(NjCD$I6ǂcmFUĄ{.x@8;+;Ԏ{^K8\i5nD$6Rm&/t۔~V8|}ax|koVo]bVźE e ךm>9aAy1Lw`Σz)N1]dR ܉µv Ryg}8ɐ!>{Zu:H sЗML'?u\A?O)qS?QƋ-CW+ĭoOȐazTfKb,xm#wR*qr/P V.PS!8rdG̶֫Ss΍p.F GMIH[ۣ 9T`~Ѓq?+/‡~oq9̾YrU"CiX7. >y4\zQ|E2i!T=ˁ(wFNP Lj~0~Jjk>gk-Ac$ilbP*2.! 
C AňeI |g#Of_GGySթNcla }P*C!e2D82u^= t;@M!#Kŕ(Si2K\+!58RbE٢XRNHVBlQFP%d>h+O )WD I!jpJF ]RHrF1bSPZ 4K>Ud%tki,J96-Wڈ1+ (mIq}~c˿"6H&?c_{1/wdCҖ6?sX®18#$j4q!YiyM!i7^Dkzɵ^ck6 h^"rknc]ĴxB|iX2?Z/.}uRpWY={#<0npO/r*Xk/.WH]E>}{9:'>5nv' ;I ( jzYǾG:e ?+N?BgN5Kw zk IL]LW;)O2gW<@p 8)]0/X[q ??9P ̼>^%]j]Foa{}?#y/fzi(IdB;$hȅW"ʇ) *,_oa謇4둤IVh aY2xG>z?p8D >o˯XڭN'tcOq8-7w_={fbf<~J8 $Kŷ._̯?O}f+Z1w IDATȱcGiLɓdQZJe>UEVW^B'k 5>Fez^0/_k1mnwK4>> o2v=GcZ;D_??Wm~/HKL`mSn-HG |U1XqBl;RHXç{_:)FfN?F=> w>zcwFWR TnEx:壇iҸ4.6YkpSW-D=pg9t1*F*5UN5Hc֢1PFB0%N1~=x _JoM%ܙ%)BXJzm,2g~/ }aQJͯ2"'Xw y\f(Y}x5zA[#Ģ2/^5(k5ׅ^ 羅$!iV1.P%^(I 4HiC]CaM iw ax|d} 8N7hҎROR ΁,u=EU7œXjn#^jt$xa>Y:F3>^CHc,Q_)I z c-iCA Ok"!m |DQlҟU}Km,= jW-^ : IDcHYk&KHԡzO/'#Ft p%SS)S;yt-g76iu+%$HI?꡻ ^b" It4V^~ z i!z?qᗰZ"܈}$\t2hw=νʵ}cG?A&iӜ}HIc$ԃq53ljwj4V{@5N=\x,t8t`O9p`FJ 4|RRFkOra rB rF.uD[1:R>Fy4WXPVdJe($& 5ͥ)sc5j2pX:{hE[@N E,ʣeQD$6T<;6 >q$NHͶk@c!c|Uw\Y \R$ ]*eIƠ3Cxiϭ#J @g0F@jQB'gJYݨ^V'QuzaIZmV6c,]!ӄeJA(JUf_?BRp؎blnp/{4:Bris9y?xɳpy~sq#w%)5s%iI1ؒ+8O}C0X 7">\Egy&Ia*KO?$Dx_NŹKGQTG%U>vg;9ݩOf$ũӐ]4E2zC|:OIw;dQ+O>c{ŗW2vL} fW,Mre3| !\_=žc}bbh=qMɃ?*:KqUZ:`Z-\Ut?w}bK>}xqV2fxO[tOMG?avxRKU%ŕƛsA&rYNd{~埓-X|ADӥZ lt=9qǎ[]4WH f`J1UCmE"$mq+";Pe-Rj,W8T%}wv /$)dɴLܡO/wWx6I?swPkk}e8#d~H_8Jyl%%nFXr4R,uiJ*o5tG9!3.-3c`XAէ,!̅Rm6O(* E 2)"˓ PF 6 <@Xlz`f@p(M$A{Si .4ӘD;**!SA/N1".z׹9DPG9mڱCS4^_Enp$`*#\?|Oɼ*/ļ>8=6^jx"=%IIR1AJ1y)sVKH)< ?DF`F00V" )$i J"*c5eVWTU$Kdv"\Bd& NPVFxn,M\!3# h.|.`$6w?0xxSjdHxJwG/K 7 GqGgm̶ZX̠KI@9$=a>)ѩ@I5UDFLH,kD0RKPnYcgG:@Xk|%\]&#V232FSIu;Tb+LR:x>dJn,/EDkIMG mS2P;4"*œRmpJ.I?C.Nt:X_?L CcIT# z]dM{;iwqf?i-/~}W;H8s PNTwYK&1O/ޟ84%t=O{=X7U'鴩t|v,dt4."6Vb(ji^ZĩiIzY82}N }9W^ &px7,r d GO˥o+ )bum)2JĽ7n122>9#..%(S 9JV18H|ky\×c͇tw"moXT2/{/կ}I*^,ݬ[1 Ś i3gBMII$#CξA3_ݧƩXe3!> <$$q)5dƧR;6ϪJQ7—/qqƧx^3, m,'Tl \bCnQՋAJd W2шFlLxQVCGssMuq_Mmp2 Mp8o:V`}xqA{ELSތhN)1j+LJ.JRiYF 8#F<.,r%ɃqҜ) xʧl!PRt&D_o;hK,3c^%g='|)&8VL^ϫ(būd…>_~-/1L)"Tɀ &q^9Ut6!9ւL>zًd-qeZ 9q"y$\{sqih,Ǐ&RL;OͿ|˴=n̫0y S g2=>EҡR iw;,䙃_piJ|w %H##i .I\Gp-[[Fۨ]2mqdWjB}b|R󕙷 
''JC3*.!4HVvheS,J_1,͓)3Ŏc]:o`(f.[ÛgZGIBJ)^[`݈MœnJ&׮Hlj'f 㪄ѭ,,G<5/Pd>9+] 1*Z-_淟w#I\Y0_.s9c(Y>ۀ evHG`p\*'> r`}_ϫ+]Uyy.X҉L>%e5#v.!]NnJOX XLQ’c~{d*G! FVjDŶS.|i>0KkM9zn,t@F 8 ~ۜY+Qe> r+BJþǘ9\N BQQfYoB ?fI>P稣y_!9J7iAy$O3:WN=Bor>6xoQD,7i69htYZl`mB()Y78bMЋ &[#D6gZXqMJ|-BX*֢Hiz0; 4Nƒ !Q2+0[m7Z, 6BY}ȍ JdFczM\mAK}#(: Hf 54<4Ƙ1@Bl2:/ Б_P'%WZ2+p$XNVR>POՄ6ŠBANwEN9hs}{+;qRϼM1F7*c_r9&Jrg(2a2u 1Ƃ`z]쿰H3w9d[)<<.*d giSi6 pa,2X)1ғPwLjP& F"iѓ+HŨ>*LRRYpI;y8\`;&udTYǤX,zrda+-miy Y:Hf$*PȌ ZFT[g(!1bͫߍD!fL2 luBZ&LNؐg +b0l!yQ9#lmNG 00M ! Ӕ;.d^N&5 AVdCs I]t v\oh #v}U^e(K -plV &ű,JXP!p}QbI'<˥v/H2Qw63ӉL֫fr\FڠwGZuVXScdRBڣ#1X|;=zI"Gz4a>UD5piGHOhXT͞)HblaG!%fi֐\Iȃ2@4H-IT3^ C q|_4x"AT-t*&=4•q!֚M׀KS] DYPaiZ7XWf=h-lN<íU=iL:sLAkٯ*tŬL R1)2CzVΧrc}pLBZFrыpdy0u=pӭM 5 2Du:JI }{6_{@ei6йqoް#rwixdy߫ͨ*Y2kAKB*s ߘϼNWkmeYȗZP;v,>+cyy|2f:H BXB1aG]^Y.FOy魰*$QBKMl`ۖ@N۩A QcyǺ {x ځ)ǮtHYJj׾}GkƁRfr|,<\}K1WEhrc0u. |Y=ŨJ3, Al -yI/=UHBXPomIprJ\$ۅ,8X!'A._td IDAT} gآs!!-d,h)@YADօrG?Ļ:GaN!7qrY*d!K\X"_C] eb3 ưY̑+_ ;Oo~9f<_i:s1p4^+l[ն8Êgh7w&>C@{ll[xb7EE{j3 >wBa[a#țF mqf4q lZd)QGhɎ#kF>kr䵁?ϯ"E]HACxBHq?fIi~-J"Us_L7k Ge>MAAe#f2yv^ n+Mf뾛[|Q8L5rØ>[^\i˘aݐ3)m^@{=s+mk "{",uE㥘m'߇ZMsp9f9mhH Άb|ֿ,Ps[Am=ˣhsQk.ևXX8B?nGngIYm[ Z甴)^h,k7`~ O 6ݠw-vu8c1BIm_PТ`loJ%,v[_ȡF"]'e6jm~'_%c')#GdZeS6oK<-g.6cl זnTB z48"DE@`nuM=;o?nb@Qy7},E;͜msCq!w gby܋u}M ao6N ϭFs9:[,&)!{^҅K{Az "<[?[*0zC~^:?gm+}ŎDv>l>NcPT|Zamu Lul DgY";({ѣ%ES 8!;d ֒w*e}lsYv[ۜ[6ngyi ɗގكUi78)N&D8+,,f[1#E {Nk,xmoe%kIw X[ _;jkw,qo#?fĽP+Ebv/g9lmv@^u;ɒ}6h*&RѱCơfYvSY8XM#HګqwchZ9a9'Fv:9GnRf T +޸Pnpm rߌ# ;[hܾ^5G=K ^o-1ĿCe,I[{70V3u֓ԪʑH+a)fF23 ZQDZۍh XbM0_u9O7 Y_ &3n!#4^ږu؈:N>Ś8`= щUmwԿrgFyί_s;mTcQ:ynb۰6+sv́ 4h z]GG\|)$%PrUóWb27#p0)H& Q~A_ra6aHk^dX67xY]D'PgNiNH>+T-KqX{7?&n4ۯng}$;BmӓV&MNЈAvlwPN~Tèܫ `KN(ML3h^hM+*čqJQ0gm{'9;Ѻ ͘M=wkl H!bMq d BjP7"`Ŗʿ~Jd*Xy~~_ZEAnhWu@?8 g3F&B%7P[# 2T)\z ww ѥ|{ub+^KS.'˚_=Yc*-5 1(ZrLMUI 1,|¡I`5~*_]tHaE3ȮC! 
0kCݮt[ ;bklq#B[Tn.=)`wQu+f>7f3O ZEKqm(Y;d(>kvmT|lQgZvix^c[tԮsmKAA_PmClbfubfOvʨm67ﶀdmms9nn912ooo [vMaBʞ;JZυ_oC|{vm Y|2IT21~4Ano [ayx 1 <і8ሀ`ƍ}ܕ3/M_}]^ĩ>̫_:[>du 4U_( %xkSw:EL&mɉ;ytlO,~% Ȭ!Z=(do> cVά!g.ƛ+W+e*ƹrFD>9UK9zǍ$iߺۧ5 O^RWQN(cF5}x,[ V;[G_/ yaGjgRVn1@tN |N֯n+*֍skˏbwqںFۻg\Fǵy+gm&#l@bq=[Zv=Q9nR,ys<#Fj['tuq][FtX2osCCa"oK寮Ͽ^ؽ h] k5tG_fc9yUxYO|_OCj̳/,zMp-|gݧ4s1bG*&gd>OoL971<ԭ;RJ6ہFE1y(WJ/OQ.3^e1JkS'?5,#2'G:?g?cU{<<>MXq|e#<ės.7wSj 4h7oRp Q|9[ZuCf^%#@Zw_Z%>C&OJވ 2#joF ϊ",̈V{f͵Rign7}(.Cr[ {+!=)2~7 ݄vran}z*\ܘVai0{]uirwn|S}xfwI2iSa]NfY(~EF־`CpW#?׸txg٥ @*5e 3c'?+gqm_!JpR2 =VedÜ/]BB~v:@&=s9k$eQCZ5w,?юJ=+5$?# N |QMؔgDIHT JQ5uͺMIߌ7wfKˬ Wɍ¶^Q{djvo嶀F_ӭ;I\|e7@ seڸlzyAwt M9#%{ֱ =~ܙ ;L@FXUݜd3ٳ;tU'Mo|*g^6NVVĮΈhl~9b6~aNVvl vEwb&`x!6ɼ_Amb[ڼ2 ^'bZ vM*z=mc&lC={ 3E7.ECv_h@썾EXJɕ<8k4FD8Ww9+uI$C1q ֹ<&`J(!<ȻNs~~W7-Mw~$ v{{}1ՋyQ$ iˆ)0JN12Š1s܄KI5e)uwQǘA}?1w_}NoKb:g#EKV ~۸荼w&>_YNNE Y˙ņڤ{yq~&b?-ϩrXfYgٷ)k$FJ&Ϻ%{lfSlf|m;e͡['ic,l8Sjn퐝'3[6љ99+*a_`,x(Nv Ұ&flQlHibғ;]w${}w /Xk0ƛ3 =G֦=4|Խm1rn]Mlp˞Swq|;9)7dm5yYT!ɽ{!H$yݨU8 .ftٯ/-w%:K'+ӐN~˸J hA"f!bkXN[Lf@m '.W`;wew?\8ztI9<|.Lw@c쐷1p+4cSSBLƏL3FH6[ 4Zc< 1H٩Y)?o?ŇQ~zeLQ>XK6bR"{Is\{9Uu/_Ҟ/{KWx#k?SI"(M{:A=FMx^3Fs4!ӊ3n9[Fla.}>0/Au7ph!# !a6v-tٱ1xaͶو.U`Ju2#qC"2 K"^tNq#QKΘ#@rZ*gP^"{rR vtgܛ̟MFwL46k)AԘ}2ëf"ʕKtV7%9L9 ! 
{A}{A"`xN<&{=[oSjzG8V$u5ɨi(NQ.ߨ(BQ7 zgi{@&0neT, ߄*Mt]dPh,hof҃qr:Ά7 2M\}ҧtNT{%n9plpXc7sm%КJ̺m]ߘM@g$hM7hLN˖ϝxk[|J:NAez\3yƲm.Wq$|iֿ71泱o%לzRt 5kN8ec#an<'ä3DDŽN`~Hzb6M[ r36 RKsI Ax'x_s?yDT/z=\S9^v-THdJg>Es:)"Y 1=qr}?|[?Ԉ&ssTGg3LhY*6̊>w>lPcz.y7ez#7Xtƌm.O( \ً%ĔsՋ=<vHq{$0ҡ0폊tӲ2F̸1y681` hX;q2 e?>dgj TnnXVMk7^C"tDYύ=j{q6ٺfIukwGGG\~ #_Ui .Z-֍u{9B`m5uĸ54C} ܸ7c m!!;89=#XxsUW1lبsLIMllbm"̨㽦}9'q( _eq+VV^Mm/0#H2%&LNVF>P6A[N`}^aol2b5Iv';t7Re;ܗ.UnH=}:delT>+l@Me\1DR뺮-⫊k|1cjڶ{OJLk {Ko4 c ;iQ-)-=[nĔ gal_UXc1]KUUГRft}D>RU{ڮ¸fl~c3C⽣l5} @]Urkbȉ>JUU1u8)w-u]X̩zfzsn&cRNOOx"'׹|jBLi7/xֶ)E{M1d˕:ehMbځ;Xm=Un Sz{2貕D ?>7k=42b?{ IDATS+(>!@"O)>ا?i5`G~jaAR7{_}3~:5; ѓ%=Pq<#Oym<{/aHxg3VJ[˪gNHi7.Kz&]_!=ݘ)ev+`>S+}͐^WR絪%(YYd c6Ɲdlfڵ8O)PU4)%2!Bt.2?_] =a%BfOU|Oth-$ܨ]i9òc lRzܟk-?1}2;Qfؽn~ fF-&[C ~8˥{I9Vr-iZ "nI Ƭ}UOqD/^ H٧0R'I#Fa%M 1bF?i*LbHMuY%jCLKc)0,c2j1_ej{%Mqɩ)R#nJn&!Zl\X}X*| a;h_k۾6ىΔR"-,6Tʔ4z ;0ؘtC9<42=eI?a<Yq~L 7~JW~\t|tqߥp4mo"~=w>9cM) $ڿ#sWքcZ]{O|}\=_u$jS 2w7([uP[eJb`y|;O؍Gm!#=mmA0 )wϧDӮ?}KޔT**uۏ1[fq .;?mu1eӴ^St7 H׾u7{,؎=i]捁~Fn03u{Yl i[dsh89hlhFV˚u5x);^\˪{jp9ew>[ΘR5zƘa3PV)'rl(qdJ:}r7u _͹/dBì{_rvzsA:sMJ;!lMF\&z7ϛ):p*Gr5f++]/[LY9o |}gOks3GP+UNQ-qm;Ousƚft6+lh?$gΕ[)+n g)Pv١]vXDtl4bw9#m-6N-NU7 z!dEdP؍"2yI%i! 6ùu"qr.I+{-_}FΞ I鳟<.#TⰮz}w?tXOncose+q d3iK,o|7~|Ǹ/W?!쵁; A"HUeuK=? 
G 섙(.^WewS `;z$VEM] MǬOwDH.'>Iy N-T  b/ %sv'^ҹ|Kx~cՖժs'סdʈ*7 X|#;{;qǦa/ě O[ |Rw9/ ^;48+${U'#=g=ʇ}S_>>ȍ q?{ho䅜/.ʃA=?oVK^glMX}0þpS8{rf"ikM/|__;&0{7yƏM_m[="=砊tܲQsFxk_߿oo{G?sh}^[z 4ZZliuT`{i?@7{'~˷{N_c@S^AFhx7K|!w佦rkjksK|S女f5)",(2 ͤqovsl (̧Y;~łwsx꓿ccI-?Vi*Yl9Ns^fF|k~"\!=Hzoxc^bq*|G@:KOdR$ZU9n!:#|_Ok|a4Aj NfSʡĖ FG`F 1\laTJĠ"EAHVҠ,RMTe:[~%{`%7wCch#iu"D$hsT~b' 8|L߹04cQ5ƱDvZb6lq,I{VFMiI(u$k):(Ed_RcN>oݭP쯔^H)Q,+YvRU._cEFl(wD*n´4,R.秆{J Qc.G-cf.2ן;[ki8M"i9WCZ~]1¢Au3Oy 2}밞){&M1u]rvZ{`krUF1Y,ɴ~Xu42sSکY{3١mzS-n-(;Az}+wKeK$1ٯ|u-}ҍ[.5e*)D-[͔x6M{ T?OW>9ߚqe Α6Qw蟆21ƚ 7Dْ1b g L>sƸf3l2yvkOhb,>=6ZCFs'_G ~GS7'eB F̴لJ@H 8[AhZگ{%jxRWwv[3_W{j[8eCA㐿][ R:6eRm`.s׫2Ey˛9|Nsm-mҦ,Su-Q*sbu͂ ւ7u̟o?(g}BK&V >Y$]e)= tp$' }Gp/S}K=kH逐*A%q.st=Ms6` j~?gKfءF" " rX"JO, @舨$(BQ<`,=kC3HaStb|YR 89Z2(.}? ]1ҧ@4*ҥ@*G$D1D#D6Ԁ; *sa (&zc+ӖݢzibPv4l "f0!#)183({"κ1 1zD TJ5 IwB(cONCɚmk`<8'r:S"hb*Q3ܑ3y49!iJ&쓦 ~'<6ߕϞg3/MIW egR&>g;ҸW;8XHzZwXQ`xl BIṌ{`0#hIeKkt}0*k"a>ʺbAsdvm['3a 9!P1@[L:9v'kBEge Ѳ65 U8;D,1f!THAoӵQDMcvO엡u`-3ƎIGaQ#f@7tq3[S@LͶC٠#`gAc=dк2m[hybhmSmѮD<6ن P쮈!84X'5tG`>S{E%&t gp5M>0 JDDQ ] K13rjㆅ%e廬2hIL5vqZl>@?꼔D\ؔsG7G j==;@{Yc_rLh COxD*^xV&0!3"J1@1Pi{XB"x&\m1$cXk=b(>FbtF0tO}䐜"93:[AԆOHMʓU60V ȱd+'49NGQb v#~9?Ṅ?8=`92\BxcN9{ m[%|g/-tp&N13厗Ά<|I}F/;iF~\?}玟"Ŗ2-ҷK_cSLcpeo~G]!yܴ5Y&Z{:Pކ|'o~CfHŷz'L3"pE1-R JQ25 VՉ_>{d#%x?W Wn!mwVsfG5?dW瞀"Oå7|Ǘ;xWt;_կ7?Coyx jTbٴ\Z2Z}Rq%v=1,%yVZ`ZQ9ϲ[R9OwXkw } #j8^*Grz6;YڮCWt'j"Xv-{B9ax aX1`ˆ"b065= XKfCRr]@ eX R#Q>v+,:N 5BDHeQ8-FU"r*"<34ھZjY|e޵kbCvbX-ymK .۔׬=2^̗y ГNVKZc+O$QЎjHI" CF0JyR@ԼNhV4F~ѕi )FBJ!+L;G:KXQ6#]H86?r6+X<& JDr:qkXv-mec,6N s)fj| ¢]ac`}Yv=\?=|8?W5'9WZLVh=>=U}J,*]GBhC [CN| ~2QtYڮǹ.1ѥ ]_Tu89].ц)}b$L`1=ӭrb>(9Xˢkc8].$v-)D'",bAX,X_jx_qm~JkV}|"9â]1,qjqr5ڊY]>FСfmǺ3`!%w_W}CZP}̤*Y9eRR(ٚ.t)fulb )@d1#!8A]aɪ)dOU&o(NmL*[mV>]u+D1U"Edk,XǪ TU@\IAį=}Keaq-U88rTO˶҅Ї>N7 {ۿǞ0ꔣ|ʝ/wϸ᷼ǟ$ǟuʭӍď~%N"P2)F[\\хoXlT"ŐG N D30_-xpbkVzV&(I({Lp1e9Sr]Gskǹ+.i9sxRUc`&i9jD,b波*x_4ю IDATT Jytj*b̙=o=Ikt'9:OO8RJ,bcJ=l[W@5*yc:fv0C5fubjG0y1|0',8׉.΢ ]lvPFQ1Ubrtp!]^$zU orwl3"W!U,VKfԇ%M݌敫9V4u3Ղnω1(Y"M5r^G$/g}M_cd%<cb">dZPg-v 
v쐔]bV7mK< Cm Q/! k=F,!&݈z*!l\Ďc\&W5KטKy;XBa޷4ɾ5%jUL&>B9=UUc҆Yub9WчDS5, J9NNO8h2x\.9]`ѷ4UC:jF;,+jWa%Y/u>Sv!TY&YUgWL&VKj_5g16[?;x/N}d?%ͤw9}6q0|Ū Xc| :suHq*fUM*ZGu.1>%f JE |Spe#UM,UKG! r1?9T\XiD{j}8.@[F1! ގ|jERʵU>k7d%)d2(W/{?nl3lA):#wu+o W|y==i>w {_r 40s˓$KdV K\="c.EЎzFJY<'[ޛ =J=ih+g3xk^ϣAjpqgC<(?eV7yλ=!4^JyS&{B^|V++sMQRaFϚ|B۷3mK]U`R\nb ]#jsX-sPn ]f06/ }XՂWr;K*PJZ ]IY䄣jg16?]Q tNS7 J1F\1쩜SgX"e̘ƼN(h*cK4? w%0o||~BS}"Usl2d>Y*e2s5nE%XJ6u]5HoHIÔg):XX8B TgR ֤RWhfd" m$*֕=h#J?5'$zPj׼ erY1Z۹|e[#hxř, V{H>tΣv7s2so{RɁg|]V$`;ZTPvE]9RVSj ]Qs﬙q.C2riY|xR3yN8+*WѵlIJX̩S.ٌ}mb1砀6rr`4MbD5e%ЩjI)҆o-q\?N,ι:f|1gVϐmFuH%˶C VŷDyT,Ù4&>* %&2иb!acb!ۊ7X0ε[3l)l_eS.פLYcL&DM>k?7$NqTrUP8g8)v5U !2fHS!pS.M]SϪ_QfECL<̶3E]bVcL r}L-f,XaI N : aKa1c '3|&ʔ$3v @ eb'}}m~昪|^$R 4g.8gXL@=xk8pU䠪1)bAS#)se@jst݊91tݒ͝ᳪVL'~I gon5@;j4He )v8#8>a ~d_[ K1k*RD$ǣ1c2_PyWN ĞA tqLSUיU3Ns9F.r|~s%gnFGcNxý秹Q :7;5k,{i=ͅ#"ĜL@K'}<)l3+lR}ȥG?'Cy{/3ȫ#V/q_CyCsxzf+s8-}qUM2|*'Ooߟ%o;0TO5\;>ڊÚEu4FǞɬɤ˔QĮۿ[+y./ 粸|vrΡ33h,)r^kfSDT ǻdjcO<780X\*'v^,9f Uac3k d68fB݊#?C9epPcE5גdLEC5g"Fc8"VhۖY.UE׵$Όr$Q[O_iVr8h4at֗2 8$gn0ޕ0A Ib 4gb%v]~}E:'Ӷ=mi!t+|Bs@=[Qۊo\yLb1W,:}ϭhl1ضݔLb3 HX9#k=vA㪒΁Ī]VI!p||za=+ ^r%4dbC4a(~, HudIZ*f-W;Q<93e%H9,z&uX2v%2xq8ɁFe]Tz{ -əf# J}eAisR*9칔uNl(Z|ɼz9%KjM}oL29Ce]2%l*W TMWT&"03"03}ߖ:{WЗIq*AՄbR˺o>\ZMɐ1PUe ē/S,_l#U,!Am)wC՗5nmMާdL]ːEg2 |rx 6U!B 6W hٳmϽ2xee<8ͰɄB=uCL9c#)J*o}!'2<,O~KZ]R97f·^4gRb\rP5+t9K ̗M1("0FU5}{JwX#(՜r~R+F5ya8 M1WY8o}_z2SZ:Ӕ6A*3 \Jۆh>K^L!0 jWB+[LޯbqCBhdcO4-(;V ֺ?^0+\0,3U׵$6c HS (1PWa\9e%{ i\+k2`*g2[0.Dw|B#P{S(D%v̕DE$A,ʯ3brRߌQ4D+%EHݨllII8o V_F=Ҹ I8FM%vL׮9&|D EdP1M&}Ep6⌢[H)sB-sUj6-\ir|PRk^ti/1Ye Wo{w~}<cZpO}?p7ׯ=˓O>j;r:_p1g+8n;.ЭNr)?Wo|:r`wy?E_U\}a{ z+bҔ壽ޭ.qlTOjwpaRX w]$/c!|pk}ƿGGGv*呺y_U"m, -,8C[HIE;F%:]}?̻ IVͪݥZ#1yS310X.t nY4vLw(]B&嚽b.:{~M=iE`Xa=V )o(]0 T'inEaXQ)Z*Vl#Jjc!0ʒ(%$qYJ%[B2fmIfSVaj! 
JcYWҳ]!}\@`:y>+"Mk1m-շw0]IB؂X%$szq= ]o(YM^T9^8S-y-U~%v]R8d%\Wd)2>`B8ڇal0J"mM[Zk2%b)$aR^Sdv^MlABS9ʊRi^c 9xJق㊯u`(W &ZTCIIa juΠDk[c|PZh?k-eZZ:J"rtIBV= c|^ka 2y޷-.!0r~^ +RSaT`[:Iyb+Պ=JqoȾFHeis,i!*tܴ3hy0d1Qfvjy4<5D881v.':ӪAm7!R ́ :ɊRȓ 0p|4tCh%~OZd'W@We L69v Z64Oƙ-N0rk" -X*#Q[Oc%alA%gR9}$I>_@ 5#mښ!,-ŮzjbQN'AdkߏB. ξ[d!jjZ٘ɽ;9_bEgE(iB1-T(m82L$(!p|Vc tQ.}TTY]8#17 IDATJ)aZ(r*a^cO+T(x vi.WDvS݄k5H:>|.; |ǴRUH%w_RvZTt,6d5Qu <"' p׏+zʊޥ$az Mf-ŖK I{kA,uN nahŒZ[-?,uD9jQVʶ Ɋ-i0gh_j F[xl qe~F$r挅"Z4Hz|rbl,;[Z3P C^/Cg0v:Z@\C[hEARhMղ5n1P.#$">BTB(5=m{'nD5Z0z7fOjhgk\JtdTUAh$Ί#՚EuUj%acȸ6a'1b-'4@ IRsN5 Bxi*щrHZbp\:J0ZgQ8-?20&$*.$rtjv4lC<B1gUcס7Š`9z"N&m_YSEti>W#i~D>L!4xXęsح)M\B+ 1n)WwĖ˨ji/R=ozl\9nOD1_4!ԏ69H@RhE IPl`s!4)v e\\.&&i 7l<\5іk8bm}ϓhS|m]8"׽]q憭 ?b'6J9hA7Lݍ(w\* XYZ[Ab 0ƀVkB ct0kSߋs1N`l4NIE֒bmΝ^"  aX& Ot"]Gpi\ƘY 0( Xd(KyI]ZRV01HH78AJnh-bժ2C9-R)\SN%%PDl뉚hu|>5k:-HYkR&qׇu&!gͩԪuCFiC{Qơ3wَ}hUF+(mcB~ۉ&mi!s!P31ՊD e?z,nC+ %fNa^* ƞlBgO,\HL9៕~ l y4f*lD!-bt(ּ2 ֮Db^0&={Lc|;V0aY(%Rx^o c'Mf.B]&cGO{nf5BG+^?X(2j?R5@w3fvbǽvϸ3]3}|{HNJ+09/U+ hmyaF[N\8uAD6r̗.E7@٧Fe-E@-JP> ;#BeVZ;Yb%'N Vܻ;~;sGs G3P%ɪ va7k/eƤn T)њ/l롭ckBf-RҍUI dNQ}=? 
Cr\0*Uqc??İqVB\P(Pװ=b'YsAz fʁq}1N\}/r#!lxnKiJGw+A_oXOGg+rbTA1W-GE)抴G{yQc2νhuhST0 Ԯ?zIn!LF甉tΚP`dEPϱ ]!=\j.q]4tVJ"6\ż~5#]nƲ\ǧs%1@h<'vī$'ng^"\g0VLŕ?y%fp'B*2րK7WV1yD"y~L1kzPJ1nDARD5 F)*JP}!"k#6qI!_ r\c$IDj(C.BX/ʰ#ޞEoT2g Ri YttFww7z0s4 =Ij/9 =LOzAؖ9{P9r_:w& FJ}?#U ᓻgACd9AMg#3̄qctaFZjS;ɶ婐%kS=z`alp%*Itf^ORh|]VnVZY; b+`0-Rm3ưwG(c@L9 m8`ˬY'n¥“Jc ];"iBy>"c'P ˬ[Wa|u=(Y2(j hS+Q=2ެֿɋ/:p`6ӹױz՚!s៵\*sWdL~cxۼqٛ|+,oy1~;m7~$[d5t<&M{ >Ƀ<2&pQoѾbū\} m Gy(ӦMr_oo/d!8q<61cF7=.de``„q\Ǝ]27r/BEw~dh7ty1bD`60vT蝯=E ̦dc0hcpilrkELQ[I'Jy!=Ouيl^'\ #X8"P OAoW=q{Aǂѣdzer vVXiÆk~xrbEB{ 7>ލ^ b #f`E)RGYYHI,4mLko!s9e+Aռ%uu2mYh!(Ay1WKu4;Mc}gԭ>2ʠ & 6ϯc4py70 H4&SI6uAaPh"_BWys.3(?o;,z&!9p =vr[vcǍVKcIS?DW]E5z7HČڵ1c:5[yJBo6 ~@9|ZND#y^CCd\ U;d#gbE:Ƒ̈́a`5ͯ++QL#Y DQLUJ+Sv8r4xvO=,_^ql#g [xS$Q|g/0Lem?zkON濿#vy~5Gw+VpQo1Nx]cA 1G˰gpqsN@C]7y1s߰^GqB?2W=jiT1~֓M\qev:}=OY2M-o^YmAM1o\B`18YxHj3A&PmL ,ot6Xk7J;ԫ ^@5hXݷ1ܰ|>w9w ۋy{^X2yu;aE+_eܩ_ۏ* J̆ (_1UIߋ O_/^8 "<歌s[-ЏTSCN霷^[o#B1ile‰q{:H 짝X} ʀbH) =׋'Y+YrTzXk\$Ij9(?nLEh'7qfTW㥪ٛ[[k)f+իC `A)/Q#qjv|Rj Gq'O`ѢM[{]ȂO6?e$>s}3Ͽ>߇vߙ^[H1>x,xq# xigw6n1c,UG.0c6{Cv7*O~ìXo:?N4wI}sl[ghTKlK/i;]g>1~hmi??yvpᅗǩvwQc| 'p57aaIr]qWȆ =Mn,#,lpjDluwBA64V^7hh|_:N4vL=D&iBocZc&-_~1 hϝ"R-'ZzP|Z"j /^1K>{ 2>eZãދƺM6~~Yvy{p>ڝJ+cM IDATsYb'tcF Z6R?[ƁbH< f%\8a)IЄ2mAmzHtLר|!U%A\gu˖kʆǽ%nᴧɼ ϐj`jDݳsll]vϑa12';xCLn?tLZ g1Kп9r)gf=34܍1~=߿/mJwe'~{/2rʏ5k.xefΜ{ϝwW'3gn9F#?Zz'k׬vvi9묓0q<=l~ugӧC=;yu& M=xƛ12o%\z[>dv YYjmh|#$D$I6 I$t3y Ť;jWq3gO vbQt-cF[y'~z:O߁E2~tz׭a̸I|z nJOn E B$(!*'Hb SZB A%HZ'-ҍs*d-sIT1A^Yj㔧X8˻}$ZwZ#[XO{' o+P]\le}cįx`ǣ߾ˡ5f{?*ADlcG4yM9@Iȓcݣ+Xr tKAWy dV["VOf1BKRblV & Sel>mA" k+8GU^x5kZ ʵ &n!܎f0Xx eQ{YOҽ D׿m?T {cu=[cXЃ,LqGֽ&kg  BUܲ ݖ9R.–oBv視}fz$A#cu oM[d5^Zϻ|L:mgtW_uo潕L^t_t;︇/Ղ]7p㍷o~{'Bt1qN;,.|2 6y"Dא=1 俿#f8z?rQ1qlr<|}=d^sMޫɫ̎_*=]^n}[̘+;'}g+Jv|{?CE.ñW~yDQ--E=7\w%\{Ō78N8#覂ϾźI||jƍޞ5fr~7Btq!򳟟ʣ.$>roo|$>y.2.]ή-*{!8܋~G D /,Sa!8rÏe=#M4?sO~&2q3BN>wt4?/(-z~G>pچP뿾ˍ7J__Ȓ?2:rPUG8fsc`u!:Խ9\^غ!Рaa~Eq6oSlOiϷ' 8B"hoiCA%(*8Ѵ ybcSSvujg`f< ʣU/ yu+{Xښ@Lo /x=e<Vyێ)F=v,+Zr$[necgiIک[(U E$jQ"C S O%0@dW"fR F2nN馥ps㗿 
wy/aX7;9?rWַ3d.7[{C>+_<_׳~S9❇ $c.쾣>bx9,Y>.*.*~˓?8vަ ^sdznyb:קɯO9/Cpʯ'cfŋ_=+O>?Uח9͇r;i-I \S"oqͯbJ>5Q-n.n.0,g}|_[n.Okٛ5v!ykk}mR85ݶ{s}隆3n{{8wp٧k'XV9l|Q|Q~S3g;.ji^RD7(7g0oȹxj8o FD3ՒJt E!yYE(6mĆKV0kq1NJ疰~2G1m:.; Ln3ள`)3ݓ9G!sh ѨS1 Qhki(ڄn}οQ}@miq̚5kX Zhh6(6HA&=\IkAkV C{=SI>^:I}N"qBJ }+W1smyч۰~-KAӫM(2n.G d]J J &% !U6߷/O3 %!Cq`k{0S'@у8bgutOJ__?g+ ڐ)Zsy'oJ_pYqA50 yY{Gr=׳>?I7oA~1goKwu?1cc굼=m,_*w@ڷk9q?O7ӷaC/|fz{8'0eD{wrUM^я|~a4/bǾ9c?~lSQG?<-J>m#3g$I&Qo2L,"MTA$٪RqSLMәzcr)dv z{ PKb"G!(ڏN4-me "aCT1Z`]"&` jlP 2fT'8be%DG;;;qn HbM"g\{>>6(< Gr ^dgY A'a |*M9 6kMo~&l73˟$3@#׍O Te!ٍUo'@'R땒$q3,Z-?w ȓ sڲ:'h 5I別W='M_='N3{2-DF" I{&JR0-a+GMmǔ?=3ݍ߇hB,xƆ(h I磤 B8FkJT#Iڂ"XǴX0ro*o+UyqVmIG>\\.Ե[>oY\~Ź,]$W]}~}+VdDzȟ .8/>̙=`;i=ϙ=߬X[⥗z1U_~8tOq]qgsgr qW>T>Ūq͵_7lЃyL:m6_[}ta3r |S~r ͷ\ΪՋ7@5\eOwV(Xz/?w}=˖?)뮽)@ |.: /:/|yu\|9\p\}ͅ|{_oz?XbrGOnݴGH+W5#fچg=ĪUR۬ZqݣyXj]]Y_&Kη~7'.οLBN:gy5u '}gMyg(.\,}g}:arw؎UUq?à_ Kaƌi̞=bW}|+rɿ4o;wm_Os1}zݿmz~POZ߳lٓ,XxR=}hsdds$x4SKkrW>tC2^ ktvtP3qdCM[H{Iڊ- tOdexU f٫GvJt /_|eO0=)1#mY@$#1>C%VBg26 ϝ~ w\SX2"+77M}Ⱥ$:fwg)r$Ζ&uЍtQ^kd*l)gOu++ԝz7&D_QF#$ H^ Y Y$͚hI]i|IIBb W/tWsoo2ܺ8L=k;O]p+VLIR7>~8B)%0{I0xGzM0& */4ശ|朌zuskv38ihkkSR{y鱿=z;4i"7r%tLf-ر:=Z-ܬ"c)+MB15\D[k+QG|DW\qmjZ(i>5B|c짧00`ׯ`!w7߅3zHݣVe\)Ř1;;O3gԋ@yG+yQ>眙mI- 0`:! 6[ĎuM~I%&`PlcL3fl:B $jUVos{-JTy^ع3sgSgmdnzu5կ~e˖wUݶiy.-- |_iʼys8ylذ% Cx}0hʼn}oY?<\WWzftvpÍWq)fcAZZrM\m.|,3;/~Ĺ\cw_^[lbyC/O˼}oňWaGKQ(Ri i5K{~ FX,ִ6NP* .f#΃,6@RNOWf*]zӓءSOZQ;3À-qXW'K2Z"PkAzv0s7bk7tF pVat*r\<m7c(`a=#r:ϓ2JQϲ A,JvZpCJGŪqPRbѡA*Y%^NM4'9 F:cCˆ#VŪ2(κEvnJBT1} frЉGZ҂# !!'xwԵ厵tm]D.;sq|b9Ø5-[(Wv:>ez&[wwpooW-gv%g}:qBD^K~k5C\ǟN;ЃX⤺y.…TFyc%`{nN~{-k9yʼnsz2ҙtAދ_x{Z7mx}}Oݗ̦M[(ʸ˶a?^W^szMOU魼7.?~\>b%_8|9GߩzחFՖb5kG<0>ds;wV]ڵ8r/N9'_c9 M\yr<#,Z8TW"^>XB!JV1D~J*2M"J9 .Qth!a ;B!OX̣-\ !̸vI(+ViWdϽOw@o>w^!E%Z7XÄb!^:Et.\FIS*2nY<"H'ֶ$+q:nؗB#Ac#I۳bůT/n'2s>}۷@?خ%Qq#9ilܨAncûyjMNjGlNomT$Yr p]\w/l-s?c?tgt:?u^O{JDT!HyX *14Qȃ q&Xߎ{#^4ɈؗM7Aloϑ_}%9\Z#>R) /<ɹ~۸F*FzO?dz1K2=GY[{C#Ovp=_Xd! 
//6o޺֏bwGyڞj IDATrY; 9_ڗh㿿s 7n~aF_@JE WUE~z=Lׯ=sͮ~\ß ⨣\.{6qѢc)%]GA3[43:ݽ#E=~OL14o]qVX1*sX*c)4H`!x%'8(eK}-m=DyG@:gxsSАwBGPP%a‹VPzz7u McGvjmTEք1%ᥐf %vmp(Yzl mGΣHVAxĨCR[NBm]s}鍼귯w^ ͱgW]֧J6Dw]26 bm 8n!QJ!ڿ;9-xw\2Exk~̒6mgϚm Kh CCxG:% /d:đGKdR) Ơ;"0)c9Kx#}qcq< $80V=]soy^}._p uwo@)S9HRlٲW||TuYoorkuK2A__?tO>l.CwC/عsߡCM5_׿g 8nOg"N[%LL}(yGy{5W_5\Ǻu쳛kSwsǝa֬Z o}ضng?q;>pbeϞ(S.l#pk"[/Y.nj]\sܔ_클uyiۺ1 iζP%"t]LiZEd:\#X(Z[ 魔J>]k3s\{.Zgң[Pm 7},lBx#IZo+(ZhE"NyVv)69#cne%JyR==[~vv1ww6rv-FG {<[Ľzm.&6V#TE50uH=̘ c@YNU!B A JG4Y^ɷs#Zk 4^G38c#[|L&>3ZŒys`ڬ2.fRHJ C:SPhɥ4ZG/yJ&D4A/,GrH9u8L&=N>ŢE 8u1?ok ;w/S2nW\zw|aܗBu9Cz]<ƺyzzzٶmv]l:=~W_ͷ\͵^>-K c,^&n4EOzL:35Q{]T^ۦ-[uի]7 o-:9ܳuW{o5oJg̭9dȷ?uVN]y2{$3ft `Y{u~_? ΣT*q]}w?I{{۸aóx1܄nwϝ;h~8vo`R믿/}kcIwNcqQ$qGT} ?W1];w_>O=[zA5/`ʓ_Q'>9~tsAg?+ǾK' ^@Tɱ0"V P6uH.I9.BY|545Q*6v/8s9`gb[Jd˱AJP?k]T~UqwY ? ՙP Z`w7K_}ms3)J0Zc*-(ьcF.6vlC>.R3֢`xOzp/K;|l~݊$A7·LJBa\*`covbD}bD]QK.6" 3eoYh6P޸WjUϞ sϽܷM8W\|^I5Oq5j՝,Z3N#sv . ޱӣe]{G>߿?>QXi?t&/Wu>W{߻t|o|9\sO&d= |3:qYi!n7Ľc ό݊^Ƅ~G[87&|6:jW`` .xg]noߘLflW^ 13Vq!%V$BH=f#$>"8n>m(Mcۘl19pR k@H&7ZJjvrc@($]O ~.(Amssq =y $&YE/gA0jJw Eˀ'D+ ‼&RZd!Pjt9}o`ax6v!v!E$9.炈2pBƧ 9!Fُ|@r$־$7׽2we %KkCԏ9Niy2 h˿lj'S~_'U%e.{$@(wG}xTynu;$Z~*Ug}^8B>x`gq+O=*N]yҸfcF1;M:f͞]9fϞ1)ζq|e=׼x>Ź缅O>oN?cŸ%d)<T#7MxǛWPGӜp7ב/|kXt1O9x=w\\G_3&n կ~' ǟY%0ٖY1e*R\&Kh,EҐm!)`AZp?K[~ y.r~ȀQhZkB h0VaDA 2CV3l5,)ZKZjeDxlwG7f$y4$=N9BGʺ%ʰX͙zRݎD^!*U_0kɫؾf5C}AXPT`AIlam,5cuy,4؄^ZSt4'*m\HJa8oXMw?̢SJKqgj"BIC^cDI)gͮzR)K,^s37oEys 'GB޷?㿳qf2smo|=ܷjwB)Xh>W^q 7o!BΝ;y1oy8S%@|sଳN_yf#;vbOO/l6fK_4rqޥxAOO/::೟vr]K g'_e6f͚ysޟ]?w~3nbOO/S9|oA47|?7B׶CKK3ms̫?Ō.?gL\񳫹+frXh>_qM*FN w1db9 q4pHI dPCK:$JcA3<{3#>xŔlЈ*hBDrҀqQ!ʚPpqKh-VJ1hבɶ'qrV?% imS,ľ{9ZGl[k[7Ra葠A)ml+@}VOXۣ2vM)KyՅ+MBѱbU%XGJ68clu5U\$0Q)B  !\RC5K9ڱ kfjcQ_m V!u 8HOeMh^ITv2e ^`pR󶷷ɜuw$ ikk+Er8Lkǥھ1&\%=C2T& CP,_~yˏekw=#?fS]|룜HDGXR*mBH'ME/MTrүGH¡gx zCQHZkUGvر4_B,Z@AkLJ/*gL ƞuBeL+m"dP?Z/ xA<ÿ?v[]4bdR!C@ͤp{>C^A )!<ՑNZ,:! 
AO D3g'浬]:m'&H A| :af J(70ɪ߄cRrO~·?EDS;T l&}̔ G;a,8B%pHY;5cL@?@8]Xflʄ2XϞUB 8#(T*͂לʲބJ3)Yຊ8qWM˸JΕj,icQhaG)l 0&W :ʸ=2 va<~Ի68{q, m1ؓY ȻttG/[Єa l,:$Sx HP2l  ,=nP:ȹDj@+|@p^)a O~ ͛!,11;w_:r~qGٞ Z z%-oM˔VJ6mfHP8DKS.1R\$[t#}GőiCgog=mܼmG%R]S*pq\ԸUHu"R^%9[$2.SCC}lKqXvh#r321cLQ@wRTȥzE ʸcbQG?EW\cᡯ;)mܱ`lNm#&v{#1­`(+F`G 4̟ZA` C8㈷! &zt** ivrX"`_K%L#K =w#C24X/7Gߟg&H N_b*Osuz~5BԦi qCˀh%ҘR+IC%*87iĺ`ƩͶUM7݅]j.bM( 0ZK?]HJqI-fڢLF!hStGuF~Զc2Q:651 l( po~ qbm/ʵfًA 4ZN-:Rk~| rTNxBkF! -TlEDYKK# "H A? >_WMv:zj}9Bw\ě.B-D֎G bѺ"WaXmh$7)YD5GK'Bz@d Q߸lt a%n$Ggxp;n/i'E"^sl]Iz;Jշc()Gl֌Qq ks!2Riԏ0ZSVpՃw e{#aQBg,'~ba rx+yZz5#Co-ZM*vJ0)mki1DH vY%%Ulbl t}j+0"! B؊%H A $HCĖ'fP a}]Lojd DYٳ6B1VdkA($4pK tdhTbJ{ӏ?~QF{*c72NսR]Av˨Ks<΃[(捛8גkSRg+}5njL9Ԕ3Fdq*kcCQ% X^C/s[N=«#a}@,VF$\!HYx_aˑʀRe<萟?mXiXQ&! {hO\ڦ!1٨DRp08 $H A $Hs6!C%m1Ƨ2"jr-ȥLom>B0:KTw)_ `MB )c0+$!.N42gQ"43ܺ[@6os$6${)nNCY!oyEF5NᒂQ)ښmDqMA~n'--c.1Ϣ<Oc>K,0z  5FRL!TH'B#RiVSb-qF A $H KbCP:K P@z !dZ)S&@[Xײmz.(X0K3e$vG?ǏPYT!@(5&撶6-0[;*(+Ì56֑r*q٧5xw2~[s}P)/EX`똱tQ>^(e$ b-dq+C4G?#1ATQcN$_W_6.3ZZhɶ[y$!Uq5+ 3R8lDlVƎ* 0tCpKl~q?BnAh@(项eJ) SI !bLS65{B --A $H A/pQ_"$alB[͔F<%lķ|Bz9KK#d%F!%vcA*xtK:xgWR R܍,,Xm[1NtI2Rh;h>2%B,X^Cʣuqm⩛n{TN` MM%r)ѐΠ ir!M4 )$ $H A $H%FLn@ 3etA 06ıIW9(092c`z3ړ I$&+8obÏ3eEhf;+Tv4?WEޅ6+qxڼ ?(յyd}*Q.</>$4x (Aj]r08.2 A`?>#)cW< aoYb[U1k#ʴʌiʀ ZP|z 뮽-q`FYtqٹC?.z(ɉY1ژ!ď[ب|,Rdu=ZMeӱl[1asbo2&(m<_J_amcGȺ>{^D8*"fU$Y͎Bt6(s`iM5,H!qe⅞ A $H A0ʒ_BXhA '}Ď>JPE(K,B*t1D&"VGmT>6NRz+m,րvc"-'(J뻹MEdt2FCC4Oʜ vDjWWϺ*t%хsj i jTt!Xunld΂ť2"`XPNvM gRv%ǜ͈Ze%wQ~ VKZSةf禲\q҄aȀ)J )ORn$x6Z,ja $H/ϋ̘ 3,Ar%CeK7f&%L/a=G ZZ!Fq˜jvԌڥ Ƙ*zr"YQ\n`~b\%T=+lレOP%`ĺj{?Ϣr͛6ѱdQԣmEwwJT:vkl^{XٺT}5#wͲ}#<ע_0#c$m̈tBP޲?1ХfjSXIH!jAtg*vT"*K+qr (E'J * } @6xQ h [9w mh!$^F{xAvAL@́Տw“kS#G1{i2~/}pZk s(ɧ\:Q7Zֻ#$k5g+b,5>bE$]= [e% E#o,R($J[< *QE-l\mHmpȘ `jk+{e8ˮca8kBDB&\r$[>)92_P"2I2T[+nkn*Grg^μ)9f̚Ͷ$yh EZ- &8d-YZϣs<2M89lEz.>-*`G\;:6em&'Q4 ap&BDJf%rc}~R9>taQ.mK\H^xEA7q?`cÖ-[nƾBqH46tMM/ϱYjԕpYP./"tkƍ'CYu{Ů< N? 
|6<~#d5gAL-Z=?]/W>hڣ7EJ/ˈH>ޑ?{/墭%ZM5JolFZMHS446Oa8xNT4F]1*[xmb,Fh8D P"j3"d 4/cK>Pݽ`K(s{़nt彦Ff̞ڵOvؑ"N֋UcZi&J}cFEAڣQ󎓫nz  !.UDSXb(E"JD,`$ $6}Kٙݝ&뵯lf疧-8UߩhlCGk7v\=[ D.; VKNr_diLжt-/ *OCqU%Hv$ & H%`z-ʸ1$@(5Y࡬i_p!i|P) ⋏UPH$./'mwyQMw@Q~8nK7㘇B;ڛut`_#oDj7]ouqM^[eҲt<_&}5:^/cu!ݰ#'(r\#ގϡfr:~\˷02\C{46b εbLbn.F? "ުapC$ e0v2sρ7S{ӏ!^Hݺu uPXBG 6O@kyvh{>^ǟC:Bx&6XRGh TT$T|LH(LKG"N8RJ*2t# ! p !4Qq[It"4X"MSm>Q]|:GF8ǰe=T-Mk*] ll :aY[SEr tKu\Xg},"LgYp‰l{4l߁x^}Xؾqawp߃x3As! q "QtO0YMKxqR[is2P`"!ʆVP4* U.)  \_ =,ǡ.Jq1U#I:Ua0y92}k;MMo}ȷc6H`~w#ToA>);u}?0m&revm-uP#چoQЃƵpOc~_e(./i/ 0n[w-Z`!5N͍79u1UȯnycS)|ynjA17_~sF\G{07f.@52䭱jk1!eY_#G s"Gc|8hJO2 ۪1I8]>{bn Zk~^|6՚̡ F"8bhK&D dҴDCa\(Qr[M0#UK4(,0-͝}e3YsgUei}^E~ã.0Bv WqS]ω@P Ext-`Jvli# '-mulڛ-RzPT]QLؑD+px%88 G$I:&\X뇈@fcR2t)8(b rX-I"^3% IDATF5W#t؎Icfq'g/z82epnkρ~T z(V<U _;( /?7 O3ȹ}}e ~f_u8t׃dG.!W:dl-c_ jɾWM͘{oo?0#sz>gl=yxg, 0sr%]0ftJZƨ2ڕak{K0ۛ'D;r x>ϯSo}K=9Q'e|l?|#N9z ;Uo?srnk(A 2`y!!vd2E4R&cÈEO˳mX&R fjt&X cS`C"sӤI7ol!IZhi ͝[lLw=qC),/'Q׊ 0e&Jc:NBzw0?öT!6ʝB<__7Dbo^ 3*(( *>b~֛oHܜ)T&-~ oS-hFHSu:ep[7lv?lzu ZjK~8}{r[Zj/_L?Es=-~ֶxu1$;=meqBs$΂oஸͽR)xbTcwm2\< K+0,<-pbi0}TW;>Q{Uoܿut RN8ke`;1GII1vt&\&x]H!"i<,@ X!7TQ:45KaFDMhh#+$:|(NY E¡c nߥhh*ɶg^{NaC VI,ZmͲr ᚛BqKrߋK$Zر~2JR^:+[_\zz" Xޒb|MTܾq+[c>?Bhh7PI<"bؒ!xI;BmRD6BZz+zk ēPX :EiXZ5gt t_zwT wD_u>0nWwM't$sw?9Ky{߶nPǠЁ{{ jʲ҃|Wy ZȿL+xCxu)"k^Pȯ~ګKO 8w]غQYs_,; 6دQ"/^1WN:w)eG`t?IXo}#j ٥z0?cF2PzălmL0>ExPM9eQ&J+p,H|U_] sб1"Ue*Q1r8&&\Rg[ Q]X!0a<wcյ~Z=F «+V0gu&?*[BdMG.avP>2xrӚmz>r(Uċ,֖VUZ}z :,Eu vӘLW׳ 1iW#eD*ґNaي|A ԮݘzdQ?<̒%eI' |l::0q߭U\!0d(h^/!' 
't:g/f*t?iXc0trY6`<زUA.w$iԠv+>}1ה?^Q› .ay~B}h۳󯇠fdg$rj.duaẒ+!Lk͚|OOᄡ=VVb8|+vsb9W \ÇgHbtCcM`l龆EW1< ] ?}BpW7CG 8/v?>9dëb^Y;)Qw+WEUWc~K|k,x̒~$я_;vb֯C=/sԈ1޲4ګ Uv5uo'>CҊY91=jjU]`1z]¥2K_-!Hʸ '{M>3Z2x$tvbz .NBqV5c}^|9=p%W±#LSRUPZy9|@Wy*P[PZ8@sը/-C>|!TGs,y^~IsN:u_y5F{k0 BvU8ti8=ec2 3 SQCAގYV=FE0Ї̣ ȹk6< jJ@]S)%p{Uر`p `cԤf@JuҸ|!ٸ z6y9%˗c론9(ߋ[y ̚KB.,''SXᡇuu9wz};,^ٴFA>[GrWPD8%Q "S:r(Rb[`\ː>ͩ(!`> ar`޼a}ɿLf_5ݯJ9ݡZ Cw8*.A>s%LBGT s!g '`n-7?'A1ic/U+`F_}6mF֬#+3u'n(>fhi$_|P l.K[B8Cq1r‰8;~Y2۲΂vϋ0e2Y -wu]LK/#[09| GY; 7AK 2b8f')S` ֊rxg}Lb5v ˜ۮǬ\)SUc~0gwvtjDty:d0dB8(CԵy{߯ǿ NB.D x\D`Ld( f V2$znj /ôO? +ÂL`xik?~)@UH1|0<8`{ktˆ弫ϟnd,X}#˶n;#G~y!Sp޹H& cTvqxVeW ٳ󋟪.!\<'Zl% 1vLH8ʒrX m ݇h8-|҈1VӦWpY#⺔ꫫ9欷S[!S&^`_ضV,l=,Js,[Ƨ`$䝁,CX_s=Hi)IÚXtIزvqRм#be\^S+Vb]lҕY^9ލHI|vœt\ lPQ\ Ƣ!2FTkNJD(0+Dҋ ut |{;s%/ʫJFsg^+CsO!?Jr*>s?Qw-mԧA3N AaFU{W 5{pcSV?uuQ5?cU'̪՘O]btF]#0(4Qp2K<:LgLzsc~Choa^Q?vs$R䮩 / jbEM} cyFx,Que[n"3g#7Ə\mg鿍 ~yo)RkFu1 ` R4PڪԄ֕0w>rO0#`w*urٸĿ`̎H&UR?-AQ#uǟ4fWu:PFTgL,0Ƨ9_۳sȗ e]LfD"pOa޹īktm3sσ5Ș1N9뺛R~!|r52ST?FAfc lق1SP޸I}Vc~?ώ -c>b~<:-{)p/4WcB: FQL+~gF̽w#b. AS߂\C>sgaF̶mȜy3"W^3gr~ꌞ1#M0w݉=2`Dd̥Bd̘ KP\//?ES"w@{}Fߧ:hg plܤ{P5zwˆ@8{un)*=;g [cAaP_'=1*T\h@p:RmXLCrs(Ђ 5(|if^e?N[@3MqR577\dBaOM-ǜt&vgm1N+ܶ '8e7kgcTI@6C>>1b^n9m CTwjbQ0F2|AM-!Yzv8bW|CUi!F c8!R(LcDa%Za²CPd8~V:48s:^[ ,`$2]g: |u,(`މ|;aG4pॠ~2?oߎ~*̱z@YɹP5w'tnjC޿F !#œ~Zgxť #mepzЭY(yw3 N6H@aa`:WdX\^x11W<\'ٱd1߸wl!E(Ҽ#ckv-*)ANz*tS&P 5"1n,Aݍ 2#%c-觐O}]\ңrf~],~#LjiyAA cL9r;υhoe ॐp>\[Çm[!2eFYJ܅}-yigvyVeOh۞*ג=St1_`\}; 5;17npgpQv[0XXޡqT? Sw0yD+0q?g4"[->Ngm/"'fCܵkC6ҧNɿ֬[χa#2r$wcI{ 5@U-Uj>smg'l^\^쁴]q IDATh`{Oþ=ȷ?7lXp;{9TpQ$̞-‹突4rj.g2wye;#"Gk=ŠZo3iBsK.<; g9Qk@$q,z8,q47gknmj ̏ so|BF߾5&MnF88̾E؅y,,l)v_kmSP?ʇ"_z{zÅ}`J7=c͞tbm[qEoyʄah$;[V UփF&MɤTPN(,ո::1~|OmI/ jIر fܶo_(W {6ʕKĿ=759Hk<b_@>2K]QC՞J/-d  \H%)۷r֕ oaʯeϫ@(l;"xȯx]gMe:4Ɵ]p:i3-V䦻3^%Y$&0zf`aGvzu%7w=(%.8AJ?Iig++ l[ew&_>ӶnUgBh()zudnɤ `aw^ 2')c" hOHyCK 96ޖ 08a g +kJ@l (žE2X*}mXbjcc) 46ǚJ\ֶ6 bq (2DA(ߙey y3ԌqFdj6K@<ɩgkw)ƆQ8}? 
3gvV7ƒg.~KNԚ$#lf]DGj$&Ca"-)porK.gk/_=%( 3|e#-<4IQU\F}[eE% u !K3z1Xa[>+ͨKa+)D_;RֆyH'+cnQ} ,F_J"Y{6bE2 kV#}su?ܜPRbp~nw2CuxA;oh/Pq2l|hd{H&?3;A9Iוv6R~_wmRH)~=A%P[ cZaJla֯{Xp*8Ⱦn=\/;5SZ^^υƑ#GSDՐ2H0]$G,b= gƘeY2~ 'X uu5lxnO'Y[#O=Lv Qg]24J)|l $umeIQXEQJW!Xlyۣ1}\:|TelJmaٴ=CțTW]/7d paP_YV9}$ƲTe1(-B߅vv=|[#O.Bzeϫb8~(AяRd<۷c#|A?9m"1_/`nQ#k}K_V%uμ[Wgy#揷a"?ڹoՀrFT˿ <Li9=_}กƍg'L}Ϧ&XiWu{y.n{Pxeiɥ5=o ͛?}}Y;E?ޭ}CO蘋}vEZ_.GÄ kmqs-w5y猛6BsqІe}b/Q\Ӿ!i3>ZaZErۻ;D`5ZO:4)8N1f9T̢BnO 9n6*)a DHs'w{<`?zo׮`͌e<( e¾m6a!Νկ cF#]0x¢ $UfrS7R)VݫL/ڬ ={tEU^pwNA>{*>{k`bv2}H]D 1k"hCPͬQtM\RFa<#i|\V'8e]b(-#FQeio$:d<݅2v >7$lVؚ%It['~[5H7w@% ̕!72>1~$'Pd%m2?Z#ݾDyp;:>lV/gްJ,GquI-I$ND3Q$qOpq1& O娑?wVS\E|xe׬IfwlRm47uxPaU2BB&d]w" &ṄgD8B}"NȶJ/OyAJ+Vω7`EMժ뿣_nh_}Pv7tWѨ}Vodݣ=# .ō}Jܶ{XVQې._Zg9uc_ EfL{=I>uAYY.xv3{+Q\X$x8wB(ݲE#UUjIй;T^ԥoC^vVo?!?zNx~EŰ}2|Y›֬pwiS _ ӫ`@>uWQt/}|޵iz W޹S||o`}ѠNI}dL7cƎE=;tCM h}9ݐPetX۶ASVνFP3LrE0qn`M|Kn-?8|3{rx^9DC aASהu{M#'W0O?'ǞbܴQςX @.x2.#G2訾ƶꇘ~jc>k?kۃY5{4{ c:QCv(!s5poc,R)->{qɋp~b]iU;>եแèek8eyA:iqBJt!Mˌ}wBCyOvk ]7̶m(q8gLC}_]MQ,;?sozwٴYy.֦=5,۶q| ˦KQloifxi)rPLjʡR666 ף BQ8Dؖl%'CuCX4^" G*& v- ,/^;C<qls,DcG{P!:;y!ٶHu3 $EtXPN1ߞm|,d-`j9=`0]8\|B#GD( dѾϯwǟ`uޯDc 0!D&〟3F!~2uB>ca(-,fk{e8(."qhN&H FhPUXJ#q -cx39!XN*wiI7KJ+wD<ٲEIV u<'yN mc?uAn%ZL3k@2 /< 3 g'}Ë?y²5sLٳWDzbDi ˮ?=LAsA8yFo esܾDV^wr%'m'e΂sŴ'_?>{LumDJm91qY J+{ @sƫj2sea"~FCo7Ț/m\g#"0zuhqT/mtA BG3,<=a}^67-V{3"ߺS쒿%^>F G櫟byVDYJϩy^a? }عEє)H~Z?4xs+!c[egw,qR;R}6iMP:Ͷu)R'-ࡻcΝ%}`:eK;IvTmoوevA\lݍFiHiO&T2H]m-$\v=F5 O0mQZHy9"PN6ЗW/mwúw5x% "9*!pjP$_z' HD)޳gW=w ^v.tW D `zH}HKʂz9ZqZE矓ꀅ{{+fxBc4B6KUh5U!?g~ȧ~wCg^pL"kW9?3 a^x>aZlѿoyvb{Xg'62~Seǎ,a#Q, `|0uke٩gn? 
΀^Oaǂ瞫(YcyШž¿Ei۶id)JhMDQ9rq#cճfHyD0 %C礣$V_] gAC x7VE)䯰1z4rYO?v`{6Ơݣgf΃G.nn};qӱ!<4/#f%=2e5\~Yҭl?hꑇQF 4 M34Duvv!Mb0ПU+fcO>}=O6 J%C0Uao~ûcşvͤxmM2f7ӵ=~}Oz)cB|?G~?\kߏ YUc 83[ݍzRܙxxj¨= ,-E;k`}^ڗ_ݵ0~=_jTNҵў5{DĦg:>ݻ>ryU,%ÙE>{wܟ󙵡}5>8/|cKc=_sw~ -7=_X'lNΑ9q]ɥ?<Ӏ]k&eܸ;ZϽpڿ*/}ɕpι\$5>޻M!׾GcՇgی#-yϻ#re +KIkMC  69KW&5?Isp5?vk`  g\:þ6xm6ǯkpE&.ʼnA5TvT%O[f&摃5yqh'wfK/b*)JbVmpP 6u'\O0,agSxqB"|a/r'|)R[dÞݻ/}Ι0qĭ+:@J;*bs~\=>xT35ǫo q IDAT+n- 50ztpuA6$CUN+X瘟x%fўY-*pvPaqq6urZI E`)|E|g/ݱ rg޻wpC S6ba@zx_wy }ן=||gb{m8jT9PEn1WKH!1[yghx~͢@з\z}yC{v|¶㱿n7ز!CO< } zK[rYG^Rm Lǹsqn}o2!wVnfvLX!4̼1GF÷?Hd:t)*|>_B>gzQw4zxD-?DE/f-:V .V㩹I_w|!~=(;\;w_^F}my`Ϛ0𯅹\pgFteA>[.K&(3.;@S#p#wxϑ֍Jɝwyw;=[<^{]$FP=HܟN: z=\?"Ypȇ>8AI~#1J#AS gWc?zxvdi))x~_yD4ܟ>Qz 㜺뎨D? 3랇[|)_BE/4Ls v[9];oa_9b8c\Rn)1{3{a>=ɳr ȵע'hGTȧo@~b`𬽼ҫ\܏gG-cWl,8q*rǣ`~zwf 376?y/|UfTxv|k/=r0[be5\]5zq[{LZ:;}s „OB4#M <%T۷Q .Jf4s;yaTbL ) #fgoG UFfǁb#7>hJ-f9R8s{!p_I3bOxuvb|kHRqAMba*tc<'BUx;q>c_1 q7&SN]xW_b= `.v{, f:btCJUK*#m&ggavj.CH1& {|sw/u6t\8SSdu,fMț >W;oG~解޻\mJ({/ h~[Ыyy"FkHg ~+93?<536Kxg/A8ئ6}_?~}k>1/[?pg?bA@_ښu-|3fiOI~QM̳կv.|X G7=gdێV>w#RP|?,."68K|.a,ω{w#dПw#?:ėȧ>=W43?+Do|^\"}O6Zߍu3,A~Up/{xDs.>EB{GkWO=pѫ{7Ga㖖c ϴ~=H|c{ݱzx+p٥ӈF_% .2l\>s? g_:z=Tًĉw!xR{(6;^ʸ'u|NڅT]xL{Z#`}ߟ Mȶp ,Z{o' ~롻^~9z{)YOo*p r'r"W4q΋'B~cpD5_;={ǿ~_ï=9tʩ}>x $k|ĐO}k{!wڛր;ݏ|Ӱ^qEjw G[/^q煗-O#7|*?u)3{7 D&~_t3 .,'evd)q"FK>}}GmbwD 7_;aH: ce3π;韋{CE6s@wVgDTcɔXD0P% 0;5K6|I}8gNiPR^TJ]̶R\gFdr9a7EENs>$>l'ב]qK:,˪>ȇy&@ܜz][R\~OȺЀ7Â{GuxCqM8L%/Xۍs,ы.{7k",dyCǟ "wp%G_! O?=>1oFffϞ(]S\P|*ҫ} |c$FcE%<Ë@+L3t3mF~q,;/FqXYkaIG7&*zE~/PsΉ kVڻs";"|E ԤCqЏ}</:bM>Hͣpx2OCjM6h~x{W^sbRz=䦛aЏ^ux>\pa<֓wv{ ޳'e1g塇஻b_( (L$UI6^)c8dD"?[Tš^_{,c jXq 'F`46p%x u睑87D>M߹P}|kL|w<9'zyJk8hJ^! uEeZG4z{1tFqpu4[a|g*z""Y J$(/z#KN0}Ӯ{?lX@]PIMK8 j͹W]#4VE>梏O5gAD^q@kbLp!rlGϗ}yK6w5 g`>xyǾD%Ѻ Ge,,1 &sl {9m~:x:.+.;H Pc44賫=C 3+նڱ 3 VѿsoնVj[mVW?;Mq-y,F ڎǾ(lq5屭j_3(LA%|F-7g)Q "V}ҤMP`԰B{vȬwɦ;(tHH-4K]m^x.H6LZ? 0b[ODui|ݢi.}!}2}{,;CШ53Fn/6eub G-X8]6Hы' BmO>gs90!?J;u ieT! 
5Neh&oMR3)k8txS̷:{zeKRN"7pxG/h7M(=,Ɍr|_7="to uTKUcɛ(5 P qWNH&]\G!ǖ&V %va 7bߔg25K/):S,~&b5"$Ts|cYhs-wet=3Yɝ)zRQ1?=XİT𔢬RR (U)4PUOK &,U+0YFQj[bPoP;x?Gj[mmվO ?v,ZZX=P@}}N.J '-11b>^tټlkFtyw'9T}ZfZET ӝ'WN-bK/X.<,c ޓj#s<퐊j̶POwW9f*U(BT9#qZRt*bP4KoHE(E ^Հ DȰL,;̿ƈmXL`1h"%EF,eͬc`G\Q3ھFu胄5xk &dFgHAn:Z%`0U%b(JЀKаfMRR$ft&ϸF x0ao5Qf6:6]׏yqǮY#0mY.T9kEWmb" gb$CP1(55A5k iYt#>4lp/4d/kh] :\f"!tE]peA%%:/]JMhּR`%. ڬ4458G\qIb֘_XNt/!K){"XcDWdUt$S3/hؘ8sHrxT<'(X J:a*šf S?ýcN{mB|nm IDATb2~6YO؟;jv{$z  ẕ5sozh6$:2ix'=u*dZdЀ5nԷqnꑻha2PʼnmPFA&v~h6)٢5}[FDd $}nM4]T<Ęhln`+:1~cgxoK>V}j8uv^b[k.S-`(|mԕgeq~:n]Eoq욲~1kQ -0$iST2 !W@)@: 0D5PORMQUzgļ*ל$=i\DcZ)iE۰ƀ5Ҭ; &bDh^ 8gFQBwr|}t<޹??#.֡5Zk&L@!&):0u(yWp`iӶaYþ혥WUjVz}v3:aN6䎕ғ .2ke"fgI.s *%q KUɔsLӨ]|*&n%/i y*@!sX05G]cڵ;[JYL e]% E]Ym@CdJJT&,uG~X,De_жX.L9XJZ.A*q\EGDk  lJ/mIZFlg#&?c":Nq. pPiΊW$6%RV%Yָ$@a"pxQ 4 禕EZ)xRe!>읪(HH׾9G]8=ceZnw驩HUIj`fĪC4f791a\mTg\^)Ldʘp;)MeFXtF;(1##LUҀa M$ `RUA][}42JgzB]CW,,ӳ,t֙bCf @fDlEР^^a 5, V3ZNF\WG,3 p\tfZӬi:T^RӶ){A]$IBߗd6cygV5.q BM]y,[^+8 ~,FB4 jgAR GD\J$t=:yljcZѐKK%*x2\ U#́cLG(VCL<1IERC,)U P7L_<dRz%IrTたBl36ľ D8UyUE&XzvS'tˊ6L@<GEh*94eXk*9DI]JCLKh-Vv|'w)>WV1;G<R  >\J@9Aa2fzEAbK E((N%@(hgPC* ZޓؔA]ls)* i2DIx 9UiXBM1΂(u]ctdUXCkxic !0@$TUqbG%H:sBQ㲔WH8=Iʪ/#W V1qHjWӲ=Y֢QTuAr&蓄\ ET5TkM4LVBb!-]j)1$$z/$IcāKq]1H0ԭ FZ%ǁ$&$x D`b\eVV>SiD;h~(c,Rk(ƥ 8k5Zu(&1Ɛ$9C@$4ml\ \QUK5٣j_bȬÇ &X_xpiFwDό?"Ьmc#O=*b?4$%xXC壨nJ@Q.1eb|DUSb+  18q ,M%XXkki!$4{qDȒ6:HsAk{j~m5޷RNBQl*d6ltl Ҍ 42Vw% פDEA]Xg,UQqv=yrG"ǡa:,Khn= 5\Yjpa%˦b0~)qD/JT iMc+rkq6TBKp.SCVXkIڄP!Q(H#BRؗe3+KaJd(O+˩FNCȗ}2 Kel6EWkzNަ `@RrTmPcp6k<'4iR#$&c1nY9^AieYڢ_di\7VR$AKbV-ЫH~)ڌ*`He(i-qXb5%lF84"8,Xb *XuM+iShLP,XǙ庤m7$6d- Я*L>XxZ &e#M^bj9[!Is,^ )b(I[ SyOb +>$.*<T IhBF($@ؔR\ Q$VC,q.􊘌IJZdiW K`x3ÊIlެcWC2)AcdQѐ4{,;/gokۖ=sεgs*(2`d*EPa0[ )Ď#JDIؖ#Q)RqdY)Mll(:iLmN~t1=VC {5c|ZjB RPkbugܔ#Y*4 \ZB!eUԃ qdl_wZu(R3Bp?'6c2V; )0ĪEiQɚ2ɜ+V k7R[?Jcꑢ:ЕdzQ:!M?@y%r*2MoO|g ~W /޹&]| ][-7E&t^7L *r2d~sŗ tD@_ U_K̑S!$HL<ǘxu$;NㄓO[kC'u %qYR捇o6Xy:L9kZZ)׵wdNT!ZIBpxibΙ9G~`Ӽ⬦Ő%eś>Fp3uOR,Ip3lzYú"f 3Sူ}'Z8/8m׿o,BTUU +$!-4\h~iJ,N(sh9CHô4ױt :JF 
:R0, $`d-N&Shϛ#k%|0*dmOXesncG4,߰ IDATJs:4%VxgPBY,<'u/6+ BB[MAXJͷ X@ ,!gyc+ IXCkWm "8%(K&8<ȅ׏b#[+}*LԆSx!ELXk3h8RUqp>my;Ҵ0.329%*4ӯɿg M>/筻),W+|:n6|?Sf˶U|-ᓿr=Rl9-^;l_],²Ď8oc-2ddeq{vo_%_/M7_K2QSxTm~L{:މT. Q{(~)oS7:g^#zxɟ>K| .q9@D0.|RH,/zgdU⟓%}/fmȏ=;!8cXXcA3b^W2c\eUJu~]y2 JVԸLLdXYR⑭֧1lD=HӱĜُW7{xݲ5q\d.E۲UrÅ7%j1ڠVkT)H%i_GvcLqQ8g%#[k9+%V?$vyw˂@Xϰaɪ,E"Rіoy:AwI$H"L=,YQv?tf \خZC Ya)vamS qXUOZBK(mXs!̥3s5ڝRh0Ze9ŀuRl{CR5Jy×s-ZC֦Aڕ\[9ڄRd)h5iB%hmkn9׼\]۲X-UYOZ1kNʰ+O-P8ֹzT3Gy7J]1 һ iB:O;8TmhE ۶`䬫RFq%R<2.Q/1yrg #zvyυB…-q߄$׃_XKzoI4V!%T@wr槙/Y˗54òB +D .e[@[?rxn?=N"` "X_ Hy-)_6-2O|MW4?w|ןབྷ<6B,rFBL<8sYN#Y Hڗ29%Z8XWqZhxdvjeNk !+"0c4I2QϑV)5UZVЊ$¹~2Z9NRJidc,o s4hmyX .EdD*yiM=9jCF>g)SU s+,I43F:5sDS9qX K^BЩ UuR(˔2[+0@<Ӻ:5|C #%;gks\vEuὛتY]pڰV.j>h%h~Y@CA@du>UUJ}O<(EBY>lU2/RiMeu)Oyߏ6e% \wU_5{Hw;iL@ZE^ԯoF~_']v+d:__|o|Io|~;" ' AIJA}4$e[5aB+3u jUB{@hM4xchBgsb%y#jmܳm6\8Oac} idyڳ5p?1N( iFj9Z m8z.a, 5;Rp;q=9WTI 53 -*) 8NjÉj)qݎx ?3~EhERR:l^ooyQFTOZ"$Uwӏo|#}_$kd#<5N:㯈Qo!9y_ #Cm +.ܿ+g>7>il `Q%k~n ("~~G>!|Y%fH_ܭy -Xu𱯒|ǾR/|H >./W+~dzn᏿fx>O9>+>^:Zw3uǮm9kwjǁ4bjfXɣF\=ޢtv8a[ϼ,4XRHsu[;v )Ҋ뾧 9&7֋{^ 00x.D#َ߾vK#8m;t[{,3TYۘR@Ajy,\ס-NW f)൭H,Ky4C1q>_-KJEn•tyWh9jsBٻXĚ:FEqlv\Gr'.>1Qӑ:0y\l]ǜxnxi,藉znFkb. stqscW7Ƒ!k+:<ۖ4Saظe ae}=ےpd]cxqس\ps6GvhcHEc,ӉcՖb<%K6pB~.J"ňt㞍 k:F` R[ޟy#zʴ,|=G6rqayZhE_Z*R&j+c<A@ja*sPA-8=n/LNH񼘎\h]Tg,O*VEpOs}<ҺĪ%;8*#R+Z1ѯ>eeX]D;˳l%5H<7"qYhf禍4"&RX ߂8l8̑m>RSwӀ-wǞqYqm[w7zqIIgNjQ#WM4sf.w;aHiˋ="(Yye"l;^k<'ҲG)]w3-hg!qѴ+^hM?P5~6 qM@s.ܞ|Rsn#;Y’CNh8#:wZ3/3R*nw&^ر_kZxYx7 (q۰gyiYsz4繯 Q%f0]&^i[@߶]n,Z_ IDAT#STĜraZ.ۦBúm=¶mƑ,-\'㑶iɹҢ\J|1V b )F 1tLWw;)%iSX!V@T%a҇y [h`M~]9#AחĴuYzOGux]-Ӷ@s |&1=jHkmx@tRǐii83F+r¶xc?ww4]iZC;4:\m ht5-Ӳ Ly?`g~pZq3xkrWNP|,5zBk#RKXHP s[s܅:Nr^<+TEx\ϫvXc(N4xx%(cه+Mǐ0\im,<4òm=H.;ЖGM>:i5 3]Sy6LܮSSOZy`it:W6]C-ȦMG.&m/>l K)j]ݚr5|avêcIl-k>em4]u_mܯ4Rflw~ođlW/mG)u+؋+n%Nhyʼ~z+S#\K5h% g !^6@Nx'/{ ŕ?1}šxglo.0QeFb g]9_w^;3/ ED$Kڛښ]5}pOݚm;vwN`BlG2`ȑPD"HA()8HIVHV(B%;Ug7?޵]NtgyPNGf?d? 
JWd ʼAF3̕}fYH/w~GWo'R֢sx{_9[ʛKcwlj˫%ihJKHbá禩p ƌqZ.ʰjǞQ azJR f]0ĺDIe.ښ>x*kmjV<5FqaAf튄Mcq.0ʳlWaGg+xpvŲ2́ǫՙ6#,K[S[H5Ge'[̐u4(ѬlMVq깨tMM0ymq3cJ\550yT32mCsYLZ4WaQU-Ҡa.MPdd]c}YKU ) O6 uCeeu֖u5ԧLU(Qu 3)p E07hM[5$\-jєUEchZWTQc L:u"4 X 8UFk%RYZԶ"LZ8rYcEJ[Ub.ZYu#Ť,^誚5T&W F}U֚j~+'E(*FsQL$@1V O-/93@Ӵ4ZfbeXT(sP7)%VmG>עjq\ EcDnqR\Tgm2J:̺]f-UL@]::j-JYq<]CSfkc nn uͺkՅΙU%m"ĢY`4YK48g-VkV"j.hK[Uݠta V q5`ԚG%eǙ=dW pe-FxYC[h@ ]C=ȣ afDnX?k:aZm0NGlNrXgitmKUDj8Qx%ZbŮ0J1YZՊ)eqPZLZ,V(bX+3ʑtX]DhExtI P4b'heEF\›2K&?KLcTNJ3Y/kAeӰ/f{f+Mkq9}")haeY;slGچEغnâiv%NkvMIeLe]ɬRrVtMΌH054α p-mUSɠ4STP0D}G/ҢjŚa>4ZʉEK2J#' c`ˇi"ɫ}q b\j^\Nh 6zu`rD+fo|fAΖ?5|62"#ͼsf~+;|0.5nr`nD#|w fuɫoj*4ĉ.Ƅ3Ze{$g3NN_.Vs?$8}c\\CLI\dՄz~]8Y$Js?fsʟJ\y)7pz&g\AB<-?s ׯaѯ1{_{OOԿ~׿Ώ칬+Ex!pQ5> KTfXזhXyirUPˉ LmhM͖"J޲x2gh!(*O<Á3ϟcЏ^fDDU杇;4~!0|(7aCH" 7 ̋϶{f4>v-S/WD7lTR\/kR mHs﨔b?=y"m(0snleGAXowU1Fa"q=(i01FއyDnp)cr E=Ϸ8my2H x9֤y&Aqp!991=1E4s8޳nh~![RH83{b=ܣcx8Pd>#ĈO;Q#R@N<6X%<IsbJ<췬Mv*MIf7H r]سq8 *m$c>iVw駑4`cdx<3ׄ0 HP j縿(ŦJgI8R! a"g,!#E3O[ @>q~y{'Iyv<0!)fxv<~>;HwCy#Jks[gˮai:#MeQ)pwU d4iRn<$v0J|`m H#V+6`M!<0 '<K Ϟ,/wZ*N]dʞ(Ll6\X+xæ&D4Rbҿ`B"{\f%="M6g(2u?'K0H.Va՛5϶:VT&)ybE?τXUC՚9kF?Cb0 ãp C~'D *'in=;[O3햔F8nsO ϶[ 绽hy+[YVa˪ewLR7C\]>kn+3ooܼn]͡gY5dcEiTa7O 3УAbF?s 3iec3l#%)r;i޺۲&-Oޱ[@SY>;,Ail׶3qvG*`|?mhyi1#:mxh}WlnINñG)8z9))Cb30Zp@mN(B C#jFaG=G*2h A0\X#.Z[~|! ?w·Pe[q'&ͮZ ._oxSr"eyF)>_9Ztϓ$ҋ$4¶Ra@[J_R8͝/8 972K=O{~!?{u$TdiO|ۑgfE#9uzp_SYAVWxlA>(_ wbxET4\[o7=qalE񱇉ԁ9x{'G|ś9=,b q,jf\-FqߋŪ3yve-ǬHE L&!1X6&iBT<2)X)Sda?vߞb<3ƘK4c, mau,d89ae_trA1DU bVcME L*)8.RRHD 4cB8Z21rL„Dr$@39lM1Ce4T臔1*J ȗ$P 9S"ϔD9I ,CGsf朒tuH (Yɨd\ $S9.I-|ҦҤ$)g\x *C=r(Ŝ()bP-LR3k<#tC1l,# a*B"1G0u#ɜ 4_Ϣn?QRBid3Ak +<" c9Ӕ6b#H*\NLqNǩ c8,L?Fj34NŢc'3 Ԩ: vl136}rɻ=wkF2Dk5Yࡆ!Bg4CʬTOI}1I'L+"8'9'r,貎}%(mxw\K ara8Um_RJ:~d(Q8r1 T&@K"e&Hfeʩet6sD梪9xυq8V+1hkB8Wr&(] ѳĒb8Q0!}`]~N c!\]B,51zuYt=;Y:QkB9*M2YBh1കtDIIQagNl1 R{(2*hx"N9?E: = )h'*QC$1դp%`{a c9 )ke/SϴbF8:D.vK3')!U!B.L,̀nAO!)$gsde*P)aE,Vp!xSk`&? 
k{ӹ[#c0YbD;+q9 | N1F؋QZhI4 + $(SZX…O|Ac44ڢnroMU&Lc5#]:ęZbR$UӪ0'jaIJkvb¢&Ϫ2{Yv"3f3O3_ZvpzdșaY.轐M*-amJ&Br&*E ĐFI[bsda?gZ+sTΡ}$ZA AXcBL4>$rP YeJ9R(/tֆ{LI]أ 9` lf펋 : w"0shDt0NUD!5) +"c 4Ƒ@TѳONE%$94 t&G7bbNIX篛2·#X=K! ƠP8HUƒi% 5,O`5ljESKeTk [Y$**jIY YI'7g36zuŐ# .)y~w滿M䁏+4PYd'V"3$xdUF޸W|߷-^bgΗ? ?3U~YdAa,HMo9WcVװY1O_P9OK+>=:뾖($2vc&F!>ә_Ͽq% z>S~Ϭ{,NL pxϡOrӿ{6%??g./Kԕј Ie$?f2@*D _e=t&Lo|-QQB:trd2in-!ǘ1*c3,NVetZ g ?e_R`R+ F IDATI(R>UJo,A֢g<]w3NK\P ]XEjY) VrJ)!;xJ+Qz`)\f21hX yB(S:r}̔ BA4j8V8BY.A<5^U]9KqӘuIRLsXAD E]΢9F%R0,Tg.)T:ݍd,"i:_S-˓3Ξ5r/|Tw,EW}}"]}&Lel2ǐNVjS_>ͲnA"VT).z9\%nUZZ3L5hB2Ң_)3X)ƈJ^IY+tQJaJ!oMYZ{{%c-)M܄,Sxwy+NxOdC#to|_.3hM!O-4/k rR&/^soVO/4~SRH=7x+ 剳%k!y@n3Sh*r2ۜR&)!PɑW-ԋTft}r`Y9O|׭䫎{?o#t84-s4M#• x`u(S~J3pX,I>ƐbZ+5 6iL%ڤI+1蘙r{@qd , hHQI\U Cc$kh4S<'rWUyUJ+N8"1NTb>>YTN+ MI^I`+%W_+}<B%u]*RJ0LOdQ!$'$R/ m%] ΞI QIQ $ZpIty5JbB+˳ծXN۔@Y!T!2k)8`^4i8e3*&nEf#Xy~)8iY>IRE1wFc@ٓNH'fm]B"@TL%8H%=՚04UM@ӴI I965Xe֔3ld?t)FY[9/ $+Em1f-cb9' uML? 4&R)eJʓ8IL%KE>PmʞϚs)<ŮiKCT,+! !t[яY:J>1]y1%{/*'x&*h L|S?Ÿ;vo=`Ix_{ +Đ4_U_z2ˠQW|-J1'#U?K_0$k~'{Z7>`Usil |71q?lӑ1jq!D)cc'Z1 1Oh#hmFtXA>*I4-8`m]X VN1HZ-G]7c @DR1DmbY~%a3 rg?i֕[Cjԏb|:!~: e|R%Y$+lMӰvbf% ދ4aqY/W0B<TiwDikBI<ܵ#LIRd%%*ɘ@!PF1 USIU`'!y)֝:)&uf%9\\^p<J!X|u2p5ł(J7ޓ ܙvcD\}B[6 ~TTg9g\qY=fu]S"qX8,VKbӐBisG[k$r_@f۶eڶϓ.8k8R@0d\*- <+CքPxJHPB[UokhRZWly9r.0VbBܔ 2N60Jޫqi>+gOVإ/1JZٓ.uBօT,d>#BL$1<ϴM&l2`KQ8G?{lS01Pյ$n!t&D/FJwq Ɏ1!C,*F(v1jg_r RKDI:M0Re%{04^rE|"4Ia$ LMb'brnJ(nm8?1zc }?@j_OSΧV.1V8Z2Q *Dl):m0ƐSY1D)Hu c/,hŊPԓ6ErPF5tXW3QYK a@L05sTopkkx-,[C7,^PӀYt?SdA>)ԥLeOS% IRŦj%{~1ws{okۿ7~1]FO>9A` lhicϟ.÷~ ?CS~6y]tN_7 )/?K# V<յۗ &Uui ,Ƕ~ѕcJ.BabSdbPlkʓ*CVxnZO|*3-h~pmJǁIj9\^.9ԫ~₡1iD$Y.d4UUcX=޼?,k-ݢy $0p^4l7qSi Ŗ˫knxt6[+qFZr/p1a@ݻmx,.P c:4e:̴W w45nOaW.Qזq5af&V =zNh+6rnx4KLjQ'T?y+_A|sU+W=j*JA9ͰY,V?y͚ۻ+BXmX+AlUSח"cm4aĘF !`+{*[V%=WnP0&,uPFBbT:fB?OLҭ:=⎲{CPq|R]nug\3O3>HeyqJG+v-Zkk|=5ǽc xjE?ԏ-ƴ0pf9ZY7SFYǰ߲\nTWyx]pDl?<~564xd$ΞWCah%<"$;l^=7ww8zpKoItu:4"gY"B:pQr&dgp}U34zϪk2az#r>gl+ dig?_{br.}snU͌}0.ezN \m}֡$.LSæ953frـGv s5 M2h ;~org@Fc?-Rw>޿rO} 
ٍĶқb)~'4)&?}7aJ07U2Y.Ci,J)<ȯ|l(.5玙UC/g2k+,?rCb!^66'и)O%f\_*X ^,Br=H0iR0 jY?c7T?f=' um!]7}%O~=A(PE~vqbpLM,0,q IZR'AnVMd{ܟ9|#s3/h9ҜC5}BqE}}-'|*B.]3!stuERPˈ6e[R/( Z/ĥ%Icsr}75e$ 3$1l)!KͅԬ &c姟%Zwͮ&qL# ń3- ^gx~W< gO&\`HUd*9lDv5v^3ӵWv_]qo^"XtgRf8݆Y'̐eQ$ۜX]H)2oRd}aGBf0mc"71Յ.z"M-&L=V81ae *$xy u|aw;=Kʧ+) Y> 5<rV,Q@ 4V ͕\KZH(jB,#N,$ˀ[RℵgR+ ıLy)Gؔ8#h4~`>||@;OC/3zZ2cUt81D#6Xb07E@yb$VLxz!x R/BJ2'pA.=F-v)>Nx-٥ۻ=SSYIf"v&إG aDd3JЂݾ77[>}[Tۊ[445MNw9rbəglq(ei6!ꊕPPV1ˌY;)﯀d).==?XVBƮfTg<Д'6 ˌ &bU7+s3Eݕ]i s߲朎'R'248 C}85*N!eS3aۖy8#Ç#^.FD"^2%&Du25` IDATS|Fu EbVURXA"%Icn0, {1yX.g㉐'tWM^8qؤ+E-̤A_ÀYFჃጊ-i8n=7v9;Ըڀd++6ۜPSOH=}];_kԡ>lCm2J:8a@ ȡeZ5陛݆)[ә, C3"Eřkȷ[,mJ07LTVlS٢Mljȍ4 0W6Z26N3T~e ۊ7}i1و74Ga}K N,yD=FFbb2;J,#jj)6 bXn#gv7'g, DZ򭇩2EGvH+ l c63c_5VdV>"LHҗȿ' ۯߒn =w7.F`,-&tw l_vx.G6'؈ZM@6C5*|qLY{أ yňZwJ-Jfvv-3Eأݷ$;$0'izM<I@t o+o%sUF/_* ӑ`,nYE5$*b5V8IDw%C@uf{4uZݥTGC.6`BKa>/x$ӆ~31fl AČG yPran݅lpv!I$Eə1,(,ۛIvE6GCō'#CNI?w]f8Xf7qa)ne_w2,Jp$?; bϑyϾ(?Ϳ;[P/K}[|RW+]TDɁS?J^`+J}F'?>]Ɵ5~ɧ -KU_ɚǟ ɹ/Ŋ@c# ' J]^TV+~w1֔%×K?Q9Wϕ.%wA†_;E&t}#铗?6lvW?lY(?@u| կ.6ym533m3A6iivі\#8Q_ ]uelZ0EnĘ(SDUuS1T= =2|x{$d#jHyЇ[3R ~ MܵzXQPW,G4:sP` ٮ[@VOxao a$ ڮC8N, PcZ4nIF&izߑͲؠy: ˴fpyaW/8852 0S^&,x燆ni͆ˇ+Ri6.XT'e5uEkG,/OMWX1kut;%׊o/Wϙqq"84(1iޭJ"+bb;Ʀ$8P^q,cK}5mRi aI1B2SK8/MD3^KLp0OLCG˙ZLyDl:z&r&:o3͆<"\D:u qۀ#q<COq{C;ԤoqVp<|e񞮮Z#m.صlwD;-R [r8O`N'4 kHZƢ&,ezysԭ)3v-8MꖠR+L⃥dau8~1:V(2n #;ܰĎpȱlil.i+>Oӵ=6e:f"%3a|)Fj[=K۠ej;|b{ҢkkyD0ca[$Y4ڦHJyE=Ϋ:Fik28^s0-jH)Wmp rfeҴ$774- ёp{zz JbY%t v';6e'ȗDW "4ptŸ[WozgY[?nj#;AٵF~~V ${d_ߣm__haR/ADicVn/_i|A{/~B}wq^ %|"9U~!}9QkwhfVO 1kg~gBPE]f>T_߰@lFxY慮Iq eG^:LRRH,ՇGIG,C喱I, 1N8B i\3<'(%uK3M=dwƆ5n@u ~y~f@W^V2` C}q%jƥgk0+hHy啥ta7wBӵM<|Pk |!zKM4=!N Ӆe,]ІT>ΫA$׹ _3wґ6g̫H7u1!Di}!%^);STh^#H{ھEk?lޡBN#R t1j|ZB%EN/jYBHVh 9MMU3.3Ϟ9zpiT}CqiڱCCwY6ĸV<[kʲ3]UBJQ1Yi01-Y,bYR9q4eImQBq:W^($E2/(!D\蚆, D!jƺHIXcDѶ-s7ۖ78K mN]6d-R뗬j>0 2m/o\Ql6\'2/nIyY-0[JT5zDv2됤Th!ljemNWH0 mdd:4mi ;. 
ȨpCt 5-@_չkX#Ǟo)-ݥ(\6i e-nL0vDxOsH_m_p.0^\Zetׁӏ%:hgOh%Z+a"Z`_; =H1Y"X~@Lk옇5-~sSh\7ݎmHnn@H|cI_m<$ez蛖|'7nu-\E3N^+Ѹɚ61 }Sa^_ӲP-\1UƢH߮0ƭYa'RDdK]Crس f,ex6*c{:"oHۻ1V*nbzq&e&/6T=aY2Zڞt{u]i=2J#y:޽|ӹ$|i޽m(c,H sKs-ӈ`Z]]; w\NGMN\QaR|#Mжho7tQI`jG›HdYN;,ajzQ}髆i3 8$OƑAn$;p_6' VOSG{͞i o 2"0L&LuM8-sUGMqyӜLخne4MYriKyVH$8FY0fukSƲ"06-J*[KMRlY R@۳T-}Kb_`8`&zLh㘗դFd C>f)H2+C#8`P8HҬ@)ͥ!E& e>˙N+|(DhecZuYVlz 8yb+`GiE+Y=?>__#?t&Y5?g*HËɕ5?Fʘ} r;"3Uq}D) Ye|O'O%w;(Nr*-FV{57/pN~Zȫ3{ǂ~.$FX>~{G{#쑟AzjگJ?_Oտ:,$~=q7v)F VKkNIP cOwmVQ`YLjy%S۳iePVuZ %2c\p7{OӲP4HfOkLV` i͞-j DYOq𷷈c",lf%m$cZ7|Yz#6;˅D85F 4khadn^xx3 ]2 vnT68)5o~g||>#o+=. hb]uEYM|n&K("1MxFXд$r-&М,m-M#x* I݇giuDK=T+tua3*I3Gj%B [LH=LќO0qٖnx~gFn9%Ȳr%/v\yJ[Ul &˸|vH[spp&]ӷ!8g<']d84xd ;gr?@ե"Z %\Sk>]balzjhIW7J$rqBƴ0^k0x%x\OlO%K7- OO,}Ӓ%~ݧl Yt+W48ZsU+My)v{. 4#] $` lvh)1!l1 R*LE}ӷ-&`6fG]]|:Tsc2]+n\ v1#?#K2q=jB:eDf#2az[K[뺭q3(a eoouI/&ax\hLz{@#4bk8LՅ*ەRfje땦kXplW_N,7xZG.P?>>qyN{>3 3v2W}]2N 1=Jve)<7my&S"x|nk6g{6oo9>Fcӡn FVʓ2M3ih|"4)FMASE9t= ~|fi]spqiڎ -D`|:bV0#2,$Dޣ\@맟OO!)$xoj#KR>{nzgi (YYmXze]y:C6Mֱy>ƞa0"gnYf"QKi @9'p¦g'ݖTB È֖V$3<lsӉPKh/.eu9mӐ k-;(0QN'sԧ jLK?L<>>b!^*Oጆab֘$9EH726=4S5bYbtYFCHLL] 9Hu/kEHڧ32 TXL(r4K >I1uaAVt$ͱ$+xkƭ7TP#!s; rivJh4'r.OG6!H_O79b+46d(HXqY wHVW[ aiZ#wXʚID`zk_3)[ߦpw4~7Xږs~z@$ y1>jzE}էHmg9ދ IDAT[DLRBJrXGHfCtS0 )n ݈yKUb^DH~= e|Ä%ud@a^pyD VZԴ& CbCÁnP6I2E mOzxE_X# mOX`I$Ga]R'MS~VjXY5nv,CJM-Pc - "ç;8$#5qCUD_@g1(O'o|A Hh/Wt!0k} Ӱ2i"iԴذ7; ZM眣#kM`c0ش9_^6~1Zhϐ&nobw;ɫ~yv;bw`I{"qYw4!֣"ܾFlva0/\'#C DCMڲ$p>]Ǒ礻Kڂ Ciڰǵ==,!ry)+D"JiB MXjiNY6LUJRn@")BВ]cנiNӀ}ExQa=넔9:>`9.~xGr_ j^@{ V*l2\kfcpxf?+jɥ,HbZeYrsxt!6xp|_&#h,g{ݎ(,n}JPlp'&= ⌰7ıÿcl*$y$ {yasqo$w7kY>m$ue,6H_6t0ZvC*զ)~$O =]y%sfn/ug,&5sK0w38޾bjg~p0>Eg9b,gQƉŮtCkkzMSq9&,{eu~~{sܩvR IY6-IVO$ˉ▼<%qc'v%$߳eYUM֒eɒ#HEILz޿>N8 ) g-,3瞻޿ۿ ^_cbbdÅ!5Z+Kՙ橺!ޢ9p0j3Jmn1#}1&HR1>1yQ5qSY$7Z,WI*1"7>K=Ze||eHFY\Z\*LSҜŧiDؓ) x(Ɓڠws?y9#'h.p~C裧:r Fh;4,b!Vo\ŢVkP:jI+Ds'9x++4)ij,>6 *ccQ)H̲!P7W(K@ӧ -.SirUl<+MA$ϑ %m~FSCմh~Z)RfEr1Z%*,]XPC/W"˫+y*K Bh[8ʣ,eRaue={iԃ(Y׈9l+J.VetlFs 
reP^k*WXXXd=,--Q(},.-Q(!VW(>PVYq"VE1Z]2TQS*piBT[!LIFד*$i222΅ ^BVATh%I˳V2w/KKTi6$IJjFkjsssLLayyrU"Lɋa'XZ^&c|P3S{'0HjR'or8!ϳ>.rUی:j5;tY;Os!ϝctdV #/*X\Z,! xD\@co4H^`2ą LNemujƁSL&i9>yZ{eeuB{Nj5Y[$.ԖV4uFY^]!.2"ϟ?OX/Dyt$ftnI8 S( @EYV ;Y\H QdU8š)>t =ɁÜgtt٨S4EE) ,,,16:SX\Xf}~}{p<'qJ2(mYl.\8!8G<⅀+Z Xf*Z@WsΞ9K.*#Opc:f֪\u}2 ff(gZ=#jUcϞ IS/,R,`vv\.8F\.&u Y]^flty*!f\^4MJVB1.pna Q&UccΝgldJFQV`fvр.Vaeeh6h[,0?8;=Pn'&&aRcqnf뮽'Oqup:t/X,VX̳Hb}j'9S"+"8t7|K:sܛrGԉ:q9IV%T=ppQq-rϒx:TiKLҟ#* j>s$A);7 H:m}CTP+综;wХZDf}IQ(Zj?}r,'?AN}Dp `/`9 x,Ge8bq<{L23{RyݵʯB[tM}^Au Ǣs7&FBD0n(IDյPVQ Qjb!MϪ!M@҄Dri0"˳4(9֚?<, \T(bДJF#To/Ƙ"EO1HL;- B $M[7BU<%WrNq^H"@hDDc_THh&)JK]:Hx&%if\"õͥ"f"lQТi 8qLjC3Ip8LSPHT2;Ll >MCD۸6uAucix 4LLTQ6k>T(P Y-kI"^-YU<8T'VG1I{RQ4"M#xGqD+Wk,IrO]9mD8 ˒+Gd3jf%,UMŤ*DB;|<HRGl r*P3@!60Br1faWZrs!BChR\"l)QdI3n]c &$88l!MPY!s @fFB]F` Z)cYpD  ~a#B!Vv$;&rk#6iUh@p΅B^hE hBX]!0U("MU' ]bCՖXiO/K@if 6JK-mP[o 4I 71QgZ)lå I4@Řh8Q4qĹ+X,f\*?X=YLˑ#ji#*hM" h7;owy!*_$`x =X"Sos !h=ЖlYv"f3dQ}l9nzU}(5*oD$mv@}»D6ThOD2UW%i+z'8}XT&\+hwot3MvDÙo[(VI!Q46¹4%sb:Qy QȆ2p4%1QHD#4͠ (ssi&"l%DQ' G؏l-h'_ :\ BN#>k IbC{@╜5VJ.2390alӚE l8 8+$ΓC"$I@| 碎|,BDyOd- "2IXhf&h4Ĺj4A3H %&Pk!g} b Qhj\S<&k hg] ؇g0c*vv\A1p`40h9 n(C)R('V!D|!_h0{UhYK^s31m*ڷbəg jW^{%j(컌k˟vޮh%{Wftګ&OVVԞ j4*ҝwwf`K۠*GbT a@LڳŽnКދS^ejޘzi?tӴJsnfȨ{oY;T56a'o{le=֍w氧Kz׷gC z^:O}Gڕ5*5TYhMUڴ1.yÖ_}geSv Kvzf |F2`.WAךu ^|{TeY:y Abo{Ov3u72`~1*xmMe!J=k2!l/[}n覨tLd"ϕ^d( hlWh;i(1{OuLvP18N>+W97^w@>z k۳{{tWo.#Qϰ3ʲāmi\kR^s2 _Nj+@Mb7xH(B<^ĭ/9t;6Y(,m}ZG *i zo6wx׺fFfZ]]]_͜FWf{v <ƃَIBmSĈ-I;[۱c#f m98Õ~j CY #}pbM@w$ZcIb6Xz ؄pD8ed*3*[k(`3z A4Ԁ73 |BOT0OבSYyP  *t!xݗY !{D \6^q#vk#QCOǦڈ9Ƕ@H@{6P)ɐv$(B*mG>! 
F'Ƒc].3:41ACV{=0A.kePNu<8YlOeб;{ Boitc"ɋUɂmCųvKOQ jϓ_A+ _!i[8~VCvCi$E}z Mn4\aЛTzJ:;fbU&{N m[tiuh7@.b uev@7C1@`~y>7_7WR,&~ORwoB&SUI$ Dz_7o4؁36 s2?uλJ,~2_΂WQ;= cj3=%gaLgoP˵3R{YD686>64cQC ^ IDATCvVzWWܤhfc"!^NѻdcO/ KgjFU l /]F=\b ǯ:5IN=d6f҇ʾbz ^`%1kk#g~^O>ȣ|*5d..AQ?ҫ2%(Չ20~tm g Qcr<yV+/{/iD {N͋ᇿ sqAmspdH=A;MD/n-"ƼOq}t#]ݶvn2&X>?C{׿@Te TЗKF;ų}\) u'p3qpݠLlul5a+) tO>g|nuf[wr[,ĎHvcN7ך[롧~j-sdϾioqpElBkTJcq% /+'h<(uE4)cHQɒx?Y!*71tcP)s׻F[WȲ F1>`^Eq|bkXy]/lE]ʓEzC/wbo:vNJWϰ򝷾?珱YtMN]ƞS[K Q y^t') ދ܏SRѝޫ(;:zOE6q:7Qblzef1݁{Qg~:ˬm?u R^wy1 cl]\gsTEyYe{e ۻg[憓^!G+;kQ@78ooSAק8e2r]v`rÜP睿JYwtCcSeUN-k^*P 4 Fn!irE;L1hߛ=WMՅ%f 9돠I|:_zZH;U޾lېm)nbo'i-c2G9wpg?lQe ![n}-ˑ)5| ?Kϗlkgy&^<ԑ#Z؞m.; qvw{8Lf:`ôxg2g)sέQpckn[rL#_T_bfq?)4s-fAmʅ9ĶHmzWLw_\ur}WmTjɯ\W+וk,'=| pȚTb 8Qhmp@ yJ)(N79:Nt𣶳L9C1t#kKꈬ;qQyqXl,3^*+$̗ff.iB F! 7 0H!ъf̡=ys/bՌ';M?Cg̃?Ʌi(աr G:-W߂JY#LQx҄\fih}˴dFQ~|/o\QqF2DIoԦӝb;r> WsF/Vؤz~8v{o[}{f6np ߲^߰؏B HBvSߦ%{OYzځd1a]WŦݟJF/ށ)t1clo73{UrKFsE^6} څQYlӬ ~^Y3*C:%%jUny \!hh!B@l9=5Ǐu$iJ[h\lȞDnhc/&}b;@Ch8ӏ_.m*qڥ6z r(7s~.W r?y7pxpKo#6)vZo=-4_,V@ EjK`kN_F'e'Mۭ7e5, eo2J4O!%;_w:}O[ RHDscgU$p'Sqe/bXncNHwxWgb?JAW0)decD;~':6]`{\ݯҗD3^8t$ =!0>y/᥷|N5J1hvu2L'k Q?]j@(*QC.q4e-JIMLFߵWZ`kU)1+xwn!_\W+ߕkT(s| &J8|t_{5##Ôe:w|1Dk 7S}9,^@ofTiS^PI Jr% |Id\JjNMQƟ/PJkm{u&Q G[(9t G\!ǾÇhJG$(HWe0dy=B98ځH9j9A CM'2g//=_9u7|l6p i3̍Wqlqqbnde0G_MR58P5N<%"DFbUtT*, )mT@nu4kf&Vu=D)Z|w\S ჳVf:/)OZ3 3}0W+וv`k|)Wp@f-{F`E:Cyk_:^quQ,HSG>c$MY^^|><_#B.X_3{03TBe"jH+q\>[Z&Ybȶ>H)#g>h2t[v~H4QOEvgҵy^sCFgrI\:+f֥hŒ0YTǚ˴WٗEx])VX!M\_YEQb Nfxd**!gt`6 0 Yp^BxsBl_0M=#1|ׯf}_;/4EU/B|e}!x>˳ \s;ϽpyQ&M̉|蹇80MUPJȁs xLV[Jou|˝_(Һ Tw[߾V_FE|ebܥQD-b^\\pC^KA쥙6Rehn}a/>`ҭtl/xNf%8fsՉQO0< z 6E8Av1:g<&;_\:'O"ؤ^u&) DGT{%Ü~=o㟾,/|__=S;P]l8Yhs[U8)j#'/4/Tup-QDånŲAwDʝTj[Lpص''ëv;p IÏ~IfϢlm6`Tqp@4o!*UARԅk"јs-Z"TOnǸ፷xX$3n;]־LGc%Ɗ xWS{~$ k3D OlYl&AR!8|b1Ͱ&SN K /")>5]qγϨNkjZ^|-/-eDp{CR8gY[P$zo Oёa۾|ɭ|33(ƧYVWx5;S~8晙S\d~ skW~56TZWx .VlUĺ07s63.*gnx>G/er=Dd~Url|r)O+2疊|yF_DMKIBEug{vnH"ko~.O)yW杸R Jx{E4C)b]H|v Gk q ^39K;p~PQ4{')U wvK;Ώ~?14b2dmD @mMgVe 9*S4U^ݯ?0Y;-  tĢDHjЦo/%|/憽7ob? 
My<=N~=YN/~Y7*oG,t%+EqPP iֆ7;la?e1e"[TY5!ᝡƸ~Cx *n4󖛹mHy G+gHvNv ^bN|!gzbC fӵ_;5yG*c zF0P\kmkA\-%ʭcE*{XXSK4a\J+Yg$LY/ [[V:I-UiF9`/ϱu0dNB}}xƇ)Orn%@!W~|//!hЉؕoYh"e@׾y//0޹g 5IVzExv=w`sz~cA&hl3"1n?r .mC؂+:#5y u~ +uZvxqiio'<,W dv`0T;^S&s̳Rs}E׎K#)BR!sM=fW_O9$$6񐁦;؄t`6ĬWmixޥGn(}zG'7oqq8U~/~U_{EQc86,uj5]+KZy%جs/}hw://ECK뜩 ]R{g=[6,/Fvphž#R29 6 >AEL1]FIPi"> \+n;d8o?s_|M)duŠ{;:ʱ]x[iAi)w~liŜ;U#Y)J8'o]HYq5*{"&y+"JŘS\wq~+CݶS@37֛Q4gtNP*3[ΤJn.:) k;hE#q&FHr.; U*~'i:Iv[}rzcZv%yJqQD2+n ?KODk?mӉ%* o+%!R٠DcD#rՈ4 {׼^RV=/z~^i Q%1y4%0# ó!"ư?s'F8_AEQZlSu^<*.D]ĕH/]ŜmrzG#CyR,10v woRhs )j9<wUX[d2]O mcAGxS[.Op6klW5C Q3 ̀AF9pGеu/#8,u[='/g"Z@Ů!@\TCf [uKL;ppvAԽkV6xEJq~pw>CcqfUEJ<C=oBEahAӇG`܇<;Hַ8W{IHwwvI9Њu'x/#/$1 'fzx|a.'{ոn[-S`F:½alWf*!a$z Gq5BLרXۡ6^{%u G.[XY3j?Etuy''=gE;0plzSzwB]PlS) r}4˫+w8Y&5c(GZe7O*K $ph=Γlm]%iFy]{N{[3cU 2fvNvϾjmyڡ%s-T`4 sߧDi{r]s'?|98r={/mlyΩRPmY]oN&k^<~iLaQk4( hc+Z[.~t~oU[z3֥TWoڰ (AJ%;DRYs|j;\+ޤPPDm~Ai`CPL[z${L6ۏz$Xbq?I~W7}}h!}w$_yؽ劣Z_hgټW_mzFv }}Q}+~#40PN]@%;t >5` }x_|ip&* }f\Rc8< JTFdc:ef[()%wQe /{NWQ-n;ss-.AdE-m=pEá$bBQeGߧJ) UgO~\ֹl璬Tr;;phs6~}N3iZXn= 9z(%E(ELFO#vҽ|TFkv|Cd G[=>9wRh-c\N4X3$ՂwHܱD|~~O?H1IYHX{`_󟿛 AFuI%f%(U$rJ\{u6}{ &hObSCz,qΡ O>i!QԱLOۿ׶~ڦNV;2it{kcB:Nxf='󝟩##'7Ȩ.^gJ}禸khFf﹧ݧ}f^;5KOhˋggE,k-xAuCgryM36w0iJkszݠ.`K)[Gj.j"Sc̞Qf:63|[=;_fY#oOOeTxmGsM f7֍}KźϤ\/fOH6>@]'eH-~ݫb'f`x2d&1YN94Zr}(>n;ˏ->R3CM-l4[ں[^An0<J>1uNoKʧ4wP)q~ 5Raz,B &"5jc7?/gvK=@ щP&g,t[kOSMӤ&dPP"JR "W@gu:::FDqL J&h]rZ9g ]-:}n=OM^[~~ecmD<]3;=#THGk?+o"2b+謙67MPJ۴?<1`}De395!PBWfvj W[F`NF`lLO;)kQLk,FԺ4fIrlD[3<oϨZWJD WuFӨ,# i`hEem!"p_YЌ sENm][Uj@r_L(89|#& WGI4rH9kdd+< NB9*]J4R'~91S;ѧlE&U]6c{YX2OF]l^bWlv8Ċ9S=47оJ.ƛrH42[j-[FG ׹Ќ0 1Q|_F州WQ^Tj`+Db^GVHj,n`"N@L@p+O Žۉy?T.anQD4x1+)0K ortmQ[D< ˎzh#4uײ|6B3#Jind\7ŵ,\ du;13b?ܷ:W\dB.PI=\knbtoi btN$"DaȤwݭL{o8\v%cStTOɟ䨤ۄ.$eTJ_QNoxy"d83vWqRDZ&xcyF68hF %^܄et;@T qܚ~&)`˖m;Hq6gr3"z1" r!62_ǿu$}.t܁2~%:da|Ɓc u|=~s/!i?``h\b y>hcT*(xB@P 85kLBa6a2,aXFIE9,kj" C(SB~+@L$Pg}2G!ͦCgRzgØQcJ[3I3s(m^GHe^xpFB ` #5x^K ZSC;kW؜燻y;k/32v:w8ͷ b{G[5 ڃ"^WϰS89[\SȀ;$/:۶Tz;o{X`BJp 1FG0crM^P7S (! 
PQe~ev-b2|,ibZ[uFBIIVc|tt !'[-ög?o_Oع};Kg.*1E)$&nfW(qHGF)Ei~`k @ǿWy G%u,au(ϘT~^I֭ .it68}'k6=4>.^9t E3dzAfȤ6k(yFo6N F̦ZaǾa"EʙVu[)߽d< D)g"b*4iThc\ĂA;!\KkQHO)PJ#]X8!N;TUZ6Nz hJ%|G*ǼVjV #MXV ׿c*Fj ic_+o@gDىmV)U-;P ,zz[sZ1{MH_$Nk &~wDV׼=˯!Vy CfM;t*Iǝç;044 4xNGE5!Z~Bx9{=@L@TAL0 `cC̙4kȱE\k.^$X5JX9R)l[:jz7m ߽|CxGa s kisO`yR:t|Wl޾},g/_jR 8xIYZ6um%2ظʞn^vf.>5LRX{JId]EP+2FvOc夬{1B貊w{""ފSy"C&JAdjҼ+|7dⳋgqhUE"3"Jt0jfW{#<Tp,ӎ>ƒmf1m\T@h> ,G4ޱsN0,\e`YInC!_q;OtYld30):) \b&ڍT K#!68ښ:ТK/%Ⱥ/8{x|t@(,bi%,, , <H)G`FK0%d)Eb='c " tŵ' =Sqȑ. aQ"EQRT y^@:ܜSOCqۭS{}{JIeϧ޲јxb~`VҦ?w)qB^&O 5Vb2",!FFE)!4”I:ʓ4TDH||=/I% fLȚ!VAr9@1KV@Kcw<5Œoj{BV?O~&F)xX#Q%TRlX+^dR4>o2Ũ| C֋uR%ƸQ&X0F031gs̱G_Xc(q1z}:`H+iwȁ 3YSzH+9lV3yl7+jMmi<^=rLV"Hy+_GJDl [V]H8( lM\[dlPyZBto#TWػCL; SS.~A%ޫLED PLtbo|*OAkSWs6s;'r21ypbOޱ"u4XK ,{=&Hq*Ew򎷽_6cH׶3$x5"yij+E$.15ra>R fM! aV8LhξEd47]xU,lņ2X "0r#gǙǟúk(Ra7m}g]M$%2”PB#lTD2 MUNכ!Σ744 ƅRjQlQx gbc$"glHӧ'ZBC9[=IGВcxVabd< {^u%ܾ`@TCl/`IrfsxB񮳮oUwTa@"Z0)rr+#koæ bTCNzBzԍFiFuAza55g=6Z1 =+v+누 uxf%ث>.ʬ@]I(֮uul+Nk{Qk˸H:9m~ CCaiȦپ  )>py6oSݞìXݧ_yvQX\'Yv]XT<>-;7\:Z[1} CC4fs0<<#?C?cO=ͶT"h@ /6t t\=K 8 T*nAM*`vn>:DQH*MO5JhB>QLsDeT:R k#TU1*^BaMX`ū%^p6Oڷ ZRk+{Zhtݣ5mxMO `e+el'$@8f) J)FPFh=r%.[9shll$Jed3S)ҙ4|ۏRZO<ζ-;{q ۈ?:2+ӗiڌ2+ Ȥv1CfΜLhrv6[vICJ(VPB:+xr*~);M&Xs2=RGg4^*cz8hGsc3s /FX/v0^-Lw2$e)w^Z˦./ V.Ck`woȺ ܺ=Oncސ_]?֖],XԆ#`cg̤Z\|xJbM㗨8< 'Qٳ|.z"v׾2q]'%#! ;U:[5?kL>yݧ8#[0웰^w&H"1ȫ$0; {}~֋eSVovトhn4\z H+vvU \Hd-%jm5P0=쨳&Y4@|. 
7?QIx~jGce $ vZ I!mi,)fdfqQg}YΌiyY7+΍CNo.(ƣ,@Sm1D-2)@j4Wy{GFX|1"`sdpS*AY:a 7gZe(-ew=/lgh^zI2 vvEКjYaۯrcx˹oMS8q%ՠ*4>m\sGiC:T(.8߰{zU7FhD5oM`-Df h;u>'@M]C!: 1`#Lv:爥KIRiGKR%QU_ˎ ^0@h875pRώrɻQ5"kuzV}q0 SZGʵ\Tf[k1(5Zha!ՈJH/@k3m?c,=hf~ϥ2 P(k)勜yj*5al?+Yʞ$X)el*PFgk 0ýigY8\|ծl!0hf͞NYy4wq#/ aLoS<}}B<_UTiw}?DS0xi8cenmw(?͹ˆQsqBLk[W}:u23gtrI+ieP2,֔Oژut.N: ر};=x?O<4EЌzey+hƝR\p}Y yaǐϸ^99=Q_HsR`J5!E`bM9ʆl@Җ6=7nzKz'"Wn*m;k@kZZZ pUwѯxiK?\qŀXzd6ϙǜin&Әcsi+4ZbVit'c" Q -V=LWO9F>s1F[N=etĬ%^Pw{W=ԥP5#TPJ*G$$Q4~oG#< bi}+f& ~,:;vw.ѦyUOѩ(`U di.YAg7OxG瞻Uw_ٿ7N+l$9~r|׳kLMYXXQcÄ ^16I&YN'/>3.FsnjFp5x*V$畦q|f꼱JLg]p9a)X%%"^WSB";en֯ pKN_wxiK3?u%+1los_y;[+*DbtI#5c%lT:[U QYj=>`ӥ+Z;@wb.viEʽfjB;~bW( 4m8r)X:Zg"-O<{;7QJ>UBR1+(͇. 瓓Q0eQxm\O,x 4uNgZG'}8xD&6ޕJa 03cpw%1qJ^WW (wGQ(q\WX0}a~fFd%FZ~H^{E~q# M/<^@%ٗ/G%h(c(2Wfl|?=Q_Zr;A3Sd4]#[(aTL ȗ|oz%dti84}" $at3x:;u msoӉlQ]+Dhoʉ Q :ZS|C^{nPش"`oa҃ *+P{lݍ@`Ft0@[Ct3((>h2*ôqy_G7S6̸q.1tTgg[9j9Ln(ƀar`>]8u)b>8LX!=xM\RWu9&18&Fii2ޔ:0c3>夕Gsᇰa@0Ϭ:s))~xFn!eD2p)H]-I+͊#=ƬacZLbƠNɋDNQ_)Q+1a:HTtNkUp|#7Vy0TX!Ji=}wjvx?L(ڄv}E'Lƿ6~@XsLgZPARdRHP%,Y Yf~tG{9j\iSjY1}FxO J<˔B$%)B~pï0|]1:ꌟ %VVauuɦc[(6?5~P2Fc)ZTVupbadI=0Hciin:2֣?E -D W1[tcm˟QŏKƝV3f5KX*oLs1%*^P&QiF*~|&ѾEb6ZI`i9q,Y#r E߽'^z5w+2ryϔ^dMѦƐ\|L_?loaK~1[7năȊAN]6@A-+\bgk'dho7*?U``2O5p0Y`["}_J0?w:9okWarWQq,#4J"f@O:C 755ed.;y1'q訿JJp4 1ZZya:凱xf6{vaaY4MZb>͊<ē|kb} G('?hǹ>qšS_`ƴ8SHKFrMB L]H @P>3KC)>2@؈YBra+0)Ǝ$;P6b1),Y ]OMn) 1XR UM$NVGHVGrO:ơ;lƹ֋ac+)!.KEᡰr+hkKcʃHcQBHt>b\!} o2{>}g랇P*CXH!iT<;pA[ ǒOR|̰da2leT M~^V҅걌 imiXDyM:P"Kdao!{z'h @) ~u>#@.yqW30NH!+|w -'p b `5zOfa-Ӗ )Dy9MVk}At@ۜ\&q|36k Ī!q)Ï>T޽ʸ4wdUmux%`hp]{XC n`00IIgu\T-eRgv=ǯ{Wu%7==GZ1n:͹v:8Ξ=!0c\&&͑.G=!ROn~!b7}:]z4l"H릦iMޔؿls};!h`SC%;@3M9y9He\sq1+&bhZdUyRɃt0&;wnAB >SƝ+&c|/a6ǟGH4 WЄ{1DȊۤKSY r@Vu|?ʓO XCwBƦFZ[ijiI#t޲Ys;b >uq~f-3ΦlIp) َVGɎᑡ*mH3sf+sgt2=<ȣ|?Σ<ʶ3B0ܽzޒW7;=[gғFJBZ#GNCRD&*Gd%%s3"sLWh<'hf}S.U DUU݋ۦ-mx)crP+-C ^xr øSbu9N;T>ϱqz)}] eVF?dY/-;\L$UŁM*D2ego76^UWZZQU˥J$\3] y"F+*)Qo>,7rɫ,bfh:նp朢]lԎpuXMV5s P.MrB[`tCwI.&9S(&{grڊ2VQn7rCes?s8kDDdؔ('4ŲT(ډJU2qi(/$8Icb E\$ߐ%mænrLXgV8Yj "(7rC8سX<{ 
/1# l/`PwCF,CC\a&s0f iB6uKlh'bvEY⾵%jHA˖ٵm9[XN ̺;^MV;$D\>DzC6.+^h"tņN=4)!lb玍uPDN#+SexW~wEto:'@W?Gt6~?ʍ<ՏFcJY.>M- ;oPv){eR7p|oϠfrF8@T‹ΡA0y/s[_KQh8$Z>_57r_OfV'3.~ ]2e$h̒S r] 7cS(x{P"Dc)}n *DYE9ǶD=ÖkN;ٺe+°nLK#On]K:9Ćލ g70˒ =f4t7C8e7smwꏫkȡ<m*^cgJ%1:at jAx%cĒkH6q}44"g -FXtp+V@AZK H JǍ=FS= t:_#՟%m/}\H|JYn֭w>J1IFH/1%7wme%okb"6*^̷:\V<&2 $j8,|AXDS)|`!_#}JKv'yfǪA= }Soe+>Ȇ"]r~Qr}).sB[CF]}`lѲau3a_s+_iG^z6nCZ=~>yqYMaոR4eG߰B!lE;ch^Zc (uKGF^*F['3R1֬Uzqv V@OUuO_9OQHSyP'qEYa1d᲌+Ī {-I;Z4{[x͉!K~7L 1SJD=Ya tj!!8 5X@ D2*{|WTUOV]cLsƐ|FҒB1>5?AC8c9YpZ)!vpۃqۣwi`?%5dcfI60+,a'4 Yʽg$m}(7!'. (\Fx e eZwrfrȡ Ed-=,sRVY+cMwƘ]6i]IrѶ:KkQtL.޴uG1ю"O=NW_+>|>A9 @3+Y'.=46|)^s9$o]nYpSYqȱ9iU_΢)8ElT4!%i.:E>B+ʖrDn 7ӉKR IDATC,v·$cci_8fR2*oGE7Ʃƿ5SB0!ף}IcFqg?G}A3M92M<̳tJ[yadv6[~[ A:D.yb/K`Y݇1W ߾%BӕM5еw6MO4":1KcD%ػ-2K$d)*A% bUwt2/ʊ [$]ٍ9h 7qUWvf~a6~L?tF0ZdS4saIA*M'V{(AZDIS(\'SXljˡ`q.urɜ I2)?qaO{a$9LLg`+XT* O̫iC/)+=|K`!:Z|8t̡0Hɽ?d֌0 ( .eK!hP*`6NnU2b#}wEZ4+SqR4Em7/sSɗ?󯥳i:r&RAIwqm?C<"Q J3& !=$OO#0>#UnVi y/s+/m~Wy:: sGo|bjdZqMF<.Zw_I~z׏0HUn\v>d$90"YUQ֧ jgK; :XO9UѺ3m`5XN>cNi*]rWFKJxCmL10EcDᘌ5r͒e5 "H$G% U0ZV&fUb |UwXicxQ8^;VlO_eP~/&z.b@)LC.TD9*m݌FaS7^^V5OEh+YEbi+,TsҲC:qԒXyę,<_(JQ{pۣ膇ڻWx%dń DAJbȍz44 ױLUi4"9y uXebkSnd,(oKy[{J8&aSLxr>h]6v{^g>I*㣉(K ?kblMÌvZٸw6ff 474R((ZZ:-3I/$JUQ\+z];*U@ E!Bz2If2̩{~}I&?I&gf{w>5%O$О+zd8NL!+ڻ ߼7F1͊s^3s;1"dZ(uK%+{\  yoIѺdŷK%]Qo {. 
{|ME>ksߥ96-{;ۏc[OH J/tQ+|` l 0Yʚ09 **Û cX/tyqê c²Z.L-)mn2<432h"-Yu\zncEs ep)R~Is_]gZ;bXz 낿KѸ( JC[neSg8V{J3 |i!BK)Y5t+ m8N%4c fGT JThC8A|b, KxeoBn~0|[c*Nte7o2Cb?՗/ p}_ *r "~HvzfU@4Y,^b)q/ ~Ǡa*]op #$.T1ÎR%E)ɚ A.PL]^[jf ?᜗2Ūr,R#Px>gU` +$"."iN0L0PϬ3ʥZ{W'f^_lf~&vk͌#_Nc/B {IJ)YU>}[̙3,E||t# (C@jxli]g3 3حI ˷nfw[?0-L-lBzo'b9D @n7zr%HtV&Kâc<ÁRVBa psǯ ҊlA9 ӮFVj D4A-Jeo)yRuɤ*%S*jKu;or21"<$8 S%Xx($^X 04T3mζ#8`lYC{g_~8GrHˡ}/[d< ,?%VDRk(ւx"-nx'?Gٵ?W~*)72If[Mdfs8aY|O0!px~ӈ NQn-:x/\}U,A2𿍺eR?PP*vU)&:^=):U1nWձNFOPHȶP PCgZ6K>CY^cMJYO|=k*LڒV嘥\_*do|BpK9SPxlB$kI̝"wLMY0kC$ZXNB7$yHn4C2#\v߇DSd}')>p1ZZZqm_[^cY|w:Msz9 mlJ y"/ئCQy̼#gttX&=k?ϟ|vfϞM+Qs.b2GI7QE\Uj2RZlU*'RJj^ս#X @@l"=mCD XYjz@AdgXIAPP)tt Z ?ENRvOS*'maT(F >H3Ӛ_#\#yihe0g7JlP*AZI|Cmb*wK&Ji%YFe/RJh_iB)ۨ>g a}]Rx^~)xr㼺i}y"2[Qnլ111|MHpٗgpНe=NfknROJ˷@9ɶm̝+Vh>mH U/5-!݉zΐ2.kƶ1zkְuF2Lj  2! ?v+',=!^YzcpUh]N7jec5.d> m Uק>-o2d@,-=*Pt x`"v"킶FDBiDu)+ -Pگ|ZSFRڨvUG-jP%S2B^LJ^%ʫEXT5N59RU:P"@DEEU%]Εz:,$A"L/jZCtܪjNVu1ݐYe ] oBG'v4UyQy.}wTcdLj[QᲡo;}?†WPjM &&QvTD^BiGDM(s_֓fiDN{\o6lcEaM*Vm~_ [_Gj4[մ'z.Gs ӣq"6x6+eYu%O$= eލֺXyIof7"tS9Jv 1HA"=KVoy-u"{ic>imCzpfۥ5ē=e쁟In̦ɳqYƏnZ2nL U;F!*28wiŹxu %xba)s 3,{~z7AŸ `"|őT2ܶ"톝!VdrfKm g#+,J|(8RQ5҆Zy˝/ .h "|Ó.7A5K 6,Dθ;n#8i)$kV"kgBtx{֍ u?K龜:+ NBrDž< g03:lG8ui#2e= ˤ𻇇j+G2g5]Pp$3ڨI"c >>DBKKI-0F=<苔]@^T R=jZ)!5Ǹw^ȴcNŗ/Щt}ϯqei4w IDAT'9eJcy=/ \6N~ e׊C[_ɺLNٓsıl 0,>Ȇ yşȋM8K8 \=:#o[(AA<# qy'8"K>IGgѸS ~}7O?s w?u#.9gu$0z++*jjH zRiL vBe8wp#BQ .c>'\$7Osat8MĉEM<8+yfwښy< `L!<ѐVQ_`&yaO~OFS(*,jnљE\/l\LLWJxY+؇" FtPhș9R"њ㜓"(}G7ZXx#3Ts@ !8#A֦N`Iy~$q{EH++y`h[iD? T|,vQ#l40Bu,b5uEPiFGFr.Th6w]P d{Rn[8wt)0 /5hT+ngJYbO)QLKpTC&^!YoWQ|i7+y6mldeGKy`)lob,8P (1? 
Kך_yܖn&c"wAP>U0O{_z3?'^y <|qlrq E }wu` y`($~,PՂ;z Ye r.go%29hHAŖ&H^6ҵA_Ud4H iITv)&;LUVĤVT^dDII4Eu:CT3LwOp,>Ĭl!_I>XpAS9G]dA5H/7u.@@di6V2{i'B[a 1aH(nK7fqa>dXRccXD-[0kLfEhɲ'xާ#>2P~%Ekm+{X.>1;KsGښdkjUp1`fgұ9.l^g҈ÊMk0 Χ{k7M$h0y޿)hncQfܸ Y|inU$?ivQ#!h)1pËr>,{ZCFQVp-pBJۻc( b wB35]qsWQwrXg.K} (R3p"/岼 |;SXx'="fO#f9( 0@4} jG_BfkL.OO6~_yɗq&bXLok0rosG;bd`/}Z\0Q۝Y{``]WC…g>ǰ4Loql);1KOgyOE9ye3}qs[ѾKks38mV 6NehoOy{˂ hcʁsc?1Zc #uE|yqW2^pş=.c؃s?}71JJޢ0uuU{Z5[T=RGS ]ME9A^WQ*oED3oxx~$I=̷/ſCf>^R=4lE j@!t(&65̒2A54"Hfc9J.IqCNܪD% MD$YI_3|_%re4j*R#e  tuEв4 Bݭ".Dӿr3>U;yqmF"لyd3YdC@Zr@ Wk0 ) CG j-)ҵ}uHU]j 0GHk3x𞷂 1E9K'ύ]ˡa^v%~t=DL%%ln~PS/W{UhsU"56ډjWhh,ش&^Zou%bBA\¨.Orbͽz40M ]5(+zg_E{pȼY&оxz<ؚM \$=(x(24j="t29 L1͐6afOs衇[dLm8a,VSbmb A*aZtR؎֞m4P,am(.bqqFFfN V?2[ba0#96{67!)^ǫGxqY-d:;)9jz1&^D:YEcz>Hr$?8oy,zEw@Jc.$5І@:SS8QQe9d jmQ 0PP骤U^-2l`ii ~)iH90vihhKn-ʧo2y}޳¤8 Ilw˾ŊmO`7.プys!@҅!`ɢ3l_Qn}','nM&7䤋;ꊏ/qi-C/8:A粋H1mc]qqb|#4t9*0u!v'n唹8I}dr _X6&.ZQc1c:c|%a;XU@<`/M9bx05PaX$Wiijg> BsȞ200piWi=&kd_Vksr7V[\L~8&* ɟrB!kwiI-YKe]&ԪRt&Ȏ]RLMc=c&w4B_ k6 Mdbs 4'%lM' X H<t{h%{b#U{{u%wеUP To {1RgLAtӈL&Ui͆4³eB^N MftxoQ;ٚ+&Gb~/>K AZhJXX!&N *'`daâ\9m",Ɯ @>i)ӌ2GJftcGlUXeR\W L^@Btaa| /Y ;Bh"QfVf4wݴS 7RR5ßﻑ.4q =0M(ynq&8yS8~ylcXv-KQX < jVZ`LV:;LZ.nR11Z)FLoĦk})$ZinGuE;VhS\EW4l$_xi3cNF~hU=gO2 -{vY(\puK7W|J\pyXFM][Y1҃%rt(6n‰EQRQ. g,XU֑lmF-xEN >Ţi>0B>mmxX+Xu}E,9x6cU6'm[F~Sͼ8fX޸mt͟Mb@7nH,7JHF?}6ž1" :>퀫Oq4Q>x`ʟb/-9- hͶӎh+G"-GͫDP,-QҟPa,D5X6/קn=#dc5 ô/8u̅Rin{Vew1Łv]`vfk>M*`f[̑ T1FeD#L׺/ղTa3zE]'*z"Vn,^Y brо|[[XpI[Q0cg QO=-#ؖAS2uNḒxh¡٤ҴvNg6E#[G)&j;sHc$L|N<#RD] C9ikU3hhi] cbN84 #cey! R65GYIxjGyK#z\kzZ=WVP"&wwNq*FiC߲+,]=޹PoE؛ڷ\hb}RZ)BRM`KQ/AtL+`VH[c-l@1FoygN#]rΛl%k຦M,[x(1Uo!Z*#JA6')k@Zscn4 ȇt'9CJM>kKp !p,߲Zj  |[BoU9JpI]2UhQdFd(8Bq18@A`nbM ۦoQ-,J5yUAt05h̄ØZ 2 Rey<vJa]P4Bo$?UTU U@f<>OfZgC 'hQVm^<2pM/P\1] GdE]@Xe~[h~r㘦TE8i:=Frݹ51Szd^Dz3kJDOhv*C䅇xHQff;pcy[C'B=P2_()ʂk}@ (P`Ȓץq? 
Vj,vY|Y )@l RfY3?$BTI3u}B"86 J1(r&T-|+>y,Z"]L0F̱ŞG, 3k^c6#^]Ǣ`]Fr%fF^ض&J{lCKS mή.R) efQZhJrZMvupx[/g5۪г!5{&CKMk1yW1«O{Xzb9ɍ3izw)l+l2-ֶ1s].hDol>o*Dl 5C&$!c|W޷o_kdq痿֤-[%PK٧pU_㶧NK[+vQ)Ŋ9|׿-FjuwX&C3&URm0bTJ #w2DGg`YIwR`w:B!XWA%F%:[V`iE }Z|ٻ-CT1y!;xuÝͶ/eWJyF0w2Ҷ&Yf- IDAT/H8~`Htɥu]LDH+Ic2tU ,9Y~P5ISso?,L<#ɋO.–~UMԺ$PCU>ɴl^-^˦sp։׳xcdT|0lgUMKxGXu%Ws94Iʍ_`C4ƛHI`\F6bb$dKZ2ю*BWg^h&:(*4!|3Ƙ`J]e.DKMSNR*e;]V:u-Ut"{E״JVΘzu/jkx,> w[WxcYl&0s\̟n%H%W6?0=,=ˋ8kc%*W ڒdk. VoD[3J# fznX0D%ڬQHf(d Db?wbq''4$Smk>1j{O|QAn7N_`h(o~'nsh s_| ocN_8y5=}>˄5>^{P{;ε-(R{eu Z-|M<=u}) {w2͊@Ȋ)Vus %eO_ 6Ui}c"8^c&FŐQ r7|(AL 31S:O]WIڧ328@{rx%I*- _)10%ʯ0ZEYO!Lb+|g@~F7mb@4$;R7Iw޾z8;{q*RYݱM|kpǓsңK_NB8MH7lsEdsÃesw)KXN/ttXDrqvWrי/d'$B*""UkV~~֥UZU[ԂVp"* ,$-$$$df;srn$>s罼 2P'%)(HYfI/mX2s)<a Av-xP@U$erQHXG85zRPn,`vL8BWqf!C"<62M_V d~*y Q kL$ibHjk=BR&jJ,1ӨX^.8y|>Oks# 30äCY޵KYNcDs&9gGF^NנWҺ!hIҚ0=h`T{{Ya%V#-A\}`sNg t\B*|ҜǢaDo'U(8#/k&mxۈ}HD">bq> ,h7\÷c@HDWFDB/, >B(.\Owȯ>k߾Ө!A][Yw\Nob6⿂DO!QAh)% 1GY[ҥ L7J8x0eqG3DXhcY55}w&v%K:io%?U`zt`ttd:%wb&c_>moC֤sJ5+Nu ][K8ÅL{S~Ǧ}=Һ,0qt D 2"MF f#[o jLm, Fr YslksϞ؞fz|wt k=w`.AA^z9k i}| @<\p)wPP7j9}k}T=A`D4לW2l>o ):|bšL_=5pOB_ 삢YS 'ox Hza ".ut0U!WVebTA1} -m-tְG8뜳y shg@Fhf]h[ qB۴Է. שr"v"`XswsV3mEY;ン||4f#ZbF"B?Nj]}Z/N胍@.7pۯehȫ4C.le{<=u1NCߎ6>t B #F'ItQcV$ ڌa&XIlu=1MIh45BJғiLDmCCصԞS؞QZ66\88ʋQNM|`Y%kYx)AZ$R|V{s|8xE:s[(|}K9c4\&ҙETtTs2:2 xzk۲cbY9$}&ïF @ •dd("Jܠ.)59'G ]] ,ˍ>rfmK#5c f{vuw{|Kp~mI#Hيqs#?fxp ?+,nk@S(+05wEfÆu|[8p[UpuGf,[_}7w?y'^ULIz=֬^uvn3ņv𳣜.F]:B ,FUd"W^ss\l6C"`ddT]ko6]YbSVd5==ݴ5jc2:[pyt2rI~rMl#BHQEX%sZp~E?]jØq `z+t*u [w`#\TUR3OtAƣRZ+,Ǿ1NX!EK2KkJ4ibX2! i(֊xETYZ .P(x?0}BTPϚVOO8J[|(a~"Cf`eS@7R"ւ<q!hJK;w ~yWQ()caGh5'Q;X'F~A@.>JHmCck(>wjT}]5349A+PZ麡Yi"E bk>_\W38CsZy'֦{׮6ǣ7qSN3)sh#T+0SzsGTE2 nL8chw>5qroW"}Ek.ao68[DaItmˈX !ܐ\]`β"R8ڤ1+}\0Ah"R.r+UOƊ\%̃Lf H2SI+6ӾϞ VG0(T5Tg@^U32krs`9<^dD1 ;il+Sv`9ṧ/\dL,7c{ i~w_7x.8a5COuc;i>Ŭ_>H͵C-穮nnx3k:5V#l4`"/7p(dUy%5r@$h?ŔeD5)8b.0aYͲ`ժ:uX·I>#?ǐ8-%V8NH~P3EX`Soz̳0t6\EBeDJh︌"@̼GbP;AJg?B+؟sצ;9yiԊ:L`ǓSkO0RI Bg3 PB|mipw7qy8IFQM[ɜ8\GoT5348Nc}Y $riYC#Z Sjq ;. 
)"fq:K[k+˖-g`;'1Oy0EUm?uə\~ {oO잋=c=cU< h#Wz]jJiL8RN!th6-E bbָŋCm"Rq3+s*šXxh"s;E_֗Yp:6xqL2j,@@# *NRN!0WX^}5`80f13yygRavS D)A%e*\]5W3t  ϙZԐ$3&3=MW~V>3yc8#pBZ; 4<+mCز"<)>?}V>s֪-YN>?MH$;wSHMly7,&bbeIdTrJ[ rE35F%WNڱR)Yg5 º9erVE\WN.bsּjNc%ɻtdZU=i+)bO@Q+|>RU:Dx\tzwhn(Noyz'b(d3*q5M-t?KOWhRp Z[p|qR )\T:t5.7§&Y4PpǢds9S3Ρ=(&DG Sܲg¹[N\yleԪ =ݤ0`HsoyZ64]jWOgmO;yEe`X ༁.Bk,48 V<+)>$ȑR.:<+o_y8J&yŸ+(ubք\ A\)|Aq3ϣƠ>uͿ0i\|3-bcJ3<R[hVj  fj)^ʦioo ׿\rtEBK@p,baU90E ןy7z#Xy vb7G駭5:h<6#B*M-yX;(WAmb ]{) p-]hjgxlN|z!Zٶ};E#{Coz=Wkƹ 'j^;)gGjX#b[T6Xs =4oB (DT3kI\h "EAab~3AvKHp,9 wZ|Uk!t{ 3il~gzZ3_ve nǟ|H$q+㊷\"?3W|&jVx .nq`ӸâE8qښ@H)5vGWfF%ǙWqֹWŝ`8RG*@Hsb}㓚QX&͛dl2"ZCWgkP[(+Gݮ?7xc`ZI5Ƃ088Ⱥ5'gYz S)xaj#)lӢ.v/쿏Q*)!1X8 M񻭏ssyyh7v< (۾xE/VZ5v'*<ʏ(5N.4&'m} {Gk#'KGs s IDATSW%bhB„fYu͠h1SbF;,Fܙk6ZSUBꬲ(R>LQ6QqrONr—8"B{=)'|jvL 5Š6 Q0Br00>ئ-øJ?'Rnip(/)c-0hy>GSpsiDp],~Z-`WFOS\z:n݅^ M0EF0KDy6yk'kmU.J{owԬ[gݒV&h$ b9biB+:Ͻ]"\#PD{LSWbيeQk 5bYV.%WZڑ 1j^,"x TyV:hhkײ[0㗞Bn0"xHǧM:[dI'~`0)NY_?h0F#Up6KDʜEP |#/QPen)^P942GA$nkFa!  =_݁-]*?yHRFD!P]{tbhE 1T>ԧ%"}ьČk3 t`LUQ.d~ys:x{x8%i]k=Md i,;ˎg9 kcO{ؿYqQ+um553 Iȹl~qNZy)#ضE]}uB2>1MKs#uVw MP2m$;eR(XF1Srikm%ѼL6f0O-sO}O}a1G\L׮]VnjE{*(R&o}HVjjкo}[7n ֢pxu6lMgUe$8/ rҹplzqG<_>Rk^ۙ}u{QQ ;Q?"W΁T|?Y<ύp"Hwt߸}7Ns!BL][+dio`=|Ǩ'iH%9{{`]π=ocH&̏odi0ײ!/^hvd WDIWA"st]7tF2H6L[Nw^}!gɢyr(1EVA/0^uSO6T xy1)_9PlDM_pWrݻoˎ}Vٗb^ .dOn+֭D)#D:&f hc4W]"$^Y+1?R^.KWw2=l%4(S@A!Fyt_So">N00hy;!+n#២'=Nxױh)J@Hp@'9p@c-(f%Q:[Xђ%c$"웰 MPfE׾5u,:npb.LکiTc挾` BVJi )8pa '93 l BBF++_t Clp ijy+Q4CpM?`? aA29]My." Q͉{0kR"9Dy CTՈ!fLF` +۶j ~H,j[IcL44{^9bFET=oFS5+!hcp[m*- W ZWRG܀!QRQ:L!hkWOoqe RY<#x l}z9fʃ3>嵯=U/|VzAH2!U`$P((4EG@rQi%C!jNBU^p5~J6:R7<@%4 H[^\>Ϊ14~=$9' A-*ɜɜh@Ls{{$"eܲG aD:6Y1!%Zl{qEiF(H_y!Ĵ|@.*vޫpm DK/QeǣL̘19K1~sU y:\),k/CQC@;=^80 _b*\JeEj BU6\Q+a|J~p 6NPϗWnρ(í%[BKe ;߽H. . 
.]Ǎ7|QG=ǃ=C/EA->kX%u|vQyjۣ(!TO]~BT46#>l1hp4y}bb(gYyJ "Q ;R捘Aɜ!]Y0tM%llذhnkcp`BGx*l rUZrJbehzFw'i:ѡ:C޿+(ĕO[CI^h\(0G8u#_AN9 D,{\q~oBb"I΢,c Qm$"eCĭɎ=1U\d]䭖J=g C3 T0 eV:T$G31Ws<"-0M\$/@{J7|~=ϑENF9 S$Fc@<%bBH&XO 6y69):١1bv$gl)׀K$ɌxDTH'N􍓨`90җfb۷׳3u L3dr,^ 1vr;C4 ^sRǥHv..^R{R&}L2+ΰͳ__LKka6)}~07Đ}A! pYC ֆFn<jXU-n w .p#z k;xP*V<3e%>-Rv/Eˏ4B ]ZUk1vYH# #n ڨm߾n~qDYXR,y9 DH6)tw|֮9_R'{l3/|2 :Em]o}t?/<(?mX #T,贛x-4H$<Ԭsp}H1L fgܘk_NlKGkdx\$gy- P$ZZ<}^ڻ"8a)u$YU,H{Dy>M"siI>7B8êTp "HgM,L];ʐ㾝[~ NҘhP4u>CCdr<ݏ'Fdg !J1zƒTZF B@_ ]h3B#^]+@kOR ;-b&' %C_ҵ$PΑī9Gu'hDΙ;5(sUU"C'rEW2o8LCo8_3 Mw4SjPiλ作y |wx" C_ӲqCJaHJc3($Ϭwɐ;9|+ Ʀ)jQ4<<=ï}IhH@DX<7:Z;hlHlrN^N^:_w-Z7Ǿu eBAI`"UCEYռ%hMvrn&^|if.Zs1K~\N?{-йMO$L%i\RzQ6|@]J=k+:S!CmִixihfA&%䏮GƎ%==tER ̀?k޼ZN=Lw c* M9u KAD0dz>aP;цju'RZ)[xo Ւ>i9U9+he^l2頣W)WL<ʍ-+QXPx142["T ^D$. FqlhoIXxSUrk-q\ZD%j wrQq\ FUYb}?H?K˛P<*Fڣnclm095$)j.&%Xm HcD&sv4154eX߽ǶڍL:YR-} -l /Ւw\sOМÈܷESPu"%ŬWb%>u9>xu;O>w09p[s1~e{Mx ?|)^l U1P"܏aSYamt \zYBxOiW3F=jݢ{XtGsX1MR/-R ^b5E sN'|"Fj#ӈ6wxikljg@৿2*PATH>~x+ihW4*Li~7 (Ē1bZ`[DUrvXxa &yiN?638=2l e'BGNN< -[ƕ$ٜfޭl v8\}]-_Un۴z:XV?ZfoO/4h x/Zf߻,Jq BY=,I51;ڌ9DGbfMXu+Pf_P8А12Ć_@~">ygs8;@*8 Yj۳NQlP ͎JS #RQ"=Y@T r[Qna!1l7I{m;'< kNeyj׷abc }KKE!u>ZEجBe81j*$I >úڋEϜD~?HW(a`3τ~2n FEU|3x7X\K&7lAx?14y5s#s3Z`x(R4/Z㚡KacCןo퀈M:Ce? 
u$^ߖ"-`ӣc$$x"Ý̒sq82#/|M̲~inEӃhCBty5xyB!0Cdq5^Y@AVK(\|VmaWiئ)Yק eޠ.QK<M;7Ra:C;R2l.ZU A$=Gm'||⋕Fa %;D iQKm](=GJ+bA¢i IDAT2LX d%1:;O ѥ"a Ӝ_&/A0M/7Ǵ8x 'lu3dHY֮#@^=}SO_^xcb>0!JapP-XX2:h7 2BH,]j:XydwOk&7XlOl},|/@h6^)L,*DH]ˣѢ8R[:(R֐ Za2TR 9֯n#zNJ2︜/^D/kq;nI YT/~Coz ß­KN zn3n VCq=#VEx¯A]˦aXKOē`W*@s}٧S li+l8gv}qYڡ`ȅfp5%"~tb|FU(ʄ 4pQqߵ89^֔)ͪ NbPh l MYѱ l\{2K[1|Cl={ygxj`@[C 0Xl-{'_|mPć_<]K ,ZLC}BŌW#)!u 3q*ȋ_ɜ.+;6诫TI NRM刺1<>NZ34_v4xW|l5sY;?T / ~ܙ$T#v ciaɵ\+5Yu&1:ң96es`4axarDDhr]!PE!/s6-q, Ǿg&e 31i`CǒriA"r.d|MvZV2ʴ4$j1 ÿyB/2Ƿr"X&\"&h"Xu$f/ Ho~ƞz/iQ 9%Oj]']};'5iڣ (ӽ}8|d69q˯nL\[,lJ]⁧pҪŜZOs%3 5홤 akdً 4b@N\gtO;;0Ը$H8c|)̣N߳u ͠7(wM(RQS>9eQZN.iiP wI6$@T ESJ=kœΞ<|NtT4*D ny2 s5Pԑ"j!yV@(6SVPVЖi&st>/!MLlUCF#!QO>_U*S-VlG!iFC0#K0ţDUDbSO\Xr%<seGOI.ig" ݅#hT5R+WDϳ] &T9홯 fu U}'c&d1XX1X\v®t|2OPR'}gv==x6Wr>^jDĢσAcضooC.d*r ՜bbOb6 C?f% _^>?@(!)xl0yn,l_1-Vgbʆ kuLrR !|jZR8~fCk]3Xalz]3ʻ͒0.MUa31XسGfU,o~eWD9EsutqWxn>޷ёHQY%߫%x!:Mt?A?kV2MgE@͛o۞CE{65ab>?ᕯ4Pp_2RGP#hJxƊN[E$h#A#P#R@q-5 ܳVsd!l!-\΢W9p#nƇ̑l!N;c1AbGGHWlOOÂQDᆋxK2}ȏxrGȭVIotÙK;d?Ħrw?A#D XJa87ވܴEˀGMHm%MNHs| hq[v};̞*:!"-3sz)饫y[>F99%O}sStg)%h 0IL!ƶA֨]:xl$/>V^I~IC*ήy֭<;WvvFvwko~{m_>uϣJsdp5 oxq/|mDBFKH;K.IbVJ$]l :y@)BaTЩ CP ph@\x>%ZD5IUQe4㴶ԎbO9צf.sqoR: [׺(,- k-ʪ+lk1>0å1vOfPXEt=i^3;  2ƞi$A`吙H(YpY<<0 drDr:HIF3-gV_ďz7q=-s_ C~t@> Xzzs{@F4/YYG<̼vA8anW\w q?_y _V34˫.ʐv~GF5]߻"\50ǺG#9E/I,S&KxN'](K=2r$k> qRlsRI()N^뤶:jNI{ QH U 6hcG۩ISjSoR*\_TGԪǶjkf,֋j YkP*ēB' 'cE IYFkRI3%E.pC€7Y&Du%v +(ϫ2W* jfILKX妄SiKiKI20[,/;2uXnbi.co3a*Z>h)!ELn,3gOJX*rlGv=Ȏ8ҿc0AX!W<]v18)]2 j^;k.aZ}6ٌEA/WHCh%H^KFmRUX1xJDE)g CaiC,$g, )j<'%&6vHv>;'סBo?qfdӆ.lgs-,J7~5|͔N࿮oH(N~{mp-[a 'P|_Oәa0O~Vqv:@i;14GPu:E5-ۛo [o`!C6# E(mߏBTB GdŦPfcCZ;@<9 x\(Kr6zc b!$1I[GeN=_>em\Y@@zr>ݥ"0{%݁9]1h/C#Ongx vV[c9.ɣK)s.`$"kJw#,ygd:{x⒫ԛ'Z&QCf#cT޲<+?Z345L$I@V觜!jnNm'6o5.:n([Ha>wᶯ DI4 O07hOIgS(%ozg.'e%:,## J(dH#%<;}w:IY|>.ΏKe~0}IaFO͊޳@V7g syùZXB@"9Hǂ\Axq(}HS ^7^pV/`ͬkMtT_Qz<K4m0RGFgI 6bmTuON%xt_q02 cEim1&kv?4"0j@~2OkJc5 ۹t/\'7$V}]u5[cxGvI3yw[#/Q-Җ5\I*ߍ0+6ZB _x2`}!LXkF7M@dav:x>'>O+ʰsժz*OB " C|? 
DQ:J=%dRe5G3Ǻ.Fh&",`|ZD8Fɩ>֚S C|'b<߫ܛ( eHHy'ok-rvTɡva򂚄Be6 Ck\ z2>qF%p)-co\+SdChwGꄁYR#!*_drVD -Rss*P~tLm;(P4Ml#mRȟfekFehIQ7m9c|{y=yFG y`'54x>Ҿdy=wM+}q9&ZJD"J89BS *FDZ!=0Rb⌹oh &򴞟Kx;^Gk[H bJra1huN`.g:Oe$G:#lwӚ8cItY.r@up?~t6jR(SX,ʹ՞T[ DSmddMRfoA]5LRfVmamgOkK_Z>g=sg?/gc2 &N$Eةј[_$/D4>S}P-@LWh|iLԆBK_~sľlj՟Y2e˙,co}k2`(U$GOľ͓gF) ~]W6>!NS5刡28ar'nV z3\|7Q INEzONO`UDZ};B7OyYCKk '"hJT~Z4OmMZ[i×!zM:x, >\خkK$e20[W`v#rط! 㓧8)T* blF'U==2 # ls1J}ÜX̄IRBPe8C,<<&HL*vFo s{|$ܬK԰yg8PSLrgDqgZ:Wq,`}W ˬS[w1g ƽO"z&&rܱB$vmw(vˋ^y 'syB1HHXIb1{yh(iB (2y}'G 9/ {6Ri~| ._^KEy-^@*.[Af{?_šTRI 49 )FKt`Qc(8q&Y \sfvFv&ftT3QAwRbf E dsadlLJY?^}8Y6.Dlexꚻ@7IO6k!Ylxz3ƲCJ!bؗAw0ǾoWOmaBu5ohU\~ťHtY$QQr,kB Sf{6-ϒ{[dA~ Y!bJcmQF)U؄˟J ?2ӱ4VZ"d]":8x:-zȇZ[} Gq ॷ\/>lPFK(=ɹ%Xe&Jr%j;nMMqy&nXo@V8轩cM,m9itʟܞks-7-4R2fZ]ΌKq.1e9tgUozk}bM[c6q2:3[0p;&3 ]*]xwo~)شI NM-k6gղ,%m_l3癊CsvƷIm.m>43뛻e>XRIaS9.i [bkO0 g61|2PUVũ$:eW\ą\H17927+|.Ío^IY" < ZP]x"KlA8[Vp c$XcyMM)yhfd ]^T-}jEAA"Z&8cy -ƓܠoW>mXUa^M`\Xe9r( ZW$q.y0Zp9V6nΫ_urn+!0V#}EUh LT=S&[OO=˪RL{(X(XFF%ѽW*@cox j&3 `ٰqV-X -F;B8!! 5ǒѦllѼp*LS?s\?jX J[Bst0"F `Ӣ: ,jV )M|[t;&ɧњwz*dR |l+V5[xOxu\rѹ ^~Kp2p1L{ß8)D/A/CC8Dgm}b/S/TstM֌6oT7I8Z#Xc#cjbnkVFRḚ.nB`Oy|#2Ü8V-2MG-R6Avvr#Hr4uYDbŮH=cӎB(0jc3w||mT(7㬹Ɲxw>d6!;v .%J-I4LIr.DcHY-7kz<ѴU%κcs2'<Պ =RR"=cjl;Y vkЂ;i)(Xvۖ@ӯICYyꊷ"[Ҳp3fN\)MIcvaM < ?Nq> JηV4;?_45vOj`kNLT4It6bǨ پy3/fB<x_ǿ~}b`(C14ڏIk0Q8J!BK0(V. 
!IgA!&qd?Lw8+Zq In+8siǃwbBB6a>nF;I"aA͍9FJI%(--)lK*-_M8"B(vr%~%=ms\8$]:R"#* s~m˝07, ZV):q*GT4RL$T}5 S /ߋ&fgѷ3(Ȧ(B[2E7u'f'-Y!Rv.Vi(ײSn_YL);z$s>q8P StIed^s$]m[I?L]`H F=IIƭbN'hERׯ+jdQ\`ULpᕃq^2QMkːJdC$m(&cB[w]+{`e|"5YDZ '/-&vUiy|a(+sdS\BN_zrʂ$7.6łT7_{T1\0p>:\bq&`3lю% O `ְ{>.9",{wg޲%TIL^A l G22}}لFÞ}ʮhZ)uצҴ_̍ZG!("7SVn9OmD5 </mf>)G[E(!F& ]Xi;ͪ>OS]1FXd.(*RrLFbcsx0"g%cGFq|[V/sB  PKuV!>tGIwҎa ~Ꜩ2|?)V90b\[.HF?Dt^H1.w͊bHCk.59YE`'|#dg)Se ݅/ϤjbҖGr]B0}%Wa%}@c3⦳Wr|k7bYS`H#_H1k˴rٚs]'?7x*rLؤS؟2))s%Nam" .XC.#9 %<0y %xUדE" Fphd/Fl9z4];z#<kqҜqÚ.v+1t ׸fy.\7#C(ȬڄTܩw>y3{\$6֑4JƸb%tKr}*q5|1MikeʤfuEaǖ QX8Z:ɝdQ{0%H ~L.k!_ʲ02 2p6:tWkWKJ+X;姯$!w?:c~j1~,j"VDRRH,w=7\d'9/_w6/Z@_~V',ȗc/ř W|џf4R)GygO+n9f8" ';^S9l 8aDp!p1Y ihmkEJe(k)ьG܈l1 P;Xڦ,JJ1(阽+ v֎D&5hL6҅ 9 Foo#N\`3w+Os 4CxM+Blڲ[osBC$Ad#o!2ax6:{O~wnqu]dNȏ(ZsN 9Y' 60jZ,AHO1ȱ1rف>z}՞bd`-s )]^ժm\ z茧"f"/jHHPʕ2J`yKoߥi/x9 'ao:k%?gl2fk>i5њ2Xq:jڭqȻ]s18+S;a3ݮ'$'FadO˱dJ.|ȝ;Gpɫ_OwX2L38OKG{㺮eTk.!wA<’% 4ٞMBF_]*rLxdH*)ц_p~PѴ`SNܮ/}y̿Vb %= %K{egmD(1# Bt3 _%d8Tc C&_>t62^9䞇 QJS=t=SwM+;[`Lx#҇XOp8(ϧ0Z""ѣQz =n ^? 1Bb2s!HRw$3k7.0r IDAT2?a}O"$ngA$)jIF V|u^S4&EBX+V9_!I*Xr&bI#Ĉe})pbdKԜrLS`͆?[7|4YZK5XigplF\׾9٘]oXƱ#Bִ G-ro??S$V)(޾#98to}{韹8K O[\b|R16m^O*Y`E}s Iyؓr5ؘ ?|s:r(gC#Pfv&#:Q-/&{jЈ%š1bic` NԺ1jF`C7c1|D*Ơuc=|Y]=$9S2M]K6IT6'eZN+ qLJ JŐ۶Ѣ$yRXD{⃜yjX0iFP,&ڹ (R?J#O'z!i4h K,{ 1{E`FIMe$N*⤭ OT9mm5uDGx:8̭FJ1'gfȴu*>s}2;; wa\4In&47m߻'~t|+s$湋pR"zcDyR! c!&3qthͻ]wĊrZd0l{BZeU0ɲ6Ag.d~*vX)5lVȦb؍6f~[K#KL8ۓqϖiskFi = XpVfY[7o|vucE HS21EġBkXV&$BMc E#bJw񺹽N$/]زȊ)cpOxD*Avg磛x3w2v(Z>-hg_I~| '6n7Xkn$%[ R7*X(\PleSnA%S/}#iOR*|?o8/a;QޙtT9mlGy'gțv#uϭeڦ&-#=ee-#؍Ci?;L\Xw*Qp,ё{fb]1Ǿ>F| __jyKy`0>9KRm HY~xnYIsg$U;{ގ͝"$j+Z`DACZ'ZR/y% V^2hO4 #*+|=Ws9b ~vn{*B<=^Lc&"j$ƽXQ[VMD~GE jZ*նNOlj:)S7RJB{M֐N{S9s-\#}5i}*brTD(!1=\W`"_B1-y rsnj\vlw1`[yɡ[_N{z9/@Þmt mC5QtkP>H=z0姍֚mVbP_@F%n6ʸ=QJ6I01PBT3/>3O~}:ȅ4o>t~ygkH:XPƆ(FJN3o|Nct:@JXGGd "DZ#D9UfbkqT IF+PDg9#050-Y,_"\(@X8Rmh8O)^DʴS{ܓ/%W^{5<07q^ Mqk_T)p]U+Ofn`!oPt^XNCW{.v];;;&7>yr: g. 
XriI}{ضihH;qO?ln%..#Y1gyܷ-NQry3g~m<)CyߺsM{85A䋱8ar}$I+q6X[@+Ơ賤ExaWUqkU@EȖ&]X#U/N$Ssc`Ea,jl!Aq*bfO֩x՗RrkPY'NqbWNf-e\$qI5'e3m.TS{/ߑ %2yAMrAq,[٣}.'-k F'oZ)f90PBQ-=l)MrgrR-m12 5Xʲ|iqpаc_ы9a\OplzkMo䓏GP!WWA肗gSr_Wa+rzUy;sUX eų m5kô9̝lTZ1 o?G^{ "=uF->/pG=l~.cUR?{7XƲyk'R@šU BGhxྵ ѫ^` M9?P|?ɝ<5׎5QBt YOjvď1DZ$0."%S S"LkV_eg&u{!-ƦÆoWD9BEO tDIm-p @[4fP_\l)jv(z}7Q/CIda>+, 2 Ọ]҇U4z *a#X/yuTcvbe+pMBؑQX)f)Mvq[ OJDX4 1Wzt <DCOVu[yD/*Q4 5XꩰoBp_6.k•0o$Qw?ˡHRXrShL6&2I=B\l-*{:<*Ђ 7k7Сhn 󾷾 P[ᅤ{3Bh3H' "''OL1Z}A2cy|||R'N RX$1^ $P6P&6T3U!cs5!C]/XE)⤕(rpS ؇9}S8\ A C$>B"j$*)N]}:'t*Z>яr׽ckB) ʦC ,׮y9pc )V_k#SN:KYFS^yj̸=OBj}zۑN#Fݵㅼk-h|o#4oY<Ҵs0bD N*):DQ"h 8eiPαkK-6S= e9m+gdD@mXrͩiq}Aߓ'__1y+ѲaS"OFG} C!"L~6v +GGyn߻9idm 7i>Owd?ZaT+v>SC҅_X$U^TɉR)O;|>r* "v 7{NIh,3O\Ne+Och h)%wM#w~UNޯyy~^t&.\׳k#oo/w#͝lMKkMwOw?8(%HI^*@Hm!˜ciY޷q<\:X)Ql1(Ƹ;DX+"̛ױb8#ENX>+_{!;ݷC*FdNtIKʠYh?ⲜODʏ:(,%aa#KQ/|0Vy;^Ռ}@ gP!=4(C/Q*OW.n[H|kEI0)=Z# 4ΛhW7/1;4BH&aV/}oC"ұpFQ 6\6N&[69 ֲk*cbkqò[=\dP,1.R$ɱ@@^cɤ%"@Jf-{m?6UW1beN]HbЀ)t0Gx)'QK9ႳQsط6eP^DQr]^6xYi%sgr\kG4R#t* T{Q5!ƽ~/;ꚶX#12M4|wgkݷ@{9=M/|_>͆AP+|ߦduy& DǪj(I! ׷lf =2nywҥTR3ax\+.~<x~>e4cYS"ZN.:Jr鈡R@6撳No_/{v>^Nn9cJa+qA[e*,[)&zq2 mCG΃Ji>Ob7X0w$&^$@q}|C88qiИn@BLV18𻵿aMS|'~HPsT!aρCױ|~?57eps?E_aŴVU+j hڤ вVU_NRRԎpL*/Jڍx9>,% 暐TP;@.PS,nkp&>ɹU2Rȥ9B *m3 Tba>[Ȇd[`*k n &] qQjo: ۲!o nnYԠYXa)\њdowl|G&9{< HkcJUXv+H/IC'Bm$J%"V4K)?OWP -.%TWߥUJ6̨~EgLDYƞ2)y>8e[,60NNdߡCW}o~|\sūR(({n:xֽ \ۍWw}P4a>.x77nv#1:}m/\r gZZ1wt`{GVaElî-;=#l$gx!cïCd Gt">O/gŗ^Ǎ_o|+pDX5eo AJ9g>~r(Bie֣ Wo| wXdZd7 7'?OHZIwtw\r)ca`Kܻ.TF# S|. 
f.Ky#t`s^>Oz wH"OZ͒6 kKh#I+ cthۤ)6>{$OvoGW0NBױcHl`A Y()#Ѫ~#*Lb",%m2IBBP6āZH(X\*Ƞ 0iK* ,A1"ȢZ]n;mQTB"ڔUm=gj!&F o 8tr-g8%'ƗW"qZKK{ld9jj\K\2>4kEP`"t}V< IDAT8u&q$IQkun&FĭxV`b]L;Z-I,%JVYz"M]ZbU3G ʳr\a|=YW( kVK/|=ƻjeZbEջvqOu8e|/іmfٸaRK~4J ^L^K2uPܕbJSM|Z $kM{dnkXAL*XyjMOkp 0./ɿ,G@2i-$_xŖ1sSVԀ 51dkK% W2n3jo/ ^rl`9Ν\T&:(gAJI`_?MQ  LĹg0鴇cۿ}azI*J dtƺ %@4O~1 0$3;(aUqHA;ǏDO%6wq[Qpp 1 3#⪫`2_ bȓ??I}vΌ;> |BA8 'vc m_ %jmP:ͻ.{`k^>/w;z:ѩQ?_+ #9XШ8CĞg0dl|&,a$]=(u?ζ4%˒Cx@c]熤@ 2.Z tЯR<ݶM\ |hD61.13aȤaWyF"!-p-NhW5JJ:B Qr`qLjZKL3VI$X7:bYƢ IX dk杆I^11IT>K8uqV~7d\ &b28LbjhZ@YFuߦԎ47{y+/.r1Ξ'B NȥFl78ލD|cѹiB=LhN^L}^>i*&{yϪ`qk8LEOVNpPe:=8 +&edL"_qyIΜCSɶPϕp\Ic'1HeL8$4JX8.IgS̟7' 1DD!BH< 7|-ۍ&ZG:a*Zh#AOv{`ƕsmo.\;B8Tkaޟ":e *kN'mα{C7q)(6ዓ%\+A46i[)uA)-{F'ZD΅HH&Q-uTq}4BK/BP#"Ahkg81*Jl>v[-"HuXWTX|S88Z*()hwwxvGcMD@PPbc1Q9Da]={Dc66I85FG(2cFhT.ވu7`9=}ܱ%>psa :䕠e#F5[Q+#  Y2.˲٣4{ 2\0mDHA>.U~NXxv0Ԅ@ؐήY:ބt|vn~bޤև|xt>-<%?@ >fFlQMTa[k$u+ Z]"USX0!ewk'N؀;dwn\F"75ysl77׼>2{VMM9R鸝KC>#clܸfp`R;eKNkcDzt OQ)^'MذG,a5ocsPKhkyǘ=k.˾m5@浍oq`()wv8؉)DʢR(aj Q+Z$ fKSV#-zY*fF&I]2DBa-˓Ĭ!%u 3NHKm8 ]֍o'6QtK٣^>Vn\aGn ]crZ2t̙-sVY(z/iml1&!عPS'*$hܲ`j."Jo?Q.+0@(c"V`2~F'Dzlb5!cvc-D1|>_ "HF(:BU kbpG&6Øej90f#p\+':~U^r2=J;vreR^1"U҆Q'زێ#+{c9N)n J*"+1G: Q@yFvmK&|(07\YgCo:R'TV|H3Y踲-}Rf#m_'(d8oyG?QO)G&9(l1)j(.gEctw?uXZYV^2Tt%,96Wh!65a-Rh2Y@_y܊ݲf|e 05v7{udDx1ۉaMG+ :윕ѵWc=DJ /+zqȹY$eF9FQK1@h {2:vBꈊHijlMjsPCLbdR>AXd7}2n-- F7r{6Osϣv=?-?ůIJ ill@GR@^v&%"3hi^)'AJNp.DDCξ{fێDz/w'-Iss#d=z)"tr\rky98!\1~lC}}tɚW\ͳ35#'VT{=܍TbdRb !ZGx)ac!%|t:1MZGfR^0ى #27mhk۝l+F }.]sgQ*iomcltBl6KPb1T#"|GJI.T*! kHe3DC^KۻQоpQ9 gnityќ cM,E&n3JQ{N%}3&LzE D.% "f͟mmEp2oxk|jnCy;C@afE[ʏ*<fG&b3v3I_Ȼfn[P\9XA0^=\cc&/1%(fRDRC{lCUƎL'4]),*x?zaeZ?^[ּ24VZf^r-۶wrϦ{ҏA>e{m'sVx5Ad񍮂?U^#d0h'OU8OY̗.B*%D!艊 ,?R Gh 3g06x\pk7g%|'7bš:J ºHܦ4frsYt-f7ҔG_& e,N-'.m-y˯{g~Ǟާ!gbt u1:hIb.,ųfӐrtR~P(D46H)DT$j̐` -+W#b}n#-Ocp $55nD'9+{xp^McOd1qD8 lH׊:5W}'+[i- mBrMm8ٚe!btb"ugM=Жb~Xx[ htv)[q7s%ogd\/fK/%ϧeaٴkO*X=ȗv=7Oc`Ja0 \!$.FXd9|1F96߅ͤ_~K kUZ!In+㊴5Q\ʪ\Mcs_7ƕB 5h=׳Q͖r ( 2i }du0ND! 
~ZC:)D8 FKORp7ŮtFX 'd0G˲N׼ zL1C՛,>kkbR+8VMؒ,: ׭l)?Pw7JdUTXci]d6- f'O\i#`uTr/3d[ZG8'b +8xv$A?Y;aکUV <^ \^|3[9㬧PyK+(yUTF[C_o/سAqq֙rwpSjD9{7=BhC)0sO ~7}93P2NCm^ɰ&[Ӝ_z~iZ7Il,Mmx_L*O`shRqa1Z] (5͙Bnj 84 0G\, b:[3ܳz`s1̝btJ)e"VLqہ1!*Sw# ͺD5+عk~v+Aً,Z{ߟh`G;y`'sѷeρ]aflTչĽ[]M6e'LǓ}Xelu*c&cj5l3yFqq>72ټAj~#%b1r+9OHP)T&P*|'/9;{f*ߘZػYR]9aIfŢKoҹk+:\lj:Lv.^ A I="9JKryQ"#КYq Ws]wҽ-٥Udzst~r(184©+Wtr-ZISv̜Y!ډ5 d\lΥXY29f24p(N'-/ü jR;*֗D-63]\wLNWd|s4ob l*3(k'T+/j *an4P @2:&0 skRn8Qi@GBB Uu3ZMXTձYeVf &!QSlsЛaT1&lO/LKd[D8Dsރ1Q\[8IG{7j,`5όuO7;Z/%F@Jyyr{˷в5T2jE5ckI,,W8\+3h)"Y}F_fjD-7}ʯU"1sL<ͅ'|G$5d ~/;ɂoVƂcp!|9p?lS|0|  q ?';ZQ͸˦ R,f(XTtT!׌,WXj29so{ OvBأ"f. P A@4mn]Z0$KU({уa-Ky>{լ:CaF(hL[1ZX8,Bk ##fL )Ƴt2və=OSx@r+55EY3tn|Hs=DCzn'fRsD͠P.oЯ2 ԭZp\';@~ڦ߻p*Sz3*dZkhq;p P-jU1ueoGu^-й;?vn71FaazlZ˒Ek_ sQBsgn6sӧ$00b.o8xyQ_mYq*7qǑTcѻ]֯{ ϐRNK*I(`RU[VO IDATS\Z,3 ځ B¬ .A!`V/vُ+UVj&^BHo${Kql`Ԥ(~t/"ѱcҨD'5Ͼݜq\3W 4o8`43\<_e^ ]!_1m>zIkqV F>uptnKWY=ydx=˚J%1ДkP,*1)*HθUxKZE(٦FiKӪ\?_Q0{2{ iĞ={Yd!sfq%{z"m`޴4(Ū'"eg)Z8xŋc(ͻ22M*;Ss43ft%"ar2\06pms561c I2 H39Vwsc33szw-w "-r旰}5.:26naafعk1>8x Ije˘ z;)&v2`zr֖@.EcHDC{%*8Kc|B0,[G>:6ٰ/6g,Bc^Ǭt AgBABp2!?W&$קAz'wj5D%lS\LLG6 Bx[XRaϓZ FzwYSւJ]g 7"ZvlUaT[6ѿ'{"ZVaUW%Ro# 7Fv"$Shj-]$.pQ@DP4-6Y࿠w-"Xv&o%kqȆbd黵ߏV[S&ߨzƲ kgM(BJx^@[jh_Ad݉=ХdHRKJrlV TP AzZId!U5 B[􌥣1jqZ*nb_x󛙶-B*:bH0=3CKK UaAfR!FXP#&$qp}Dt-^Š8k3lёu k֐M=y:mˁEgl`,x`}U0~ߡͽ\\`\535X%I5\3RBD QimŌ '|&B(y? 
Ug֒KԘ 'F[ cqYιR?ҁ#>"xVj9u+vϤ9z s&zc| @~:mцG#`sAPv _6_񋄅 ENPmL`_a>!!̏W(\5'9e&s !$V~03y>źimcd3y 0.ݏnsm\xvc߳d} zg>!~9+7y'/$'kpF( {[,i V':ѓU&|(͸C!cwߵ:r^lׯ3g͊`Xf+@ CJ ogVŌ"[n97s+FAl*ď;,BҒme%LNA?5d7`egSrh{p (\PI]t]Mσ}7P:gEJ-Hݸa=ss5r֭kWd$m"Yz :QVHlBCgXB"dV%V,Zxj\cԨv\"-V:ZB8)QI c[,S,@1U8kHEUUQc^ 7LT 5AbM_Ee/BuUvOvTCM*K/%<]׷g[pL./yuU[ȒEѼ/|!s6{K $IsO2A= aaCgs{g1O<z,H]k<VHRWFiE49/:Υ Ɠzi:ڋhbDcuf%o%c_,] O#lt.҆̌Wѻ^ơgoq0lH~5+y+9Z4oLcT؄VQAAVZM8si,D1 =DsġL P,q(_{+ǹFrdzAfĺIxn3k6Ѷv+o恵<ՇMics禛ďHĠ[FYE:-s_0&WČӸ9P 3 KXo8ac}r>,Z( 1nG]/LN~~ǝ/Y ,||9C;{;׬C:Ԏb9%xk{^>*#|8$45\pE|ßG⟿9N#rUS/fS=46|5L { NջqM8W;2 KG&@4bDȀZ|")ol1n) 69Ev&Sl)H庍ГX )ab)ZV΁489c?3dUS؟Jp$Bb (U m=ݔ8 Lad35 Ǭ^]L Oj5Sei 3uhmB %s\s F %H0LM095l ˷RF]G&'R$ Ya9cdYx#l C6ٴt; ytI1W;{rۥLv}RNnYN  6PN# c[bDtSHt&`*m(iPisuFH7ӝ"eC.*ihU &)5r,nZF,ZtN`!)!@BW.8&m̗\ݴ)@aHtcX jE8*4W5)cu u&]4afq~JPXgGp\xIe燬?ikټPhTwgKf*Xnxխ袢6ѩRUl Ir1ՑP˿~5sImOkB#sKgGXjQLk{+ٮvCgG';`H!VmX#nice% ʶFn^"1x0AͼVJ $q 3..%Vw#C)y(R7{NH>]譆J1HTd'%V[ęr¦B@A0$mЏmQw)!.hfgiYt#?~r+x-e!*J, 7K \ /~Wu3V fF.U tGFVb "ǛeވvThϵDaVu OWJqs_m9k#HtD#m"Fz ٙfh VtG(}ڻyHP )lNS'ֈ+Q D,&jq3\sAOIZQzR!n}0mCy]o='\qʥH !y~qByɵ/ +yk>̓#}jS Xm8g7}3[1 F_s^EN PùY4Г9lҸ8AןCj :&v#Irʑ9j3$ME[޹0G4m[[hr+r?ȺXf%I,"+e>g$I(L;LFAio䬭g6,5Lղn:\*۷&LYI b Z" }yC$NdCT* Þx艻z}{A8gZΰm݅l;2f] B:|s"(,)HM>np0#C5> 7^Ai5eRu^4DSCؐ/0u- #-4R,rPvTBfЀ3ť;oQEKLc*Ae'Z &{F^[/mqᇸO bфSa**QRJmE 0x!R3!fAX6"VvmAɕEI#1³ 6Eg%)}tRf.RtpKܗ 4  1K˗scOݚV>k&ow,z *JZx_^~(0%ȴXpkgǒYmZE{0[_p/oɱLh{fe Ȃ[nI_hNj^rAWк<挀}VDóSTTDӣ"bʳeV%ԢfB>l.k׬H FB#ycyc w|(*sazcp[ÝU~'ca sBQɜ(c}wN wwoIll $5FHcp$ 􏕠5.M]K\-jd4:&r<{9drxto: ?q<\W_G[gUG 5$/Ԛ&{G]aRev6)-3I GWVJ4'JarGaTp9A 2(3UD*CIgrdͳNQg@n!(Pc[F3LiSQӚc !#jc&|3b0?xp9ʂnO?1LUpˣ߄3u/䊳WZ m$'D4=Et'g:ؠf,8@fGTsM H]n ,,TlO ZߜB0WN8pLa$tS՘bLrWWwW( 阬y&WF':s !tY\yv̗o9P%|pn~ͻ?=Ě ` LgvT<`EB6\@`7o#9$f/zL_y֧=6Ip`w^7_8;<Y0zV,-x5̙aS_"u=u.a/_~)TLS7^ΆkCb r'EH3':hm֞Q# B&'KiaJh2ERXKww'CcY={g+O糴8x i)$q+=]yf&Xqz:Z?Ņϧ+VDU: h)Vc6lar2f%A$1,d3IAc<5,ZKd~K 4nIE[C\kVr^ bx9ǷB,1jWg~6ggUjhDtT3:S8BME(XȼUbpM fęt= BUE A?P+3l|;B(볆;lf>PaUZ"10C_}T"RS2O%GPHSA- C2P B@Q \ezģcTJ`` 
ރ fGpGFIWk8d暰@DıׁHDσwME[ș; \4jIJWsvW389"W6ѸZ;p5\1sD* 5r$ IM+L.pi\ Q6@:wF@-7LLA!lI|vKHak *$60%_2Qbf#6֜< V\ ɯ]s)ƺ#qJcz!{h2,Ԥ4ZK#Gzov!v.lK< `.2L@ `3P ,JDQA;'Bf9&UwnvQ)  Υ?ٕs@xA#tLdjg8r՚@IC8G _~57^r9_iJF(4L4ZL)¤BA(l u4!"Prs24H\s MF( Q5$rltVbdd"==Lϔv053F`]!6LNNR$N Z䲚bLN#Dl\jŰgW?\$ LM͑1<2NGge !\'qbPB*￙oq"Qe˦^WpõWaZ$&LM38v0Wn0ϧlyg8w?|~JMFAˀ |,Z?tC9},!NQ7Zd:2!@ve(PȂVQڟmd{$&BmNxog a3BzDLlL3 IDAT^&8<ӞC!;EF;bz dH,h "A Ȁf9\ˆՈpZqLW'1h1 'v.uFh€$?DJ]F:ya*ٷ6ϋZʲe׸Eo׈QkJpn:KZDų|mx1$ᨋ pnֱ)h1+n0y2*f4kJ}!]ŻzTI5n{&}z [WX #ȓ{_5tfXn4fVRS ?tׄq*d02CFhSH&~MMgqV 6\o|/gȷ-m\LکL+=~:8w>u20Άyp8֠"1$x|~Or&W޵t'n%fR9g$MS7gQ#N:zK !JƎ\CXiH)?o0VZe2Ha(طoB>accDPʴV16!Z E` $epp}}$I5Z[CՈζH`cCr֦>3g-78JVcsK9AZ[qloy zYuu H~|]ws]wsw%"dpt3܎&Л1 zֶBuw|i/|T=X,&77(*ʄ!E)(.p֤0ֿ@\*9_X7܉k' OP(vS9vM"OLu>1(Iaf d"@IX=9(?jX1q>RQ"kəL1$` NV,[U,0KkcXxfwcM Hw7rq^gVe_N6\iA|²')NlvLp-Ϋ*\"f1B"X:ud]jwgBq}iJ)Xt`bwظ+BxS6YML:郠2F/ˏF욮aHa _5Ȅ&$L${Pʻ'I2fS *EV` s8-\Ӑ1aH-ٷwgd YLc >$y1Ō^<%&Hx9Wl~P FGz JSS܌Z&M11+< ϭ`CJ57UZք| ܼ7˿<<0Xj8f*E>aޙeZ#tyk`&Nix4 :jsmnꎷ첨[pӊ`*2 pB4kfQ8X"q5bA_]K#s@hYHNdΚHHrl 0~sVKw3`Jqc.{2d]z.fKńl%ۯWvy$bcp6m ;~Y?.a,1¡~8qЋڛܥM%Ŗ*-cl>πǬVbwnţʋ$FHdD5ZZujձnmٙ*BI:;d4QbBRi gr٠b.jՄ==JHerL[HRBh-l:k=8&&h@)IR MH#cf_'A5u/}-[WINpm[sw=_ z;cSР\d'&-R#yCXG%\G޾7={Vrώn?ɱ.*a.-N\(VkdVK{ceyplb9aR\hQ2@XR"  Q @cMIʌgDfJx&c##84e-m *T Αd34mQoqj1oi, 'lEJet;WoFBLY8ׂ1+:Q.bLz hܓ :17fjux^XN2޸-UkU Ri޺MS](݃ V|9LJ2H{FD,H)pKkCM"9mY~sިc@cHܩ#1%lr>1!'K)ϕؠ,ԴOď*k)S/Nhlb$ r`j #PӨOr cx)>nX)ؤA@WZ:K@LDKWSŷk%B/c|RzR뀶QW83qԏCJo&ˑX^@CC 5_lh@8Lm睏@]z;spdibx=Dw[&KֈzAۅܰ1d`df+9̴Ef$-Ꮿᬎ*+$GHlD8o*lͧƙ i\tP[ Tf!N`ojx]mno= '%mnEɶ-y^S"_H[#ct_x-]+[yxKx9&| "d:hG|ar,X#QBEPֳRG3*We˯ xQ ޘGz%uʾ;9wt7G/4*1%[ZaB t8y4YA!]A/9qSgAwmgVs zh)o."ļ-W.;n!7_K_&rv{W=ߤV@)xɻxz#CGK|v?3ؼ][_nv35u?BvV$<;3oů3rO}c<yK3?}&t&M7+ytS6g)M![BkPDs>47?tQ8m.5 F*RTX0( @iEPZ5 CӬZKjPϿuwjFKk :ݳ֑D tuv#RqkB\ޞVXJF$lڴk։~%d:.8 ~r3"#wLZI!yS\Z@"|-[ܧ?EA>%WAйww⌳NH/ℎh?zg ov5q.&m?3Ȕ#,pkXw:ɼon}?` l)&$N9 t"Q3u#\M' T:9R!"Ǭ̰/.+QIM:enԠ!'ݧ$.K8(bAY?i)PuDMib*3j&`39ՁLR*'.fiQh,NY^Tm]Hg{y#ξ Tr+{_"'veIKZiZLSׇ$Lji(._C(h]/uwnuw,m]S ;d!q@cS 
o:&I>YRI4ӛy.( 3$AWlqnU ~,|?Emb H眢hZ:V059niANOa3H i/{WޔR"6sG+IO8R)B!' \#| nl:knlPbkrm]."B({0j<>H5f3?(Lh>6"> b"C9븳s%#HUEt5G]&vz/yL!Iٰb+#sc7sټf3oFYi3םnu,#{w_*z=omdT U38i.eKS2[t1dUxҧ<:AjvӭgbkB3Ȃj'vT.$z/gf2HigŊnH-b˖M Ρ%% B1T|V{\H p3u^VV(dLj:8sd/_H=Z~o WhJJ㎨$kXqZ4@dIB [6S 㠲܏О(o<߸6 btv#>0>{5X)N'|(uuS<˙T^+`c%E>FKL JbD0j<*~\"a\$ T-`.6 1XH,$'EyԤcG($-FˢM-̵veFOݱ $!tm\)M)erIR8a!{jF:ϴ` VlI:d#'7iPڥXD- ļjT A6)}[8JSRbsh"e|d}?B T4WCʐ DaM$[h`/ NYSlW 8J,INfcD=+!|+[Wfz^>U(#y~7~yo=\AFIs޼X'zx'UtӽyWSliPA\@6xF݈4.2R77nHBјK7qak;822U[Np"Eqyjw\"MN`Un!;%5O%qVah.h Y1k&J2Gt MM"&WBI\5~$R:J ?\:8t@+I7p!ՅgX35ڋoA`1_9d>ߦbJtu311pUKG[t+sa?c:!v0MkxaK%jrȜ@% *Ipq24=CQX 5"ДGfebRWϮ:2q,Yp!9"݂MIT]sܡ !żu ֊}HQr&9AG[;nw6jiRd}xR=/\gG)Jq}%?6]|_>gKo׈Z\=~DLn[]^}@rbH2Ù = 12gyşg};w7 Vs,S“뗜pt3\gc$|\JGGR|>`j"!`hxVrES2& BˋT +s-Q¦(B&qB`xLV37Ғ9k-r4'ui3*߽ ګ2yhDe1sC5J1I -*ch ش Z $yj3)QD6[qw ULh  #L93q+óbQzXE;:f]_nF8eϪæ|HYp6\+B *X47 vDKZ4,h&FަL[KA*r'E;XטOGP#1| ϵmXiD=u!AaOR# %,2RMfu!Sq59:lB9ϐDs,;D _׽Qf>Kd^*r?G{ug߿33iԻ+6 SxQC'7%$j$$ސ!$qBHjc+.eKzHNe9sf4F̕w˾d9gֳͣuD]/Sށ)`02q’0tҏPcx򩧹hj,nހ6 "kٲI«CǚKyݵ/[<@XM׾>?ٗн!ɢR7u0lw&e"H~ IDATs^4NER 4(1ԅeZIfh4_?y"'yP\p.g]sl=c)}HWyL [L2}NvWxg* Hrg`R!Y*"T ZH`C,[Z ,q JrZ**A#'U6TZl6$Bnԝ)ZVb or?n  *#mc \K2 ګ1Y{A|UT/> 6l8OTQD:54j|# c:un /FjC"lճ?:Ê"md=-` hJ'7i*&4A:Ձ5k.̮NpYB~QJyF~=՚_ل9ؔ3+SңkJ9ܽc$/0w9ìkN S,ߕ%"+םٝHvNEņ˖D&!m>9w 6a.ӞDmN 5RQ16JfJd.Ce+HJ%b9bJc[Cimkg * %K;<}{% H&I%I98wBgrJgҍqjLr\AP]tȫP CL=OX&sG승NdF²#K:;$"nBN wS*6oG_ :U\lĬNq4uo|_| y9FRWkd`b5Y飍F BX4x)%a;]?#!zG@V]_x9Hը2%Z?hO'H@JZ Z/ax&(tǪ:*c|*R:W1;[99>z?vs&n~HZT%H F}^干60x:|!H{vG"$5Ӻ,uC@ߦ v+g%1LUUDel, 6pVUW)!Bjѽ bcQi{KֶO5=3$!yB^AKILBaZeJ0񺘮3awJ$z:)1,0er^9;%R8Q %D()#tv^G% K];|}J#ܳ fzQ4uDujzIoJHXӕ`Ԇ~Qc}yIp;Zv=^e]]&e]o\J7yFDeg!T ll·N^ec8"ڭщ+w;vR@$#F0 ئ 3m-d1&ڒ%bTҒK-fq&S&XFsCœ!4TEа*m 1ֶM*y6SELbmmMݞ3P[LkRL1 Nk3z] .!Io{ .{ u!uv7ؖ&I,մEl:vv8jԓ}J>IŖǜE \' $MxZT+Mr雙36|N$Q?*rb ;Qk/X">>`Z!\1Iϫ~koYw(U47ؖ)©[8xk%6N3c䷰Zɬssߧ1 %/Cg+xZ ը0<$E#o{7v\[n$V˛8/e|6~5 VQ4DQygd<Õ% _5 iLFV\ƵӱcޔH͸T3>eoëVƤ}[ͩX!^{NJ|AEݬde#Qx6Fꄾa#AIN.Bi )K.$1GAHI!Z%TP}5xܵ/g0IL9K}prևtʌ( 
Zœ§n9̮&Ŝ]x%H#l@Ȩ`@- =*YAԪp9K:е޽^<{y]rOsc{X檋(--b2D.;-':j߳Gp;!h)SAf<jX *iFXFFF݅svh(bO)*\VtXd!`块8bF=$_,:QD[yj턅<!m/3-Xyַ݇l >lyatm:V~2'!)r`[Cl$˺Kr%_v)7|#xzרChlʶ5nE,pۺy2KdXo(X=)o)E=Gc{J|HI0F.:(b қ@тv 5q#4*Z>HGu0ֱ:. (晐k$!Jʹ(iX8vG,nPS}|bhmiXS $6Ot (-|E.(٬S9i-z8I} |^J7'JNi$J:sr-&6|rocGزr9f )ye`` n2nb+\E1ϟ3xpwI=苫$yeHgKL舺gQ V͍ࠨa9g81q:.+k%O-6tz$E`Ye0ã=> $W\r%d;3|$%2yXEKS_wQlK7\)Ѳ%? ~53l-ǡБ/a,*씞_d7RZ)`. JK8R'PajQo`Oi,~kGKb)}A6Pez4);(@Y\|{Hѕ-Ҡsn#:h1Q*CFY(6}ѵy"+˼|w l'ESl< 7@2}(<ϼzWXN01оAVAk$*9=O8O&߉p AG( 1{)vytfCtufZ1"|󷿷K;o?ζ}1kV/$Y3/˒M#NcZĥifzh$3l zc³yg)Op!] e/̭k;enKi֞TZa!c,k)gt-E(ŦCܸ sG+Ppu\r-Og8g:J:7Rj8hwRcjk #~'(myw;(" # ֱC_D $a/Hc 1~PshN.\/lg֘HK+mjd7-S`NGmT%r'8k:)?LaR Fr]C<49(tyxt??>GY\fC A*bC,( ёcbp-c[cť"#:&*f`"e Zl" HPHuNhTJ$2Lshkl'd9@aN'0S. QuFFpn|0UյmE,|;$\Y~3唣*WF\z YVy04a.GsO&X ƢUMP"Ajaj>~ɭ}QQD&U .u :rʍ3) /.|MK_]#|Au@JD>H/zfS14ܻz$bIR}Oz%^?ڏ㾧D%Kϟ|jJ#o]wۻ<3.>\v0!HTBOn.]ꍐ_81S#;RL047r/ %4=w0֩Ye϶"뮺UW_L쥘@P %y.7rÄG6M_PHbI& mWm!w2L"[|A{Z[YEFđZ=$rI;-F \6C#cs`0g {[1lFe(IICk+n9RB 0^K2 bvߠ|_,SiGxL"i6ۿ4gWΠ؄>XM, .SSBE7)$`&:XiW|s+-/#zȐ|ςNd2ha] CL*S #COYP!׊& C#c&Ka,;!JR3 kTx%٢hM$&՚$Il˩ٲj~6I !O65@/i"߃jjNJM2- *qE'< U37nAdr-7`{9U}>u.3h↔1N'΄{#^syO-;iI#b-RǴܱI^\.^Nlptd '<Θo=qFyWsQbX)@)gpS( I H#>08Eˤ[ ԰T0Q| A>[?/3H `;eѡ:.cvwqptO5wXx6\v ޜ.|5V6vwcM?4xsUsœioX3I90r5K.~}ǡ|]lNa-CTu>rACq.rBg@ 2TdoC<3ſZC$9`TD[?Ed Ow)vBR9ҋq ֺ6eS81S'I ^RLu&se'Mը@Ty$ <L9k 9$Qu$Bb( 士ct N&FHv2e(ĉ< ~b'uĶsfJm[mlTG8W YP&!!*tc(^VR?2[9o/XʖUq ųi4e>'s@|x Ai>71 Iޯ.Ele'SYz)=3lCg}xEu0lQD*ձkI "#QèVC;ZT:P)U!I \Ð I2A;~tǏyOo[yodbC{DBiX?↻e?Hm={&IQSa[ƚ؋0*v@8&$kyɥE(!cS$JAoIR(D$&>B]CR>Ji$o5%gڑ&XB1<519ֱSnLћ}ՌViS>ڮ<ϛ:mh+kF[ZslEB VHD$kDX?A$5xE*kFǾL7y$CFEDZ-fzEa 2I@AIDy$jڑ!'71HnV:dA {6_q ŹL<)`&L P&1S7 g% ͒1ͭtS)SAO9ebȬ@"lsƴ #xY!&L]:”Vh[)J9+%-[s1EٰdK 35f21):S<<` y@ZCF ѭ IDATUii{ e8M=#$f)&u0C(#c}=NWu:}/=GU ={d'[nM9/Xg7wWwSRBk:a}r"$NKEAr^ڗ>OlaoIYdi4]n<]ދ_JaRHOM Bmì*8v4Q@2.Y:>+K8!1_).zыܪi91kbZ#D*j`WBy8FeD[ R)wfÙ;8{0 tj] 12ϏfrT)n%GBR8(~¿m5eJi' dF@,Cq dz*;L%D Aܨ-$aR m42l{ؘڋmf1 V"=ӘM\ aEHr98*JZ He C|L1iD ad磤 Fst@ d8ш($"3ICs:tl oV#$K:Y;G Rd%7?2; 
~ynykn˨ߠs2m|R=U Y}nO =(<.3-{Sqkϧ~joKH5i#ֵ .*=}/2jmGcHC?U@1bs-'[ceT2R8)bCKC-cbꢫxǵ羖 ʱi}݃/}0"8#ͷ?|w?cf~)+v&SeFcyx egyKwcU_ 70^ ڤNXE^5!30sٵ+>vk{t̚˞4Zv4ĉflN+-ıF5|%cJJv粄aD.M\_yph9]]! J|ލ|8$OΎ0o^p\qSlyD\`=.^MEϖ!;*LxA^i&Ic ϋWMWP`Dl8̚hz2 !rxK_v1EV |9xrg$qdW J ?B^ïU'Mlߒ9B>K_m+&ZoJUIt24H>xl&<\TF٤)m[ȉF>l_X<趄a!J3RH=wsgǃwsxiީju"cWs>?=<5=Uj̏71d[R ڛԤ+M;"iVy(IV8뛤ql>w\qQCd4*x*Cqh4%#b [`=H&)Ie2U#D/%L P`& IjcRFxL&2͸XMY9ִR: @ݚlg?|fO Gy.2pj#QV"I#V%H{˔\9o]rͣRP,=Ews P3^aXzv'}\> סI_u>_Y?cϼs\+?[(Y mZ*H hWCJ'F| .Xۯ|Q"+AXc0uzTLCBw^̻Em>uwxB&ďa>r3wcoKV粴s!;fn]$__QRꤣQt㱨)^z+iT$~O?V3VAD d&']`导E˗w񬏞֩Boeۏ򏾁tvw`bl߁=CF(.,@"}8n\LƆz5$_ȢZȠ02:JGs1S*\Χ^}k/b~ IӖCUv? k L6a L[p>pOx[W|+z5"JcT5ub]Il;pZT:mnF NY-,)TQ!Rmb1J'iv#\lgͮJ7?:&B7=IN ĔUal eV%CbVH_N:TUYiSOsihRڦy('QlٲR($Bױ#>@R TF9âE2=T~_'{p59 &2daŢBw,?ٻ&~ jw/~J1qqB,d&Н-j~MGmvrqM;zXMcrf35fkb[T-~~AZq7o{J|ɓcGpK]k pM&k"*ZUb-@*AkS,y~)4f,97\o!U=4+ G?Պ7Ĩ!ׁ<k۸ *yەؼOn|GNCL]+JF :N}l4 JZ3NQLBvF*e!oFQR P*EbS?PRF1Ͼ\16FGOn+ q"{֪CpϏƹ,6[{?7^HgcTuoMk[6~$EwFzYU Q)!FYRlZc1"?Ưn2YCӊAq~2/3 |W[7~a}>/ϡ7S5hѺۿldIЊ_:,7= : (8J"6ӓMױֿ". ^ݟ@""X لeIMXaLL0<0(Si;)p3#Bn|:E͌Z+B\L @JQ,1R( 4xw3$r ZXWYgMŔQNxƂ|r3!e>],<;u}k}=BSf'\'uv|~*IQ=՘b')YvPh4+7gq 5CYTݒ ZBbǡE(Q^g1w[:נ0$6GSWy-WB`= FKG)WBzLڻ6BRW"aD3KovΛˡ9LIlNX9i?fŤ;k B:*{fh;gcZG1LsjM+X+0Tټy'%}&,7Kᕯ;#P=ၲ5/{(gҢʴX_Abʹd`R(CGtzWIS_O5i*ZHZ(xr'CbzM PdH%"ɸ7s 9O^kS->ZkTxyK}] FɸY-e%0z!1_]} lT6^:k,yu!Z5Qg=m?? H1o=cҨ|użl6n %Z%X6hPSTe3\tDQȼEjQN=296\ܛl9RśY:[e&eߟ$FLg.DUv?KsWILf3', a?>.{ )wuQ(h4wofWU{oݚ+sHIsUfqqV[Zmm~lnh[Q'Af!! [s>֭1UE uskwo|>1WcjJE̿Oi\j.%p TܡE {_wy7a9R$8ӌ3ڐu/3!GLF MP˄y2/fÛpNy+g4)% 2hP^Ȝ@WYph;z9sWI%% -{*EKYxIYs5f=/ ! 1OzeOhs_\7pcUVEiEiMl6TՕ!SX'  j* k.}',?o(=J1 =w}9y_6x'紐= P^U"N 1chU%Y0S8A װkXsmUP 5S^{1.8~ "mЖ=իFDq{ػX$`ރt5-6HlCe. 
aJ(귻H'hKF5oA^;$s^p@\bh mPDeAg]~5OS=xb ᾟrnʜa>_7W]~ 6 p+D}N O+Xūw}NPXv7d}xNA* @#!ʃ1a9uP&0P@9!*s`(„'klϾ$ƠRGXlB"$P|HZ 9xr"vtt?(EQsc9|!rj yqt C\ Dc-MAB)TuSF)7m4+yS!?7hxE%,l Z.3?/)u4g5ig}~7k JQ8bY'̰Xt.y ?gˮArH_ =0& 2@nP⮝ =p3溆N`Y뮏F7"[B@Ax־MT6Rkᱮ)tNJ ge!T| 5Tlg4tjRăYZZ i:SIU&ZiTߺbmMz6xFF[0q5O<ʂ}>DGM A?Kec:+Gܽø6IYLb&JL=O<;7n+_`ߎ]Hl9)^!PM[X~c+ @D}}V^xL:ی?k j(V|#h5>k-R8"i_XQc2t؄,f$[CmF.bց`msB++$mDk ;Ny q m\Okrh Ŏ6q]C4?A.k?|4W[I@f)PoDz⊗w͇Qa$\pѡxemr}f^vQ$V(<jcKT'c۸cg;w}e˫N/ ;߮ wMTuG7ICoUpʀ8mt-Gʤa}v9K*75kogɝz-7;.`( "CA䕶ă %ERJk3 $ơ3nZWF  C)RN-9ŖӛAUE: 5VoKGdjS&cXA~w8y:Z\\s)ȉϨv:\Cs<܉_ }W_l^l0XRwPQ?:q mE 2[?R(UÝtG~Үs#Q@vҝYT9Hh2Y,3^o1 ⏢Ĝ3Yv1w-fǞ30cW3eg6%Eu3PMvqyiLزL-Ibhd) 8Rqx/sN'Zytiʏ% q$ *ș$^ *\O?:#i# &DhĚ᎛9֖8uyvv 4%Nr%DAs46@%`+fT!viB=rH+} +9.OdE8!*+fV3ZȈx`=]J% mU㷻IKJ q١Ey",âF!<n)>6-SZCld,'sѱUt7R I Ht3M+pUo t16ޤZ|Щ5{ͼ8+sN 8 pbsKm| ż"X,|k*IU5.ո,V3I]Ma#Nljeg7pϚq-eQ=[>;^z /h ܻ>klqOͽOӌmp0Vg)Eo[|Ŝ88H06ugg bR"N]OM86rd~p@kGXdB*"}|*K[m͒T_Z"BiGw."\t#a̞ѴqPC#7.#4>DQ*oЏXg:Ʉ=>2lM}Tǘc䘺=X #0юjb^O_6#u&f!vػ|\.ǜٳr7( ::[oasUqJ51H~[!6jWR^:x.\t<%r6صˋH]g*i B:-yXi*t,~g&ݻq}Km H,A-?`߂􇸛~)ȾX@v8 r]-Ѩ "F sP޽}BKF4Q|2=;pMzOe<wosΪT<^2l1+ 'm1)=BZbn}k sS6 ts G=xǩivͣw= - `9ƄyE[B =+|9?*k+VobY1KRb[>onݯNJ-dekPQ~8mqD a'z5σ\ӽq?#[4߽m'u(FZV! ǟŒ_Ec;j$㫷\˽!>xg_~ N ϻ`bX(둰3Zb9v~O_㷬渗>^f-O!gTuCu)4n;:$IJ& Ù]Oh??B52H!ȞLaY4 ^pT0lniAIyKE¦X!2'پ8={!N+Ɔ0)U-Nȸ tĮ ~N;Kq y{%p:= nѰ(ۭB)R.Kݰf,ïh6.zgXqB2!_۩,܍[=u#[nę SX73?5zb>} FloOqT `ߧ{ F>nKck1eWO:Gٻ4MMEPqO?OPHE0Tݗi~4LxSf-ǝ~2+ORcu5r;Ŀ&Qף0W[Y]U>tv晇Je3Yc:BTo p헾ʼvN>8R8cMF-t=gt )3wjE:?E[?']4\A4Kuk~63X7 T긠(^TIuCӿ rM!ݨȆFQ,衊KJAw^v)4g{LAD%QQvr\ieQF aC.e7.TKe-9BUUZ0jq&BWN4!MPt:Aq18H[&a UN"od=l΅llKDK 5RcBPۖ-K1"nyhv=t5W{z>c%AJ*͊rM` E `Dze6OmȽ7?ݼ4+¢,E`pb}YB{5yN,}ﻆ>G6`OkpahHsܺg~qjl Ye8Hm6WS>nrN^.^\v}6. 
JVpGYbb&V?~h>[O܊ NSR7{Ws?%?{+'-9okAHT薃(EZ xe'ŚsyRzeK XIML?JBf皍_bw{$gjʎLRv%(FRG!gLȥ36)+87xDa*Ա{?%좫8dJL[c"Ɓ¡%ٸQBf-,zJOwÁ#Bā-)(N>8>3y*%~ *Blk#`%7<:U^R BjV3s[IAxǟT06G̠c C;8F:ޖ*Tmx {K #0 (cA@,:6JruPLNR7˷BKO+aHk6&&1IդsT$:QZVesh8QZg~Zk|PhRc{jʮYWMF!!s g2>w+^NW{| LZkJq7:Z9A P6R)a;X!u{Yϵ3n|ѐBCG\J)-A&YZ<>OLJDwksRDHl9u_Vs|_GAύs[\ bll~T㾝}8tɔ#SvMQ֏yh=g\PO6.)JI -W(d$ Пt.0ZwI:\u)2jVQO$k2ifZ9\=~y 2 tq\H܈P6ޣ14qqZ6k{PATRzQ Y0o._ߘ>D2}\J$=KmH4#^MPs:rSRSf/ALU@Y(PhnҟBerRS!RHS|tzREDJ&L CO )?;~_ ͢z0EZ dTP Is( ͹̒|PU'9\qJ6N ;;8fr6l=GYJ<2w O!8&ڹWw!lgE,e. YyB]XBDYNIE}H{XzZ+<Ǟ Wê{xͲPAHA/Cs܊wkR ?]@ŕLY˺xS-8qQUd/9 L> "MNuw r5\vH>cxwpl, &A]ZW_-~kD*iYKe|xQ8٬7>zsX>CE_(رk+,~I5%M ´D=V'^g-N,F͉os]$$`,ݍd3=RXUb@J\7s]w;ƄrjjMhJ,r kCh)FT!O'{"sFtޒr[6ju@PЊAmVEDJcC.' (!pQ B,0dO5J19M%õSj*4"Y't ZLj@ϧ*U?1HEZ4X$ęA.y1]3Hp$TR}K,]<6t} I3 FsΎf[_X1M/[olۼ/1,]RghĬUc>\M 7jS #z&AH.E5&͘my=ذdg@ċ*ŚM6~ܴHdZ2)[ƍ:8 လT tb#3Zhpƒ%āBY4!*&jCI|o][VM:cZ-wllL9eUhf9YTz1 y}#ѣlؙ)ICBQ)[0wO9 ~b-cgwc7e*N?v3ox[xK_’1gz{{w $*5+4렠4_6 6l`0I LyC#*]e5o1kUWP' VRy@&͒t?GmgQĮƾHkꈬ&*.ۺ Ep-<%W^۲[o9T` $]sn,ܨPcq1BLb,&0f`SZ,%PI A$&@1I XyQSFB՛Hap^:{x/;<1z7V%Mɻ"ĞU;xwfpVV9 8*[)W7srKUTQFaE)uy;&T[@K߾}ފ9,* Ё]dY*8 wǸqѼM=wlbBx1CD0…W.*3/>AJyxl$esw#r%b~%`Aocac ũd ӣJ(xp'U\etE 1B*Iض 0!֖eX 9REب)q'p]@ SL hC ,63qtKa>LP[0b[s;:ND:S7>Vh5:ШDk~=%Awnޯس3}k^$8'£raA i4<) )~ kHdyoCLi?DE&ccw|x1™uJ1=x?SȷV߈ e <'ӂ9EVeEOmq< V!}51aI2`Dto~W۞u/y eb~tfwa wyz=包iw[A$x]aE\]ZR$ӷ?ƦOƱKOs_,{%+cx-Gpqr=?@&#Ÿ1zVmGͣMٸkC*3u HPN:ޑÈ?⻾* /&C.*yRyl)q`a3/|wSHrBmpy65(2i[:~W.ťۡ5՝%7BHʢv;?,Aȶ\j^LEkpC2eH0Ս弙KQuXVFO3%1Yie1ǯ z5Rf'A0-9SAIYzٞalMV3dMHG_w홫BDˢNp NSi`DX\VK Xx$IMƠDaw|Z20BFc,Z)D9IG0: wFh&F+m 5MO>qG ,!˨:HV?אuErkYUg#B@[1lI[A`yz[pcX5#:BOo귱=Qӵu.>a@rh0^Ԕ4 h `V !?<ʷp0Uسg՝TV݃^ů{=tv ɖMDJH%_(`wYj5"pN7.5fMlm&5ҀHOB%Ƨxmk[Q!$mbѿ0GYJ Ij5y~?S\ jM) -l5CWdVT\u5pVFw H'i}}Zy!I .$h;%o[ǩ+N[! h?խۙ'|cxtml=j9kxi,LH4DHDF毱`Rg1ȡ!-U|mo+ڸN+yC=|ny穅Ac "2 ¥@Cyb GI8~xy53n6߿{B-H1o`Ȗ? s387}S,Iw MTw.GtdTXz(' ֖v$kGow?]:$ N܄~n ~! 
T L62{D~"XQ\H`3Fk(ʦDǦ1"4jA|7[~[X#d # r|W8G45u2PemҠ]>K (`u,e^C@Ja@i!V0Oc 'm*l=ה-^ۄmq9JggF{+|צhRf;@aW?A)94-- 9Аw g2=[ P[5G]j5D<9l]rjy#Hd.X:;N(qVwsHe6l([{gIeɨ Dg!et~{ c T=b}: 7}_>\8h#O1w/Ӻ9Tbϑ!P2fdrnHBi@:\`Ŵ̀%Bn#4"Xy,McM3 yj ٹ?QaU](L96KsEZ[l4%-,;n1mGml< 9;jN/H 7fxħVXHukpf؜6+ջ"ilv W.;Gh㲦9N>‹hjO쭭Xqcv!% V6s3E\qU~|!ٚpiյm(#tќ8}It샔CsI9bCNplu>7az<^WGylfS'eBqF vDAee"@ˎy阧7w;#-v+W8ǫ_5Wŀʷé`̪l&Y\:H\p T<>h޲YSp;$lNo?wy%ocޜe|YD`b*b^zތiD4MF?8V-nfrзq}i?xD}KѠLg6Oֈ(kv ]]5+["Mč85^'1g2g HG Bw1e0NDC[+rgbVB嗳E"͸[0?@1um"vy، x dLgLAE|o-|'Kuku { 7uZBkiηsʲsxYd^T`Yl<<~'.󻾪 lY(f!Qi?Wڧp#~hr. 7}XugJAU!}vpC>6EZb&T"¹+%G,ዟ4 ׳iĩLEZ1 sxA1X\wyB1%Q؎Ҋ-^u֩\uVXE4/cnv-73Dwܼ7/hEywve{Tzii?_'yrãKT3^xX |UHm$O+c$羵)2uRj*$iD̙A5::GOO/sPT}=[iniݦ.5 Oؾsrb, gu|_pݿ߈!֩)~[MҒ)bšTDFY 5fՃ( JQr^c/'HL!֚rsI)OITft)0 jXR#pB>p |n8AoliB[UN2R!YEbX;{-gK,* F8P;PA̡^gKJ w| r>c%XcPJ\=\\bFs?Oˍ)X}as~@GyLj7qƌ-k?Iy_~~7ݎj2fVwZ`7'ֱG(mߎoRI1~LJ _"ZO^ƐTH9v0^CTK%|Q ӷv >ˡ^Ov" CvIǮ X% "qfBMba׺*ClUaQ$<ӄ:`+)m{v $8e8UV X2;ǚgۯ(-F8z -:Hi.eލGd0 <͔*Jg>:QSx$6h;DT;v@5Vҳ/}E]ᚧ(܏d\ܰϟuRY l@uto$ǩll4«gt}qXP˃EƽK8SpmeUlo+sf-uG2ZIg.5@R 1'D*J.`S>yd36nfM)/%l]w EW̢&@jn~7BtG jbs0B !9RJ{=t[>QxKW?[n2(qn⎁L$t؊C`*;BZZl;3roɹ4"+℄usYxuhY:bU18(`"8ERR7! 
KOϝ toO]k:4m*FuHKc;B#3@ĴuPxZ6͔8(EiE(AwOmٝ4qvqXÑY7bhguSNrP"c#W͢HMt#lj'j J]&,eZS8@2sc8Lz@oTO`nݏ@4TQ~nq!*#hMMr8]K.-d] s *lx)k` F{!c.QfkfyciIS%eUh'gn+=IJXh1;: " e[%v8͖tk2a)5┈E隚g#$+{ ŷk*I `lA2zF2s\OJ =eO"byvl/k4L|/C6M\tM隆 |{7۲]Q^C1L2鞕bRSlmx&?J8.e~Р> ϬvDWY;G9HBfSZ5 Kן~1z|z ~\eC){MlT* *_/+;yA]D)a a0ZM&_+k>q#ZO< A{)7[_xXo5;x Qpr"(7VÔt9O2XyzԜc@*ЭSnۂR҈qhĔ;`VivzEB-Wg@ir{7?Ć|Ʀ"Hks_~!;9sX܌2q&;t2#lL'fsbY?[r]q]쪮}kIUBWWGuT @DL6X`0?{\l…ka&|13~ȀYIsUwS眽Z ueO:թ֚k1UKVy/ `6%'.\fFDez\gɼebmݩBfw)$uVT&Cju BE\X0-W$<|sGy \qy?$4X;Rz; hzTa'ԧ?Wi6]FO|rɯ'2]DQ J()v|^ÿbNMgRG!Y¼%5\06i گUB]YTj<-^%XeX'tQ7~͋/|,ƸTuuU+ҁa?cpђ\Db@-Sh 0n4&)## ;})s,j\$.fB½]LB{AY&&S\+ik6T/Uf/Lk!\I"3Yu/y>ҷ<`5Y:J|iͿB-amB5GiZ`Sw֠qEQ!ϸ%ʝ;)!ӝdj;D| \5GI=KE"t1٨lsԒ"i QEHo֐|Ӗ7&lk[Gj7sS8!@@9t`ok~o O==h(6 USկЙJ4Y=Mϯ ʼn`ly[ $-3bIR uH mz 6@piXT v׽J$q?kzq5,H9\cnDB$ם_qrTL>:YJyÿ[L~RM.Ȇt֒W 6sy@2C$CBi݈]jKf (P˟C[*@M&6AvsyD랃7`re0dj.=SxLqɡ`,xc.+7""4IP117n3EQ|O IDATRSdW.)C8{}715$|7O),p8=p ARJu$j,!)=Xe1⽶ށ&)p7 KNΖqN* <3Yw ak FyOxO +֏̟s6Q+FuLDr.n2s:=AYP5ӫvwdD'.*ͣCBuB4z7fN6E/R0!)!Ǚ>fY-:l@x:rVE֯_)A੹ 8rdeR.#0z"G\';t(˖B[Pcd|}~:=mawXƒe)%Gk[0U8֞}A/p3@iS (6AI*C4JȓPʑ`y=j0Z"!H-.SeZ4WXRa% BZeId s^԰u@S4gbDL.s6Tjua /(jj0*r(4ZdU~M97j(  |\g5>|44md|Ť5z+uܩ9QB# ^w5sǩSi$CCy }d*rmS=Ye#JUVQ\.&E.G b8Z״3UI9׌\XB$$MXkzaW(3J\M '6ߥBjyEvi=1֟o3]v>xI gi+b&ѤR=64AY9eaDBhm\jjs=%~1w-gtQ Y,kCkzeqW#Ӟ],lr8\i =:,T;*.{zf)sPYalSFyM.Yo6*"#{5jNC-8_F0B˙-M[e }q#A7ìz5/|l|.郛dҏ 8QNE^ .o]Q|u.Yj]WEr0c8QU(D 116贊| զ8sWϡPO7ۆ&(GWhMP[3B?@rt:B> ӣ&S.[CC*߇u32eu %X'8IVSq,@Q*XSj1|~jZit虊jm(W Ёb`bB!U:^06q_ILivCNC6#?ӧ" Nsz9-;+stOa7O~k$kz|+IE'bP=;>C=D{jyz%F$YE2iaJ4dDU I@J5JmHug. 
hϦ48(/¤4*a:ǚ1=8CGGZv)]a~&8Z H%\;.YI qʠL V[ Ո%cvLCtKMogwoQ*+£(**i" I(.w sG>wt,# *e1ns;ws#[u(ONOT1N?Fl+4612SUW(B-Y] 価qa}RTJ JѕNy ֳ e:Ymcql|n@ZS+|&M'z7`B߂6ٸg^hipis$Tc;mpjKeb*#<P써6,\\D1Ө{ޥk>N&0NA`2ۆg647 U O$PԊQH/m@葄=XX"Tdoz{0a@ɖwyXm!5E1Q/֌"@)Jyy2+i 7)JѭģS֑S^84Oq\sc$n ދ#۾Mmr^:oޱv Sl򤋟Տ(<~￟JmQFص3@ 唟}YeQ(;^ tU)2ldB<Oq۩WHWP׈>'S%n<֟?y]ϗ> G?;Hg[HL-w5En>JD}1G6[y%zSX"Xloxk7}frnjq:-IvBiDr8jBgxA$ˋ./ah6PJ'D!^V RDqJ73jxf' N\I};q)Jͨoy h;‡}{C`m&I-eO~TjlX8vNw9@ڔPTk ιi qB>5 4uM1߭q^[ALDfjJE/hb>N SZdMUSū.!,I/I'>+~*ApquyJaw9'! -"݋X"qNqMI=jX@-v-Sǹ~MPac1{e,`] cfnGnG6Med:G y|a?i فŪ)PxI8 ũ);P<0D>̍_|/U:6:q Q^lZ;M`Gx5ׯ'4-@c܋%,]pن%̛?qoNܬֵ̄RF7UyN^r4N/V)FNh fWq-1E,g~ttl(eZj)T~Yn@\[qSz<>4ORd=͙3'j;-),:,RTot2R9V[5k[QmE~6>(:Acg7*!U z݃wj{~F})AǶƉ>S${L$SVg۳[7) J4#^J۷׻&o3`b'QuI֙ªA>(<3S3BXc^cudlE="yS!RʯHaPt]/_ba:m/7|(,)8x1:1 Iwލ=zf3 V< =)؋.'??y?G Lx,օ?w&_M̋z:j6M?>.,DdL}hs.*pَec<|Azm;_O8ڵo p[FEIޚ?CS5@QS\|_ppH+=<%V󒗾w'1 YddxR"$Ǟ`xZ8Ts*Z>Ѕ.*`r,fRd2N`瞝`zq02)z) CЕ*64\6{mij:։>D9P'C=lsZQΐ)A@<Xxj\.YY>%^qeY5ɟtfvLYv+J.x"'X7uqF2&5j4S)MC{B5??{xy;v0л+tzW.8MK&u8jRbqA祸ŮTX|Y{wq=۾=~kzCRS[޴;ޟɵvFE݂^.Y~$9AMOdf&D3'M7R9)J ,\j1AgaʬtkΰhެWslf1sVSz#MM%ΏcK(fzRxʥQZTl'ݍ+l$A|m"!9hDǃO9S$)xS㯸u Vg]/ $LOMuŚ}sCKz%KW7U>Jks|j0[UlT LD}V PGk&t%v3̏oK_|׍ Sz/-7\o|/0FM1mKqR. &'jxk]QWn^?/3"Os8?I9롻zkH Gkd728h<2JKYSB=1^eڅRy'M3檊O IǙR\^f<[+J -],> ^uB>Q)ٺ'l'PT)rXb)"Iq29YLww7 A*Z.hE\OBx|!O zLEhxGظ,P,qNc`I[56D9@Bæҝk i:EO T1?(V &T! !aq%yc*0:c(jSxi^W291ɦ)e$;Cip {; Ct~T͇(WF)AqiJ^s^p-0ʄ.=gRشRX …Ô8WFˬ4:05E1T &p6/Ժ Vj=+YiVA罳F#iJD Ȧ ECljp*i*GL[ÆkpNbk3߹ܜR3' :mP1nj+to={0i/H){S|mv6 wWy(VɬmP7=[gɄCJ{Dq/o KKM;D~-M;dYp`1 'n-f}޾"ՖvBoc2˵L:2 3h8_H I,pSQvߩЏGn9V aJ"Lv0dRb2ΡEJqYNl3;F+ΦvASk C$>0Ż6χ4,iH@F0 U!1pI'QiBl)v r'R|&/s|9ϢpA` Hݬm{HS *.X[dl g2ȒRT1u ;dQs]Ȃ W%'x~=u)S%%*諎% _5o/~>&) J<Û-<s%uC8[S}wM_i9mg!'~5NL LF~Tm;ъ{ֽp| }߻x“.Dm)X5e~5Vǝ̛6wuh. 
~Y39|ȝ+ '8sN%Lè|PqthricBj:eB>VO)#DQ.Z30DB!IVWflF>]Αus9BD z]rG&'Yl9c ãEtpt4t48ȹuZ),lƹP=gx=W|D4 @4*زy I|p]82v+Anu7Zb`F〺͓?GӲNd֬seg[ƟlDl3x=i=5uF5f.]kϪR-b :l(V- Us|EIkMkz&8"A?IkB> /uNc"#o|>f (8}&SpaAH: kY@ND7v Y-hggOi^wg{ 7~gb,(gc/7_x%d0on߾Flydq ",a/S[d_?Й=w|VcW?MbТHi5Lh sdY͍ZK` Ҩ>'vۂYL&wA>>>%I^s GvD9sj 6tSft*M߹6r~?Q'Z$!Ch?b=¥)ZiϤY4ֆXjФLx^m&8E#P+Xrɟrg{Ũɫ)C D>;Ny$E7"G:K-*HXT4oWEk#Tݎ f> ӄ[-u(r/KS6?졫6ӊy}%z)-ܺ[s_\\NQR?2_%Ǔ3{enhI[>x٣G r(56}=z r\;'&J!/SWc>V`k~&_v}7m"txX/n Ӥ,j%X:fZT(l4j9+p0?3_n)\|-fZJ2|zJJFTat\&'72E9Z-9Eww]YfIRURQroAO_e)Rb13ϣ_GGXus"Q G hc* ^Ñ#1ji IDAT7j$'Z5֮=qKJB V񖷼粂:\Y~f5Sso 4Њ? )'^666REl%''ODmv;:7lHGiKExRFbg"tޒm% ;++ۼ韛фK88~|谀mӞFZvΓnꜮPV*~(q٘f% Q8兞 2#sx& YKֱoqxճW^Zxn70;Df krZޔs]`r,dz0O ɴK_r'l^u\jZ%Avœ^Ynq}{`Tt?!#J&cd2Ť!f{_ڦ0 O*&X@G&١u3&RPc͊eV)FĪ]Q) < f;k]K=s A_8!Кu׳^V[Þ{s#'hմ?Bn~q\跎S=x/h9Ͼu=vO~V5˷dPlt{q$* +gew,5]DQ ,g"ZW3vm] N ) j^*v7fKPf#! b?jȘnVPTA@4ߍp %0"MmD->.,4P-3{; ;Ph# ue[ֈnshӄYސ35:p^W,o[c6&tShu|(b~| w ;psm=_ ,6RiOMv 7{T.MLAhخΞkζ"3} ZTUWI,b?+,$Eg99( p}i V+M͖(NaЩ{Ӎ?ɪ ɴvӺWX.Sµ_{h'X˟½-^PekGf88_g.y• o{ri6 OفnZ))kV:gOoe*YعȢ;EhDrGgEH4Π`~Iݖ]YrjF<֘uj&?!nsduc[1杳T`6lf]9`?G?L! 8pl[d$i*v͜w~OZD]qD ׫js=U6 wh M<0YLSˋoD|ӤXcFѠ^my|'xE]wS{F!)%P@DH9'8A-U9%P!)]:m*;=3!B3 hNYE&ʨvq1r4 TWh75^_fcЯ}d/c.GSX5 *(ӜƘk_mūx]D!@XB(k4]dMr1]R#L⹯EH/y^`WZSL8 Mf>$mɘtv;] id!Q!iV8JfnBcℊ33F[ w%m"D-2{D픿U pi|1B:AR[4VK֧亮4I<9ĩ?7 `ltB!ZwUVn2r0+W.VM)juǒ.q/::΅"8Tc5j_a{9O8(yq2>hċyE5-wh]T~ڂdH T7|8׽u iXa /Eo"q pf`q]u9+y򺈟lMw2&}Ll)u}ۇ>..BR,k쉉 $! Q\Q pM3MM+iE;I^`u:Z2^)9҈4[E#7,Ga4@;kSnFAurqE@ g-*[wӟ 7}0T[n&맲q DŽ,$B%2RG%)qCTT?HG\ˣUHbut+Wf9̸ ,w|Tmj.r (m,cEc+'@<tQg?BAȋJ{l 3BƄČE+ Б‰nmp#m*gh PeC]]rlDW!PB&̛wg M+Lқfp֩}TgY[h-@]K8f}Ћɘ2:9F+͎{I|ǎӽ~v(:! 
{riŹjeeY͍xtZC^5fhe{:BLԸJTrmZ1;v~ҽcǟQC)ܸjIFCT+ޕ1$Aa0숚x{ҙpQy )n"M&ؒ ѴҬ9N'L1O#׵o~0>J#>a 弼ɂP'7z{(w qE+/e|۟7g''Y8t:^k<ڑ@PgA9G/`Ɩ.G'Tq⽉E{F= Qe;nox9锡Z2:1Jbj5˞={YvaZD B00CU Z+z{y=S3֬De^VZR02:Frղr>v3Hu PzAX;]3OR rK'-m\ xm5V^Cy_]Feբec+0ל+zZђTPwН 2P{m7(3,2|9،: QI:7P" 5l&ٖignzҴgsΎ0΢/mҞn&1a+:Gu3LW\9 ŧ~誏GWrϞxꮌ5 3ʕ`ӎ(RF5]J١\kp8΂IU2=mkg# ;"9  ;|(J:0WNtO +v)iHS 'Ӹ=VxG˭[b"Io 8Vm }`D̹vZe4 Pګ&=C{Fc#LLTWExiωyoM6 9+\`1X"@7JglFAqFo**ꜧc(?8O@mxS39dqm,$تUbNpR 4Kq*FmJ ?'1ʱ%@#9[z\sńQT'X#y;Wr_}`,N&2dV"djW'k^UO@ϻ.8׽眻;dfL2C6 5M@DPq)jZVkoڟmUPQu-! @X}Lٷ{gvqΝ3lP]pg=|>{y-bA{~/\D,!Mo Ж饶/.Q[Hg 5T0\NdZC mgcOfg4OI#;+O88a٫ n mR3l ^%474PUGv dq uw.mRRu͝R77s!Kh%RWS'4 ,2sM3_d;e;Tg/(FgٺLZX'X#I.ٸjg 7Ll\~??{C: KI>dfΜIfE[&3sF[^M[[,}6;:0k+DR& id.Gɕ0S9qUJiLu ņtXhsBdW> WZ3f u}QaZr&XcWS(34?m Wą=x,5s M[l^O~]rdlJOeQòhqbRQ*hS-ŏU|IDĘSџ:H8XU%L&߽}y}13bdD5ĘBfI~4+,z Q7<.,zNwLNq+EeAQ)*EOیx;RuUhG?=_o@sW?Jyh! ""QlbbM_HsX8ms_%GS,;w^V{t\rL1Bm"drOQ~\bieތ7k{#(x;3 2r-&֚%c Ic"bﵔMIoqLs99:bZj'eO&>iXFcZ?z}yyDQ'|7{ﻟWvVAq -ThK<pkmGlִG 2Q[<7QnǦFkE2#9Z/{#֎oҽxA)>kmcC-6?[U) y#4^: RꯆSl㨢:5ƄTvjTrL#\{%&+PaA5q?EK`'qMԝo0SI4Ye~Sċlju16XcqA1rqM`%C} 5en#Ou|fsY\x\1k0 5 BP66@ӊ*.r} -ORSY I| &0T%yp06@V|Ȫ Z}>?.Lm屟A2aAYhT#qa-1.ණ^_|<>3T PRɸݽ#,ij^ SLL.ㄐcrko`…NT%EEeUh"lLّ }]=l۹GZs>B$uʔo$aoM}"&ٳENlskW%Q;á}Œ IDAT%T%F04<@k s =PUS05յ8p׫bQ=DuM bf͢:fX܂"ӧ hni" =fbظi .-/qɥb둟#`fK nf&w<]#qf -e?K<)!\JEHiq\DT1$ޜ$'&5D!C{oOr u#q˞3ns EɩG>Sz~V_Vky^Kx &#k 5FX$\e6hi=BXE]Lw{7x( g +︙:ۅLby\)CB\6њD"ՖMOVb2sxX(>67L89.Bc")vm!y2$hKIĹ ~9kǸ_k~ƙ#m z*~i+}.en*ZM[,so~u|m [P0(E,Cz# >XSWPq'Wʫ?`_'>,׮gӳ[ؿeŹ8q @spZCDFD硵[>þ},^^t 1ctruuhm'`]x!3"p6rE~<|ira+[ ͂(JU9|/aV"ǹhM#"q_|n9|=io%='UxR?7CڭY"F6Y2GI ӧd_WD} L?w?iEцm(jV$w"YB.0S5!{;BEJ#'N$; Bգac7.CK2G7<:κy~\w^Cu^WLjNΣIS^WO%|$A#XRwZ ֤_ƕ i<OQQ:j[0A 0ad,00:*!!#*q9U6 aLc^FWXAxxb< 8Nʨ!l#Eqښ(lԬ ;RㄋREύAjK6G.qB]T=-h1WX`xB57v+X09Q-T)z̶%Ib4\Wkۻzy|K˒Nji`D'{IG Lj[Iz{,| ,^Јd  I!Q28PyH9z`PSSSON8/1Bl%]Q:L?~XUl7'T͙g,&+#\ ΪEبkOaX2sZo~-\yU:LGyz 4zSp;vw$*Sho!)ǣ ] 1?@69|XU+1Hϑ1MhQeD[kؒ U]Opdp ;6 MӦEML&M3d0ڄk|2~$ DTVV$B88*SL _)/Ϝq3sjC{O`/-Ψ q5#@3f= 0ĂQ.?;?},VUgFzz 
^]^G*3gSYUI.2HVM5Vභm$ǷݝfPWxYeă3_ 'z (Ǐ7,=ϱ`p̆I?pAR5 ||G_o#X][AS!<^O3<H[b%!/Iao|nV\7͠.* "/)N_Nn?8ˆY꒪俿kd\9v  [;yD`:97D;o YoG8-7}?LDé0bx (Ϟ?5bpy?ߕ؀}sg _`z;~C [BGQ Q,B؟λvf͚/YDiMrv%[餯Y(Vf28n,wFGЙQ\UU֒R/a$[O < L\#qP$8ҶԗHB40/k 1=Y$cXg8Sw'|-qaL ] ǎWSu j#1˓-jtUCgbdMgܳh M'#̸8-[XRq6NDǏP]#L%E4Pa]mF1>d!x\u!%Eo݃P>-0oBdz:Ƹ맣[%vi%Gpp EI6+%(p') a x@D#n@[-PQA\N jCv94*jd6?B csˊqQ.h nMB="cp 5nΛ\drd90B XCa"a uIԫP[$(FFVjv nI8Bab:i* Ң̔d7%\x^bݯ;6ιd: #x"eMC+?1ʺ|+]yx%\ X뭍} cbrMcP e=G,KZGCA!dwIlΒq9MסoQ_Rq@%IAᅴsWbjZ̥g]k/_!bs ZL"׿o1" doٶc?mU#ʍeKd8I/FNQYr|c+(< nZò v]3SHzu]Xu`z7\|׿ﵜ9$ $)t {_YrK?WmgTKP:)Ծ%:QSTT%,=}Y>xG8Ra[vv'q3^1D9SȒ8E9<\QKedcBK7w*Hn@X3*P|9v'SXEI_rT1,ۓ呇ѓѼիjkkhC&yj+W-P .<;wq`Aotr^_}.<*SVEyh4a 1B]b*5l(_ 輏)BrĬ͸ղF ETcM\Ǐ1#$[CJHnٹ62b s &5nPnSmAF @(IHbJhE apC\I1jøkM$ _*LRD#`b ]TY~?/eLEtFPWegXJ 3L9shhnm WHȨ8/aK::\e٬q৺9P\LY\$_G;Fl5XmK :.ebP|rB?n! T'U8tOrqNlLYaԼL87V[cy@#J͍r ((~UЇ ֜+,0bLtȖ~-D )aNBD;˒fέetjI,df={"fJaQ[*X}y3 44j"صrǿ) lc#cju˻xanpM53808{\%1 EX:OtX5% )) ܎--7_COIdmuc ^2G.-y=¯w/M"Eɀ^p15TVћ 3kH = 18ℸ#5`p|7^{ ʅ=t0;FnhvK1gv 2mal:^xtxbFVu&C@#;H<C>n< Wd1UOFf\͕\m﹕gnMwT7=Ȣa,]I臨z݇ittt2$buu >$矷!fb$3Jee5 CH%hiiFN[l54mvfnű9s{njkȌHg2%z?9sg1olK{ƐvJ[|JHHV]0'Õ/)|Fl4;ǟ^RQ3Q#v?Eshptwe֜   300HMM5Z+\7Ձ--Axn a3w4*ŅbAU.b+5c0|!^f-BnxMX)J_ekW!h7g䞞϶4*͌l_:46PUWǮﭤ&jX[:6ֆRGs lDZSƄCSA())|g[9q2_)J^_‚o"pǦ%Zo>~zCվ'z XL< +bGGV1b|+VH5H.{yKv_=74܏:,KccEV_PCU2?9EW4rYI<Z,Up`چ^)PL?LvSCb5lyi3&8Aۗ1RZ3Ͻz lh?U V,9c-}GHD|BĴ8 57]W߈$GFac1dLpϰwbYc~Zs{1`@S{/M^Ň^9 ş_}?;v 2sCۼR>n,>TЩA̢ubayc=[=|[¢g_y# &uH  tƂto^/:̘97ę˖pP?gΧ^/;.2Zgdf-\A**gpA ;w9Jz|w;nŠPhaZ~bD&ٺ>f9UGɛzyt?}f|!<#)k/b(Q%RX+ˠo9Fe0׵Rhgtw57PP[Xʍ,?ʃ;3<,߉͖!~T.t1E(zԞ'8zb*#u4 "A o7ւ -Ĝ=hh+ʳn])(jL/)f2BP0TZk硔8AH#%1O5x80B5Ժ=G7~Eߌqg7Bԡ* oQ Ǔ\⺡&WXSbp"=tB3cq<8v _{)&>R>R_ùk.ekn͎R߶:)h 7$)-@Zt &BՌu#.x @ٰcUϖ!8Js=)1BtSX a)6w:Z"Vo Qlzܐ>l:;Ow(6_ c-2!vObJP+I Hws,),lP b41;֏i}<| Y{DZe+#Bv$:Z2N:H4F"#B6m$ A%fk5aK`X"5ק U(5Aȡb .T3Ϯ1ƃrh(k/kB9a Qw!pl?nm&q+<5 u6IJ\oq<t S aOP$[r 7\RR0Rۏ܉[?y46~Q?*ZSKP}e=JǤ6yˏWҫ0.:RU]o6=Ŝ0ctۻ8NTEē3ݸ#<, -[P8>F{SW e&zFܻ`ɂjZg6i*ADEVE|IWq6`W=Ew 
Ïヒs't"0Qqv%Nո^ǬsX.Rjj`m,[0B@'7w>6od3륱FOmmdìYmxqCT78sVċ9mٻeV,H>jgq$&!N7sas&Iq9RGrkאx1K #t+rZuc6^'lĊܸaǟaP3}u&$ք: QIybX>Ryוլ\YG>6oy:+XxijGʓUp[_+xͥ` qw^XhI7_s& Uྍi*M>";-h8t(WhO aʅ9Մ|x} @.# ao*3%Ea4^*)lS$ȷ$u ҿBbjU uYQNx*S"$s.hGpa 90Ed>y!xbE!t5 }L.a 놪 fH˿WJXS)2{1{Ȕ&T1R$k{,en+Zk 3ey2nKnEs_کgՕRLGB' >;w$7wdLяZt:OD!ҸQ,FX'\e(w&z QRaUZSNg0 s{ě:*`&@ a8q c_ qNԕDN8.yox>~&hVb4uId7^s%mN8sI 9qGEA/Hy FlQ )`;OE)%XaHjf^t67abeF_/$_ǟ.Ӆ2> ½P?0Xg-VXBC]Ԯ >MHj-O :;ɍolG?㛽ۋjH- z67] C9BI ZոCOPJli㮯o|] 츑 {&؂W\gNIT\.*HЙxx;+<N-;Eə74C$ \v+SΧ*Gg!6;,HL !473zz,jBke @#-2a(z9Pl{f4+}Jv@@D֙VcͿ`[q2Խw\W}>);JZbɒero؀ S PPB<@?p 6wK%KU2;s{gvvVLdv[>%4ڶ344̪U`&2 ׍bAsDJCUUX,J2P/neYt98=trrQgF9| STWǺO7 eҥؖYdM5?VA (\nd]=Xn.x9 Om]deё,rHD0"Q@)p8PK VZF>1Fb&5ø6#V;VEVxOE"¸7{6+^B"ptS+ykoWs:cDidPMB`ij5u7cj_S95f|8b{14]_q[}Oڽt7tHS ", _qش6@(n}Gm,+:z>q18bIQ.< {cF،.ZEd+4b !!w^;_gpo{M+Zӂ-9Lnl- ^^b"j+ ʕxUU ظc^Om6kx+ nS\cA@Yu]gso7|߹+"7 Wt!vA,3^_ BZa&KҢ*)Ϡ,h0S1"Fn&xS,I )6ȦFZ<%Q]t ҒX?K {Ӹk d1MBɠKdB Q, 4Ho=0?MUR֛!nwLDX9@Oo?ի"AT5S7ÂҝBh-hm?~5ؕ(L(B3shߖ[Oyxp#[xQT:?#|_ݷ)59!6 opH[r7>c# QQgtp˷*g|{o~ۼM?5gL@ :+B ,WGQaHrvq6={cjc$>F,_ihR~'~ĎnƊE;Z~v7ͷlÜFh#\Ǟ׼tDe#$UD"e8v@Oq_Xp!CbTAYY;vQQQNwO' \ޞ!{`!HX ilaM45⢋.AXds9jZ ,2>F]p\|gŲy<{udIy4n"=mm!F}́GXH(C< #l=,s jj" HH$Bjk0'|/GyspODFB'l-喷G~ׅ<_dcy_>^jhqBHE ΜBłJ(&sbU#? 9 !<,u-O)nGȲ3jqyhgdص_;!8y8!'޸D9|4”6vvF)2DjQ`"i5R`*zOm@/!j ^ؽ9+*R P\omYAYA_s0h㄂C!; Q>ehlG6+쐘3r _gj3.,˨zF~s;p9}IյKV108@Ye~)#jfI1XabFHPL(sZcYӲ':3Q ,))[u$"k<jL#ИXQٚcPC 5҅;XD2w%]C812,!*IGw>.ێj]fSfDFc>W7QBz}t/|] 5cX2GNqϣY_o8: r fdrDuwRmĮ{^WO~f> ֞ ץ2Y^Ꮍ{>n}|g_ųyIʉ~?fɒG9V1FcxHwZϨloG#Tװi!Y{*Ҧ]nmlGPv 324T_c c ߀/mD"hQ:J=<U4$`Fm v& AD յ~VLJ?y-5܆&ѾsI07ǁQ^N%CrZ9|4ɎhrG9p -$qaӤSnY(D!ظi=lxZZJps&͞};8kj餹yT;pdzHiaaxngsyc3MM]kc_!߶(+A%QQJ.֐I{>yϣ)W d$ )7D1X7{ `ˢP[dV,hdHƐ,XkKbmūyߥopjw'ETFߒEgq'ߎ_a=QOعFy$Aɉ8qz NRܛx1fXjaZԙQ 9btv.M\`}.z#DJ$ˊ0o9!VV6gx+y9 Ŋ&?U<o&PDx;Ky1 H=Ud!0( Z/LO#X<#9ykp*; 'yr/>àc!*f#Krꚤq(N&c IDATEi@EZuѷ+<#'h$Ӧ$'b-Z~4e_6d?7(* cȰQ! 
CUo€eEJ(h3un=ӳ 잍~ 6/ Š%yk,[F:ҹւ 2!bATHDFZ2@,@N>-*[LLP&^u ]\tõ{ֹd;va iE/'vXo~7*ۏ⸗]GdgpdiYXB`E#K 2oS`h``8fqh#4dо̴6;[@AΘAL뢉46[E='ŁqnWl1@T@\P'" hG ;Xpjvb:;3P< D{yqj+|}|5̮)}w .s|A,1ETAI T l1,ZSq1w/ (4!]3|d ;&YqJWy{[ #]Q#"H6sg`"78n?/yMo䳟nbk;F2; oa"ZJ `\Z^Z<×*+%?z6y,&5b#H,kC:/X`N.>Ȍ!@ P!7X)<ڥimÏK4geן˾}{i]OcS=$lذDE eϾ45610ࠔmKG:͛s׬d,5sg-k5W]s[g47RQs牕ddb;f9=i"\ꕌgilrYd.o?ʳ"03 󨯯@Aw&Oiz.2+y q],Pyu8ط!yWHG4T%7~z6m%oJ`Sc\usƚF0nڷ<}"r"/ }yR F[w]x&?y&ɵh$܄n(0[ An$:Vw7L'a`)d}Hc&cj5\[e/81Zi\[(ARz0u4rnK21>^ aHrY7WFU'4=?N( b1?ƪB*1B?(OƷq$Ӽ{ BL9"k.D-v(MMhn¶{gp~K7ŏ|[FI(4iF@oKPZkW;^ɥpPEfeLI.Tȵ/DsP+kc')CTq™͋ ΅4̪ϱs-kOC&`I+l {~T@AL {  +D{zQlPhf%A=pg{Xxw+]9lfpCˡ*%q=^j\@O#kڠ|D.Cq2L~ܳOn0q7#t(J!)q8eSC&fOCFaN)ug|Qmcn| 'T%FK s͔p}],7@&5AM!)e[ *WHJF[ #uT6[VX&% WHRFa'VG+E* 0{?њlBpg`=KD@gua1I]L| ;$ų*CVؕrc B*:7Tq R. 1}iH& V UA4 q|?]_1ou!N,a^/8ޔƚʯ'تYXy|Kf7>4dfچF鏥^:.^r-qr0?W8s׎3 \13)ATBj?G}|}oG;H,O!oAϑzkM>=FP/Ç;)O;l;|с( &c4:3C)`-D`þ?P^D4&Y,[ٽo7T9/wd M Q(cٻKشi.RxYZ-⊫س{{"ɒ0 e$s, V{ϻ=m09`Lu ?rVg5(QW[CmmxlJٺ(˖-$ >}/p~F! `x$OZg)MƦ "BsM})XO8Βj:<% rks%s#YQ,KOK2u8L䳴elM ϮcUi8frSE! 
b>AP+`N"~ڷ_8́lIsߏcXEeM З+:/[lAh Wr C<%O.4ݞ` "E+/%سw[W\y2`PLvi'd˜q+5!mL-"h\nrYF)e\ϥ}w=(̎PZ(/JRnYx*%lG{ß_}qÉ'7CnY¶8;Ko_D#ģܹիΣv(֯ٳg͍H#Ks پ};u$̛7L*ϥ^޽f߾,\/0u}ds\~M<ď&& }c_7aP#aH+ ppjPg/\UI@܀t,G9BhZ LL WPp.- SiaWj~9p" ɏD'ԺDsKI"*ˉň9Qa6OاVXqE3>6̮S'֋x+y%sxvOweeUaB䖚t,/υ%6aC&d5րB&9Fb,DH|6*Wرm*"  X cn9͊ 뢊!7V?s6Kqw\絰j9@+v= 4|PU{/#\LSh*<#SU/K\B.\8qPY.~2> 6I!P0d3i!lh '4!|CD"( )aaJE  <8O 5FjŎTXrK@҂S*ۯM ̽(,.r}޻K|*`+(Bce(23;2P9YdX Pak>z"e0!tOYYK*|U >7vo"ˠ*xCU22Xhi+/_F:HhPkF2BdE!m;8ԳF2:Q'ؖ :7Ѳ8#N ?{?w ߚGZ3s'_y矈.gѢ"XZ ׿& QlJٴiY~$++I$*Y[I6ijl{S[[KMml N--4^<_ɦLp6?߯co,G?EҒ9z.i>SegIƲYI|z:cs 5L_}Yt '`4%(+UПxh% &tW*9iVn RLMK&b:9;SB}ɪ9^?fP5%X:^Ɣ _jƀm؂/gxJr F {?rww]nE9a˩Z t} D9J0FS9c{Yksqzls"DHhV(' LlT&933E7L, ML:B;cF9i2~<%G_76ɨcwA &,HSVI02˻jͧۈ `@Q8+b 0YJVp~)pE3 { Y /X4&j WCkfcseK[i=#GwnfZp 1dee(gM·-T5ɻ9AF; 89ƪV{_Z<]m5Wo)k=a=ErGpd!q,1AmC9srx0ٶQ dO 4l; cpk?s595ZƱhZQJdO~C;)Ev\0DveCϽ|>;pa>8Ō/>Ki_`ly~3_"N_؋e٬Z۷tJ|ǍEqZ[#rEshodhmXr {v(.-֓,[H&=ts@g8,g/y :&VXlesMW}s^2HkמMO #qmI 9*lBFH[to6Rw}W LqtD ˼rNLMR3d{fʧdE͊BL4'H_+M!RU !Kr(Xjzٺ^h; >.HY *SPP^i(9J 80*.RAK;:8~{9kW`e&({*CVU&6 W9 TF!FzM߼ G|SDV&>>ӏ(H=ՃDS)[[ς|HK mQ#CT0 Ry*N1|aLSȀ_x!0J%5= aP~>vI&1|S ҢU^ &+N1Q :\X}ī&TA QX_?ejKt Rɵ0A⁜4}}611c陝]cc$Y!udqVpm"(=>.(xgmF6?*/n0“m0/tDG P45-dII6;xcEc?f=P)\fq\hPA>σ*=;ǯ71>aU@}k\t0h-4,jjr45͝Hކu$۹]_߂[aC )dq`g;.}|ᓖ1V\6 IDAT͘gq-{G}*#B>`EÞt(\~;֎@idhsZVl66(9K t5|!,GV8=O Ehq(6|{6M2isv jX,J& R[WfDZrm?B46bæXb9lD.W\x1fZĜ!#^!Б](bxY-چd%-k2H"F>|0m!.ÍX 񍦢"OMUK?Cn%w0 o,H9Mo6TzYNsDe%xd=T4oL00mTU'Gۻ0R#&I !gx/CbfyUa@9x~~^sN%!K=f'3kA!CC~1E /&ТϦ aEp?tLzLPDe UϘLRz(C?c<./gw[[E*6}E$$KY[,"Be ${.O2+YOکEcZ=(g5o<~m!;vjḦ"RJFH;52"pN=J6`DF6bFՈI%.#?4qtrT6k[J|F.2neDXg5ˇ,4)d}>mc^Cκcϑg(GK|_a«r [3imz<,4ϗDXSZLͨ ךt 뭐?^gq 'ZGLҿ| #\@oq TyChh X!-Q9J% ءz~C^hf13@/Ej$aK/YLcQ.Q5!h.(X)E=D"St($,cciXs2Xmu='rX %sr޹\ξ|uw9(&fRg*_=0Q/a6m$i=Q&b>r_uu׾y؟e ;LhDtZ3 O,fO+ D|vq~*/o?~.;f w,Ж`z{S3(AG׽z"B2SHa$%26:B‰r eǐeq:w݃7m_@6~!1(Bz(41D`- Q_ٳ栅œ>Cm]/!Z%7.12'(\͒{& te9P{RI1}3cc eihl--sxXhuukͪUkزm uM2dsY"r/Zylbܵn.b^q9c.Zh;0{v+:s."q*5XTL!T 
Oyk?}Jx\}I4T͓,-a`pJf6`9tFQQe`h}f-LD-X~@q|IKdXR2H?a+P&,&6f{[SRv~ԇ $s?jxzs5,i9C_}] ‚18Тd1: 6|w#e͕qzz"A^hT];Yu1׻Xwų#eK:OޑcljT5ѽ(cXLR$퍰`{G`z*J Isb@8\4Hc "=rF3X%hBu S ؛Ʋx[};h!+lV8A~/ 36HXSգx}Bb/'>:Lf%Ph?[" Bb*q3){һS6%%MdӦ2o'0tR n3ٹnmla$08M&*  1]p+rY%%<=1"Ё(j8ʙrB~AeR/пJz2[^BgK`b>U4iC^=~; D%[Kpj\k[?|7s<>qo'4q3kM`x1t_HyOlvtY2M`>"eet dXbze2lĎ8d( ߷e8hV2.Jh0&>dm5gq'Bkuso^q|Nv hR%p?т!*DIoBF"eY9|pk+sl99Sђ^} _zF#fvvG43/{:k.gKOW5sWa-nmXPFrtcx1jkŲV\޽ې/wL&I&8-sHVVmv.\NW-մ^LcH96tk "sW}.mp-i'M<hSH,z<.9}ͻ}h7~2y-Z F~ V&52>W/yۗ =y#[qE-VDwo7SW$#cXT%ȱ}Hʏr;;^hgy9d&a h*LAU&fA:&^VN-߳yFh03>AP5E߅AakȦ08BF0:<ԥQ -fB~gYľ|S1%mc@|U 2672|pd-!g{0;sΩխ.YlY1 ˄5dBB$aHBB#@&, ` ljm-T9Gս}ՒZL<<¶vݺ~/:2.Or#x*WB"j$HG'h|[Mc^x'473ˤMMM 8,I"$cq47'TD-lpF0 c3^VYњ72+%J!|ͥ۷p b(s,{)>i8h93v3Zs$1-ye0 3-Po cFK%+1V0gU,hsiSE0˲n6ڼԗaV#Ĺ{J[ez:eDU DR[|aP>4>cy+P((HÞaPfE/ FKo>v:p$*wF.:;J"o>JN{ncF[Bl)=M_Aֱ[c˹E6iiH$SkP]g?A&I30xM ?d"3Ox;~U=|a.tgW<=Ƨ3]otf?LQ(Dtwe9"0y}-$BXB KTD)bTкu=k70w|C?EUutHU{,8R}'IryN !c*JJ吖]"JqC+SQY󠋿N&(%('В@LQ2̤Iegin\E\dtldӻ#aFGplR# A]͞/0;;ھ5̥昜ȓIkaBH˥{UM?vd@_`,FxD K .Y}x>GΌľSfؾr']]``bHsSX4 m̀Z4܀֋&]3HAˊ}|PH4Mc]LKw?5Ǒt;Z&.bB>/Tc>I6 N?jaKG߽gBXcQMF4kfM _M{tkZ;EFl"i)Vg Lao}e۪2`l #(I$?df\4Z=#],@-QA P "MKtLZ2Ee^`ׅ0 ]*r1( ?ADb@{#K/Hi1/!u1,L@(V DM.)XL-07]q t_JaHr87D eBs`?/ばYs0'1 |l[3E A9|Z"j@"M0) 9V^[FV\w*ڮxRdزfc|OPP k~*(<T2/OoQ2Pc1?c$CʐՐEl],OeVS#9FDc$!l"0CGYXǿz?w:oo1> ǁ|P2$,u1Xa]mXA|ZuE){P aEx Ю~&{p | `9x+~pA۶lee%%ҹzNVSigAjT rrCeT謼e}qٻ߇ɰBy%#=#?iKwKdg|JrK6]ƁH>ySӳJзG}[7H!8r0(;hll'^0x M{['Ǐa˖LMN211%mꓭ\6~ekb8e|>=8v lx5x natx:ʾDݼm.~k8gX۶wB^,[P,enSWg;> Mt;7^s\(Je8@L6sT57Ċh=)vxpK׮c-sh.`iM-"Ha%Uw^#O_ >u/BmS6& Ϋqa-J#\ H~W?z)ab'K#k6 A,~An'Չ0!yScpfΏBsғghp heR8*+xF,cN` ZH j[Qҧ9#rGPŲF1;i. 2nWFYU_F-zϡ ^)y>Cbx= µF=73? 
"BWmlUgU13:UR\[*d9XV8@(ƒF|v9d 7͢UWrDYe[j_|"Sc2tЭTP5/ˇm޸ VEY#Jw+o>Scl1z$Pg8YNsOvFU Z/kXV.|Z %|H?`6jl3|q(h2"ܜ~ԗ`{5Q\O o\%SǡN1Хxݍ ُ>1pdm(RdQ]eUql}}44?% %(bTŹ34|2 gHR`$i).;}7&0]g(ezv9Mϳy:z)-w~I٧p撝vJʾB|e1e('RshnNP)gM# IpF~v]{v9ҵ fƕic:vȨa7?odzv/0:v+/x2ihL.b}lظ.V248%255%ha_dxo|PDy{/<4{fGOVv\z;kW^|,++ldS/}ON?tۯ{;sYӷ,d"6PL$.;>͏ = JY?:MW݂Q.`֭9k4 qƇ MAJ&ݜFby2R8'JDhbR$uovo)υV8b߀@q$g0n Χ IDAT, #+暦:HRa\ʺzھ@G%LHI"X;u*2s#s~n҈RFkʥ0BZS;c*h?Grٚ\hZh ΞN2$rlnk67X9KgU} Rc%ŸӋᄁW31:HICrY;W'KD3ONjBe_,7~qyG3Nd"05˼m$^^Ǎ7\]8|Kca* B8tHf.K.N`8=f${浑_cj̎m0׻l.G>ҼG)zzͳ=F{[ M>St-:pzl)YMnfkx"s.A,'bpn֮ʚUGK]-u)aͺx^[Wtz Ǚeqyյ33m۾,$7::.{O>ʾgoqնlX}5#.z( C&"᷐d8ӹj NřP|$o4H|07Bmq $%@Wڶ!-J;3uґ̫^ҽ^|&K)FYE5,'#&}dz υA{pltA 7y9l RsD-JGY)0GzD)T, ~kj%U$QΪC0>̮{W#2_'pڄ T}|e=՘:1<;tX߲DZ&e<̞.`^֍Wb AG?MRP߀,^mjؕ4 Sߍidyau 8&@!E8ȿJ3`e j *"b -- ʀ- Q3{ݮ~8N,. T롊0bRͫZ]~o;*ܕqAH>GȖ[p@+C,Xc q+oaڛ?ii66^;gçGQ8qϓz o9 }bD #l|e;3Wp)HR81z[z  6^b:Wo{ll^QS([ Sȑ/Gz=Q4Wz)9k-yDZ#-=-Qz V4EnahyEDDA`y'DZ-|<#Vr8@yX988<-kO+oö%Oؘ\*mLόeefgU׽Gg200@oorTfk8~;w^ţODad2Y G?^SH$d2-ȟeM&&ßyc"2/aRۡ \uNt|O矃J%y]~s?QK#ծ_-4o?'H (Msd0MmLOxIs$*vZ/zvptav4DK/4HZ ӗApQ􌏰-+h5eX=@:f\D[cZ"D,Gf.FN?{V:$[ԑKYH H]fC^LC) cDtnkP+/Q -'fV 83Clvr6$]eF Q[~CDbDk6L0݉r$'"M+8Q, %%\}p]ѱ!رո<#ã457ac9DtXˮ=qeIOXk 0CW!l$1Uۘr)LF@ Bhe> Ce@ Z1:?2d9Ӈu>|-YST 2cN! 
j"QVG# K`VS$8:f&?=v' {3>V0`|rZt)ndjjgv AA}ytIwNVlmƃ4jw LƐ7D*)" lJ @2@!>"kJ9bf$E-dPqN)ұ^ (aYUn.oNY!<43B-uOhj,4{GY˺ 8`>Pvl8u"}o9*|~2V"/4^r]A ph5C';0,^䓮AUJ&~~ Itۖx>C׺4`|_M$qRe\e>wG0WB@F'蜽w|5Lq_ZT 3P%ɖ4"aJЩ IBT͹%ծj 0U=4[5F`r+Mܯ6!TFkS 9413\޼+~Ի42XPVL+hՑp8АPT"r,p+L~ ERNSЙ"xqFsA"b/읥v[i2Ʈ <:V`hќ3*"yk+l6E|L;XZc?:goPDaLcMqͦ8G֧7rE(V SpGQ8c;"/V¿}Xʗ&p!|+ĩhd` ~Q`“y'=(`8+)\RecEI量tǿ}?D 4tt# dO16E9"3Q:_١1s 2Mq&Ld=cD#@sG7Gw/PgZ^dHčMP,~x)\'w!$uP"Ԡ,M C8E~+l]n?rzp UEbI{7tngv4>5ڒտF!l?]:UI=8h> \}z~߇ƺv ӣ!^B<alzT>qKWd ւ1 ȞWFCZT9)f#4)1~!Xs>FԩsӓC/hP4Fo*j?$>H>6x?K^r74wSF 0}HsW]s _/ MgA4Mϫ9_qFmYGDX)mZ ­IyZkpw K׽܌Ps|GCejgfr1> S6wvpIqK#XEL h0*Ô@`F c,H'E63"8}どf%Q*e2^l kj3_9B7B pOX򹮔B)FPD=M>"?5L"„u%M_Λq* HNo3KyƳEh~]1|hlhf,cC3ͧ%b-Z Ks"γO=Mh pD&dk:GR녍YU %2;g GNfrR(Ә.8VjګQ(eIvRԸ꽒."c~~ѝ2 5+{q,4,7]*/A\YTALJMVn뾄I'vc'Ls4 _7;da[V SD s}.[4sAP_/HqT/y+*nq`dyH{6 WEh2D;?|U.TH rTS Ε򪠊e`mykGߜ饩qtÌs{jNeL3Hs:cǎ7<VPl\Gxaq ^#EM,| b|&vR̖F"~"&q¿`iʚ[(^y,.~?Y\AZDNHd jzFO̯yٶX0l,b /j _C_m$[q9=|7\1<3fD q8^dK*le1xrs C&X 4 '8V<XqoԪ.j.^a*;'mpK9a7mt%AOL Ę\4CcN4+UQtH%1^xw_6Gޱ kX$2CZzxϯ3eŐc'ߏÌ SV; &bF)} >7t97y5O;xNf/ NL08<ƚu03-/1?pH |w>sT#RF)+|c -02Bll"VhիIӈ|be%5p/ɱ-M;x4ن^ Rz ],D͍ LWՑpV*55fI(|eS4hdMbqG89!Rc͚&˔dKocS?/]T%,ĥ<].0˭[߆֝D }=k8}zёiWn"g?%[ ?C[G׭cppa,&,Y&&غe8̍׿|iハeD6h4Qhs:RXDu -)`y]t-iqGBY'"-b8\5:uRcEga"VېI1,*>'u5^_D " /an;Ǒ_wDª_ ftxd_F\ :ʲkYqf81fPyZ^ؿ{F<~4dx`4}][/*,暨ylS@X^\DHs!NTtF96,yz~{vMBi`G'kؼ{,(ZL啸^v,A`̝5YB9wnAIJ٤FsTn0ԤstEcup̎>{,Aa nE1L|xQ (PF-: -T/ed:,P#T<:k|g%8x0k*\G)HL] eAZeNQYK}3ssT`,n$5;[I`y۳=λ8f%Å,+۶]lBhvlH hM:^PX:^a)2Zdajr=-lb%{+W(0k\I IDAT|gyg2)Sq>9k,#N( U >VU#hqn4m.bu-$Em47b' 4Žڲ';}l|H*X26sG1xr? 
]t洏)YA@lh`|nͦfzW624pƮ*yQuy?r7>dž{J.ٴz֒2=6-w :~yfS.Vvo#x {Yb%xdz'HD)3 ,a֮܄^S'g jP[eJڧd DlʂR#M Dh`WLn@(Gdbf^`.5l|)Vo1|"Og&,RõL4.I$d2Dd?rm|_M+}G4 i.b!B zhWy\ լ92K1~ґg8x(MO`ug]'\v V[q7gSóp <~0jo 殫Mq"$حpNeY8"5lRprb},ZT׀; F*EX-cV_԰cwFBkЖQA_e?+vϲDbm;ȚWЎ |ף.HS#xHJ/hk] Xjr/X2}u Q!J&56NS{MmL #_&ẎWut8aZ;U m C%Z3!6ZK,D~\Bhn t'XH/w Ic4jYٌ $FRё)PEo$FXZ,+X˲r RQ5- ٽ>ɫ-,#"l#\{iۯy?%hurQF\*!\RGIl]">;Kvk 9<_qbL 0{ѽ׽ ;tMkPj0 lH.6- =“M+nF g! "2.2@PG_Ď+_ţO՛ql \ruL'Xs)Жg8=4m7f|3J'FM1=3Υ[, ã464b8CΎs7ߦ7|5j ^ wΠq0`,} K7sbnv)!*0:6U;)Mz.DZ'8x$JC]G!gO?{03K &)0>W.cm>d19_$8NB0pLF @Jd;Z)CKd of5H Nu s^d 1Hvn8郬=YPbٶ N%wʱcPB|e ;`cϡ~w_j~~nAuy37 p(ߗݲysQUH+$,Ps9178fr&,5 x;FR3ЌJ.O \y?k2B935=Eu.+^̝0Ӽ& @dN?11bLa51'ޓ`Rg m8 ~ɆV2 ɵRpiZPf7A Re fiMS13hOAs s89dH`kZx!##:ealT[fHodqN6=Cvٸ3z ?XڜdQH,Iyzbg7iRu\ u\ nֻcsk|Xh8?qj1h}f|ϜNxE & ϥ<;=lHdC(K?8s6ǬY[ Yh<#n7\ʇnS6ۈ#IBBʑ<ͻf>B+ ^7|KG.…qd- 1ֽ96? hi6)vթ/ =lehX-[*?z }1~2jI^@zu30`Hm3hͻ=̓F&։z4f4~_ګ2|Ek0{k{? 8~(Qz<1Mx/ Obw#r'xA]DPљь-\эH&=L^"K2PsAMZѽD%1"q* aIW|hvkc(nK\zvcEzYגPITCc{Oє=ՍЉmmCXt^C;(mDqVĪrEkgfs\5x0X txV9 rV)0!5&#.HaӹcODy)50^HA)g~5\9g?K OG17]v5J%V8LNsHpSz\e BP`Hьr_a39]c351k֬Z*N,,fɻv%co/Fu/dzJ b,ne߾c e+ޞ<|^wsoyt\p!u-l\ e4cӼM&822r S# QLW^.(,m[w4aF5˗AgAߔ9!1˓ 2Cf#O#V#@9[lGDzuӛp6 v a )8kff,h~3]U!kXlsZՇPTBzp]q2ѷ 乪amkuv4À5MgK6FSˮMjxmm(MMLx~ofx`KEjb!+g~\dV cpuٺ9r5GpMQn}g[͈MLVHb o n 灎Sλhv|J*+V0,Kݠt33l hRQlN~p{˧ށIS%ML#8QD䬦/>(QU26?EdF/cKIK4$m*b: yocH҄Tm-ƲUԧk\qޫӘhz.#v0!5g5H9?qDZq`W[4NsD91v\’m >娎rRc,;1?=%kBOᨍit _׿rsql{ѡfD-:$qz&xsHI"/Ez;)2L$%M,rV]ljʙ0VTZ:ڙ^ENF53_ -kAɘ/ϋb2/%+N؈Ҋf̗n,Nΐr1@X{ pi,T27(ft-yԐc.ҁM^75_ D 6|k$m/*7/`hj <#헄-K@jgw\W˯㳿.*IH:H΂g$>Nzs1?v79[Ԃ3j[Yw{ă)E1&Jҷ vδcy3?k=kC4A+) 9MG"\H5R+;q]Ūϥ\UT+R9 n9cr ߁FHA%Ӻ3]!c'ѣcHGIEU]7,yk00W>;n%Xe+J:YGybTBjbV,wRY~ Q EDN6zEt:_akV#L۩%X%ū*0+RggǡCLn7> ٷo֬,迄 Ldh_202ʍoU058M{k۶?E^xDgW?mlyQ6wɱaimiC'!~jTe Iu܂HWȖfM @^U26:A4-Rcs,Y& x_SS=cg10<12}cn%_ O Ƽau7۔gفBGc9 .[ŝ#tw-'07\m"zpNevM( Ʊ5*ly')Wǁ10)V^ 69>ڤ WaxnW02s3f(]]]N5 a!OT=\E+}60nZ BsE$ AMLiƐ>eciMaOm00ة1)]u'0J$ JJSzisIku|TgcT/ 6 z1,34rD 8HEm)lW=|;^zrn{to+Ӻ`)bYAзڣk 
li9ttE#IEwXp`A*F}L'l?#r!O-hAibR鼠B.ߺwIt?AJV,|YѲP1Acj6D{8љT'r~)}N4m S40Y4MA IK[Fڬk&x^!(mf,7Sg k7z?~Jϥ\/fG˻8IqWCtlpQG//BXx\ƗBILL#$NbD.*RxJkzRg6F1ֹů^+XiqUmyp6OY 2ДX=ed̉ #* h6) s%a<팛iyee"B R]}D CBQ0_̺>I5[ۃ2jHtñs!m>Y(sa7I'N9 f0_Pe/4BLgn& s5뢒T+-\"w}Db~޸_Ll}Lg73<ɢx2ErZ:K/Ԍ98ɄAHiа{002dc8Y6nk|f~P`pf:@[^2pt픦dP̻Lio(b i CIP,xuJc' p T@:r2AI,0c/C5.h7 Oڂɀ3%ĞHZ%Ơ{%rNw 8p$N G⫷gù>B x:.-$X1̝Kc,10Y$d}wp-oeo|TiZ63qIV.'`]&&TgHϹx1zhh=v{I~ wضxې6'J(3T* =b: T:Qi' [dbZ9>##:w+y#~ohkҲЯqUoʯ]*c ><ͣ=jX`yѿ\b8ʃQҷ<6a.cL׌En>^s(\"ϣ0%:H$rCbSeKyLlZ#w~h4C:H YxU<$zri,|KRa#Ğy)^s۸2||bgq ox'cr]gw~.F xr=< nf_FŴ{ٻQ$359t/bh195D[{3pG;lu?nJ*Mr b:uQp ׾V.t3!mȁY~Ó,[plmK/O~ƪm`Yzn#$MX`b|0>Y-u$ Wv?0>XyioYyig0;ך~=ha=0A7N]4SO\-KqG-KXGFԫ9q7 =->Nۢ>A>|KA$p}{P;!W7SN-J{_q}l4V8hf7$^B,gْY{7ڦYi&jB$M5#bE{[+R SQq =)M*rG+hD{{-ֽ_) %}39FL1=|L%e Vo$) |8G;9mx^-; ~E%Nh a:koUcl;Kϧo9:yDP<(d+1^ Oj"yd˖x|>n?~}sh5d{') vw5NϚֳe9<1#lj?;sMPVr5VoXLqQѿq~y|l$GÐGY{f X>I#y22vQxL!#t:)*)P k1;]w#1i1nE}9TǾCuV_[a7mƠqUDr-˳u~8x2Uw/u N1|V'@A91·]Q#)ڕr3QNF:i`abhNH* B/~ʥ]l%y[y1i=!bבEx69==0B#C3ݬZ| mŝ(E۲+נ1}ӎk\V.5/ᶻCa毾[,ě_AJ#l^"hǏN/vm>T 1"h?'~ bH2Ͱ|2޾>am?mm lu~gck\#"]\5a>er $Mսl9 RS'1$)K;V ?A1Β8̜T<Dίԩ*U&&BiM.TSlil߷ZT# -ޥ N;xFY94XJ}c#%ZZttL't05Y%,G `^yR犫諏!g9$5ۀF%7Exxh#)n oY+.d(.4-+ T# -ڴ ^?$^KB n;|<ϠFIJ7ay9S~e+/YW#7B_|}G QhiToרtLabLGk>vVbOűc+;Y<*2tq/C/헹9P 㵭L"y ]'̶H\ϡRNx`Cd<"$!Iebbp ݽ<3>¶ŒZIYԸuc;.6rWUE%uLIc7صG qz&NSo_ʧUѼe -1iy7yr?s}L'rҷP/z!jF,9 U::+;wScL_i߱HPRs%m} DZLs(ʼn )Sƽ|G+/ 3=QcsGY|kxi 6u S&mw00x_.ImO>ۖ#Q1{f 860kfq2:dd#2^AD*ٮI\r 886~9W_|#WB\mN }BiJ5qy+Sa~[<^_fcu1\{a S:[>tR@'.;T'O">Oպ/~s$8Z?ᵯy#plp+ +PQ.E]嗭I ցgz9ue80g#<aoҶ`VqBhKw7qBˡ J -ET؈*i#f&m N I(×.b5iuSZ}5nɑW[D^u:ߏy0{* ܴ kYQpcԂ:#POK¢t\gj{]-%Va`CCBTtZ3suW7%m#t]?}y;`-/E<{yy wO"4^f7b+Y6-X|9A@TNj. 
O?D?[?>α ;N5xѬ?ύqU-9NNM5-j:JeR!G$BRu hx"%>su+6qt-]x1%̥KmEP%^d(tpmRm_ߔEN|GT~xa~La}GF$U7|Ƿ2_YrUOe|2Mى("pL1)ns_Gwdϥ|I!gcI!A"Ҙr2f֟ʗ^> ?}CGh=gvk'篼_Fkÿy7S 2Ub,$q 8|t/#Clp!a-.ZV2:{܆t5ĩ*!)bi5B ̿}JE)cTEGhw\"~xc# |~}Nk/7‚2V6"$󔢋S|+cELף30dR& vCx, igcb!]c\2J&A^A/b'AcHԼ|/hGR"l eS*>gH_ZgxZS*Mmx IDAT60 m܊lJL07k3!F CǸKGGZ)Ḹ:\.сH#ڂNr6tRxA4I*xzao2)h 8HŢcJ.:Q֐CjLejv-^k Ͽ,Xڤ$ID 4MHTrg|l~N=Q ɼ@P*N I]q:WHmĐ$bZB.9J|c Q-EDGgV57L_+43F?gkuG"\*"d #aPge&x*-!G1I`|C},"Wԏ,1=Z%ג%uTGp[ꕼvͻ&x EqbIggĥ6 B*Q/qtuV-2\L앯g幼7~@_g_W?Ʒj!=ϊkJu&=NGk+Dqm,9w妷n։y7N4@ӱ G؈gQZX4X,\8IIS#OLHđ@\8w1#t)MOF S"$/_)Q3:Zܥ !< 2[JynZ߆SsI\'h Iq>{om6d|j\ςҽ̫{~C|oG| zW.l{b;E)!xNB# 1vc}X3h! Ǣ.JB!Rđ}m|2“w7dq,1?ʾq|1w־n>'3̲.v:9TI9Y-˶, 1c0\|c䄱eˊdح]]]I;TU[j}j}p{ާϳVDt :C˸M:Z$:M;9p0KLQTfz|+=~N/'x\};xC{=aHABiƒ]exc's%N*VVٲ`tsO÷|fn቗w^Dd)O-7FCM1ϳw%̙6nĖmha)8Mbʦ^~#ʗ)|Aprˆ*·߭ Lr@@sZ;i:[!JBD.Kpdʟ:U! YQ#Poe(GNYyψ(v6^_u 1Wguf !ru)T&Fan:PFCѭ2>$GlygY91 _mX "uE>*c Ǥ˟JqNDV1.`ubo.pkؒbM IӉeާ~J8yzE IdO"Ep#yW~QBf:င$\M;0O~+#$ {_bR:~Qeâ $C,\6 d ӫA76v=) Cu?ʷZsڄ}<8Rr,#!ab .8F z \i^xWW)KQ_.J̎X7^m)%n|nw2e`2ecA9Vkq=$P( I˜JM5:cT0FbY! P;s^~&\q45fR% 6L2c4Dkc/X0m\S:V}1V# Ejcctb7hγL{IG8~Dm3 !Ba/4 C\/)qQ,AIѹ C+? 
C>J+.'N.yscG)#>'O,15dqEP,S- j_Ϡ3M-F"ʋ>2Y@{< XD̮\阣T0!z[f\UIia)t:"8J)K$qK+TE$8]GV\~_↫d" IC;Ȳ@Ȁ$n_==ߎ*YEyC|;#Mv r_?>~?~?E=߼>?^j~)A[nW;75 | ,'K'b4=uj ϋ'!vpaY\]JQޝ$q>e҅e$1,t0PH ҃K9&#Z7d82rt잼=]xKE}v ˌ-J[9q4&k~!N$gM^FTv]ĔY&Uo s3?R%&4~)Xo=`%=²C';^Ϟɫrj ӛ6Q9~t;D=M%dq+ef.dDE ݵ쐊z҅^O6=N>wEJIL p^e 8/ENu6*0 (L *̆y L66EcHmxְm{3:;Thcg7j Q_ݚ(%2 ]dzpE1j.o 2?I91CҀƊ k;<ǾS3ʀUu9D d k4rXۨ ' ȇ̂ $ʷXe.JRnLN&͋plxl|#?CƮ<ɹ{I1Z s~+4k2i,]$Ilp ;ͮUYt^n.H a(6(, YtjSaP J)NRZLg[\QW*I┉qlcM";v0h{o܂&_swq<T[B`LFJ l+Fk$Xl)5Pc/(:[ Wrߑ& q2ia&ew<?8A:nf˦@S){hzz(= "(ЩU!O`U_|Tf%NkvLo_cim cppAk9B?=}`ÇKT|ڠ\b4q Y֡ђ^C 0tY#`q P(8dV ?(۟bw=Vmګ{f`*yA I3Q~њӧ癚t>Z8N2>5E'(Y?CVK4ju0aLR&nweų3\\Ta A!{7)%b~ALer!}}$*H΢-+6uرLmN_5PBqgeZ%M4g綾tf5#Ke&gfhwbB'ٴ]d[|2!<豼):M^CJK :l2FUc.4J*20>:HjQH ˦*-hn\$^YD0F5sm  $(X6O}chӛ!Ftw(;E~~G+O}}{{1R3FUpY8fvEg~I)#8vHwClrKxoyw?S'OsrO?$^/<^=4eӵKO,3?2Gߦu&IFRg} /-|Nc7:=´W^OgblSѨ3{3ӼaGh6vph!eRoRv:i’|x}3;_x}'t;N32:鐙U4/.ѕ Wy`.9zQc1tbQT.5k~/?on|J2㼦 2q F&Mk P)H8 ҄ Amt{Xlx r&v355ÑSS.¸@EHi![ -JMe>q ͱX8R5H݇+ȇ>kؼ_WG(3= =?ǟ1K6wY |OpUWQ/7% X7_..z%'^[{k˭xUǕ qP*riK \{|rr}@ M_Ͽռes|߇tU[|wM6]JK.|x1AxWWR㊤opFAJ<:˖-[1Fq67PJ4QNM\u\/wmJ_v9I~8LXwImtJV#Nh*<#VYA*:[fFyyZJlJtC\G-M1(clr `8}>ʪHGwt{bA۩#EsfQ⠊up(&}Cۘxy 'RHH:(>bT')R K Wq>NŃNBiR پ/}p?;kr* : t~#',۸|[%BD}4Y޴E~v\|nu~48|1 ~T(YaӥU'q,9tN?gA¦Lt˘5x#c#=cr cq'JQ؀$т*S,+'EVx_~:#&DC#G%tى\YIWS뜢VU4 IJ$Td稢8]Zv.9͌?m\?'R-7IAI݆/xL)msENϝu1҇љ9|`[}vnb϶v6z' _͊3ʃb5ؾeʙ< as_ט_L*vM߹A$5D S̿x'Wm7T߅&fXm-29ÇU.jNetI:WR.Ьn|+|zvS*hIm`.qϾs$3ٻ[2juS!Гrv,_ţLO䅅Y(FwslWﺉMۨ5ƈ J[T*'8;@LH׆1TmYK? ̆ ja,6,eYh$Rljֶ8kpk‡Qvg.^_1RI RbV['"Ye}f{5$.8ɊԲw=`a)Ң1W\"' NSΒ鬱C@96۶=39zŇ>KCyY T8~"&o%;ױw|n1YʇoeBrnqr%ܴG REAdT:\o20+#,s3?+ͭ($Ͼ_yĹ"~M@~\-AIDm (Hb+5-q ! 
/Jz+b%{-&M 6DF9|&rJm6yJ*f=z(j~\܂jRK?(M1{'s3k+qb;=dN f$ +Vffm7Tq&ļ$VzodjrM6QݱRţl0ۘ\ ʁv95x.3d~iԊy'2o]pls.%\{羁ј~#cz]cui(ar nSW1i& AJeE!Yzi Y\\9:GA)EkEޠR(PN5:$hHHcP*!۝u^Wt ƨxV8̺T)#m3/⻄.kw)T+Qrd7:gI8!NC^!f )Ĥ1hCt<Rl,/1ϢkbPw,Ju~v%kI#-_ޗnrX cpR" Cl~QJQ,yH!v#TI: OtcJU :qX~$y(O,2~yv7I>H"n4ʫsǾt=>f3OJ)=1zk~yoMx;##p-m My~]4 tO|d6K!SlDk ~L9cbSڄ85\Wb LNËOaӖrU`p3e 0C"$].ŐxEoO5QThY~6Xid&̮Ӄ4u#Z*+\\WCMBǢpBxya/,n7LpqYm.4RaW\,HUN/>r1=ACx)=/ &fr l+\K9q1D_=kMIa嚗|Mϊͯ~.=74Z.!m9΋Gx~VrC^fF :v '733@+swm%|kʽO܉\ ]O = ,މCv. _o"9JGRjH*iT>@E3^I>a!bmR>rm7",AFzcU X`rtƅdf١m}$S!I\ju^|yZ>wߝ l@v};tp/|{|Q[0^Ʊk0xTdф?=iD\FPvޱ,qBo DoƽQDKC]$=Юm7@շ/бQ8ʑ:aS :8C> j<_(K:h{br1Y|;F1~fu$.(d 9۷qbvnLLϰ)@j,A  UqKܭ͗?`<^ER2Ľ|1 dY H+0Rv@G(ʱǚhc.,a"갑Ǒ)?ong'aCypKOmbKv Ε ʒkbc4?={ ~a^xAl4N\sTL&ye? QjPpHv%h<73;!s=ljˤ>&IŒ=J&ʀwن] AH!^  +FĆzuMu{x۶&b/7Ɛ-* E& a'FUlW/ly꬇\Z#S5VϜ2ҤLZVMR&NSfAT!N%Nf=t|RMACR/;H nU1,alq;)(0ũ#<)P r:RtG jxQ5$ݕM=s$xIFE"F(O"^7"(UrAfJp R 2"Ȋ{ sy,` M#qޓmIn|4b3*E8}<'#Gq'wy!}R   m޾~" y*ك+A8?d1hei'Om;'0ILZE ƃq]P,1Eu:$]2؊G>4ᠭm L[Qź51*kU~3|MD1c|G#!͟IP8Z=(cwob:sQ}+nf4+?ǿK>N Y*2ժ`fE0Fg塞91ƌ2 RL I],b ;N'k#gɰx` ñvH-h^7 l NݐS Y,v61bes[H˲ʷLc { }Ӧ [lJwvRx;H'-\Q#-`q0rxm;XmP%+.w|6>#Xx2SSNn 7|OȖÌ`e,lJXj;ilVڗ3!j:#"ǘZeC){r/Wl}=]O/ Ǒ4LOrl0f.fżK}_~ s]@lZ|uA;95T-٠J0j،"̱Q Ć`Nfw266WXh,} @.(M|_G1ؿ%E drE̗>K B#[ʚn(,$DF %${`BCLs)q@h8MKcVD]6NT$P6 k8iQ$V$Y$PDXYk}bPHիI3rvRV!$]Ȯ-Rnݯ1۞>bw\n{`~v]  _]oz(wvكK_.sdY,Ƭm!¬aj+Jq僼25ȽhphŲ!jϻ[ߏXaAU-QW)qX׌n0W5{υ1fSj܌E%D8YcbrXLUo5i7C>n2m!kb|ɝ;H>9r< O̐hVH%l`_Q I\gR@mN?x>Š@զ8֤3{6p[T5lj %)T ,AEgK-Rb@-S**/3DjۏFǚVǚPH>'IL)s[ `}/?wc%[9gN;*yi#5&jHA8e)t[;/\%@aν{qZ;@xXDRY >Fƙw]eCb4Xf'qiI+#T& %jcXq@hJzF2"&"5`#9.h]O \b_%t0>Q$ $^YYI)Uґ,G1ڤCO!\YBaZ!\Q*֚0)8%21ڂPr]/@X tW,BQfW?X.Rϴ-,-6g=a{ŅELN2nRU則:ۋ>=_?ceDS:ti-8`Zs?>ȿ?\wwN%G hkKM޾+,ql{Hs4W< 8ZE.,6gFH{4'=:i{݄~Y:2ZBVZ1 Ћ>1IDy2`6#WxQlhOo~>ݥ;[w(fe;.cs(MsIJК# ? 
53WM1w~4Drfwϱ}GjHTg5>'*SoJo,q–>+1B'`R# lH;~Zer R C;f*S˽Zk-#lx,pU6)ӖXQϳǜ^Rrڧh᪚-7mphb5׋(\<21{[qIxJRT@k!bq$cv^Z~l'CݻVA#$vLzE-4||@,υX04jqye'oX|<={?|ZTs-Qwk K,u^1 {u*wͿLnY7|Z" Sc|?˻nAƜ þS'iSrn-<<7VA6(BfWص n&1~Jt륌dvcE8^>LMV&iH IPb69ڦc j~)glbRs *.gtje6Olb3/K}[Wz4u~YvmǟkC^Ft(,//lic\*[mS,)֪Ɣ&Hz}v ԫ~Hݣ8DABHȟ_c qL`$귱Km/>H;=rXxa,;mB8}d%)z.O`y>ۓ cX˴ǟ<<1#+  ' )lMB'EKkxSͷ0g6g"-I!.x$.JYf9n)"N [`yiz)=7OaJp 6MM #\c: TJa%e bˊ aGT"0IΚXA:*MRRyY8(IpɩLFm3ϋR_rֲ|pǹ͓#ҨWpCAXna XX+a^R*jN{Tucpn"<#ꇉ!,iFLu]T IN)Ld%{&ؐh-BK)u8y0fjGF O4AGc 8+`U׋cڭqjD(MIi4Z]X{O(I E''˜œl:MۥRЕk|q~C&LΌ%Ds =VX^"ϼ￞;޳ӇW9Ld+8'T.fA҅ZJ[#ElauQxFOl E$G';7'D‡WE- )bYY9}Zd b4 œ!}R$x,^TR46VIW;K!R0t5µlj`}!NۉY 9ӣeiiFex>_};~ ~oMk405!CUc1$?-|[8)]+o('wCwO'yۻAədjdxK;oۮCB"6$UE<9öpJ>mU ~iv]{=X" @<z'SD/vݿRoR$Ua4hĖ9eNFm߿'"N(W dwUpI7oRtB5jZ^P]!>D0;3L6޻?~zm]!=sxSi'Bv4iC N]>Bqg@ɽbU}lsuY˂6lJ۴}>IWr){6vt&-_(;v' b݄}2(U&S<'Oz\FfU{[\vyA^<(OОdg(W}Z"ޟ\Ǹxs[N)@tr=_+V_{ȗNk<܋(pH"OC'"I%&QO܄S EX7ao%$ /,G$Z8Iy"M:̰Ȱ6*lL ^Jccl֩NJlPMAǬ%g}MYօݫZ0W<ľl7=2%ʵmu vΜY& F(.>=,>y7{rxG#هe@{w\- Xs'C~Cf.j%?ޛ[}֞i!DgzIh6&06cM=HAS3 >QOQ057ylom31@x.F XL X_ 0 ƞу^J)P)U?l m0>G#b%'!+7,` u)}\%X%KR H͞mMG+ukh!,n2| =`)wkgc0;v7F_Tӹoq'e} Hǎ?h][㍥e8w<$|k5> s.T T,KZ%B P ISSS% AZSj"Z5 XQ.clbMTFpw=sK99m 3 zz.sG2 KWsZVkq[nqq.+9vjj9˘ X,!Ll1BKY!8"BV%܃Pfr &H*ShcnM5}>̠3}L$f\:[#*JVXEQmzH\M,odTj3Y7:.h=8E{1D9o0 VWiF"\Y7>ձag{r5&*2Q? %sN9RRaKWy@qʲDZG=;_J2i婌aF#*\|5?@?X%} g:;jBܫJ*<.G-˳!p2~@-LO=3T>1ăA7Xp b0KT&kygu4$p∼hi_R&)h:[)ՒZЖt9ÝK-S )ӶwYt u}\lc1 2O ͈Fw}o1ν_FGUiq/o{*A[20ql^g_bI8fX1$8WDjY1:%1B)pq IDAT٠Jк9HX㉀0P䞡/^dѧ F nlB0%I[/\doo' 1 Dz%%<,8e2F '0܄UFЧ1-2b"݀nk@mJ{Ks>aLO9I~k3TI 2_~?<'p [Ws >Qд$_| o(٫< !%RU4moj51R1bbRL!;"\{Mb8Z,9 1nΥ*BJlFtMn^kπuN+Q &:TR>#ԚLU;J傴Xg_]~3q"?0>N7;>Cd3>wS+γaw<˫~?QFO<$X`7n-~BS*J(ECԍm!ϯsp˗:trI1L4jV#+s{U*2!->B@`ѳ2pV&{",0=S$#!(hkQZa :u8cC2A xp$:L] qN.SB{4>ֈ_&o-_? ?Y(\#%x35L  Y๏̹OԊhQKxǪ-nS<9zxF$fʼf 7.sEΜ`$M*NMg,"1`|tL@%(),zG.a6QFgi˗s;L`vPX5 D%2I+NlxϬù I'RrBq£B2 -/1J2 K<2\-C=ϿzeO.~杍! 
'$)ix7 3 /pE|A:ARS9Dx{+>-ڲo]XwWr-B':Ř t0ca1H:ck;3ǎxR&(*ti'9t0Q i"!N.HUX`{ws>|;TĐ"Mx4g&T%0jS䴫4I@Sv1>\`lλ2%D"& A I m5!2^>fr<-0sy[#5nJtw?yOB[%X9p7Ai L0VV,3_`L*nJP.QNV!!Ky>VYRvtz$(iwi,[ݢ ∉)/h4֍T&'=OM\~-,$3g-J38@[<>w$xSX$\\- aNS^i[" R(Mԉz5k`~OkOȜ2 |jd{n࣭&rdyRi G#J %ug 5vddх$s)$k˹  d8P7a ոi )\@ou Tbӓ(4 ZI6EjA͈)^.~<87{HIhJ*OF.C|_u}Q,ك>GO謝efPB@p pd'RTUsGrKXAD;X$Bb~eD " )F dڥ^+\ep*t4ֽ]}(IH߽e8VѼP3i5i(B|GrB&j%2:Y[|r#g_`3m|iE-N@$` W^ūD/u&:T\$2eN9 'ʸmmoK|zc-fadc E]~ΤpY|Ӂ8Dfd&eljC Pe^xkkTQH^gZ,)NX"nk 3ƒ4;.}I8fUޜrT>Ac2+(2N %Cp$c|zބME?q܄)r Jbp0.owq#:E+[]9J#@P*E ꜷ}$?]i 37/Γsܯ `>??'?.y\Z|Koz7qqexȋe!' g,"Nn;{S5}ґ@ͥlF8bUANPd  \!bظcZ rh)5X#܊,ZӄA^P3aB9q X8p8S Xk zMgwV^NjŎ8ssӍa? xK˭yKEa <@ST# 8d_8C!QRBSd%(EFA%aDlm܆|7[J%e8cqbHf\g#9_Nxa;BJv[۩c]_9oLL)w'楌{..2_> qD'#/,~r->3ȧq!͓J/dnuIimm359k7nr<0fpE9/ Eň ȓ{dqS,\Bd|_0DY(![9{^W9t ڝG|_x ^O2S_d!S3C:[̟8`}NGq+#ؤOHq^q|s$iFXX,E1K:RH0fjz*sBzǎj,4NDf)aA nJѧRȌ)af]n<2ϣT\xtrڇ٫}O*xu{QwH-=yK-Lsd&inQ A 1X%gkuGJt{m63HF!Fg]6j>ُ;@*/q3hub̲3ml3@2wέuaVpfr k=vS'l,.Y8d#ɲ?EW!&JIo̕- #J*וЏ{8>Q')OUYfj°,޵- ։Oj^9CF)׿pQ"؅!?U?=ZmF1(- Rܫ/#<_! ӻ^DVw|Qo? 
黾>g @ŤfHsZqHx Wx=G Ax7^ṃx"lVLqnVk !:NQA@f34%2t=!Zk:mF?`B( 2 üs|ɟ؁\G?Ո8ę3/#ѓgqG'N}&ky]l_b̝>N)e3^W7<.EE=}lnX|ڃް.fvt~sa/b\P-*M0ng*w31 a_CZ٣O ~K_3߽{_^feQG3=`mi|@EW!C4 LVm5fN$ ΐOǙ(C:H?̓_g4J&g, IƱyK6K!AZɜ\Jİa@ࡵD')|5T(CIzCg zo^BH#)vlCşT)ޤ:.,17{0QLPVR-BQ\8IϷO˯ OslS xFAh@'q8TTM6^;ekD{s %RH-מpTrn M%~o]~.|ޤP6B;+u!JatcLj\TQg7fG!6IYR꾏Yc m0?+|CsgÿjN4R:(3kcesc R :W!֙$bmM^p Yv 3d?.Lw5@NqW;) )nR!DI:ΡMql1xR7{$ C(&MSd;kRT;>GjY\7q(}?4IO|'$#BuURC&3rOC1^LVҲo:ckزkX<2.6 a4:8(lu)bvD{-K֍i'u(Jl,Z C+P.."PZ@"p +%T079E",qT@Ɔ@pI,By ~GOS^n,4(&PZZ?.rwlq1;<"ʨbAn^( -luzX]' YQrj?(un1233$qB\&q B*zJ)F IDAT.Nz&mBdq K$}ۿUfShc}@P'"U-Pl6iLs9}{@ed<-WӼjfi `25z#r0I lmo36:ap\~!t#zCv/(+G!ABQ݉3cUY|6l^_IoڭUN9F~'x/R\or*Μ"*y_*aO9Ic.^c&%Iqqi V7t{X\ߤ3u8<@'{fgg(avv+Wjˁ:68f;p^Ž/M5,adҐsV\GUK]LOpk1ݍGUGdQ\< c)s׿@怩CJC{G{g3YY\1=Mcbot</}NĹGJb5g_daz>вqu#!`"~ѡ \={ADv M_:zQќ㗊$ajA-bLQՊjuTPoVi6W94j\_NZzA2%|!@H\uJ|v5?jn٥** AP-0jrPRu(S WLBk ڡXȓ=%2[f[5 |O]𾓊_GGoM{:xY02rMD#F<:?Ǩʘ>B6j`&]T#o8J_l'c6I 'G9v_x9~|kϯm; l*Љ%rHa(mp-hVޅӯk!~m~Mo.&.:|kE)FzSAY#냁fET{l%3}n$, K9YNxJ]qHdF+8w">Ea"Q Le\DfI' yb 6.[|Bɼ9 N,Q ,@yd1wbڨ}_ tžf{lku65_"=v j ak&XpB'wph lCr%D6py "Y2=UR| ՌS W%lrqHuբ v-*"7J>:~m GʇCj:ݍ xOW_|c%Yt .ɋÌ(faRY;ssv#gr_F^C6lrG[TӎFqz AW[p}Z{#pamH 3o"L36!ajt:}DiPLsqqc ++8`2l01168%2*[AFVO`ӥR:KEj pq:RY+zC%).B8xL\r\6ݐAgԡWXz5&llӻrf$ (-'_ ,I^djTVkE;BOF[T8F0AֈVK;Lf 1.dv +묶Bę`s( EaPSz2A5p) q?(T1d D"SDSXr2rJ{kW,_}<'do䂩)R.$)8<3V= A\VnܢVqQ6WOlNoo#8#2YDF'/r 8ޝKcw BcXo#~7g魧|7^/1.iҺ~^c}WyTeQt-3!ø}1v(䏕Lwk-FS%eNW~a:ύ(ea^h JUϏ]ɛfed7OxiQm\ C%b1D6W)C9vЃ1) r:Ovki%FZFvDd&rدϵ7J9^P3D(NҦb߱on+w5c]uGʘ~ur?%̒ݹ3=~ӧʮ±y=v+ꕀ'ɜW76(\磍BK\Ydr D^Zgq &D 2 Aˋ$h#XM-aՕ "rnmʘL&wo0xOia5*:r7H1QV؂w >Ib[,SG\4=M6wxii0 Q*El!b#ՔS\Z٤ڬ30Y#uF&^cv&qkuDq(OOP#,I<#v| 0gK $]+U#4Re(W")³#Z+%?Q*+aJBs(87.܂ }N'\sjF }N=rAv]眛_~{hD bJdaǒlnff=[ʳkkv3P^z-[%Y(RHS'tz_>P%[C (t7~s^՜?LZyY8s4ah0sv棺kstW:Dk!t6WfV-,S]'Nq,_C(R!W^g21z#?AR,z+'/uJfieMc9@T6#ͅ )tT:~H*vȞ}]s4ٜ#:"[4ҩy c[_Vɸ*#qRD1cѪ7Ϯ~[lDĎICX̮~pM?9҅-{+|h"bK!e _ϽwΤ<3]%u;id"FDnx8lX4o }lf"S@v'~ MdpyrIO]W#5~BO?9/Ư[\;斡,zg( 5SYHcFV ֞k[dI *|{gy={ߥs-a`Gޠ3 
kX'L?\>)pU}%_Od Wu"MwCrO1ث1"1 3cAĉ.CyC!ho(f zěeANMӹ:GPDAçc"EgtZ@"U,rqm;jG'1|ZMci\iҘ_–#V;u1C"Ͱw(l .ָѠ։MNۧmїNV|fY k9KחGcXhJb+kluF1F5B.QceFu?&Kx4;KL˙}Z6ݫ IRg?K~i$4\B,zb&άPE^kUX"߹/j+F)f}mL!k^Y9me I2f8ok!aڢ8>ՅLq~aGXhR7R\W Q_e@b㺴mhG5Lchbj+VW) Vp= )5~'fmƖc47:QBHn1W`WFmThJgml!v22 2ћץPE1o#@hJŎ,|[GJtGG66 )4RE,"aWH43CǙsg4HFfV-/+9}XW|+\]0~."baYKvUMa96HgGuwI959v.b3E[+RNjɁMowOR__b+ܺ~Ops>cؑM9э:Y~SEL]'-^kp}2C1_GfhD˘nCY 'MSr2,7 v -12\V_A)|>M۴mY׶YQR=ή 3̸i](T in;WbeibIh[8{4RL*MJw:9lmP.ɧsdey];YNsst${Ջo <0RF,̰Zr /6 v :m//3 N07;CPjR*@*VTrKknuɕabY(B7=`B ^[b#!`Ɂ'Meh8$A S fO&kBׅvϷ䢪ŐNܳ2ܯP |0p qVCuRR}qq!M:&X @d@l~oG"R] ! }Dt q6@#Rk:(±=E&ml`=NĘ=bjP+QW,X% 7B)ZvoȠC78^va*&o۬6dҠbXq.VXcbĝB*^FH<167 Nɠ)[@C MZ- ѭW  0!z No$B FNEo&90H|?Ah(L$ozlMD݀! (@HgJJ<3K5ނ,%0-6{ Lay1+ b`0lPp n <A)i3G"m+F l GdLU1HIEHS"0l|1FuׯT|D%н`d 鷹kg2uTu:OE2f 2&"YB&#QZ%ߋ[-T1B& -EB41RZ!\;f( m(#UoN0orpzcZ&҈XcJ"^P{Rt'!缢=R sHB7` &W1 P0Nk5R#Tr m (iA) L4*$rFK"ER'iZ)f!N}$^^>EJ`jH* 5yOHi%14Z08ѢW0=Ga2&tXǘ"q\ѦF:&Sz5q D|=ACaZ]"050bڴfdo2TJ%6ER bbыsTmH8k$;[~UJ! 3YG~C{+@HL:NJ"&ojL tojaoNYFKX^G/F23zq )Q^FRBa d{^+ĽaaP#E0D(LĬћJM|εJ01BH@uY"]Ř#|xm8iڝ0MqRc阨G.+ mIԻRJvr.ZXIh5tVb`؉tJ'^+5$ ĽRa79> 0RdZ6.&C TQICЌBlaaiH$40TrK1f, kJLbaJHL=e;6(IKH{} @d%GA02H$ڴXh@ EAlXtSBG?0 %c>Kq聻Ipnz)n}@;N 9A==lMWj翹#&91q^4NqnѦ}V%C6ɭt[ PEY3U$ HQ>d'hu%f ]]#R*N 8UAr(+"ܑ-FP0#.ps^ôٴF+&[_,VP8 aDt9MA.Hж چt &X**J{a5<0d8IqA*f.Ol/Mdy|FV+]@*ʲ!'Uǰ<Z2cT(ѹ[ .!|mbdKhebe3]dRVrHJ;ـ<q&QvrP<89(Elz %X#$~ I2U"],[$V(QS03(w; X`yHa"BuQAt"E6FF'֩MkY,bMq 6]oA&"=aLOGapb/pR)|^_YMt*㓴#c6UV.v\tQk  &!d[X[^6LLcWC$F.Vih' \H0 396KҨLK!ŤǙA:&"L}`>@v_H:L8] Skxxi4m6i%!2E" @K  h7C#<"WFV!R&vBONYj%2!p1KEbaa+`ФQ^zNaDNR9t63JbAi*'jR|gtشp+HE ~V/RPޱ*[duvoEmjoM{cG>E'&94]Vy5C %t|{UdFB >sg.1˱kxNk3IE15GG~t:XY%meW_brbgO19(ݬ-- Ccm4KW/(sE6 )O|>~??3rwq?(׎ao/~{>^^z%nٿ340LO?澟8%N8ŏ=yO$G)/kغ} drYZ])ַGyn9t7F fWNp2;oҙfe*#VB1O8d}q7f瘘^bz^f\dtzgΟe`dڥ g\8w.~a@ڄF}wSO?\:?LO>s<ēc|ɿx]ۈLIsaեU҃ 7r9|'gj>.~m;p<3Gر{/NǸ/~-_ҟ ~~}P.Wvm/:0I>Ge~ Oy~ʟ4_}5Rs's]qc;9s#c,2ug^.w?O?{mwpÌn>fͰYc"p?3sYjMRi6>6We=C5D߭8eNS2'?W7a}nl>J Ĺ,Sӛ9#Ô9n\J:@my!R&ݨC e 
\7C71U@vx.yllm_ w/\qS)Ԟ̾<;nṧᎻcnZK7)WqtXߨc0q^;~m۶zcH Loʅ yi[M8z(z{8⋤Ke]»0'"~lNN:M3Yv@Bkk,ro_C铯rwsO3eF O;k}p!^"S0smq9LeyrgټiZ]a|z g^~3{v_ Cc<_ |~o?E&P.x#>c_c>νt;|s/ah(y w },[ŧsCS[ضE;bn;\Φ;x9pݬ,0m[Xk-kGx߻7'3=go>#l>.~tm+(qo?O3Ok-|g9t}?~RoY[d}<ܳۻv}PNi\>Gf^^z䫼S6mރw-7yʿ׿{}og+kףCWa\ѾM s45‘+`q1za:v}ݘquX_iFr%$hlCɹfؙElnR CݧιX'.Uprz1ҩ8p! GKR `sҕƟWϠ/~r=xN Wna뮝XBrumz~]kWcժ~3O^'?4=֕Y?:o{~:~oᑇµ^(p26Giv%'0;vZq s[܈~?wҗ|O>4SOS`INԵu+/Wا?t2Kl܂}~ş㋿E㱟4og>o7oaÓaw'?xWc Mpe<Ɨ#??#мt)ΣcJ\;s kbnh%Aڀ(F% uu`i%8q;Z0SY@k[;f+% 6n܄wm;vA,Y7o^1tv,EwS+N=U[̭i uz0?5٩9AK/#O?m֛رc^K'>;___տ˟_"~F^~ULO>,Η_g| wgY?.^e+P^GK\z?py5ڶ*flYpmd| ҋq!\(,Zڭq9a~A5A9s ӻC`ɒ^HA& :;;0_.cvn}ʟ'|Ο! ;0zC\Xi ^װard߃b/xLcz:铈d R,$.ǭ7pq~x}>$.;B˗/[o+P/m>n6uC8Alܼbզ-:x.K~yO|_|O|38~>~_O}뿆mtnނbO||g?  -q[[qkt016q}c~|=~_&|a+xGq}}ڏqナboM]o4b5ZBG׺|[w;o`ffQC c2?@ظ |܂ݟ/x pMгx Wo\,%X3o>?u<//'_׆JX ڛpV\։SGOb5n^E^LBw{'RO})d27`YظWUۋ׾M '1=2%҈[Z"L_8{ 3hkW.'kB"ظf ^_bg>[X?v ~|[/5k߷ . wi=sӋ7_y< ?'qXm ?{a\Ҿe݃cgaf\yK{zqiS| }c8{0nضcHݷO=pio䭛Xm ^ /|oGë?y}I=,[ 4715zWcvl Qk+:y {{?۶Xc .9[q1utLui.?;vbM' ,ЅAo/^x%4u#7^S~^) < p0::PXCCo/&'F޿'N` ^8֦)#HJ $XHȕ(ѵ##_ch_҅G=#}I=z+W/dP'0&d3^ދP+b|}/{m( 1=6[׆b,]և/?oGhj>Kз|~~o/~+>؅ NbӚ{.6=(1>rvƷu|W~A]1k1ri=~Fѳr9*oǔSPBo^Zl&]{]H#_UK)c*q۶'j `d*V؃bC=&&02xmCa۽;!Yc=6Mt-Rܼ5NTbfjm퍸ysK;q5t._$(,blr ), XWb@?N^ pU,yڼo%GwW7Xr$c,з.9Gf$i`rf8Q-`nJ U.)[1=9$A>$.͛hnjťWnwFo^GC[;HD01=v_BW MQ171B#2ESs#&&P#E/`};p0֬_si*؎/aiO/nNN SE\(bfa !PY@KR9tkVFX:::P^G03=W`vb8tKtCϡZ\0N|J3vܟ|K} ._E{WX Ԅt~A}Y18 hkE\GNǎ;p҈ٱ9t\'ѷq=g55@LLyEfi sĊ 8}m݈S(ci_.}!LA):Zpݳ'džm[0rn֭(:NΠ{ {jr.Lݸ澥(Ϡ `q)8$AkS3JuD3صt,E&(˨kn"T0;3*RywtDR :rݎ'ѿa * )&'ؽbfŦ:2G Nj\e+->bojifM)\WQ(6RIPZ(DH<H5Y*3hkji("AR037B pEX7ױj =y7n c^=كӧOaUى)bnr RDqB"K @|v '((0;f% (c(1J"  o*bbb (Osshhn(;0=5z0B03f0A)AynME{G'*3hll:Rb1B9EPI/yZ?@wzs)Xi<sDPs)GEVoR4`QrP#F8EB0}},1%"-M07z\! 
R!Jb@u Hʆ!f IDAT\H"ՂV D1E(p @\2$Hp=h(F/Hx mU"dR}&cFJS-HէiR 8bnlmTzh4-RRrBr9QQ )up@ Z濣q^H XH&/4ACC܄p)>5ft7=E #DB"P*#etlbH0Gm d}q;H S""$<c"%/~JoP2BDޠ`>F"VJb;$EĔ P-EBZl/YP Ddʅ"ަRBuha;}%QkS0F13=,/P_BOpW5RY8BA՝PC73Sz-4R FkSw&'s)35߈ޠ 1D*E(UEY3=d>I* XDRX,s}GHF)R}sϡ_X@%0JA)P*UP@ٔglrqkEl^r0Jsz Eb=JuHRK 37J)cu)%HT}oF@Q J:$i B7ӞK{' .(ATqeTBkBH>pQ;W#ƭNc1Q̐&*'W9Abrqt<ѹ_]CiAޞ點B qDTʑ`@Ið uF]R/]Lޓ)Q Dk+H"V{ n0e$AJ+BJ]/q4IQ@i )&S)2`܇8>a@GDjU"qyӼIȄA' MfP_SF(bp]T({zt\DXU:h)EER$@P* FpBjOff%H2P\tR?7B36rDBؠ%tR",3p!Rw%T"j:(~".|O^0qhC^/"HWkM( K߲UA%i~ D s ,bTbAtJbI(QitbfB {BƨZCKUj )H8w{TrR"YXK,R}I,BL i.ĥzN P2n>j/KP| N VRF~eB!c A"ԟ BօkBO}韣q6,TbLh4 q}JBHz.qz_rm$#:ƚ{[LB$[fUx4eKTGʹ4(B٠AL 36{+]⽞2:FDs(u,eRbpwL/'@Pu׏/r .JIל^β*/g;w+FH95EcwئQBӂ6)UHH\MDBBp!FD1M*7EB}6HZL+P<1]b1v'~NvʥZc6d,5H\@3ZTM0"]2T#{>VwR*p%$2"咡rjWdBO#H2<*F~]pnJ $sM)3E%qgׂU*'TE,8' ipkn6fBr*M k)I9VH]XDu2+K܁j<)*q3m~;,7#H$ x=6ʂ6NIQY!wyAL 0ƪg$IHE,!?Ƿ$wydY/:ӋF}y9~,=K%5=ϋl&;_ W"k5QNxW{I}[hN]5f~>ׇ*M˜޺{Z*։_3M=i9!f̌qk,Ny,+[X~@J1B=!8=FfE| ;@?+eի~1LBJ4j,,eXgǨeSXm KKQĂn V^~PX$!"At}+f}jIR,5BAG1L`@+x=.]Qeƨ+g:U˻<ߣ$xFMMwb2 H'Ok!?I= Jf ? 
v 犩~?Xε*i˙ D J‹C bL1]ɹut1$T(/I @AS+D b|%Hain"Z*$ y P5g@ lZkgF-Ԏ $H=("Q0yE"-,{7:QiTIWh1_PWWS kc$b8Gkߋm[vztJ]N)az'9bŌ<n^yiAf|R7Y2ɿkǜ}Nj3 D,7Y tmo jYT5Bj~2W}E.&>6.CzhZqDt;z:7tbci4lIgvntg,3;٧0H$/6P#ͅi*@f `< )AM즦61 6W憥700r=keڼCͭFqbT*fV/=2.%BU\VU ledv9<245s7z^TF;`Y&3JCK|+vkGm(o_?㞉ĝZ6z3W.-PL!pe^'bIҬf ;Y:9isE)CFKmMʜBңee N̎mn3MzF1/6Hh< Ϙb[$Mk ٭!PA O9LSFǛYx;plRd :TlҌIbTa!Kg `PR3/I#mOc6xDk0Kx\8ܝ'\Vɔ8 t4@KN;b=]`ZTCe2 rDPay 03X_),>D;X1d iǢL'ƌWUsjhY;>nc1ND~&6Wytl3O E7#fT擐wRrwNFoMn 3|AbG'Σ )!I"4&NCI{@hXHk*])K?,lrNH-Qjxa;eY{+/SynR:7`zICl4 0/3!`tWUʉ'$Ά@F$,+z&_3XJh.I2cSЍl3tBH2UW4"WӚ nyV{a;n.R֤褁p\ WDv߉ `"q_4] dǛ Eq£%}T呞XJCHB Y2k/1CJJDr7_.N2*3p6A`%-aB)5 "qmWc(6 |˪& EjY-&Qj$p.gpAT"c$xIb:a)?rP`a֍RbE#uJ)%ᬶ$ u56 X%XD%X 5hJ"L˜-߳: X@e@8]]jvB!Ӝ"[ZAN䑑CͥLG2?&ny-9gQI7\_|nva-Ybz[8}R2IȜKZE0 Al^D[7!STodWzM-yJfV2]Ѕ`@9]ibɗpk@ !ԸCȍ>q}IX=E/~lbn\ο_H6-821f1櫡h_Ҙ)wiDmgf t=0l7B*p1@d՞""z8B .('f FVyՖ\MNJS&Y (?C\z{NK- !mg'5XoP:_`9`(ZrnghS@Ey]R'25pXtX)lKP^Xv 2ՔZ 8?O:s;Չ";Co;^RLGHB[t5Kp!^Sh{Pt ,bj^>2%Ho.Kh>/#Giw:鋘R u%L< i_PnΘ!EFnp0YlA?H&=мI)2c/{jn!R=OSLae.Iٷٛ9ȇ:LuEJ/diڼN |piFRZCid9.2iNe<1^V8<#aU@%LNQ9/shY$Ռ#&Z=i25sn>[ J3i*ە8fbZbuT3H;BzMG'$ a2F́^mb?2&pŹM>E!\ĞƃL-brG;DLƘjG=)nPCh`!X9nHS3{gʜׄs2k †KJ[g.lи5E|Lazy-ZYJ 6iqDhSy@-tD5XA3Qۤg cZ0E礵J][A<Z( !Tp<@.jlЖ&Ă:4}BϭcuS"+\'kPM@;A QWn6x#xE(jv Giʙx.mq:C H5@z9_ !I0P{J_FuRb骦e&IR6ix~Ʋj$hdkhl!Оq̬+~fo줋KДMEFy-G,܁ Ł1?vB(gg 4\,s2^8츪3 "`pf\EsNl-7)ۙMBDax4v I2v[ ;`\RUG 4]1JVŌtF`ϘYו~4Q3jƩ_'tgO%yތK˂o򎖙o4)ː4>5C#^aa#lqIT5cj Gf`TsSŴU`F@\Tߍ,rgV>Xw>)~JxFa( cO=ЬCտTFBuG퉪_0&F1Z]#,,MyjfM*wTҌ1{ҍeTFsJyDW6řZBgS؆\ܛ:{ VXfZgŏ&n+mʴ63\ŢK'n|=`Yao11Y ~" ndq?0y\0O3" طnx5$x>#6뼬6R/d!v蠔B3IQ3wqKE[5HmG}{6%(  ݉0ފ "K܃,eK󔏃ȱ5fMmRՓM?43ij:*]C;s([G 瑞P`I7#5w))%t"%WcKa&y}!)3g&=_g'R>RA_XVjQEh{:bAVQ(h;D&L{_k!5MtA'`#ٻX$ 4W5Bu}D o:Cvv狂;$r8ŎkĚ3=Y5[J?jTϗ )Vd0o4"khLt;V_xƓ\Cm f<#P n5,U_ U!Uȥ#UPrN,pR-Q׼G̘E?'36Ѫ$\ܕu͝x1Gϓh!!U@nF2&t ]g<) v[>1_]!/1.71$M虦c˚PP-L@8EV)έC ICQB԰O$^a3, #e<3bUv3n;F3ZGvw|EmSc*g:e5[LWg/RǀDFYN}63!`f:a#py2y_HU^@ėgddU⿧Ol ȼ^7/41t(ق{i B@?Ga2dQ]d $7+IMdNU16Bw&j@N7Aj'ICi~ָVSmi._\$@;Bڥ4"ls=K&"Cf 
=]Ksڒzve*@v&FؑK#Α@3bo !R.'2ǖ(i8lx^"hunTz ңj;YŒUh~%UBg9L(TnSU{g,/uU+\UUF["=Q+2$85ZtkmSGk'/ R"" q ST. L}1,&Hu~Bx -}|`?:TL `!8a]ywsbAQ.H$Igj6jn^ZuJ#Ydp)*YcZ{Ao /[Z \ ,%G0NUk:}$,B2y-5[5S"SЪBfF#֣DWg͝v: kgjPVųteegaƠ; l@YBCNtPztŒRfZѴ/c:|yRF%e)ukƊ4I;<z֝uy6]wɓh-&}"oChy;?aZ@_\4zʇ" ݄mBk{&)!IRq.JZ([+$Pw虧 A@0NK*uc!vRO0nڛ~zޥ82ڿnUBq\/=VfM"&<921n$ùP߂9joP"L`=Q-|FrQau,I&Ӌz/sUwA@\tuD"L,fE £y@H&,YZtc/IRg7qھV\Jc;^`,/-DQNltUIk6I&fImw5;5kB.$HF-o (ecx>͡Z)N&O,WU MBU`ήM_Bd;z]OjVAP\w'{胔k#%,{frxtIYw,컕$BDA?5qY(5*+6he,tvZRksUj[4 <̆:ufȓB5ZY3kn~?NOZB] ]ي& A3:ꍌ9fKZUyQle)> x܈LAUu2RSYـM(85,Lo!S3ty!Y7t_6}gpFdiBAjL3.I+<ݳʙ:+WV]X24(i'pV컗s/G+m2:)Q2r3pw]R^4dda7N[<ǓY4T^ۇ4K'mEH:O9vM;2ntMrbJ61|H&dTw*߉k1P(1Ƕ}/B8(ؘ^A8CDH@Z5fĆO JD6STh&oڢ$:߀%}CCw2{*Z=6c4W0ӣ8=݅$)Z6m=^{h֊Ձ'Nl.蔦c~F)Dۊ*IBYvSbCi2'ēNȯ r뢥ù DrOZQ l:oL9 jC,96 g\u ux` ?ӣF/2fX RBMɏАVz"UO=&}2v9 J.{X+z2Ѡk ߸HS,o0?uMyAy@|@ muD<D*=eTh S^,貵8g7̸bW s'fo4[f.Utؔ'g*tEK:'2ݥ 410JËIpnT!z1yh7MИ)DÚ1$|r.PCajmvv]PsZ.[?aUg$/9b\,Bn5``X1$^ՌY# Ě=KyUrRho7q<<=W~N=jf4-eӇ3ED1;.Ii1좍 =Tw\TINm1*x>P[,Ҍ3~#Ɩ#s|Ug@;z<Ioc\{Ӄt9@tQu%/dFCz 0MzXw h( *C X!bR ߨɼDZ Avs Әtmo%Ii] $JhzDa&VG0)""R_Or27~k'X:AQRLc*hb\/xfINgbpV l`vz>υ>uAθO ޓ:XA L,}ٗxYW{M|$9YYwSgΡ{'R!Vd|`6 ) 3JFPH{|.Ѫ-K e3dJ h}JbZcʈA0g]o+*%Ggz[P1zs-xD3o[_#|8cCkf`a C#2&]/1F"?~O6zC &K[' ݼdJhFv]ۉcs=M~ mMq}!UDͨjTV'Ojpu8/>C[ߝB3 6_nuk& i3>R?uiӟl+sYI$R"醂FAGSndx8S l_iiLÌY_!DWwT?.ib!n+ A/1fN9??&-pmmCX Rc(<#Y)=96c!#Rf0#_.R~h@Y@v=mR;~*tt?}!Q[)Clg{Gv$ qbJ;􆡋IU[ ~!__2鈚gӗ .wWI+DKk5њq_F9AZZz(i, IDAT#k7Y@BbphSt^[ˡ6Qoj Zڟka #X2)8PlJ'Sl(x"^Eo> 3kx9r"@Hd1cB#5aEDL a0!Mo=iFbǐƢu<$^ laB~.!ޯ|6ӓ\i!iLݹoJ-C>Eڗ(9ɐ0vDF1k Tk9%cܑS-Z iJxS=oM, ̺$ 8m`(S3!!74v}`#8#}X g$2r8؃Gmlׅ-66x{Ta7](ؚ snffk|`LaJTL u[G91uXҹ=0" h{S:XiAwvjધ[ 9~r?ERM\%lEttdhriun,Sk '-w=jivpMWowT<^S\="R !UY1@hD. 
l 3@28TY^kQUy&MrP'*)Od]>iVSQ}{ Ҭ)~]X Dw6"^4ujFM5ňwi" rf~ISsEkD7J<  |½b&7TAkP!=,{ spUGKmMi(> S#n', 7CmQ:4\L؆LӇkb_O/Udu5hi)NOfMvVƦqf`TqhK5h&0='` F+暻.:X+|Fd0]kfuÄ7m {[A0r{c%+FֶE' U0˟QߏpIQRp'jT@|;5MO$!N"+PRd!Tc4"ټ!Q{}2?ո+j4GUD8rh@Ɏ{ 7=@sޒaC[|[d)t{|C|!'|_kh-ڙ|!ڕbYa^WiFMl͢*3y('lXĵ,arNO@6'c|8 k 1%ӐAˈ 05ǩ.FI|PN^莛\!R %vbsRH:=ue)S-nG[BH E0i)4E @݀:Si&N'_hj=hhTbqzr9AB6/tqބF*ƺ>Hh1l) lܕ 5al{ /Կj((:Lcy͘(0$4֪n!aэ)*/PhP_G4p}z{FZa0b|o!35Jѩ7#h"}9{P-P/N?~g\}|pDfBY'- ,&lΗ+sXߛ--`Z/d^笙1閙[щn(jEmTp3jv^+lY)_Zc.|PALTm;Ri[N7BN ki0wqw쐧Ԉa Њ#j>+mUrdJd%4!CEZ_#** r<A |Ӄz=zC@S=,C)-i4pf;8u&d8ؼ}ͭyS}ߣdSqu ;adC- J*Hr gn+Mxѻ/瓲T'OiUMEi㢧GS}:Y_schTyNi'E *)OD/fz𺟀Hed}Q`gdLlցNY1d:ShOIaW -s+ NN=K{k:C+/LX]Z)U3HuwC4>| oY6LIAɓ LY!mE!tffsYqgͼӉy .m>=l(D4IPƥ{a 1aVql"*`p>^= ϯoԏQu߸[„[lmicCGh;|zRJz<(m,S#bڲ`jl-^VxcCL$)LThЈHW\v?Sǐتjxft:',7o3 LLȳ0S* }DOO،i=lXZs\/^O6gfth}8-rO߰yDT17 ϔ9KVyBs4< XFY˟?؁NU7Pq))4٧)t%k-IJO_ M*hdh&M4hՇHywfz N~z~2uL/Ifi!2v&8(̋'h̃Yge;츇fDZ17E:k²4Kj= Qlmypgp<@3n|կa) !hc BZl.xT|O:k< jl^;o@Ya`Ix|#% )M$ ܖ(rK5GC6S^Z.VODE"3A:ܒujZۍfR"Ԙ2oH;?UT/ lCFM=;z 7;:yfAx; DY@ai,lE聅Pң錠GR 05S D/Q$ܷ@$&zMiY"Ă %`DG)V(0"Fyz#:@TE{ddNYmT=?:qoRtJƞ%Tx iSth)Cìam1G7$H 5&f(zCW߂dXI* g5 T_?kp05R|=kW>cCeI1G$iEzz?/@b/E-kRk? 
~ǎVeU갨[tf^AQذȎ@3dC?Nmu+ Y5YyWĔPUt;2cФz爟C1b<^2TqFqsAIAY犸!^g<k=\`/Q=8T\L!4uSI= "b)֣3hᨾdV4׶f> ܎4`ޢ&83YLoc&ԝ'3=^ mR1Dp63Ŵ=x[j}(' Gwb 㳷ǦK4CuoF!E4u 9QkyJ:F~!U]X,EhSu#,ജLz6 Z=7+;HH;lYJV:|N4l5L݊-FUY CbmPAC^5`처ՉS5Er4 /a<.Me}]׫#B5e}T~0@?.pt0S`ktoq]k5'5b>1Ly1>ArWi$ܓ_ "+ AmiEJ0/$N^eZ5Zj/ox>s,|9vRi^[xρi :Ik4喟U0s A^,zghHׯxQ4q< K/l Z۔ ?kg)^ZVal<~:րvOIk`5_=<gؓ0&#`{@jm?N=KsS }Ǹ;tw̸9!f2n-~7 3-j曜m f9g9U 4oz8B-k-~5ms#u746I a9s'Vr aaEQ98qRzV+q=ŵ1V;22 윚{;n*"ht18y[$v:U:DPie!KĽ"u^Z8GF,8a53:Jåts0WOR/JVBݬi}8f]2=QbCW<}!0M~͢|7;fg*WNtwpGqpZPۑ֝4g0q\Y췾 vчT\j?3\R1G-2wP3P_hht;y~][?&'_KiK(v=QH+A`P74rN=j怳I,}C0MfkGX - 7aɬsE>_ڛhsy>kzc[l;I!g+>O=qT^twU6 Jӝ(!7<1,puMRtBLjs6¿7  'JӘ3sA;bWӊ賡S}2g+UצY$W"P/_u73ɵn\Ok+5>FjK1D81{XQ kI)ieuQ}gRfr78P]RzI*((PKģeKZk[1=[Uf7s¨}ҳD' SVg}](:=OEղӢ˽~ѿVulVP;mQLEg 2% ?bM["JzZMD-=W::@y^n#whxh$i0?RzV GT'n6"Y4|[tҐ1&HT *4u=7l\Rm~"jbb"Zu_bϯVC4B: 1`[vA7iTe3kxSielDZQߥHtxS?q'P${}HV4^!1buoB!Ux"(A- zv/], U}Ӹ{zJ-ʱ--okOf4صǨI݋)Db*5p4؃OQlZW7S\ӳWE} rS @PnL[epRd粝K t0=Ö;ׅ):pF1ٲs\\M T& ʍóy fԫ#v蜰~Rn?WYCs'6!hehSdY6(аk#ϞeɳA=.-))P8m:e>@T&ĨSt:猻aީ }9ݪp'`Fi~p|~蠠gbKn - W4:s4sy%"hHmSVwVw"Wzu^Ƒ?¯csvR !5a,_?0LHs}?Is57{Ѵ.R){HEuWGt5bFg#g!"XHK`(fCWkA/NX'ל ѭ9w5:Z7ix\_n86^}k sBS`k ~{aLiPgut#a5A8X)NLJ7~٤/6d5NR Y{Y F9s4Y3<6z6N{ΤLz #4?VX ՘9Ŕml5gc>`zvlT*\=V6Mg^j4]A-q(ZOf8hc<f3M88X֧6OtNUQq"6kV}L1sIo R:勦 X& ~pV)5]y S2}}ѸS1![DG\( 7e #ՙ! IH7&*I$H IDAT;" ~޼u%Ⱔd%ulU5ĮQ{wUjkZRc7O(iGOD.3f ȶ+ƍHK%-4Ey9Du>.lVṂTY1Q$NM:D*oXd~89GuQ"SӐ5ɽFiC[|-fɡ *JSD9Z{0j!ummGOfOJ Xa Cz:Y4l+bހ=j 6kfl&>F`1SQb`228\WƚQ{h}(6>Bt"ZaL>@#`{T&KfBߎ%c`ݧ9ﱎ[$ Mܸ*q2(>Z&;dIX%DAZ[Cf3+<;O%v]O̫d j %bɐ*kjp9L5Y!WK,36]P,N:<ԺFp. 
0M;DBbj6Ly,=ky>'B_pA> iw̐zopǟBv?GSaG?囟#='Ǡ}]+Prh`@~~* ]D=6LOe~C0?vj4@YlTg&GUdcD>й]4Lz\C]C f4ۇ3 '}:`c$bqZ[} =!1EvEA'Sb<-ݰ 3sFQxsHS\&P[AVɜќ\?Mʓˆ,?l)eYNf{/#)#!o8b瀟Kwg8Fܽ)_fz2{5MQ_<>EC1M9҇تMs8{RihWf\ &wKTIؐ-Uw #S>2ߕHC8VxH/i\_2dAeq[Ϫrt*UЭ,yɐGCo4l(0x"{wp=,Mfo0[{Ժ/MQKc*)Uc]Fxhh0 q4FR@scP|ۇmɮ%IX:Cyq#KW%Кf%v|] &@B1 (6c%0X#xq)[cg^Y2a"(.a PE]Ne2[UXԷ d4L93Ttf>lOS}&a8i[o6lN9lj IO1dl"OٔG<,Y֐4Qd}:yH\xغ#Tf*ඁõJrMb҈2^c y6tStTٶ~(Rk\eS;D09{:5RⅽG1lDtFO4(S٪ '| #M޻/򰋢iWrգI"s籨~-_:uCF}Jq1%}&iL=j@,(PK:u0d lgHި_|lٵYXjgDݻUFh %: #C,T3BPm&S,ϩF"نC\)LsyCШ+l!{x:fQ8 BtE24 ]Ё\IEntg{Ȕ\q UOվ#٠*[HFd?ߛsP[T} h^| K aI4lίۢ (- hL:qE(ӮiSW3m[6Z< ~jN1v|%G'BןF`_#XB^ž{qOD bwmsA_n.;$&mXʰth kkod߼ SſDޚ;[ėo{Jߘ^R-K ~>unCw~XڱYgҘY!u'Iw]@A[~XКm}5_:f*ghiNX Is ]HTjIx1:y漘v<}HahYq$]PGt?e>ʙޏaGk;3}= ! %'V`s~K2:9ȝUlqZ$S\ָ+}/kzHQmC ky'EpC9kyD-*I (H\o!<1xw[XwpjO/J-?> ?sLcH\[.ɺ'[dQ LsR{iMy&H'4z\8O}}Ez*hUesn2BhX3Sf6 gY` nAUBTQSWˢjڻTemNӰl}+\C3.|f2 )Mͫ ?wn?=4cjBk `JtmVY">9Z&NO&9MM`LЂxcaG}iLsiٔDsǩH3ߥ'|Dy<]E[?S;E!8&[-M=iƫHI5b>Nd0 }Fa8ϺS+  d9H3rۖ>ls1D̬YZ?/(z2RN吮+S@. cJ ]:z]r:sd]4٢mCzM:n3g%.|L$`]S0" XhBۚc5ʁTu|ƴ!jo6âو!z:2?}ܸlz4dzdO?mP&ܧcUú`-0$x̼<|>5׈H:Xq1rnf )K3} ^\09(B\KA.q-9( T5d)`BlQ^TT9OzCm>4eU]w B[Mx8 ܘ:e+Ry&]BgOljuV"Op%G%x?fԴ*8v8bQ砡m̗g@Q#b>NV}7NcXCj"<ݵR$I$х]'3寃٠SD׬" zO[wto9􆴽ES1ʯ!zB%-zi D尿4) Sbmh1~oz94u @+냯Kj8YEcE Zus%=mu L($,i{YȺ?z8dlu0ʔ?3{~`[Pa3Dknfqʤ0-i;?ۀi̽1c_IObp dO8՘ˀ|]L Ȁg&3LOpyfu7ze] *v>QZvJ%z˵r*{sY7u #"`敽 %*y=KZ>{ꔎrU P(m!=sn{@xSM5۾R;߻9C&H6/s?xE7h`S0DZBJ8-W/C`, sf4l%Iل˲(Fgu IX ]Tkk$7'TOsn^%g$ O ztKƎ >~2nLQfz"F2VfvU6s0>fp{/E&?¡f)7> IQ b>Sl^#~'f^. ~XC[Ғ1׿kt1}74zu+qec$! }2sZon@՟(5N4B0xFaP37A{OXbU 77DN\?l rF굦Kyb @7\.A{:Ϳ}]m39#iSꪾo+Y~US@0ЎK 4V:2:dZ|?Vxnpfl~*URc2/Ʈ4I C˳G!mݘ)g f*٨yyI-Ek1<N]R@1:N.zU›h,HJ,!{sƆP =A.eS4TzN &]=fFh<Z#x2Dhu+/'Y1JVݞZMD~%kKCy(swGPdڎ@O6:ٲY^쭌ziM_32IIŝɞ7iН-vA]OXدKޤbU<_D a!8YZLӂ(݊`׀G!?PhQ8fd0ٽT! ky;S pUMtVV-j3I$mݬtRd Qzu:JX"-lGЫv;8C9oӉƵl.e:Mt]Ш0" 4Dp!Eʔw,U)rW!|޷j=iu&噮̤pnù UyJ=ʑs}bP\ )XBnkA9+g;_ΐ$I<{! 
1M:{rLЈ³LԆTq#+yХ"ԺvР#udQ YLD$^= {1ޖ $̣pvBgͻ`kMMT}XRnˆ<뷙im+OhDjE={3OY0,9Iy!:Q1uMvusGG{7猭 kְl[dE뉈?D|HY'hVލ1_oju>tcIa]oz{o_Z\KGcŌOr"s90b} $)Q+3 Ys6daߛ:=[؇n&.?uGry~ĺ0Ai>[11 x֣96Qi,CLA32deX=z="LʩBs]{11}ɓls@կ>kպr-^\3!Vc/k$Rp(dsoEY9m_DoNWd%7U/f&EL?x IG:6mv**,CޞP qc Nm@}-cIʟy0]yucT)n=RɋtoڊӝsȂ6 n4áV>]7=|&/=^rrʩH_A5؅: U$$y_)ju7Z*;a.MTՖcGvDn^>լ#9б/Z%Cp'ͷZ:O>gboDj\ӑyR %VX}C g=_&>8 ~OQI1XKYi%a*Hv8de,$%9ZŲ ,p끥7@W [ DIDAT@ L`Z+JB ij{Fe'D]jBùGS#޸hqkdadb~\T_l{$jv4q!RpTjs"uI#:n}iTd0rc4z]tpuyxP[g.GD5 ҼӥU΀y F ޫq^#鱡54taS_mp8]o" gc@m$^n?Vվy(oR0\l!Z fOef$^AP 9^=L5?yA9OQ":Ȯf0 Qa4UO]=kkP}\#aa\n ڢJ:f>ɀsakC/R%V&]VLdi0٬:T.|9pK-[c8Wh𙮎cңg71~m6 iHjճED$ߗ>!Zע5so)1~480&9b>'``V/0&ޟ `x•3es/B3М#;Sgƭw\~`8 pF Xݞt5Bz=i>9_dh2Q".ZՌ$%_da2tM%Ի)S#h2n|42o϶ˢy[,'K3 ၲ"1<`B#F1J|:-sFARQc\dzݢP{Ms31Rk1+)DPh][SRIgMX\cq3.iհ A#rbDj=ϛuRgh.n\MbLdڮsw03GQ$ N\5r( RWbTLy+]5ulÃ;u?)V[[t4J<5vݯ1s`LDX3-lT%0mc qϐFy_FFDy ~^00]`x~F.Hвo~?NZYO.ߏ,ڞ Mv'fAS=&֯0tj 88 w nJ F5bBxbT&Qdҡq2f'@(:wFShLkG9W6V KǠUi瑒 N =(³Vg&Pdd9(bR'0P()4~jW6L$4>4|4vI+͑C%'>]o!*'o}!JЏX{B_{u@`$u&R:[}>7940$Mjs)l{8gkt1 GmԨ>=9R@k'=L4u풸TjA b5ZkXasaMt35al=8׽dZM%p<uՙ$(rX#}aI>xМX YP4av-)t~xOp=Yş< ȍ\ڠpOS /o{D'}xJC2hܓ\N}΄=G!e]s\z5j| v&2?><#jz֛X`5Lı&Ad0ud6b-jZ=r?>%4IENDB`marginaleffects/man/reexports.Rd0000644000176200001440000000066714541720224016513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/broom.R \docType{import} \name{reexports} \alias{reexports} \alias{tidy} \alias{glance} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. 
\describe{ \item{generics}{\code{\link[generics]{glance}}, \code{\link[generics]{tidy}}} }} marginaleffects/man/get_coef.Rd0000644000176200001440000000534214543163156016235 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_coef.R, R/methods_MASS.R, R/methods_afex.R, % R/methods_betareg.R, R/methods_nnet.R, R/methods_brglm2.R, R/methods_brms.R, % R/methods_dataframe.R, R/methods_gamlss.R, R/methods_glmmTMB.R, % R/methods_lme4.R, R/methods_mclogit.R, R/methods_mgcv.R, R/methods_mlm.R, % R/methods_sampleSelection.R, R/methods_scam.R, R/methods_stats.R, % R/methods_survey.R, R/methods_tidymodels.R \name{get_coef} \alias{get_coef} \alias{get_coef.default} \alias{get_coef.polr} \alias{get_coef.afex_aov} \alias{get_coef.betareg} \alias{get_coef.multinom} \alias{get_coef.brmultinom} \alias{get_coef.bracl} \alias{get_coef.brmsfit} \alias{get_coef.data.frame} \alias{get_coef.gamlss} \alias{get_coef.glmmTMB} \alias{get_coef.merMod} \alias{get_coef.lmerModLmerTest} \alias{get_coef.lmerMod} \alias{get_coef.mblogit} \alias{get_coef.gam} \alias{get_coef.mlm} \alias{get_coef.selection} \alias{get_coef.scam} \alias{get_coef.nls} \alias{get_coef.svyolr} \alias{get_coef.workflow} \title{Get a named vector of coefficients from a model object (internal function)} \usage{ get_coef(model, ...) \method{get_coef}{default}(model, ...) \method{get_coef}{polr}(model, ...) \method{get_coef}{afex_aov}(model, ...) \method{get_coef}{betareg}(model, ...) \method{get_coef}{multinom}(model, ...) \method{get_coef}{brmultinom}(model, ...) \method{get_coef}{bracl}(model, ...) \method{get_coef}{brmsfit}(model, ...) \method{get_coef}{data.frame}(model, ...) \method{get_coef}{gamlss}(model, ...) \method{get_coef}{glmmTMB}(model, ...) \method{get_coef}{merMod}(model, ...) \method{get_coef}{lmerModLmerTest}(model, ...) \method{get_coef}{lmerMod}(model, ...) \method{get_coef}{mblogit}(model, ...) \method{get_coef}{gam}(model, ...) 
\method{get_coef}{mlm}(model, ...) \method{get_coef}{selection}(model, ...) \method{get_coef}{scam}(model, ...) \method{get_coef}{nls}(model, ...) \method{get_coef}{svyolr}(model, ...) \method{get_coef}{workflow}(model, ...) } \arguments{ \item{model}{Model object} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package.These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A named vector of coefficients. The names must match those of the variance matrix. } \description{ Get a named vector of coefficients from a model object (internal function) } \keyword{internal} marginaleffects/man/datagrid.Rd0000644000176200001440000001213214543162241016226 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datagrid.R \name{datagrid} \alias{datagrid} \title{Data grids} \usage{ datagrid( ..., model = NULL, newdata = NULL, by = NULL, grid_type = "mean_or_mode", FUN_character = NULL, FUN_factor = NULL, FUN_logical = NULL, FUN_numeric = NULL, FUN_integer = NULL, FUN_binary = NULL, FUN_other = NULL ) } \arguments{ \item{...}{named arguments with vectors of values or functions for user-specified variables. \itemize{ \item Functions are applied to the variable in the \code{model} dataset or \code{newdata}, and must return a vector of the appropriate type. \item Character vectors are automatically transformed to factors if necessary. +The output will include all combinations of these variables (see Examples below.) 
}} \item{model}{Model object} \item{newdata}{data.frame (one and only one of the \code{model} and \code{newdata} arguments can be used.)} \item{by}{character vector with grouping variables within which \verb{FUN_*} functions are applied to create "sub-grids" with unspecified variables.} \item{grid_type}{character. Determines the functions to apply to each variable. The defaults can be overridden by defining individual variables explicitly in \code{...}, or by supplying a function to one of the \verb{FUN_*} arguments. \itemize{ \item "mean_or_mode": Character, factor, logical, and binary variables are set to their modes. Numeric, integer, and other variables are set to their means. \item "balanced": Each unique level of character, factor, logical, and binary variables are preserved. Numeric, integer, and other variables are set to their means. Warning: When there are many variables and many levels per variable, a balanced grid can be very large. In those cases, it is better to use \code{grid_type="mean_or_mode"} and to specify the unique levels of a subset of named variables explicitly. \item "counterfactual": the entire dataset is duplicated for each combination of the variable values specified in \code{...}. Variables not explicitly supplied to \code{datagrid()} are set to their observed values in the original dataset. }} \item{FUN_character}{the function to be applied to character variables.} \item{FUN_factor}{the function to be applied to factor variables.} \item{FUN_logical}{the function to be applied to logical variables.} \item{FUN_numeric}{the function to be applied to numeric variables.} \item{FUN_integer}{the function to be applied to integer variables.} \item{FUN_binary}{the function to be applied to binary variables.} \item{FUN_other}{the function to be applied to other variable types.} } \value{ A \code{data.frame} in which each row corresponds to one combination of the named predictors supplied by the user via the \code{...} dots. 
Variables which are not explicitly defined are held at their mean or mode. } \description{ Generate a data grid of user-specified values for use in the \code{newdata} argument of the \code{predictions()}, \code{comparisons()}, and \code{slopes()} functions. This is useful to define where in the predictor space we want to evaluate the quantities of interest. Ex: the predicted outcome or slope for a 37 year old college graduate. } \details{ If \code{datagrid} is used in a \code{predictions()}, \code{comparisons()}, or \code{slopes()} call as the \code{newdata} argument, the model is automatically inserted in the \code{model} argument of \code{datagrid()} call, and users do not need to specify either the \code{model} or \code{newdata} arguments. The same behavior will occur when the value supplied to \verb{newdata=} is a function call which starts with "datagrid". This is intended to allow users to create convenience shortcuts like: \preformatted{ library(marginaleffects) mod <- lm(mpg ~ am + vs + factor(cyl) + hp, mtcars) datagrid_bal <- function(...) datagrid(..., grid_type = "balanced") predictions(model, newdata = datagrid_bal(cyl = 4)) } If users supply a model, the data used to fit that model is retrieved using the \code{insight::get_data} function. } \examples{ # The output only has 2 rows, and all the variables except `hp` are at their # mean or mode. datagrid(newdata = mtcars, hp = c(100, 110)) # We get the same result by feeding a model instead of a data.frame mod <- lm(mpg ~ hp, mtcars) datagrid(model = mod, hp = c(100, 110)) # Use in `marginaleffects` to compute "Typical Marginal Effects". When used # in `slopes()` or `predictions()` we do not need to specify the #`model` or `newdata` arguments. 
slopes(mod, newdata = datagrid(hp = c(100, 110))) # datagrid accepts functions datagrid(hp = range, cyl = unique, newdata = mtcars) comparisons(mod, newdata = datagrid(hp = fivenum)) # The full dataset is duplicated with each observation given counterfactual # values of 100 and 110 for the `hp` variable. The original `mtcars` includes # 32 rows, so the resulting dataset includes 64 rows. dg <- datagrid(newdata = mtcars, hp = c(100, 110), grid_type = "counterfactual") nrow(dg) # We get the same result by feeding a model instead of a data.frame mod <- lm(mpg ~ hp, mtcars) dg <- datagrid(model = mod, hp = c(100, 110), grid_type = "counterfactual") nrow(dg) } marginaleffects/man/predictions.Rd0000644000176200001440000005550414554076657017025 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictions.R \name{predictions} \alias{predictions} \alias{avg_predictions} \title{Predictions} \usage{ predictions( model, newdata = NULL, variables = NULL, vcov = TRUE, conf_level = 0.95, type = NULL, by = FALSE, byfun = NULL, wts = NULL, transform = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, numderiv = "fdforward", ... ) avg_predictions( model, newdata = NULL, variables = NULL, vcov = TRUE, conf_level = 0.95, type = NULL, by = TRUE, byfun = NULL, wts = NULL, transform = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, numderiv = "fdforward", ... ) } \arguments{ \item{model}{Model object} \item{newdata}{Grid of predictor values at which we evaluate predictions. \itemize{ \item Warning: Please avoid modifying your dataset between fitting the model and calling a \code{marginaleffects} function. This can sometimes lead to unexpected results. \item \code{NULL} (default): Unit-level predictions for each observed value in the dataset (empirical distribution). 
The dataset is retrieved using \code{\link[insight:get_data]{insight::get_data()}}, which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. \item string: \itemize{ \item "mean": Predictions at the Mean. Predictions when each predictor is held at its mean or mode. \item "median": Predictions at the Median. Predictions when each predictor is held at its median or mode. \item "marginalmeans": Predictions at Marginal Means. See Details section below. \item "tukey": Predictions at Tukey's 5 numbers. \item "grid": Predictions on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). } \item \code{\link[=datagrid]{datagrid()}} call to specify a custom grid of regressors. For example: \itemize{ \item \code{newdata = datagrid(cyl = c(4, 6))}: \code{cyl} variable equal to 4 and 6 and other regressors fixed at their means or modes. \item See the Examples section and the \code{\link[=datagrid]{datagrid()}} documentation. } }} \item{variables}{Counterfactual variables. \itemize{ \item Output: \itemize{ \item \code{predictions()}: The entire dataset is replicated once for each unique combination of \code{variables}, and predictions are made. \item \code{avg_predictions()}: The entire dataset is replicated, predictions are made, and they are marginalized by \code{variables} categories. \item Warning: This can be expensive in large datasets. \item Warning: Users who need "conditional" predictions should use the \code{newdata} argument instead of \code{variables}. } \item Input: \itemize{ \item \code{NULL}: computes one prediction per row of \code{newdata} \item Character vector: the dataset is replicated once of every combination of unique values of the variables identified in \code{variables}. \item Named list: names identify the subset of variables of interest and their values. 
For numeric variables, the \code{variables} argument supports functions and string shortcuts: \itemize{ \item A function which returns a numeric value \item Numeric vector: Contrast between the 2nd element and the 1st element of the \code{x} vector. \item "iqr": Contrast across the interquartile range of the regressor. \item "sd": Contrast across one standard deviation around the regressor mean. \item "2sd": Contrast across two standard deviations around the regressor mean. \item "minmax": Contrast between the maximum and the minimum values of the regressor. \item "threenum": mean and 1 standard deviation on both sides \item "fivenum": Tukey's five numbers } } }} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. } \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. \item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{conf_level}{numeric value between 0 and 1. 
Confidence level to use to build a confidence interval.} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} \item{by}{Aggregate unit-level estimates (aka, marginalize, average over). Valid inputs: \itemize{ \item \code{FALSE}: return the original unit-level estimates. \item \code{TRUE}: aggregate estimates for each term. \item Character vector of column names in \code{newdata} or in the data frame produced by calling the function without the \code{by} argument. \item Data frame with a \code{by} column of group labels, and merging columns shared by \code{newdata} or the data frame produced by calling the same function without the \code{by} argument. \item See examples below. \item For more complex aggregations, you can use the \code{FUN} argument of the \code{hypotheses()} function. See that function's documentation and the Hypothesis Test vignettes on the \code{marginaleffects} website. }} \item{byfun}{A function such as \code{mean()} or \code{sum()} used to aggregate estimates within the subgroups defined by the \code{by} argument. \code{NULL} uses the \code{mean()} function. Must accept a numeric vector and return a single numeric value. This is sometimes used to take the sum or mean of predicted probabilities across outcome or predictor levels. See examples section.} \item{wts}{string or numeric: weights to use when computing average contrasts or slopes. These weights only affect the averaging in \verb{avg_*()} or with the \code{by} argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the \code{weighted.mean()} function. 
\itemize{ \item string: column name of the weights variable in \code{newdata}. When supplying a column name to \code{wts}, it is recommended to supply the original data (including the weights variable) explicitly to \code{newdata}. \item numeric: vector of length equal to the number of rows in the original data or in \code{newdata} (if supplied). }} \item{transform}{A function applied to unit-level adjusted predictions and confidence intervals just before the function returns results. For bayesian models, this function is applied to individual draws from the posterior distribution, before computing summaries.} \item{hypothesis}{specify a hypothesis test or custom contrast using a numeric value, vector, or matrix, a string, or a string formula. \itemize{ \item Numeric: \itemize{ \item Single value: the null hypothesis used in the computation of Z and p (before applying \code{transform}). \item Vector: Weights to compute a linear combination of (custom contrast between) estimates. Length equal to the number of rows generated by the same function call, but without the \code{hypothesis} argument. \item Matrix: Each column is a vector of weights, as describe above, used to compute a distinct linear combination of (contrast between) estimates. The column names of the matrix are used as labels in the output. } \item String formula to specify linear or non-linear hypothesis tests. If the \code{term} column uniquely identifies rows, terms can be used in the formula. Otherwise, use \code{b1}, \code{b2}, etc. to identify the position of each parameter. The \verb{b*} wildcard can be used to test hypotheses on all estimates. Examples: \itemize{ \item \code{hp = drat} \item \code{hp + drat = 12} \item \code{b1 + b2 + b3 = 0} \item \verb{b* / b1 = 1} } \item String: \itemize{ \item "pairwise": pairwise differences between estimates in each row. \item "reference": differences between the estimates in each row and the estimate in the first row. 
\item "sequential": difference between an estimate and the estimate in the next row. \item "revpairwise", "revreference", "revsequential": inverse of the corresponding hypotheses, as described above. } \item See the Examples section below and the vignette: https://marginaleffects.com/vignettes/hypothesis.html }} \item{equivalence}{Numeric vector of length 2: bounds used for the two-one-sided test (TOST) of equivalence, and for the non-inferiority and non-superiority tests. See Details section below.} \item{p_adjust}{Adjust p-values for multiple comparisons: "holm", "hochberg", "hommel", "bonferroni", "BH", "BY", or "fdr". See \link[stats:p.adjust]{stats::p.adjust}} \item{df}{Degrees of freedom used to compute p values and confidence intervals. A single numeric value between 1 and \code{Inf}. When \code{df} is \code{Inf}, the normal distribution is used. When \code{df} is finite, the \code{t} distribution is used. See \link[insight:get_df]{insight::get_df} for a convenient function to extract degrees of freedom. Ex: \code{slopes(model, df = insight::get_df(model))}} \item{numderiv}{string or list of strings indicating the method to use to for the numeric differentiation used in to compute delta method standard errors. \itemize{ \item "fdforward": finite difference method with forward differences \item "fdcenter": finite difference method with central differences (default) \item "richardson": Richardson extrapolation method \item Extra arguments can be specified by passing a list to the \code{numDeriv} argument, with the name of the method first and named arguments following, ex: \code{numderiv=list("fdcenter", eps = 1e-5)}. When an unknown argument is used, \code{marginaleffects} prints the list of valid arguments for each method. 
}} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package.These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A \code{data.frame} with one row per observation and several columns: \itemize{ \item \code{rowid}: row number of the \code{newdata} data frame \item \code{type}: prediction type, as defined by the \code{type} argument \item \code{group}: (optional) value of the grouped outcome (e.g., categorical outcome models) \item \code{estimate}: predicted outcome \item \code{std.error}: standard errors computed using the delta method. \item \code{p.value}: p value associated to the \code{estimate} column. The null is determined by the \code{hypothesis} argument (0 by default), and p values are computed before applying the \code{transform} argument. For models of class \code{feglm}, \code{Gam}, \code{glm} and \code{negbin}, p values are computed on the link scale by default unless the \code{type} argument is specified explicitly. \item \code{s.value}: Shannon information transforms of p values. How many consecutive "heads" tosses would provide the same amount of evidence (or "surprise") against the null hypothesis that the coin is fair? The purpose of S is to calibrate the analyst's intuition about the strength of evidence encoded in p against a well-known physical phenomenon. See Greenland (2019) and Cole et al. (2020). 
\item \code{conf.low}: lower bound of the confidence interval (or equal-tailed interval for bayesian models) \item \code{conf.high}: upper bound of the confidence interval (or equal-tailed interval for bayesian models) } See \code{?print.marginaleffects} for printing options. } \description{ Outcome predicted by a fitted model on a specified scale for a given combination of values of the predictor variables, such as their observed values, their means, or factor levels (a.k.a. "reference grid"). \itemize{ \item \code{predictions()}: unit-level (conditional) estimates. \item \code{avg_predictions()}: average (marginal) estimates. } The \code{newdata} argument and the \code{datagrid()} function can be used to control where statistics are evaluated in the predictor space: "at observed values", "at the mean", "at representative values", etc. See the predictions vignette and package website for worked examples and case studies: \itemize{ \item \url{https://marginaleffects.com/vignettes/predictions.html} \item \url{https://marginaleffects.com/} } } \section{Functions}{ \itemize{ \item \code{avg_predictions()}: Average predictions }} \section{Standard errors using the delta method}{ Standard errors for all quantities estimated by \code{marginaleffects} can be obtained via the delta method. This requires differentiating a function with respect to the coefficients in the model using a finite difference approach. In some models, the delta method standard errors can be sensitive to various aspects of the numeric differentiation strategy, including the step size. By default, the step size is set to \code{1e-8}, or to \code{1e-4} times the smallest absolute model coefficient, whichever is largest. \code{marginaleffects} can delegate numeric differentiation to the \code{numDeriv} package, which allows more flexibility. To do this, users can pass arguments to the \code{numDeriv::jacobian} function through a global option. 
For example: \itemize{ \item \code{options(marginaleffects_numDeriv = list(method = "simple", method.args = list(eps = 1e-6)))} \item \code{options(marginaleffects_numDeriv = list(method = "Richardson", method.args = list(eps = 1e-5)))} \item \code{options(marginaleffects_numDeriv = NULL)} } See the "Standard Errors and Confidence Intervals" vignette on the \code{marginaleffects} website for more details on the computation of standard errors: https://marginaleffects.com/vignettes/uncertainty.html Note that the \code{inferences()} function can be used to compute uncertainty estimates using a bootstrap or simulation-based inference. See the vignette: https://marginaleffects.com/vignettes/bootstrap.html } \section{Model-Specific Arguments}{ Some model types allow model-specific arguments to modify the nature of marginal effects, predictions, marginal means, and contrasts. Please report other package-specific \code{predict()} arguments on Github so we can add them to the table below. https://github.com/vincentarelbundock/marginaleffects/issues\tabular{llll}{ Package \tab Class \tab Argument \tab Documentation \cr \code{brms} \tab \code{brmsfit} \tab \code{ndraws} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \tab \tab \code{re_formula} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \code{lme4} \tab \code{merMod} \tab \code{re.form} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \tab \tab \code{allow.new.levels} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \code{glmmTMB} \tab \code{glmmTMB} \tab \code{re.form} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{allow.new.levels} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{zitype} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \code{mgcv} \tab \code{bam} \tab \code{exclude} \tab \link[mgcv:predict.bam]{mgcv::predict.bam} \cr \code{robustlmm} \tab \code{rlmerMod} 
\tab \code{re.form} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \tab \tab \code{allow.new.levels} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \code{MCMCglmm} \tab \code{MCMCglmm} \tab \code{ndraws} \tab \cr } } \section{Bayesian posterior summaries}{ By default, credible intervals in bayesian models are built as equal-tailed intervals. This can be changed to a highest density interval by setting a global option: \code{options("marginaleffects_posterior_interval" = "eti")} \code{options("marginaleffects_posterior_interval" = "hdi")} By default, the center of the posterior distribution in bayesian models is identified by the median. Users can use a different summary function by setting a global option: \code{options("marginaleffects_posterior_center" = "mean")} \code{options("marginaleffects_posterior_center" = "median")} When estimates are averaged using the \code{by} argument, the \code{tidy()} function, or the \code{summary()} function, the posterior distribution is marginalized twice over. First, we take the average \emph{across} units but \emph{within} each iteration of the MCMC chain, according to what the user requested in \code{by} argument or \code{tidy()/summary()} functions. Then, we identify the center of the resulting posterior using the function supplied to the \code{"marginaleffects_posterior_center"} option (the median by default). } \section{Equivalence, Inferiority, Superiority}{ \eqn{\theta} is an estimate, \eqn{\sigma_\theta} its estimated standard error, and \eqn{[a, b]} are the bounds of the interval supplied to the \code{equivalence} argument. 
Non-inferiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \leq a}{\theta <= a} \item \eqn{H_1}{H1}: \eqn{\theta > a} \item \eqn{t=(\theta - a)/\sigma_\theta}{t=(\theta - a)/\sigma_\theta} \item p: Upper-tail probability } Non-superiority: \itemize{ \item \eqn{H_0}{H0}: \eqn{\theta \geq b}{\theta >= b} \item \eqn{H_1}{H1}: \eqn{\theta < b} \item \eqn{t=(\theta - b)/\sigma_\theta}{t=(\theta - b)/\sigma_\theta} \item p: Lower-tail probability } Equivalence: Two One-Sided Tests (TOST) \itemize{ \item p: Maximum of the non-inferiority and non-superiority p values. } Thanks to Russell V. Lenth for the excellent \code{emmeans} package and documentation which inspired this feature. } \section{Prediction types}{ The \code{type} argument determines the scale of the predictions used to compute quantities of interest with functions from the \code{marginaleffects} package. Admissible values for \code{type} depend on the model object. When users specify an incorrect value for \code{type}, \code{marginaleffects} will raise an informative error with a list of valid \code{type} values for the specific model object. The first entry in the list in that error message is the default type. The \code{invlink(link)} is a special type defined by \code{marginaleffects}. It is available for some (but not all) models and functions. With this link type, we first compute predictions on the link scale, then we use the inverse link function to backtransform the predictions to the response scale. This is useful for models with non-linear link functions as it can ensure that confidence intervals stay within desirable bounds, ex: 0 to 1 for a logit model. Note that an average of estimates with \code{type="invlink(link)"} will not always be equivalent to the average of estimates with \code{type="response"}. 
Some of the most common \code{type} values are: response, link, E, Ep, average, class, conditional, count, cum.prob, cumprob, density, detection, disp, ev, expected, expvalue, fitted, invlink(link), latent, latent_N, linear.predictor, linpred, location, lp, mean, numeric, p, ppd, pr, precision, prediction, prob, probability, probs, quantile, risk, scale, survival, unconditional, utility, variance, xb, zero, zlink, zprob } \examples{ \dontshow{if (interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \dontshow{\}) # examplesIf} # Adjusted Prediction for every row of the original dataset mod <- lm(mpg ~ hp + factor(cyl), data = mtcars) pred <- predictions(mod) head(pred) # Adjusted Predictions at User-Specified Values of the Regressors predictions(mod, newdata = datagrid(hp = c(100, 120), cyl = 4)) m <- lm(mpg ~ hp + drat + factor(cyl) + factor(am), data = mtcars) predictions(m, newdata = datagrid(FUN_factor = unique, FUN_numeric = median)) # Average Adjusted Predictions (AAP) library(dplyr) mod <- lm(mpg ~ hp * am * vs, mtcars) avg_predictions(mod) predictions(mod, by = "am") # Conditional Adjusted Predictions plot_predictions(mod, condition = "hp") # Counterfactual predictions with the `variables` argument # the `mtcars` dataset has 32 rows mod <- lm(mpg ~ hp + am, data = mtcars) p <- predictions(mod) head(p) nrow(p) # average counterfactual predictions avg_predictions(mod, variables = "am") # counterfactual predictions obtained by replicating the entire for different # values of the predictors p <- predictions(mod, variables = list(hp = c(90, 110))) nrow(p) # hypothesis test: is the prediction in the 1st row equal to the prediction in the 2nd row mod <- lm(mpg ~ wt + drat, data = mtcars) predictions( mod, newdata = datagrid(wt = 2:3), hypothesis = "b1 = b2") # same hypothesis test using row indices predictions( mod, newdata = datagrid(wt = 2:3), hypothesis = "b1 - b2 = 0") # same 
hypothesis test using numeric vector of weights predictions( mod, newdata = datagrid(wt = 2:3), hypothesis = c(1, -1)) # two custom contrasts using a matrix of weights lc <- matrix(c( 1, -1, 2, 3), ncol = 2) predictions( mod, newdata = datagrid(wt = 2:3), hypothesis = lc) # `by` argument mod <- lm(mpg ~ hp * am * vs, data = mtcars) predictions(mod, by = c("am", "vs")) library(nnet) nom <- multinom(factor(gear) ~ mpg + am * vs, data = mtcars, trace = FALSE) # first 5 raw predictions predictions(nom, type = "probs") |> head() # average predictions avg_predictions(nom, type = "probs", by = "group") by <- data.frame( group = c("3", "4", "5"), by = c("3,4", "3,4", "5")) predictions(nom, type = "probs", by = by) # sum of predicted probabilities for combined response levels mod <- multinom(factor(cyl) ~ mpg + am, data = mtcars, trace = FALSE) by <- data.frame( by = c("4,6", "4,6", "8"), group = as.character(c(4, 6, 8))) predictions(mod, newdata = "mean", byfun = sum, by = by) } \references{ \itemize{ \item Greenland S. 2019. "Valid P-Values Behave Exactly as They Should: Some Misleading Criticisms of P-Values and Their Resolution With S-Values." The American Statistician. 73(S1): 106–114. \item Cole, Stephen R, Jessie K Edwards, and Sander Greenland. 2020. "Surprise!" American Journal of Epidemiology 190 (2): 191–93. https://doi.org/10.1093/aje/kwaa136 } } marginaleffects/man/meffects.Rd0000644000176200001440000000034714560035476016260 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprecated.R \name{meffects} \alias{meffects} \title{Deprecated function} \usage{ meffects(...) 
} \description{ Deprecated function } \keyword{internal} marginaleffects/man/marginaleffects.Rd0000644000176200001440000000037414560035476017616 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprecated.R \name{marginaleffects} \alias{marginaleffects} \title{Deprecated function} \usage{ marginaleffects(...) } \description{ Deprecated function } \keyword{internal} marginaleffects/man/datagridcf.Rd0000644000176200001440000000035514560035476016553 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprecated.R \name{datagridcf} \alias{datagridcf} \title{Deprecated function} \usage{ datagridcf(...) } \description{ Deprecated function } \keyword{internal} marginaleffects/man/plot_predictions.Rd0000644000176200001440000002310014557277362020045 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_predictions.R \name{plot_predictions} \alias{plot_predictions} \title{Plot Conditional or Marginal Predictions} \usage{ plot_predictions( model, condition = NULL, by = NULL, newdata = NULL, type = NULL, vcov = NULL, conf_level = 0.95, wts = NULL, transform = NULL, points = 0, rug = FALSE, gray = FALSE, draw = TRUE, ... ) } \arguments{ \item{model}{Model object} \item{condition}{Conditional predictions \itemize{ \item Character vector (max length 4): Names of the predictors to display. \item Named list (max length 4): List names correspond to predictors. List elements can be: \itemize{ \item Numeric vector \item Function which returns a numeric vector or a set of unique categorical values \item Shortcut strings for common reference values: "minmax", "quartile", "threenum" } \item 1: x-axis. 2: color/shape. 3: facet (wrap if no fourth variable, otherwise cols of grid). 4: facet (rows of grid). 
\item Numeric variables in positions 2 and 3 are summarized by Tukey's five numbers \code{?stats::fivenum} }} \item{by}{Marginal predictions \itemize{ \item Character vector (max length 3): Names of the categorical predictors to marginalize across. \item 1: x-axis. 2: color. 3: facets. }} \item{newdata}{When \code{newdata} is \code{NULL}, the grid is determined by the \code{condition} argument. When \code{newdata} is not \code{NULL}, the argument behaves in the same way as in the \code{predictions()} function.} \item{type}{string indicates the type (scale) of the predictions used to compute contrasts or slopes. This can differ based on the model type, but will typically be a string such as: "response", "link", "probs", or "zero". When an unsupported string is entered, the model-specific list of acceptable values is returned in an error message. When \code{type} is \code{NULL}, the first entry in the error message is used by default.} \item{vcov}{Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: \itemize{ \item FALSE: Do not compute standard errors. This can speed up computation considerably. \item TRUE: Unit-level standard errors using the default \code{vcov(model)} variance-covariance matrix. \item String which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Heteroskedasticity and autocorrelation consistent: \code{"HAC"} \item Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" \item Other: \code{"NeweyWest"}, \code{"KernHAC"}, \code{"OPG"}. See the \code{sandwich} package documentation. } \item One-sided formula which indicates the name of cluster variables (e.g., \code{~unit_id}). This formula is passed to the \code{cluster} argument of the \code{sandwich::vcovCL} function. 
\item Square covariance matrix \item Function which returns a covariance matrix (e.g., \code{stats::vcov(model)}) }} \item{conf_level}{numeric value between 0 and 1. Confidence level to use to build a confidence interval.} \item{wts}{string or numeric: weights to use when computing average contrasts or slopes. These weights only affect the averaging in \verb{avg_*()} or with the \code{by} argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the \code{weighted.mean()} function. \itemize{ \item string: column name of the weights variable in \code{newdata}. When supplying a column name to \code{wts}, it is recommended to supply the original data (including the weights variable) explicitly to \code{newdata}. \item numeric: vector of length equal to the number of rows in the original data or in \code{newdata} (if supplied). }} \item{transform}{A function applied to unit-level adjusted predictions and confidence intervals just before the function returns results. For bayesian models, this function is applied to individual draws from the posterior distribution, before computing summaries.} \item{points}{Number between 0 and 1 which controls the transparency of raw data points. 0 (default) does not display any points.} \item{rug}{TRUE displays tick marks on the axes to mark the distribution of raw data.} \item{gray}{FALSE grayscale or color plot} \item{draw}{\code{TRUE} returns a \code{ggplot2} plot. \code{FALSE} returns a \code{data.frame} of the underlying data.} \item{...}{Additional arguments are passed to the \code{predict()} method supplied by the modeling package.These arguments are particularly useful for mixed-effects or bayesian models (see the online vignettes on the \code{marginaleffects} website). Available arguments can vary from model to model, depending on the range of supported arguments by each modeling package. 
See the "Model-Specific Arguments" section of the \code{?slopes} documentation for a non-exhaustive list of available arguments.} } \value{ A \code{ggplot2} object or data frame (if \code{draw=FALSE}) } \description{ Plot predictions on the y-axis against values of one or more predictors (x-axis, colors/shapes, and facets). The \code{by} argument is used to plot marginal predictions, that is, predictions made on the original data, but averaged by subgroups. This is analogous to using the \code{by} argument in the \code{predictions()} function. The \code{condition} argument is used to plot conditional predictions, that is, predictions made on a user-specified grid. This is analogous to using the \code{newdata} argument and \code{datagrid()} function in a \code{predictions()} call. All variables whose values are not specified explicitly are treated as usual by \code{datagrid()}, that is, they are held at their mean or mode (or rounded mean for integers). This includes grouping variables in mixed-effects models, so analysts who fit such models may want to specify the groups of interest using the \code{condition} argument, or supply model-specific arguments to compute population-level estimates. See details below. See the "Plots" vignette and website for tutorials and information on how to customize plots: \itemize{ \item https://marginaleffects.com/vignettes/plot.html \item https://marginaleffects.com } } \section{Model-Specific Arguments}{ Some model types allow model-specific arguments to modify the nature of marginal effects, predictions, marginal means, and contrasts. Please report other package-specific \code{predict()} arguments on Github so we can add them to the table below. 
https://github.com/vincentarelbundock/marginaleffects/issues\tabular{llll}{ Package \tab Class \tab Argument \tab Documentation \cr \code{brms} \tab \code{brmsfit} \tab \code{ndraws} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \tab \tab \code{re_formula} \tab \link[brms:posterior_predict.brmsfit]{brms::posterior_predict} \cr \code{lme4} \tab \code{merMod} \tab \code{re.form} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \tab \tab \code{allow.new.levels} \tab \link[lme4:predict.merMod]{lme4::predict.merMod} \cr \code{glmmTMB} \tab \code{glmmTMB} \tab \code{re.form} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{allow.new.levels} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \tab \tab \code{zitype} \tab \link[glmmTMB:predict.glmmTMB]{glmmTMB::predict.glmmTMB} \cr \code{mgcv} \tab \code{bam} \tab \code{exclude} \tab \link[mgcv:predict.bam]{mgcv::predict.bam} \cr \code{robustlmm} \tab \code{rlmerMod} \tab \code{re.form} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \tab \tab \code{allow.new.levels} \tab \link[robustlmm:rlmerMod-class]{robustlmm::predict.rlmerMod} \cr \code{MCMCglmm} \tab \code{MCMCglmm} \tab \code{ndraws} \tab \cr } } \section{Prediction types}{ The \code{type} argument determines the scale of the predictions used to compute quantities of interest with functions from the \code{marginaleffects} package. Admissible values for \code{type} depend on the model object. When users specify an incorrect value for \code{type}, \code{marginaleffects} will raise an informative error with a list of valid \code{type} values for the specific model object. The first entry in the list in that error message is the default type. The \code{invlink(link)} is a special type defined by \code{marginaleffects}. It is available for some (but not all) models and functions. 
With this link type, we first compute predictions on the link scale, then we use the inverse link function to backtransform the predictions to the response scale. This is useful for models with non-linear link functions as it can ensure that confidence intervals stay within desirable bounds, ex: 0 to 1 for a logit model. Note that an average of estimates with \code{type="invlink(link)"} will not always be equivalent to the average of estimates with \code{type="response"}. Some of the most common \code{type} values are: response, link, E, Ep, average, class, conditional, count, cum.prob, cumprob, density, detection, disp, ev, expected, expvalue, fitted, invlink(link), latent, latent_N, linear.predictor, linpred, location, lp, mean, numeric, p, ppd, pr, precision, prediction, prob, probability, probs, quantile, risk, scale, survival, unconditional, utility, variance, xb, zero, zlink, zprob } \examples{ mod <- lm(mpg ~ hp + wt, data = mtcars) plot_predictions(mod, condition = "wt") mod <- lm(mpg ~ hp * wt * am, data = mtcars) plot_predictions(mod, condition = c("hp", "wt")) plot_predictions(mod, condition = list("hp", wt = "threenum")) plot_predictions(mod, condition = list("hp", wt = range)) } marginaleffects/man/posteriordraws.Rd0000644000176200001440000000166114541720224017542 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_draws.R \name{posteriordraws} \alias{posteriordraws} \title{\code{posteriordraws()} is an alias to \code{posterior_draws()}} \usage{ posteriordraws(x, shape = "long") } \arguments{ \item{x}{An object produced by a \code{marginaleffects} package function, such as \code{predictions()}, \code{avg_slopes()}, \code{hypotheses()}, etc.} \item{shape}{string indicating the shape of the output format: \itemize{ \item "long": long format data frame \item "DxP": Matrix with draws as rows and parameters as columns \item "PxD": Matrix with draws as rows and parameters as columns \item "rvar": Random 
variable datatype (see \code{posterior} package documentation). }} } \value{ A data.frame with \code{drawid} and \code{draw} columns. } \description{ This alias is kept for backward compatibility and because some users may prefer that name. } \keyword{internal} marginaleffects/man/expect_predictions.Rd0000644000176200001440000000046314541720224020345 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tinytest.R \name{expect_predictions} \alias{expect_predictions} \title{\code{tinytest} helper} \usage{ expect_predictions(object, se = TRUE, n_row = NULL, n_col = NULL) } \description{ \code{tinytest} helper } \keyword{internal} marginaleffects/man/set_coef.Rd0000644000176200001440000000632514541720224016244 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/set_coef.R, R/methods_MASS.R, % R/methods_Rchoice.R, R/methods_afex.R, R/methods_aod.R, R/methods_betareg.R, % R/methods_nnet.R, R/methods_crch.R, R/methods_dataframe.R, % R/methods_gamlss.R, R/methods_glmmTMB.R, R/methods_glmx.R, R/methods_lme4.R, % R/methods_mlm.R, R/methods_nlme.R, R/methods_pscl.R, R/methods_robustlmm.R, % R/methods_sampleSelection.R, R/methods_scam.R, R/methods_stats.R, % R/methods_survey.R, R/methods_tidymodels.R \name{set_coef} \alias{set_coef} \alias{set_coef.default} \alias{set_coef.polr} \alias{set_coef.glmmPQL} \alias{set_coef.hetprob} \alias{set_coef.ivpml} \alias{set_coef.afex_aov} \alias{set_coef.glimML} \alias{set_coef.betareg} \alias{set_coef.multinom} \alias{set_coef.crch} \alias{set_coef.hxlr} \alias{set_coef.data.frame} \alias{set_coef.gamlss} \alias{set_coef.glmmTMB} \alias{set_coef.glmx} \alias{set_coef.merMod} \alias{set_coef.lmerModLmerTest} \alias{set_coef.lmerMod} \alias{set_coef.mlm} \alias{set_coef.lme} \alias{set_coef.hurdle} \alias{set_coef.zeroinfl} \alias{set_coef.rlmerMod} \alias{set_coef.selection} \alias{set_coef.scam} \alias{set_coef.glm} \alias{set_coef.lm} 
\alias{set_coef.nls} \alias{set_coef.svyolr} \alias{set_coef.model_fit} \alias{set_coef.workflow} \title{Internal function to set coefficients} \usage{ set_coef(model, coefs, ...) \method{set_coef}{default}(model, coefs, ...) \method{set_coef}{polr}(model, coefs, ...) \method{set_coef}{glmmPQL}(model, coefs, ...) \method{set_coef}{hetprob}(model, coefs, ...) \method{set_coef}{ivpml}(model, coefs, ...) \method{set_coef}{afex_aov}(model, coefs, ...) \method{set_coef}{glimML}(model, coefs, ...) \method{set_coef}{betareg}(model, coefs, ...) \method{set_coef}{multinom}(model, coefs, ...) \method{set_coef}{crch}(model, coefs, ...) \method{set_coef}{hxlr}(model, coefs, ...) \method{set_coef}{data.frame}(model, coefs, ...) \method{set_coef}{gamlss}(model, coefs, ...) \method{set_coef}{glmmTMB}(model, coefs, ...) \method{set_coef}{glmx}(model, coefs, ...) \method{set_coef}{merMod}(model, coefs, ...) \method{set_coef}{lmerModLmerTest}(model, coefs, ...) \method{set_coef}{lmerMod}(model, coefs, ...) \method{set_coef}{mlm}(model, coefs, ...) \method{set_coef}{lme}(model, coefs, ...) \method{set_coef}{hurdle}(model, coefs, ...) \method{set_coef}{zeroinfl}(model, coefs, ...) \method{set_coef}{rlmerMod}(model, coefs, ...) \method{set_coef}{selection}(model, coefs, ...) \method{set_coef}{scam}(model, coefs, ...) \method{set_coef}{glm}(model, coefs, ...) \method{set_coef}{lm}(model, coefs, ...) \method{set_coef}{nls}(model, coefs, ...) \method{set_coef}{svyolr}(model, coefs, ...) \method{set_coef}{model_fit}(model, coefs, ...) \method{set_coef}{workflow}(model, coefs, ...) } \arguments{ \item{model}{object to modify} \item{coefs}{vector of coefficients to insert in the model object} } \value{ Model object of the same class as the \code{model} argument, but with different stored coefficients. 
} \description{ Set the coefficients in a model to different values and return the modified object (internal function) } \details{ To compute the variance of marginal effects we need to take the Jacobian with } \keyword{internal} marginaleffects/DESCRIPTION0000644000176200001440000001333614560154575015134 0ustar liggesusersPackage: marginaleffects Title: Predictions, Comparisons, Slopes, Marginal Means, and Hypothesis Tests Version: 0.18.0 Authors@R: c(person(given = "Vincent", family = "Arel-Bundock", role = c("aut", "cre", "cph"), email = "vincent.arel-bundock@umontreal.ca", comment = c(ORCID = "0000-0003-2042-7063")), person(given = "Marcio Augusto", family = "Diniz", role = "ctb", email = "marcio.diniz@cshs.org", comment = c(ORCID = "0000-0002-2427-7843")), person(given = "Noah", family = "Greifer", role = "ctb", email = "noah.greifer@gmail.com", comment = c(ORCID = "0000-0003-3067-7154")), person("Etienne", "Bacher", email = "etienne.bacher@protonmail.com", role = "ctb", comment = c(ORCID = "0000-0002-9271-5075"))) Description: Compute and plot predictions, slopes, marginal means, and comparisons (contrasts, risk ratios, odds, etc.) for over 100 classes of statistical and machine learning models in R. Conduct linear and non-linear hypothesis tests, or equivalence tests. Calculate uncertainty estimates using the delta method, bootstrapping, or simulation-based inference. 
License: GPL (>= 3) Copyright: inst/COPYRIGHTS Encoding: UTF-8 URL: https://marginaleffects.com/ BugReports: https://github.com/vincentarelbundock/marginaleffects/issues RoxygenNote: 7.3.0 Depends: R (>= 3.6.0) Imports: checkmate, data.table, generics, insight (>= 0.19.7), methods, rlang, Rcpp (>= 1.0.0) LinkingTo: Rcpp, RcppEigen Suggests: AER, Amelia, afex, aod, bench, betareg, BH, bife, biglm, blme, boot, brglm2, brms, brmsmargins, broom, car, carData, causaldata, collapse, conflicted, countrycode, covr, crch, DALEXtra, DCchoice, dbarts, distributional, dplyr, emmeans, equivalence, estimatr, fixest, fmeffects, fontquiver, future, fwb, gam, gamlss, gamlss.dist, geepack, ggdag, ggdist, ggokabeito, ggplot2, ggrepel, glmmTMB, glmx, haven, here, itsadug, ivreg, kableExtra, knitr, lme4, lmerTest, logistf, magrittr, margins, MatchIt, MASS, mclogit, MCMCglmm, missRanger, mgcv, mhurdle, mice, miceadds, mlogit, mlr3verse, modelbased, modelsummary, nlme, nnet, numDeriv, optmatch, ordinal, parameters, parsnip, partykit, patchwork, pkgdown, phylolm, plm, polspline, poorman, posterior, prediction, pscl, purrr, quantreg, Rchoice, rcmdcheck, remotes, rmarkdown, rms, robust, robustbase, robustlmm, rsample, rstanarm, rstantools, rsvg, sampleSelection, sandwich, scam, spelling, speedglm, survey, survival, svglite, systemfonts, tibble, tidymodels, tidyr, tidyverse, tinysnapshot, tinytest, titanic, truncreg, tsModel, withr, workflows, yaml, xgboost, testthat (>= 3.0.0), altdoc Collate: 'RcppExports.R' 'backtransform.R' 'bootstrap_boot.R' 'bootstrap_fwb.R' 'bootstrap_rsample.R' 'broom.R' 'by.R' 'ci.R' 'comparisons.R' 'complete_levels.R' 'conformal.R' 'datagrid.R' 'deprecated.R' 'equivalence.R' 'get_averages.R' 'get_coef.R' 'get_contrast_data.R' 'get_contrast_data_character.R' 'get_contrast_data_factor.R' 'get_contrast_data_logical.R' 'get_contrast_data_numeric.R' 'get_contrasts.R' 'get_group_names.R' 'get_hypothesis.R' 'get_jacobian.R' 'get_model_matrix.R' 
'get_model_matrix_attribute.R' 'get_modeldata.R' 'get_predict.R' 'get_se_delta.R' 'get_term_labels.R' 'get_vcov.R' 'github_issue.R' 'hush.R' 'hypotheses.R' 'hypotheses_joint.R' 'imputation.R' 'inferences.R' 'mean_or_mode.R' 'methods.R' 'set_coef.R' 'methods_MASS.R' 'methods_MCMCglmm.R' 'methods_Rchoice.R' 'methods_afex.R' 'methods_aod.R' 'methods_betareg.R' 'methods_bife.R' 'methods_biglm.R' 'methods_nnet.R' 'methods_brglm2.R' 'sanity_model.R' 'methods_brms.R' 'methods_crch.R' 'methods_dataframe.R' 'methods_dbarts.R' 'methods_fixest.R' 'methods_gamlss.R' 'methods_glmmTMB.R' 'methods_glmx.R' 'methods_inferences_simulation.R' 'methods_lme4.R' 'methods_mclogit.R' 'methods_mgcv.R' 'methods_mhurdle.R' 'methods_mlm.R' 'methods_mlogit.R' 'methods_mlr3.R' 'methods_nlme.R' 'methods_ordinal.R' 'methods_plm.R' 'methods_pscl.R' 'methods_quantreg.R' 'methods_rms.R' 'methods_robustlmm.R' 'methods_rstanarm.R' 'methods_sampleSelection.R' 'methods_scam.R' 'methods_stats.R' 'methods_survey.R' 'methods_survival.R' 'methods_tidymodels.R' 'methods_tobit1.R' 'modelarchive.R' 'myTryCatch.R' 'package.R' 'plot.R' 'plot_build.R' 'plot_comparisons.R' 'plot_predictions.R' 'plot_slopes.R' 'posterior_draws.R' 'predictions.R' 'print.R' 'recall.R' 'sanitize_comparison.R' 'sanitize_condition.R' 'sanitize_conf_level.R' 'sanitize_hypothesis.R' 'sanitize_interaction.R' 'sanitize_newdata.R' 'sanitize_numderiv.R' 'sanitize_type.R' 'sanitize_variables.R' 'sanitize_vcov.R' 'sanity.R' 'sanity_by.R' 'sanity_dots.R' 'settings.R' 'slopes.R' 'sort.R' 'tinytest.R' 'type_dictionary.R' 'unpack_matrix_cols.R' 'utils.R' Language: en-US Config/testthat/edition: 3 NeedsCompilation: yes Packaged: 2024-02-05 02:13:40 UTC; vincent Author: Vincent Arel-Bundock [aut, cre, cph] (), Marcio Augusto Diniz [ctb] (), Noah Greifer [ctb] (), Etienne Bacher [ctb] () Maintainer: Vincent Arel-Bundock Repository: CRAN Date/Publication: 2024-02-05 12:50:05 UTC marginaleffects/tests/0000755000176200001440000000000014560035476014560 
5ustar liggesusersmarginaleffects/tests/tinytest.R0000644000176200001440000000053614560035476016572 0ustar liggesusersif (requireNamespace("tinytest", quietly = TRUE) && isTRUE(Sys.getenv("R_NOT_CRAN") == "true") && isTRUE(Sys.info()["sysname"] != "Windows") && # do not run test when checking package because .Rbuildignore excludes the modelarchive directory dir.exists("inst/tinytest/modelarchive")) { tinytest::test_package("marginaleffects") } marginaleffects/tests/spelling.R0000644000176200001440000000021014541720224016500 0ustar liggesusersif(requireNamespace('spelling', quietly = TRUE)) { spelling::spell_check_test(vignettes = TRUE, error = FALSE, skip_on_cran = TRUE) } marginaleffects/src/0000755000176200001440000000000014560042124014171 5ustar liggesusersmarginaleffects/src/eigen.cpp0000644000176200001440000000033214541724350015771 0ustar liggesusers// [[Rcpp::depends(RcppEigen)]] #include // [[Rcpp::export]] SEXP eigenMatMult(const Eigen::Map A, Eigen::Map B){ Eigen::VectorXd C = A * B; return Rcpp::wrap(C); } marginaleffects/src/RcppExports.cpp0000644000176200001440000000224014541724350017173 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include #include using namespace Rcpp; #ifdef RCPP_USE_GLOBAL_ROSTREAM Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif // eigenMatMult SEXP eigenMatMult(const Eigen::Map A, Eigen::Map B); RcppExport SEXP _marginaleffects_eigenMatMult(SEXP ASEXP, SEXP BSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Eigen::Map >::type A(ASEXP); Rcpp::traits::input_parameter< Eigen::Map >::type B(BSEXP); rcpp_result_gen = Rcpp::wrap(eigenMatMult(A, B)); return rcpp_result_gen; END_RCPP } static const R_CallMethodDef CallEntries[] = { {"_marginaleffects_eigenMatMult", (DL_FUNC) &_marginaleffects_eigenMatMult, 
2}, {NULL, NULL, 0} }; RcppExport void R_init_marginaleffects(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } marginaleffects/R/0000755000176200001440000000000014560042044013604 5ustar liggesusersmarginaleffects/R/plot_predictions.R0000644000176200001440000001637214557277362017344 0ustar liggesusers#' Plot Conditional or Marginal Predictions #' #' @description #' Plot predictions on the y-axis against values of one or more predictors (x-axis, colors/shapes, and facets). #' #' The `by` argument is used to plot marginal predictions, that is, predictions made on the original data, but averaged by subgroups. This is analogous to using the `by` argument in the `predictions()` function. #' #' The `condition` argument is used to plot conditional predictions, that is, predictions made on a user-specified grid. This is analogous to using the `newdata` argument and `datagrid()` function in a `predictions()` call. All variables whose values are not specified explicitly are treated as usual by `datagrid()`, that is, they are held at their mean or mode (or rounded mean for integers). This includes grouping variables in mixed-effects models, so analysts who fit such models may want to specify the groups of interest using the `condition` argument, or supply model-specific arguments to compute population-level estimates. See details below. #' #' See the "Plots" vignette and website for tutorials and information on how to customize plots: #' #' * https://marginaleffects.com/vignettes/plot.html #' * https://marginaleffects.com #' #' @param condition Conditional predictions #' + Character vector (max length 4): Names of the predictors to display. #' + Named list (max length 4): List names correspond to predictors. 
List elements can be: #' - Numeric vector #' - Function which returns a numeric vector or a set of unique categorical values #' - Shortcut strings for common reference values: "minmax", "quartile", "threenum" #' + 1: x-axis. 2: color/shape. 3: facet (wrap if no fourth variable, otherwise cols of grid). 4: facet (rows of grid). #' + Numeric variables in positions 2 and 3 are summarized by Tukey's five numbers `?stats::fivenum` #' @param by Marginal predictions #' + Character vector (max length 3): Names of the categorical predictors to marginalize across. #' + 1: x-axis. 2: color. 3: facets. #' @param newdata When `newdata` is `NULL`, the grid is determined by the `condition` argument. When `newdata` is not `NULL`, the argument behaves in the same way as in the `predictions()` function. #' @param points Number between 0 and 1 which controls the transparency of raw data points. 0 (default) does not display any points. #' @param draw `TRUE` returns a `ggplot2` plot. `FALSE` returns a `data.frame` of the underlying data. #' @inheritParams plot_slopes #' @inheritParams predictions #' @template model_specific_arguments #' @template type #' @return A `ggplot2` object or data frame (if `draw=FALSE`) #' @export #' @examples #' mod <- lm(mpg ~ hp + wt, data = mtcars) #' plot_predictions(mod, condition = "wt") #' #' mod <- lm(mpg ~ hp * wt * am, data = mtcars) #' plot_predictions(mod, condition = c("hp", "wt")) #' #' plot_predictions(mod, condition = list("hp", wt = "threenum")) #' #' plot_predictions(mod, condition = list("hp", wt = range)) #' plot_predictions <- function(model, condition = NULL, by = NULL, newdata = NULL, type = NULL, vcov = NULL, conf_level = 0.95, wts = NULL, transform = NULL, points = 0, rug = FALSE, gray = FALSE, draw = TRUE, ...) { dots <- list(...) 
checkmate::assert_number(points, lower = 0, upper = 1) if ("variables" %in% names(dots)) { insight::format_error("The `variables` argument is not supported by this function.") } if ("effect" %in% names(dots)) { insight::format_error("The `effect` argument is not supported by this function.") } if ("transform_post" %in% names(dots)) { # backward compatibility transform <- dots[["transform_post"]] } # order of the first few paragraphs is important scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) if (!is.null(wts) && is.null(by)) { insight::format_error("The `wts` argument requires a `by` argument.") } checkmate::assert_character(by, null.ok = TRUE) # sanity check checkmate::assert_character(by, null.ok = TRUE, max.len = 4, min.len = 1, names = "unnamed") if ((!is.null(condition) && !is.null(by)) || (is.null(condition) && is.null(by))) { msg <- "One of the `condition` and `by` arguments must be supplied, but not both." insight::format_error(msg) } modeldata <- get_modeldata( model, additional_variables = c(names(condition), by), wts = wts) # mlr3 and tidymodels if (is.null(modeldata) || nrow(modeldata) == 0) { modeldata <- newdata } # conditional if (!is.null(condition)) { condition <- sanitize_condition(model, condition, variables = NULL, modeldata = modeldata) v_x <- condition$condition1 v_color <- condition$condition2 v_facet_1 <- condition$condition3 v_facet_2 <- condition$condition4 datplot <- predictions( model, newdata = condition$newdata, type = type, vcov = vcov, conf_level = conf_level, transform = transform, modeldata = modeldata, wts = wts, ...) 
} # marginal if (!isFALSE(by) && !is.null(by)) { # switched from NULL above condition <- NULL newdata <- sanitize_newdata( model = model, newdata = newdata, modeldata = modeldata, by = by, wts = wts) # tidymodels & mlr3 if (is.null(modeldata)) { modeldata <- newdata } datplot <- predictions( model, by = by, type = type, vcov = vcov, conf_level = conf_level, wts = wts, transform = transform, newdata = newdata, modeldata = modeldata, ...) v_x <- by[[1]] v_color <- hush(by[[2]]) v_facet_1 <- hush(by[[3]]) v_facet_2 <- hush(by[[4]]) } dv <- unlist(insight::find_response(model, combine = TRUE), use.names = FALSE)[1] datplot <- plot_preprocess(datplot, v_x = v_x, v_color = v_color, v_facet_1 = v_facet_1, v_facet_2 = v_facet_2, condition = condition, modeldata = modeldata) # return immediately if the user doesn't want a plot if (isFALSE(draw)) { out <- as.data.frame(datplot) attr(out, "posterior_draws") <- attr(datplot, "posterior_draws") return(out) } # ggplot2 insight::check_if_installed("ggplot2") p <- plot_build(datplot, v_x = v_x, v_color = v_color, v_facet_1 = v_facet_1, v_facet_2 = v_facet_2, points = points, modeldata = modeldata, dv = dv, rug = rug, gray = gray) p <- p + ggplot2::labs( x = v_x, y = dv, color = v_color, fill = v_color, linetype = v_color) # attach model data for each of use attr(p, "modeldata") <- modeldata return(p) } marginaleffects/R/get_contrast_data_numeric.R0000644000176200001440000001067014541720224021144 0ustar liggesusersget_contrast_data_numeric <- function(model, newdata, variable, modeldata, ...) 
{ h <- variable[["eps"]] s <- m <- NA if (is.numeric(modeldata[[variable$name]])) { s <- stats::sd(modeldata[[variable$name]], na.rm = TRUE) m <- mean(modeldata[[variable$name]], na.rm = TRUE) } x <- newdata[[variable$name]] xmd <- modeldata[[variable$name]] make_label <- function(lab, val) { if (identical(lab, "custom")) return(lab) args <- append(list(lab), as.list(val)) out <- tryCatch( do.call("sprintf", args), error = function(e) lab) return(out) } # slope # by default variable$value, so we need to check this first slopes <- c( "dY/dX", "eY/eX", "eY/dX", "dY/eX", "mean(dY/dX)", "mean(eY/eX)", "mean(eY/dX)", "mean(dY/eX)") # manual high if (isTRUE(checkmate::check_data_frame(variable$value))) { if (all(c("low", "high") %in% colnames(variable$value))) { low <- variable$value$low high <- variable$value$high } else { low <- variable$value[[1]] high <- variable$value[[2]] } lab <- "manual" } else if (isTRUE(variable$label %in% slopes)) { low <- x - h / 2 high <- x + h / 2 lab <- variable$label } else if (identical(variable$label, "exp(dY/dX)")) { low <- x - h / 2 high <- x + h / 2 lab <- "exp(dY/dX)" # contrast_label is designed for categorical predictors # numeric contrasts first } else if (isTRUE(checkmate::check_numeric(variable$value, len = 1))) { direction <- getOption("marginaleffects_contrast_direction", default = "forward") if (isTRUE(direction == "center")) { low <- x - variable$value / 2 high <- x + variable$value / 2 } else if (isTRUE(direction == "backward")) { low <- x - variable$value high <- x } else { low <- x high <- x + variable$value } # wrap in parentheses, unless mean() because there are already parentheses # important to display ratios of x+1, etc. 
# label should not be `(mpg+1) - mpg` because that is misleading for centered contrast if (!isTRUE(grepl("mean", variable$label))) { lab <- sprintf("+%s", variable$value) } else { lab <- sprintf("mean(+%s)", variable$value) } } else if (isTRUE(checkmate::check_numeric(variable$value, len = 2))) { variable$value <- sort(variable$value) low <- variable$value[1] high <- variable$value[2] gap <- diff(variable$value) lab <- make_label(variable$label, rev(variable$value)) # character contrasts } else if (identical(variable$value, "sd")) { low <- m - s / 2 high <- m + s / 2 lab <- c("x + sd/2", "x - sd/2") if (!isTRUE(grepl("mean", variable$label))) { lab <- sprintf("(%s)", lab) } lab <- make_label(variable$label, lab) } else if (identical(variable$value, "2sd")) { low <- m - s high <- m + s lab <- c("x + sd", "x - sd") if (!isTRUE(grepl("mean", variable$label))) { lab <- sprintf("(%s)", lab) } lab <- make_label(variable$label, lab) } else if (identical(variable$value, "iqr")) { low <- stats::quantile(xmd, probs = .25, na.rm = TRUE) high <- stats::quantile(xmd, probs = .75, na.rm = TRUE) lab <- make_label(variable$label, c("Q3", "Q1")) } else if (identical(variable$value, "minmax")) { low <- min(xmd, na.rm = TRUE) high <- max(xmd, na.rm = TRUE) lab <- make_label(variable$label, c("Max", "Min")) } else if (isTRUE(checkmate::check_function(variable$value))) { tmp <- variable$value(x) low <- tmp[, 1] high <- tmp[, 2] lab <- "custom" } lo <- hi <- newdata lo[[variable$name]] <- low hi[[variable$name]] <- high out <- list(rowid = seq_len(nrow(newdata)), lo = lo, hi = hi, original = newdata, ter = rep(variable$name, nrow(newdata)), lab = rep(lab, nrow(newdata))) return(out) } marginaleffects/R/print.R0000644000176200001440000002351414560035476015103 0ustar liggesusers#' Print `marginaleffects` objects #' #' @description #' This function controls the text which is printed to the console when one of the core `marginalefffects` functions is called and the object is returned: 
`predictions()`, `comparisons()`, `slopes()`, `hypotheses()`, `avg_predictions()`, `avg_comparisons()`, `avg_slopes()`. #' #' All of those functions return standard data frames. Columns can be extracted by name, `predictions(model)$estimate`, and all the usual data manipulation functions work out-of-the-box: `colnames()`, `head()`, `subset()`, `dplyr::filter()`, `dplyr::arrange()`, etc. #' #' Some of the data columns are not printed by default. You can disable pretty printing and print the full results as a standard data frame using the `style` argument or by applying `as.data.frame()` on the object. See examples below. #' #' @param x An object produced by one of the `marginaleffects` package functions. #' @param digits The number of digits to display. #' @param p_eps p values smaller than this number are printed in "<0.001" style. #' @param topn The number of rows to be printed from the beginning and end of tables with more than `nrows` rows. #' @param nrows The number of rows which will be printed before truncation. #' @param ncols The maximum number of column names to display at the bottom of the printed output. #' @param style "summary" or "data.frame" #' @param type boolean: should the type be printed? #' @param column_names boolean: should the column names be printed? #' @param ... Other arguments are currently ignored. 
#' @export #' @examples #' library(marginaleffects) #' mod <- lm(mpg ~ hp + am + factor(gear), data = mtcars) #' p <- predictions(mod, by = c("am", "gear")) #' p #' #' subset(p, am == 1) #' #' print(p, style = "data.frame") #' #' data.frame(p) #' print.marginaleffects <- function(x, digits = getOption("marginaleffects_print_digits", default = 3), p_eps = getOption("marginaleffects_print_p_eps", default = 0.001), topn = getOption("marginaleffects_print_topn", default = 5), nrows = getOption("marginaleffects_print_nrows", default = 30), ncols = getOption("marginaleffects_print_ncols", default = 30), style = getOption("marginaleffects_print_style", default = "summary"), type = getOption("marginaleffects_print_type", default = TRUE), column_names = getOption("marginaleffects_print_column_names", default = TRUE), ...) { checkmate::assert_number(digits) checkmate::assert_number(topn) checkmate::assert_number(nrows) checkmate::assert_choice(style, choices = c("data.frame", "summary")) if (isTRUE(style == "data.frame")) { print(as.data.frame(x)) return(invisible(x)) } out <- x nrows <- max(nrows, 2 * topn) if ("group" %in% colnames(out) && all(out$group == "main_marginaleffects")) { out$group <- NULL } # subset before rounding so that digits match top and bottom rows if (nrow(out) > nrows) { out <- rbind(utils::head(out, topn), utils::tail(out, topn)) splitprint <- TRUE } else { splitprint <- FALSE } # round and replace NAs ps <- c("p.value", "p.value.nonsup", "p.value.noninf", "p.value.equiv") for (i in seq_along(out)) { if (colnames(out)[i] %in% ps) { out[[i]] <- format.pval(out[[i]], digits = digits, eps = p_eps) } else if (isTRUE("s.value" == colnames(out)[i])) { out[[i]] <- sprintf("%.1f", out[[i]]) } else { out[[i]] <- format(out[[i]], digits = digits) } } if (is.null(attr(x, "conf_level"))) { alpha <- NULL } else { alpha <- 100 * (1 - attr(x, "conf_level")) } # contrast is sometimes useless if ("contrast" %in% colnames(out) && all(out$contrast == "")) { out$contrast 
<- NULL } statistic_label <- attr(x, "statistic_label") if (is.null(statistic_label)) { if (any(out[["df"]] < Inf)) { statistic_label <- "t" } else { statistic_label <- "z" } } # rename dict <- c( "group" = "Group", "term" = "Term", "contrast" = "Contrast", "value" = "Value", "by" = "By", "estimate" = "Estimate", "std.error" = "Std. Error", "statistic" = statistic_label, "p.value" = sprintf("Pr(>|%s|)", statistic_label), "s.value" = "S", "conf.low" = ifelse(is.null(alpha), "CI low", sprintf("%.1f %%", alpha / 2)), "conf.high" = ifelse(is.null(alpha), "CI high", sprintf("%.1f %%", 100 - alpha / 2)), "pred.low" = ifelse(is.null(alpha), "Pred low", sprintf("Pred. %.1f %%", alpha / 2)), "pred.high" = ifelse(is.null(alpha), "Pred high", sprintf("Pred. %.1f %%", 100 - alpha / 2)), "pred.set" = ifelse(is.null(alpha), "Pred Set", sprintf("Pred Set %.1f %%", 100 - alpha / 2)), "p.value.nonsup" = "p (NonSup)", "p.value.noninf" = "p (NonInf)", "p.value.equiv" = "p (Equiv)", "df" = "Df", "df1" = "Df 1", "df2" = "Df 2" ) if (inherits(x, "marginalmeans")) { dict["estimate"] <- "Mean" } # Subset columns idx <- c( names(dict), grep("^contrast_", colnames(x), value = TRUE)) # explicitly given by user in `datagrid()` or `by` or `newdata` nd <- attr(x, "newdata") if (is.null(nd)) { nd <- attr(x, "newdata_newdata") } tmp <- c("by", attr(nd, "variables_datagrid"), attr(nd, "newdata_variables_datagrid"), attr(x, "variables_datagrid"), attr(x, "newdata_variables_datagrid") ) if (isTRUE(checkmate::check_character(attr(x, "by")))) { tmp <- c(tmp, attr(x, "by")) } idx <- c(idx[1:grep("by", idx)], tmp, idx[(grep("by", idx) + 1):length(idx)]) if (isTRUE(attr(nd, "newdata_newdata_explicit")) || isTRUE(attr(nd, "newdata_explicit"))) { idx <- c(idx, colnames(nd)) } # drop useless columns: rowid useless <- c("rowid", "rowidcf") # drop useless columns: dv dv <- tryCatch( unlist(insight::find_response(attr(x, "model"), combine = TRUE), use.names = FALSE), error = function(e) NULL) useless <- 
c(useless, dv) # selection style data.table::setDT(out) # drop useless columns idx <- setdiff(unique(idx), useless) idx <- intersect(idx, colnames(out)) out <- out[, ..idx, drop = FALSE] if ("term" %in% colnames(out) && all(out$term == "cross")) { out[["term"]] <- NULL colnames(out) <- gsub("^contrast_", "C: ", colnames(out)) } for (i in seq_along(dict)) { colnames(out)[colnames(out) == names(dict)[i]] <- dict[i] } # avoid infinite recursion by stripping marginaleffect.summary class data.table::setDF(out) # recommend avg_*() rec <- "" if (isFALSE(attr(x, "by"))) { if (inherits(x, "predictions")) { rec <- "?avg_predictions and " } else if (inherits(x, "comparisons")) { rec <- "?avg_comparisons and " } else if (inherits(x, "slopes")) { rec <- "?avg_slopes and " } } # head cat("\n") print_head <- attr(x, "print_head") if (!is.null(print_head)) { cat(print_head, "\n") } # some commands do not generate average contrasts/mfx. E.g., `lnro` with `by` if (splitprint) { print(utils::head(out, n = topn), row.names = FALSE) msg <- "--- %s rows omitted. See %s?print.marginaleffects ---" msg <- sprintf(msg, nrow(x) - 2 * topn, rec) cat(msg, "\n") # remove colnames tmp <- utils::capture.output(print(utils::tail(out, n = topn), row.names = FALSE)) tmp <- paste(tmp[-1], collapse = "\n") cat(tmp) } else { print(out, row.names = FALSE) } cat("\n") # cat("Model type: ", attr(x, "model_type"), "\n") # if (!inherits(x, "hypotheses.summary") && isTRUE(getOption("marginaleffects_print_type", default = TRUE))) { # cat("Prediction type: ", attr(x, "type"), "\n") # } ## This is tricky to extract nicely when transform_* are passed from avg_comparisons to comparisons. I could certainly figure it out, but at the same time, I don't think the print method should return information that is immediately visible from the call. 
This is different from `type`, where users often rely on the default value, which can change from model to model, so printing it is often # if (!is.null(attr(x, "comparison_label"))) { # cat("Pre-transformation: ", paste(attr(x, "comparison_label"), collapse = ""), "\n") # } # if (!is.null(attr(x, "transform_label"))) { # cat("Post-transformation: ", paste(attr(x, "transform_label"), collapse = ""), "\n") # } vg <- attr(x, "variables_grid") if (length(vg) > 0) { cat(sprintf("Results averaged over levels of: %s", paste(vg, collapse = ", ")), "\n") } if (ncol(x) <= ncols && isTRUE(column_names)) { cat("Columns:", paste(colnames(x), collapse = ", "), "\n") } if (isTRUE(type) && !is.null(attr(x, "type"))) { cat("Type: ", attr(x, "type"), "\n") } cat("\n") print_tail <- attr(x, "print_tail") if (!is.null(print_tail)) { cat(print_tail, "\n") } return(invisible(x)) } #' @noRd #' @export print.hypotheses <- print.marginaleffects #' @noRd #' @export print.predictions <- print.marginaleffects #' @noRd #' @export print.comparisons <- print.marginaleffects #' @noRd #' @export print.slopes <- print.marginaleffects marginaleffects/R/conformal.R0000755000176200001440000001003314541720224015711 0ustar liggesusersget_conformal_score <- function(x, score) { response_name <- insight::find_response(attr(x, "model")) response <- x[[response_name]] if (!is.numeric(response) && score != "softmax") { insight::format_error('The response must be numeric. Did you want to use `conformal_score="softmax"`?') } if (score == "residual_abs") { out <- abs(response - x$estimate) } else if (score == "residual_sq") { out <- (response - x$estimate)^2 } else if(score == "softmax") { model <- attr(x, "model") response <- x[[insight::find_response(model)]] if (is.numeric(response) && is_binary(response)) { # See p.4 of Angelopoulos, Anastasios N., and Stephen Bates. 2022. “A # Gentle Introduction to Conformal Prediction and Distribution-Free # Uncertainty Quantification.” arXiv. 
# https://doi.org/10.48550/arXiv.2107.07511. # 1 minus the softmax output of the true class out <- ifelse(response == 1, 1 - x$estimate, x$estimate) } else if ("group" %in% colnames(x)) { # HACK: is this fragile? I think `group` should always be character. idx <- as.character(response) == as.character(x$group) out <- 1 - x$estimate[idx] } else { insight::format_error("Failed to compute the conformity score.") } } return(out) } get_conformal_bounds <- function(x, score, conf_level) { model <- attr(x, "model") response_name <- insight::find_response(model) response <- x[[response_name]] d <- min(score[score > stats::quantile(score, probs = conf_level)]) if ("group" %in% colnames(x)) { q <- stats::quantile(score, probs = (length(score) + 1) * conf_level / length(score)) out <- x[x$estimate > (1 - q),] data.table::setDT(out) out <- out[, .(pred.set = list(unique(group))), by = c("rowid", response_name)] setorder(out, rowid) data.table::setDF(out) class(out) <- c("predictions", class(out)) attr(out, "variables_datagrid") <- response_name return(out) } else { # continuous outcome: conformity half-width x$pred.low <- x$estimate - d x$pred.high <- x$estimate + d } return(x) } conformal_split <- function(x, test, calibration, score, conf_level, ...) { # calibration # use original model---fitted on the training set---to make predictions in the calibration set # p_calib is the `predictions()` call, which we re-evaluate on newdata=calibration p_calib <- attr(x, "call") p_calib[["newdata"]] <- calibration p_calib[["vcov"]] <- FALSE # faster p_calib <- eval(p_calib) score <- get_conformal_score(p_calib, score = score) # test # use original model to make predictions in the test set p_test <- attr(x, "call") p_test[["newdata"]] <- test p_test <- eval(p_test) # bounds out <- get_conformal_bounds(p_test, score = score, conf_level = conf_level) return(out) } conformal_cv_plus <- function(x, test, R, score, conf_level, ...) 
{ # cross-validation train <- get_modeldata(attr(x, "model")) idx <- sample(seq_len(nrow(train)), nrow(train)) idx <- split(idx, ceiling(seq_along(idx) / (length(idx) / R))) scores <- NULL for (i in idx) { data_cv <- train[-i,] # re-fit the original model on training sets withholding the CV fold model_cv <- stats::update(attr(x, "model"), data = data_cv) # use the updated model to make out-of-fold predictions # call_cv is the `predictions()` call, which we re-evaluate in-fold: newdata=train[i,] call_cv <- attr(x, "call") call_cv[["model"]] <- model_cv call_cv[["newdata"]] <- train[i,] call_cv[["vcov"]] <- FALSE # faster pred_cv <- eval(call_cv) # save the scores form each fold scores <- c(scores, get_conformal_score(pred_cv, score = score)) } # test out <- attr(x, "call") out[["newdata"]] <- test out <- eval(out) # bounds out <- get_conformal_bounds(out, score = scores, conf_level = conf_level) return(out) } marginaleffects/R/methods.R0000644000176200001440000000202414541720224015372 0ustar liggesusers#' @noRd #' @export vcov.comparisons <- function(object, ...) { attr(object, "jacobian") %*% attr(object, "vcov") %*% t(attr(object, "jacobian")) } #' @noRd #' @export vcov.predictions <- vcov.comparisons #' @noRd #' @export vcov.hypotheses <- vcov.comparisons #' @noRd #' @export vcov.slopes <- vcov.comparisons #' @noRd #' @export vcov.marginalmeans <- vcov.comparisons #' @export #' @noRd coef.comparisons <- function(object, ...) 
{ if (!is.null(object$estimate)) { out <- object$estimate if (is.null(names(out))) { lab <- tryCatch(get_term_labels(object), error = function(e) NULL) if (length(lab) == length(out)) { out <- stats::setNames(out, lab) } } return(out) } else { stop("The input object does not contain an 'estimate' element.") } } #' @export #' @noRd coef.slopes <- coef.comparisons #' @export #' @noRd coef.marginalmeans <- coef.comparisons #' @export #' @noRd coef.predictions <- coef.comparisons #' @export #' @noRd coef.hypotheses <- coef.comparisonsmarginaleffects/R/methods_inferences_simulation.R0000644000176200001440000000246314541720224022046 0ustar liggesusers#' @rdname get_vcov #' @export get_vcov.inferences_simulation <- function(model, ...) return(NULL) #' @rdname sanitize_model_specific #' @export sanitize_model_specific.inferences_simulation <- function(model, vcov = FALSE, ...) { tmp <- model class(tmp) <- setdiff(class(tmp), "inferences_simulation") B <- get_coef(tmp) # at this stage, the `vcov` has been pre-processed, so we get all the "HC3" shortcuts V <- get_vcov(tmp, vcov = vcov) simfun <- attr(model, "inferences_simulate") R <- attr(model, "inferences_R") attr(model, "inferences_coefmat") <- simfun(R = R, B = B, V = V) return(model) } #' @rdname get_predict #' @export get_predict.inferences_simulation <- function(model, newdata, ...) { coefmat <- attr(model, "inferences_coefmat") # coefmat: BxM checkmate::assert_matrix(coefmat) # remove the special class to avoid calling myself mod <- model class(mod) <- setdiff(class(mod), "inferences_simulation") FUN <- function(coefs) { mod_tmp <- set_coef(mod, coefs = coefs) get_predict(mod_tmp, newdata = newdata)$estimate } # should never compute SE via delta method for these models out <- get_predict(mod, newdata = newdata, ...) 
attr(out, "posterior_draws") <- apply(coefmat, MARGIN = 1, FUN = FUN) return(out) }marginaleffects/R/methods_lme4.R0000644000176200001440000000254114541720224016317 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @keywords internal #' @export set_coef.merMod <- function(model, coefs, ...) { # in 'merMod', predictions work the slot called "beta", which is unnamed # `fixef(model)` returns the same thing named beta <- methods::slot(model, "beta") beta[match(names(coefs), names(lme4::fixef(model)))] <- as.numeric(coefs) methods::slot(model, "beta") <- beta model } #' @include get_coef.R #' @rdname get_coef #' @export get_coef.merMod <- function(model, ...) { lme4::fixef(model) } #' @rdname get_predict #' @export get_predict.merMod <- function(model, newdata = insight::get_data(model), type = "response", ...) { get_predict.default(model, newdata = newdata, type = type, ...) } #' @rdname set_coef #' @export set_coef.lmerModLmerTest <- set_coef.merMod #' @rdname get_coef #' @export get_coef.lmerModLmerTest <- get_coef.merMod #' @rdname get_predict #' @export get_predict.lmerModLmerTest <- get_predict.merMod #' @rdname set_coef #' @export set_coef.lmerMod <- set_coef.merMod #' @rdname get_coef #' @export get_coef.lmerMod <- get_coef.merMod #' @rdname get_predict #' @export get_predict.lmerMod <- get_predict.merMod marginaleffects/R/methods_mclogit.R0000644000176200001440000000333314541720224017114 0ustar liggesusers#' @rdname get_group_names #' @export get_group_names.mblogit <- function(model, type, ...) { out <- get_predict(model, type = type) if ("group" %in% colnames(out)) { out <- unique(out$group) } else { out <- "main_marginaleffects" } return(out) } #' @include sanity_model.R #' @rdname sanitize_model_specific #' @export sanitize_model_specific.mblogit <- function(model, calling_function = "marginaleffects", ...) 
{ if (calling_function == "marginaleffects") { variables <- insight::find_variables(model, flatten = TRUE) dat <- insight::get_data(model) dat <- dat[, intersect(variables, colnames(dat))] flag <- any(sapply(dat, is.character)) if (isTRUE(flag)) { stop("Cannot compute marginal effects for models of class `mblogit` when the data includes character variables. Please convert character variables to factors in the dataset before fitting the model, and call `marginaleffects` again.", call. = FALSE) } } return(model) } #' @rdname get_predict #' @export get_predict.mblogit <- function(model, newdata = insight::get_data(model), type = "response", ...) { out <- suppressMessages( get_predict.multinom(model = model, newdata = newdata, type = type, ...)) return(out) } #' @rdname get_coef #' @export get_coef.mblogit <- function(model, ...) { # get_coef.default uses `insight::get_parameters`, but does not combine # Response and Parameter columns. This also matches naming scheme from # `vcov` stats::coef(model) }marginaleffects/R/sanitize_comparison.R0000644000176200001440000001010114541720224020002 0ustar liggesuserswmean <- function(x, w) { stats::weighted.mean(x, w) } comparison_function_dict <- list( # default = difference between predictions "difference" = function(hi, lo) hi - lo, "differenceavg" = function(hi, lo) mean(hi - lo), "differenceavgwts" = function(hi, lo, w) wmean(hi - lo, w), # slopes and elasticities "dydx" = function(hi, lo, eps) (hi - lo) / eps, "eyex" = function(hi, lo, eps, y, x) (hi - lo) / eps * (x / y), "eydx" = function(hi, lo, eps, y, x) ((hi - lo) / eps) / y, "dyex" = function(hi, lo, eps, x) ((hi - lo) / eps) * x, # average slopes and elasticities "dydxavg" = function(hi, lo, eps) mean((hi - lo) / eps), "eyexavg" = function(hi, lo, eps, y, x) mean((hi - lo) / eps * (x / y)), "eydxavg" = function(hi, lo, eps, y, x) mean(((hi - lo) / eps) / y), "dyexavg" = function(hi, lo, eps, x) mean(((hi - lo) / eps) * x), "dydxavgwts" = function(hi, lo, eps, w) 
wmean((hi - lo) / eps, w), "eyexavgwts" = function(hi, lo, eps, y, x, w) wmean((hi - lo) / eps * (x / y), w), "eydxavgwts" = function(hi, lo, eps, y, x, w) wmean(((hi - lo) / eps) / y, w), "dyexavgwts" = function(hi, lo, eps, x, w) wmean(((hi - lo) / eps) * x, w), # ratios "ratio" = function(hi, lo) hi / lo, "ratioavg" = function(hi, lo) mean(hi) / mean(lo), "ratioavgwts" = function(hi, lo, w) wmean(hi, w) / wmean(lo, w), "lnratio" = function(hi, lo) log(hi / lo), "lnratioavg" = function(hi, lo) log(mean(hi) / mean(lo)), "lnratioavgwts" = function(hi, lo, w) log(wmean(hi, w) / wmean(lo, w)), "lnor" = function(hi, lo) log((hi / (1 - hi)) / (lo / (1 - lo))), "lnoravg" = function(hi, lo) log((mean(hi) / (1 - mean(hi))) / (mean(lo) / (1 - mean(lo)))), "lnoravgwts" = function(hi, lo, w) log((wmean(hi, w) / (1 - wmean(hi, w))) / (wmean(lo, w) / (1 - wmean(lo, w)))), # others "lift" = function(hi, lo) (hi - lo) / lo, "liftavg" = function(hi, lo) (mean(hi - lo)) / mean(lo), "expdydx" = function(hi, lo, eps) ((exp(hi) - exp(lo)) / exp(eps)) / eps, "expdydxavg" = function(hi, lo, eps) mean(((exp(hi) - exp(lo)) / exp(eps)) / eps), "expdydxavgwts" = function(hi, lo, eps, w) wmean(((exp(hi) - exp(lo)) / exp(eps)) / eps, w) ) comparison_label_dict <- list( "difference" = "%s - %s", "differenceavg" = "mean(%s) - mean(%s)", "differenceavgwts" = "mean(%s) - mean(%s)", "dydx" = "dY/dX", "eyex" = "eY/eX", "eydx" = "eY/dX", "dyex" = "dY/eX", "dydxavg" = "mean(dY/dX)", "eyexavg" = "mean(eY/eX)", "eydxavg" = "mean(eY/dX)", "dyexavg" = "mean(dY/eX)", "dydxavg" = "mean(dY/dX)", "eyexavg" = "mean(eY/eX)", "eydxavg" = "mean(eY/dX)", "dyexavg" = "mean(dY/eX)", "dydxavgwts" = "mean(dY/dX)", "eyexavgwts" = "mean(eY/eX)", "eydxavgwts" = "mean(eY/dX)", "dyexavgwts" = "mean(dY/eX)", "ratio" = "%s / %s", "ratioavg" = "mean(%s) / mean(%s)", "ratioavgwts" = "mean(%s) / mean(%s)", "lnratio" = "ln(%s / %s)", "lnratioavg" = "ln(mean(%s) / mean(%s))", "lnratioavgwts" = "ln(mean(%s) / mean(%s))", "lnor" 
= "ln(odds(%s) / odds(%s))", "lnoravg" = "ln(odds(%s) / odds(%s))", "lnoravgwts" = "ln(odds(%s) / odds(%s))", "lift" = "lift", "liftavg" = "liftavg", "expdydx" = "exp(dY/dX)" ) sanity_comparison <- function(comparison) { # wts versions are used internally but not available directly to users valid <- names(comparison_function_dict) valid <- valid[!grepl("wts$", valid)] checkmate::assert( checkmate::check_choice(comparison, choices = valid), checkmate::check_function(comparison)) } sanitize_transform <- function(x) { good <- c("exp", "ln") checkmate::assert( checkmate::check_choice(x, choices = good, null.ok = TRUE), checkmate::check_function(x)) if (is.null(x)) { return(x) } else if (is.function(x)) { out <- list(x) names(out) <- deparse(substitute(x)) } else if (x == "exp") { out <- list("exp" = exp) } else if (x == "ln") { out <- list("ln" = log) } return(out) } marginaleffects/R/sanitize_newdata.R0000644000176200001440000002352314541720224017267 0ustar liggesuserssanitize_newdata_call <- function(scall, newdata = NULL, model) { if (rlang::quo_is_call(scall)) { if (grepl("^datagrid", rlang::call_name(scall))) { if (!"model" %in% rlang::call_args_names(scall)) { scall <- rlang::call_modify(scall, model = model) } } else if (rlang::call_name(scall) %in% "visualisation_matrix") { if (!"x" %in% rlang::call_args_names(scall)) { scall <- rlang::call_modify(scall, x = get_modeldata) } } out <- rlang::eval_tidy(scall) } else { out <- newdata } return(out) } build_newdata <- function(model, newdata, by, modeldata) { if (isTRUE(checkmate::check_data_frame(by))) { by <- setdiff(colnames(by), "by") } else if (isTRUE(checkmate::check_flag(by))) { by <- NULL } args <- list(model = model) for (b in by) { args[[b]] <- unique } newdata_explicit <- TRUE # NULL -> modeldata if (is.null(newdata)) { newdata <- modeldata newdata_explicit <- FALSE # string -> datagrid() } else if (identical(newdata, "mean")) { newdata <- do.call("datagrid", args) } else if (identical(newdata, "median")) 
{ args[["FUN_numeric"]] <- args[["FUN_integer"]] <- args[["FUN_logical"]] <- function(x) stats::median(x, na.rm = TRUE) newdata <- do.call("datagrid", args) } else if (identical(newdata, "tukey")) { args[["FUN_numeric"]] <- function(x) stats::fivenum(x, na.rm = TRUE) newdata <- do.call("datagrid", args) } else if (identical(newdata, "grid")) { args[["FUN_numeric"]] <- function(x) stats::fivenum(x, na.rm = TRUE) args[["FUN_factor"]] <- args[["FUN_character"]] <- args[["FUN_logical"]] <- unique newdata <- do.call("datagrid", args) # grid with all unique values of categorical variables, and numerics at their means } else if (identical(newdata, "marginalmeans")) { args[["FUN_factor"]] <- args[["FUN_character"]] <- args[["FUN_logical"]] <- unique newdata <- do.call("datagrid", args) # Issue #580: outcome should not duplicate grid rows dv <- hush(insight::find_response(model)) if (isTRUE(dv %in% colnames(newdata))) { newdata[[dv]] <- get_mean_or_mode(newdata[[dv]]) newdata <- unique(newdata) } } if (!inherits(newdata, "data.frame")) { msg <- "Unable to extract the data from model of class `%s`. This can happen in a variety of cases, such as when a `marginaleffects` package function is called from inside a user-defined function, or using an `*apply()`-style operation on a list. Please supply a data frame explicitly via the `newdata` argument." 
msg <- sprintf(msg, class(model)[1]) insight::format_error(msg) } # otherwise we get a warning in setDT() if (inherits(model, "mlogit") && isTRUE(inherits(modeldata[["idx"]], "idx"))) { modeldata$idx <- NULL } # required by some model-fitting functions data.table::setDT(modeldata) # required for the type of column indexing to follow data.table::setDF(newdata) out <- list( "newdata" = newdata, "newdata_explicit" = newdata_explicit, "modeldata" = modeldata ) return(out) } add_wts_column <- function(wts, newdata) { # weights must be available in the `comparisons()` function, NOT in # `tidy()`, because comparisons will often duplicate newdata for # multivariate outcomes and the like. We need to track which row matches # which. if (!is.null(wts)) { flag1 <- isTRUE(checkmate::check_string(wts)) && isTRUE(wts %in% colnames(newdata)) flag2 <- isTRUE(checkmate::check_numeric(wts, len = nrow(newdata))) if (!flag1 && !flag2) { msg <- sprintf("The `wts` argument must be a numeric vector of length %s, or a string which matches a column name in `newdata`. If you did not supply a `newdata` explicitly, `marginaleffects` extracted it automatically from the model object, and the `wts` variable may not have been available. The easiest strategy is often to supply a data frame such as the original data to `newdata` explicitly, and to make sure that it includes an appropriate column of weights, identified by the `wts` argument.", nrow(newdata)) stop(msg, call. 
= FALSE) } } # weights: before sanitize_variables if (!is.null(wts) && isTRUE(checkmate::check_string(wts))) { newdata[["marginaleffects_wts_internal"]] <- newdata[[wts]] } else { newdata[["marginaleffects_wts_internal"]] <- wts } return(newdata) } set_newdata_attributes <- function(model, modeldata, newdata, newdata_explicit) { attr(newdata, "newdata_explicit") <- newdata_explicit # column classes mc <- Filter(function(x) is.matrix(modeldata[[x]]), colnames(modeldata)) cl <- Filter(function(x) is.character(modeldata[[x]]), colnames(modeldata)) cl <- lapply(modeldata[, ..cl], unique) vc <- attributes(modeldata)$marginaleffects_variable_class column_attributes <- list( "matrix_columns" = mc, "character_levels" = cl, "variable_class" = vc) newdata <- set_marginaleffects_attributes(newdata, column_attributes, prefix = "newdata_") # {modelbased} sometimes attaches useful attributes exclude <- c("class", "row.names", "names", "data", "reference") modelbased_attributes <- get_marginaleffects_attributes(newdata, exclude = exclude) newdata <- set_marginaleffects_attributes(newdata, modelbased_attributes, prefix = "newdata_") # original data attr(newdata, "newdata_modeldata") <- modeldata if (is.null(attr(newdata, "marginaleffects_variable_class"))) { newdata <- set_variable_class(newdata, model = model) } return(newdata) } clean_newdata <- function(model, newdata) { # rbindlist breaks on matrix columns idx <- sapply(newdata, function(x) class(x)[1] == "matrix") if (any(idx)) { # Issue #363 # unpacking matrix columns works with {mgcv} but breaks {mclogit} if (inherits(model, "gam")) { newdata <- unpack_matrix_cols(newdata) } else { newdata <- newdata[, !idx, drop = FALSE] } } # we will need this to merge the original data back in, and it is better to # do it in a centralized upfront way. 
if (!"rowid" %in% colnames(newdata)) { newdata$rowid <- seq_len(nrow(newdata)) } # mlogit: each row is an individual-choice, but the index is not easily # trackable, so we pre-sort it here, and the sort in `get_predict()`. We # need to cross our fingers, but this probably works. if (inherits(model, "mlogit") && isTRUE(inherits(newdata[["idx"]], "idx"))) { idx <- list(newdata[["idx"]][, 1], newdata[["idx"]][, 2]) newdata <- newdata[order(newdata[["idx"]][, 1], newdata[["idx"]][, 2]),] } # placeholder response resp <- insight::find_response(model) if (isTRUE(checkmate::check_character(resp, len = 1)) && !resp %in% colnames(newdata)) { y <- hush(insight::get_response(model)) # protect df or matrix response if (isTRUE(checkmate::check_atomic_vector(y))) { newdata[[resp]] <- y[1] } } return(newdata) } sanitize_newdata <- function(model, newdata, by, modeldata, wts) { checkmate::assert( checkmate::check_data_frame(newdata, null.ok = TRUE), checkmate::check_choice(newdata, choices = c("mean", "median", "tukey", "grid", "marginalmeans")), combine = "or") tmp <- build_newdata(model = model, newdata = newdata, by = by, modeldata = modeldata) newdata <- tmp[["newdata"]] modeldata <- tmp[["modeldata"]] newdata_explicit <- tmp[["newdata_explicit"]] newdata <- clean_newdata(model, newdata) newdata <- add_wts_column(newdata = newdata, wts = wts) newdata <- set_newdata_attributes( model = model, modeldata = modeldata, newdata = newdata, newdata_explicit = newdata_explicit) # sort rows of output when the user explicitly calls `by` or `datagrid()` # otherwise, we return the same data frame in the same order, but # here it makes sense to sort for a clean output. 
sortcols <- attr(newdata, "newdata_variables_datagrid") if (isTRUE(checkmate::check_character(by))) { sortcols <- c(by, sortcols) } sortcols <- intersect(sortcols, colnames(newdata)) out <- data.table::copy(newdata) if (length(sortcols) > 0) { data.table::setorderv(out, cols = sortcols) } return(out) } dedup_newdata <- function(model, newdata, by, wts, comparison = "difference", cross = FALSE, byfun = NULL) { flag <- isTRUE(checkmate::check_string(comparison, pattern = "avg")) if (!flag && ( isFALSE(by) || # weights only make sense when we are marginalizing !is.null(wts) || !is.null(byfun) || !isFALSE(cross) || isFALSE(getOption("marginaleffects_dedup", default = TRUE)))) { return(newdata) } vclass <- attr(newdata, "marginaleffects_variable_class") # copy to allow mod by reference later without overwriting newdata out <- data.table(newdata) dv <- hush(unlist(insight::find_response(model), use.names = FALSE)) if (isTRUE(checkmate::check_string(dv)) && dv %in% colnames(out)) { out[, (dv) := NULL] vclass <- vclass[names(vclass) != dv] } if ("rowid" %in% colnames(out)) { out[, "rowid" := NULL] } categ <- c("factor", "character", "logical", "strata", "cluster", "binary") if (!all(vclass %in% categ)) { return(newdata) } cols <- colnames(out) out <- out[, .("marginaleffects_wts_internal" = .N), by = cols] data.table::setDF(out) out[["rowid_dedup"]] <- seq_len(nrow(out)) attr(out, "marginaleffects_variable_class") <- vclass return(out) }marginaleffects/R/tinytest.R0000644000176200001440000001235714560035476015635 0ustar liggesusers#' `tinytest` helper #' #' @export #' @keywords internal expect_slopes <- function( object, n_unique = NULL, pct_na = 5, se = TRUE, ...) 
{
    insight::check_if_installed("tinytest")
    object <- hush(slopes(object, ...))
    diff <- ""
    # class
    fail_class <- !isTRUE(checkmate::check_class(object, "slopes"))
    if (fail_class) {
        msg <- sprintf("Wrong class: `%s`.", class(object)[1])
        diff <- c(diff, msg)
    }
    # tidy()
    tid <- hush(tidy(object))
    fail_tidy <- !isTRUE(checkmate::check_data_frame(tid))
    if (fail_tidy) {
        msg <- "tidy() failed to return a data frame."
        diff <- c(diff, msg)
    }
    # na
    fail_na <- isTRUE(hush(mean(is.na(object$estimate)) * 100 > pct_na))
    if (fail_na) {
        msg <- sprintf("More than %s of missing values.", pct_na)
        diff <- c(diff, msg)
    }
    # unique
    fail_unique <- isTRUE(hush(length(unique(object$estimate)) < n_unique - 1))
    if (fail_unique) {
        msg <- sprintf("Fewer than %s unique values.", n_unique)
        diff <- c(diff, msg)
    }
    # std.error
    # Bug fix: this branch previously re-used the "Fewer than %s unique
    # values." message copy-pasted from the uniqueness check above, so a
    # missing standard error was reported with a misleading diagnostic.
    # Use the same wording as expect_predictions().
    if (isTRUE(se) && !"std.error" %in% colnames(object)) {
        msg <- "No standard error."
        diff <- c(diff, msg)
        fail_se <- TRUE
    } else {
        fail_se <- FALSE
    }
    # diff message
    diff <- paste(diff, collapse = "\n")
    # pass/fail
    fail <- fail_class || fail_tidy || fail_na || fail_unique || fail_se
    # tinytest object
    out <- tinytest::tinytest(
        result = !fail,
        call = sys.call(sys.parent(1)),
        diff = diff)
    return(out)
}


#' `tinytest` helper
#'
#' @export
#' @keywords internal
expect_predictions <- function(object,
                               se = TRUE,
                               n_row = NULL,
                               n_col = NULL) {
    insight::check_if_installed("tinytest")
    diff <- ""
    # class
    fail_class <- !isTRUE(checkmate::check_class(object, "predictions"))
    if (fail_class) {
        msg <- sprintf("Wrong class: `%s`.", class(object)[1])
        diff <- c(diff, msg)
    }
    # std.error
    if (isTRUE(se) && !"std.error" %in% colnames(object)) {
        msg <- "No standard error."
diff <- c(diff, msg)
        # record the failure so the pass/fail computation below is defined
        fail_se <- TRUE
    } else {
        fail_se <- FALSE
    }
    # rows and cols
    if (isTRUE(n_row > nrow(object))) {
        msg <- sprintf("Number of rows: %s", nrow(object))
        diff <- c(diff, msg)
        fail_row <- TRUE
    } else {
        fail_row <- FALSE
    }
    if (isTRUE(n_col > ncol(object))) {
        msg <- sprintf("Number of columns: %s", ncol(object))
        diff <- c(diff, msg)
        fail_col <- TRUE
    } else {
        fail_col <- FALSE
    }
    # diff message
    diff <- paste(diff, collapse = "\n")
    # pass/fail
    fail <- fail_class || fail_se || fail_row || fail_col
    # tinytest object
    out <- tinytest::tinytest(
        result = !fail,
        call = sys.call(sys.parent(1)),
        diff = diff)
    return(out)
}


#' `tinytest` helper
#'
#' Compare `marginaleffects` results against a `margins` object, within a
#' relative tolerance, and return a `tinytest` pass/fail object.
#'
#' @export
#' @keywords internal
expect_margins <- function(results,
                           margins_object,
                           se = TRUE,
                           tolerance = 1e-5,
                           verbose = FALSE) {
    insight::check_if_installed("tinytest")
    # relative equality within tolerance
    is_equal <- function(x, y) {
        all(abs((x - y) / x) < tolerance)
    }
    results$type <- NULL
    margins_object <- data.frame(margins_object)
    term_names <- unique(results$term)
    flag <- TRUE
    # dydx
    for (tn in term_names) {
        unknown <- results[results$term == tn, "estimate"]
        lab <- paste0("dydx_", tn)
        if (lab %in% colnames(margins_object)) {
            known <- as.numeric(margins_object[, lab])
            tmp <- is_equal(known, unknown)
            if (isFALSE(tmp)) {
                flag <- FALSE
                if (isTRUE(verbose)) print(sprintf("dydx: %s", tn))
            }
        }
    }
    # std.error
    if (isTRUE(se) && "std.error" %in% colnames(results)) {
        for (tn in term_names) {
            lab_se <- paste0("SE_dydx_", tn)
            lab_var <- paste0("Var_dydx_", tn)
            if (lab_se %in% colnames(margins_object)) {
                unknown <- results[results$term == tn, "std.error"]
                known <- as.numeric(margins_object[, lab_se])
                tmp <- is_equal(known, unknown)
                if (isFALSE(tmp)) {
                    flag <- FALSE
                    if (isTRUE(verbose)) print(sprintf("se: %s", tn))
                }
            } else if (lab_var %in% colnames(margins_object)) {
                unknown <- results[results$term == tn, "std.error"]
                known <- sqrt(as.numeric(margins_object[, lab_var]))
                tmp <- is_equal(known, unknown)
                if (isFALSE(tmp)) {
                    flag <- FALSE
                    if (isTRUE(verbose)) print(sprintf("Var: %s", tn))
                }
            } else {
                flag <- FALSE
                # Bug fix: this diagnostic previously printed `lab`, a stale
                # variable left over from the dydx loop above; report the
                # column that was actually looked up.
                if (isTRUE(verbose)) print(sprintf("missing column: %s", lab_se))
            }
        }
    }
    # tinytest object
    # Bug fix: the original passed `diff = diff`, but no local `diff` is ever
    # defined in this function, so the name resolved to the base::diff()
    # function. NA_character_ is the tinytest() default for "no diff".
    out <- tinytest::tinytest(
        result = flag,
        call = sys.call(sys.parent(1)),
        diff = NA_character_)
    return(out)
}
marginaleffects/R/methods_rstanarm.R0000644000176200001440000000014714541720224017305 0ustar liggesusers
#' @include get_predict.R
#' @rdname get_predict
#' @export
get_predict.stanreg <- get_predict.brmsfit
marginaleffects/R/bootstrap_rsample.R0000644000176200001440000000500514541720224017471 0ustar liggesusers
bootstrap_rsample <- function(model, INF_FUN, ...) {
    # attached by `inferences()`
    # Bug fix: `conf_type` was previously extracted and asserted twice, once
    # against c("perc", "bca") and once against c("perc", "norm", "basic",
    # "bca"); the second copy (stale leftover from the {boot} backend) was
    # dead code because the stricter first assert always ran first. Keep the
    # strict check, which matches what the rsample interval functions below
    # actually support.
    conf_type <- attr(model, "inferences_conf_type")
    checkmate::assert_choice(conf_type, choices = c("perc", "bca"))
    # Bug fix: this backend calls rsample::bootstraps()/int_pctl()/int_bca(),
    # not {boot}, so check for the package that is actually used.
    insight::check_if_installed("rsample")
    # bootstrap using the original data and call
    modcall <- insight::get_call(model)
    modeldata <- get_modeldata(model, additional_variables = FALSE)
    data.table::setDF(modeldata)
    # evaluate the {marginaleffects} call to get output without inferences()
    # use ... because arguments are not the same for different {marginaleffects} functions
    dots <- list(...)
    dots[["vcov"]] <- FALSE
    # avoid recursion
    attr(model, "inferences_method") <- NULL
    out <- do.call(INF_FUN, c(list(model), dots))
    # default confidence level may be implicit in original call, but we need numeric
    if (is.null(dots[["conf_level"]])) {
        conf_level <- 0.95
    } else {
        conf_level <- dots[["conf_level"]]
    }
    bootfun <- function(data, ...)
{ modcall[["data"]] <- data modboot <- eval(modcall) modboot <- eval(modboot) args <- c(list(modboot, modeldata = data$data), dots) out <- do.call(INF_FUN, args) out <- tidy(out) # `rsample` averages by `term` columns; we don't use it anyway and assume things line up out$term <- seq_len(nrow(out)) return(out) } args <- attr(model, "inferences_dots") args[["data"]] <- modeldata args[["apparent"]] <- TRUE # require for "bca" splits <- do.call(rsample::bootstraps, args) splits$estimates <- lapply(splits$splits, bootfun) if (isTRUE(conf_type == "bca")) { ci <- rsample::int_bca( splits, statistics = estimates, .fn = bootfun, alpha = 1 - conf_level) } else { ci <- rsample::int_pctl( splits, statistics = estimates, alpha = 1 - conf_level) } out$conf.low <- ci$.lower out$conf.high <- ci$.upper attr(out, "inferences") <- splits draws <- lapply(splits$estimates, function(x) as.matrix(x[, "estimate", drop = FALSE])) draws[[length(draws)]] <- NULL # apparent=TRUE appended the original estimates to the end draws <- do.call("cbind", draws) colnames(draws) <- NULL attr(out, "posterior_draws") <- draws return(out) }marginaleffects/R/methods_betareg.R0000644000176200001440000000400614541720224017065 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @export set_coef.betareg <- function(model, coefs, ...) { # coefs are split between mean coefs (which can be length 0) and precision coefs # (which must be length > 0 and always start with "(phi)_" due to get_coef.betareg(), # to match with get_varcov(., component = "all") output). In betareg object, these # are stored as two elements in a list, with precision coefs lacking the "(phi)_" # prefix, so we remove it. 
mean_coefs <- coefs[names(coefs) != "(phi)" & !startsWith(names(coefs), "(phi)_")] precision_coefs <- coefs[names(coefs) == "(phi)" | startsWith(names(coefs), "(phi)_")] names(precision_coefs) <- sub("(phi)_", "", names(precision_coefs), fixed = TRUE) if (length(mean_coefs) > 0) { model[["coefficients"]]$mean[names(mean_coefs)] <- mean_coefs } model[["coefficients"]]$precision[names(precision_coefs)] <- precision_coefs model } #' @include get_coef.R #' @rdname get_coef #' @export get_coef.betareg <- function(model, ...) { mean_coefs <- model$coefficients$mean precision_coefs <- model$coefficients$precision # precision coefs have "(phi)_" appended to their names in covariance matrix; mean coefficients # never have this, so no risk of duplicate names, and precision coefs are always determined. # Mimicking coef.betareg(., "full"). if (!identical(names(precision_coefs), "(phi)")) { names(precision_coefs) <- paste0("(phi)_", names(precision_coefs)) } c(mean_coefs, precision_coefs) } #' @include get_predict.R #' @rdname get_predict #' @export get_predict.betareg <- function(model, newdata, ...) { out <- stats::predict(model, newdata = newdata) out <- data.frame(rowid = seq_len(nrow(newdata)), estimate = out) return(out) } #' @rdname sanitize_model_specific sanitize_model_specific.betareg <- function(model, ...) { insight::check_if_installed("insight", minimum_version = "0.17.1") return(model) } marginaleffects/R/methods_mlm.R0000644000176200001440000000110714557752334016256 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @export set_coef.mlm <- function(model, coefs, ...) { model$coefficients[] <- coefs return(model) } #' @include get_coef.R #' @rdname get_coef #' @export get_coef.mlm <- function(model, ...) { out <- insight::get_parameters(model, ...) 
out <- stats::setNames( out$Estimate, sprintf("%s:%s", out$Response, out$Parameter)) return(out) } #' @include get_group_names.R #' @rdname get_group_names #' @export get_group_names.mlm <- function(model, ...) { resp <- insight::get_response(model) return(names(resp)) } marginaleffects/R/get_contrast_data_factor.R0000644000176200001440000002111714541720224020756 0ustar liggesusersget_contrast_data_factor <- function(model, newdata, variable, cross, first_cross, modeldata, ...) { if (is.factor(newdata[[variable$name]])) { levs <- levels(newdata[[variable$name]]) convert_to_factor <- TRUE } else if (get_variable_class(newdata, variable$name, "binary")) { levs <- 0:1 convert_to_factor <- FALSE } else { msg <- "The `%s` variable is treated as a categorical (factor) variable, but the original data is of class %s. It is safer and faster to convert such variables to factor before fitting the model and calling a `marginaleffects` function." msg <- sprintf(msg, variable$name, class(newdata[[variable$name]])[1]) warn_once(msg, "marginaleffects_warning_factor_on_the_fly_conversion") if (is.factor(modeldata[[variable$name]])) { levs <- levels(modeldata[[variable$name]]) convert_to_factor <- TRUE } else { levs <- sort(unique(modeldata[[variable$name]])) convert_to_factor <- FALSE } } # string shortcuts flag <- checkmate::check_choice(variable$value, c("reference", "revreference", "pairwise", "revpairwise", "sequential", "revsequential", "all", "minmax")) if (isTRUE(flag)) { levs_idx <- contrast_categories_shortcuts(levs, variable, interaction) # custom data frame or function } else if (isTRUE(checkmate::check_function(variable$value)) || isTRUE(checkmate::check_data_frame(variable$value))) { out <- contrast_categories_custom(variable, newdata) return(out) # vector of two values } else if (isTRUE(checkmate::check_atomic_vector(variable$value, len = 2))) { if (is.character(variable$value)) { tmp <- modeldata[[variable$name]] if (any(!variable$value %in% as.character(tmp))) { msg 
<- "Some of the values supplied to the `variables` argument were not found in the dataset." insight::format_error(msg) } idx <- match(variable$value, as.character(tmp)) levs_idx <- data.table::data.table(lo = tmp[idx[1]], hi = tmp[idx[[2]]]) } else if (is.numeric(variable$value)) { tmp <- newdata[[variable$name]] if (convert_to_factor) { levs_idx <- data.table::data.table( lo = factor(as.character(variable$value[1]), levels = levels(tmp)), hi = factor(as.character(variable$value[2]), levels = levels(tmp))) } else { levs_idx <- data.table::data.table(lo = variable$value[1], hi = variable$value[2]) } } else { levs_idx <- data.table::data.table(lo = variable$value[1], hi = variable$value[2]) } } tmp <- contrast_categories_processing(first_cross, levs_idx, levs, variable, newdata) lo <- tmp[[1]] hi <- tmp[[2]] original <- tmp[[3]] if (is.factor(newdata[[variable$name]]) || isTRUE(convert_to_factor)) { lo[[variable$name]] <- factor(lo[["marginaleffects_contrast_lo"]], levels = levs) hi[[variable$name]] <- factor(hi[["marginaleffects_contrast_hi"]], levels = levs) } else { lo[[variable$name]] <- lo[["marginaleffects_contrast_lo"]] hi[[variable$name]] <- hi[["marginaleffects_contrast_hi"]] } contrast_label <- hi$marginaleffects_contrast_label contrast_null <- hi$marginaleffects_contrast_hi == hi$marginaleffects_contrast_lo tmp <- !grepl("^marginaleffects_contrast", colnames(lo)) lo <- lo[, tmp, with = FALSE] hi <- hi[, tmp, with = FALSE] out <- list(rowid = original$rowid, lo = lo, hi = hi, original = original, ter = rep(variable$name, nrow(lo)), # lo can be different dimension than newdata lab = contrast_label, contrast_null = contrast_null) return(out) } contrast_categories_shortcuts <- function(levs, variable, interaction) { # index contrast orders based on variable$value if (isTRUE(variable$value %in% c("reference", "revreference"))) { # null contrasts are interesting with interactions if (!isTRUE(interaction)) { levs_idx <- data.table::data.table(lo = levs[1], hi = 
levs[2:length(levs)])
        } else {
            levs_idx <- data.table::data.table(lo = levs[1], hi = levs)
        }
    } else if (isTRUE(variable$value %in% c("pairwise", "revpairwise"))) {
        levs_idx <- CJ(lo = levs, hi = levs, sorted = FALSE)
        # null contrasts are interesting with interactions
        if (!isTRUE(interaction)) {
            levs_idx <- levs_idx[levs_idx$hi != levs_idx$lo, ]
            levs_idx <- levs_idx[match(levs_idx$lo, levs) < match(levs_idx$hi, levs), ]
        }
    } else if (isTRUE(variable$value %in% c("sequential", "revsequential"))) {
        levs_idx <- data.table::data.table(lo = levs[1:(length(levs) - 1)], hi = levs[2:length(levs)])
    } else if (isTRUE(variable$value == "all")) {
        levs_idx <- CJ(lo = levs, hi = levs, sorted = FALSE)
    } else if (isTRUE(variable$value == "minmax")) {
        levs_idx <- data.table::data.table(lo = levs[1], hi = levs[length(levs)])
    }
    if (isTRUE(variable$value %in% c("revreference", "revpairwise", "revsequential"))) {
        levs_idx <- levs_idx[, .(lo = hi, hi = lo)]
    }
    return(levs_idx)
}


contrast_categories_df <- function(variable) {
    # manual data frame: build a lo/hi contrast table from a user-supplied
    # data frame in `variable$value`. Columns may be named low/high or lo/hi,
    # otherwise the first two columns are taken positionally.
    if (all(c("low", "high") %in% colnames(variable$value))) {
        low <- variable$value$low
        high <- variable$value$high
    } else if (all(c("lo", "hi") %in% colnames(variable$value))) {
        # Bug fix: this branch previously read `variable$value$low` and
        # `variable$value$high`, which are NULL when the columns are named
        # "lo"/"hi", silently yielding an empty contrast table.
        low <- variable$value$lo
        high <- variable$value$hi
    } else {
        low <- variable$value[[1]]
        high <- variable$value[[2]]
    }
    levs_idx <- data.table::data.table(
        lo = low,
        hi = high
    )
    return(levs_idx)
}


contrast_categories_processing <- function(first_cross, levs_idx, levs, variable, newdata) {
    # internal option applied to the first of several contrasts when
    # interaction=TRUE to avoid duplication. when only the first contrast
    # flips, we get a negative sign, but if first increases and second
    # decreases, we get different total effects.
if (isTRUE(first_cross)) { idx <- match(levs_idx$hi, levs) >= match(levs_idx$lo, levs) if (sum(idx) > 0) { levs_idx <- levs_idx[idx, , drop = FALSE] } } levs_idx$isNULL <- levs_idx$hi == levs_idx$lo levs_idx$label <- suppressWarnings(tryCatch( sprintf(variable$label, levs_idx$hi, levs_idx$lo), error = function(e) variable$label)) levs_idx <- stats::setNames(levs_idx, paste0("marginaleffects_contrast_", colnames(levs_idx))) if (!"marginaleffects_contrast_label" %in% colnames(levs_idx) || all(levs_idx$marginaleffects_contrast_label == "custom")) { levs_idx[, "marginaleffects_contrast_label" := paste0(marginaleffects_contrast_hi, ", ", marginaleffects_contrast_lo)] } lo <- hi <- cjdt(list(newdata, levs_idx)) original <- data.table::rbindlist(rep(list(newdata), nrow(levs_idx))) return(list(lo, hi, original)) } contrast_categories_custom <- function(variable, newdata) { original <- newdata if (!"rowid" %in% colnames(original)) { original$rowid <- seq_len(nrow(original)) } hi <- lo <- original if (isTRUE(checkmate::check_function(variable$value))) { variables_df <- variable$value(newdata[[variable$name]]) } else if (isTRUE(checkmate::check_data_frame(variable$value))) { variables_df <- variable$value } checkmate::assert_data_frame(variables_df, nrows = nrow(original)) if (all(c("low", "high") %in% colnames(variables_df))) { lo[[variable$name]] <- variables_df[["low"]] hi[[variable$name]] <- variables_df[["high"]] } else if (all(c("lo", "hi") %in% colnames(variables_df))) { lo[[variable$name]] <- variables_df[["lo"]] hi[[variable$name]] <- variables_df[["hi"]] } else { lo[[variable$name]] <- variables_df[[1]] hi[[variable$name]] <- variables_df[[2]] } out <- list( rowid = original$rowid, lo = lo, hi = hi, original = original, ter = rep(variable$name, nrow(lo)), # lo can be different dimension than newdata lab = "custom", contrast_null = rep(FALSE, nrow(lo)) ) return(out) }marginaleffects/R/hush.R0000644000176200001440000000052214541720224014677 0ustar liggesusers# Execute 
code silently # Do not export to avoid conflict with modelsummary hush <- function(code) { void <- utils::capture.output({ out <- invisible( suppressMessages( suppressWarnings( tryCatch(code, error = function(e) NULL)) ) ) }) return(out) } marginaleffects/R/utils.R0000644000176200001440000001007414560035476015104 0ustar liggesusersget_unique_index <- function(x, term_only = FALSE) { idx <- c("term", "contrast", grep("^contrast_", colnames(x), value = TRUE)) if (!term_only) { by <- attr(x, "by") if (isTRUE(checkmate::check_data_frame(by))) { idx <- c(idx, colnames(by)) } else { idx <- c(idx, by) } explicit <- attr(x, "newdata_explicit") if (isTRUE(checkmate::check_character(explicit))) { idx <- explicit } } idx <- intersect(unique(idx), colnames(x)) if (length(idx) == 0) { return(NULL) } else if (length(idx) == 1) { return(x[[idx]]) } out <- x[, idx, drop = FALSE] for (i in ncol(out):2) { if (length(unique(out[[i]])) == 1) { out[[i]] <- NULL } } out <- apply(out, 1, paste, collapse = ", ") return(out) } get_marginaleffects_attributes <- function(x, exclude = NULL, include = NULL, include_regex = NULL) { out <- list() attr_names <- names(attributes(x)) attr_names <- setdiff(attr_names, exclude) if (!is.null(include)) attr_names <- intersect(attr_names, include) if (!is.null(include_regex)) attr_names <- attr_names[grepl(include_regex, attr_names)] for (n in attr_names) { out[[n]] <- attr(x, n) } return(out) } set_marginaleffects_attributes <- function(x, attr_cache, prefix = "") { for (n in names(attr_cache)) { attr(x, paste0(prefix, n)) <- attr_cache[[n]] } return(x) } warn_once <- function(msg, id) { msg <- c(msg, "", "This warning appears once per session.") if (isTRUE(getOption(id, default = TRUE))) { insight::format_warning(msg, call. 
= FALSE) opts <- list(FALSE) names(opts) <- id options(opts) } } # Cross join a list of data tables # Source: https://github.com/Rdatatable/data.table/issues/1717#issuecomment-545758165 cjdt <- function(dtlist) { Reduce(function(DT1, DT2) cbind(DT1, DT2[rep(1:.N, each = nrow(DT1))]), dtlist) } # recurse up. mostly useful for `tinytest` # this is dumb, but it's late and i don't feel like thinking about this evalup <- function(xcall) { out <- myTryCatch(eval(xcall)) if (inherits(out$error, "simpleError")) { msg <- out$error$message out <- NULL } else { msg <- NULL out <- out$value } for (i in 1:10) { if (is.null(out)) { out <- hush(eval(xcall, parent.frame(i))) } } if (is.null(out) && !is.null(msg)) stop(msg) return(out) } merge_by_rowid <- function(x, y) { # return data # very import to avoid sorting, otherwise bayesian draws won't fit predictions # merge only with rowid; not available for hypothesis mergein <- setdiff(colnames(y), colnames(x)) if ("rowid" %in% colnames(x) && "rowid" %in% colnames(y) && length(mergein) > 0) { idx <- c("rowid", mergein) if (!data.table::is.data.table(y)) { data.table::setDT(y) tmp <- y[, ..idx] } else { tmp <- y[, ..idx] } # TODO: this breaks in mclogit. maybe there's a more robust merge # solution for weird grouped data. But it seems fine because # `predictions()` output does include the original predictors. out <- tryCatch( merge(x, tmp, by = "rowid", sort = FALSE), error = function(e) x) } else { out <- x } return(out) } # faster than all(x %in% 0:1) is_binary <- function(x) { isTRUE(checkmate::check_integerish( x, null.ok = TRUE, upper = 1, lower = 0, any.missing = FALSE) ) } sub_named_vector <- function(x, y) { # issue 1005 xlab <- gsub("^`|`$", "", names(x)) ylab <- gsub("^`|`$", "", names(y)) idx <- match(ylab, xlab) if (length(stats::na.omit(idx)) > 0) { x[stats::na.omit(idx)] <- y[!is.na(idx)] } else if (length(y) == length(x)) { return(y) } else { stop("set_coef() substitution error. 
Please report on Github with a reproducible example: https://github.com/vincentarelbundock/marginaleffects/issues", call. = FALSE) } return(x) } marginaleffects/R/methods_mlr3.R0000644000176200001440000000115714541720224016335 0ustar liggesusers#' @include get_predict.R #' @rdname get_predict #' @keywords internal #' @export get_predict.Learner <- function(model, newdata, type = NULL, ...) { if (!is.null(type) && !type %in% model$predict_types) { msg <- sprintf("Valid `type` values: %s", paste(model$predict_types, collapse = ", ")) insight::format_error(msg) } out <- drop(stats::predict(model, newdata = newdata, predict_type = type)) out <- data.frame(rowid = seq_along(out), estimate = out) return(out) } #' @include get_vcov.R #' @rdname get_vcov #' @export get_vcov.Learner <- function(model, ...) { return(FALSE) }marginaleffects/R/plot_build.R0000644000176200001440000001277214541720224016077 0ustar liggesusersplot_preprocess <- function(dat, v_x, v_color = NULL, v_facet_1 = NULL, v_facet_2 = NULL, condition = NULL, modeldata = NULL) { for (v in names(condition$condition)) { fun <- function(x, lab) { idx <- match(x, sort(unique(x))) factor(lab[idx], levels = lab) } if (identical(condition$condition[[v]], "threenum")) { dat[[v]] <- fun(dat[[v]], c("-SD", "Mean", "+SD")) } else if (identical(condition$condition[[v]], "minmax")) { dat[[v]] <- fun(dat[[v]], c("Min", "Max")) } else if (identical(condition$condition[[v]], "quartile")) { dat[[v]] <- fun(dat[[v]], c("Q1", "Q2", "Q3")) } } if (get_variable_class(modeldata, v_x, "categorical")) { dat[[v_x]] <- factor(dat[[v_x]]) } # colors, linetypes, and facets are categorical attributes if (isTRUE(v_color %in% colnames(dat))) { dat[[v_color]] <- factor(dat[[v_color]]) } if (isTRUE(v_facet_1 %in% colnames(dat))) { dat[[v_facet_1]] <- factor(dat[[v_facet_1]]) } if (isTRUE(v_facet_2 %in% colnames(dat))) { dat[[v_facet_2]] <- factor(dat[[v_facet_2]]) } return(dat) } plot_build <- function( dat, v_x, v_color = NULL, v_facet_1 = 
NULL, v_facet_2 = NULL, dv = NULL, modeldata = NULL, points = 0, rug = FALSE, gray = FALSE) { checkmate::assert_flag(rug) checkmate::assert_flag(gray) # create index before building ggplot to make sure it is available dat$marginaleffects_term_index <- get_unique_index(dat, term_only = TRUE) multi_variables <- isTRUE(length(unique(dat$marginaleffects_term_index)) > 1) p <- ggplot2::ggplot() if (points > 0 && !get_variable_class(modeldata, v_x, "categorical") && !get_variable_class(modeldata, dv, "categorical")) { if (!is.null(v_color) && get_variable_class(modeldata, v_color, "categorical")) { p <- p + ggplot2::geom_point( data = modeldata, alpha = points, ggplot2::aes(x = .data[[v_x]], y = .data[[dv]], color = factor(.data[[v_color]]))) } else { p <- p + ggplot2::geom_point( data = modeldata, alpha = points, ggplot2::aes(x = .data[[v_x]], y = .data[[dv]])) } } if (isTRUE(rug)) { p <- p + ggplot2::geom_rug(data = modeldata, ggplot2::aes(x = .data[[v_x]])) if (!is.null(dv)) { p <- p + ggplot2::geom_rug(data = modeldata, ggplot2::aes(y = .data[[dv]])) } } aes_args <- list( x = substitute(.data[[v_x]]), y = substitute(estimate) ) if ("conf.low" %in% colnames(dat)) { aes_args$ymin <- substitute(conf.low) aes_args$ymax <- substitute(conf.high) } aes_args_ribbon <- aes_args aes_args_ribbon$fill <- aes_args$color aes_args_ribbon$color <- NULL # discrete x-axis if (is.factor(dat[[v_x]])) { if (!is.null(v_color)) { if (gray) { aes_args$shape <- substitute(factor(.data[[v_color]])) } else { aes_args$color <- substitute(factor(.data[[v_color]])) } } aes_obj <- do.call(ggplot2::aes, aes_args) if ("conf.low" %in% colnames(dat)) { p <- p + ggplot2::geom_pointrange( data = dat, mapping = aes_obj, position = ggplot2::position_dodge(.15)) } else { p <- p + ggplot2::geom_point( data = dat, mapping = aes_obj, position = ggplot2::position_dodge(.15)) } # continuous x-axis } else { if (!is.null(v_color)) { if (gray) { aes_args$linetype <- substitute(factor(.data[[v_color]])) 
aes_args_ribbon$linetype <- substitute(factor(.data[[v_color]])) } else { aes_args$color <- substitute(factor(.data[[v_color]])) aes_args_ribbon$fill <- substitute(factor(.data[[v_color]])) } } aes_obj_ribbon <- do.call(ggplot2::aes, aes_args_ribbon) aes_args$ymin <- aes_args$ymax <- NULL aes_obj <- do.call(ggplot2::aes, aes_args) if ("conf.low" %in% colnames(dat)) { p <- p + ggplot2::geom_ribbon(data = dat, aes_obj_ribbon, alpha = .1) p <- p + ggplot2::geom_line(data = dat, aes_obj) } p <- p + ggplot2::geom_line(data = dat, aes_obj) } # facets: 3rd and 4th variable and/or multiple effects ## If pass two facets then make facet grid if (!is.null(v_facet_1) && !is.null(v_facet_2)) { fo <- stats::as.formula(paste(v_facet_2, "~", ifelse(multi_variables, "marginaleffects_term_index +", ""), v_facet_1)) p <- p + ggplot2::facet_grid(fo, scales = "free", labeller = function(x){ lapply(ggplot2::label_both(x), gsub, pattern = "marginaleffects_term_index: ", replacement="") }) ## if pass only 1 facet then facet_wrap } else if (!is.null(v_facet_1) && is.null(v_facet_2)) { fo <- stats::as.formula(paste("~", ifelse(multi_variables, "marginaleffects_term_index +", ""), v_facet_1)) p <- p + ggplot2::facet_wrap(fo, scales = "free") } else if (multi_variables) { fo <- stats::as.formula("~ marginaleffects_term_index") p <- p + ggplot2::facet_wrap(fo, scales = "free") } return(p) }marginaleffects/R/methods_survival.R0000644000176200001440000000073514541720224017334 0ustar liggesusers#' @rdname get_predict #' @export get_predict.coxph <- function(model, newdata = insight::get_data(model), type = "lp", ...) { out <- stats::predict(model, newdata = newdata, type = type, ...) out <- data.frame(rowid = seq_len(nrow(newdata)), estimate = out) return(out) } marginaleffects/R/methods_afex.R0000644000176200001440000000244314557752334016420 0ustar liggesusers #' @include set_coef.R #' @rdname set_coef #' @export set_coef.afex_aov <- function(model, coefs, ...) 
{ if (isTRUE(checkmate::check_matrix(model$lm$coefficients))) { mat <- matrix(coefs, ncol = ncol(model$lm$coefficients)) dimnames(mat) <- dimnames(model$lm$coefficients) model$lm$coefficients <- mat } else { model$lm$coefficients <- coefs } return(model) } #' @include get_coef.R #' @rdname get_coef #' @export get_coef.afex_aov <- function(model, ...) { b <- insight::get_parameters(model) b <- stats::setNames(b$Estimate, paste(b$Parameter, b$Response, sep = ":")) return(b) } #' @include get_vcov.R #' @rdname get_vcov #' @export get_vcov.afex_aov <- function(model, vcov = NULL, ...) { if (!is.null(vcov) && !is.logical(vcov)) { stop("The `vcov` argument is not supported for models of this class.") } insight::get_varcov(model) } #' @include get_predict.R #' @rdname get_predict #' @export get_predict.afex_aov <- function(model, newdata = NULL, ...) { out <- stats::predict(model, newdata = newdata) out <- data.frame(estimate = out) if (isTRUE("rowid" %in% colnames(newdata))) { out[["rowid"]] <- newdata[["rowid"]] } else { out[["rowid"]] <- seq_len(nrow(out)) } return(out) } marginaleffects/R/methods_glmx.R0000644000176200001440000000041214541720224016420 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @export set_coef.glmx <- function(model, coefs, ...) 
{ out <- model out$coefficients$glm <- coefs[names(out$coefficients$glm)] out$coefficients$extra <- coefs[names(out$coefficients$extra)] return(out) } marginaleffects/R/get_hypothesis.R0000644000176200001440000002657314541720224017004 0ustar liggesusers get_hypothesis <- function(x, hypothesis, column, by = NULL) { if (is.null(hypothesis)) return(x) lincom <- NULL # lincom: numeric vector or matrix if (isTRUE(checkmate::check_numeric(hypothesis))) { if (isTRUE(checkmate::check_atomic_vector(hypothesis))) { checkmate::assert_numeric(hypothesis, len = nrow(x)) lincom <- as.matrix(hypothesis) } else if (isTRUE(checkmate::check_matrix(hypothesis))) { lincom <- hypothesis } } # lincom: string shortcuts valid <- c("pairwise", "reference", "sequential", "revpairwise", "revreference", "revsequential") if (isTRUE(hypothesis %in% valid)) { if (nrow(x) > 25) { msg <- 'The "pairwise", "reference", and "sequential" options of the `hypotheses` argument are not supported for `marginaleffects` commands which generate more than 25 rows of results. Use the `newdata`, `by`, and/or `variables` arguments to compute a smaller set of results on which to conduct hypothesis tests.' 
insight::format_error(msg) } } if (isTRUE(hypothesis == "reference")) { lincom <- lincom_reference(x, by) } else if (isTRUE(hypothesis == "revreference")) { lincom <- lincom_revreference(x, by) } else if (isTRUE(hypothesis == "sequential")) { lincom <- lincom_sequential(x, by) } else if (isTRUE(hypothesis == "revsequential")) { lincom <- lincom_revsequential(x, by) } else if (isTRUE(hypothesis == "pairwise")) { lincom <- lincom_pairwise(x, by) } else if (isTRUE(hypothesis == "revpairwise")) { lincom <- lincom_revpairwise(x, by) } lincom <- sanitize_lincom(lincom, x) # matrix hypothesis if (isTRUE(checkmate::check_matrix(lincom))) { out <- lincom_multiply(x, lincom) return(out) # string hypothesis } else if (is.character(hypothesis)) { out_list <- draws_list <- list() lab <- attr(hypothesis, "label") tmp <- expand_wildcard(hypothesis, nrow(x), lab) hypothesis <- tmp[[1]] labs <- tmp[[2]] for (i in seq_along(hypothesis)) { out_list[[i]] <- eval_string_hypothesis(x, hypothesis[i], labs[i]) draws_list[[i]] <- attr(out_list[[i]], "posterior_draws") } out <- do.call(rbind, out_list) attr(out, "posterior_draws") <- do.call(rbind, draws_list) attr(out, "label") <- labs return(out) } insight::format_error("`hypotheses` is broken. 
Please report this bug: https://github.com/vincentarelbundock/marginaleffects/issues.") } get_hypothesis_row_labels <- function(x, by = NULL) { lab <- grep("^term$|^by$|^group$|^value$|^contrast$|^contrast_", colnames(x), value = TRUE) lab <- Filter(function(z) length(unique(x[[z]])) > 1, lab) if (isTRUE(checkmate::check_character(by))) { lab <- unique(c(lab, by)) } if (length(lab) == 0) { lab <- NULL } else { lab_df <- data.frame(x)[, lab, drop = FALSE] idx <- vapply(lab_df, FUN = function(x) length(unique(x)) > 1, FUN.VALUE = logical(1)) if (sum(idx) > 0) { lab <- apply(lab_df[, idx, drop = FALSE], 1, paste, collapse = ", ") } else { lab <- apply(lab_df, 1, paste, collapse = ", ") } } # wrap in parentheses to avoid a-b-c-d != (a-b)-(c-d) if (any(grepl("-", lab))) { lab <- sprintf("(%s)", lab) } return(lab) } sanitize_lincom <- function(lincom, x) { if (isTRUE(checkmate::check_matrix(lincom))) { checkmate::assert_matrix(lincom, nrows = nrow(x)) if (is.null(colnames(lincom))) { colnames(lincom) <- rep("custom", ncol(lincom)) } } return(lincom) } lincom_revreference <- function(x, by) { lincom <- -1 * diag(nrow(x)) lincom[1, ] <- 1 lab <- get_hypothesis_row_labels(x, by = by) if (length(lab) == 0 || anyDuplicated(lab) > 0) { lab <- sprintf("Row 1 - Row %s", seq_len(ncol(lincom))) } else { lab <- sprintf("%s - %s", lab[1], lab) } colnames(lincom) <- lab lincom <- lincom[, 2:ncol(lincom), drop = FALSE] return(lincom) } lincom_reference <- function(x, by) { lincom <- diag(nrow(x)) lincom[1, ] <- -1 lab <- get_hypothesis_row_labels(x, by = by) if (length(lab) == 0 || anyDuplicated(lab) > 0) { lab <- sprintf("Row %s - Row 1", seq_len(ncol(lincom))) } else { lab <- sprintf("%s - %s", lab, lab[1]) } colnames(lincom) <- lab lincom <- lincom[, 2:ncol(lincom), drop = FALSE] return(lincom) } lincom_revsequential <- function(x, by) { lincom <- matrix(0, nrow = nrow(x), ncol = nrow(x) - 1) lab <- get_hypothesis_row_labels(x, by = by) if (length(lab) == 0 || anyDuplicated(lab) > 
0) { lab <- sprintf("Row %s - Row %s", seq_len(ncol(lincom)), seq_len(ncol(lincom)) + 1) } else { lab <- sprintf("%s - %s", lab[seq_len(ncol(lincom))], lab[seq_len(ncol(lincom)) + 1]) } for (i in seq_len(ncol(lincom))) { lincom[i:(i + 1), i] <- c(1, -1) } colnames(lincom) <- lab return(lincom) } lincom_sequential <- function(x, by) { lincom <- matrix(0, nrow = nrow(x), ncol = nrow(x) - 1) lab <- get_hypothesis_row_labels(x, by = by) if (length(lab) == 0 || anyDuplicated(lab) > 0) { lab <- sprintf("Row %s - Row %s", seq_len(ncol(lincom)) + 1, seq_len(ncol(lincom))) } else { lab <- sprintf("%s - %s", lab[seq_len(ncol(lincom)) + 1], lab[seq_len(ncol(lincom))]) } for (i in seq_len(ncol(lincom))) { lincom[i:(i + 1), i] <- c(-1, 1) } colnames(lincom) <- lab return(lincom) } lincom_revpairwise <- function(x, by) { lab_row <- get_hypothesis_row_labels(x, by = by) lab_col <- NULL flag <- length(lab_row) == 0 || anyDuplicated(lab_row) > 0 mat <- list() for (i in seq_len(nrow(x))) { for (j in 2:nrow(x)) { if (i < j) { tmp <- matrix(0, nrow = nrow(x), ncol = 1) tmp[i, ] <- -1 tmp[j, ] <- 1 mat <- c(mat, list(tmp)) if (isTRUE(flag)) { lab_col <- c(lab_col, sprintf("Row %s - Row %s", j, i)) } else { lab_col <- c(lab_col, sprintf("%s - %s", lab_row[j], lab_row[i])) } } } } lincom <- do.call("cbind", mat) colnames(lincom) <- lab_col return(lincom) } lincom_pairwise <- function(x, by) { lab_row <- get_hypothesis_row_labels(x, by = by) lab_col <- NULL flag <- length(lab_row) == 0 || anyDuplicated(lab_row) > 0 mat <- list() for (i in seq_len(nrow(x))) { for (j in 2:nrow(x)) { if (i < j) { tmp <- matrix(0, nrow = nrow(x), ncol = 1) tmp[j, ] <- -1 tmp[i, ] <- 1 mat <- c(mat, list(tmp)) if (isTRUE(flag)) { lab_col <- c(lab_col, sprintf("Row %s - Row %s", i, j)) } else { lab_col <- c(lab_col, sprintf("%s - %s", lab_row[i], lab_row[j])) } } } } lincom <- do.call("cbind", mat) colnames(lincom) <- lab_col return(lincom) } lincom_multiply <- function(x, lincom) { # bayesian draws <- attr(x, 
"posterior_draws") if (!is.null(draws)) { draws <- t(as.matrix(lincom)) %*% draws out <- data.table( term = colnames(lincom), tmp = apply(draws, 1, stats::median)) setnames(out, old = "tmp", new = "estimate") attr(out, "posterior_draws") <- draws # frequentist } else { out <- data.table( term = colnames(lincom), tmp = as.vector(x[["estimate"]] %*% lincom)) setnames(out, old = "tmp", new = "estimate") } out <- out[out$term != "1 - 1", , drop = FALSE] return(out) } eval_string_hypothesis <- function(x, hypothesis, lab) { # row indices: `hypotheses` includes them, but `term` does not if (isTRUE(grepl("\\bb\\d+\\b", hypothesis)) && !any(grepl("\\bb\\d+\\b", x[["term"]]))) { bmax <- regmatches(lab, gregexpr("\\bb\\d+\\b", lab))[[1]] bmax <- tryCatch(max(as.numeric(gsub("b", "", bmax))), error = function(e) 0) if (bmax > nrow(x)) { msg <- "%s cannot be used in `hypothesis` because the call produced just %s estimate(s). Try executing the exact same command without the `hypothesis` argument to see which estimates are available for hypothesis testing." msg <- sprintf(msg, paste0("b", bmax), nrow(x)) insight::format_error(msg) } for (i in seq_len(nrow(x))) { tmp <- paste0("marginaleffects__", i) hypothesis <- gsub(paste0("b", i), tmp, hypothesis) } rowlabels <- paste0("marginaleffects__", seq_len(nrow(x))) # term names } else { if (!"term" %in% colnames(x) || anyDuplicated(x$term) > 0) { msg <- c( 'To use term names in a `hypothesis` string, the same function call without `hypothesis` argument must produce a `term` column with unique row identifiers. You can use `b1`, `b2`, etc. 
indices instead of term names in the `hypotheses` string Ex: "b1 + b2 = 0" Alternatively, you can use the `newdata`, `variables`, or `by` arguments:', "", "mod <- lm(mpg ~ am * vs + cyl, data = mtcars)", 'comparisons(mod, newdata = "mean", hypothesis = "b1 = b2")', 'comparisons(mod, newdata = "mean", hypothesis = "am = vs")', 'comparisons(mod, variables = "am", by = "cyl", hypothesis = "pairwise")') insight::format_error(msg) } rowlabels <- x$term } eval_string_function <- function(vec, hypothesis, rowlabels) { envir <- parent.frame() void <- sapply( seq_along(vec), function(i) { assign(rowlabels[i], vec[i], envir = envir) }) out <- eval(parse(text = hypothesis), envir = envir) return(out) } draws <- attr(x, "posterior_draws") if (!is.null(draws)) { insight::check_if_installed("collapse", minimum_version = "1.9.0") tmp <- apply( draws, MARGIN = 2, FUN = eval_string_function, hypothesis = hypothesis, rowlabels = rowlabels) draws <- matrix(tmp, ncol = ncol(draws)) out <- data.table( term = gsub("\\s+", "", lab), tmp = collapse::dapply(draws, MARGIN = 1, FUN = collapse::fmedian)) } else { out <- eval_string_function( x[["estimate"]], hypothesis = hypothesis, rowlabels = rowlabels) out <- data.table( term = gsub("\\s+", "", lab), tmp = out) } setnames(out, old = "tmp", new = "estimate") attr(out, "posterior_draws") <- draws return(out) } expand_wildcard <- function(hyp, bmax, lab) { # Find all occurrences of b* bstar_indices <- gregexpr("b\\*", hyp)[[1]] if (bstar_indices[1] == -1) return(list(hyp, lab)) bstar_count <- length(bstar_indices) if (bstar_count > 1) { insight::format_error("Error: More than one 'b*' substring found.") } # Replace b* with b1, b2, b3, ..., bmax labs <- character(bmax) result <- character(bmax) for (i in 1:bmax) { result[i] <- sub("b\\*", paste0("b", i), hyp) labs[i] <- sub("b\\*", paste0("b", i), lab) } return(list(result, labs)) } marginaleffects/R/set_coef.R0000644000176200001440000000252514560035476015535 0ustar liggesusers#' Internal 
function to set coefficients #' #' Set the coefficients in a model to different values and return the modified object (internal function) #' #' @rdname set_coef #' @param model object to modify #' @param coefs vector of coefficients to insert in the model object #' @export #' @keywords internal #' @return Model object of the same class as the `model` argument, but with #' different stored coefficients. #' @details To compute the variance of marginal effects we need to take the #' Jacobian with # respect to the model coefficients. These functions manipulate model objects # to change the coefficients stored internally, which changes the output of the # `predict()` function. set_coef <- function(model, coefs, ...) { UseMethod("set_coef") } #' @rdname set_coef #' @export set_coef.default <- function(model, coefs, ...) { # in basic model classes coefficients are named vector # in ordinal::clm models, there are sometimes duplicates, so name matching doesn't work # sometimes names are backticked, others not Issue #1005 a <- gsub("`", "", names(coefs)) b <- gsub("`", "", names(model$coefficients)) flag <- length(model[["coefficients"]]) == length(coefs) && all(a == b) if (flag) { model[["coefficients"]] <- coefs } else { model[["coefficients"]][names(coefs)] <- coefs } model } marginaleffects/R/methods_fixest.R0000644000176200001440000000250714541720224016762 0ustar liggesusers#' @rdname get_predict #' @export get_predict.fixest <- function(model, newdata = insight::get_data(model), type = "response", ...) { insight::check_if_installed("fixest") if (is.null(type)) { type <- sanitize_type(model = model, type = type, calling_function = "predictions") } dots <- list(...) 
# some predict methods raise warnings on unused arguments unused <- c("normalize_dydx", "step_size", "numDeriv_method", "conf.int", "internal_call") dots <- dots[setdiff(names(dots), unused)] # fixest is super slow when using do call because of some `deparse()` call # issue #531: we don't want to waste time computing intervals or risk having # them as leftover columns in contrast computations pred <- try( stats::predict( object = model, newdata = newdata, type = type), silent = TRUE) if (inherits(pred, "try-error")) { return(pred) } if ("rowid" %in% colnames(newdata)) { out <- data.frame( rowid = newdata$rowid, estimate = as.numeric(pred)) } else { out <- data.frame( rowid = seq_len(nrow(newdata)), estimate = as.numeric(pred)) } return(out) } marginaleffects/R/comparisons.R0000644000176200001440000006476114554104503016304 0ustar liggesusers#' Comparisons Between Predictions Made With Different Regressor Values #' #' @description #' Predict the outcome variable at different regressor values (e.g., college #' graduates vs. others), and compare those predictions by computing a difference, #' ratio, or some other function. `comparisons()` can return many quantities of #' interest, such as contrasts, differences, risk ratios, changes in log odds, lift, #' slopes, elasticities, etc. #' #' * `comparisons()`: unit-level (conditional) estimates. #' * `avg_comparisons()`: average (marginal) estimates. #' #' `variables` identifies the focal regressors whose "effect" we are interested in. `comparison` determines how predictions with different regressor values are compared (difference, ratio, odds, etc.). The `newdata` argument and the `datagrid()` function control where statistics are evaluated in the predictor space: "at observed values", "at the mean", "at representative values", etc. 
#' #' See the comparisons vignette and package website for worked examples and case studies: #' #' * #' * #' #' @inheritParams slopes #' @inheritParams predictions #' @param variables Focal variables #' * `NULL`: compute comparisons for all the variables in the model object (can be slow). #' * Character vector: subset of variables (usually faster). #' * Named list: names identify the subset of variables of interest, and values define the type of contrast to compute. Acceptable values depend on the variable type: #' - Factor or character variables: #' * "reference": Each factor level is compared to the factor reference (base) level #' * "all": All combinations of observed levels #' * "sequential": Each factor level is compared to the previous factor level #' * "pairwise": Each factor level is compared to all other levels #' * "minmax": The highest and lowest levels of a factor. #' * "revpairwise", "revreference", "revsequential": inverse of the corresponding hypotheses. #' * Vector of length 2 with the two values to compare. #' * Data frame with the same number of rows as `newdata`, with two columns of "lo" and "hi" values to compare. #' * Function that accepts a vector and returns a data frame with two columns of "lo" and "hi" values to compare. See examples below. #' - Logical variables: #' * NULL: contrast between TRUE and FALSE #' * Data frame with the same number of rows as `newdata`, with two columns of "lo" and "hi" values to compare. #' * Function that accepts a vector and returns a data frame with two columns of "lo" and "hi" values to compare. See examples below. #' - Numeric variables: #' * Numeric of length 1: Forward contrast for a gap of `x`, computed between the observed value and the observed value plus `x`. Users can set a global option to get a "center" or "backward" contrast instead: `options(marginaleffects_contrast_direction="center")` #' * Numeric vector of length 2: Contrast between the largest and the smallest elements of the `x` vector. 
#' * Data frame with the same number of rows as `newdata`, with two columns of "lo" and "hi" values to compare. #' * Function that accepts a vector and returns a data frame with two columns of "lo" and "hi" values to compare. See examples below. #' * "iqr": Contrast across the interquartile range of the regressor. #' * "sd": Contrast across one standard deviation around the regressor mean. #' * "2sd": Contrast across two standard deviations around the regressor mean. #' * "minmax": Contrast between the maximum and the minimum values of the regressor. #' - Examples: #' + `variables = list(gear = "pairwise", hp = 10)` #' + `variables = list(gear = "sequential", hp = c(100, 120))` #' + `variables = list(hp = \(x) data.frame(low = x - 5, high = x + 10))` #' + See the Examples section below for more. #' @param newdata Grid of predictor values at which we evaluate the comparisons. #' + Warning: Please avoid modifying your dataset between fitting the model and calling a `marginaleffects` function. This can sometimes lead to unexpected results. #' + `NULL` (default): Unit-level contrasts for each observed value in the dataset (empirical distribution). The dataset is retrieved using [insight::get_data()], which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. #' + data frame: Unit-level contrasts for each row of the `newdata` data frame. #' + string: #' - "mean": Contrasts at the Mean. Contrasts when each predictor is held at its mean or mode. #' - "median": Contrasts at the Median. Contrasts when each predictor is held at its median or mode. #' - "marginalmeans": Contrasts at Marginal Means. #' - "tukey": Contrasts at Tukey's 5 numbers. #' - "grid": Contrasts on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). #' + [datagrid()] call to specify a custom grid of regressors. 
For example: #' - `newdata = datagrid(cyl = c(4, 6))`: `cyl` variable equal to 4 and 6 and other regressors fixed at their means or modes. #' - `newdata = datagrid(mpg = fivenum)`: `mpg` variable held at Tukey's five numbers (using the `fivenum` function), and other regressors fixed at their means or modes. #' - See the Examples section and the [datagrid] documentation. #' @param comparison How should pairs of predictions be compared? Difference, ratio, odds ratio, or user-defined functions. #' * string: shortcuts to common contrast functions. #' - Supported shortcuts strings: `r paste(names(marginaleffects:::comparison_function_dict), collapse = ", ")` #' - See the Comparisons section below for definitions of each transformation. #' * function: accept two equal-length numeric vectors of adjusted predictions (`hi` and `lo`) and returns a vector of contrasts of the same length, or a unique numeric value. #' - See the Transformations section below for examples of valid functions. #' @param transform string or function. Transformation applied to unit-level estimates and confidence intervals just before the function returns results. Functions must accept a vector and return a vector of the same length. Support string shortcuts: "exp", "ln" #' @param equivalence Numeric vector of length 2: bounds used for the two-one-sided test (TOST) of equivalence, and for the non-inferiority and non-superiority tests. See Details section below. #' @param by Aggregate unit-level estimates (aka, marginalize, average over). Valid inputs: #' - `FALSE`: return the original unit-level estimates. #' - `TRUE`: aggregate estimates for each term. #' - Character vector of column names in `newdata` or in the data frame produced by calling the function without the `by` argument. #' - Data frame with a `by` column of group labels, and merging columns shared by `newdata` or the data frame produced by calling the same function without the `by` argument. #' - See examples below. 
#' - For more complex aggregations, you can use the `FUN` argument of the `hypotheses()` function. See that function's documentation and the Hypothesis Test vignettes on the `marginaleffects` website. #' @param cross #' * `FALSE`: Contrasts represent the change in adjusted predictions when one predictor changes and all other variables are held constant. #' * `TRUE`: Contrasts represent the changes in adjusted predictions when all the predictors specified in the `variables` argument are manipulated simultaneously (a "cross-contrast"). #' @template deltamethod #' @template model_specific_arguments #' @template comparison_functions #' @template bayesian #' @template equivalence #' @template type #' @template references #' #' @return A `data.frame` with one row per observation (per term/group) and several columns: #' * `rowid`: row number of the `newdata` data frame #' * `type`: prediction type, as defined by the `type` argument #' * `group`: (optional) value of the grouped outcome (e.g., categorical outcome models) #' * `term`: the variable whose marginal effect is computed #' * `dydx`: slope of the outcome with respect to the term, for a given combination of predictor values #' * `std.error`: standard errors computed by via the delta method. #' * `p.value`: p value associated to the `estimate` column. The null is determined by the `hypothesis` argument (0 by default), and p values are computed before applying the `transform` argument. #' * `s.value`: Shannon information transforms of p values. How many consecutive "heads" tosses would provide the same amount of evidence (or "surprise") against the null hypothesis that the coin is fair? The purpose of S is to calibrate the analyst's intuition about the strength of evidence encoded in p against a well-known physical phenomenon. See Greenland (2019) and Cole et al. (2020). 
#' * `conf.low`: lower bound of the confidence interval (or equal-tailed interval for bayesian models) #' * `conf.high`: upper bound of the confidence interval (or equal-tailed interval for bayesian models) #' #' See `?print.marginaleffects` for printing options. #' #' @examplesIf interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true") #' @examples #' library(marginaleffects) #' #' # Linear model #' tmp <- mtcars #' tmp$am <- as.logical(tmp$am) #' mod <- lm(mpg ~ am + factor(cyl), tmp) #' avg_comparisons(mod, variables = list(cyl = "reference")) #' avg_comparisons(mod, variables = list(cyl = "sequential")) #' avg_comparisons(mod, variables = list(cyl = "pairwise")) #' #' # GLM with different scale types #' mod <- glm(am ~ factor(gear), data = mtcars) #' avg_comparisons(mod, type = "response") #' avg_comparisons(mod, type = "link") #' #' # Contrasts at the mean #' comparisons(mod, newdata = "mean") #' #' # Contrasts between marginal means #' comparisons(mod, newdata = "marginalmeans") #' #' # Contrasts at user-specified values #' comparisons(mod, newdata = datagrid(am = 0, gear = tmp$gear)) #' comparisons(mod, newdata = datagrid(am = unique, gear = max)) #' #' m <- lm(mpg ~ hp + drat + factor(cyl) + factor(am), data = mtcars) #' comparisons(m, variables = "hp", newdata = datagrid(FUN_factor = unique, FUN_numeric = median)) #' #' # Numeric contrasts #' mod <- lm(mpg ~ hp, data = mtcars) #' avg_comparisons(mod, variables = list(hp = 1)) #' avg_comparisons(mod, variables = list(hp = 5)) #' avg_comparisons(mod, variables = list(hp = c(90, 100))) #' avg_comparisons(mod, variables = list(hp = "iqr")) #' avg_comparisons(mod, variables = list(hp = "sd")) #' avg_comparisons(mod, variables = list(hp = "minmax")) #' #' # using a function to specify a custom difference in one regressor #' dat <- mtcars #' dat$new_hp <- 49 * (dat$hp - min(dat$hp)) / (max(dat$hp) - min(dat$hp)) + 1 #' modlog <- lm(mpg ~ log(new_hp) + factor(cyl), data = dat) #' fdiff <- \(x) data.frame(x, x 
+ 10) #' avg_comparisons(modlog, variables = list(new_hp = fdiff)) #' #' # Adjusted Risk Ratio: see the contrasts vignette #' mod <- glm(vs ~ mpg, data = mtcars, family = binomial) #' avg_comparisons(mod, comparison = "lnratioavg", transform = exp) #' #' # Adjusted Risk Ratio: Manual specification of the `comparison` #' avg_comparisons( #' mod, #' comparison = function(hi, lo) log(mean(hi) / mean(lo)), #' transform = exp) # #' # cross contrasts #' mod <- lm(mpg ~ factor(cyl) * factor(gear) + hp, data = mtcars) #' avg_comparisons(mod, variables = c("cyl", "gear"), cross = TRUE) #' #' # variable-specific contrasts #' avg_comparisons(mod, variables = list(gear = "sequential", hp = 10)) #' #' # hypothesis test: is the `hp` marginal effect at the mean equal to the `drat` marginal effect #' mod <- lm(mpg ~ wt + drat, data = mtcars) #' #' comparisons( #' mod, #' newdata = "mean", #' hypothesis = "wt = drat") #' #' # same hypothesis test using row indices #' comparisons( #' mod, #' newdata = "mean", #' hypothesis = "b1 - b2 = 0") #' #' # same hypothesis test using numeric vector of weights #' comparisons( #' mod, #' newdata = "mean", #' hypothesis = c(1, -1)) #' #' # two custom contrasts using a matrix of weights #' lc <- matrix(c( #' 1, -1, #' 2, 3), #' ncol = 2) #' comparisons( #' mod, #' newdata = "mean", #' hypothesis = lc) #' #' # Effect of a 1 group-wise standard deviation change #' # First we calculate the SD in each group of `cyl` #' # Second, we use that SD as the treatment size in the `variables` argument #' library(dplyr) #' mod <- lm(mpg ~ hp + factor(cyl), mtcars) #' tmp <- mtcars %>% #' group_by(cyl) %>% #' mutate(hp_sd = sd(hp)) #' avg_comparisons(mod, #' variables = list(hp = function(x) data.frame(x, x + tmp$hp_sd)), #' by = "cyl") #' #' # `by` argument #' mod <- lm(mpg ~ hp * am * vs, data = mtcars) #' comparisons(mod, by = TRUE) #' #' mod <- lm(mpg ~ hp * am * vs, data = mtcars) #' avg_comparisons(mod, variables = "hp", by = c("vs", "am")) #' #' 
library(nnet) #' mod <- multinom(factor(gear) ~ mpg + am * vs, data = mtcars, trace = FALSE) #' by <- data.frame( #' group = c("3", "4", "5"), #' by = c("3,4", "3,4", "5")) #' comparisons(mod, type = "probs", by = by) #' #' @export comparisons <- function(model, newdata = NULL, variables = NULL, comparison = "difference", type = NULL, vcov = TRUE, by = FALSE, conf_level = 0.95, transform = NULL, cross = FALSE, wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ...) { dots <- list(...) # backward compatibility if ("transform_post" %in% names(dots)) { transform <- dots[["transform_post"]] insight::format_warning("The `transform_post` argument is deprecated. Use `transform` instead.") } if ("transform_pre" %in% names(dots)) { comparison <- dots[["transform_pre"]] insight::format_warning("The `transform_pre` argument is deprecated. Use `comparison` instead.") } # very early, before any use of newdata # if `newdata` is a call to `typical` or `counterfactual`, insert `model` scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) # extracting modeldata repeatedly is slow. # checking dots allows marginalmeans to pass modeldata to predictions. 
if (isTRUE(by)) { modeldata <- get_modeldata(model, additional_variables = FALSE, modeldata = dots[["modeldata"]], wts = wts) } else { modeldata <- get_modeldata(model, additional_variables = by, modeldata = dots[["modeldata"]], wts = wts) } # build call: match.call() doesn't work well in *apply() # after sanitize_newdata_call call_attr <- c(list( name = "comparisons", model = model, newdata = newdata, variables = variables, type = type, vcov = vcov, by = by, conf_level = conf_level, comparison = comparison, transform = transform, cross = cross, wts = wts, hypothesis = hypothesis, equivalence = equivalence, p_adjust = p_adjust, df = df), dots) if ("modeldata" %in% names(dots)) { call_attr[["modeldata"]] <- modeldata } call_attr <- do.call("call", call_attr) # required by stubcols later, but might be overwritten bycols <- NULL # sanity checks sanity_dots(model, ...) sanity_df(df, newdata) conf_level <- sanitize_conf_level(conf_level, ...) checkmate::assert_number(eps, lower = 1e-10, null.ok = TRUE) numderiv <- sanitize_numderiv(numderiv) sanity_equivalence_p_adjust(equivalence, p_adjust) model <- sanitize_model( model = model, newdata = newdata, wts = wts, vcov = vcov, calling_function = "comparisons", ...) cross <- sanitize_cross(cross, variables, model) type <- sanitize_type(model = model, type = type, calling_function = "comparisons") sanity_comparison(comparison) tmp <- sanitize_hypothesis(hypothesis, ...) 
hypothesis <- tmp$hypothesis hypothesis_null <- tmp$hypothesis_null # multiple imputation if (inherits(model, c("mira", "amest"))) { out <- process_imputation(model, call_attr) return(out) } # transforms comparison_label <- transform_label <- NULL if (is.function(comparison)) { comparison_label <- deparse(substitute(comparison)) } if (is.function(transform)) { transform_label <- deparse(substitute(transform)) transform <- sanitize_transform(transform) names(transform) <- transform_label } else { transform <- sanitize_transform(transform) transform_label <- names(transform) } marginalmeans <- isTRUE(checkmate::check_choice(newdata, choices = "marginalmeans")) newdata <- sanitize_newdata( model = model, newdata = newdata, modeldata = modeldata, by = by, wts = wts) # after sanitize_newdata sanity_by(by, newdata) # after sanity_by newdata <- dedup_newdata( model = model, newdata = newdata, wts = wts, by = by, cross = cross, comparison = comparison) if (is.null(wts) && "marginaleffects_wts_internal" %in% colnames(newdata)) { wts <- "marginaleffects_wts_internal" } # after sanitize_newdata # after dedup_newdata variables_list <- sanitize_variables( model = model, newdata = newdata, modeldata = modeldata, variables = variables, cross = cross, by = by, comparison = comparison, eps = eps) # get dof before transforming the vcov arg # get_df() produces a weird warning on non lmerMod. We can skip them # because get_vcov() will produce an informative error later. 
if (inherits(model, "lmerMod") && (isTRUE(hush(vcov %in% c("satterthwaite", "kenward-roger"))))) { # predict.lmerTest requires the DV dv <- insight::find_response(model) if (!dv %in% colnames(newdata)) { newdata[[dv]] <- mean(insight::get_response(model)) } if (!isTRUE(hush(is.infinite(df)))) { insight::format_error('The `df` argument is not supported when `vcov` is "satterthwaite" or "kenward-roger".') } # df_per_observation is an undocumented argument introduced in 0.18.4.7 to preserve backward compatibility df <- insight::get_df(model, type = vcov, data = newdata, df_per_observation = TRUE) } vcov_false <- isFALSE(vcov) vcov.type <- get_vcov_label(vcov) vcov <- get_vcov(model, vcov = vcov, type = type, ...) predictors <- variables_list$conditional ############### sanity checks are over # Bootstrap out <- inferences_dispatch( INF_FUN = comparisons, model = model, newdata = newdata, vcov = vcov, variables = variables, type = type, by = by, conf_level = conf_level, cross = cross, comparison = comparison, transform = transform, wts = wts, hypothesis = hypothesis, eps = eps, ...) 
if (!is.null(out)) { return(out) } # compute contrasts and standard errors args <- list(model = model, newdata = newdata, variables = predictors, cross = cross, marginalmeans = marginalmeans, modeldata = modeldata) dots[["modeldata"]] <- NULL # dont' pass twice args <- c(args, dots) contrast_data <- do.call("get_contrast_data", args) args <- list(model, newdata = newdata, variables = predictors, type = type, original = contrast_data[["original"]], hi = contrast_data[["hi"]], lo = contrast_data[["lo"]], wts = contrast_data[["original"]][["marginaleffects_wts_internal"]], by = by, marginalmeans = marginalmeans, cross = cross, hypothesis = hypothesis, modeldata = modeldata) args <- c(args, dots) mfx <- do.call("get_contrasts", args) # bayesian posterior if (!is.null(attr(mfx, "posterior_draws"))) { draws <- attr(mfx, "posterior_draws") J <- NULL # standard errors via delta method } else if (!vcov_false && isTRUE(checkmate::check_matrix(vcov))) { idx <- intersect(colnames(mfx), c("group", "term", "contrast")) idx <- mfx[, (idx), drop = FALSE] args <- list(model, vcov = vcov, type = type, FUN = get_se_delta_contrasts, newdata = newdata, index = idx, variables = predictors, marginalmeans = marginalmeans, hypothesis = hypothesis, hi = contrast_data$hi, lo = contrast_data$lo, original = contrast_data$original, by = by, eps = eps, cross = cross, numderiv = numderiv) args <- c(args, dots) se <- do.call("get_se_delta", args) J <- attr(se, "jacobian") attr(se, "jacobian") <- NULL mfx$std.error <- as.numeric(se) draws <- NULL # no standard error } else { J <- draws <- NULL } # merge original data back in if ((is.null(by) || isFALSE(by)) && "rowid" %in% colnames(mfx)) { if ("rowid" %in% colnames(newdata)) { idx <- c("rowid", "rowidcf", "term", "contrast", "by", setdiff(colnames(contrast_data$original), colnames(mfx))) idx <- intersect(idx, colnames(contrast_data$original)) tmp <- contrast_data$original[, ..idx, drop = FALSE] # contrast_data is duplicated to compute contrasts for 
different terms or pairs bycols <- intersect(colnames(tmp), colnames(mfx)) idx <- duplicated(tmp, by = bycols) tmp <- tmp[!idx] mfx <- merge(mfx, tmp, all.x = TRUE, by = bycols, sort = FALSE) # HACK: relies on NO sorting at ANY point } else { idx <- setdiff(colnames(contrast_data$original), colnames(mfx)) mfx <- data.table(mfx, contrast_data$original[, ..idx]) } } # meta info mfx <- get_ci( mfx, conf_level = conf_level, df = df, draws = draws, estimate = "estimate", null_hypothesis = hypothesis_null, p_adjust = p_adjust, model = model) # clean rows and columns # WARNING: we cannot sort rows at the end because `get_hypothesis()` is # applied in the middle, and it must already be sorted in the final order, # otherwise, users cannot know for sure what is going to be the first and # second rows, etc. mfx <- sort_columns(mfx, newdata, by) # bayesian draws attr(mfx, "posterior_draws") <- draws # equivalence tests mfx <- equivalence(mfx, equivalence = equivalence, df = df, ...) # after draws attribute mfx <- backtransform(mfx, transform) # save as attribute and not column if (any(!is.na(mfx[["marginaleffects_wts_internal"]]))) { marginaleffects_wts_internal <- mfx[["marginaleffects_wts_internal"]] } else { marginaleffects_wts_internal <- NULL } mfx[["marginaleffects_wts_internal"]] <- NULL out <- mfx data.table::setDF(out) out <- set_marginaleffects_attributes( out, get_marginaleffects_attributes(newdata, include_regex = "^newdata.*class|explicit|matrix|levels")) # other attributes attr(out, "newdata") <- newdata attr(out, "call") <- call_attr attr(out, "type") <- type attr(out, "model_type") <- class(model)[1] attr(out, "model") <- model attr(out, "variables") <- predictors attr(out, "jacobian") <- J attr(out, "vcov") <- vcov attr(out, "vcov.type") <- vcov.type attr(out, "weights") <- marginaleffects_wts_internal attr(out, "comparison") <- comparison attr(out, "transform") <- transform[[1]] attr(out, "comparison_label") <- comparison_label attr(out, "transform_label") <- 
transform_label attr(out, "conf_level") <- conf_level attr(out, "by") <- by if (inherits(model, "brmsfit")) { insight::check_if_installed("brms") attr(out, "nchains") <- brms::nchains(model) } class(out) <- c("comparisons", class(out)) return(out) } #' Average comparisons #' @describeIn comparisons Average comparisons #' @export #' avg_comparisons <- function(model, newdata = NULL, variables = NULL, type = NULL, vcov = TRUE, by = TRUE, conf_level = 0.95, comparison = "difference", transform = NULL, cross = FALSE, wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ...) { # order of the first few paragraphs is important # if `newdata` is a call to `typical` or `counterfactual`, insert `model` scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) # Bootstrap out <- inferences_dispatch( INF_FUN = avg_comparisons, model = model, newdata = newdata, vcov = vcov, variables = variables, type = type, by = by, cross = cross, conf_level = conf_level, comparison = comparison, transform = transform, wts = wts, hypothesis = hypothesis, eps = eps, ...) if (!is.null(out)) { return(out) } out <- comparisons( model = model, newdata = newdata, variables = variables, type = type, vcov = vcov, by = by, conf_level = conf_level, comparison = comparison, transform = transform, cross = cross, wts = wts, hypothesis = hypothesis, equivalence = equivalence, p_adjust = p_adjust, df = df, eps = eps, ...) return(out) } marginaleffects/R/recall.R0000644000176200001440000000427714541720224015205 0ustar liggesusers# fancy way to catch the call so that get_averages(slopes()) does not evaluate twice # and is fast recall <- function(x, ...) 
{ funs <- c("comparisons", "slopes", "predictions", "marginalmeans", "hypotheses", "avg_predictions", "avg_comparisons", "avg_slopes") # 2-step estimation with already evaluated & assigned call if (!is.call(x)) { # unsupported evaluated object: return `NULL` if (!inherits(x, funs)) { return(NULL) } # retrieve call mc <- attr(x, "call") if (!is.call(mc)) { msg <- sprintf("Call could not be retrieved from object of class %s.", class(x)[1]) insight::format_error(msg) } # unsupported call: return `NULL` } else { if (!as.character(x[1]) %in% funs) { return(NULL) } mc <- x } dots <- list(...) # don't overwrite certain arguments if ("hypothesis" %in% names(mc) && "hypothesis" %in% names(dots)) { if (is.null(dots[["hypothesis"]])) { dots[["hypothesis"]] <- NULL } } # safe to work with original objects when available objs <- c("newdata", "model") for (obj in objs) { if (!is.null(attr(x, "call")[[obj]])) { dots[[obj]] <- attr(x, "call")[[obj]] } } # overwrite previous arguments for (n in names(dots)) { # named NULL should not remove the corresponding argument from the call if (is.null(dots[[n]])) { mc[n] <- list(NULL) } else { mc[[n]] <- dots[[n]] } } ## old `rlang` convenience. I don't think the current version is toooo unsafe. # FUN <- rlang::call_modify # args <- c(list(".call" = quote(mc)), dots) # # evaluate call # mc <- do.call("FUN", args) # # expand user-supplied arguments (don't think this is necessary) # funs <- list( # "predictions" = predictions, # "comparisons" = comparisons, # "slopes" = slopes, # "hypotheses" = hypotheses, # "marginalmeans" = marginalmeans) # mc <- match.call( # definition = funs[[as.character(mc)[1]]], # call = mc) out <- eval(mc) return(out) } marginaleffects/R/equivalence.R0000644000176200001440000000304214541720224016231 0ustar liggesusersequivalence <- function(x, equivalence = NULL, df = Inf, ...) 
{ if (is.null(equivalence)) { return(x) } if (!is.null(equivalence) && any(!c("estimate", "std.error") %in% colnames(x))) { msg <- "The `equivalence` argument is not supported with models for which `marginaleffects` does not estimate a standard error (e.g., bayesian)." insight::format_error(msg) } checkmate::assert_numeric(equivalence, min.len = 1, max.len = 2) if (length(equivalence) == 1) { equivalence <- c(equivalence, equivalence) } delta <- abs(diff(equivalence)) / 2 null <- min(equivalence) + delta # definitions from `emmeans`, with a different user interface based on symmetric "equivalence" x$statistic.noninf <- (x$estimate - equivalence[1]) / x$std.error x$statistic.nonsup <- (x$estimate - equivalence[2]) / x$std.error ## keep this in case we return to the emmeans-style user interface # x$statistic.inf <- (x$estimate - null + delta) / x$std.error # x$statistic.sup <- (x$estimate - null - delta) / x$std.error if (is.infinite(df)) { x$p.value.noninf <- stats::pnorm(x$statistic.noninf, lower.tail = FALSE) x$p.value.nonsup <- stats::pnorm(x$statistic.nonsup, lower.tail = TRUE) } else { x$p.value.noninf <- stats::pt(x$statistic.noninf, lower.tail = FALSE, df = x[["df"]]) x$p.value.nonsup <- stats::pt(x$statistic.nonsup, lower.tail = TRUE, df = x[["df"]]) } x$p.value.equiv <- pmax(x$p.value.nonsup, x$p.value.noninf) return(x) }marginaleffects/R/methods_aod.R0000644000176200001440000000256014541720224016222 0ustar liggesusers#' @rdname get_predict #' @export get_predict.glimML <- function(model, newdata = insight::get_data(model), type = "response", ...) { insight::check_if_installed("aod") out <- aod::predict(model, newdata = newdata, type = type, ...) out <- data.frame( rowid = 1:nrow(newdata), estimate = out) return(out) } #' @rdname set_coef #' @export set_coef.glimML <- function(model, coefs, ...) 
{ # in basic model classes coefficients are named vector model@fixed.param[names(coefs)] <- coefs model } #' @rdname get_vcov #' @export get_vcov.glimML <- function(model, vcov = NULL, ...) { insight::check_if_installed("aod") if (!is.null(vcov) && !is.logical(vcov)) { stop("The `vcov` argument is not supported for this kind of model.") } aod::vcov(model) } #' @rdname sanitize_model_specific sanitize_model_specific.glimML <- function(model, ...) { mdat <- get_modeldata(model, additional_variables = FALSE) if (isTRUE("character" %in% attr(mdat, "marginaleffects_variable_class"))) { insight::format_error("This function does not support character predictors. Please convert them to factors before fitting the model.") } return(model) } marginaleffects/R/deprecated.R0000644000176200001440000003526314560035476016053 0ustar liggesusers#' Deprecated function #' #' @keywords internal #' @export deltamethod <- function(...) { .Deprecated("hypotheses()") } #' Deprecated function #' #' @keywords internal #' @export marginaleffects <- function(...) { .Deprecated("slopes()") slopes(...) } #' Deprecated function #' #' @keywords internal #' @export meffects <- marginaleffects #' Deprecated function #' #' @keywords internal #' @export datagridcf <- function(...) { .Deprecated('datagrid(x = 1:2, grid_type = "counterfactual")') datagridcf_internal(...) } #' Deprecated function #' #' @keywords internal #' @export marginal_means <- function(model, variables = NULL, newdata = NULL, vcov = TRUE, conf_level = 0.95, type = NULL, transform = NULL, cross = FALSE, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, wts = "equal", by = NULL, numderiv = "fdforward", ...) { .Deprecated(' predictions(mod, by = "x", newdata = datagrid(grid_type = \"balanced\")) ') # deprecation and backward compatibility dots <- list(...) 
sanity_equivalence_p_adjust(equivalence, p_adjust) if ("transform_post" %in% names(dots)) transform <- dots[["transform_post"]] if ("variables_grid" %in% names(dots)) { if (!is.null(newdata)) { insight::format_error("The `variables_grid` argument and has been replaced by `newdata`. These two arguments cannot be used simultaneously.") } newdata <- dots[["variables_grid"]] } if (!is.null(equivalence) && !is.null(p_adjust)) { insight::format_error("The `equivalence` and `p_adjust` arguments cannot be used together.") } numderiv = sanitize_numderiv(numderiv) # build call: match.call() doesn't work well in *apply() call_attr <- c(list( name = "marginal_means", model = model, newdata = newdata, variables = variables, type = type, vcov = vcov, by = by, conf_level = conf_level, transform = transform, wts = wts, hypothesis = hypothesis, equivalence = equivalence, p_adjust = p_adjust, df = df), list(...)) call_attr <- do.call("call", call_attr) # multiple imputation if (inherits(model, c("mira", "amest"))) { out <- process_imputation(model, call_attr, marginal_means = TRUE) return(out) } # if type is NULL, we backtransform if relevant type_string <- sanitize_type(model = model, type = type, calling_function = "marginal_means") if (type_string == "invlink(link)") { if (is.null(hypothesis)) { type_call <- "link" } else { type_call <- "response" type_string <- "response" insight::format_warning('The `type="invlink"` argument is not available unless `hypothesis` is `NULL` or a single number. The value of the `type` argument was changed to "response" automatically. To suppress this warning, use `type="response"` explicitly in your function call.') } } else { type_call <- type_string } modeldata <- get_modeldata(model, additional_variables = FALSE, wts = wts) checkmate::assert_flag(cross) transform <- sanitize_transform(transform) conf_level <- sanitize_conf_level(conf_level, ...) 
model <- sanitize_model(model, vcov = vcov, calling_function = "marginalmeans") checkmate::assert_choice(wts, choices = c("equal", "cells", "proportional")) if (wts != "equal" && is.data.frame(newdata)) { insight::format_error('The `wts` argument must be "equal" when `newdata` is a data frame.') } tmp <- sanitize_hypothesis(hypothesis, ...) hypothesis <- tmp$hypothesis hypothesis_null <- tmp$hypothesis_null sanity_dots(model = model, ...) if (inherits(model, c("brmsfit", "bart"))) { insight::format_error("This model object type is not yet supported by the `marginal_means` function.") } # fancy vcov processing to allow strings like "HC3" vcov_false <- isTRUE(vcov == FALSE) vcov <- get_vcov(model, vcov = vcov, type = type, ...) # focal categorical variables checkmate::assert_character(variables, min.len = 1, null.ok = TRUE) if (any(variables %in% insight::find_response(model))) { insight::format_error("The `variables` vector cannot include the response.") } if (is.null(variables)) { variables <- insight::find_predictors(model, flatten = TRUE) } idx <- vapply( variables, FUN = get_variable_class, newdata = modeldata, FUN.VALUE = logical(1), compare = c("logical", "character", "factor")) focal <- variables[idx] if (length(focal) == 0) { insight::format_error("No categorical predictor was found in the model data or `variables` argument.") } # non-focal categorical variables checkmate::assert( checkmate::check_null(newdata), checkmate::check_character(newdata), checkmate::check_data_frame(newdata)) if (is.null(newdata)) { nonfocal <- insight::find_predictors(model, flatten = TRUE) nonfocal <- setdiff(nonfocal, focal) } else if (is.character(newdata)) { if (!all(newdata %in% colnames(modeldata))) { insight::format_error("Some of the variables in `newdata` are missing from the data used to fit the model.") } nonfocal <- setdiff(newdata, focal) } else if (is.data.frame(newdata)) { nonfocal <- colnames(newdata) } idx <- vapply( nonfocal, FUN = get_variable_class, newdata = 
modeldata, FUN.VALUE = logical(1), compare = c("logical", "character", "factor")) nonfocal <- nonfocal[idx] # grid args <- list(model = model) if (is.data.frame(newdata)) { for (v in focal) { args[[v]] <- unique(modeldata[[v]]) } newgrid <- do.call(datagridcf, args) } else { for (v in c(focal, nonfocal)) { args[[v]] <- unique(modeldata[[v]]) } newgrid <- do.call(datagrid, args) } # by: usual tests + only data frames in `marginal_means()` # after newgrid checkmate::assert_data_frame(by, null.ok = TRUE) sanity_by(by, newgrid) # weights if (identical(wts, "equal")) { newgrid[["wts"]] <- 1 } else if (identical(wts, "proportional")) { wtsgrid <- copy(data.table(modeldata)[, ..nonfocal]) idx <- nonfocal wtsgrid[, N := .N] wtsgrid[, "wts" := .N / N, by = idx] # sometimes datagrid() converts to factors when there is a transformation # in the model formula, so we need to standardize the data for (v in colnames(newgrid)) { if (v %in% colnames(wtsgrid) && is.factor(newgrid[[v]])) { wtsgrid[[v]] <- factor(wtsgrid[[v]], levels = levels(newgrid[[v]])) } } wtsgrid <- unique(wtsgrid) newgrid <- merge(newgrid, wtsgrid, all.x = TRUE) newgrid[["wts"]][is.na(newgrid[["wts"]])] <- 0 } else if (identical(wts, "cells")) { # https://stackoverflow.com/questions/66748520/what-is-the-difference-between-weights-cell-and-weights-proportional-in-r-pa idx <- c(focal, nonfocal) wtsgrid <- copy(data.table(modeldata)[, ..idx]) if (length(idx) == 0) { newgrid[["wts"]] <- 1 return(newgrid) } else { wtsgrid <- data.table(modeldata)[ , .(wts = .N), by = idx][ , wts := wts / sum(wts)] # sometimes datagrid() converts to factors when there is a transformation # in the model formula, so we need to standardize the data for (v in colnames(newgrid)) { if (v %in% colnames(wtsgrid) && is.factor(newgrid[[v]])) { wtsgrid[[v]] <- factor(wtsgrid[[v]], levels = levels(newgrid[[v]])) } } wtsgrid <- unique(wtsgrid) newgrid <- merge(newgrid, wtsgrid, all.x = TRUE) newgrid[["wts"]][is.na(newgrid[["wts"]])] <- 0 } } # 
`equivalence` should not be passed to predictions() at this stage args <- list( model = model, newdata = newgrid, type = type_call, variables = focal, cross = cross, hypothesis = hypothesis, by = by, modeldata = modeldata) args <- c(args, list(...)) args[["equivalence"]] <- NULL mm <- do.call(get_marginalmeans, args) # we want consistent output, regardless of whether `data.table` is installed/used or not out <- as.data.frame(mm) # standard errors via delta method if (!vcov_false) { args <- list( model, vcov = vcov, type = type_call, FUN = get_se_delta_marginalmeans, index = NULL, variables = focal, newdata = newgrid, cross = cross, modeldata = modeldata, hypothesis = hypothesis, by = by, numderiv = numderiv) args <- c(args, list(...)) args[["equivalence"]] <- NULL se <- do.call(get_se_delta, args) # get rid of attributes in column out[["std.error"]] <- as.numeric(se) J <- attr(se, "jacobian") } else { J <- NULL } out <- get_ci( out, conf_level = conf_level, vcov = vcov, null_hypothesis = hypothesis_null, df = df, p_adjust = p_adjust, model = model, ...) # equivalence tests out <- equivalence(out, equivalence = equivalence, df = df, ...) 
# after assign draws if (identical(type_string, "invlink(link)")) { linv <- tryCatch(insight::link_inverse(model), error = function(e) identity) out <- backtransform(out, transform = linv) } out <- backtransform(out, transform) # column order cols <- c("rowid", "group", colnames(by), "term", "hypothesis", "value", variables, "estimate", "std.error", "statistic", "p.value", "s.value", "conf.low", "conf.high", sort(colnames(out))) cols <- unique(cols) cols <- intersect(cols, colnames(out)) out <- out[, cols, drop = FALSE] # attributes attr(out, "model") <- model attr(out, "jacobian") <- J attr(out, "type") <- type_string attr(out, "model_type") <- class(model)[1] attr(out, "variables") <- variables attr(out, "call") <- call_attr attr(out, "conf_level") <- conf_level attr(out, "transform_label") <- names(transform)[1] if (isTRUE(cross)) { attr(out, "variables_grid") <- setdiff(nonfocal, variables) } else { attr(out, "variables_grid") <- unique(c(nonfocal, variables)) } if (inherits(model, "brmsfit")) { insight::check_if_installed("brms") attr(out, "nchains") <- brms::nchains(model) } class(out) <- c("marginalmeans", class(out)) return(out) } get_marginalmeans <- function(model, newdata, type, variables, cross, modeldata, hypothesis = NULL, by = NULL, ...) { if ("wts" %in% colnames(newdata)) { wts <- "wts" } else { wts <- NULL } # predictions for each cell of all categorical data, but not the response if (isTRUE(cross) || length(variables) == 1) { out <- predictions( model = model, newdata = newdata, type = type, vcov = FALSE, modeldata = modeldata, wts = wts, by = c("group", variables), ...) if (length(variables) == 1) { out$term <- variables out$value <- out[[variables]] } # predictions for each variable individual, then bind } else { pred_list <- draw_list <- list() for (v in variables) { tmp <- predictions( model = model, newdata = newdata, type = type, vcov = FALSE, modeldata = modeldata, wts = wts, by = c("group", v), ...) 
tmp$rowid <- NULL draw_list[[v]] <- attr(tmp, "posterior_draws") tmp$term <- v data.table::setnames(tmp, old = v, new = "value") pred_list[[v]] <- tmp } # try to preserve term-value class, but convert to character if needed to bind classes <- sapply(pred_list, function(x) class(x$value)[1]) if (length(unique(classes)) > 1) { for (i in seq_along(pred_list)) { pred_list[[i]]$value <- as.character(pred_list[[i]]$value) } } out <- rbindlist(pred_list) } data.table::setDT(out) if (isTRUE(checkmate::check_data_frame(by))) { # warnings for factor vs numeric vs character. merge.data.table usually still works. bycols <- intersect(colnames(out), colnames(by)) if (length(bycols) == 0) { msg <- "There is no common columns in `by` and in the output of `marginal_means()`. Make sure one of the entries in the `variables` argument corresponds to one of the columns in `by`." insight::format_error(msg) } for (b in bycols) { if (is.factor(out[[b]]) && is.numeric(by[[b]])) { out[[b]] <- as.numeric(as.character(out[[b]])) } else if (is.numeric(out[[b]]) && is.factor(by[[b]])) { by[[b]] <- as.numeric(as.character(by[[b]])) } else if (is.factor(out[[b]]) && is.character(by[[b]])) { out[[b]] <- as.character(out[[b]]) } else if (is.character(out[[b]]) && is.factor(by[[b]])) { by[[b]] <- as.character(by[[b]]) } } out <- merge(out, by, sort = FALSE) out <- out[, .(estimate = mean(estimate)), by = "by"] } if (!is.null(hypothesis)) { out <- get_hypothesis(out, hypothesis, by = by) } return(out) } #' Deprecated function #' #' @keywords internal #' @export marginalmeans <- marginal_means #' @noRd get_averages.marginalmeans <- function(x, by = FALSE, ...) 
{ if (!isFALSE(by)) { insight::format_error("The `by` argument is not supported by the `averages()` function for `marginal_means` models.") } x }marginaleffects/R/get_term_labels.R0000644000176200001440000000132314541720224017060 0ustar liggesusersget_term_labels <- function(x, idx = NULL) { if (is.data.frame(x)) { if ("term" %in% names(x) && length(unique(x$term)) == nrow(x)) { return(unique(x$term)) } else if (any(grepl("^contrast", names(x)))) { tmp <- grep("^term$|^contrast", names(x)) out <- x[, tmp, drop = FALSE] if (length(unique(out[["term"]])) == 1) { out[["term"]] <- NULL } out <- do.call(paste, c(out, sep = " ")) } else { out <- paste0("b", seq_len(nrow(x))) } } else if (is.vector(x)) { if (!is.null(names(x))) { out <- names(x) } else { out <- paste0("b", seq_along(x)) } } else { return(NULL) } if (!is.null(idx)) out <- out[idx] return(out) }marginaleffects/R/methods_dbarts.R0000644000176200001440000000275614541720224016745 0ustar liggesusers#' @include get_predict.R #' @rdname get_predict #' @export get_predict.bart <- function(model, newdata = NULL, ...) { args <- c( list( object = model, newdata = newdata), list(...)) p <- do.call(stats::predict, args) p_med <- collapse::fmedian(p) if ("rowid" %in% colnames(newdata) && nrow(newdata) == length(p_med)) { out <- data.frame( rowid = newdata$rowid, group = "main_marginaleffect", estimate = p_med ) } else { out <- data.frame( rowid = seq_along(length(p_med)), group = "main_marginaleffect", estimate = p_med ) } attr(out, "posterior_draws") <- t(p) return(out) } #' @include sanity_model.R #' @rdname sanitize_model_specific #' @export sanitize_model_specific.bart <- function(model, ...) { insight::check_if_installed("collapse", minimum_version = "1.9.0") if (!isTRUE(as.character(insight::get_call(model))[1] %in% c("bart2", "dbarts::bart2"))) { msg <- "`marginaleffects` only supports models estimated using the formula interface in `bart2()` function, not the matrix input in `bart()`." 
insight::format_error(msg) } return(model) } #' @rdname get_vcov #' @export get_vcov.bart <- function(model, vcov = NULL, ...) { if (!is.null(vcov) && !is.logical(vcov)) { insight::format_warning("The `vcov` argument is not supported for models of this class.") } return(NULL) }marginaleffects/R/sanitize_numderiv.R0000644000176200001440000000176414541720224017500 0ustar liggesuserssanitize_numderiv <- function(numderiv) { checkmate::assert( checkmate::check_choice(numderiv, c("richardson", "fdforward", "fdcenter")), checkmate::check_list(numderiv, min.len = 1) ) if (isTRUE(checkmate::check_string(numderiv))) { numderiv <- list(numderiv) } if (length(numderiv) > 1) { if (numderiv[[1]] %in% c("fdforward", "fdcenter")) { if (any(!names(numderiv)[2:length(numderiv)] %in% "eps")) { stop("The only valid argument for this numeric differentiation method is `eps`.") } } else if (numderiv[[1]] == "richardson") { valid <- c("eps", "d", "zero_tol", "size", "r", "v") if (any(!names(numderiv)[2:length(numderiv)] %in% valid)) { stop(sprintf("The only valid arguments for this numeric differentiation method are: %s. See `?numDeriv::grad` for details.", paste(valid, collapse = ", ")), call. = FALSE) } } } return(numderiv) }marginaleffects/R/methods_ordinal.R0000644000176200001440000000401714541720224017106 0ustar liggesusers#' @rdname get_predict #' @export get_predict.clm <- function(model, newdata = insight::get_data(model), type = "prob", ...) { # `predict.clm()` only makes predictions for the observed response group of # each observation in `newdata`. When we remove the response from # `newdata`, `predict.clm()` makes predictions for all levels, which is # what we want. 
resp <- insight::find_response(model) # otherwise `predict.clm` does not see some columns (mystery) data.table::setDF(newdata) newdata <- newdata[, setdiff(colnames(newdata), resp), drop = FALSE] pred <- stats::predict(model, newdata = newdata, type = type) contenders <- c("fit", "eta1", "eta2", "cprob1", "cprob2") tmp <- NULL for (con in contenders) { if (is.null(tmp) && con %in% names(pred)) { tmp <- pred[[con]] } } pred <- tmp out <- data.frame( group = rep(colnames(pred), each = nrow(pred)), estimate = c(pred)) # often an internal call if ("rowid" %in% colnames(newdata)) { out$rowid <- rep(newdata$rowid, times = ncol(pred)) } else { out$rowid <- rep(1:nrow(pred), times = ncol(pred)) } return(out) } #' @include get_group_names.R #' @rdname get_group_names #' @export get_group_names.clm <- get_group_names.polr #' @include sanity_model.R #' @rdname sanitize_model_specific #' @keywords internal sanitize_model_specific.clm <- function(model, ...) { # Corner case: The `predict.clm` method does not make predictions when the # response was transformed to a factor in the formula AND the response is # missing from `newdata`. lhs <- names(attr(stats::terms(model), "dataClasses"))[1] if (isTRUE(grepl("^factor\\(", lhs))) { stop("The response variable should not be transformed to a factor in the formula. Please convert the variable to factor before fitting your model.", call. = FALSE) } return(model) } marginaleffects/R/get_contrast_data.R0000644000176200001440000001611214541720224017417 0ustar liggesusersget_contrast_data <- function(model, newdata, variables, cross, modeldata = NULL, ...) 
{ lo <- hi <- ter <- lab <- original <- rowid <- list() # after variable class assignment if (is.null(modeldata)) { modeldata <- attr(newdata, "newdata_modeldata") } # sometimes needed for extensions when get_data doesn't work if (is.null(modeldata) || nrow(modeldata) == 0) { modeldata <- newdata } # safety need for extensions not supported by `insight` variable_classes <- attr(newdata, "newdata_variable_class") if (length(variable_classes) == 0) { newdata <- set_variable_class(newdata, model) variable_classes <- attr(newdata, "marginaleffects_variable_class") } if (length(attr(modeldata, "marginaleffects_variable_class")) == 0) { modeldata <- set_variable_class(modeldata, model) } if (any(c("factor", "character") %in% variable_classes)) { first_cross <- names(variable_classes[variable_classes %in% c("factor", "character")])[1] } else { first_cross <- NULL } # must use `as.data.table()` because `setDT()` does not handle columns with # more dimensions (e.g., "idx" in {mlogit}) newdata <- as.data.table(newdata) for (v in variables) { args <- list( model = model, newdata = newdata, variable = v, cross = cross, first_cross = identical(v$name, first_cross), modeldata = modeldata) args <- append(args, list(...)) # logical and character before factor used to be important; but I don't think so anymore if (get_variable_class(modeldata, v$name, "logical")) { fun <- get_contrast_data_logical } else if (get_variable_class(modeldata, v$name, "character")) { fun <- get_contrast_data_character } else if (get_variable_class(modeldata, v$name, "categorical")) { fun <- get_contrast_data_factor } else if (get_variable_class(modeldata, v$name, "numeric")) { fun <- get_contrast_data_numeric } else { msg <- sprintf("Class of the `%s` variable is class is not supported.", v$name) stop(msg, call. 
= FALSE) } tmp <- do.call("fun", args) lo[[v$name]] <- tmp$lo if (isTRUE(cross)) { lo[[v$name]][[paste0("null_contrast_", v$name)]] <- tmp$contrast_null } hi[[v$name]] <- tmp$hi ter[[v$name]] <- tmp$ter lab[[v$name]] <- tmp$lab original[[v$name]] <- tmp$original rowid[[v$name]] <- tmp$rowid } clean <- function(x) { for (col in colnames(x)) { # tobit1 introduces AsIs columns if (inherits(x[[col]], "AsIs")) { x[[col]] <- as.numeric(x[[col]]) } # plm creates c("pseries", "numeric"), but when get_contrast_data # assigns +1 numeric, we lose the inheritance if (inherits(x[[col]], "pseries")) { x[[col]] <- as.numeric(x[[col]]) } # strip labelled data which break rbindlist() cl <- class(x[[col]]) if (length(cl) == 2 && cl[1] == "labelled") { class(x[[col]]) <- class(x[[col]])[2] } # mlogit uses a `newdata` with one row per unit-choice and returns # an `idx` column with the choice label in second position if (inherits(model, "mlogit") && inherits(x[[col]], "idx")) { x[[col]] <- NULL } } return(x) } lo <- lapply(lo, clean) hi <- lapply(hi, clean) original <- lapply(original, clean) # single contrast if (!isTRUE(cross)) { lo <- rbindlist(lo, fill = TRUE) hi <- rbindlist(hi, fill = TRUE) original <- rbindlist(original, fill = TRUE) # long names to avoid user-supplied colname conflict marginaleffects_ter <- unlist(ter, use.names = FALSE) marginaleffects_lab <- unlist(lab, use.names = FALSE) lo[, "term" := marginaleffects_ter] hi[, "term" := marginaleffects_ter] original[, "term" := marginaleffects_ter] lo[, "contrast" := marginaleffects_lab] hi[, "contrast" := marginaleffects_lab] original[, "contrast" := marginaleffects_lab] # cross contrast } else { # drop variables for which we have contrasts for (i in seq_along(lo)) { if (i == 1) { # keep rowid and original data only in one of the datasets idx_lo <- setdiff(names(variables), names(lo)[i]) idx_hi <- setdiff(names(variables), names(hi)[i]) idx_or <- setdiff(names(variables), names(hi)[i]) } else { # exclude rowid and 
variables excluded from `variables`, for # which we do not compute cross-contrasts contrast_null <- grep("rowid|^null_contrast_", colnames(lo[[i]]), value = TRUE) idx_lo <- c(setdiff(names(lo[[i]]), c(contrast_null, names(variables))), setdiff(names(variables), names(lo)[[i]])) idx_hi <- c(setdiff(names(hi[[i]]), c(contrast_null, names(variables))), setdiff(names(variables), names(hi)[[i]])) idx_or <- c(setdiff(names(original[[i]]), c(contrast_null, names(variables))), setdiff(names(variables), names(original)[[i]])) } lo[[i]] <- data.table(lo[[i]])[, !..idx_lo] hi[[i]] <- data.table(hi[[i]])[, !..idx_hi] original[[i]] <- data.table(original[[i]])[, !..idx_or] lo[[i]][[paste0("contrast_", names(lo)[i])]] <- lab[[i]] hi[[i]][[paste0("contrast_", names(hi)[i])]] <- lab[[i]] original[[i]][[paste0("contrast_", names(original)[i])]] <- lab[[i]] } fun <- function(x, y) merge(x, y, all = TRUE, allow.cartesian = TRUE, sort = FALSE) lo <- Reduce("fun", lo) hi <- Reduce("fun", hi) original <- Reduce("fun", original) # faster to rbind, but creates massive datasets. need cartesian join on rowid # lo <- cjdt(lo) # hi <- cjdt(hi) # if there are fewer null_contrast_* columns, then there is at least # one always non-null variable type, so we keep everything idx <- grepl("^null_contrast_", colnames(lo)) idx_df <- lo[, ..idx] lo <- lo[, !..idx] if (sum(idx) == length(variables)) { idx <- rowSums(idx_df) < ncol(idx_df) lo <- lo[idx] hi <- hi[idx] original <- original[idx] } } # get_predict() is much faster if we only build the model matrix once lo <- get_model_matrix_attribute(model, lo) hi <- get_model_matrix_attribute(model, hi) original <- get_model_matrix_attribute(model, original) out <- list(lo = lo, hi = hi, original = original) return(out) } marginaleffects/R/methods_rms.R0000644000176200001440000000167514541720224016266 0ustar liggesusers#' @rdname get_vcov #' @export get_vcov.orm <- function(model, vcov = NULL, ...) 
{ if (!is.null(vcov) && !isTRUE(checkmate::check_flag(vcov))) { msg <- "The `vcov` argument is not supported for models of this class." insight::format_error(msg) } out <- stats::vcov(model, intercepts = "all") return(out) } #' @rdname get_predict #' @export get_predict.rms <- function(model, newdata = insight::get_data(model), type = NULL, ...) { if (is.null(type)) { type <- sanitize_type(model, type, calling_function = "predictions") } # {rms} predict methods break on additional arguments get_predict.default(model, newdata = newdata, type = type) } #' @rdname get_predict #' @export get_predict.orm <- get_predict.rms #' @rdname get_predict #' @export get_predict.lrm <- get_predict.rms #' @rdname get_predict #' @export get_predict.ols <- get_predict.rmsmarginaleffects/R/plot_slopes.R0000644000176200001440000001104714557277362016320 0ustar liggesusers#' Plot Conditional or Marginal Slopes #' #' @description #' Plot slopes on the y-axis against values of one or more predictors (x-axis, colors/shapes, and facets). #' #' The `by` argument is used to plot marginal slopes, that is, slopes made on the original data, but averaged by subgroups. This is analogous to using the `by` argument in the `slopes()` function. #' #' The `condition` argument is used to plot conditional slopes, that is, slopes computed on a user-specified grid. This is analogous to using the `newdata` argument and `datagrid()` function in a `slopes()` call. All variables whose values are not specified explicitly are treated as usual by `datagrid()`, that is, they are held at their mean or mode (or rounded mean for integers). This includes grouping variables in mixed-effects models, so analysts who fit such models may want to specify the groups of interest using the `condition` argument, or supply model-specific arguments to compute population-level estimates. See details below. 
#' See the "Plots" vignette and website for tutorials and information on how to customize plots: #' #' * https://marginaleffects.com/vignettes/plot.html #' * https://marginaleffects.com #' #' @param variables Name of the variable whose marginal effect (slope) we want to plot on the y-axis. #' @param condition Conditional slopes #' + Character vector (max length 4): Names of the predictors to display. #' + Named list (max length 4): List names correspond to predictors. List elements can be: #' - Numeric vector #' - Function which returns a numeric vector or a set of unique categorical values #' - Shortcut strings for common reference values: "minmax", "quartile", "threenum" #' + 1: x-axis. 2: color/shape. 3: facet (wrap if no fourth variable, otherwise cols of grid). 4: facet (rows of grid). #' + Numeric variables in positions 2 and 3 are summarized by Tukey's five numbers `?stats::fivenum`. #' @param rug TRUE displays tick marks on the axes to mark the distribution of raw data. #' @param gray FALSE grayscale or color plot #' @param draw `TRUE` returns a `ggplot2` plot. `FALSE` returns a `data.frame` of the underlying data. #' @param newdata When `newdata` is `NULL`, the grid is determined by the `condition` argument. When `newdata` is not `NULL`, the argument behaves in the same way as in the `slopes()` function. 
#' @inheritParams slopes #' @template model_specific_arguments #' @return A `ggplot2` object #' @export #' @examples #' library(marginaleffects) #' mod <- lm(mpg ~ hp * drat * factor(am), data = mtcars) #' #' plot_slopes(mod, variables = "hp", condition = "drat") #' #' plot_slopes(mod, variables = "hp", condition = c("drat", "am")) #' #' plot_slopes(mod, variables = "hp", condition = list("am", "drat" = 3:5)) #' #' plot_slopes(mod, variables = "am", condition = list("hp", "drat" = range)) #' #' plot_slopes(mod, variables = "am", condition = list("hp", "drat" = "threenum")) #' plot_slopes <- function(model, variables = NULL, condition = NULL, by = NULL, newdata = NULL, type = "response", vcov = NULL, conf_level = 0.95, wts = NULL, slope = "dydx", rug = FALSE, gray = FALSE, draw = TRUE, ...) { dots <- list(...) if ("effect" %in% names(dots)) { if (is.null(variables)) { variables <- dots[["effect"]] } else { insight::format_error("The `effect` argument has been renamed to `variables`.") } } # order of the first few paragraphs is important # if `newdata` is a call to `typical` or `counterfactual`, insert `model` # should probably not be nested too deeply in the call stack since we eval.parent() (not sure about this) scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) valid <- c("dydx", "eyex", "eydx", "dyex") checkmate::assert_choice(slope, choices = valid) out <- plot_comparisons( model, variables = variables, condition = condition, by = by, newdata = newdata, type = type, vcov = vcov, conf_level = conf_level, wts = wts, draw = draw, rug = rug, gray = gray, comparison = slope, ...) 
if (inherits(out, "ggplot")) { out <- out + ggplot2::labs(x = condition[1], y = "Slope") } return(out) } marginaleffects/R/methods_tidymodels.R0000644000176200001440000000464114541720224017636 0ustar liggesuserssupported_engine <- function(x) { insight::check_if_installed("parsnip") tmp <- parsnip::extract_fit_engine(x) flag <- inherits(try(sanitize_model(tmp), silent = TRUE), "try-error") return(!flag) } #' @include set_coef.R #' @rdname set_coef #' @export set_coef.model_fit <- function(model, coefs, ...) { if (!"fit" %in% names(model)) { return(model) } model$fit <- set_coef(model$fit, coefs, ...) return(model) } #' @include set_coef.R #' @rdname set_coef #' @export set_coef.workflow <- function(model, coefs, ...) { if ("fit" %in% names(model) && "fit" %in% names(model$fit)) { model$fit$fit <- set_coef(model$fit$fit, coefs, ...) } return(model) } #' @include get_predict.R #' @rdname get_predict #' @keywords internal #' @export get_predict.model_fit <- function(model, newdata, type = NULL, ...) { out <- stats::predict(model, new_data = newdata, type = type) if (type == "numeric") { v <- intersect(c(".pred", ".pred_res"), colnames(out))[1] out <- data.frame(rowid = seq_along(out), estimate = out[[v]]) } else if (type == "class") { out <- data.frame(rowid = seq_along(out), estimate = out[[".pred_class"]]) } else if (type == "prob") { colnames(out) <- substr(colnames(out), 7, nchar(colnames(out))) out$rowid <- seq_len(nrow(out)) out <- data.table::melt( out, id.vars = "rowid", variable.name = "group", value.name = "estimate") } return(out) } #' @include get_predict.R #' @rdname get_predict #' @keywords internal #' @export get_predict.workflow <- get_predict.model_fit #' @include get_vcov.R #' @rdname get_vcov #' @keywords internal #' @export get_vcov.model_fit <- function(model, type = NULL, ...) 
{
    # "class" predictions carry no covariance matrix; FALSE signals
    # "no uncertainty estimates available" to the rest of the package
    if (isTRUE(type == "class")) {
        return(FALSE)
    }
    # delegate to the underlying parsnip engine fit when the engine is a model
    # type that `marginaleffects` supports; otherwise FALSE
    if (isTRUE(supported_engine(model))) {
        tmp <- parsnip::extract_fit_engine(model)
        out <- get_vcov(tmp)
    } else {
        out <- FALSE
    }
    return(out)
}


#' @include get_vcov.R
#' @rdname get_vcov
#' @keywords internal
#' @export
get_vcov.workflow <- get_vcov.model_fit


#' @include get_coef.R
#' @rdname get_coef
#' @keywords internal
#' @export
get_coef.workflow <- function(model, ...) {
    # coefficients live on the underlying engine fit; NULL when the engine is
    # not supported
    if (isTRUE(supported_engine(model))) {
        tmp <- parsnip::extract_fit_engine(model)
        out <- get_coef(tmp)
    } else {
        out <- NULL
    }
    return(out)
}
marginaleffects/R/methods_tobit1.R0000644000176200001440000000057314541720224016663 0ustar liggesusers#' @rdname get_predict
#' @export
get_predict.tobit1 <- function(model,
                               newdata = insight::get_data(model),
                               type = "response",
                               ...) {
    # `tobit1` predict methods select the quantity with `what` instead of `type`
    out <- stats::predict(model, what = type, newdata = newdata)
    out <- data.frame(rowid = seq_len(length(out)), estimate = out)
    return(out)
}
marginaleffects/R/methods_dataframe.R0000644000176200001440000000122514560035476017411 0ustar liggesusers#' @include get_coef.R
#' @rdname get_coef
#' @export
get_coef.data.frame <- function(model, ...) {
    checkmate::assert_data_frame(model)
    if (!"estimate" %in% colnames(model)) {
        insight::format_error("The model object is a data.frame but doesn't contain the column 'estimate'. Make sure these columns are present")
    }
    # named numeric vector: names come from `term` when available, otherwise
    # from the element index
    out <- model$estimate
    if ("term" %in% colnames(model)) {
        names(out) <- model$term
    } else {
        names(out) <- seq_along(out)
    }
    return(out)
}


#' @include set_coef.R
#' @rdname set_coef
#' @export
set_coef.data.frame <- function(model, coefs, ...) {
    model$estimate = coefs
    return(model)
}
marginaleffects/R/methods_glmmTMB.R0000755000176200001440000000632014541720224016757 0ustar liggesusers#' @include get_predict.R
#' @rdname get_predict
#' @keywords internal
#' @export
get_predict.glmmTMB <- function(model,
                                newdata = insight::get_data(model),
                                type = "response",
                                ...)
{
    # NOTE(review): `vcov` is not a parameter of this method, so `inherits()`
    # here tests the `stats::vcov` *function* found by lexical scoping and is
    # always FALSE; this branch looks vestigial -- confirm before removing.
    if (inherits(vcov, "vcov.glmmTMB")) {
        vcov <- vcov[[1]]
    }
    out <- get_predict.default(
        model = model,
        newdata = newdata,
        type = type,
        allow.new.levels = TRUE, # otherwise we get errors in marginal_means()
        ...)
    return(out)
}


#' @include get_vcov.R
#' @rdname get_vcov
#' @export
get_vcov.glmmTMB <- function(model, ...) {
    # `full = TRUE` requests the covariance matrix of all parameters, which is
    # what get_coef.glmmTMB below uses for naming
    out <- stats::vcov(model, full = TRUE)
    return(out)
}


#' @include get_coef.R
#' @rdname get_coef
#' @export
get_coef.glmmTMB <- function(model, ...) {
    # name the raw parameter vector with the columns of the full vcov so the
    # two objects line up for delta-method computations
    out <- model$fit$par
    names(out) <- colnames(stats::vcov(model, full = TRUE))
    return(out)
}


#' @include set_coef.R
#' @rdname set_coef
#' @export
set_coef.glmmTMB <- function(model, coefs, ...) {
    # internally, coefficients are held in model$fit$parfull and in
    # model$fit$par. It looks like we need to manipulate both for the
    # predictions and delta method standard errors to be affected.
    # random parameters are ignored: named "b"
    # the order matters; I think we can rely on it, but this still feels like a HACK
    # In particular, this assumes that the order of presentation in coef() is always: beta -> betazi -> betad
    out <- model
    out$fit$parfull[names(out$fit$parfull) != "b"] <- coefs
    out$fit$par <- stats::setNames(coefs, names(out$fit$par))
    return(out)
}


#' @rdname sanitize_model_specific
sanitize_model_specific.glmmTMB <- function(model, vcov = NULL, calling_function = "marginaleffects", ...) {
    # standard errors are disabled by default for glmmTMB; see upstream issue
    if (isTRUE(vcov) || is.null(vcov)) {
        insight::format_error(
            "By default, standard errors for models of class `glmmTMB` are not calculated. For further details, see discussion at {https://github.com/glmmTMB/glmmTMB/issues/915}.",
            "Set `vcov = FALSE` or explicitly provide a variance-covariance-matrix for the `vcov` argument to calculate standard errors."
        )
    }
    # REML fits are rejected unless uncertainty computation is disabled
    REML <- as.list(insight::get_call(model))[["REML"]]
    if (isTRUE(REML) && !identical(vcov, FALSE)) {
        msg <- insight::format_message("Uncertainty estimates cannot be computed for `glmmTMB` models with the `REML=TRUE` option. Either set `REML=FALSE` when fitting the model, or set `vcov=FALSE` when calling a `slopes` function to avoid this error.")
        stop(msg, call. = FALSE)
    }
    # we need an explicit check because predict.glmmTMB() generates other
    # warnings related to openMP, so our default warning-detection does not
    # work
    if (inherits(vcov, "vcov.glmmTMB")) {
        vcov <- vcov[[1]]
    }
    flag <- !isTRUE(checkmate::check_flag(vcov, null.ok = TRUE)) &&
        !isTRUE(checkmate::check_matrix(vcov)) &&
        !isTRUE(checkmate::check_function(vcov))
    if (flag) {
        msg <- sprintf("This value of the `vcov` argument is not supported for models of class `%s`. Please set `vcov` to `TRUE`, `FALSE`, `NULL`, or supply a variance-covariance matrix.", class(model)[1])
        stop(msg, call. = FALSE)
    }
    return(model)
}
marginaleffects/R/get_contrast_data_character.R0000644000176200001440000000566114541720224021442 0ustar liggesusersget_contrast_data_character <- function(model, newdata, variable, cross, first_cross, modeldata, ...) {
    # factors store all levels, but characters do not, so we need to extract the
    # original data from the model.
tmp <- modeldata # unsupported by insight (e.g., numpyro) if (is.null(tmp)) { tmp <- newdata } levs <- sort(unique(tmp[[variable$name]])) # string shortcuts flag <- checkmate::check_choice(variable$value, c("reference", "revreference", "pairwise", "revpairwise", "sequential", "revsequential", "all", "minmax")) if (isTRUE(flag)) { levs_idx <- contrast_categories_shortcuts(levs, variable, interaction) # custom data frame or function } else if (isTRUE(checkmate::check_function(variable$value)) || isTRUE(checkmate::check_data_frame(variable$value))) { out <- contrast_categories_custom(variable, newdata) return(out) # vector of two values } else if (isTRUE(checkmate::check_atomic_vector(variable$value, len = 2))) { if (is.character(variable$value)) { tmp <- modeldata[[variable$name]] if (any(!variable$value %in% as.character(tmp))) { msg <- "Some of the values supplied to the `variables` argument were not found in the dataset." insight::format_error(msg) } idx <- match(variable$value, as.character(tmp)) levs_idx <- data.table::data.table(lo = tmp[idx[1]], hi = tmp[idx[[2]]]) } else if (is.numeric(variable$value)) { tmp <- newdata[[variable$name]] levs_idx <- data.table::data.table( lo = as.character(variable$value[1]), hi = as.character(variable$value[2])) } else { levs_idx <- data.table::data.table(lo = variable$value[1], hi = variable$value[2]) } } tmp <- contrast_categories_processing(first_cross, levs_idx, levs, variable, newdata) lo <- tmp[[1]] hi <- tmp[[2]] original <- tmp[[3]] lo[[variable$name]] <- lo[["marginaleffects_contrast_lo"]] hi[[variable$name]] <- hi[["marginaleffects_contrast_hi"]] contrast_label <- hi$marginaleffects_contrast_label contrast_null <- hi$marginaleffects_contrast_hi == hi$marginaleffects_contrast_lo tmp <- !grepl("^marginaleffects_contrast", colnames(lo)) lo <- lo[, tmp, with = FALSE] hi <- hi[, tmp, with = FALSE] out <- list(rowid = original$rowid, lo = lo, hi = hi, original = original, ter = rep(variable$name, nrow(lo)), # lo can be 
different dimension than newdata lab = contrast_label, contrast_null = contrast_null) return(out) } marginaleffects/R/type_dictionary.R0000644000176200001440000000500114551335126017137 0ustar liggesusers #' internal function to build the type dictionary #' #' @noRd type_dictionary_build <- function() { text <- 'class,type other,response other,class other,link bam,response bam,link bart,ev bart,ppd betareg,response betareg,link betareg,precision betareg,quantile betareg,variance bife,response bife,link bracl,probs brglmFit,response brglmFit,link brmsfit,response brmsfit,link brmsfit,prediction brmsfit,average brmultinom,probs brmultinom,class clm,prob clm,cum.prob clm,linear.predictor clogit,expected clogit,lp clogit,risk clogit,survival coxph,expected coxph,lp coxph,risk coxph,survival crch,response crch,location crch,scale crch,density hetprob,pr hetprob,xb hxlr,location hxlr,cumprob hxlr,scale hxlr,density ivpml,pr ivpml,xb fixest,invlink(link) fixest,response fixest,link hurdle,response hurdle,prob hurdle,count hurdle,zero iv_robust,response lm,response gam,response gam,link Gam,invlink(link) Gam,response Gam,link geeglm,response geeglm,link Gls,lp glimML,response glimML,link glm,invlink(link) glm,response glm,link glmerMod,response glmerMod,link glmrob,response glmrob,link glmmTMB,response glmmTMB,link glmmTMB,conditional glmmTMB,zprob glmmTMB,zlink glmmTMB,disp glmmPQL,response glmmPQL,link glmx,response ivreg,response lmerMod,response lmerModLmerTest,response lmrob,response lm_robust,response lrm,fitted lrm,lp lrm,mean mblogit,response mblogit,latent mblogit,link mclogit,response mclogit,latent mclogit,link MCMCglmm,response model_fit,numeric model_fit,prob model_fit,class workflow,numeric workflow,prob workflow,class multinom,probs multinom,latent mhurdle,E mhurdle,Ep mhurdle,p mlogit,response mvgam,response mvgam,link mvgam,expected mvgam,detection mvgam,latent_N negbin,invlink(link) negbin,response negbin,link ols,lp "oohbchoice", "probability", 
"oohbchoice", "utility", orm,fitted orm,mean orm,lp polr,probs rlm,response selection,response selection,link selection,unconditional speedlm,response speedglm,response speedglm,link stanreg,response stanreg,link survreg,response survreg,link survreg,quantile svyglm,response svyglm,link svyolr,probs tobit,response tobit1,expvalue tobit1,linpred tobit1,prob zeroinfl,response zeroinfl,prob zeroinfl,count zeroinfl,zero' out <- utils::read.csv( text = text, colClasses = c("character", "character")) for (i in 1:2) { out[[i]] <- trimws(out[[i]]) } return(out) } #' type dictionary #' #' insight::get_predict accepts a `predict` argument #' stats::predict accepts a `type` argument #' this dictionary converts #' @noRd type_dictionary <- type_dictionary_build() marginaleffects/R/get_jacobian.R0000644000176200001440000000625214541720224016343 0ustar liggesusersget_jacobian <- function(func, x, numderiv) { numDeriv_options <- getOption("marginaleffects_numDeriv", default = NULL) if (is.null(numDeriv_options)) { method <- numderiv[[1]] numderiv[[1]] <- NULL numderiv[["func"]] <- func numderiv[["x"]] <- x if (identical(method, "richardson")) { df <- do.call(get_jacobian_richardson, numderiv) } else if (identical(method, "fdforward")) { df <- do.call(get_jacobian_fdforward, numderiv) } else if (identical(method, "fdcenter")) { df <- do.call(get_jacobian_fdcenter, numderiv) } } else { insight::check_if_installed("numDeriv") numDeriv_options[["func"]] <- func numDeriv_options[["x"]] <- x ndFUN <- get("jacobian", asNamespace("numDeriv")) df <- do.call(ndFUN, numDeriv_options) } return(df) } get_jacobian_fdforward <- function(func, x, eps = NULL) { # old version. probably not optimal. Keep for posterity. 
# h <- max(1e-8, 1e-4 * min(abs(x), na.rm = TRUE))
    baseline <- func(x)
    df <- matrix(NA_real_, length(baseline), length(x))
    for (i in seq_along(x)) {
        # step size scaled to |x[i]|; the floor avoids h == 0 when x[i] == 0
        if (is.null(eps)) {
            h <- max(abs(x[i]) * sqrt(.Machine$double.eps), 1e-10)
        } else {
            h <- eps
        }
        dx <- x
        dx[i] <- dx[i] + h
        # forward difference: one extra evaluation of func per coordinate
        df[, i] <- (func(dx) - baseline) / h
    }
    return(df)
}


# Centered finite differences: evaluates func at x[i] +/- h/2, so each column
# costs two evaluations (the baseline call is only used to size the output).
get_jacobian_fdcenter <- function(func, x, eps = NULL) {
    baseline <- func(x)
    df <- matrix(NA_real_, length(baseline), length(x))
    for (i in seq_along(x)) {
        # same step-size rule as the forward-difference engine above
        if (is.null(eps)) {
            h <- max(abs(x[i]) * sqrt(.Machine$double.eps), 1e-10)
        } else {
            h <- eps
        }
        dx_hi <- dx_lo <- x
        dx_hi[i] <- dx_hi[i] + h / 2
        dx_lo[i] <- dx_lo[i] - h / 2
        df[, i] <- (func(dx_hi) - func(dx_lo)) / h
    }
    return(df)
}


# Code adapted from the `numDeriv` package by Paul Gilbert and Ravi Varadhan
# GPL-3: https://cran.r-project.org/package=numDeriv
get_jacobian_richardson <- function(
    func,
    x,
    eps = 1e-4,
    d = 1e-4,
    zero_tol = sqrt(.Machine$double.eps / 7e-7),
    side = NULL,
    r = 4,
    v = 2,
    ...) {
    n <- length(x)
    f <- func(x, ...)
    # a[j, k, i]: k-th central-difference estimate of d f_j / d x_i
    a <- array(NA, c(length(f), r, n))
    h <- abs(d * x) + eps * (abs(x) < zero_tol)
    pna <- (side == 1) & !is.na(side) # double these on plus side
    mna <- (side == -1) & !is.na(side) # double these on minus side
    for (k in 1:r) { # successively reduce h
        ph <- mh <- h
        ph[pna] <- 2 * ph[pna]
        ph[mna] <- 0
        mh[mna] <- 2 * mh[mna]
        mh[pna] <- 0
        for (i in 1:n) {
            a[, k, i] <- (func(x + ph * (i == seq(n)), ...) - func(x - mh * (i == seq(n)), ...)) / (2 * h[i])
            # if((k != 1)) a[,(abs(a[,(k-1),i]) < 1e-20)] <- 0 #some func are unstable near zero
        }
        h <- h / v # Reduced h by 1/v.
}
    # Richardson extrapolation: successively combine the estimates computed
    # with shrinking step sizes to cancel lower-order error terms
    for (m in 1:(r - 1)) {
        a <- (a[, 2:(r + 1 - m), , drop = FALSE] * (4^m) - a[, 1:(r - m), , drop = FALSE]) / (4^m - 1)
    }
    # drop second dim of a, which is now 1 (but not other dim's even if they are 1
    return(array(a, dim(a)[c(1, 3)]))
}
marginaleffects/R/get_vcov.R0000644000176200001440000001122414541720224015547 0ustar liggesusers#' Get a named variance-covariance matrix from a model object (internal function)
#'
#' @inheritParams slopes
#' @return A named square matrix of variance and covariances. The names must match the coefficient names.
#' @rdname get_vcov
#' @keywords internal
#' @export
get_vcov <- function(model, ...) {
    UseMethod("get_vcov", model)
}


#' @rdname get_vcov
#' @export
get_vcov.default <- function(model, vcov = NULL, ...) {
    # `vcov = FALSE` means no uncertainty estimates were requested
    if (isFALSE(vcov)) {
        return(NULL)
    }
    vcov <- sanitize_vcov(model = model, vcov = vcov)
    # a user-supplied matrix is passed through as-is
    if (isTRUE(checkmate::check_matrix(vcov))) {
        return(vcov)
    }
    # {insight}
    args <- get_varcov_args(model, vcov)
    args[["x"]] <- model
    args[["component"]] <- "all"
    # 1st try: with arguments
    fun <- get("get_varcov", asNamespace("insight"))
    out <- myTryCatch(do.call("fun", args))
    # 2nd try: without arguments
    if (!isTRUE(checkmate::check_matrix(out$value, min.rows = 1))) {
        out <- myTryCatch(insight::get_varcov(model))
        if (isTRUE(checkmate::check_matrix(out$value, min.rows = 1))) {
            msg <- "Unable to extract a variance-covariance matrix using this `vcov` argument. Standard errors are computed using the default variance instead. Perhaps the model or argument is not supported by the `sandwich` or `clubSandwich` packages. If you believe that the model is supported by one of these two packages, you can open a feature request on Github."
            insight::format_warning(msg)
        }
    }
    if (!isTRUE(checkmate::check_matrix(out$value, min.rows = 1))) {
        msg <- "Unable to extract a variance-covariance matrix from this model."
        warning(msg, call. = FALSE)
        return(NULL)
        # valid matrix with warning
    } else if (!is.null(out$warning)) {
        warning(out$warning$message, call. = FALSE)
    }
    out <- out[["value"]]
    # problem: no row.names
    if (is.null(row.names(out))) {
        coefs <- get_coef(model)
        if (ncol(out) == length(coefs)) {
            termnames <- names(stats::coef(model))
            if (length(termnames) == ncol(out)) {
                colnames(out) <- termnames
                row.names(out) <- termnames
            }
        } else {
            return(NULL)
        }
    }
    # problem: duplicate colnames
    if (anyDuplicated(colnames(out)) == 0) {
        coefs <- get_coef(model, ...)
        # 1) Check above is needed for `AER::tobit` and others where `out`
        # includes Log(scale) but `coef` does not Dangerous for `oridinal::clm`
        # and others where there are important duplicate column names in
        # `out`, and selecting with [,] repeats the first instance.
        # 2) Sometimes out has more columns than coefs
        if (all(names(coefs) %in% colnames(out))) {
            out <- out[names(coefs), names(coefs), drop = FALSE]
        }
    }
    return(out)
    # NOTES:
    # survival::coxph with 1 regressor produces a vector
}


#' Take a `summary()` style `vcov` argument and convert it to
#' `insight::get_varcov()`
#'
#' @keywords internal
get_varcov_args <- function(model, vcov) {
    # NULL or an explicit matrix: no extra arguments needed
    if (is.null(vcov) || isTRUE(checkmate::check_matrix(vcov))) {
        out <- list()
        return(out)
    }
    # one-sided formula: clustered standard errors
    if (isTRUE(checkmate::check_formula(vcov))) {
        out <- list("vcov" = "vcovCL", "vcov_args" = list("cluster" = vcov))
        return(out)
    }
    if (isTRUE(vcov == "satterthwaite") || isTRUE(vcov == "kenward-roger")) {
        # NOTE(review): "lmerModTest" looks like a typo for "lmerModLmerTest"
        # (the class name used elsewhere in this package); harmless if
        # lmerTest objects also inherit from "lmerMod" -- confirm.
        if (!isTRUE(inherits(model, "lmerMod")) && !isTRUE(inherits(model, "lmerModTest"))) {
            msg <- 'Satterthwaite and Kenward-Roger corrections are only available for linear mixed effects models.'
            stop(msg, call.
= FALSE)
        }
        # Satterthwaite needs no extra get_varcov() arguments; Kenward-Roger does
        if (isTRUE(vcov == "satterthwaite")) {
            return(list())
        } else {
            return(list(vcov = "kenward-roger"))
        }
    }
    # map user-facing shortcut strings to the `vcov` value that
    # insight::get_varcov() understands; anything else is passed through as-is
    out <- switch(vcov,
        "stata" = list(vcov = "HC2"),
        "robust" = list(vcov = "HC3"),
        "bootstrap" = list(vcov = "BS"),
        "outer-product" = list(vcov = "OPG"),
        list(vcov = vcov))
    return(out)
}


# Human-readable label for a `vcov` shortcut, used when printing results.
# Returns NULL when `vcov` is not a character string.
get_vcov_label <- function(vcov) {
    if (is.null(vcov)) vcov <- ""
    if (!is.character(vcov)) return(NULL)
    # empty `=` entries fall through to the next value, so all HC*/HAC/OPG
    # shortcuts are labelled with their own name
    out <- switch(vcov,
        "stata" = "Stata",
        "robust" = "Robust",
        "kenward-roger" = "Kenward-Roger",
        "satterthwaite" = "Satterthwaite",
        "HC" = ,
        "HC0" = ,
        "HC1" = ,
        "HC2" = ,
        "HC3" = ,
        "HC4" = ,
        "HC4m" = ,
        "HC5" = ,
        "HAC" = ,
        "OPG" = vcov,
        "NeweyWest" = "Newey-West",
        "kernHAC" = "Kernel HAC",
        vcov
    )
    return(out)
}
marginaleffects/R/unpack_matrix_cols.R0000644000176200001440000000351714541720224017624 0ustar liggesusersunpack_matrix_cols <- function(x) {
    # Expand matrix columns of a data frame into "long" vectors: each matrix
    # column is flattened with as.vector() (column-major), and every non-matrix
    # column is recycled to match; the original column order is restored.
    # what data types are we working with
    dcs <- sapply(x, function(x) class(x)[1])
    # do something if we have matrix columns
    if (any(mcols <- dcs == "matrix")) {
        # assume that any matrix columns are of the same number of cols
        nc <- ncol(x[mcols][[1L]])
        out <- lapply(x[mcols], as.vector)
        other <- lapply(x[!mcols], function(y, nc) rep(y, times = nc), nc = nc)
        out <- as.data.frame(out)
        other <- as.data.frame(other)
        out <- cbind(out, other)
        # put order back as it was
        out <- out[names(x)]
    } else {
        return(x)
    }
    out
}
# The content of this file was adapted from the `gratia` package
# https://github.com/gavinsimpson/gratia
# The MIT License (MIT)
#
# Copyright (c) 2013-2020 Gavin L.
Simpson # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# marginaleffects/R/sanitize_condition.R0000644000176200001440000001566714541720224017644 0ustar liggesuserscondition_shortcuts <- function(x, tr, shortcuts) { if (identical(tr, "threenum")) { m <- mean(x, na.rm = TRUE) s <- stats::sd(x, na.rm = TRUE) out <- c(m - s, m, m + s) } else if (identical(tr, "fivenum")) { out <- stats::fivenum(x, na.rm = TRUE) } else if (identical(tr, "minmax")) { out <- c( min(x, na.rm = TRUE), max(x, na.rm = TRUE)) } else if (identical(tr, "quartile")) { out <- stats::quantile(x, probs = c(.25, .5, .75), na.rm = TRUE) } return(out) } sanitize_condition <- function(model, condition, variables = NULL, modeldata = NULL) { # allow multiple conditions and/or effects checkmate::assert( checkmate::check_character(condition, min.len = 1, max.len = 4), checkmate::check_list(condition, min.len = 1, max.len = 4)) # c("a", "b") or list("a", "b") -> named list of NULLs flag1 <- isTRUE(checkmate::check_character(condition)) flag2 <- isTRUE(checkmate::check_list(condition, names = "unnamed")) && all(sapply(condition, function(x) isTRUE(checkmate::check_string(x)))) if (flag1 || flag2) { condition <- stats::setNames(rep(list(NULL), length(condition)), unlist(condition)) } # validity of the list for (i in seq_along(condition)) { if (identical(names(condition)[i], "")) { if (!isTRUE(checkmate::check_character(condition[[i]], len = 1))) { msg <- "The `condition` argument must be a character vector or a named list." 
insight::format_error(msg) } else { names(condition)[i] <- condition[[i]] tmp <- stats::setNames(list(NULL), names(condition)[i]) condition <- utils::modifyList(condition, tmp, keep.null = TRUE) } } } # get data to know over what range of values we should plot if (is.null(modeldata) && isTRUE(checkmate::check_character(condition))) { dat <- get_modeldata(model, additional_variables = condition) } else if (is.null(modeldata) && isTRUE(checkmate::check_list(condition))) { dat <- get_modeldata(model, additional_variables = names(condition)) } else { dat <- modeldata } resp <- insight::get_response(model) respname <- insight::find_response(model) flag <- checkmate::check_true(all(names(condition) %in% c(colnames(dat), "group"))) if (!isTRUE(flag)) { msg <- sprintf("Entries in the `condition` argument must be element of: %s", paste(colnames(dat), collapse = ", ")) insight::format_error(msg) } # condition names condition1 <- names(condition)[[1]] condition2 <- hush(names(condition)[[2]]) condition3 <- hush(names(condition)[[3]]) condition4 <- hush(names(condition)[[4]]) # build typical dataset with a sequence of values over "condition" range at_list <- list() shortcuts <- c("threenum", "fivenum", "minmax", "quartile") # condition 1: x-axis if (is.null(condition[[1]])) { if (get_variable_class(dat, condition1, "binary")) { at_list[[condition1]] <- 0:1 } else if (is.numeric(dat[[condition1]]) && !get_variable_class(dat, condition1, "categorical")) { at_list[[condition1]] <- seq( min(dat[[condition1]], na.rm = TRUE), max(dat[[condition1]], na.rm = TRUE), length.out = 50) } else { at_list[[condition1]] <- factor(unique(dat[[condition1]])) } } else { if (isTRUE(checkmate::check_choice(condition[[1]], shortcuts))) { at_list[[condition1]] <- condition_shortcuts(dat[[condition1]], condition[[1]], shortcuts) } else { at_list[[condition1]] <- condition[[1]] } } # condition 2: color if (length(condition) > 1) { if (is.null(condition[[2]])) { if (is.numeric(dat[[condition2]])) { 
at_list[[condition2]] <- stats::fivenum(dat[[condition2]]) } else { at_list[[condition2]] <- unique(dat[[condition2]]) } } else { if (isTRUE(checkmate::check_choice(condition[[2]], shortcuts))) { at_list[[condition2]] <- condition_shortcuts(dat[[condition2]], condition[[2]], shortcuts) } else { at_list[[condition2]] <- condition[[2]] } } } # condition 3: facet_1 if (length(condition) > 2) { if (is.null(condition[[3]])) { if (is.numeric(dat[[condition3]])) { at_list[[condition3]] <- stats::fivenum(dat[[condition3]]) } else { at_list[[condition3]] <- unique(dat[[condition3]]) } } else { if (isTRUE(checkmate::check_choice(condition[[3]], shortcuts))) { at_list[[condition3]] <- condition_shortcuts(dat[[condition3]], condition[[3]], shortcuts) } else { at_list[[condition3]] <- condition[[3]] } } } # condition 4: facet_2 if (length(condition) > 3) { if (is.null(condition[[4]])) { if (is.numeric(dat[[condition4]])) { at_list[[condition4]] <- stats::fivenum(dat[[condition4]]) } else { at_list[[condition4]] <- unique(dat[[condition4]]) } } else { if (isTRUE(checkmate::check_choice(condition[[4]], shortcuts))) { at_list[[condition4]] <- condition_shortcuts(dat[[condition4]], condition[[4]], shortcuts) } else { at_list[[condition4]] <- condition[[4]] } } } at_list[["model"]] <- model at_list[["newdata"]] <- dat if (isTRUE(checkmate::check_list(variables))) { flag <- all(names(variables) %in% names(condition)) } else { flag <- all(variables %in% names(condition)) } if (!flag) { # sometimes we use the same condition as effect (e.g., GAM vignette), # but otherwise we don't want it at all if (isTRUE(checkmate::check_character(variables))) { dups <- setdiff(variables, names(condition)) for (d in dups) { at_list[[d]] <- NULL } } else { at_list[[names(variables)]] <- NULL } } # mlr3 and tidymodels are not supported by `insight::find_variables()`, so we need to create a grid based on all the variables supplied in `newdata` if (inherits(at_list$model, "Learner") || 
inherits(at_list$model, "model_fit") ||
    inherits(at_list$model, "workflow")
) {
    at_list$model <- NULL
}

# create data
nd <- do.call("datagrid", at_list)

out <- list(
    "modeldata" = dat,
    "newdata" = nd,
    "resp" = resp,
    "respname" = respname,
    "condition" = condition,
    "condition1" = condition1,
    "condition2" = condition2,
    "condition3" = condition3,
    "condition4" = condition4)

return(out)
}
marginaleffects/R/complete_levels.R0000644000176200001440000000352714541720224017122 0ustar liggesusers#' Create a data.frame with all factor or character levels
#'
#' `model.matrix` breaks when `newdata` includes a factor
#' variable, but not all levels are present in the data. This is bad for us
#' because we often want to get predictions with one (or few) rows, where some
#' factor levels are inevitably missing.
#'
#' @param x data frame to scan for factor/character columns with missing levels.
#' @param character_levels named list mapping character column names to their
#'   full set of observed levels (collected at model-fitting time).
#' @return A "padding" data frame with one row per combination of missing
#'   levels and a negative `rowid`, or an empty `data.frame()` when no padding
#'   is needed.
#' @keywords internal
complete_levels <- function(x, character_levels = NULL) {
    checkmate::assert_data_frame(x)

    # fixest returned an empty list()
    if (is.null(character_levels) || length(character_levels) == 0) {
        return(data.frame())
    }

    # store variables with missing factors or characters
    vault <- list()
    for (v in colnames(x)) {
        if (is.factor(x[[v]])) {
            # factor column: pad only if some declared levels are absent from the data
            if (!all(levels(x[[v]]) %in% x[[v]])) {
                vault[[v]] <- factor(levels(x[[v]]), levels = levels(x[[v]]))
            }
        } else if (is.character(x[[v]])) {
            # character column: use the full level set recorded upstream
            if (v %in% names(character_levels)) {
                vault[[v]] <- character_levels[[v]]
            }
        }
    }

    # create padding
    if (length(vault) > 0) {
        # HACK: Some models use a character variable with many levels (e.g.,
        # mixed-effects groups). This creates a massive padding dataset, and making
        # predictions can become very expensive.
        if (isTRUE(sum(sapply(vault, length)) > 100)) return(data.frame())
        padding <- utils::head(x, 1)
        data.table::setDT(padding)
        # drop the columns we are about to expand via the cross-join
        for (v in names(vault)) {
            padding[[v]] <- NULL
        }
        fun <- data.table::CJ
        gr <- do.call("fun", vault)
        padding <- cjdt(list(padding, gr))
        to_keep <- colnames(x)
        # BUG FIX: previously this subset was computed but never assigned
        # (`padding[, ..to_keep]` alone is a no-op in data.table), so columns
        # absent from `x` could survive into the padding and break downstream
        # rbind operations. Keep only the columns of `x`, in matching order.
        padding <- padding[, ..to_keep]
        setcolorder(padding, to_keep)
        data.table::setDF(padding)
    } else {
        padding <- data.frame()
    }

    # negative rowid flags padding rows so they can be dropped after prediction
    # NOTE(review): assumes `x` carries a `rowid` column at this point — TODO confirm upstream
    padding$rowid <- -1 * padding$rowid

    return(padding)
}
marginaleffects/R/methods_mhurdle.R0000644000176200001440000000143514541720224017117 0ustar liggesusers
#' @rdname get_predict
#' @export
get_predict.mhurdle <- function(model,
                                newdata = insight::get_data(model),
                                type = "response",
                                ...) {
    # mhurdle's predict() selects the quantity via `what=`, not `type=`
    out <- stats::predict(model, what = type, newdata = newdata)
    out <- data.frame(rowid = seq_len(length(out)), estimate = out)
    return(out)
}


#' @rdname get_vcov
#' @export
get_vcov.mhurdle <- function(model, vcov = NULL, ...) {
    # only flag-style vcov requests are supported for this model class
    if (!is.null(vcov) && !is.logical(vcov)) {
        insight::format_error("The `vcov` for this class of models must be TRUE or FALSE.")
    }
    out <- try(stats::vcov(model), silent = TRUE)
    if (inherits(out, "try-error")) {
        # fall back on the matrix stored in the fitted object, if any
        out <- tryCatch(model[["vcov"]], error = function(e) NULL)
    }
    return(out)
}
marginaleffects/R/methods_Rchoice.R0000644000176200001440000000031714541720224017031 0ustar liggesusers#' @rdname set_coef
#' @export
set_coef.hetprob <- function(model, coefs, ...) {
    # Rchoice stores coefficients under `estimate`; overwrite them in place
    model[["estimate"]][names(coefs)] <- coefs
    model
}

#' @rdname set_coef
#' @export
set_coef.ivpml <- set_coef.hetprob
marginaleffects/R/predictions.R0000644000176200001440000006456614554076230016276 0ustar liggesusers#' Predictions
#'
#' @description
#' Outcome predicted by a fitted model on a specified scale for a given combination of values of the predictor variables, such as their observed values, their means, or factor levels (a.k.a. "reference grid").
#'
#' * `predictions()`: unit-level (conditional) estimates.
#' * `avg_predictions()`: average (marginal) estimates.
#' #' The `newdata` argument and the `datagrid()` function can be used to control where statistics are evaluated in the predictor space: "at observed values", "at the mean", "at representative values", etc. #' #' See the predictions vignette and package website for worked examples and case studies: #' * #' * #' #' @rdname predictions #' @param model Model object #' @param variables Counterfactual variables. #' * Output: #' - `predictions()`: The entire dataset is replicated once for each unique combination of `variables`, and predictions are made. #' - `avg_predictions()`: The entire dataset is replicated, predictions are made, and they are marginalized by `variables` categories. #' - Warning: This can be expensive in large datasets. #' - Warning: Users who need "conditional" predictions should use the `newdata` argument instead of `variables`. #' * Input: #' - `NULL`: computes one prediction per row of `newdata` #' - Character vector: the dataset is replicated once of every combination of unique values of the variables identified in `variables`. #' - Named list: names identify the subset of variables of interest and their values. For numeric variables, the `variables` argument supports functions and string shortcuts: #' + A function which returns a numeric value #' + Numeric vector: Contrast between the 2nd element and the 1st element of the `x` vector. #' + "iqr": Contrast across the interquartile range of the regressor. #' + "sd": Contrast across one standard deviation around the regressor mean. #' + "2sd": Contrast across two standard deviations around the regressor mean. #' + "minmax": Contrast between the maximum and the minimum values of the regressor. #' + "threenum": mean and 1 standard deviation on both sides #' + "fivenum": Tukey's five numbers #' @param newdata Grid of predictor values at which we evaluate predictions. #' + Warning: Please avoid modifying your dataset between fitting the model and calling a `marginaleffects` function. 
This can sometimes lead to unexpected results. #' + `NULL` (default): Unit-level predictions for each observed value in the dataset (empirical distribution). The dataset is retrieved using [insight::get_data()], which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. #' + string: #' - "mean": Predictions at the Mean. Predictions when each predictor is held at its mean or mode. #' - "median": Predictions at the Median. Predictions when each predictor is held at its median or mode. #' - "marginalmeans": Predictions at Marginal Means. See Details section below. #' - "tukey": Predictions at Tukey's 5 numbers. #' - "grid": Predictions on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). #' + [datagrid()] call to specify a custom grid of regressors. For example: #' - `newdata = datagrid(cyl = c(4, 6))`: `cyl` variable equal to 4 and 6 and other regressors fixed at their means or modes. #' - See the Examples section and the [datagrid()] documentation. #' @param byfun A function such as `mean()` or `sum()` used to aggregate #' estimates within the subgroups defined by the `by` argument. `NULL` uses the #' `mean()` function. Must accept a numeric vector and return a single numeric #' value. This is sometimes used to take the sum or mean of predicted #' probabilities across outcome or predictor #' levels. See examples section. #' @param type string indicates the type (scale) of the predictions used to #' compute contrasts or slopes. This can differ based on the model #' type, but will typically be a string such as: "response", "link", "probs", #' or "zero". When an unsupported string is entered, the model-specific list of #' acceptable values is returned in an error message. When `type` is `NULL`, the #' first entry in the error message is used by default. 
#' @param transform A function applied to unit-level adjusted predictions and confidence intervals just before the function returns results. For bayesian models, this function is applied to individual draws from the posterior distribution, before computing summaries. #' #' @template deltamethod #' @template model_specific_arguments #' @template bayesian #' @template equivalence #' @template type #' @template references #' #' @return A `data.frame` with one row per observation and several columns: #' * `rowid`: row number of the `newdata` data frame #' * `type`: prediction type, as defined by the `type` argument #' * `group`: (optional) value of the grouped outcome (e.g., categorical outcome models) #' * `estimate`: predicted outcome #' * `std.error`: standard errors computed using the delta method. #' * `p.value`: p value associated to the `estimate` column. The null is determined by the `hypothesis` argument (0 by default), and p values are computed before applying the `transform` argument. For models of class `feglm`, `Gam`, `glm` and `negbin`, p values are computed on the link scale by default unless the `type` argument is specified explicitly. #' * `s.value`: Shannon information transforms of p values. How many consecutive "heads" tosses would provide the same amount of evidence (or "surprise") against the null hypothesis that the coin is fair? The purpose of S is to calibrate the analyst's intuition about the strength of evidence encoded in p against a well-known physical phenomenon. See Greenland (2019) and Cole et al. (2020). #' * `conf.low`: lower bound of the confidence interval (or equal-tailed interval for bayesian models) #' * `conf.high`: upper bound of the confidence interval (or equal-tailed interval for bayesian models) #' #' See `?print.marginaleffects` for printing options. 
#'
#' @examplesIf interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true")
#' @examples
#' # Adjusted Prediction for every row of the original dataset
#' mod <- lm(mpg ~ hp + factor(cyl), data = mtcars)
#' pred <- predictions(mod)
#' head(pred)
#'
#' # Adjusted Predictions at User-Specified Values of the Regressors
#' predictions(mod, newdata = datagrid(hp = c(100, 120), cyl = 4))
#'
#' m <- lm(mpg ~ hp + drat + factor(cyl) + factor(am), data = mtcars)
#' predictions(m, newdata = datagrid(FUN_factor = unique, FUN_numeric = median))
#'
#' # Average Adjusted Predictions (AAP)
#' library(dplyr)
#' mod <- lm(mpg ~ hp * am * vs, mtcars)
#'
#' avg_predictions(mod)
#'
#' predictions(mod, by = "am")
#'
#' # Conditional Adjusted Predictions
#' plot_predictions(mod, condition = "hp")
#'
#' # Counterfactual predictions with the `variables` argument
#' # the `mtcars` dataset has 32 rows
#'
#' mod <- lm(mpg ~ hp + am, data = mtcars)
#' p <- predictions(mod)
#' head(p)
#' nrow(p)
#'
#' # average counterfactual predictions
#' avg_predictions(mod, variables = "am")
#'
#' # counterfactual predictions obtained by replicating the entire dataset for different
#' # values of the predictors
#' p <- predictions(mod, variables = list(hp = c(90, 110)))
#' nrow(p)
#'
#'
#' # hypothesis test: is the prediction in the 1st row equal to the prediction in the 2nd row
#' mod <- lm(mpg ~ wt + drat, data = mtcars)
#'
#' predictions(
#'     mod,
#'     newdata = datagrid(wt = 2:3),
#'     hypothesis = "b1 = b2")
#'
#' # same hypothesis test using row indices
#' predictions(
#'     mod,
#'     newdata = datagrid(wt = 2:3),
#'     hypothesis = "b1 - b2 = 0")
#'
#' # same hypothesis test using numeric vector of weights
#' predictions(
#'     mod,
#'     newdata = datagrid(wt = 2:3),
#'     hypothesis = c(1, -1))
#'
#' # two custom contrasts using a matrix of weights
#' lc <- matrix(c(
#'     1, -1,
#'     2, 3),
#'     ncol = 2)
#' predictions(
#'     mod,
#'     newdata = datagrid(wt = 2:3),
#'     hypothesis = lc)
#'
#'
#' # `by` argument
#' mod <- lm(mpg ~ hp *
am * vs, data = mtcars) #' predictions(mod, by = c("am", "vs")) #' #' library(nnet) #' nom <- multinom(factor(gear) ~ mpg + am * vs, data = mtcars, trace = FALSE) #' #' # first 5 raw predictions #' predictions(nom, type = "probs") |> head() #' #' # average predictions #' avg_predictions(nom, type = "probs", by = "group") #' #' by <- data.frame( #' group = c("3", "4", "5"), #' by = c("3,4", "3,4", "5")) #' #' predictions(nom, type = "probs", by = by) #' #' # sum of predicted probabilities for combined response levels #' mod <- multinom(factor(cyl) ~ mpg + am, data = mtcars, trace = FALSE) #' by <- data.frame( #' by = c("4,6", "4,6", "8"), #' group = as.character(c(4, 6, 8))) #' predictions(mod, newdata = "mean", byfun = sum, by = by) #' #' @inheritParams slopes #' @inheritParams comparisons #' @export predictions <- function(model, newdata = NULL, variables = NULL, vcov = TRUE, conf_level = 0.95, type = NULL, by = FALSE, byfun = NULL, wts = NULL, transform = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, numderiv = "fdforward", ...) { dots <- list(...) if ("transform_post" %in% names(dots)) { transform <- dots[["transform_post"]] insight::format_warning("The `transform_post` argument is deprecated. Use `transform` instead.") } # very early, before any use of newdata # if `newdata` is a call to `typical` or `counterfactual`, insert `model` scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) if ("cross" %in% names(dots)) { insight::format_error("The `cross` argument is not available in this function.") } # extracting modeldata repeatedly is slow. # checking dots allows marginalmeans to pass modeldata to predictions. 
if (isTRUE(by)) { modeldata <- get_modeldata(model, additional_variables = FALSE, modeldata = dots[["modeldata"]], wts = wts) } else { modeldata <- get_modeldata(model, additional_variables = by, modeldata = dots[["modeldata"]], wts = wts) } # build call: match.call() doesn't work well in *apply() # after sanitize_newdata_call call_attr <- c(list( name = "predictions", model = model, newdata = newdata, variables = variables, vcov = vcov, conf_level = conf_level, type = type, by = by, byfun = byfun, wts = wts, transform = transform, hypothesis = hypothesis, df = df), dots) if ("modeldata" %in% names(dots)) { call_attr[["modeldata"]] <- modeldata } call_attr <- do.call("call", call_attr) # sanity checks sanity_dots(model = model, ...) numderiv <- sanitize_numderiv(numderiv) sanity_df(df, newdata) sanity_equivalence_p_adjust(equivalence, p_adjust) model <- sanitize_model( model = model, newdata = newdata, wts = wts, vcov = vcov, calling_function = "predictions", ...) tmp <- sanitize_hypothesis(hypothesis, ...) hypothesis <- tmp$hypothesis hypothesis_null <- tmp$hypothesis_null # multiple imputation if (inherits(model, c("mira", "amest"))) { out <- process_imputation(model, call_attr) return(out) } # if type is NULL, we backtransform if relevant type_string <- sanitize_type(model = model, type = type, calling_function = "predictions") if (identical(type_string, "invlink(link)")) { if (is.null(hypothesis)) { type_call <- "link" } else { type_call <- "response" type_string <- "response" insight::format_warning('The `type="invlink"` argument is not available unless `hypothesis` is `NULL` or a single number. The value of the `type` argument was changed to "response" automatically. 
To suppress this warning, use `type="response"` explicitly in your function call.') } } else { type_call <- type_string } # save the original because it gets converted to a named list, which breaks # user-input sanity checks transform_original <- transform transform <- sanitize_transform(transform) conf_level <- sanitize_conf_level(conf_level, ...) newdata <- sanitize_newdata( model = model, newdata = newdata, modeldata = modeldata, by = by, wts = wts) # after sanitize_newdata sanity_by(by, newdata) # after sanity_by newdata <- dedup_newdata( model = model, newdata = newdata, wts = wts, by = by, byfun = byfun) if (is.null(wts) && "marginaleffects_wts_internal" %in% colnames(newdata)) { wts <- "marginaleffects_wts_internal" } # analogous to comparisons(variables=list(...)) if (!is.null(variables)) { args <- list( "model" = model, "newdata" = newdata, "grid_type" = "counterfactual") tmp <- sanitize_variables( variables = variables, model = model, newdata = newdata, modeldata = modeldata, calling_function = "predictions" )$conditional for (v in tmp) { args[[v$name]] <- v$value } newdata <- do.call("datagrid", args) # the original rowids are no longer valid after averaging et al. 
newdata[["rowid"]] <- NULL } character_levels <- attr(newdata, "newdata_character_levels") # trust newdata$rowid if (!"rowid" %in% colnames(newdata)) { newdata[["rowid"]] <- seq_len(nrow(newdata)) } # mlogit models sometimes returns an `idx` column that is impossible to `rbind` if (inherits(model, "mlogit") && inherits(newdata[["idx"]], "idx")) { newdata[["idx"]] <- NULL } # padding destroys `newdata` attributes, so we save them newdata_attr_cache <- get_marginaleffects_attributes(newdata, include_regex = "^newdata") # mlogit uses an internal index that is very hard to track, so we don't # support `newdata` and assume no padding the `idx` column is necessary for # `get_predict` but it breaks binding, so we can't remove it in # sanity_newdata and we can't rbind it with padding # pad factors: `model.matrix` breaks when factor levels are missing if (inherits(model, "mlogit")) { padding <- data.frame() } else { padding <- complete_levels(newdata, character_levels) if (nrow(padding) > 0) { newdata <- rbindlist(list(padding, newdata)) } } if (is.null(by) || isFALSE(by)) { vcov_tmp <- vcov } else { vcov_tmp <- FALSE } ############### sanity checks are over # Bootstrap out <- inferences_dispatch( INF_FUN = predictions, model = model, newdata = newdata, vcov = vcov, variables = variables, type = type_call, by = by, conf_level = conf_level, byfun = byfun, wts = wts, transform = transform_original, hypothesis = hypothesis, ...) 
if (!is.null(out)) { return(out) } # pre-building the model matrix can speed up repeated predictions newdata <- get_model_matrix_attribute(model, newdata) # main estimation args <- list( model = model, newdata = newdata, type = type_call, hypothesis = hypothesis, wts = wts, by = by, byfun = byfun) args <- utils::modifyList(args, dots) tmp <- do.call(get_predictions, args) # two cases when tmp is a data.frame # get_predict gets us rowid with the original rows if (inherits(tmp, "data.frame")) { setnames(tmp, old = c("Predicted", "SE", "CI_low", "CI_high"), new = c("estimate", "std.error", "conf.low", "conf.high"), skip_absent = TRUE) } else { tmp <- data.frame(newdata$rowid, type, tmp) colnames(tmp) <- c("rowid", "estimate") if ("rowidcf" %in% colnames(newdata)) { tmp[["rowidcf"]] <- newdata[["rowidcf"]] } } if (!"rowid" %in% colnames(tmp) && nrow(tmp) == nrow(newdata)) { tmp$rowid <- newdata$rowid } # degrees of freedom if (isTRUE(vcov == "satterthwaite") || isTRUE(vcov == "kenward-roger")) { df <- tryCatch( # df_per_observation is an undocumented argument introduced in 0.18.4.7 to preserve backward incompatibility insight::get_df(model, data = newdata, type = vcov, df_per_observation = TRUE), error = function(e) NULL) if (isTRUE(length(df) == nrow(tmp))) { tmp$df <- df } } # bayesian posterior draws draws <- attr(tmp, "posterior_draws") # bayesian: unpad draws (done in get_predictions for frequentist) if (!is.null(draws) && "rowid" %in% colnames(tmp)) { draws <- draws[tmp$rowid > 0, , drop = FALSE] } V <- NULL J <- NULL if (!isFALSE(vcov)) { V <- get_vcov(model, vcov = vcov, type = type, ...) # Delta method if (!"std.error" %in% colnames(tmp) && is.null(draws)) { if (isTRUE(checkmate::check_matrix(V))) { # vcov = FALSE to speed things up fun <- function(...) 
{ get_predictions(..., wts = wts, verbose = FALSE)$estimate } args <- list( model, newdata = newdata, vcov = V, type = type_call, FUN = fun, J = J, hypothesis = hypothesis, by = by, byfun = byfun, numderiv = numderiv) args <- utils::modifyList(args, dots) se <- do.call(get_se_delta, args) if (is.numeric(se) && length(se) == nrow(tmp)) { J <- attr(se, "jacobian") attr(se, "jacobian") <- NULL tmp[["std.error"]] <- se } } } tmp <- get_ci( tmp, conf_level = conf_level, vcov = vcov, draws = draws, estimate = "estimate", null_hypothesis = hypothesis_null, df = df, model = model, p_adjust = p_adjust, ...) } out <- data.table::data.table(tmp) data.table::setDT(newdata) # expensive: only do this inside jacobian if necessary if (!inherits(model, "mclogit")) { # weird case. probably a cleaner way but lazy now... out <- merge_by_rowid(out, newdata) } # save weights as attribute and not column marginaleffects_wts_internal <- out[["marginaleffects_wts_internal"]] out[["marginaleffects_wts_internal"]] <- NULL # bycols if (isTRUE(checkmate::check_data_frame(by))) { bycols <- setdiff(colnames(by), "by") } else { bycols <- by } # sort rows: do NOT sort rows because it breaks hypothesis b1, b2, b3 indexing. # clean columns stubcols <- c( "rowid", "rowidcf", "term", "group", "hypothesis", bycols, "estimate", "std.error", "statistic", "p.value", "s.value", "conf.low", "conf.high", "marginaleffects_wts", sort(grep("^predicted", colnames(newdata), value = TRUE))) cols <- intersect(stubcols, colnames(out)) cols <- unique(c(cols, colnames(out))) out <- out[, ..cols] attr(out, "posterior_draws") <- draws # equivalence tests out <- equivalence(out, equivalence = equivalence, df = df, ...) 
# after rename to estimate / after assign draws if (identical(type_string, "invlink(link)")) { linv <- tryCatch(insight::link_inverse(model), error = function(e) identity) out <- backtransform(out, transform = linv) } out <- backtransform(out, transform = transform) data.table::setDF(out) class(out) <- c("predictions", class(out)) out <- set_marginaleffects_attributes(out, attr_cache = newdata_attr_cache) attr(out, "model") <- model attr(out, "type") <- type_string attr(out, "model_type") <- class(model)[1] attr(out, "vcov.type") <- get_vcov_label(vcov) attr(out, "jacobian") <- J attr(out, "vcov") <- V attr(out, "newdata") <- newdata attr(out, "weights") <- marginaleffects_wts_internal attr(out, "conf_level") <- conf_level attr(out, "by") <- by attr(out, "call") <- call_attr attr(out, "transform_label") <- names(transform)[1] attr(out, "transform") <- transform[[1]] # save newdata for use in recall() attr(out, "newdata") <- newdata if (inherits(model, "brmsfit")) { insight::check_if_installed("brms") attr(out, "nchains") <- brms::nchains(model) } if ("group" %in% names(out) && all(out$group == "main_marginaleffect")) { out$group <- NULL } return(out) } # wrapper used only for standard_error_delta get_predictions <- function(model, newdata, type, by = NULL, byfun = byfun, hypothesis = NULL, verbose = TRUE, wts = NULL, ...) { out <- myTryCatch(get_predict( model, newdata = newdata, type = type, ...)) if (inherits(out$value, "data.frame")) { out <- out$value } else { # tidymodels if (inherits(out$error, "rlang_error") && isTRUE(grepl("the object should be", out$error$message))) { insight::format_error(out$error$message) } msg <- "Unable to compute predicted values with this model. You can try to supply a different dataset to the `newdata` argument." 
if (!is.null(out$error)) { msg <- c(paste(msg, "This error was also raised:"), "", out$error$message) } if (inherits(out$value, "try-error")) { msg <- c(paste(msg, "", "This error was also raised:"), "", as.character(out$value)) } msg <- c(msg, "", "Bug Tracker: https://github.com/vincentarelbundock/marginaleffects/issues") insight::format_error(msg) } if (!"rowid" %in% colnames(out) && "rowid" %in% colnames(newdata) && nrow(out) == nrow(newdata)) { out$rowid <- newdata$rowid } # extract attributes before setDT draws <- attr(out, "posterior_draws") data.table::setDT(out) # unpad factors before averaging # trust `newdata` rowid more than `out` because sometimes `get_predict()` will add a positive index even on padded data # HACK: the padding indexing rowid code is still a mess if ("rowid" %in% colnames(newdata) && nrow(newdata) == nrow(out)) { out$rowid <- newdata$rowid } if ("rowid" %in% colnames(out)) { idx <- out$rowid > 0 out <- out[idx, drop = FALSE] draws <- draws[idx, , drop = FALSE] } # expensive: only do this inside the jacobian if necessary if (!is.null(wts) || !isTRUE(checkmate::check_flag(by, null.ok = TRUE)) || inherits(model, "mclogit")) { # not sure why sorting is so finicky here out <- merge_by_rowid(out, newdata) } # by: auto group if (isTRUE(checkmate::check_character(by))) { by <- intersect(c("group", by), colnames(out)) } # averaging by groups out <- get_by( out, draws = draws, newdata = newdata, by = by, byfun = byfun, verbose = verbose, ...) draws <- attr(out, "posterior_draws") # hypothesis tests using the delta method out <- get_hypothesis(out, hypothesis = hypothesis, by = by) # WARNING: we cannot sort rows at the end because `get_hypothesis()` is # applied in the middle, and it must already be sorted in the final order, # otherwise, users cannot know for sure what is going to be the first and # second rows, etc. 
out <- sort_columns(out, newdata, by) return(out) } #' Average predictions #' @describeIn predictions Average predictions #' @export #' avg_predictions <- function(model, newdata = NULL, variables = NULL, vcov = TRUE, conf_level = 0.95, type = NULL, by = TRUE, byfun = NULL, wts = NULL, transform = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, numderiv = "fdforward", ...) { # order of the first few paragraphs is important # if `newdata` is a call to `typical` or `counterfactual`, insert `model` scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) # group by focal variable automatically unless otherwise stated if (isTRUE(by)) { if (isTRUE(checkmate::check_character(variables))) { by <- variables } else if (isTRUE(checkmate::check_list(variables, names = "named"))) { by <- names(variables) } } # Bootstrap out <- inferences_dispatch( INF_FUN = avg_predictions, model = model, newdata = newdata, vcov = vcov, variables = variables, type = type, by = by, conf_level = conf_level, byfun = byfun, wts = wts, transform = transform, hypothesis = hypothesis, ...) if (!is.null(out)) { return(out) } out <- predictions( model = model, newdata = newdata, variables = variables, vcov = vcov, conf_level = conf_level, type = type, by = by, byfun = byfun, wts = wts, transform = transform, hypothesis = hypothesis, equivalence = equivalence, p_adjust = p_adjust, df = df, ...) return(out) } marginaleffects/R/get_se_delta.R0000644000176200001440000001112014560035476016354 0ustar liggesusersget_se_delta_marginalmeans <- function(model, variables, newdata, type, cross = FALSE, ...) { get_marginalmeans( model = model, variables = variables, newdata = newdata, type = type, cross = cross, ... )$estimate } get_se_delta_contrasts <- function(model, variables, newdata, type, hypothesis, lo, hi, original, cross, ...) 
{ get_contrasts(model, newdata = newdata, variables = variables, type = type, hypothesis = hypothesis, lo = lo, hi = hi, original = original, cross = cross, verbose = FALSE, deltamethod = TRUE, ... )$estimate } #' Compute standard errors using the delta method #' #' @inheritParams slopes #' @param FUN a function which accepts a `model` and other inputs and returns a #' vector of estimates (marginal effects, marginal means, etc.) #' @param index data.frame over which we aggregate J_mean (matches tidy() output) #' @return vector of standard errors #' @noRd get_se_delta <- function(model, vcov, FUN, type = NULL, newdata = NULL, index = NULL, eps = NULL, J = NULL, hypothesis = NULL, numderiv = NULL, ...) { # delta method does not work for these models bad <- c("brmsfit", "stanreg", "bart") if (any(bad %in% class(model))) { return(NULL) } coefs <- get_coef(model, ...) # TODO: this is a terrible sanity check # some vcov methods return an unnamed matrix, some have duplicate names flag <- anyDuplicated(colnames(vcov)) == 0 || anyDuplicated(names(coefs)) == 0 if (flag && !is.null(dimnames(vcov)) && all(names(coefs) %in% colnames(vcov))) { bnames <- intersect(names(coefs), colnames(vcov)) vcov <- vcov[bnames, bnames, drop = FALSE] colnames(vcov) <- row.names(vcov) <- names(coefs) coefs <- coefs[bnames] } # input: named vector of coefficients # output: gradient inner <- function(x) { names(x) <- names(coefs) model_tmp <- set_coef(model, x, ...) # do not pass NULL arguments. Important for `deltam` to allow users to supply FUN without ... 
args <- c(list(model = model_tmp, hypothesis = hypothesis), list(...)) if (inherits(model, "gamlss")) args[["safe"]] <- FALSE if (!is.null(eps)) args[["eps"]] <- eps if (!is.null(type)) args[["type"]] <- type if (!is.null(newdata)) args[["newdata"]] <- newdata if (!is.null(J)) args[["J"]] <- J if (!is.null(eps)) args[["eps"]] <- eps g <- do.call("FUN", args) return(g) } if (is.null(J) || !is.null(hypothesis)) { args <- list( func = inner, x = coefs, numderiv = numderiv) J <- do.call("get_jacobian", args) colnames(J) <- names(get_coef(model, ...)) } # align J and V: This might be a problematic hack, but I have not found examples yet. V <- vcov if (!isTRUE(ncol(J) == ncol(V))) { beta <- get_coef(model, ...) # Issue #718: ordinal::clm in test-pkg-ordinal.R if (anyNA(beta) && anyDuplicated(names(beta)) && ncol(J) > ncol(V) && ncol(J) == length(beta) && length(stats::na.omit(beta)) == ncol(V)) { J <- J[, !is.na(beta), drop = FALSE] } else { cols <- intersect(colnames(J), colnames(V)) if (length(cols) == 0) { insight::format_error("The jacobian does not match the variance-covariance matrix.") } V <- V[cols, cols, drop = FALSE] J <- J[, cols, drop = FALSE] } } # Var(dydx) = J Var(beta) J' # computing the full matrix is memory-expensive, and we only need the diagonal # algebra trick: https://stackoverflow.com/a/42569902/342331 se <- sqrt(rowSums(tcrossprod(J, V) * J)) se[se == 0] <- NA_real_ attr(se, "jacobian") <- J return(se) } marginaleffects/R/sanity.R0000644000176200001440000000467114541720224015250 0ustar liggesuserssanity_equivalence_p_adjust <- function(equivalence, p_adjust) { if (!is.null(equivalence) && !is.null(p_adjust)) { insight::format_error("The `equivalence` and `p_adjust` arguments cannot be used together.") } checkmate::assert_choice(p_adjust, choices = stats::p.adjust.methods, null.ok = TRUE) } sanity_df <- function(df, x) { checkmate::assert( checkmate::check_number(df, lower = 1), checkmate::check_numeric(df, len = nrow(x))) } sanity_predict_vector 
<- function(pred, model, newdata, type) { if (!isTRUE(checkmate::check_atomic_vector(pred)) && !isTRUE(checkmate::check_array(pred, d = 1))) { msg <- sprintf( '`predict(model, type = "%s")` was called on a model of class `%s`, but this command did not produce the expected outcome: A numeric vector of length %s. This can sometimes happen when users try compute a marginal effect for some models with grouped or multivariate outcome which are not supported yet by `marginaleffects` package. Please consult your modeling package documentation to learn what alternative `type` arguments are accepted by the `predict` method, or file a feature request on Github: https://github.com/vincentarelbundock/marginaleffects/issues', type, class(model)[1], nrow(newdata)) stop(msg, call. = FALSE) } } sanity_predict_numeric <- function(pred, model, newdata, type) { if (!isTRUE(checkmate::check_numeric(pred))) { msg <- sprintf( '`predict(model, type = "%s")` was called on a model of class `%s`, but this command did not produce the expected outcome: A numeric vector of length %s. This can sometimes happen when users try compute a marginal effect for an outcome type which is unsupported, or which cannot be differentiated. Please consult your modeling package documentation to learn what alternative `type` arguments are accepted by the `predict` method.', type, class(model)[1], nrow(newdata)) stop(msg, call. 
= FALSE) } } # OBSOLETE CHECKS KEPT FOR POSTERITY # sanitize_return_data <- function() { # return_data <- getOption("marginaleffects_return_data", default = TRUE) # checkmate::assert_flag(return_data) # return(return_data) # } # sanitize_numDeriv_method <- function() { # numDeriv_method <- getOption("marginaleffects_numDeriv_method", default = "simple") # checkmate::assert_choice(numDeriv_method, choices = c("simple", "complex", "Richardson")) # return(numDeriv_method) # } marginaleffects/R/methods_scam.R0000644000176200001440000000221314541720224016375 0ustar liggesusers#' @rdname set_coef #' @export set_coef.scam <- function(model, coefs, ...) { # in basic model classes coefficients are named vector model[["coefficients.t"]][names(coefs)] <- coefs model } #' @rdname get_coef #' @export get_coef.scam <- function(model, ...) { model$coefficients.t } #' @rdname get_vcov #' @export get_vcov.scam <- function(model, vcov = NULL, ...) { if (isTRUE(checkmate::check_matrix(vcov))) { return(vcov) } # email from developer Natalya Pya # "one of the elements of the returned 'scam' object is 'Vp.t' which is an # estimated covariance matrix for the reparametrized parameters, # 'model$coefficients.t'." if (!is.null(vcov) && !is.logical(vcov)) { stop("The `vcov` argument is not supported for models of class `scam`.", .call = FALSE) } V <- model$Vp.t b <- model$coefficients.t if (length(b) != nrow(V)) { stop("The size of the variance-covariance matrix does not match the length of the coefficients vector.", call. = FALSE) } colnames(V) <- row.names(V) <- names(b) return(V) } marginaleffects/R/by.R0000644000176200001440000000522314541720224014345 0ustar liggesusersget_by <- function( estimates, draws, newdata, by, byfun = NULL, verbose = TRUE, ...) 
{ if (is.null(by) || isFALSE(by)) { out <- estimates attr(out, "posterior_draws") <- draws return(out) } missing <- setdiff(setdiff(colnames(by), "by"), colnames(estimates)) if (length(missing) > 0) { idx <- intersect(c("rowid", "rowidcf", missing), colnames(newdata)) estimates <- merge(estimates, newdata[, idx], sort = FALSE, all.x = TRUE) } if (isTRUE(by)) { regex <- "^term$|^group$|^contrast$|^contrast_" bycols <- grep(regex, colnames(estimates), value = TRUE) } else if (isTRUE(checkmate::check_character(by))) { bycols <- by } else if (isTRUE(checkmate::check_data_frame(by))) { idx <- setdiff(intersect(colnames(estimates), colnames(by)), "by") # harmonize column types for (v in colnames(by)) { if (isTRUE(is.character(estimates[[v]])) && isTRUE(is.numeric(by[[v]]))) { by[[v]] <- as.character(by[[v]]) } else if (isTRUE(is.numeric(estimates[[v]])) && isTRUE(is.character(by[[v]]))) { by[[v]] <- as.numeric(by[[v]]) } } estimates[by, by := by, on = idx] bycols <- "by" } if ("by" %in% colnames(estimates) && anyNA(estimates[["by"]])) { msg <- insight::format_message("The `by` data.frame does not cover all combinations of response levels and/or predictors. Some estimates will not be included in the aggregation.") if (isTRUE(verbose)) warning(msg, call. 
= FALSE) tmp <- !is.na(estimates[["by"]]) draws <- draws[tmp, drop = FALSE] estimates <- estimates[tmp, drop = FALSE] } bycols <- intersect(unique(c("term", bycols)), colnames(estimates)) # bayesian if (!is.null(draws)) { estimates <- average_draws( data = estimates, index = bycols, draws = draws, byfun = byfun) # frequentist } else { if (!is.null(byfun)) { estimates <- estimates[, .(estimate = byfun(estimate)), by = bycols] } else if ("marginaleffects_wts_internal" %in% colnames(newdata)) { estimates <- estimates[, .(estimate = stats::weighted.mean( estimate, marginaleffects_wts_internal, na.rm = TRUE)), by = bycols] } else { estimates <- estimates[, .(estimate = mean(estimate, na.rm = TRUE)), by = bycols] } } return(estimates) }marginaleffects/R/methods_biglm.R0000644000176200001440000000203514541720224016546 0ustar liggesusers#' @rdname get_predict #' @export get_predict.biglm <- function(model, newdata = insight::get_data(model), type = "response", ...) { type <- sanitize_type(model, type, calling_function = "predictions") type_base <- unname(type) out <- stats::predict( model, newdata = newdata, type = type) out <- as.vector(out) out <- data.frame( rowid = seq_along(out), estimate = out) return(out) } #' @rdname get_vcov #' @export get_vcov.biglm <- function(model, vcov = NULL, ...) { if (!isFALSE(vcov)) { insight::format_warning(c("The `vcov` argument is not supported for this model type. Set `vcov=FALSE` to silence this warning, and visit this link to learn why standard errors for this model are not yet supported and how you can help:", "https://github.com/vincentarelbundock/marginaleffects/issues/387")) } return(FALSE) } marginaleffects/R/sanitize_hypothesis.R0000644000176200001440000000174614541720224020046 0ustar liggesuserssanitize_hypothesis <- function(hypothesis, ...) 
{ checkmate::assert( checkmate::check_character(hypothesis, pattern = "="), checkmate::check_choice(hypothesis, choices = c("pairwise", "reference", "sequential", "revpairwise", "revreference", "revsequential")), checkmate::check_numeric(hypothesis), checkmate::check_matrix(hypothesis), checkmate::check_null(hypothesis)) hnull <- 0 if (isTRUE(checkmate::check_character(hypothesis, pattern = "="))) { out <- paste(gsub("=", "-(", hypothesis), ")") attr(out, "label") <- hypothesis hypothesis <- out } else if (isTRUE(checkmate::check_matrix(hypothesis))) { attr(hypothesis, "label") <- colnames(hypothesis) } else if (isTRUE(checkmate::check_numeric(hypothesis, len = 1))) { hnull <- hypothesis hypothesis <- NULL } out <- list( "hypothesis" = hypothesis, "hypothesis_null" = hnull ) return(out) } marginaleffects/R/get_contrast_data_logical.R0000644000176200001440000000153414541720224021113 0ustar liggesusersget_contrast_data_logical <- function(model, newdata, variable, ...) { # custom data frame or function if (isTRUE(checkmate::check_function(variable$value)) || isTRUE(checkmate::check_data_frame(variable$value))) { out <- contrast_categories_custom(variable, newdata) return(out) } lo <- hi <- newdata lo[[variable$name]] <- FALSE hi[[variable$name]] <- TRUE lab <- suppressWarnings(tryCatch( sprintf(variable$label, TRUE, FALSE), error = function(e) variable$label)) out <- list(rowid = seq_len(nrow(newdata)), lo = lo, hi = hi, original = newdata, ter = rep(variable$name, nrow(newdata)), lab = rep(lab, nrow(newdata))) return(out) } marginaleffects/R/posterior_draws.R0000644000176200001440000001024214541720224017156 0ustar liggesusers#' Extract Posterior Draws or Bootstrap Resamples from `marginaleffects` Objects #' #' @param x An object produced by a `marginaleffects` package function, such as `predictions()`, `avg_slopes()`, `hypotheses()`, etc. 
#' @param shape string indicating the shape of the output format: #' * "long": long format data frame #' * "DxP": Matrix with draws as rows and parameters as columns #' * "PxD": Matrix with draws as rows and parameters as columns #' * "rvar": Random variable datatype (see `posterior` package documentation). #' @return A data.frame with `drawid` and `draw` columns. #' @export posterior_draws <- function(x, shape = "long") { checkmate::assert_choice(shape, choices = c("long", "DxP", "PxD", "rvar")) # tidy.comparisons() sometimes already saves draws in a nice long format draws <- attr(x, "posterior_draws") if (inherits(draws, "posterior_draws")) return(draws) if (is.null(attr(x, "posterior_draws"))) { warning('This object does not include a "posterior_draws" attribute. The `posterior_draws` function only supports bayesian models produced by the `marginaleffects` or `predictions` functions of the `marginaleffects` package.', call. = FALSE) return(x) } if (nrow(draws) != nrow(x)) { stop('The number of parameters in the object does not match the number of parameters for which posterior draws are available.', call. 
= FALSE) } if (shape %in% c("PxD", "DxP")) { row.names(draws) <- paste0("b", seq_len(nrow(draws))) colnames(draws) <- paste0("draw", seq_len(ncol(draws))) } if (shape == "PxD") { return(draws) } if (shape == "DxP") { return(t(draws)) } if (shape == "rvar") { insight::check_if_installed("posterior") draws <- t(draws) if (!is.null(attr(x, "nchains"))) { x[["rvar"]] <- posterior::rvar(draws, nchains = attr(x, "nchains")) } else { x[["rvar"]] <- posterior::rvar(draws) } return(x) } if (shape == "long") { draws <- data.table(draws) setnames(draws, as.character(seq_len(ncol(draws)))) for (v in colnames(x)) { draws[[v]] <- x[[v]] } out <- melt( draws, id.vars = colnames(x), variable.name = "drawid", value.name = "draw") cols <- unique(c("drawid", "draw", "rowid", colnames(out))) cols <- intersect(cols, colnames(out)) setcolorder(out, cols) data.table::setDF(out) return(out) } } average_draws <- function(data, index, draws, byfun = NULL) { insight::check_if_installed("collapse", minimum_version = "1.9.0") w <- data[["marginaleffects_wts_internal"]] if (all(is.na(w))) { w <- NULL } if (is.null(index)) { index <- intersect(colnames(data), "type") } if (length(index) > 0) { g <- collapse::GRP(data, by = index) if (is.null(byfun)) { draws <- collapse::fmean( draws, g = g, w = w, drop = FALSE) } else { draws <- collapse::BY( draws, g = g, FUN = byfun, drop = FALSE) } out <- data.table( g[["groups"]], average = collapse::dapply(draws, MARGIN = 1, FUN = collapse::fmedian)) } else { if (is.null(byfun)) { draws <- collapse::fmean( draws, w = w, drop = FALSE) } else { draws <- collapse::BY( draws, g = g, FUN = byfun, drop = FALSE) } out <- data.table(average = collapse::dapply(draws, MARGIN = 1, FUN = collapse::fmedian)) } setnames(out, old = "average", new = "estimate") attr(out, "posterior_draws") <- draws return(out) } #' `posteriordraws()` is an alias to `posterior_draws()` #' #' This alias is kept for backward compatibility and because some users may prefer that name. 
#' #' @inherit posterior_draws #' @keywords internal #' @export posteriordraws <- posterior_drawsmarginaleffects/R/methods_nlme.R0000644000176200001440000000022614541720224016407 0ustar liggesusers#' @rdname set_coef #' @export set_coef.lme <- function(model, coefs, ...) { model[["coefficients"]][["fixed"]][names(coefs)] <- coefs model }marginaleffects/R/settings.R0000644000176200001440000000305314541720224015572 0ustar liggesusersmarginaleffects_settings <- new.env() settings_cache <- function(setti) { out <- list() for (s in setti) { out[[s]] <- settings_get(s) } return(out) } settings_restore <- function(cache) { for (n in names(cache)) { settings_set(n, cache[[n]]) } } settings_init <- function(settings = NULL) { settings_rm() default_settings <- list( marginaleffects_safefun_return1 = FALSE ) checkmate::assert_list(settings, null.ok = TRUE, names = "unique") if (!is.null(settings)) { settings <- c(settings, default_settings) } for (i in seq_along(settings)) { settings_set(names(settings)[i], settings[[i]]) } } settings_get <- function(name) { if (name %in% names(marginaleffects_settings)) { get(name, envir = marginaleffects_settings) } else { NULL } } settings_set <- function(name, value) { assign(name, value = value, envir = marginaleffects_settings) } settings_rm <- function(name = NULL) { if (is.null(name)) { rm(list = names(marginaleffects_settings), envir = marginaleffects_settings) } else if ("name" %in% names(marginaleffects_settings)) { rm(list = name, envir = marginaleffects_settings) } } settings_equal <- function(name, comparison) { k <- settings_get(name) if (!is.null(k) && length(comparison) == 1 && k == comparison) { out <- TRUE } else if (!is.null(k) && length(comparison) > 1 && k %in% comparison) { out <- TRUE } else { out <- FALSE } return(out) } marginaleffects/R/methods_sampleSelection.R0000644000176200001440000000142014541720224020600 0ustar liggesusers# TODO: heckit standard errors are not available because `vcov` is block diagonal with 
NAs #' @rdname get_coef #' @export get_coef.selection <- function(model, ...) { # sampleSelection::selection if (as.list(model$call)[[1]] == "selection") { out <- model$estimate # sampleSelection::heckit } else if (as.list(model$call)[[1]] == "heckit") { out <- model$coefficients } return(out) } #' @rdname set_coef #' @export set_coef.selection <- function(model, coefs, ...) { # sampleSelection::selection if (as.list(model$call)[[1]] == "selection") { model[["estimate"]] <- coefs # sampleSelection::heckit } else if (as.list(model$call)[[1]] == "heckit") { model[["coefficients"]] <- coefs } return(model) } marginaleffects/R/get_model_matrix.R0000644000176200001440000000134114541720224017253 0ustar liggesusers#' Get a named model matrix #' #' @inheritParams slopes #' @rdname get_model_matrix #' @keywords internal #' @export get_model_matrix <- function(model, newdata) { UseMethod("get_model_matrix", model) } #' @rdname get_model_matrix #' @keywords internal #' @export get_model_matrix.default <- function(model, newdata) { # faster if (class(model)[1] %in% c("lm", "glm")) { out <- hush(stats::model.matrix(model, data = newdata)) # more general } else { out <- hush(insight::get_modelmatrix(model, data = newdata)) } beta <- get_coef(model) if (!isTRUE(nrow(out) == nrow(newdata)) || !isTRUE(ncol(out) == length(beta))) { return(NULL) } else { return(out) } }marginaleffects/R/RcppExports.R0000644000176200001440000000036114541720224016222 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 eigenMatMult <- function(A, B) { .Call('_marginaleffects_eigenMatMult', PACKAGE = 'marginaleffects', A, B) } marginaleffects/R/methods_gamlss.R0000644000176200001440000002400214541720224016740 0ustar liggesusers#' @include get_coef.R #' @rdname get_coef #' @export get_coef.gamlss <- function(model, ...){ dots <- list(...) 
if (is.null(dots$what)) stop("Argument `what` indicating the parameter of interest is missing.") out <- stats::coef(model, what = dots$what) return(out) } #' @include get_predict.R #' @rdname get_predict #' @export get_predict.gamlss <- function( model, newdata = insight::get_data(model), type = "response", ...) { # Get predictions dots <- list(...) if (is.null(dots$what)) { msg <- sprintf( "Please specifiy a `what` argument with one of these values: %s", paste(model$parameter, collapse = ", ")) stop(msg, call. = FALSE) } # if (!isTRUE(checkmate::check_flag(vcov, null.ok = TRUE))) { # msg <- "The `vcov` argument is not supported for models of this class." # stop(msg, call. = FALSE) # } # predict.gamlss() breaks when `newdata` includes unknown variables origindata <- insight::get_data(model) originvars <- colnames(origindata) data.table::setDF(newdata) index <- which(colnames(newdata) %in% originvars) tmp <- newdata[, index] hush(out <- predict_gamlss(model, newdata = tmp, type = type, data = origindata, ...)) if ("rowid" %in% colnames(newdata)) { out <- data.frame(rowid = newdata$rowid, estimate = out) } else { out <- data.frame(rowid = seq_along(out), estimate = out) } return(out) } #' @include get_vcov.R #' @rdname get_vcov #' @export get_vcov.gamlss <- function(model, ...){ dots <- list(...) if (is.null(dots$what)) { msg <- sprintf( "Please specifiy a `what` argument with one of these values: %s", paste(model$parameter, collapse = ", ")) stop(msg, call. = FALSE) } p <- match(dots$what, model$parameters) vc <- stats::vcov(model, what = dots$what) index <- which(cumsum(rownames(vc) == "(Intercept)") == p) out <- vc[index, index, drop = FALSE] return(out) } #' @include set_coef.R #' @rdname set_coef #' @export set_coef.gamlss <- function(model, coefs, ...){ dots <- list(...) 
if (is.null(dots$what)) stop("Argument `what` indicating the parameter of interest is missing.") p <- paste0(dots$what, ".coefficients") model[[p]][names(coefs)] <- coefs out <- model return(out) } # Modified predict method from the R-package gamlss # Renamed with underscore to avoid conflict with exported method from the package. predict_gamlss <- function(object, what = c("mu", "sigma", "nu", "tau"), parameter = NULL, newdata = NULL, type = c("link", "response", "terms"), safe = TRUE, terms = NULL, se.fit = FALSE, data = NULL, ...) { concat <- function(..., names = NULL) { tmp <- list(...) if (is.null(names)) { names <- names(tmp) } if (is.null(names)) { names <- sapply(as.list(match.call()), deparse)[-1] } if (any(sapply(tmp, is.matrix) | sapply(tmp, is.data.frame))) { len <- sapply(tmp, function(x) c(dim(x), 1)[1]) len[is.null(len)] <- 1 data <- rbind(...) } else { len <- sapply(tmp, length) data <- unlist(tmp) } namelist <- factor(rep(names, len), levels = names) return(data.frame(data, source = namelist)) } if (is.null(newdata)) { predictor <- gamlss::lpred(object, what = what, type = type, terms = terms, se.fit = se.fit, ...) 
return(predictor) } if (se.fit) { warning(" se.fit = TRUE is not supported for new data values at the moment \n") } if (!(inherits(newdata, "data.frame"))) { stop("newdata must be a data frame ") } what <- if (!is.null(parameter)) { match.arg(parameter, choices = c( "mu", "sigma", "nu", "tau")) } else { match.arg(what) } type <- match.arg(type) Call <- object$call data <- data1 <- if (is.null(data)) { if (!is.null(Call$data)) { eval(Call$data) } else { stop("define the original data using the option data") } } else { data } data <- data[match(names(newdata), names(data))] data <- concat(data, newdata) parform <- stats::formula(object, what) if (length(parform) == 3) { parform[2] <- NULL } Terms <- terms(parform) offsetVar <- if (!is.null(off.num <- attr(Terms, "offset"))) { eval(attr(Terms, "variables")[[off.num + 1]], data) } m <- stats::model.frame(Terms, data, xlev = object[[paste(what, "xlevels", sep = ".")]]) X <- stats::model.matrix(Terms, data, contrasts = object$contrasts) y <- object[[paste(what, "lp", sep = ".")]] w <- object[[paste(what, "wt", sep = ".")]] onlydata <- data$source == "data" smo.mat <- object[[paste(what, "s", sep = ".")]] if (!is.null(off.num)) { y <- (y - offsetVar[onlydata]) } if (!is.null(smo.mat)) { n.smooths <- dim(smo.mat)[2] y <- (y - smo.mat %*% rep(1, n.smooths)) } # Modified from the original prediction function. 
if (safe) { refit <- stats::lm.wfit(X[onlydata, , drop = FALSE], y, w) if (abs(sum(stats::resid(refit))) > 0.1 || abs(sum(stats::coef(object, what = what) - stats::coef(refit), na.rm = TRUE)) > 1e-05) { warning(paste("There is a discrepancy between the original and the re-fit", " \n used to achieve 'safe' predictions \n ", sep = "")) } coef <- refit$coef } else { coef <- stats::coef(object, what = what) } nX <- dimnames(X) rownames <- nX[[1]][!onlydata] nrows <- sum(!onlydata) nac <- is.na(coef) assign.coef <- attr(X, "assign") collapse <- type != "terms" Xpred <- X[!onlydata, ] Xpred <- matrix(Xpred, nrow = nrows) if (!collapse) { aa <- attr(X, "assign") ll <- attr(Terms, "term.labels") if (attr(Terms, "intercept") > 0) { ll <- c("(Intercept)", ll) } aaa <- factor(aa, labels = ll) asgn <- split(order(aa), aaa) hasintercept <- attr(Terms, "intercept") > 0 p <- refit$qr$rank p1 <- seq(len = p) piv <- refit$qr$pivot[p1] if (hasintercept) { asgn$"(Intercept)" <- NULL avx <- colMeans(X[onlydata, ]) termsconst <- sum(avx[piv] * coef[piv]) } nterms <- length(asgn) pred <- matrix(ncol = nterms, nrow = nrows) dimnames(pred) <- list(rownames(newdata), names(asgn)) if (hasintercept) { Xpred <- sweep(Xpred, 2, avx) } unpiv <- rep.int(0, NCOL(Xpred)) unpiv[piv] <- p1 for (i in seq(1, nterms, length = nterms)) { iipiv <- asgn[[i]] ii <- unpiv[iipiv] iipiv[ii == 0] <- 0 pred[, i] <- if (any(iipiv > 0)) { Xpred[, iipiv, drop = FALSE] %*% coef[iipiv] } else { 0 } } attr(pred, "constant") <- if (hasintercept) { termsconst } else { 0 } if (!is.null(terms)) { pred <- pred[, terms, drop = FALSE] } } else { pred <- drop(Xpred[, !nac, drop = FALSE] %*% coef[!nac]) if (!is.null(off.num) && collapse) { pred <- pred + offsetVar[!onlydata] } } if (!is.null(smo.mat)) { cat("new prediction", "\n") smooth.labels <- dimnames(smo.mat)[[2]] pred.s <- array(0, c(nrows, n.smooths), list( names(pred), dimnames(smo.mat)[[2]])) smooth.calls <- lapply(m[smooth.labels], attr, "call") data <- subset(m, 
onlydata, drop = FALSE) attr(data, "class") <- NULL new.m <- subset(m, !onlydata, drop = FALSE) attr(new.m, "class") <- NULL residuals <- if (!is.null(off.num)) { object[[paste(what, "wv", sep = ".")]] - object[[paste(what, "lp", sep = ".")]] + offsetVar[onlydata] } else { object[[paste(what, "wv", sep = ".")]] - object[[paste(what, "lp", sep = ".")]] } for (TT in smooth.labels) { if (is.matrix(m[[TT]])) { nm <- names(attributes(m[[TT]])) attributes(data[[TT]]) <- attributes(m[[TT]])[nm[-c( 1, 2)]] } else { attributes(data[[TT]]) <- attributes(m[[TT]]) } Call <- smooth.calls[[TT]] Call$xeval <- substitute(new.m[[TT]], list(TT = TT)) z <- residuals + smo.mat[, TT] pred.s[, TT] <- eval(Call) } if (type == "terms") { pred[, smooth.labels] <- pred[, smooth.labels] + pred.s[, smooth.labels] } else { pred <- drop(pred + pred.s %*% rep(1, n.smooths)) } } if (type == "response") { if (methods::is(eval(parse(text = object$family[[1]])), "gamlss.family")) { pred <- eval(parse(text = object$family[[1]]))[[paste(what, "linkinv", sep = ".")]](pred) } else { pred <- gamlss.dist::gamlss.family(eval(parse(text = paste(stats::family(object)[1], "(", what, ".link=", eval(parse(text = (paste("object$", what, ".link", sep = "")))), ")", sep = ""))))[[paste(what, "linkinv", sep = ".")]](pred) } } pred } marginaleffects/R/methods_quantreg.R0000644000176200001440000000402714541720224017305 0ustar liggesusers#' @rdname get_predict #' @export get_predict.rq <- function(model, newdata = insight::get_data(model), type = NULL, ...) 
{ # type argument of the method is used to specify confidence interval type insight::check_if_installed("quantreg") if (isTRUE(getOption("marginaleffects_linalg", default = "RcppEigen") == "RcppEigen")) { MM <- attr(newdata, "marginaleffects_model_matrix") if (isTRUE(checkmate::check_matrix(MM))) { beta <- get_coef(model) out <- hush(eigenMatMult(MM, beta)) if (isTRUE(checkmate::check_numeric(out, len = nrow(newdata)))) { out <- data.frame(rowid = newdata$rowid, estimate = out) return(out) } else { out = data.frame(rowid = seq_len(length(out)), estimate = out) } } } out <- quantreg::predict.rq(model, newdata = newdata, ...) if (isTRUE(checkmate::check_numeric(out, len = nrow(newdata)))) { out <- data.frame(rowid = newdata$rowid, estimate = out) } else { out = data.frame(rowid = seq_len(length(out)), estimate = out) } return(out) } #' @include sanity_model.R #' @rdname sanitize_model_specific #' @keywords internal sanitize_model_specific.rqs <- function(model, ...) { stop("`marginaleffects` only supports `quantreg::rq` models with a single `tau` value.", call. 
= FALSE) } # #' @rdname get_model_matrix # #' @keywords internal # #' @export # get_model_matrix.rq <- function(object, newdata) { # tt <- terms(object) # Terms <- delete.response(tt) # m <- model.frame(Terms, newdata, na.action = na.pass, xlev = object$xlevels) # if (!is.null(cl <- attr(Terms, "dataClasses"))) # stats::.checkMFClasses(cl, m) # X <- model.matrix(Terms, m, contrasts.arg = object$contrasts) # if (!isTRUE(nrow(X) == nrow(newdata))) { # return(NULL) # } else { # return(X) # } # }marginaleffects/R/modelarchive.R0000644000176200001440000000105214541720224016371 0ustar liggesusers# unexported functions for use in tests modelarchive_model <- function(name) { tmp <- tempfile() url <- paste0("https://raw.github.com/vincentarelbundock/modelarchive/main/data/", name, ".rds") try(utils::download.file(url, tmp, quiet = TRUE), silent = TRUE) out <- try(readRDS(tmp), silent = TRUE) return(out) } modelarchive_data <- function(name) { dat <- sprintf( "https://raw.githubusercontent.com/vincentarelbundock/modelarchive/main/data-raw/%s.csv", name) out <- utils::read.csv(dat) return(out) }marginaleffects/R/inferences.R0000644000176200001440000002165314560042044016057 0ustar liggesusers#' (EXPERIMENTAL) Bootstrap, Conformal, and Simulation-Based Inference #' #' @description #' Warning: This function is experimental. It may be renamed, the user interface may change, or the functionality may migrate to arguments in other `marginaleffects` functions. #' #' Apply this function to a `marginaleffects` object to change the inferential method used to compute uncertainty estimates. #' #' @param x Object produced by one of the core `marginaleffects` functions. 
#' @param method String #' + "delta": delta method standard errors #' + "boot" package #' + "fwb": fractional weighted bootstrap #' + "rsample" package #' + "simulation" from a multivariate normal distribution (Krinsky & Robb, 1986) #' + "mi" multiple imputation for missing data #' + "conformal_split": prediction intervals using split conformal prediction (see Angelopoulos & Bates, 2022) #' + "conformal_cv+": prediction intervals using cross-validation+ conformal prediction (see Barber et al., 2020) #' @param R Number of resamples, simulations, or cross-validation folds. #' @param conf_type String: type of bootstrap interval to construct. #' + `boot`: "perc", "norm", "basic", or "bca" #' + `fwb`: "perc", "norm", "basic", "bc", or "bca" #' + `rsample`: "perc" or "bca" #' + `simulation`: argument ignored. #' @param conformal_test Data frame of test data for conformal prediction. #' @param conformal_calibration Data frame of calibration data for split conformal prediction (`method="conformal_split`). #' @param conformal_score String. Warning: The `type` argument in `predictions()` must generate predictions which are on the same scale as the outcome variable. Typically, this means that `type` must be "response" or "probs". #' + "residual_abs" or "residual_sq" for regression tasks (numeric outcome) #' + "softmax" for classification tasks (when `predictions()` returns a `group` columns, such as multinomial or ordinal logit models. #' @param ... #' + If `method="boot"`, additional arguments are passed to `boot::boot()`. #' + If `method="fwb"`, additional arguments are passed to `fwb::fwb()`. #' + If `method="rsample"`, additional arguments are passed to `rsample::bootstraps()`. #' + Additional arguments are ignored for all other methods. #' @details #' When `method="simulation"`, we conduct simulation-based inference following the method discussed in Krinsky & Robb (1986): #' 1. 
Draw `R` sets of simulated coefficients from a multivariate normal distribution with mean equal to the original model's estimated coefficients and variance equal to the model's variance-covariance matrix (classical, "HC3", or other). #' 2. Use the `R` sets of coefficients to compute `R` sets of estimands: predictions, comparisons, slopes, or hypotheses. #' 3. Take quantiles of the resulting distribution of estimands to obtain a confidence interval and the standard deviation of simulated estimates to estimate the standard error. #' #' When `method="fwb"`, drawn weights are supplied to the model fitting function's `weights` argument; if the model doesn't accept non-integer weights, this method should not be used. If weights were included in the original model fit, they are extracted by [weights()] and multiplied by the drawn weights. These weights are supplied to the `wts` argument of the estimation function (e.g., `comparisons()`). #' #' @section References: #' #' Krinsky, I., and A. L. Robb. 1986. “On Approximating the Statistical Properties of Elasticities.” Review of Economics and Statistics 68 (4): 715–9. #' #' King, Gary, Michael Tomz, and Jason Wittenberg. "Making the most of statistical analyses: Improving interpretation and presentation." American journal of political science (2000): 347-361 #' #' Dowd, Bryan E., William H. Greene, and Edward C. Norton. "Computation of standard errors." Health services research 49.2 (2014): 731-750. #' #' Angelopoulos, Anastasios N., and Stephen Bates. 2022. "A Gentle Introduction to Conformal Prediction and Distribution-Free Uncertainty Quantification." arXiv. https://doi.org/10.48550/arXiv.2107.07511. #' #' Barber, Rina Foygel, Emmanuel J. Candes, Aaditya Ramdas, and Ryan J. Tibshirani. 2020. “Predictive Inference with the Jackknife+.” arXiv. http://arxiv.org/abs/1905.02928. #' #' #' @return #' A `marginaleffects` object with simulation or bootstrap resamples and objects attached. 
#' @examples #' \dontrun{ #' library(marginaleffects) #' library(magrittr) #' set.seed(1024) #' mod <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) #' #' # bootstrap #' avg_predictions(mod, by = "Species") %>% #' inferences(method = "boot") #' #' avg_predictions(mod, by = "Species") %>% #' inferences(method = "rsample") #' #' # Fractional (bayesian) bootstrap #' avg_slopes(mod, by = "Species") %>% #' inferences(method = "fwb") %>% #' posterior_draws("rvar") %>% #' data.frame() #' #' # Simulation-based inference #' slopes(mod) %>% #' inferences(method = "simulation") %>% #' head() #' } #' @export inferences <- function(x, method, R = 1000, conf_type = "perc", conformal_test = NULL, conformal_calibration = NULL, conformal_score = "residual_abs", ...) { # inherit conf_level from the original object conf_level <- attr(x, "conf_level") if (is.null(conf_level)) conf_level <- 0.95 checkmate::assert( checkmate::check_class(x, "predictions"), checkmate::check_class(x, "comparisons"), checkmate::check_class(x, "slopes"), checkmate::check_class(x, "hypotheses") ) checkmate::assert_number(conf_level, lower = 1e-10, upper = 1 - 1e-10) checkmate::assert_integerish(R, lower = 2) checkmate::assert_choice( method, choices = c("delta", "boot", "fwb", "rsample", "simulation", "conformal_split", "conformal_cv+")) if (method %in% c("conformal_split", "conformal_cv+")) { checkmate::assert_class(x, "predictions") checkmate::assert_choice(conformal_score, choices = c("residual_abs", "residual_sq", "softmax")) checkmate::assert_data_frame(conformal_test, null.ok = FALSE) } if (method == "conformal_split") { checkmate::assert_data_frame(conformal_calibration, null.ok = FALSE) conformal_fun <- conformal_split } if (method == "conformal_cv+") { checkmate::assert_integerish(R, upper = 25) conformal_fun <- conformal_cv_plus } mfx_call <- attr(x, "call") model <- mfx_call[["model"]] # default standard errors are Delta anyway if (method == "delta") { return(x) } else if (method == 
"boot") { insight::check_if_installed("boot") attr(model, "inferences_method") <- "boot" attr(model, "inferences_dots") <- c(list(R = R), list(...)) attr(model, "inferences_conf_type") <- conf_type } else if (method == "fwb") { insight::check_if_installed("fwb") dots <- list(...) if (!"verbose" %in% names(dots)) { dots[["verbose"]] <- FALSE } attr(model, "inferences_method") <- "fwb" attr(model, "inferences_dots") <- c(list(R = R), dots) attr(model, "inferences_conf_type") <- conf_type if (isTRUE("wts" %in% names(attr(x, "call"))) && !is.null(attr(x, "call")[["wts"]])) { insight::format_error('The `fwb` method is not supported with the `wts` argument.') } } else if (method == "rsample") { insight::check_if_installed("rsample") attr(model, "inferences_method") <- "rsample" attr(model, "inferences_dots") <- c(list(times = R), list(...)) attr(model, "inferences_conf_type") <- conf_type } else if (method == "simulation") { insight::check_if_installed("MASS") attr(model, "inferences_R") <- R attr(model, "inferences_simulate") <- function(R, B, V) { MASS::mvrnorm(R, mu = B, Sigma = V) } class(model) <- c("inferences_simulation", class(model)) # do not use simulation mean as point estimate # https://doi.org/10.1017/psrm.2023.8 b <- get_coef(x) } if (isTRUE(grepl("conformal", method))) { out <- conformal_fun( x, R = R, conf_level = conf_level, test = conformal_test, calibration = conformal_calibration, score = conformal_score) } else { mfx_call[["model"]] <- model out <- recall(mfx_call) } # do not use simulation mean as point estimate # https://doi.org/10.1017/psrm.2023.8 if (method == "simulation") { out$estimate <- x$estimate } return(out) } inferences_dispatch <- function(model, INF_FUN, ...) { if (isTRUE(attr(model, "inferences_method") == "rsample")) { bootstrap_rsample(model = model, INF_FUN = INF_FUN, ...) } else if (isTRUE(attr(model, "inferences_method") == "boot")) { bootstrap_boot(model = model, INF_FUN = INF_FUN, ...) 
} else if (isTRUE(attr(model, "inferences_method") == "fwb")) { bootstrap_fwb(model = model, INF_FUN = INF_FUN, ...) } else { return(NULL) } } marginaleffects/R/sanitize_type.R0000644000176200001440000000272214541720224016623 0ustar liggesusers#' check type sanity #' #' @param model model object #' @param type character vector #' @noRd sanitize_type <- function(model, type, calling_function = "raw") { # mlr3 if (inherits(model, "Learner")) { if (is.null(type)) type <- "response" valid <- setdiff(model$predict_types, "se") checkmate::assert_choice(type, choices = valid, null.ok = TRUE) return(type) } checkmate::assert_character(type, len = 1, null.ok = TRUE) if (inherits(model, "model_fit")) { cl <- "model_fit" } else { cl <- class(model)[1] } if (!cl %in% type_dictionary$class) { cl <- "other" } dict <- type_dictionary # raw is often invoked by `get_predict()`, which is required for {clarify} and others. # we only allow invlink(link) in predictions() and marginal_means(), which are handled by {marginaleffects} if (!calling_function %in% c("predictions", "marginal_means")) { dict <- dict[dict$type != "invlink(link)", , drop = FALSE] } # fixest: invlink(link) only supported for glm model if (inherits(model, "fixest")) { if (!isTRUE(hush(model[["method_type"]]) %in% c("feglm"))) { dict <- dict[dict$type != "invlink(link)", , drop = FALSE] } } dict <- dict[dict$class == cl, , drop = FALSE] checkmate::assert_choice(type, choices = dict$type, null.ok = TRUE) if (is.null(type)) { type <- dict$type[1] } return(type) }marginaleffects/R/broom.R0000644000176200001440000000436114541720224015053 0ustar liggesusers#' @importFrom generics tidy #' @export generics::tidy #' @importFrom generics glance #' @export generics::glance #' tidy helper #' #' @noRd #' @export tidy.comparisons <- function(x, ...) 
{ insight::check_if_installed("tibble") out <- tibble::as_tibble(x) if (!"term" %in% names(out)) { lab <- seq_len(nrow(out)) if ("group" %in% colnames(out) || is.character(attr(x, "by"))) { tmp <- c("group", attr(x, "by")) tmp <- Filter(function(j) j %in% colnames(x), tmp) if (length(tmp) > 0) { tmp <- do.call(paste, out[, tmp]) if (anyDuplicated(tmp)) { tmp <- paste(seq_len(nrow(out)), tmp) } lab <- tmp } } out[["term"]] <- lab } return(out) } #' tidy helper #' #' @noRd #' @export tidy.slopes <- tidy.comparisons #' tidy helper #' #' @noRd #' @export tidy.predictions <- tidy.comparisons #' tidy helper #' #' @noRd #' @export tidy.hypotheses <- tidy.comparisons #' tidy helper #' #' @noRd #' @export tidy.marginalmeans <- function(x, ...) { insight::check_if_installed("tibble") tibble::as_tibble(x) } #' @noRd #' @export glance.slopes <- function(x, ...) { insight::check_if_installed("insight") insight::check_if_installed("modelsummary") model <- tryCatch(attr(x, "model"), error = function(e) NULL) if (is.null(model) || isTRUE(checkmate::check_string(model))) { model <- tryCatch(attr(x, "call")[["model"]], error = function(e) NULL) } gl <- suppressMessages(suppressWarnings(try( modelsummary::get_gof(model, ...), silent = TRUE))) if (inherits(gl, "data.frame")) { out <- data.frame(gl) } else { out <- NULL } vcov.type <- attr(x, "vcov.type") if (is.null(out) && !is.null(vcov.type)) { out <- data.frame("vcov.type" = vcov.type) } else if (!is.null(out)) { out[["vcov.type"]] <- vcov.type } out <- tibble::as_tibble(out) return(out) } #' @noRd #' @export glance.predictions <- glance.slopes #' @noRd #' @export glance.comparisons <- glance.slopes #' @noRd #' @export glance.hypotheses <- glance.slopes #' @noRd #' @export glance.marginalmeans <- glance.slopesmarginaleffects/R/sanity_model.R0000644000176200001440000000767314554072601016440 0ustar liggesusers#' Method to raise model-specific warnings and errors #' #' @inheritParams slopes #' @return A warning, an error, or nothing 
#' @rdname sanitize_model_specific #' @keywords internal sanitize_model_specific <- function (model, ...) { UseMethod("sanitize_model_specific", model) } #' @rdname sanitize_model_specific sanitize_model_specific.default <- function(model, vcov = NULL, calling_function = "marginaleffects", ...) { return(model) } sanity_model_supported_class <- function(model) { checkmate::assert_character( getOption("marginaleffects_model_classes", default = NULL), null.ok = TRUE) custom_classes <- getOption("marginaleffects_model_classes", default = NULL) custom_classes <- as.list(custom_classes) supported <- append(custom_classes, list( "afex_aov", "amest", #package: Amelia "bart", # package: dbarts "betareg", "bglmerMod", "blmerMod", # "bife", "biglm", c("bigglm", "biglm"), "brglmFit", "brmsfit", c("bracl", "brmultinom", "brglmFit"), c("brnb", "negbin", "glm"), c("clogit", "coxph"), "clm", c("clmm2", "clm2"), "coxph", "crch", "fixest", "flic", "flac", c("Gam", "glm", "lm"), # package: gam c("gam", "glm", "lm"), # package: mgcv c("gamlss", "gam", "glm", "lm"), # package: gamlss c("geeglm", "gee", "glm"), c("Gls", "rms", "gls"), "glm", "gls", "glmerMod", "glmrob", "glmmTMB", c("glmmPQL", "lme"), "glimML", "glmx", "hetprob", "hurdle", "hxlr", "ivreg", "iv_robust", "ivpml", "Learner", "lm", "lme", "lmerMod", "lmerModLmerTest", "lmrob", "lmRob", "lm_robust", # "logitr", "loess", "logistf", c("lrm", "lm"), c("lrm", "rms", "glm"), c("mblogit", "mclogit"), c("mclogit", "lm"), "MCMCglmm", "mhurdle", "mira", "mlogit", "model_fit", c("multinom", "nnet"), "mvgam", c("negbin", "glm", "lm"), "nls", c("ols", "rms", "lm"), c("orm", "rms"), c("oohbchoice", "dbchoice"), "phylolm", "phyloglm", c("plm", "panelmodel"), "polr", "Rchoice", "rlmerMod", "rq", c("scam", "glm", "lm"), c("selection", "selection", "list"), "speedglm", "speedlm", "stanreg", "survreg", "svyolr", c("tobit", "survreg"), "tobit1", "truncreg", "workflow", "zeroinfl")) flag <- FALSE for (sup in supported) { if (!is.null(sup) && 
isTRUE(all(sup %in% class(model)))) {
            flag <- TRUE
        }
    }
    if (isFALSE(flag)) {
        # build a human-readable list of the first class name of every supported entry
        support <- paste(sort(unique(sapply(supported, function(x) x[1]))), collapse = ", ")
        msg <- c(
            sprintf('Models of class "%s" are not supported. Supported model classes include:', class(model)[1]),
            "",
            support,
            "",
            "New modeling packages can usually be supported by `marginaleffects` if they include a working `predict()` method. If you believe that this is the case, please file a feature request on Github: https://github.com/vincentarelbundock/marginaleffects/issues")
        msg <- insight::format_message(msg)
        stop(msg, call. = FALSE)
    }
}

# Run model-specific sanity checks, then confirm the model class is supported.
# Returns the (possibly modified) model object.
sanitize_model <- function(model, newdata = NULL, vcov = NULL, ...) {
    model <- sanitize_model_specific(model, vcov = vcov, newdata = newdata, ...)
    sanity_model_supported_class(model)
    return(model)
}
marginaleffects/R/methods_mlogit.R0000644000176200001440000000240414541720224016747 0ustar liggesusers#' @rdname get_predict
#' @export
get_predict.mlogit <- function(model, newdata, ...) {
    mat <- stats::predict(model, newdata = newdata)
    if (isTRUE(checkmate::check_atomic_vector(mat))) {
        # single observation: predict() returns a named vector of choice probabilities
        out <- data.table(
            rowid = seq_along(mat),
            group = names(mat),
            estimate = mat)
    } else {
        # matrix of predictions: rows are observations, columns are choices.
        # as.vector() flattens column-by-column, so `group` repeats each choice
        # nrow(mat) times and `rowid` cycles through row indices once per choice.
        # Bug fix: the original used `rep = ncol(mat)`, which is not a valid
        # argument of rep() (valid names: times, length.out, each) and was
        # silently dropped; the correct alignment was only obtained through
        # data.table's recycling. Use the explicit `times` argument instead.
        out <- data.table(
            rowid = rep(seq_len(nrow(mat)), times = ncol(mat)),
            group = rep(colnames(mat), each = nrow(mat)),
            estimate = as.vector(mat))
    }
    setkey(out, rowid, group)
    if ("term" %in% colnames(newdata)) {
        out[, "term" := newdata[["term"]]]
    }
    return(out)
}

#' @include sanity_model.R
#' @rdname sanitize_model_specific
#' @keywords internal
sanitize_model_specific.mlogit <- function(model, newdata, ...) {
    if (!is.null(newdata)) {
        # mlogit stacks one row per available choice; `newdata` must respect that shape
        nchoices <- length(unique(model$model$idx[, 2]))
        if (!isTRUE(nrow(newdata) %% nchoices == 0)) {
            msg <- sprintf("The `newdata` argument for `mlogit` models must be a data frame with a number of rows equal to a multiple of the number of choices: %s.", nchoices)
            stop(msg, call.
= FALSE) } } return(model) } marginaleffects/R/get_averages.R0000644000176200001440000001244314554110041016363 0ustar liggesusers#' Average Estimates (aka "Margins") #' #' @description #' Calculate average estimates by taking the (group-wise) mean of all the unit-level #' estimates computed by the `predictions()`, `comparisons()`, or `slopes()` functions. #' #' Warning: It is generally faster and safer to use the `by` argument of one of #' the three functions listed above. Alternatively, one can call it in one step: #' #' `avg_slopes(model)` #' #' `slopes(model, by = TRUE)` #' #' Proceeding in two steps by assigning the unit-level estimates is typically #' slower, because all estimates must be computed twice. #' #' Note that the `tidy()` and `summary()` methods are slower wrappers around `avg_*()` functions. #' @param x Object produced by the `predictions()`, `comparisons()`, or `slopes()` functions. #' @param by Character vector of variable names over which to compute group-wise average estimates. When `by=NULL`, the global average (per term) is reported. #' @param ... All additional arguments are passed to the original fitting #' function to override the original call options: `conf_level`, `transform`, #' etc. See `?predictions`, `?comparisons`, `?slopes`. #' @return A `data.frame` of estimates and uncertainty estimates #' @details #' #' Standard errors are estimated using the delta method. See the `marginaleffects` website for details. #' #' In Bayesian models (e.g., `brms`), estimates are aggregated applying the #' median (or mean) function twice. First, we apply it to all #' marginal effects for each posterior draw, thereby estimating one Average (or #' Median) Marginal Effect per iteration of the MCMC chain. Second, we #' calculate the mean and the `quantile` function to the results of Step 1 to #' obtain the Average Marginal Effect and its associated interval. 
#' #' @keywords internal #' @examplesIf interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true") #' @examples #' mod <- lm(mpg ~ factor(gear), data = mtcars) #' avg_comparisons(mod, variables = list(gear = "sequential")) #' get_averages <- function (x, by = TRUE, ...) { xcall <- substitute(x) if (is.call(xcall)) { if ("by" %in% names(xcall)) { if (!isTRUE(checkmate::check_flag(by, null.ok = TRUE))) { insight::format_error("The `by` argument cannot be used twice.") } if (length(list(...)) == 0) { # bug in predictions.Rmd out <- eval(xcall) } else { out <- recall(xcall, ...) } } else if (isTRUE(checkmate::check_flag(by, null.ok = TRUE))) { by <- c("term", "group", "contrast") out <- recall(xcall, by = by, ...) } else { out <- recall(xcall, by = by, ...) } return(out) } UseMethod("get_averages", x) } #' @noRd get_averages.predictions <- function(x, by = TRUE, byfun = NULL, ...) { if (!is.null(byfun) && !inherits(x, "predictions")) { insight::format_error("The `byfun` argument is only supported for objects produced by the `predictions()` function.") } if (!isFALSE(attr(x, "by")) && !is.null(attr(x, "by"))) { return(x) } if (is.null(by) || isFALSE(by)) { by <- grep("^type$|^term$|^group$|^contrast_?", colnames(x), value = TRUE) } # `bynout` requires us to re-eval a modified call out <- recall(x, by = by, byfun = byfun, ...) # sort and subset columns cols <- c("group", "term", "contrast", attr(x, "by"), "by", grep("^contrast_\\w+", colnames(out), value = TRUE), "estimate", "std.error", "statistic", "p.value", "conf.low", "conf.high") if (isTRUE(checkmate::check_character(by))) { cols <- c(cols, by) } cols <- intersect(cols, colnames(out)) # hack to select columns while preserving attributes for (v in colnames(out)) { if (!v %in% cols) { out[[v]] <- NULL } } data.table::setDF(out) return(out) } #' @noRd get_averages.comparisons <- function(x, by = TRUE, ...) 
{ if ("byfun" %in% names(list(...)) && !inherits(x, "predictions")) { insight::format_error("The `byfun` argument is only supported for objects produced by the `predictions()` function.") } # already used `by` in the main call, so we return the main output if (!isFALSE(attr(x, "by")) && !is.null(attr(x, "by"))) { return(x) } if (isTRUE(checkmate::check_flag(by, null.ok = TRUE))) { by <- grep("^type$|^term$|^group$|^contrast_?", colnames(x), value = TRUE) } # `bynout` requires us to re-eval a modified call out <- recall(x, by = by, ...) # sort and subset columns cols <- c("group", "term", "contrast", "by", attr(x, "by"), grep("^contrast_\\w+", colnames(out), value = TRUE), "estimate", "std.error", "statistic", "p.value", "conf.low", "conf.high") if (isTRUE(checkmate::check_character(by))) { cols <- c(cols, by) } cols <- intersect(cols, colnames(out)) # hack to select columns while preserving attributes for (v in colnames(out)) { if (!v %in% cols) { out[[v]] <- NULL } } data.table::setDF(out) return(out) } #' @noRd get_averages.slopes <- get_averages.comparisons #' @noRd get_averages.hypotheses <- get_averages.marginalmeansmarginaleffects/R/myTryCatch.R0000644000176200001440000000072314541720224016022 0ustar liggesusers# License: https://creativecommons.org/licenses/by-sa/3.0/ # Source: https://stackoverflow.com/a/24569739/342331 myTryCatch <- function(expr) { warn <- err <- NULL value <- withCallingHandlers( tryCatch(expr, error = function(e) { err <<- e NULL }), warning = function(w) { warn <<- w invokeRestart("muffleWarning") } ) list(value = value, warning = warn, error = err) } marginaleffects/R/methods_stats.R0000644000176200001440000000545114560035476016630 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @export set_coef.glm <- function(model, coefs, ...) { model[["coefficients"]] <- sub_named_vector( model[["coefficients"]], coefs ) ## But, there's an edge case!! 
When `predict(model, se.fit = TRUE)` is called without `newdata`, `predict.lm()` isn't called. ## Instead `model$linear.predictors` is returned directly if `type = "link"` and ## `model$fitted.values` is returned directly if `type = "response"`. ## `marginal_effects()` for "glm" is always called with `newdata`, so we won't hit this. model } #' @rdname set_coef #' @export set_coef.lm <- function(model, coefs, ...) { model[["coefficients"]] <- sub_named_vector( model[["coefficients"]], coefs ) model } #' @rdname get_predict #' @export get_predict.lm <- function(model, newdata = insight::get_data(model), type = "response", ...) { MM <- attr(newdata, "marginaleffects_model_matrix") beta <- get_coef(model) if (!isTRUE(checkmate::check_matrix(MM)) || ncol(MM) != length(beta)) { out <- get_predict.default(model = model, newdata = newdata, type = type, ...) return(out) } p <- model$rank p1 <- seq_len(p) piv <- if (p) qr(model)$pivot[p1] if (!all(seq_len(ncol(MM)) %in% piv)) { MM <- MM[, piv, drop = FALSE] beta <- beta[piv] } if (getOption("marginaleffects_linalg", default = "RcppEigen") == "RcppEigen") { pred <- eigenMatMult(MM, beta) } else { pred <- drop(MM %*% beta) } # `pred` is a secret argument which re-uses the default get_predict to format a vector a data frame using correct `rowid` out <- get_predict.default(model = model, newdata = newdata, type = type, pred = pred, ...) return(out) } #' @rdname get_predict #' @export get_predict.glm <- function(model, newdata = insight::get_data(model), type = "response", ...) { out <- NULL MM <- attr(newdata, "marginaleffects_model_matrix") if (isTRUE(checkmate::check_matrix(MM))) { if (isTRUE(checkmate::check_choice(type, c("link")))) { out <- get_predict.lm(model = model, newdata = newdata, ...) } else if (isTRUE(checkmate::check_choice(type, "response")) || is.null(type)) { out <- get_predict.lm(model = model, newdata = newdata, ...) 
out$estimate <- stats::family(model)$linkinv(out$estimate) } } if (is.null(out)) { out <- get_predict.default(model = model, newdata = newdata, type = type, ...) } return(out) } #' @include set_coef.R #' @rdname set_coef #' @export set_coef.nls <- function(model, coefs, ...) { out <- model out$m$setPars(coefs) return(out) } #' @include get_coef.R #' @rdname get_coef #' @export get_coef.nls <- function(model, ...) { model$m$getPars() } marginaleffects/R/methods_bife.R0000644000176200001440000000127514541720224016366 0ustar liggesusers#' @include get_predict.R #' @rdname get_predict #' @keywords internal #' @export get_predict.bife <- function(model, newdata = insight::get_data(model), type = "response", ...) { pred <- stats::predict(model, X_new = newdata, type = type, ...) sanity_predict_vector(pred = pred, model = model, newdata = newdata, type = type) sanity_predict_numeric(pred = pred, model = model, newdata = newdata, type = type) out <- data.frame( rowid = seq_len(nrow(newdata)), estimate = pred) return(out) } marginaleffects/R/methods_MASS.R0000644000176200001440000000441414557752334016240 0ustar liggesusers#' @include get_coef.R #' @rdname get_coef #' @export get_coef.polr <- function(model, ...) { out <- insight::get_parameters(model) out <- stats::setNames(out$Estimate, out$Parameter) names(out) <- gsub("Intercept: ", "", names(out)) return(out) } #' @include set_coef.R #' @rdname set_coef #' @export set_coef.polr <- function(model, coefs, ...) { # in basic model classes coefficients are named vector idx <- match(names(model$coefficients), names(coefs)) model[["coefficients"]] <- coefs[idx] idx <- match(names(model$zeta), names(coefs)) model[["zeta"]] <- coefs[idx] model } #' @include get_group_names.R #' @rdname get_group_names #' @export get_group_names.polr <- function(model, ...) 
{
    resp <- insight::get_response(model)
    # factor outcome: group names are the factor levels (in level order);
    # otherwise use the unique observed response values
    if (is.factor(resp)) {
        out <- levels(resp)
    } else {
        out <- unique(resp)
    }
    return(out)
}

#' @include get_predict.R
#' @rdname get_predict
#' @export
get_predict.polr <- function(model,
                             newdata = insight::get_data(model),
                             type = "probs",
                             ...) {
    type <- sanitize_type(model, type, calling_function = "predictions")
    # hack: with a 1-row `newdata`, predict() returns a vector, so
    # get_predict.default cannot learn about the outcome groups. Duplicate the
    # row, tag the copy with a sentinel rowid, and drop it after prediction.
    # (The original also set a `hack` flag that was never read; removed as dead code.)
    if (nrow(newdata) == 1) {
        newdata <- newdata[c(1, 1), , drop = FALSE]
        newdata$rowid[1] <- -Inf
    }
    out <- get_predict.default(model, newdata = newdata, type = type, ...)
    # discard the sentinel row added above
    out <- out[out$rowid != -Inf, ]
    return(out)
}

#' @include set_coef.R
#' @rdname set_coef
#' @export
set_coef.glmmPQL <- function(model, coefs, ...) {
    # overwrite the fixed-effect coefficients in place, matched by name
    model[["coefficients"]][["fixed"]][names(coefs)] <- coefs
    model
}

#' @rdname get_predict
#' @export
get_predict.glmmPQL <- function(model,
                                newdata = insight::get_data(model),
                                type = "response",
                                ...) {
    out <- stats::predict(model, newdata = newdata, type = type, ...)
    out <- data.frame(
        rowid = seq_len(nrow(newdata)),
        estimate = out)
    return(out)
}
marginaleffects/R/get_group_names.R0000644000176200001440000000062214541720224017110 0ustar liggesusers#' Get levels of the outcome variable in grouped or multivariate models
#'
#' @inheritParams slopes
#' @return A character vector
#' @rdname get_group_names
#' @keywords internal
#' @export
get_group_names <- function (model, ...) {
    UseMethod("get_group_names", model)
}

#' @rdname get_group_names
#' @export
get_group_names.default <- function(model, ...)
{ return("main_marginaleffect") } marginaleffects/R/sanitize_variables.R0000644000176200001440000004204514554070071017616 0ustar liggesusers# input: character vector or named list # output: named list of lists where each element represents a variable with: name, value, function, label sanitize_variables <- function(variables, model, newdata, # need for NumPyro where `find_variables()`` does not work modeldata, comparison = NULL, by = NULL, cross = FALSE, calling_function = "comparisons", eps = NULL) { checkmate::assert( checkmate::check_null(variables), checkmate::check_character(variables, min.len = 1, names = "unnamed"), checkmate::check_list(variables, min.len = 1, names = "unique"), combine = "or") # extensions with no `get_data()` if (is.null(modeldata) || nrow(modeldata) == 0) { modeldata <- set_variable_class(newdata, model) no_modeldata <- TRUE } else { no_modeldata <- FALSE } # variables is NULL: get all variable names from the model if (is.null(variables)) { # mhurdle names the variables weirdly if (inherits(model, "mhurdle")) { predictors <- insight::find_predictors(model, flatten = TRUE) predictors <- list(conditional = predictors) } else { predictors <- insight::find_variables(model) } # unsupported models like pytorch if (length(predictors) == 0 || (length(predictors) == 1 && names(predictors) == "response")) { dv <- hush(unlist(insight::find_response(model, combine = FALSE), use.names = FALSE)) predictors <- setdiff(hush(colnames(newdata)), c(dv, "rowid")) } else { known <- c("fixed", "conditional", "zero_inflated", "scale", "nonlinear") if (any(known %in% names(predictors))) { predictors <- predictors[known] # sometimes triggered by multivariate brms models where we get nested # list: predictors$gear$hp } else { predictors <- unlist(predictors, recursive = TRUE, use.names = FALSE) predictors <- unique(predictors) } # flatten predictors <- unique(unlist(predictors, recursive = TRUE, use.names = FALSE)) } } else { predictors <- variables } # character 
-> list if (isTRUE(checkmate::check_character(predictors))) { predictors <- stats::setNames(rep(list(NULL), length(predictors)), predictors) } # reserved keywords # Issue #697: we used to allow "group", as long as it wasn't in # `variables`, but this created problems with automatic `by=TRUE`. Perhaps # I could loosen this, but there are many interactions, and the lazy way is # just to forbid entirely. reserved <- c( "rowid", "group", "term", "contrast", "estimate", "std.error", "statistic", "conf.low", "conf.high", "p.value", "p.value.nonsup", "p.value.noninf", "by") # if no modeldata is available, we use `newdata`, but that often has a # `rowid` column. This used to break the extensions.Rmd vignette. if (no_modeldata) { reserved <- setdiff(reserved, "rowid") } bad <- unique(intersect(c(names(predictors), colnames(modeldata)), reserved)) if (length(bad) > 0) { msg <- c( "These variable names are forbidden to avoid conflicts with the outputs of `marginaleffects`:", sprintf("%s", paste(sprintf('"%s"', bad), collapse = ", ")), "Please rename your variables before fitting the model.") insight::format_error(msg) } # when comparisons() only inludes one focal predictor, we don't need to specify it in `newdata` # when `variables` is numeric, we still need to include it, because in # non-linear model the contrast depend on the starting value of the focal # variable. 
found <- colnames(newdata) if (calling_function == "comparisons") { v <- NULL if (isTRUE(checkmate::check_string(variables))) { v <- variables } else if (isTRUE(checkmate::check_list(variables, len = 1, names = "named"))) { v <- names(variables)[1] } flag <- get_variable_class(modeldata, variable = v, compare = "categorical") if (!is.null(v) && isTRUE(flag)) { found <- c(found, v) } } # matrix predictors mc <- attr(newdata, "newdata_matrix_columns") if (length(mc) > 0 && any(names(predictors) %in% mc)) { predictors <- predictors[!names(predictors) %in% mc] insight::format_warning("Matrix columns are not supported. Use the `variables` argument to specify valid predictors, or use a function like `drop()` to convert your matrix columns into vectors.") } # missing variables miss <- setdiff(names(predictors), found) predictors <- predictors[!names(predictors) %in% miss] if (length(miss) > 0) { msg <- sprintf( "These variables were not found: %s. Try specifying the `newdata` argument explicitly and make sure the missing variable is included.", paste(miss, collapse = ", ")) insight::format_warning(msg) } # sometimes `insight` returns interaction component as if it were a constituent variable idx <- !grepl(":", names(predictors)) predictors <- predictors[idx] # anything left? if (length(predictors) == 0) { msg <- "There is no valid predictor variable. Please change the `variables` argument or supply a new data frame to the `newdata` argument." 
insight::format_error(msg) } # functions to values # only for predictions; get_contrast_data_numeric handles this for comparisons() # do this before NULL-to-defaults so we can fill it in with default in case of failure if (calling_function == "predictions") { for (v in names(predictors)) { if (is.function(predictors[[v]])) { tmp <- hush(predictors[[v]](modeldata[[v]])) if (is.null(tmp)) { msg <- sprintf("The `%s` function produced invalid output when applied to the dataset used to fit the model.", v) insight::format_warning(msg) } predictors[[v]] <- hush(predictors[[v]](modeldata[[v]])) } } } # NULL to defaults for (v in names(predictors)) { if (is.null(predictors[[v]])) { if (get_variable_class(modeldata, v, "binary")) { predictors[[v]] <- 0:1 } else if (get_variable_class(modeldata, v, "numeric")) { if (calling_function == "comparisons") { predictors[[v]] <- 1 } else if (calling_function == "predictions") { v_unique <- unique(modeldata[[v]]) if (length(v_unique) < 6) { predictors[[v]] <- v_unique } else { predictors[[v]] <- stats::fivenum(modeldata[[v]]) } } } else { if (calling_function == "comparisons") { predictors[[v]] <- "reference" } else if (calling_function == "predictions") { # TODO: warning when this is too large. Here or elsewhere? predictors[[v]] <- unique(modeldata[[v]]) } } } } # shortcuts and validity for (v in names(predictors)) { if (isTRUE(checkmate::check_data_frame(predictors[[v]], nrows = nrow(newdata)))) { # do nothing, but don't take the other validity check branches } else if (get_variable_class(modeldata, v, "binary")) { if (!isTRUE(checkmate::check_numeric(predictors[[v]])) || !is_binary(predictors[[v]])) { msg <- sprintf("The `%s` variable is binary. 
The corresponding entry in the `variables` argument must be 0 or 1.", v) insight::format_error(msg) } # get_contrast_data requires both levels if (calling_function == "comparisons") { predictors[[v]] <- 0:1 } } else if (get_variable_class(modeldata, v, "numeric")) { if (calling_function == "comparisons") { # For comparisons(), the string shortcuts are processed in contrast_data_* functions because we need fancy labels. # Eventually it would be nice to consolidate, but that's a lot of work. valid_str <- c("iqr", "minmax", "sd", "2sd") flag <- isTRUE(checkmate::check_numeric(predictors[[v]], min.len = 1, max.len = 2)) || isTRUE(checkmate::check_choice(predictors[[v]], choices = valid_str)) || isTRUE(checkmate::check_function(predictors[[v]])) if (!isTRUE(flag)) { msg <- "The %s element of the `variables` argument is invalid." msg <- sprintf(msg, v) insight::format_error(msg) } } else if (calling_function == "predictions") { # string shortcuts if (identical(predictors[[v]], "iqr")) { predictors[[v]] <- stats::quantile(modeldata[[v]], probs = c(.25, .75), na.rm = TRUE) } else if (identical(predictors[[v]], "minmax")) { predictors[[v]] <- c(min(modeldata[[v]], na.rm = TRUE), max(modeldata[[v]], na.rm = TRUE)) } else if (identical(predictors[[v]], "sd")) { s <- stats::sd(modeldata[[v]], na.rm = TRUE) m <- mean(modeldata[[v]], na.rm = TRUE) predictors[[v]] <- c(m - s / 2, m + s / 2) } else if (identical(predictors[[v]], "2sd")) { s <- stats::sd(modeldata[[v]], na.rm = TRUE) m <- mean(modeldata[[v]], na.rm = TRUE) predictors[[v]] <- c(m - s, m + s) } else if (identical(predictors[[v]], "threenum")) { s <- stats::sd(modeldata[[v]], na.rm = TRUE) m <- mean(modeldata[[v]], na.rm = TRUE) predictors[[v]] <- c(m - s, m, m + s) } else if (identical(predictors[[v]], "fivenum")) { predictors[[v]] <- stats::fivenum } else if (is.character(predictors[[v]])) { msg <- sprintf('%s is a numeric variable. 
The summary shortcuts supported by the variables argument are: "iqr", "minmax", "sd", "2sd", "threenum", "fivenum".', v) insight::format_error(msg) } } } else { if (calling_function == "comparisons") { valid <- c("reference", "sequential", "pairwise", "all", "revpairwise", "revsequential", "revreference") # minmax needs an actual factor in the original data to guarantee correct order of levels. if (is.factor(modeldata[[v]])) { valid <- c(valid, "minmax") } flag1 <- checkmate::check_choice(predictors[[v]], choices = valid) flag2 <- checkmate::check_vector(predictors[[v]], len = 2) flag3 <- checkmate::check_data_frame(predictors[[v]], nrows = nrow(newdata), ncols = 2) flag4 <- checkmate::check_function(predictors[[v]]) flag5 <- checkmate::check_data_frame(predictors[[v]]) if (!isTRUE(flag1) && !isTRUE(flag2) && !isTRUE(flag3) && !isTRUE(flag4) && !isTRUE(flag5)) { msg <- "The %s element of the `variables` argument must be a vector of length 2 or one of: %s" msg <- sprintf(msg, v, paste(valid, collapse = ", ")) insight::format_error(msg) } } else if (calling_function == "predictions") { if (is.character(predictors[[v]]) || is.factor(predictors[[v]])) { if (!all(as.character(predictors[[v]]) %in% as.character(modeldata[[v]]))) { invalid <- intersect( as.character(predictors[[v]]), c("pairwise", "reference", "sequential", "revpairwise", "revreference", "revsequential")) if (length(invalid) > 0) { msg <- "These values are only supported by the `variables` argument in the `comparisons()` function: %s" msg <- sprintf(msg, paste(invalid, collapse = ", ")) } else { msg <- "Some elements of the `variables` argument are not in their original data. 
Check this variable: %s" msg <- sprintf(msg, v) } insight::format_error(msg) } } } } } # sometimes weights don't get extracted by `find_variables()` w <- tryCatch(insight::find_weights(model), error = function(e) NULL) w <- intersect(w, colnames(newdata)) others <- w # goals: # allow multiple function types: slopes() uses both difference and dydx # when comparison is defined, use that if it works or turn back to defaults # predictors list elements: name, value, function, label if (is.null(comparison)) { fun_numeric <- fun_categorical <- comparison_function_dict[["difference"]] lab_numeric <- lab_categorical <- comparison_label_dict[["difference"]] } else if (is.function(comparison)) { fun_numeric <- fun_categorical <- comparison lab_numeric <- lab_categorical <- "custom" } else if (is.character(comparison)) { # switch to the avg version when there is a `by` function if (isTRUE(checkmate::check_character(by)) && !isTRUE(grepl("avg$", comparison))) { comparison <- paste0(comparison, "avg") } # weights if user requests `avg` or automatically switched if (isTRUE(grepl("avg$", comparison)) && "marginaleffects_wts_internal" %in% colnames(newdata)) { comparison <- paste0(comparison, "wts") } fun_numeric <- fun_categorical <- comparison_function_dict[[comparison]] lab_numeric <- lab_categorical <- comparison_label_dict[[comparison]] if (isTRUE(grepl("dydxavgwts|eyexavgwts|dyexavgwts|eydxavgwts", comparison))) { fun_categorical <- comparison_function_dict[["differenceavgwts"]] lab_categorical <- comparison_label_dict[["differenceavgwts"]] } else if (isTRUE(grepl("dydxavg|eyexavg|dyexavg|eydxavg", comparison))) { fun_categorical <- comparison_function_dict[["differenceavg"]] lab_categorical <- comparison_label_dict[["differenceavg"]] } else if (isTRUE(grepl("dydx$|eyex$|dyex$|eydx$", comparison))) { fun_categorical <- comparison_function_dict[["difference"]] lab_categorical <- comparison_label_dict[["difference"]] } } for (v in names(predictors)) { if 
(get_variable_class(modeldata, v, "numeric") && !get_variable_class(modeldata, v, "binary")) { fun <- fun_numeric lab <- lab_numeric } else { fun <- fun_categorical lab <- lab_categorical } predictors[[v]] <- list( "name" = v, "function" = fun, "label" = lab, "value" = predictors[[v]], "comparison" = comparison) } # epsilon for finite difference for (v in names(predictors)) { if (!is.null(eps)) { predictors[[v]][["eps"]] <- eps } else if (is.numeric(modeldata[[v]])) { predictors[[v]][["eps"]] <- 1e-4 * diff(range(modeldata[[v]], na.rm = TRUE, finite = TRUE)) } else { predictors[[v]]["eps"] <- list(NULL) } } # can't take the slope of an outcome, except in weird brms models (issue #1006) if (!inherits(model, "brmsfit") || !isTRUE(length(model$formula$forms) > 1)) { dv <- hush(unlist(insight::find_response(model, combine = FALSE), use.names = FALSE)) # sometimes insight doesn't work if (length(dv) > 0) { predictors <- predictors[setdiff(names(predictors), dv)] } } if (length(predictors) == 0) { insight::format_error("There is no valid predictor variable. Please make sure your model includes predictors and use the `variables` argument.") } # interaction: get_contrasts() assumes there is only one function when interaction=TRUE if (isTRUE(interaction)) { for (p in predictors) { flag <- !identical(p[["function"]], predictors[[1]][["function"]]) if (flag) { stop("When `interaction=TRUE` all variables must use the same contrast function.", call. 
= FALSE) } } } # sort variables alphabetically predictors <- predictors[sort(names(predictors))] others <- others[sort(names(others))] # output out <- list(conditional = predictors, others = others) return(out) } marginaleffects/R/hypotheses_joint.R0000644000176200001440000000753214541720224017336 0ustar liggesusersjoint_test <- function(object, joint_index = NULL, hypothesis = 0, joint_test = "f") { checkmate::assert_choice(joint_test, c("f", "chisq")) # theta_hat: P x 1 vector of estimated parameters if (inherits(object, c("slopes", "comparisons"))) { nam <- object$term if ("contrast" %in% names(object)) { nam <- paste(nam, object$contrast) } theta_hat <- stats::setNames(object$estimate, nam) } else { theta_hat <- get_coef(object) } # index checkmate::assert( checkmate::check_integerish(joint_index, lower = 1, upper = length(theta_hat)), checkmate::check_character(joint_index), checkmate::check_true(joint_index) ) if (isTRUE(joint_index)) { joint_index <- seq_along(theta_hat) } else if (isTRUE(checkmate::check_string(joint_index))) { joint_index <- grep(joint_index, names(theta_hat), perl = TRUE) } # V_hat: estimated covariance matrix V_hat <- stats::vcov(object) # n: sample size n <- tryCatch(stats::nobs(object), error = function(e) NULL) if (is.null(n)) n <- tryCatch(stats::nobs(attr(object, "model")), error = function(e) NULL) if (is.null(n)) insight::format_error("Could not extract sample size from model object.") # R: Q x P matrix for testing Q hypotheses on P parameters # build R matrix based on joint_index R <- matrix(0, nrow = length(joint_index), ncol = length(theta_hat)) for (i in seq_along(joint_index)) { if (is.numeric(joint_index)) { R[i, joint_index[i]] <- 1 } else { R[i, which(names(theta_hat) == joint_index[i])] <- 1 } } # null hypothesis checkmate::assert( checkmate::check_number(hypothesis), checkmate::check_numeric(hypothesis, len = nrow(R)), checkmate::check_null(hypothesis) ) if (is.null(hypothesis)) hypothesis <- 0 r <- matrix(hypothesis, 
nrow = nrow(R), ncol = 1)

    # Calculate the difference between R*theta_hat and r
    diff <- R %*% theta_hat - r

    # Calculate the inverse of R*(V_hat/n)*R'
    inv <- solve(R %*% V_hat %*% t(R))

    # Calculate the Wald test statistic
    if (joint_test == "f") {
        wald_statistic <- t(diff) %*% inv %*% diff / dim(R)[1] # Q is the number of rows in R
    } else if (joint_test == "chisq") {
        wald_statistic <- t(diff) %*% inv %*% diff # Not normalized for chi-squared joint_test
    }

    # Degrees of freedom
    df1 <- dim(R)[1] # Q
    df2 <- tryCatch(insight::get_df(attr(object, "model")), error = function(e) NULL)
    # BUG FIX: the result of this fallback was previously discarded (the line read
    # `if (is.null(df2)) tryCatch(...)` with no assignment), so `df2` stayed NULL
    # and the crude `n - P` default below was always used whenever the "model"
    # attribute did not yield degrees of freedom.
    if (is.null(df2)) df2 <- tryCatch(insight::get_df(object), error = function(e) NULL)
    if (is.null(df2)) df2 <- n - length(theta_hat) # n - P

    # Calculate the p-value
    if (joint_test == "f") {
        p_value <- 1 - stats::pf(wald_statistic, df1, df2)
    } else if (joint_test == "chisq") {
        p_value <- 1 - stats::pchisq(wald_statistic, df1)
        df2 <- NULL
    }

    # Return the Wald joint_test statistic and p-value
    out <- data.frame(statistic = drop(wald_statistic), p.value = drop(p_value))
    class(out) <- c("hypotheses", "data.frame")
    if (joint_test == "f") {
        attr(out, "statistic_label") <- "F"
    } else if (joint_test == "chisq") {
        attr(out, "statistic_label") <- "ChiSq"
    }

    # degrees of freedom print
    if (joint_test == "f") {
        out$df1 <- df1
        out$df2 <- df2
    } else {
        out$df <- df1
    }

    # Create the print_head string
    print_head <- "\nJoint hypothesis test:\n"
    if (is.character(joint_index)) {
        for (i in joint_index) {
            print_head <- paste0(print_head, i, sprintf(" = %s\n", hypothesis))
        }
    } else if (inherits(object, c("marginaleffects", "comparisons", "slopes", "marginal_means"))) {
        tmp <- paste0(get_term_labels(object, joint_index), sprintf(" = %s\n", hypothesis))
        print_head <- c(print_head, tmp)
    } else {
        tmp <- paste0(get_term_labels(stats::coef(object), joint_index), sprintf(" = %s\n", hypothesis))
        print_head <- c(print_head, tmp)
    }
    attr(out, "print_head") <- print_head
    return(out)
}marginaleffects/R/plot_comparisons.R0000644000176200001440000001453114557277362017351 0ustar liggesusers#' Plot Conditional or Marginal Comparisons #' #' @description #' Plot comparisons on the y-axis against values of one or more predictors (x-axis, colors/shapes, and facets). #' #' The `by` argument is used to plot marginal comparisons, that is, comparisons made on the original data, but averaged by subgroups. This is analogous to using the `by` argument in the `comparisons()` function. #' #' The `condition` argument is used to plot conditional comparisons, that is, comparisons made on a user-specified grid. This is analogous to using the `newdata` argument and `datagrid()` function in a `comparisons()` call. All variables whose values are not specified explicitly are treated as usual by `datagrid()`, that is, they are held at their mean or mode (or rounded mean for integers). This includes grouping variables in mixed-effects models, so analysts who fit such models may want to specify the groups of interest using the `condition` argument, or supply model-specific arguments to compute population-level estimates. See details below. #' #' See the "Plots" vignette and website for tutorials and information on how to customize plots: #' #' * https://marginaleffects.com/vignettes/plot.html #' * https://marginaleffects.com #' #' @param variables Name of the variable whose contrast we want to plot on the y-axis. #' @param draw `TRUE` returns a `ggplot2` plot. `FALSE` returns a `data.frame` of the underlying data. #' @inheritParams comparisons #' @param newdata When `newdata` is `NULL`, the grid is determined by the `condition` argument. When `newdata` is not `NULL`, the argument behaves in the same way as in the `comparisons()` function. 
#' @inheritParams plot_slopes #' @inheritParams slopes #' @template model_specific_arguments #' @return A `ggplot2` object #' @export #' @examples #' mod <- lm(mpg ~ hp * drat * factor(am), data = mtcars) #' #' plot_comparisons(mod, variables = "hp", condition = "drat") #' #' plot_comparisons(mod, variables = "hp", condition = c("drat", "am")) #' #' plot_comparisons(mod, variables = "hp", condition = list("am", "drat" = 3:5)) #' #' plot_comparisons(mod, variables = "am", condition = list("hp", "drat" = range)) #' #' plot_comparisons(mod, variables = "am", condition = list("hp", "drat" = "threenum")) plot_comparisons <- function(model, variables = NULL, condition = NULL, by = NULL, newdata = NULL, type = "response", vcov = NULL, conf_level = 0.95, wts = NULL, comparison = "difference", transform = NULL, rug = FALSE, gray = FALSE, draw = TRUE, ...) { dots <- list(...) if ("effect" %in% names(dots)) { if (is.null(variables)) { variables <- dots[["effect"]] } else { insight::format_error("The `effect` argument has been renamed to `variables`.") } } if ("transform_post" %in% names(dots)) { # backward compatibility transform <- dots[["transform_post"]] } # order of the first few paragraphs is important scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) if (!is.null(wts) && is.null(by)) { insight::format_error("The `wts` argument requires a `by` argument.") } checkmate::assert_character(by, null.ok = TRUE, max.len = 3, min.len = 1, names = "unnamed") if ((!is.null(condition) && !is.null(by)) || (is.null(condition) && is.null(by))) { msg <- "One of the `condition` and `by` arguments must be supplied, but not both." 
insight::format_error(msg) } # sanity check checkmate::assert( checkmate::check_character(variables, names = "unnamed"), checkmate::check_list(variables, names = "unique"), .var.name = "variables") modeldata <- get_modeldata( model, additional_variables = c(names(condition), by), wts = wts) # mlr3 and tidymodels if (is.null(modeldata) || nrow(modeldata) == 0) { modeldata <- newdata } # conditional if (!is.null(condition)) { condition <- sanitize_condition(model, condition, variables, modeldata = modeldata) v_x <- condition$condition1 v_color <- condition$condition2 v_facet_1 <- condition$condition3 v_facet_2 <- condition$condition4 datplot <- comparisons( model, newdata = condition$newdata, type = type, vcov = vcov, conf_level = conf_level, by = FALSE, wts = wts, variables = variables, comparison = comparison, transform = transform, cross = FALSE, modeldata = modeldata, ...) } # marginal if (!is.null(by)) { newdata <- sanitize_newdata( model = model, newdata = newdata, modeldata = modeldata, by = by, wts = wts) datplot <- comparisons( model, by = by, newdata = newdata, type = type, vcov = vcov, conf_level = conf_level, variables = variables, wts = wts, comparison = comparison, transform = transform, cross = FALSE, modeldata = modeldata, ...) 
v_x <- by[[1]]
        v_color <- hush(by[[2]])
        v_facet_1 <- hush(by[[3]])
        v_facet_2 <- hush(by[[4]])
    }

    datplot <- plot_preprocess(datplot, v_x = v_x, v_color = v_color, v_facet_1 = v_facet_1, v_facet_2 = v_facet_2, condition = condition, modeldata = modeldata)

    # return immediately if the user doesn't want a plot
    if (isFALSE(draw)) {
        out <- as.data.frame(datplot)
        attr(out, "posterior_draws") <- attr(datplot, "posterior_draws")
        return(out)
    }

    # ggplot2
    insight::check_if_installed("ggplot2")
    p <- plot_build(datplot,
        v_x = v_x,
        v_color = v_color,
        v_facet_1 = v_facet_1,
        v_facet_2 = v_facet_2,
        gray = gray,
        rug = rug,
        modeldata = modeldata)
    # CLEANUP: was `sprintf("Comparison")`, a format call with no format
    # directives; the plain string literal produces the identical label.
    p <- p + ggplot2::labs(x = v_x, y = "Comparison")
    return(p)
}
marginaleffects/R/methods_MCMCglmm.R0000644000176200001440000000167114541720224017055 0ustar liggesusers#' @rdname get_predict
#' @export
get_predict.MCMCglmm <- function(
    model,
    newdata,
    type = "response",
    ndraws = 1000,
    ...) {
    # number of posterior draws stored in the fitted model
    ndraws_mod <- nrow(model$VCV)
    # subsample the posterior without replacement when the model holds more
    # draws than requested; otherwise use every stored draw
    if (ndraws < ndraws_mod) {
        idx <- sample(seq_len(ndraws_mod), ndraws)
    } else {
        idx <- seq_len(ndraws_mod)
    }
    # one predicted vector per retained draw; `it = i` selects the i-th
    # posterior iteration in MCMCglmm's predict method
    draws <- lapply(idx, function(i) stats::predict(model, newdata = newdata, it = i, ...))
    draws <- do.call("cbind", draws)
    # point estimate = posterior median, row by row
    out <- data.frame(
        rowid = seq_len(nrow(newdata)),
        estimate = apply(draws, MARGIN = 1, FUN = stats::median))
    attr(out, "posterior_draws") <- draws
    return(out)
}

#' @rdname get_vcov
#' @export
get_vcov.MCMCglmm <- function(model, vcov = NULL, ...)
{ if (!is.null(vcov) && !is.logical(vcov)) { insight::format_warning("The `vcov` argument is not supported for models of this class.") } return(NULL) }marginaleffects/R/hypotheses.R0000644000176200001440000003724514557752334016155 0ustar liggesusers#' (Non-)Linear Tests for Null Hypotheses, Joint Hypotheses, Equivalence, Non Superiority, and Non Inferiority #' #' @description #' Uncertainty estimates are calculated as first-order approximate standard errors for linear or non-linear functions of a vector of random variables with known or estimated covariance matrix. In that sense, [`hypotheses`] emulates the behavior of the excellent and well-established [car::deltaMethod] and [car::linearHypothesis] functions, but it supports more models; requires fewer dependencies; expands the range of tests to equivalence and superiority/inferiority; and offers convenience features like robust standard errors. #' #' To learn more, read the hypothesis tests vignette, visit the #' package website, or scroll down this page for a full list of vignettes: #' #' * #' * #' #' Warning #1: Tests are conducted directly on the scale defined by the `type` argument. For some models, it can make sense to conduct hypothesis or equivalence tests on the `"link"` scale instead of the `"response"` scale which is often the default. #' #' Warning #2: For hypothesis tests on objects produced by the `marginaleffects` package, it is safer to use the `hypothesis` argument of the original function. Using `hypotheses()` may not work in certain environments, in lists, or when working programmatically with *apply style functions. #' #' Warning #3: The tests assume that the `hypothesis` expression is (approximately) normally distributed, which for non-linear functions of the parameters may not be realistic. More reliable confidence intervals can be obtained using the \code{inferences()} function with `method = "boot"`. 
#' #' @inheritParams comparisons #' @param model Model object or object generated by the `comparisons()`, `slopes()`, or `predictions()` functions. #' @param FUN `NULL` or function. #' * `NULL` (default): hypothesis test on a model's coefficients, or on the quantities estimated by one of the `marginaleffects` package functions. #' * Function which accepts a model object and returns a numeric vector or a data.frame with two columns called `term` and `estimate`. This argument can be useful when users want to conduct a hypothesis test on an arbitrary function of quantities held in a model object. See examples below. #' @param joint Joint test of statistical significance. The null hypothesis value can be set using the `hypothesis` argument. #' - FALSE: Hypotheses are not tested jointly. #' - TRUE: All parameters are tested jointly. #' - String: A regular expression to match parameters to be tested jointly. `grep(joint, perl = TRUE)` #' - Character vector of parameter names to be tested. Characters refer to the names of the vector returned by `coef(object)`. #' - Integer vector of indices. Which parameters positions to test jointly. #' @param joint_test A character string specifying the type of test, either "f" or "chisq". The null hypothesis is set by the `hypothesis` argument, with default null equal to 0 for all parameters. #' #' @section Joint hypothesis tests: #' The test statistic for the joint Wald test is calculated as (R * theta_hat - r)' * inv(R * V_hat * R') * (R * theta_hat - r) / Q, #' where theta_hat is the vector of estimated parameters, V_hat is the estimated covariance matrix, R is a Q x P matrix for testing Q hypotheses on P parameters, #' r is a Q x 1 vector for the null hypothesis, and Q is the number of rows in R. If the test is a Chi-squared test, the test statistic is not normalized. #' #' The p-value is then calculated based on either the F-distribution (for F-test) or the Chi-squared distribution (for Chi-squared test). 
#' For the F-test, the degrees of freedom are Q and (n - P), where n is the sample size and P is the number of parameters. #' For the Chi-squared test, the degrees of freedom are Q. #' #' @template equivalence #' @examples #' library(marginaleffects) #' mod <- lm(mpg ~ hp + wt + factor(cyl), data = mtcars) #' #' # When `FUN` and `hypotheses` are `NULL`, `hypotheses()` returns a data.frame of parameters #' hypotheses(mod) #' #' # Test of equality between coefficients #' hypotheses(mod, hypothesis = "hp = wt") #' #' # Non-linear function #' hypotheses(mod, hypothesis = "exp(hp + wt) = 0.1") #' #' # Robust standard errors #' hypotheses(mod, hypothesis = "hp = wt", vcov = "HC3") #' #' # b1, b2, ... shortcuts can be used to identify the position of the #' # parameters of interest in the output of FUN #' hypotheses(mod, hypothesis = "b2 = b3") #' #' # wildcard #' hypotheses(mod, hypothesis = "b* / b2 = 1") #' #' # term names with special characters have to be enclosed in backticks #' hypotheses(mod, hypothesis = "`factor(cyl)6` = `factor(cyl)8`") #' #' mod2 <- lm(mpg ~ hp * drat, data = mtcars) #' hypotheses(mod2, hypothesis = "`hp:drat` = drat") #' #' # predictions(), comparisons(), and slopes() #' mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) #' cmp <- comparisons(mod, newdata = "mean") #' hypotheses(cmp, hypothesis = "b1 = b2") #' #' mfx <- slopes(mod, newdata = "mean") #' hypotheses(cmp, hypothesis = "b2 = 0.2") #' #' pre <- predictions(mod, newdata = datagrid(hp = 110, mpg = c(30, 35))) #' hypotheses(pre, hypothesis = "b1 = b2") #' #' # The `FUN` argument can be used to compute standard errors for fitted values #' mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) #' #' f <- function(x) predict(x, type = "link", newdata = mtcars) #' p <- hypotheses(mod, FUN = f) #' head(p) #' #' f <- function(x) predict(x, type = "response", newdata = mtcars) #' p <- hypotheses(mod, FUN = f) #' head(p) #' #' # Complex aggregation #' # Step 1: Collapse 
predicted probabilities by outcome level, for each individual #' # Step 2: Take the mean of the collapsed probabilities by group and `cyl` #' library(dplyr) #' library(MASS) #' library(dplyr) #' #' dat <- transform(mtcars, gear = factor(gear)) #' mod <- polr(gear ~ factor(cyl) + hp, dat) #' #' aggregation_fun <- function(model) { #' predictions(model, vcov = FALSE) |> #' mutate(group = ifelse(group %in% c("3", "4"), "3 & 4", "5")) |> #' summarize(estimate = sum(estimate), .by = c("rowid", "cyl", "group")) |> #' summarize(estimate = mean(estimate), .by = c("cyl", "group")) |> #' rename(term = cyl) #' } #' #' hypotheses(mod, FUN = aggregation_fun) #' #' # Equivalence, non-inferiority, and non-superiority tests #' mod <- lm(mpg ~ hp + factor(gear), data = mtcars) #' p <- predictions(mod, newdata = "median") #' hypotheses(p, equivalence = c(17, 18)) #' #' mfx <- avg_slopes(mod, variables = "hp") #' hypotheses(mfx, equivalence = c(-.1, .1)) #' #' cmp <- avg_comparisons(mod, variables = "gear", hypothesis = "pairwise") #' hypotheses(cmp, equivalence = c(0, 10)) #' #' # joint hypotheses: character vector #' model <- lm(mpg ~ as.factor(cyl) * hp, data = mtcars) #' hypotheses(model, joint = c("as.factor(cyl)6:hp", "as.factor(cyl)8:hp")) #' #' # joint hypotheses: regular expression #' hypotheses(model, joint = "cyl") #' #' # joint hypotheses: integer indices #' hypotheses(model, joint = 2:3) #' #' # joint hypotheses: different null hypotheses #' hypotheses(model, joint = 2:3, hypothesis = 1) #' hypotheses(model, joint = 2:3, hypothesis = 1:2) #' #' # joint hypotheses: marginaleffects object #' cmp <- avg_comparisons(model) #' hypotheses(cmp, joint = "cyl") #' #' @export hypotheses <- function( model, hypothesis = NULL, vcov = NULL, conf_level = 0.95, df = Inf, equivalence = NULL, joint = FALSE, joint_test = "f", FUN = NULL, numderiv = "fdforward", ...) { dots <- list(...) 
call_attr <- c(list( name = "hypotheses", model = model, hypothesis = hypothesis, vcov = vcov, conf_level = conf_level, df = df, equivalence = equivalence, joint = joint, joint_test = joint_test, FUN = FUN, numderiv = numderiv), dots) if ("modeldata" %in% names(dots)) { call_attr[["modeldata"]] <- dots[["modeldata"]] } call_attr <- do.call("call", call_attr) ## Bootstrap # restore an already sanitized hypothesis if necessary hypothesis <- if(is.null(attr(hypothesis, "label"))){ hypothesis } else{ attr(hypothesis, "label") } # Apply inferences method out <- inferences_dispatch( INF_FUN = hypotheses, model=model, hypothesis = hypothesis, vcov = vcov, conf_level = conf_level, df = df, equivalence = equivalence, joint = joint, joint_test = joint_test, numderiv = numderiv, FUN = FUN, ...) if (!is.null(out)) { return(out) } ## Done with Bootstrap if (!isFALSE(joint)) { out <- joint_test(model, joint_index = joint, joint_test = joint_test, hypothesis = hypothesis) return(out) } args <- list( conf_level = conf_level, vcov = vcov, df = df, equivalence = equivalence) # keep this NULL in case `hypothesis` was used in the previous call args[["hypothesis"]] <- hypothesis if (length(dots) > 0) { args <- c(args, dots) } xcall <- substitute(model) if (is.symbol(xcall)) { model <- eval(xcall, envir = parent.frame()) } else if (is.call(xcall)) { internal <- c( "predictions", "avg_predictions", "comparisons", "avg_comparisons", "slopes", "avg_slopes", "marginal_means") # mfx object if (as.character(xcall)[[1]] %in% internal) { args[["x"]] <- model out <- do.call(recall, args) if (!is.null(out)) { class(out) <- c("hypotheses", class(out)) return(out) } # non-mfx object } else { model <- eval(xcall, envir = parent.frame()) } } # marginaleffects objects: recall() if (inherits(model, c("predictions", "comparisons", "slopes", "marginalmeans"))) { args[["x"]] <- attr(model, "call") out <- do.call(recall, args) if (!is.null(out)) { class(out) <- c("hypotheses", class(out)) return(out) } } 
numderiv = sanitize_numderiv(numderiv) # after re-evaluation tmp <- sanitize_hypothesis(hypothesis, ...) hypothesis <- tmp$hypothesis hypothesis_null <- tmp$hypothesis_null vcov_false <- isFALSE(vcov) vcov <- get_vcov(model = model, vcov = vcov) vcov.type <- get_vcov_label(vcov = vcov) if (is.null(FUN)) { FUNinner <- function(model, ...) { if (inherits(model, c("predictions", "slopes", "comparisons"))) { return(model) } else if (inherits(model, "data.frame")) { if (!all(c("term", "estimate") %in% colnames(model))) { insight::format_error("The model object is a data.frame but doesn't contain the columns 'term' or 'estimate'. Make sure these columns are present") } idx <- intersect(colnames(model), c("term", "group", "estimate")) return(model[, idx]) } else { param <- insight::get_parameters(model, ...) idx <- intersect(colnames(model), c("term", "group", "estimate")) colnames(param)[1:2] <- c("term", "estimate") return(param) } } } else { FUNinner <- FUN } FUNouter <- function(model, hypothesis) { out <- FUNinner(model) if (isTRUE(checkmate::check_numeric(out))) { out <- data.frame( term = seq_along(out), estimate = out) } if (!inherits(out, "data.frame") || any(!c("term", "estimate") %in% colnames(out))) { msg <- "`FUN` must return a numeric vector or a data.frame with two columns named `term` and `estimate`." insight::format_error(msg) } tmp <- get_hypothesis(out, hypothesis = hypothesis) out <- tmp$estimate if (!is.null(attr(tmp, "label"))) { attr(out, "label") <- attr(tmp, "label") } else { attr(out, "label") <- tmp$term } if ("group" %in% colnames(tmp)) { attr(out, "grouplab") <- tmp[["group"]] } return(out) } b <- FUNouter(model = model, hypothesis = hypothesis) # For simulation based inference generate posterior draws from inferences_coefmat # Doesn't support data.frames which aren't mfx objects if (inherits(model, "inferences_simulation")){ if (inherits(model, "data.frame")){ msg <- "Simulation based inference not yet supported for data.frame type." 
insight::format_error(msg) } model_sim <- sanitize_model( model = model, vcov = vcov, calling_function = "hypotheses", ...) posterior_draws <- matrix(nrow=length(attr(b, "label")), ncol = nrow(attr(model_sim, "inferences_coefmat"))) rownames(posterior_draws) <- attr(b, "label") for (sim_n in 1:ncol(posterior_draws)) { model_tmp <- set_coef(model_sim, attr(model_sim, "inferences_coefmat")[sim_n,]) b_tmp <- FUNouter(model = model_tmp, hypothesis = hypothesis) posterior_draws[,sim_n]<- b_tmp } attr(b, "posterior_draws") <- posterior_draws } # bayesian posterior if (!is.null(attr(b, "posterior_draws"))) { draws <- attr(b, "posterior_draws") J <- NULL se <- rep(NA, length(b)) # standard errors via delta method } else if (!vcov_false && isTRUE(checkmate::check_matrix(vcov))) { args <- list(model = model, vcov = vcov, hypothesis = hypothesis, FUN = FUNouter, numderiv = numderiv) args <- c(args, dots) se <- do.call("get_se_delta", args) J <- attr(se, "jacobian") attr(se, "jacobian") <- NULL draws <- NULL # no standard error } else { J <- draws <- NULL se <- rep(NA, length(b)) } hyplab <- attr(b, "label") if (!is.null(hypothesis)) { if (is.null(hyplab)) { hyplab <- attr(hypothesis, "label") } if (!is.null(hyplab)) { out <- data.frame( term = hyplab, estimate = b, std.error = se) } else { out <- data.frame( term = "custom", estimate = b, std.error = se) } } else { if (!is.null(hyplab) && length(hyplab) == length(b)) { out <- data.frame( term = hyplab, estimate = b, std.error = se) } else { out <- data.frame( term = paste0("b", seq_along(b)), estimate = b, std.error = se) } } # Remove std.error column when not computing st.errors and in bootstrap if(vcov_false) { out$std.error <- NULL } out[["group"]] <- attr(b, "grouplab") out <- get_ci( out, conf_level = conf_level, vcov = vcov, draws = draws, estimate = "estimate", null_hypothesis = hypothesis_null, df = df, model = model, ...) 
if (!is.null(equivalence)) { out <- equivalence( out, df = df, equivalence = equivalence) } out <- sort_columns(out) class(out) <- c("hypotheses", "deltamethod", class(out)) attr(out, "posterior_draws") <- draws attr(out, "model") <- model attr(out, "model_type") <- class(model)[1] attr(out, "jacobian") <- J attr(out, "call") <- call_attr attr(out, "vcov") <- vcov attr(out, "vcov.type") <- vcov.type attr(out, "conf_level") <- conf_level return(out) } marginaleffects/R/methods_pscl.R0000644000176200001440000000236514541720224016423 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @export set_coef.hurdle <- function(model, coefs, ...) { # in pscl::hurdle, coefficients are held in a named list: # model$coefficients. Each element of the list is a vector which # corresponds to one equation (e.g., "zero" or "response"). When calling # "coef(model)", the equation label is prefixed to the term name with an # underscore. out <- model for (lab in names(out$coefficients)) { idx <- paste0(lab, "_", names(out$coefficients[[lab]])) idx <- match(idx, names(coefs)) # probably too conservative if (anyNA(idx)) { stop("Mismatched coefficients names. Please check the `marginaleffects::`set_coef.hurdle` or `set_coef.zeroinfl` function.", call. = FALSE) } out$coefficients[[lab]] <- stats::setNames(coefs[idx], names(out$coefficients[[lab]])) } return(out) } #' @rdname set_coef #' @export set_coef.zeroinfl <- set_coef.hurdle #' @rdname get_group_names #' @export get_group_names.hurdle <- function(model, type = "count", ...) { if (type == "prob") { out <- colnames(stats::predict(model, type = "prob")) } else { out <- "main_marginaleffect" } return(out) } marginaleffects/R/plot.R0000644000176200001440000000210014541720224014700 0ustar liggesusers# The reason why I don't supply these functions is that there is too much ambiguity about which kind of plot people are expecting. 
The `plot_predictions()` function has a bunch of extra arguments, and makes assumptions based on the order of the user-supplied `condition` values, for example. Having another `plot.predictions()` method in addition would require me to add all those arguments, or to make opinionated decisions that would require a ton of `if/else` based on the original `predictions()` call's `by`, `newdata` grid, and others. This is hard, and almost certainly unsatisfactory. Also, it duplicates the plotting user-interface. It feels better to steer all users to a single point of entry for plots. #' @export plot.predictions <- function(x, ...) { insight::format_error("Please use the `plot_predictions()` function.") } #' @export plot.comparisons <- function(x, ...) { insight::format_error("Please use the `plot_comparisons()` function.") } #' @export plot.slopes <- function(x, ...) { insight::format_error("Please use the `plot_slopes()` function.") }marginaleffects/R/sort.R0000644000176200001440000000233414541720224014722 0ustar liggesusers sort_columns <- function(x, newdata = data.frame(), by = NULL) { if (!inherits(x, "data.table")) { data.table::setDT(x) } if (isTRUE(checkmate::check_character(by))) { bycols <- by } else if (isTRUE(checkmate::check_data_frame(by))) { bycols <- colnames(by) } else { bycols <- NULL } stubcols <- c( "rowid", "rowidcf", "term", "group", "hypothesis", "by", grep("^contrast", colnames(x), value = TRUE), bycols, "estimate", "std.error", "statistic", "p.value", "s.value", "conf.low", "conf.high", attr(newdata, "newdata_variables_datagrid"), "marginaleffects_wts", sort(grep("^predicted", colnames(newdata), value = TRUE))) cols <- intersect(stubcols, colnames(x)) cols <- unique(c(cols, colnames(x))) x <- x[, ..cols] if ("group" %in% names(x) && all(x$group == "main_marginaleffect")) { x$group <- NULL } # return contrast column only when relevant if ("contrast" %in% colnames(x)) { x[is.na(contrast), "contrast" := ""] x[contrast == "dydx", "contrast" := 
"dY/dX"] if (all(x$contrast == "dY/dX")) { x[, "contrast" := NULL] } } return(x) }marginaleffects/R/sanity_dots.R0000644000176200001440000000656314541720224016303 0ustar liggesusers# This function is very strict. sanity_dots <- function(model, calling_function = NULL, ...) { dots <- list(...) if (isTRUE(calling_function == "marginaleffects")) { # comparison: this would break `dydx` normalization # interaction: cross countrast+slope do not make sense # transform: should we really be back-transforming slopes? unsupported <- c("comparison", "transform", "cross", "transform_pre", "transform_post") unsupported <- intersect(names(dots), unsupported) if (length(unsupported) > 0) { msg <- sprintf( "These arguments are supported by the `comparisons()` function but not by the `slopes()` function: %s", paste(unsupported, collapse = ", ")) stop(msg, call. = FALSE) } } # deprecated if ("interaction" %in% names(dots)) { msg <- "The `interaction` argument has been deprecated. Please use `cross` instead." 
insight::format_warning(msg)
    }

    valid <- list()

    # mixed effects
    valid[["merMod"]] <- valid[["lmerMod"]] <- valid[["glmerMod"]] <- valid[["lmerModLmerTest"]] <- c("include_random", "re.form", "allow.new.levels", "random.only")
    valid[["brmsfit"]] <- c("draw_ids", "nlpar", "ndraws", "re_formula", "allow_new_levels", "sample_new_levels", "dpar", "resp")
    valid[["brmsfit_multiple"]] <- valid[["brmsfit"]]
    valid[["selection"]] <- c("part") # sampleSelection
    valid[["glmmTMB"]] <- c("re.form", "allow.new.levels", "zitype") # glmmTMB
    valid[["bam"]] <- c("exclude") # mgcv
    valid[["gam"]] <- c("exclude") # mgcv
    valid[["rlmerMod"]] <- c("re.form", "allow.new.levels")
    valid[["gamlss"]] <- c("what", "safe") # gamlss
    valid[["lme"]] <- c("level") # nlme::lme
    valid[["bife"]] <- c("alpha_new", "corrected") # bife (comment previously copy-pasted "nlme::lme")
    # BUG FIX: this entry used to read `valid[["process_error"]] <-` with no
    # right-hand side, which chained into the `white_list <- c(...)` assignment
    # below: the entire white list was stored under the bogus class name
    # "process_error", and `process_error` was never registered as a valid
    # argument for mvgam models (so legitimate calls triggered the warning below).
    # Keyed by model class, with the argument name as the value, matching every
    # other entry in `valid`.
    valid[["mvgam"]] <- c("process_error") # mvgam::mvgam

    white_list <- c(
        "conf.int",
        "modeldata",
        "internal_call",
        "df",
        "transform",
        "comparison",
        "side",
        "delta",
        "null",
        "equivalence",
        "draw",
        "flag", # internal dev
        "transform_pre", "transform_post", # backward compatibility everywhere
        "variables_grid", # backward compatibility in marginal_means()
        "at" # topmodels procast
    )

    model_class <- class(model)[1]
    good <- NULL
    if (model_class %in% names(valid)) {
        good <- valid[[model_class]]
    }
    backward_compatibility <- c("conf.level")
    good <- c(good, backward_compatibility)
    bad <- setdiff(names(dots), c(good, white_list))
    if (length(bad) > 0) {
        if (model_class %in% names(valid)) {
            msg <- sprintf("These arguments are not supported for models of class `%s`: %s. Valid arguments include: %s. Please file a request on Github if you believe that additional arguments should be supported: https://github.com/vincentarelbundock/marginaleffects/issues", model_class, paste(bad, collapse = ", "), paste(valid[[model_class]], collapse = ", "))
        } else {
            msg <- sprintf("These arguments are not supported for models of class `%s`: %s.
Please file a request on Github if you believe that additional arguments should be supported: https://github.com/vincentarelbundock/marginaleffects/issues", model_class, paste(bad, collapse = ", ")) } warning(msg, call. = FALSE) } } marginaleffects/R/methods_crch.R0000644000176200001440000000422014541720224016371 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @export set_coef.crch <- function(model, coefs, ...) { # coefs are split between location coefs (which can be length 0) and scale coefs # (which must be length > 0 and always start with "(scale)_" due to get_parameters(), # to match with get_varcov(., component = "all") output). In crch object, these # are stored as two elements in a list, with scale coefs lacking the "(scale)_" # prefix, so we remove it. location_coefs <- coefs[!startsWith(names(coefs), "(scale)_")] scale_coefs <- coefs[startsWith(names(coefs), "(scale)_")] names(scale_coefs) <- sub("(scale)_", "", names(scale_coefs), fixed = TRUE) if (length(location_coefs) > 0) { model[["coefficients"]]$location[names(location_coefs)] <- location_coefs } model[["coefficients"]]$scale[names(scale_coefs)] <- scale_coefs model } #' @include get_predict.R #' @rdname get_predict #' @export get_predict.crch <- function(model, newdata = NULL, type = "location", ...) { pred <- stats::predict(model, newdata = newdata, type = type) sanity_predict_vector(pred = pred, model = model, newdata = newdata, type = type) sanity_predict_numeric(pred = pred, model = model, newdata = newdata, type = type) out <- data.frame( rowid = 1:nrow(newdata), estimate = pred) return(out) } #' @include set_coef.R #' @rdname set_coef #' @export set_coef.hxlr <- function(model, coefs, ...) 
{ # in crch::hxlr, coefficients are held in: # model$coefficients$intercept # model$coefficients$location # model$coefficients$scale # # note: there are no prefixes in coef() output, so coefs may have same name out <- model idx_int <- length(model$coefficients$intercept) idx_loc <- length(model$coefficients$location) out$coefficients$intercept[] <- coefs[1:idx_int] out$coefficients$location[] <- coefs[idx_int + 1:idx_loc] out$coefficients$scale[] <- coefs[(idx_int + idx_loc + 1):length(coefs)] return(out) } #' @export #' @noRd get_predict.hxlr <- get_predict.crch marginaleffects/R/get_coef.R0000644000176200001440000000113314560035476015513 0ustar liggesusers#' Get a named vector of coefficients from a model object (internal function) #' #' @inheritParams slopes #' @return A named vector of coefficients. The names must match those of the variance matrix. #' @rdname get_coef #' @keywords internal #' @export get_coef <- function (model, ...) { UseMethod("get_coef", model) } #' @rdname get_coef #' @export get_coef.default <- function(model, ...) { ## faster # out <- stats::coef(model) # more general out <- insight::get_parameters(model, component = "all") out <- stats::setNames(out$Estimate, out$Parameter) return(out) } marginaleffects/R/methods_nnet.R0000644000176200001440000000701314557752334016437 0ustar liggesusers#' @include set_coef.R #' @rdname set_coef #' @export set_coef.multinom <- function(model, coefs, ...) { # internally, coefficients are held in the `wts` vector, with 0s # interspersed. When transforming that vector to a matrix, we see that the # first row and first column are all zeros. # NOTE: must use `newdata` in predict otherwise returns stored object. b_original <- get_coef(model) model$wts[match(b_original, model$wts)] <- coefs return(model) } #' @include get_coef.R #' @rdname get_coef #' @export get_coef.multinom <- function(model, ...) { out <- insight::get_parameters(model, ...) 
out <- stats::setNames( out$Estimate, sprintf("%s:%s", out$Response, out$Parameter)) return(out) } #' @include get_group_names.R #' @rdname get_group_names #' @export get_group_names.multinom <- function(model, ...) { resp <- insight::get_response(model) if (is.factor(resp)) { out <- levels(resp) } else { out <- unique(resp) } return(out[2:length(out)]) } #' @include get_predict.R #' @rdname get_predict #' @export get_predict.multinom <- function(model, newdata = insight::get_data(model), type = "probs", ...) { type <- sanitize_type(model, type, calling_function = "predictions") is_latent <- is_mclogit <- is_nnet <- FALSE if (isTRUE(type == "latent") && inherits(model, c("mblogit", "mclogit"))) { is_latent <- TRUE is_mclogit <- TRUE type <- "link" } else if (isTRUE(type == "latent") && inherits(model, "multinom")) { is_latent <- TRUE is_nnet <- TRUE type <- "probs" } # needed because `predict.multinom` uses `data` rather than `newdata` pred <- stats::predict(model, newdata = newdata, type = type, ...) 
# atomic vector means there is only one row in `newdata` # two levels DV returns a vector if (isTRUE(checkmate::check_atomic_vector(pred))) { y_original <- sort(unique(insight::get_response(model))) two_levels <- length(y_original) == 2 if (isTRUE(two_levels)) { pred <- matrix(pred) colnames(pred) <- as.character(y_original[2]) } else { pred <- matrix(pred, nrow = 1, dimnames = list(NULL, names(pred))) } } if (is_latent && is_mclogit) { missing_level <- as.character(unique(insight::get_response(model))) missing_level <- setdiff(missing_level, colnames(pred)) if (length(missing_level == 1)) { pred <- cbind(0, pred) colnames(pred)[1] <- missing_level pred <- pred - rowMeans(pred) } else { insight::format_error("Unable to compute predictions on the latent scale.") } } else if (is_latent && is_nnet) { inverse_softMax <- function(mu) { log_mu <- log(mu) return(sweep(log_mu, 1, STATS = rowMeans(log_mu), FUN = "-")) } pred <- inverse_softMax(pred) } # matrix with outcome levels as columns out <- data.frame( group = rep(colnames(pred), each = nrow(pred)), estimate = c(pred)) # usually when `newdata` is supplied by `comparisons` if ("rowid" %in% colnames(newdata)) { out$rowid <- rep(newdata$rowid, times = ncol(pred)) } else { out$rowid <- rep(seq_len(nrow(pred)), times = ncol(pred)) } return(out) } marginaleffects/R/get_predict.R0000644000176200001440000000730614541720224016230 0ustar liggesusers#' Get predicted values from a model object (internal function) #' #' @return A data.frame of predicted values with a number of rows equal to the #' number of rows in `newdata` and columns "rowid" and "estimate". A "group" #' column is added for multivariate models or models with categorical outcomes. #' @rdname get_predict #' @inheritParams slopes #' @keywords internal #' @export get_predict <- function(model, newdata, type, ...) 
{ UseMethod("get_predict", model) } #' @rdname get_predict #' @export get_predict.default <- function(model, newdata = insight::get_data(model), type = "response", ...) { dots <- list(...) if (is.null(type)) { type <- sanitize_type(model = model, type = type) } # some predict methods raise warnings on unused arguments unused <- c("normalize_dydx", "eps", "numDeriv_method", "internal_call", "draw", "modeldata", "transform_pre", "transform_post", "flag") dots <- dots[setdiff(names(dots), unused)] # first argument in the predict methods is not always named "x" or "model" dots[["newdata"]] <- newdata dots[["type"]] <- type args <- c(list(model), dots) # `pred` is a secret argument called by `predict.lm` to turn a numeric vector into a data frame with correct `rowid` if ("pred" %in% names(dots)) { pred <- dots[["pred"]] } else { fun <- stats::predict pred <- suppressWarnings(do.call(fun, args)) } # 1-d array to vector (e.g., {mgcv}) if (is.array(pred) && length(dim(pred)) == 1) { pred <- as.vector(pred) } # 1-d array to vector (e.g., Gam from {gam}) if (is.array(pred) && length(dim(pred)) == 3 && dim(pred)[1] == 1 && dim(pred)[2] == 1 && dim(pred)[3] > 1) { pred <- as.vector(pred) } # phylolm if (isTRUE(checkmate::check_matrix(pred, ncols = 1))) { pred <- drop(pred) } # atomic vector if (isTRUE(checkmate::check_atomic_vector(pred))) { # strip weird attributes added by some methods (e.g., predict.svyglm) if (length(pred) == nrow(newdata)) { # as.numeric is slow with large objects and we can't use is.numeric # to run it conditionally because objects of class "svystat" are # already numeric class(pred) <- "numeric" if ("rowid" %in% colnames(newdata)) { out <- list( rowid = newdata$rowid, estimate = pred) } else { out <- list(rowid = seq_len(length(pred)), estimate = pred) } } # matrix with outcome levels as columns } else if (is.matrix(pred)) { if (is.null(colnames(pred))) { colnames(pred) <- seq_len(ncol(pred)) } # internal calls always includes "rowid" as a column in 
`newdata` if ("rowid" %in% colnames(newdata)) { out <- list( rowid = rep(newdata[["rowid"]], times = ncol(pred)), group = rep(colnames(pred), each = nrow(pred)), estimate = c(pred)) } else { out <- list( rowid = rep(seq_len(nrow(pred)), times = ncol(pred)), group = rep(colnames(pred), each = nrow(pred)), estimate = c(pred)) } } else { stop(sprintf("Unable to extract predictions of type %s from a model of class %s. Please report this problem, along with reproducible code and data on Github: https://github.com/vincentarelbundock/marginaleffects/issues", type, class(model)[1]), call. = FALSE) } data.table::setDF(out) return(out) } marginaleffects/R/bootstrap_fwb.R0000644000176200001440000000365314541720224016613 0ustar liggesusersbootstrap_fwb <- function(model, INF_FUN, ...) { # attached by `inferences()` conf_type <- attr(model, "inferences_conf_type") checkmate::assert_choice(conf_type, choices = c("perc", "norm", "basic", "bc", "bca")) # bootstrap using the original data and call modeldata <- get_modeldata(model, additional_variables = FALSE) # evaluate the {marginaleffects} call to get output without inferences() # use ... because arguments are not the same for different {marginaleffects} functions dots <- list(...) dots[["vcov"]] <- FALSE # avoid recursion attr(model, "inferences_method") <- NULL out <- do.call(INF_FUN, c(list(model), dots)) # default confidence level may be implicit in original call, but we need numeric if (is.null(dots[["conf_level"]])) { conf_level <- 0.95 } else { conf_level <- dots[["conf_level"]] } bootfun <- function(data, w) { # If model has weights, multiply them by random weights if (!is.null(w0 <- stats::weights(model))) w <- w * w0 # Update the model's call and evaluate modboot <- stats::update(model, weights = w, evaluate = TRUE) # {marginaleffects} function needs to incorporate weights if # averaging. May be a problem if other weights supplied to # `wts` argument. 
dots[["wts"]] <- w args <- c(list(modboot), dots) out <- do.call(INF_FUN, args)$estimate return(out) } args <- list("data" = modeldata, "statistic" = bootfun) args <- c(args, attr(model, "inferences_dots")) args <- args[unique(names(args))] B <- do.call(fwb::fwb, args) # print.boot prints an ugly nested call B$call <- match.call() # Extract SEs and CIs fwb_summary <- summary(B, conf = conf_level, ci.type = conf_type) out$std.error <- fwb_summary[, "Std. Error"] out$conf.low <- fwb_summary[, 3] out$conf.high <- fwb_summary[, 4] attr(out, "inferences") <- B attr(out, "posterior_draws") <- t(B$t) return(out) } marginaleffects/R/get_model_matrix_attribute.R0000644000176200001440000000177014541720224021344 0ustar liggesusersget_model_matrix_attribute <- function(model, newdata = NULL) { # supported models (no inheritance) if (!isTRUE(class(model)[1] %in% c("lm", "glm", "rq"))) { return(newdata) } # stats::model.matrix creates all-0 columns with splines::bs() and other functions # this may be too aggressive, but it avoids all functions flag <- any(grepl("\\(", setdiff(names(get_coef(model)), "(Intercept)"))) if (isTRUE(flag)) { return(newdata) } # we don't support offsets, so revert to stats::predict() if (!is.null(model[["offset"]])) { return(newdata) } # subset variables for listwise deletion vars <- unlist(insight::find_predictors(model), use.names = FALSE) vars <- c(vars, unlist(insight::find_response(model), use.names = FALSE)) vars <- intersect(vars, colnames(newdata)) MM <- hush(get_model_matrix(model, newdata = data.frame(newdata)[, vars])) attr(newdata, "marginaleffects_model_matrix") <- MM return(newdata) } marginaleffects/R/methods_brms.R0000644000176200001440000001075214557752334016442 0ustar liggesusers#' @include sanity_model.R #' @rdname sanitize_model_specific #' @export sanitize_model_specific.brmsfit <- function(model, ...) 
{ insight::check_if_installed("collapse", minimum_version = "1.9.0") # terms: brmsfit objects do not have terms immediately available te <- tryCatch(attr(stats::terms(stats::formula(model)$formula), "term.labels"), error = function(e) NULL) if (any(grepl("^factor\\(", te))) { stop("The `factor()` function cannot be used in the model formula of a `brmsfit` model. Please convert your variable to a factor before fitting the model, or use the `mo()` function to specify monotonic variables (see the `brms` vignette on monotonic variables).", call. = FALSE) } return(model) } #' @rdname get_coef #' @export get_coef.brmsfit <- function(model, ...) { out <- insight::get_parameters(model) out <- collapse::dapply(out, MARGIN = 2, FUN = collapse::fmedian) return(out) } #' @include get_predict.R #' @rdname get_predict #' @export get_predict.brmsfit <- function(model, newdata = insight::get_data(model), type = "response", ...) { checkmate::assert_choice(type, choices = c("response", "link", "prediction", "average")) if (type == "link") { insight::check_if_installed("rstantools") draws <- rstantools::posterior_linpred( model, newdata = newdata, ...) } else if (type == "response") { insight::check_if_installed("rstantools") draws <- rstantools::posterior_epred( model, newdata = newdata, ...) } else if (type == "prediction") { insight::check_if_installed("rstantools") draws <- rstantools::posterior_predict( model, newdata = newdata, ...) } else if (type == "average") { insight::check_if_installed("brms") draws <- brms::pp_average( model, newdata = newdata, summary = FALSE, ...) 
} if ("rowid_internal" %in% colnames(newdata)) { idx <- newdata[["rowid_internal"]] } else if ("rowid" %in% colnames(newdata)) { idx <- newdata[["rowid"]] } else { idx <- 1:nrow(newdata) } # resp_subset sometimes causes dimension mismatch if (length(dim(draws)) == 2 && nrow(newdata) != ncol(draws)) { msg <- sprintf("Dimension mismatch: There are %s parameters in the posterior draws but %s observations in `newdata` (or the original dataset).", ncol(draws), nrow(newdata)) insight::format_error(msg) } # 1d outcome if (length(dim(draws)) == 2) { med <- collapse::dapply(draws, MARGIN = 2, FUN = collapse::fmedian) out <- data.frame( rowid = idx, group = "main_marginaleffect", estimate = med) # multi-dimensional outcome } else if (length(dim(draws)) == 3) { out <- apply(draws, c(2, 3), stats::median) levnames <- dimnames(draws)[[3]] if (is.null(levnames)) { colnames(out) <- seq_len(ncol(out)) } else { colnames(out) <- levnames } out <- data.frame( rowid = rep(idx, times = ncol(out)), group = rep(colnames(out), each = nrow(out)), estimate = c(out)) } else { stop("marginaleffects cannot extract posterior draws from this model. Please report this problem to the Bug tracker with a reporducible example: https://github.com/vincentarelbundock/marginaleffects/issues", call. = FALSE) } # group for multi-valued outcome if (length(dim(draws)) == 3) { draws <- lapply(1:dim(draws)[3], function(i) draws[, , i]) draws <- do.call("cbind", draws) } attr(out, "posterior_draws") <- t(draws) return(out) } #' @include get_group_names.R #' @rdname get_group_names #' @export get_group_names.brmsfit <- function(model, ...) { if (!is.null(model$family) && "cumulative" %in% model$family) { out <- unique(insight::get_response(model)) } else { out <- "main_marginaleffect" } return(out) } #' @rdname get_vcov #' @export get_vcov.brmsfit <- function(model, vcov = NULL, ...) 
{ if (!is.null(vcov) && !is.logical(vcov)) { insight::format_warning("The `vcov` argument is not supported for models of this class.") } return(NULL) } marginaleffects/R/sanity_by.R0000644000176200001440000000312114541720224015727 0ustar liggesuserssanity_by <- function(by, newdata) { checkmate::assert( checkmate::check_flag(by), checkmate::check_data_frame(by, min.cols = 1, min.rows = 1), checkmate::check_character(by, min.len = 1), checkmate::check_null(by)) known <- c("by", "group", "term", "rowid", "rowidcf", "contrast", colnames(newdata)) if (isTRUE(by == "group") && "group" %in% colnames(newdata)) { msg <- 'The "group" variable name is forbidden to avoid conflicts with the column names of the outputs produced by the `marginaleffects` package. Please rename your variable of change the value of the `by` argument.' insight::format_error(msg) } if (isTRUE(checkmate::check_flag(by))) { flag <- FALSE } else if (inherits(by, "data.frame")) { flag <- !all(colnames(by) %in% known) || !"by" %in% colnames(by) } else { flag <- !all(by %in% known | grepl("^contrast_", by)) } if (flag) { bycols <- paste(setdiff(colnames(newdata), c("rowid", "rowidcf", "term", "group")), collapse = ", ") msg <- c( "The `by` argument must be either:", "", sprintf("1. Character vector in which each element is part of: %s", bycols), "", sprintf("2. A data frame with a `by` column of labels, and in which all other columns are elements of: %s", bycols), "", "It can sometimes be useful to supply a data frame explicitly to the `newdata` argument in order to be able to group by different columns." ) stop(insight::format_message(msg), call. = FALSE) } }marginaleffects/R/get_contrasts.R0000644000176200001440000004353714541720224016624 0ustar liggesusersget_contrasts <- function(model, newdata, type, variables, original, lo, hi, wts = NULL, marginalmeans, by = NULL, hypothesis = NULL, cross = FALSE, verbose = TRUE, deltamethod = FALSE, ...) 
{ settings_init() # some predict() methods need data frames and will convert data.tables # internally, which can be very expensive if done many times. we do it once # here. data.table::setDF(lo) data.table::setDF(hi) data.table::setDF(original) # brms models need to be combined to use a single seed when sample_new_levels="gaussian" if (inherits(model, c("brmsfit", "bart"))) { if (!"rowid" %in% colnames(lo)) { lo$rowid <- hi$rowid <- seq_len(nrow(lo)) } both <- rbindlist(list(lo, hi)) pred_both <- myTryCatch(get_predict( model, type = type, newdata = both, ...)) # informative error in case of allow.new.levels level breakage if (inherits(pred_both[["error"]], "simpleError")) { insight::format_error(pred_both[["error"]][["message"]]) } else { pred_both <- pred_both[["value"]] } data.table::setDT(pred_both) pred_both[, "lo" := seq_len(.N) <= .N / 2, by = "group"] pred_lo <- pred_both[pred_both$lo, .(rowid, group, estimate), drop = FALSE] pred_hi <- pred_both[!pred_both$lo, .(rowid, group, estimate), drop = FALSE] data.table::setDF(pred_lo) data.table::setDF(pred_hi) draws <- attr(pred_both, "posterior_draws") draws_lo <- draws[pred_both$lo, , drop = FALSE] draws_hi <- draws[!pred_both$lo, , drop = FALSE] attr(pred_lo, "posterior_draws") <- draws_lo attr(pred_hi, "posterior_draws") <- draws_hi } else { pred_lo <- myTryCatch(get_predict( model, type = type, newdata = lo, ...)) # tidymodels if (inherits(pred_lo$error, "rlang_error") && isTRUE(grepl("the object should be", pred_lo$error$message))) { insight::format_error(pred_lo$error$message) } else { pred_lo <- pred_lo[["value"]] } pred_hi <- myTryCatch(get_predict( model, type = type, newdata = hi, ...)) # otherwise we keep the full error object instead of extracting the value if (inherits(pred_hi$value, "data.frame")) { pred_hi <- pred_hi$value } else { pred_hi <- pred_hi$error } } # predict() takes up 2/3 of the wall time. 
This call is only useful when we # compute elasticities, or for the main estimate, not for standard errors, # so we probably save 1/3 of that 2/3. elasticities <- c( # "dydx", # useless and expensive "eyex", "eydx", "dyex", # "dydxavg", # useless and expensive "eyexavg", "eydxavg", "dyexavg") fun <- function(x) { out <- checkmate::check_choice(x$comparison, choices = elasticities) isTRUE(out) } tmp <- Filter(fun, variables) if (!isTRUE(deltamethod) || length(tmp) > 0) { pred_or <- myTryCatch(get_predict( model, type = type, newdata = original, ...))[["value"]] } else { pred_or <- NULL } # lots of indexing later requires a data.table data.table::setDT(original) if (!inherits(pred_hi, "data.frame") || !inherits(pred_lo, "data.frame") || !inherits(pred_or, c("data.frame", "NULL"))) { msg <- "Unable to compute predicted values with this model. This error can arise when `insight::get_data()` is unable to extract the dataset from the model object, or when the data frame was modified since fitting the model. You can try to supply a different dataset to the `newdata` argument." if (inherits(pred_hi, c("try-error", "error"))) { msg <-c(msg, "", "In addition, this error message was raised:", "", as.character(pred_hi)) } msg <- c(msg, "", "Bug Tracker: https://github.com/vincentarelbundock/marginaleffects/issues") insight::format_error(msg) } # output data.frame out <- pred_lo data.table::setDT(out) # univariate outcome: # original is the "composite" data that we constructed by binding terms and # compute predictions. It includes a term column, which we need to # replicate for each group. 
out[, "marginaleffects_wts_internal" := NA_real_] # default (probably almost always overwritten) mult <- nrow(out) / nrow(original) regex <- "^term$|rowid_dedup|^group$|^contrast|^marginaleffects_wts_internal$" if (isTRUE(mult == 1)) { for (v in grep(regex, colnames(original), value = TRUE)) { out[, (v) := original[[v]]] } # group or multivariate outcomes } else if (isTRUE(mult > 1)) { for (v in grep(regex, colnames(original), value = TRUE)) { out[, (v) := rep(original[[v]], times = mult)] } # cross-contrasts or weird cases } else { out <- merge(out, newdata, by = "rowid", all.x = TRUE, sort = FALSE) if (isTRUE(nrow(out) == nrow(lo))) { tmp <- data.table(lo)[, .SD, .SDcols = patterns("^contrast|marginaleffects_wts_internal")] out <- cbind(out, tmp) idx <- c("rowid", grep("^contrast", colnames(out), value = TRUE), colnames(out)) idx <- unique(idx) out <- out[, ..idx] } } if (!"term" %in% colnames(out)) { out[, "term" := "cross"] } # by if (isTRUE(checkmate::check_data_frame(by))) { bycols <- "by" data.table::setDT(by) tmp <- setdiff(intersect(colnames(out), colnames(by)), "by") if (length(tmp) == 0) { if (all(colnames(by) %in% c("by", colnames(newdata)))) { nd <- c("rowid", setdiff(colnames(by), "by")) nd <- newdata[, nd, drop = FALSE] out <- merge(out, nd, by = "rowid", sort = FALSE) tmp <- setdiff(intersect(colnames(out), colnames(by)), "by") } else { insight::format_error("The column in `by` must be present in `newdata`.") } } # harmonize column types for (v in colnames(by)) { if (isTRUE(is.character(out[[v]])) && isTRUE(is.numeric(by[[v]]))) { by[[v]] <- as.character(by[[v]]) } else if (isTRUE(is.numeric(out[[v]])) && isTRUE(is.character(by[[v]]))) { by[[v]] <- as.numeric(by[[v]]) } } out[by, by := by, on = tmp] by <- "by" } else if (isTRUE(checkmate::check_character(by))) { regex <- "^term$|^contrast_?|^group$" by <- c(by, grep(regex, colnames(out), value = TRUE)) by <- unique(by) } # comparison function could be different for different terms # 
sanitize_variables() ensures all functions are identical when there are cross fun_list <- sapply(names(variables), function(x) variables[[x]][["function"]]) fun_list[["cross"]] <- fun_list[[1]] # elasticity requires the original (properly aligned) predictor values # this will discard factor variables which are duplicated, so in principle # it should be the "correct" size # also need `x` when `x` is in the signature of the `comparison` custom function FUN <- function(z) { (is.character(z$comparison) && z$comparison %in% elasticities) || (is.function(z$comparison) && "x" %in% names(formals(z$comparison))) } elasticities <- Filter(FUN, variables) elasticities <- lapply(elasticities, function(x) x$name) if (length(elasticities) > 0) { # assigning a subset of "original" to "idx1" takes time and memory # better to do this here for most columns and add the "v" column only # in the loop if (!is.null(original)) { idx1 <- c("rowid", "rowidcf", "term", "group", grep("^contrast", colnames(original), value = TRUE)) idx1 <- intersect(idx1, colnames(original)) idx1 <- original[, ..idx1] } for (v in names(elasticities)) { idx2 <- unique(c("rowid", "term", "group", by, grep("^contrast", colnames(out), value = TRUE))) idx2 <- intersect(idx2, colnames(out)) # discard other terms to get right length vector idx2 <- out[term == v, ..idx2] # original is NULL when cross=TRUE if (!is.null(original)) { # if not first iteration, need to remove previous "v" and "elast" if (v %in% colnames(idx1)) { idx1[, (v) := NULL] } if ("elast" %in% colnames(idx1)) { idx1[, elast := NULL] } idx1[, (v) := original[[v]]] setnames(idx1, old = v, new = "elast") on_cols <- intersect(colnames(idx1), colnames(idx2)) idx2 <- unique(merge(idx2, idx1, by = on_cols, sort = FALSE)[, elast := elast]) } elasticities[[v]] <- idx2$elast } } draws <- attr(pred_lo, "posterior_draws") # frequentist if (is.null(draws)) { draws_lo <- draws_hi <- draws_or <- NULL # bayes } else { draws_lo <- attr(pred_lo, "posterior_draws") 
draws_hi <- attr(pred_hi, "posterior_draws") draws_or <- attr(pred_or, "posterior_draws") } data.table::setDT(pred_hi) out[, predicted_lo := pred_lo[["estimate"]]] out[, predicted_hi := pred_hi[["estimate"]]] if (!is.null(pred_or)) { data.table::setDT(pred_or) out[, predicted := pred_or[["estimate"]]] } else { out[, predicted := NA_real_] } idx <- grep("^contrast|^group$|^term$|^type$|^comparison_idx$", colnames(out), value = TRUE) # when `by` is a character vector, we sometimes modify the comparison # function on the fly to use the `avg` version. this is important and # convenient because some of the statistics are non-collapsible, so we can't # average them at the very end. when `by` is a data frame, we do this only # at the very end. # TODO: What is the UI for this? Doesn't make sense to have different functions. if (isTRUE(checkmate::check_character(by))) { tmp <- intersect(colnames(newdata), c(by, colnames(out))) if (length(tmp) > 1) { tmp <- subset(newdata, select = tmp) out <- merge(out, tmp, all.x = TRUE, sort = FALSE) idx <- unique(c(idx, by)) } } # we feed these columns to safefun(), even if they are useless for categoricals if (!"marginaleffects_wts_internal" %in% colnames(out)) out[, "marginaleffects_wts_internal" := NA] if (isTRUE(marginalmeans)) { out <- out[, .( predicted_lo = mean(predicted_lo), predicted_hi = mean(predicted_hi), predicted = mean(predicted), marginaleffects_wts_internal = mean(marginaleffects_wts_internal)), keyby = idx] } # safe version of comparison # unknown arguments # singleton vs vector # different terms use different functions safefun <- function(hi, lo, y, n, term, cross, wts, tmp_idx, newdata) { tn <- term[1] eps <- variables[[tn]]$eps # when cross=TRUE, sanitize_comparison enforces a single function if (isTRUE(cross)) { fun <- fun_list[[1]] } else { fun <- fun_list[[tn]] } args <- list( "hi" = hi, "lo" = lo, "y" = y, "eps" = eps, "w" = wts, "newdata" = newdata) # sometimes x is exactly the same length, but not always 
args[["x"]] <- elasticities[[tn]][tmp_idx] args <- args[names(args) %in% names(formals(fun))] con <- try(do.call("fun", args), silent = TRUE) if (!isTRUE(checkmate::check_numeric(con, len = n)) && !isTRUE(checkmate::check_numeric(con, len = 1))) { msg <- sprintf("The function supplied to the `comparison` argument must accept two numeric vectors of predicted probabilities of length %s, and return a single numeric value or a numeric vector of length %s, with no missing value.", n, n) #nolint insight::format_error(msg) } if (length(con) == 1) { con <- c(con, rep(NA_real_, length(hi) - 1)) settings_set("marginaleffects_safefun_return1", TRUE) } return(con) } # need a temp index for group-by operations when elasticities is a vector of length equal to full rows of `out` tmp <- grep("^term$|^contrast|^group$", colnames(out), value = TRUE) if (length(tmp) > 0) { out[, tmp_idx := 1:.N, by = tmp] } else { out[, tmp_idx := 1:.N] } # bayesian if (!is.null(draws)) { # drop missing otherwise get_averages() fails when trying to take a # simple mean idx_na <- !is.na(out$predicted_lo) out <- stats::na.omit(out, cols = "predicted_lo") # TODO: performance is probably terrrrrible here, but splitting is # tricky because grouping rows are not always contiguous, and the order # of rows is **extremely** important because draws don't have the # indices that would allow us to align them back with `out` draws <- draws[idx_na, , drop = FALSE] if (isTRUE(checkmate::check_character(by, min.len = 1))) { by_idx <- subset(out, select = intersect(by, colnames(out))) by_idx <- do.call(paste, c(by_idx, sep = "|")) } else { by_idx <- out$term } # loop over columns (draws) and term names because different terms could use different functions for (tn in unique(by_idx)) { for (i in seq_len(ncol(draws))) { idx <- by_idx == tn draws[idx, i] <- safefun( hi = draws_hi[idx, i], lo = draws_lo[idx, i], y = draws_or[idx, i], n = sum(idx), term = out$term[idx], cross = cross, wts = 
out$marginaleffects_wts_internal[idx], tmp_idx = out$tmp_idx[idx], newdata = newdata) } } # function returns unique value idx <- !is.na(draws[, 1]) draws <- draws[idx, , drop = FALSE] # if comparison returns a single value, then we padded with NA. That # also means we don't want `rowid` otherwise we will merge and have # useless duplicates. if (any(!idx)) { if (settings_equal("marginaleffects_safefun_return1", TRUE)) { out[, "rowid" := NULL] } out <- out[idx, , drop = FALSE] } FUN_CENTER <- getOption("marginaleffects_posterior_center", default = stats::median) out[, "estimate" := apply(draws, 1, FUN_CENTER)] # frequentist } else { out <- stats::na.omit(out, cols = "predicted_lo") # We want to write the "estimate" column in-place because it safer # than group-merge; there were several bugs related to this in the past. # safefun() returns 1 value and NAs when the function retunrs a # singleton. idx <- intersect(idx, colnames(out)) out[, "estimate" := safefun( hi = predicted_hi, lo = predicted_lo, y = predicted, n = .N, term = term, cross = cross, wts = marginaleffects_wts_internal, tmp_idx = tmp_idx, newdata = newdata), keyby = idx] out[, tmp_idx := NULL] # if comparison returns a single value, then we padded with NA. That # also means we don't want `rowid` otherwise we will merge and have # useless duplicates. 
if (anyNA(out$estimate)) { if (settings_equal("marginaleffects_safefun_return1", TRUE)) { out[, "rowid" := NULL] } } out <- stats::na.omit(out, cols = "estimate") } # clean if ("rowid_dedup" %in% colnames(out)) { out[, "rowid_dedup" := NULL] } # averaging by groups # sometimes this work is already done # if `by` is a column name, then we have merged-in a data frame earlier auto_mean_fun_sub <- any(grepl("^mean\\(", unique(out$contrast))) if (nrow(out) > 1) { if (!auto_mean_fun_sub && !(is.null(by) || isFALSE(by)) && any(grepl("^contrast[_]?", colnames(out)))) { out <- get_by( out, draws = draws, newdata = newdata, by = by, verbose = verbose) draws <- attr(out, "posterior_draws") } else { bycols <- c(by, "group", "term", "^contrast[_]?") bycols <- paste(bycols, collapse = "|") bycols <- grep(bycols, colnames(out), value = TRUE) } } # issue #531: uncertainty estimates from get_predict() sometimes get retained, but they are not overwritten later by get_ci() # drop by reference for speed bad <- intersect( colnames(out), c("conf.low", "conf.high", "std.error", "statistic", "p.value")) if (length(bad) > 0) { out[, (bad) := NULL] } # before get_hypothesis attr(out, "posterior_draws") <- draws # hypothesis tests using the delta method out <- get_hypothesis(out, hypothesis, by = by) # reset settings settings_rm("marginaleffects_safefun_return1") # output attr(out, "original") <- original return(out) } marginaleffects/R/ci.R0000644000176200001440000001603314541720224014327 0ustar liggesusersget_ci <- function( x, conf_level, df = NULL, draws = NULL, vcov = TRUE, null_hypothesis = 0, p_adjust = NULL, model = NULL, ...) 
{ checkmate::assert_number(null_hypothesis) if (!is.null(draws)) { out <- get_ci_draws( x, conf_level = conf_level, draws = draws, model = model) return(out) } required <- c("estimate", "std.error") if (!inherits(x, "data.frame") || any(!required %in% colnames(x))) { return(x) } normal <- FALSE if (!"df" %in% colnames(x)) { if (identical(df, Inf)) { normal <- TRUE # 1 or matching length } else if (length(df) %in% c(1, nrow(x))) { x[["df"]] <- df normal <- FALSE # multiple, such as rbind() contrast terms } else if (length(df) < nrow(x) && "rowid" %in% colnames(x)) { rowids <- unique(x$rowid) if (length(rowids) == length(df)) { rowids <- data.table(rowid = rowids, df = df) x <- merge(x, rowids, all.x = TRUE, by = "rowid", sort = FALSE) } else { insight::format_error("The degrees of freedom argument was ignored.") } # mismatch } else { insight::format_error("Satterthwaite and Kenward-Roger corrections are not supported in this command.") } } p_overwrite <- !"p.value" %in% colnames(x) || null_hypothesis != 0 || identical(vcov, "satterthwaite") || identical(vcov, "kenward-roger") z_overwrite <- !"statistic" %in% colnames(x) || null_hypothesis != 0 || p_overwrite ci_overwrite <- !"conf.low" %in% colnames(x) && "std.error" %in% colnames(x) && is.null(p_adjust) if (z_overwrite) { x[["statistic"]] <- (x[["estimate"]] - null_hypothesis) / x[["std.error"]] if (normal) { x[["p.value"]] <- 2 * stats::pnorm(-abs(x$statistic)) } else { x[["p.value"]] <- 2 * stats::pt(-abs(x$statistic), df = x[["df"]]) } } if (ci_overwrite) { alpha <- 1 - conf_level if (normal) { critical <- abs(stats::qnorm(alpha / 2)) } else { critical <- abs(stats::qt(alpha / 2, df = x[["df"]])) } x[["conf.low"]] <- x[["estimate"]] - critical * x[["std.error"]] x[["conf.high"]] <- x[["estimate"]] + critical * x[["std.error"]] } if (!is.null(p_adjust) && "p.value" %in% colnames(x)) { x$p.value <- stats::p.adjust(x$p.value, method = p_adjust) } # s-value if ("p.value" %in% colnames(x)) { x$s.value <- 
-log2(x$p.value)
    }
    return(x)
}


# Attach uncertainty intervals computed from a matrix of posterior/bootstrap
# draws (one row per estimate) to the data.frame of estimates `x`.
#
# @param x data.frame of estimates; one row per quantity.
# @param conf_level number strictly between 0 and 1.
# @param draws matrix of draws, rows aligned with the rows of `x`.
# @param model model object; only inspected for the "inferences_simulation" class.
# @return `x` with `estimate`, `conf.low`, `conf.high` (and, for simulation
#   inference, `std.error`) filled in.
get_ci_draws <- function(x, conf_level, draws, model = NULL) {
    # BUG FIX: `checkmate::check_number()` merely *returns* TRUE or an error
    # string; it never stops execution, so an invalid `conf_level` used to
    # slip through unvalidated. `assert_number()` actually enforces the bounds.
    checkmate::assert_number(conf_level, lower = 1e-10, upper = 1 - 1e-10)
    critical <- (1 - conf_level) / 2

    # faster known case: simulation-based inference reports SE + ETI
    if (inherits(model, "inferences_simulation")) {
        insight::check_if_installed("collapse", minimum_version = "1.9.0")
        CIs <- collapse::dapply(draws, MARGIN = 1, FUN = collapse::fquantile, probs = c(critical, 1 - critical))
        x$std.error <- collapse::dapply(draws, MARGIN = 1, FUN = collapse::fsd)
        x$conf.low <- CIs[, 1]
        x$conf.high <- CIs[, 2]
        return(x)

    # faster known case: package defaults (equal-tailed interval, median center)
    } else if (identical("eti", getOption("marginaleffects_posterior_interval", default = "eti")) &&
               identical("median", getOption("marginaleffects_posterior_center", default = "median"))) {
        insight::check_if_installed("collapse", minimum_version = "1.9.0")
        CIs <- collapse::dapply(draws, MARGIN = 1, FUN = collapse::fquantile, probs = c(critical, .5, 1 - critical))
        x$estimate <- CIs[, 2]
        x$conf.low <- CIs[, 1]
        x$conf.high <- CIs[, 3]
        return(x)
    }

    # faster known case: equal-tailed interval with mean center
    if (identical("eti", getOption("marginaleffects_posterior_interval", default = "eti")) &&
        identical("mean", getOption("marginaleffects_posterior_center", default = "median"))) {
        insight::check_if_installed("collapse", minimum_version = "1.9.0")
        Bs <- collapse::dapply(draws, MARGIN = 1, FUN = collapse::fmean)
        CIs <- collapse::dapply(draws, MARGIN = 1, FUN = collapse::fquantile, probs = c(critical, 1 - critical))
        x$estimate <- Bs
        x$conf.low <- CIs[, 1]
        x$conf.high <- CIs[, 2]
        return(x)
    }

    # option name change: fall back to the deprecated option name
    FUN_INTERVAL <- getOption("marginaleffects_posterior_interval")
    if (is.null(FUN_INTERVAL)) {
        FUN_INTERVAL <- getOption("marginaleffects_credible_interval", default = "eti")
    }
    checkmate::assert_choice(FUN_INTERVAL, choices = c("eti", "hdi"))
    if (FUN_INTERVAL == "hdi") {
        FUN_INTERVAL <- get_hdi
    } else {
        FUN_INTERVAL <- get_eti
    }

    # the centering option may be a string ("mean"/"median") or a function
    FUN_CENTER <- getOption("marginaleffects_posterior_center", default = stats::median)
    checkmate::assert(
        checkmate::check_choice(FUN_CENTER, choices = c("mean", "median")),
        checkmate::check_function(FUN_CENTER)
    )
    if (identical(FUN_CENTER, "mean")) {
        FUN_CENTER <- mean
    } else if (identical(FUN_CENTER, "median")) {
        FUN_CENTER <- stats::median
    }

    CIs <- t(apply(draws, 1, FUN_INTERVAL, credMass = conf_level))
    Bs <- apply(draws, 1, FUN_CENTER)

    # comparison returns a single value
    if (nrow(x) < nrow(CIs)) {
        CIs <- unique(CIs)
        Bs <- unique(Bs)
    }

    x[["estimate"]] <- Bs
    x[["conf.low"]] <- CIs[, "lower"]
    x[["conf.high"]] <- CIs[, "upper"]
    return(x)
}


# Equal-tailed interval: quantiles with the same tail mass on each side.
get_eti <- function(object, credMass = 0.95, ...) {
    checkmate::assert_numeric(object)
    checkmate::assert_number(credMass)
    checkmate::assert_true(credMass > 0)
    checkmate::assert_true(credMass < 1)
    critical <- (1 - credMass) / 2
    out <- stats::quantile(object, probs = c(critical, 1 - critical))
    out <- stats::setNames(out, c("lower", "upper"))
    return(out)
}


# this is only used for tests to match emmeans. we use ETI as default for bayesian models.
get_hdi <- function(object, credMass = 0.95, ...) {
    result <- c(NA_real_, NA_real_)
    if (is.numeric(object)) {
        attributes(object) <- NULL
        x <- sort.int(object, method = "quick") # removes NA/NaN, but not Inf
        n <- length(x)
        if (n > 0) {
            # exclude <- ceiling(n * (1 - credMass)) # Not always the same as...
            exclude <- n - floor(n * credMass) # Number of values to exclude
            low.poss <- x[1:exclude] # Possible lower limits...
            upp.poss <- x[(n - exclude + 1):n] # ...
and corresponding upper limits best <- which.min(upp.poss - low.poss) # Combination giving the narrowest interval if (length(best)) { result <- c(low.poss[best], upp.poss[best]) } else { tmp <- range(x) if (length(tmp) == 2) { result <- tmp } } } } names(result) <- c("lower", "upper") return(result) } marginaleffects/R/datagrid.R0000644000176200001440000004565614554047604015540 0ustar liggesusers#' Data grids #' #' @description #' Generate a data grid of user-specified values for use in the `newdata` argument of the `predictions()`, `comparisons()`, and `slopes()` functions. This is useful to define where in the predictor space we want to evaluate the quantities of interest. Ex: the predicted outcome or slope for a 37 year old college graduate. #' #' @param ... named arguments with vectors of values or functions for user-specified variables. #' + Functions are applied to the variable in the `model` dataset or `newdata`, and must return a vector of the appropriate type. #' + Character vectors are automatically transformed to factors if necessary. #' +The output will include all combinations of these variables (see Examples below.) #' @param model Model object #' @param newdata data.frame (one and only one of the `model` and `newdata` arguments can be used.) #' @param by character vector with grouping variables within which `FUN_*` functions are applied to create "sub-grids" with unspecified variables. #' @param FUN_character the function to be applied to character variables. #' @param FUN_factor the function to be applied to factor variables. #' @param FUN_logical the function to be applied to logical variables. #' @param FUN_integer the function to be applied to integer variables. #' @param FUN_binary the function to be applied to binary variables. #' @param FUN_numeric the function to be applied to numeric variables. #' @param FUN_other the function to be applied to other variable types. #' @param grid_type character. 
Determines the functions to apply to each variable. The defaults can be overridden by defining individual variables explicitly in `...`, or by supplying a function to one of the `FUN_*` arguments. #' * "mean_or_mode": Character, factor, logical, and binary variables are set to their modes. Numeric, integer, and other variables are set to their means. #' * "balanced": Each unique level of character, factor, logical, and binary variables are preserved. Numeric, integer, and other variables are set to their means. Warning: When there are many variables and many levels per variable, a balanced grid can be very large. In those cases, it is better to use `grid_type="mean_or_mode"` and to specify the unique levels of a subset of named variables explicitly. #' * "counterfactual": the entire dataset is duplicated for each combination of the variable values specified in `...`. Variables not explicitly supplied to `datagrid()` are set to their observed values in the original dataset. #' @details #' If `datagrid` is used in a `predictions()`, `comparisons()`, or `slopes()` call as the #' `newdata` argument, the model is automatically inserted in the `model` argument of `datagrid()` #' call, and users do not need to specify either the `model` or `newdata` arguments. The same behavior will occur when the value supplied to `newdata=` is a function call which starts with "datagrid". This is intended to allow users to create convenience shortcuts like: #' #' \preformatted{ #' library(marginaleffects) #' mod <- lm(mpg ~ am + vs + factor(cyl) + hp, mtcars) #' datagrid_bal <- function(...) datagrid(..., grid_type = "balanced") #' predictions(mod, newdata = datagrid_bal(cyl = 4)) #' } #' #' #' If users supply a model, the data used to fit that model is retrieved using #' the `insight::get_data` function. #' @return #' A `data.frame` in which each row corresponds to one combination of the named #' predictors supplied by the user via the `...` dots. 
Variables which are not #' explicitly defined are held at their mean or mode. #' @export #' @examples #' # The output only has 2 rows, and all the variables except `hp` are at their #' # mean or mode. #' datagrid(newdata = mtcars, hp = c(100, 110)) #' #' # We get the same result by feeding a model instead of a data.frame #' mod <- lm(mpg ~ hp, mtcars) #' datagrid(model = mod, hp = c(100, 110)) #' #' # Use in `marginaleffects` to compute "Typical Marginal Effects". When used #' # in `slopes()` or `predictions()` we do not need to specify the #' #`model` or `newdata` arguments. #' slopes(mod, newdata = datagrid(hp = c(100, 110))) #' #' # datagrid accepts functions #' datagrid(hp = range, cyl = unique, newdata = mtcars) #' comparisons(mod, newdata = datagrid(hp = fivenum)) #' #' # The full dataset is duplicated with each observation given counterfactual #' # values of 100 and 110 for the `hp` variable. The original `mtcars` includes #' # 32 rows, so the resulting dataset includes 64 rows. #' dg <- datagrid(newdata = mtcars, hp = c(100, 110), grid_type = "counterfactual") #' nrow(dg) #' #' # We get the same result by feeding a model instead of a data.frame #' mod <- lm(mpg ~ hp, mtcars) #' dg <- datagrid(model = mod, hp = c(100, 110), grid_type = "counterfactual") #' nrow(dg) datagrid <- function( ..., model = NULL, newdata = NULL, by = NULL, grid_type = "mean_or_mode", FUN_character = NULL, FUN_factor = NULL, FUN_logical = NULL, FUN_numeric = NULL, FUN_integer = NULL, FUN_binary = NULL, FUN_other = NULL) { dots <- list(...) 
# backward compatibility: 20231220 if (identical(grid_type, "typical")) { grid_type <- "mean_or_mode" } # sanity checkmate::assert_choice(grid_type, choices = c("mean_or_mode", "balanced", "counterfactual")) checkmate::assert_function(FUN_character, null.ok = TRUE) checkmate::assert_function(FUN_factor, null.ok = TRUE) checkmate::assert_function(FUN_logical, null.ok = TRUE) checkmate::assert_function(FUN_binary, null.ok = TRUE) checkmate::assert_function(FUN_integer, null.ok = TRUE) checkmate::assert_function(FUN_numeric, null.ok = TRUE) checkmate::assert_function(FUN_other, null.ok = TRUE) checkmate::assert_character(by, null.ok = TRUE) checkmate::assert_data_frame(newdata, null.ok = TRUE) if (grid_type == "mean_or_mode") { if (is.null(FUN_character)) FUN_character <- get_mode if (is.null(FUN_logical)) FUN_logical <- get_mode if (is.null(FUN_factor)) FUN_factor <- get_mode if (is.null(FUN_binary)) FUN_binary <- get_mode if (is.null(FUN_numeric)) FUN_numeric <- function(x) mean(x, na.rm = TRUE) if (is.null(FUN_other)) FUN_other <- function(x) mean(x, na.rm = TRUE) if (is.null(FUN_integer)) FUN_integer <- function(x) round(mean(x, na.rm = TRUE)) } else if (grid_type == "balanced") { if (is.null(FUN_character)) FUN_character <- unique if (is.null(FUN_logical)) FUN_logical <- unique if (is.null(FUN_factor)) FUN_factor <- unique if (is.null(FUN_binary)) FUN_binary <- unique if (is.null(FUN_numeric)) FUN_numeric <- function(x) mean(x, na.rm = TRUE) if (is.null(FUN_other)) FUN_other <- function(x) mean(x, na.rm = TRUE) if (is.null(FUN_integer)) FUN_integer <- function(x) round(mean(x, na.rm = TRUE)) } else if (grid_type == "counterfactual") { if (!is.null(by)) { insight::format_error("The `by` argument is not supported for counterfactual grids.") } args <- list( model = model, newdata = newdata) args <- c(dots, args) out <- do.call("datagridcf_internal", args) return(out) } if (!is.null(by)) { if (is.null(newdata) && is.null(model)) { insight::format_error("One of 
`newdata` and `model` must not be `NULL`.") } if (is.null(newdata)) { newdata <- get_modeldata(model, additional_variables = by) } if (!all(by %in% colnames(newdata))) { insight::format_error("All elements of `by` must match column names in `newdata`.") } data.table::setDT(newdata) idx <- subset(newdata, select = by) newdata_list <- split(newdata, idx, keep.by = TRUE) for (i in seq_along(newdata_list)) { args <- c(list(...), list( model = model, newdata = newdata_list[[i]], FUN_character = FUN_character, FUN_factor = FUN_factor, FUN_logical = FUN_logical, FUN_binary = FUN_binary, FUN_numeric = FUN_numeric, FUN_integer = FUN_integer, FUN_other = FUN_other, by = by)) for (b in by) { args[[b]] <- unique } newdata_list[[i]] <- do.call(datagrid_engine, args) } out <- data.table::rbindlist(newdata_list) data.table::setDF(out) return(out) } out <- datagrid_engine(..., model = model, newdata = newdata, FUN_character = FUN_character, FUN_factor = FUN_factor, FUN_logical = FUN_logical, FUN_binary = FUN_binary, FUN_numeric = FUN_numeric, FUN_integer = FUN_integer, FUN_other = FUN_other) return(out) } datagrid_engine <- function( ..., model = NULL, newdata = NULL, FUN_character = get_mode, # need to be explicit for numeric variables transfered to factor in model formula FUN_factor = get_mode, FUN_logical = get_mode, FUN_binary = get_mode, FUN_numeric = function(x) mean(x, na.rm = TRUE), FUN_integer = function(x) round(mean(x, na.rm = TRUE)), FUN_other = function(x) mean(x, na.rm = TRUE), by = NULL) { dots <- list(...) 
tmp <- prep_datagrid(..., model = model, newdata = newdata, by = by) at <- tmp$at dat <- tmp$newdata variables_all <- tmp$all variables_manual <- names(at) variables_automatic <- tmp$automatic # commented out because we want to keep the response in # sometimes there are two responses and we need one of them: # brms::brm(y | trials(n) ~ x + w + z) # if (!is.null(model)) { # variables_automatic <- setdiff(variables_automatic, insight::find_response(model)) # } if (length(variables_automatic) > 0) { idx <- intersect(variables_automatic, colnames(dat)) dat_automatic <- dat[, ..idx, drop = FALSE] dat_automatic <- stats::na.omit(dat_automatic) out <- list() # na.omit destroys attributes, and we need the "factor" attribute # created by insight::get_data for (n in names(dat_automatic)) { if (get_variable_class(dat, n, c("factor", "strata", "cluster")) || n %in% tmp[["cluster"]]) { out[[n]] <- FUN_factor(dat_automatic[[n]]) } else if (get_variable_class(dat, n, "binary")) { out[[n]] <- FUN_binary(dat_automatic[[n]]) } else if (get_variable_class(dat, n, "logical")) { out[[n]] <- FUN_logical(dat_automatic[[n]]) } else if (get_variable_class(dat, n, "character")) { out[[n]] <- FUN_character(dat_automatic[[n]]) } else if (get_variable_class(dat, n, "numeric")) { if (is.integer(dat_automatic[[n]])) { out[[n]] <- FUN_integer(dat_automatic[[n]]) } else { out[[n]] <- FUN_numeric(dat_automatic[[n]]) } } else { out[[n]] <- FUN_other(dat_automatic[[n]]) } } } else { out <- list() } if (!is.null(at)) { for (n in names(at)) { out[n] <- at[n] } } # unique before counting out <- lapply(out, unique) # warn on very large prediction grid num <- as.numeric(sapply(out, length)) # avoid integer overflow num <- Reduce(f = "*", num) if (isTRUE(num > 1e9)) { stop("You are trying to create a prediction grid with more than 1 billion rows, which is likely to exceed the memory and computational power available on your local machine. 
Presumably this is because you are considering many variables with many levels. All of the functions in the `marginaleffects` package include arguments to specify a restricted list of variables over which to create a prediction grid.", call. = FALSE) } fun <- data.table::CJ args <- c(out, list(sorted = FALSE)) out <- do.call("fun", args) # na.omit destroys attributes, and we need the "factor" attribute # created by insight::get_data for (n in names(out)) { attr(out, "marginaleffects_variable_class") <- attr(dat, "marginaleffects_variable_class") } # better to assume "standard" class as output data.table::setDF(out) attr(out, "variables_datagrid") <- names(dots) return(out) } datagridcf_internal <- function( ..., model = NULL, newdata = NULL) { dots <- list(...) if (length(dots) == 0) { insight::format_error("Users must specify variable values when `grid_type='counterfactual'") } tmp <- prep_datagrid(..., model = model, newdata = newdata) at <- tmp$at dat <- tmp$newdata variables_all <- tmp$all variables_manual <- names(at) variables_automatic <- c(tmp$automatic, "marginaleffects_wts_internal", "rowid_dedup") # `at` -> `data.frame` at <- lapply(at, unique) fun <- data.table::CJ args <- c(at, list(sorted = FALSE)) at <- do.call("fun", args) rowid <- data.frame(rowidcf = seq_len(nrow(dat))) if (length(variables_automatic) > 0) { idx <- intersect(variables_automatic, colnames(dat)) dat_automatic <- dat[, ..idx, drop = FALSE] dat_automatic[, rowidcf := rowid$rowidcf] setcolorder(dat_automatic, c("rowidcf", setdiff(names(dat_automatic), "rowidcf"))) # cross-join 2 data.tables, faster than merging two dataframes out <- cjdt(list(dat_automatic, at)) } else { out <- merge(rowid, at, all = TRUE) } data.table::setDF(out) attr(out, "variables_datagrid") <- names(out) return(out) } prep_datagrid <- function(..., model = NULL, newdata = NULL, by = NULL) { checkmate::assert_data_frame(newdata, null.ok = TRUE) at <- list(...) 
# e.g., mlogit vignette we plot by group, but group is of length 0 because # we don't know how many groups there are until we make the first # prediction. for (i in seq_along(at)) { if (length(at[[i]]) == 0) { at[[i]] <- NULL } } # if (!is.null(model) & !is.null(newdata)) { # msg <- "One of the `model` or `newdata` arguments must be `NULL`." # stop(msg, call. = FALSE) # } if (is.null(model) & is.null(newdata)) { msg <- "The `model` and `newdata` arguments are both `NULL`. When calling `datagrid()` *inside* the `slopes()` or `comparisons()` functions, the `model` and `newdata` arguments can both be omitted. However, when calling `datagrid()` on its own, users must specify either the `model` or the `newdata` argument (but not both)." insight::format_error(msg) } if (!is.null(model)) { variables_list <- insight::find_variables(model) variables_all <- unlist(variables_list, recursive = TRUE) # weights are not extracted by default variables_all <- c(variables_all, insight::find_weights(model)) } else if (!is.null(newdata)) { variables_list <- NULL variables_all <- colnames(newdata) newdata <- set_variable_class(modeldata = newdata, model = model) } variables_manual <- names(at) variables_automatic <- setdiff(variables_all, variables_manual) # fill in missing data after sanity checks if (is.null(newdata)) { newdata <- get_modeldata(model, additional_variables = FALSE) } attr_variable_classes <- attr(newdata, "marginaleffects_variable_class") # subset columns, otherwise it can be ultra expensive to compute summaries for every variable if (!is.null(model)) { variables_sub <- c( hush(insight::find_variables(model, flatten = TRUE)), hush(unlist(insight::find_weights(model), use.names = FALSE))) # glmmTMB needs weights column for predictions variables_sub <- c(variables_sub, variables_manual) variables_sub <- c(variables_sub, c("marginaleffects_wts_internal", "rowid_dedup")) variables_sub <- intersect(colnames(newdata), variables_sub) if (length(variables_sub) > 0) { newdata 
<- subset(newdata, select = variables_sub) } } # check `at` names variables_missing <- setdiff(names(at), c(variables_all, "group", by)) if (length(variables_missing) > 0) { warning(sprintf("Some of the variable names are missing from the model data: %s", paste(variables_missing, collapse = ", ")), call. = FALSE) } idx <- vapply(newdata, is.matrix, logical(1L)) if (any(idx)) { if (any(names(newdata)[idx] %in% variables_all)) { insight::format_warning("Matrix columns are not supported as predictors and are therefore omitted. This may prevent computation of the quantities of interest. You can construct your own prediction dataset and supply it explicitly to the `newdata` argument.") } newdata <- newdata[, !idx, drop = FALSE] } # restore attributes after subsetting attr(newdata, "marginaleffects_variable_class") <- attr_variable_classes # check `at` elements and convert them to factor as needed for (n in names(at)) { # functions first otherwise we try to coerce functions to character if (is.function(at[[n]])) { modeldata <- attr(newdata, "newdata_modeldata") if (!is.null(modeldata) && n %in% colnames(modeldata)) { at[[n]] <- at[[n]](modeldata[[n]]) } else { at[[n]] <- at[[n]](newdata[[n]]) } } # not an "else" situation because we want to process the output of functions too if (is.factor(newdata[[n]]) || isTRUE(get_variable_class(newdata, n, "factor"))) { if (is.factor(newdata[[n]])) { levs <- levels(newdata[[n]]) } else { levs <- as.character(sort(unique(newdata[[n]]))) } at[[n]] <- as.character(at[[n]]) if (!all(at[[n]] %in% c(levs, NA))) { msg <- sprintf('The "%s" element of the `at` list corresponds to a factor variable. The values entered in the `at` list must be one of the factor levels: "%s".', n, paste(levels(newdata[[n]]), collapse = '", "')) stop(msg, call. 
= FALSE) } else { at[[n]] <- factor(at[[n]], levels = levs) } } } # cluster identifiers will eventually be treated as factors if (!is.null(model)) { v <- insight::find_variables(model) v <- unlist(v[names(v) %in% c("cluster", "strata")], recursive = TRUE) variables_cluster <- c(v, insight::find_random(model, flatten = TRUE)) } else { variables_cluster <- NULL } data.table::setDT(newdata) out <- list("newdata" = newdata, "at" = at, "all" = variables_all, "manual" = variables_manual, "automatic" = variables_automatic, "cluster" = variables_cluster) return(out) } marginaleffects/R/get_modeldata.R0000644000176200001440000001540714541720224016531 0ustar liggesusersget_modeldata <- function(model, additional_variables = FALSE, modeldata = NULL, wts = NULL, ...) { # mice if (inherits(model, c("mira", "amest"))) { return(modeldata) } # tidymodels: always require `newdata`, because sometimes there needs to be # some pre-processing, and we want to rely on the workflow to do that. # workflows are triggered on `stats::predict()` if (inherits(model, c("model_fit", "workflow"))) { return(NULL) } # otherwise, insight::get_data can sometimes return only the the outcome variable if (inherits(model, "bart")) { modeldata <- insight::get_data(model, additional_variables = TRUE) return(modeldata) } if (!is.null(modeldata)) { modeldata <- set_variable_class(modeldata, model = model) return(modeldata) } # often used to extract `by` if (isTRUE(checkmate::check_data_frame(additional_variables))) { additional_variables <- colnames(additional_variables) } # always get weights if (isTRUE(checkmate::check_string(wts))) { additional_variables <- c(additional_variables, wts) } # feols weights can be a formula if (inherits(model, "fixest")) { fwts <- tryCatch(all.vars(model$call$weights), error = function(e) NULL) additional_variables <- c(additional_variables, fwts) } # after by if (isTRUE(checkmate::check_flag(additional_variables))) { out <- hush(insight::get_data( model, additional_variables 
= additional_variables, verbose = FALSE))
        out <- set_variable_class(out, model = model)
        return(out)
    }

    # always extract offset variable if available
    off <- hush(insight::find_offset(model))
    if (isTRUE(checkmate::check_formula(off))) {
        additional_variables <- c(additional_variables, hush(all.vars(off)))
    } else if (isTRUE(checkmate::check_character(off, max.len = 4))) {
        # the offset is sometimes reported as a string, possibly a formula string
        if (isTRUE(grepl("~", off))) {
            additional_variables <- c(additional_variables, hush(all.vars(stats::as.formula(off))))
        } else {
            additional_variables <- c(additional_variables, off)
        }
    }

    # always extract weights variable if available
    wts <- hush(insight::find_weights(model))
    if (isTRUE(checkmate::check_formula(wts))) {
        additional_variables <- c(additional_variables, hush(all.vars(wts)))
    } else if (isTRUE(checkmate::check_character(wts, max.len = 4))) {
        if (isTRUE(grepl("~", wts))) {
            additional_variables <- c(additional_variables, hush(all.vars(stats::as.formula(wts))))
        } else {
            additional_variables <- c(additional_variables, wts)
        }
    }

    out <- hush(insight::get_data(model, verbose = FALSE, additional_variables = additional_variables))

    # iv_robust and some others
    if (is.null(out)) {
        out <- evalup(model[["call"]][["data"]])
    }
    if (is.null(out)) {
        out <- evalup(attr(model, "call")$data)
    }
    out <- as.data.frame(out)
    out <- set_variable_class(out, model = model)
    return(out)
}


# Record a per-column type label ("logical", "character", "factor", "binary",
# "numeric", "strata", "cluster", "other") in the
# "marginaleffects_variable_class" attribute of `modeldata`, so downstream
# functions can dispatch on variable type without re-inspecting columns.
#
# @param modeldata data.frame (or NULL, returned as-is).
# @param model optional model object used to (a) restrict classification to
#   the variables actually used by the model, and (b) upgrade labels based on
#   in-formula transformations such as factor(), strata(), or fixest::i().
set_variable_class <- function(modeldata, model = NULL) {
    if (is.null(modeldata)) return(modeldata)

    # this can be costly on large datasets, when only a portion of
    # variables are used in the model
    variables <- NULL
    # BUG FIX: the condition used to be `is.null(model)`, which called
    # insight::find_variables(NULL) (an error swallowed by tryCatch) and
    # never extracted the model's variables when a model *was* supplied, so
    # the optimization above was dead code. Only query the model when present.
    if (!is.null(model)) {
        variables <- tryCatch(
            unlist(insight::find_variables(model, flatten = TRUE), use.names = FALSE),
            error = function(e) NULL)
    }
    # fall back to classifying every column
    if (is.null(variables)) variables <- colnames(modeldata)

    out <- modeldata
    cl <- NULL
    for (col in variables) {
        if (is.logical(out[[col]])) {
            cl[col] <- "logical"
        } else if (is.character(out[[col]])) {
            cl[col] <- "character"
        } else if (is.factor(out[[col]])) {
            cl[col] <- "factor"
        } else if (inherits(out[[col]], "Surv")) {
            # is numeric but breaks the %in% 0:1 check
            cl[col] <- "other"
        } else if (is.numeric(out[[col]])) {
            if (is_binary(out[[col]])) {
                cl[col] <- "binary"
            } else {
                cl[col] <- "numeric"
            }
        } else {
            cl[col] <- "other"
        }
    }

    # without a model there is no formula to inspect; we are done
    if (is.null(model)) {
        attr(out, "marginaleffects_variable_class") <- cl
        return(out)
    }

    te <- hush(insight::find_terms(model, flatten = TRUE))

    # in-formula factor: factor(x) / as.factor(x)
    regex <- "^(^as\\.factor|^factor)\\((.*)\\)"
    idx <- gsub(
        regex,
        "\\2",
        Filter(function(x) grepl(regex, x), te))
    cl[names(cl) %in% idx] <- "factor"

    # in-formula categoricals: mo(x) / strata(x)
    regex <- "^(^mo|^strata)\\((.*)\\)"
    idx <- gsub(
        regex,
        "\\2",
        Filter(function(x) grepl(regex, x), te))
    cl[names(cl) %in% idx] <- "strata"

    # in-formula numeric: numeric(x) / as.numeric(x)
    regex <- "^numeric\\((.*)\\)$|^as.numeric\\((.*)\\)$"
    idx <- gsub(
        regex,
        "\\1",
        Filter(function(x) grepl(regex, x), te))
    cl[names(cl) %in% idx] <- "numeric"

    # in-formula logical: logical(x) / as.logical(x)
    regex <- "^logical\\((.*)\\)$|^as.logical\\((.*)\\)$"
    idx <- gsub(
        regex,
        "\\1",
        Filter(function(x) grepl(regex, x), te))
    cl[names(cl) %in% idx] <- "logical"

    # in-formula: fixest::i() — both interaction arguments become clusters
    fi <- NULL
    idx <- grepl("^i\\(.*\\)$", te)
    if (sum(idx) > 0) {
        arg1 <- lapply(te[idx], function(x) hush(as.character(str2lang(x)[[2]])))
        arg2 <- lapply(te[idx], function(x) hush(as.character(str2lang(x)[[3]])))
        arg1 <- unlist(arg1, recursive = TRUE)
        arg2 <- unlist(arg2, recursive = TRUE)
        arg2 <- gsub("^i\\.", "", arg2[grepl("^i\\.", arg2)])
        fi <- unique(c(arg1, arg2))
    }
    for (f in fi) {
        cl[f] <- "cluster"
    }

    # attributes
    attr(out, "marginaleffects_variable_class") <- cl
    return(out)
}


get_variable_class <- function(newdata, variable = NULL, compare = NULL) {
    if ("marginaleffects_variable_class" %in% names(attributes(newdata))) {
        cl <- attributes(newdata)$marginaleffects_variable_class
    } else {
        newdata <- set_variable_class(newdata)
        cl <- attributes(newdata)$marginaleffects_variable_class
    }
    if (is.null(compare) && is.null(variable)) {
        out <- cl
    } else if (is.null(compare)) {
        out <-
cl[variable] } else if (is.null(variable)) { if (isTRUE(compare == "categorical")) { out <- cl[cl %in% c("factor", "character", "logical", "strata", "cluster", "binary")] } else { out <- cl[cl %in% compare] } } else { if (isTRUE(compare == "categorical")) { out <- cl[variable] %in% c("factor", "character", "logical", "strata", "cluster", "binary") } else { out <- cl[variable] %in% compare } } return(out) } marginaleffects/R/slopes.R0000644000176200001440000004273214554076230015253 0ustar liggesusers#' Slopes (aka Partial derivatives, Marginal Effects, or Trends) #' #' @description #' Partial derivative of the regression equation with respect to a regressor of interest. #' #' * `slopes()`: unit-level (conditional) estimates. #' * `avg_slopes()`: average (marginal) estimates. #' #' The `newdata` argument and the `datagrid()` function can be used to control where statistics are evaluated in the predictor space: "at observed values", "at the mean", "at representative values", etc. #' #' See the slopes vignette and package website for worked examples and case studies: #' #' * #' * #' #' @details #' A "slope" or "marginal effect" is the partial derivative of the regression equation #' with respect to a variable in the model. This function uses automatic #' differentiation to compute slopes for a vast array of models, #' including non-linear models with transformations (e.g., polynomials). #' Uncertainty estimates are computed using the delta method. #' #' @param model Model object #' @param variables Focal variables #' * `NULL`: compute slopes or comparisons for all the variables in the model object (can be slow). #' * Character vector: subset of variables (usually faster). #' @param newdata Grid of predictor values at which we evaluate the slopes. #' + Warning: Please avoid modifying your dataset between fitting the model and calling a `marginaleffects` function. This can sometimes lead to unexpected results. 
#' + `NULL` (default): Unit-level slopes for each observed value in the dataset (empirical distribution). The dataset is retrieved using [insight::get_data()], which tries to extract data from the environment. This may produce unexpected results if the original data frame has been altered since fitting the model. #' + [datagrid()] call to specify a custom grid of regressors. For example: #' - `newdata = datagrid(cyl = c(4, 6))`: `cyl` variable equal to 4 and 6 and other regressors fixed at their means or modes. #' - See the Examples section and the [datagrid()] documentation. #' + string: #' - "mean": Marginal Effects at the Mean. Slopes when each predictor is held at its mean or mode. #' - "median": Marginal Effects at the Median. Slopes when each predictor is held at its median or mode. #' - "marginalmeans": Marginal Effects at Marginal Means. See Details section below. #' - "tukey": Marginal Effects at Tukey's 5 numbers. #' - "grid": Marginal Effects on a grid of representative numbers (Tukey's 5 numbers and unique values of categorical predictors). #' @param vcov Type of uncertainty estimates to report (e.g., for robust standard errors). Acceptable values: #' * FALSE: Do not compute standard errors. This can speed up computation considerably. #' * TRUE: Unit-level standard errors using the default `vcov(model)` variance-covariance matrix. #' * String which indicates the kind of uncertainty estimates to return. #' - Heteroskedasticity-consistent: `"HC"`, `"HC0"`, `"HC1"`, `"HC2"`, `"HC3"`, `"HC4"`, `"HC4m"`, `"HC5"`. See `?sandwich::vcovHC` #' - Heteroskedasticity and autocorrelation consistent: `"HAC"` #' - Mixed-Models degrees of freedom: "satterthwaite", "kenward-roger" #' - Other: `"NeweyWest"`, `"KernHAC"`, `"OPG"`. See the `sandwich` package documentation. #' * One-sided formula which indicates the name of cluster variables (e.g., `~unit_id`). This formula is passed to the `cluster` argument of the `sandwich::vcovCL` function. 
#' * Square covariance matrix #' * Function which returns a covariance matrix (e.g., `stats::vcov(model)`) #' @param conf_level numeric value between 0 and 1. Confidence level to use to build a confidence interval. #' @param type string indicates the type (scale) of the predictions used to #' compute contrasts or slopes. This can differ based on the model #' type, but will typically be a string such as: "response", "link", "probs", #' or "zero". When an unsupported string is entered, the model-specific list of #' acceptable values is returned in an error message. When `type` is `NULL`, the #' first entry in the error message is used by default. #' @param slope string indicates the type of slope or (semi-)elasticity to compute: #' - "dydx": dY/dX #' - "eyex": dY/dX * Y / X #' - "eydx": dY/dX * Y #' - "dyex": dY/dX / X #' - Y is the predicted value of the outcome; X is the observed value of the predictor. #' @param wts string or numeric: weights to use when computing average contrasts or slopes. These weights only affect the averaging in `avg_*()` or with the `by` argument, and not the unit-level estimates themselves. Internally, estimates and weights are passed to the `weighted.mean()` function. #' + string: column name of the weights variable in `newdata`. When supplying a column name to `wts`, it is recommended to supply the original data (including the weights variable) explicitly to `newdata`. #' + numeric: vector of length equal to the number of rows in the original data or in `newdata` (if supplied). #' @param hypothesis specify a hypothesis test or custom contrast using a numeric value, vector, or matrix, a string, or a string formula. #' + Numeric: #' - Single value: the null hypothesis used in the computation of Z and p (before applying `transform`). #' - Vector: Weights to compute a linear combination of (custom contrast between) estimates. Length equal to the number of rows generated by the same function call, but without the `hypothesis` argument. 
#' - Matrix: Each column is a vector of weights, as describe above, used to compute a distinct linear combination of (contrast between) estimates. The column names of the matrix are used as labels in the output. #' + String formula to specify linear or non-linear hypothesis tests. If the `term` column uniquely identifies rows, terms can be used in the formula. Otherwise, use `b1`, `b2`, etc. to identify the position of each parameter. The `b*` wildcard can be used to test hypotheses on all estimates. Examples: #' - `hp = drat` #' - `hp + drat = 12` #' - `b1 + b2 + b3 = 0` #' - `b* / b1 = 1` #' + String: #' - "pairwise": pairwise differences between estimates in each row. #' - "reference": differences between the estimates in each row and the estimate in the first row. #' - "sequential": difference between an estimate and the estimate in the next row. #' - "revpairwise", "revreference", "revsequential": inverse of the corresponding hypotheses, as described above. #' + See the Examples section below and the vignette: https://marginaleffects.com/vignettes/hypothesis.html #' @param p_adjust Adjust p-values for multiple comparisons: "holm", "hochberg", "hommel", "bonferroni", "BH", "BY", or "fdr". See [stats::p.adjust] #' @param df Degrees of freedom used to compute p values and confidence intervals. A single numeric value between 1 and `Inf`. When `df` is `Inf`, the normal distribution is used. When `df` is finite, the `t` distribution is used. See [insight::get_df] for a convenient function to extract degrees of freedom. Ex: `slopes(model, df = insight::get_df(model))` #' @param eps NULL or numeric value which determines the step size to use when #' calculating numerical derivatives: (f(x+eps)-f(x))/eps. When `eps` is #' `NULL`, the step size is 0.0001 multiplied by the difference between #' the maximum and minimum values of the variable with respect to which we #' are taking the derivative. 
Changing `eps` may be necessary to avoid #' numerical problems in certain models. #' @param numderiv string or list of strings indicating the method to use to for the numeric differentiation used in to compute delta method standard errors. #' + "fdforward": finite difference method with forward differences #' + "fdcenter": finite difference method with central differences (default) #' + "richardson": Richardson extrapolation method #' + Extra arguments can be specified by passing a list to the `numDeriv` argument, with the name of the method first and named arguments following, ex: `numderiv=list("fdcenter", eps = 1e-5)`. When an unknown argument is used, `marginaleffects` prints the list of valid arguments for each method. #' @param ... Additional arguments are passed to the `predict()` method #' supplied by the modeling package.These arguments are particularly useful #' for mixed-effects or bayesian models (see the online vignettes on the #' `marginaleffects` website). Available arguments can vary from model to #' model, depending on the range of supported arguments by each modeling #' package. See the "Model-Specific Arguments" section of the #' `?slopes` documentation for a non-exhaustive list of available #' arguments. #' @inheritParams comparisons #' #' @details #' Numerical derivatives for the `slopes` function are calculated #' using a simple epsilon difference approach: \eqn{\partial Y / \partial X = (f(X + \varepsilon/2) - f(X-\varepsilon/2)) / \varepsilon}{dY/dX = (f(X + e/2) - f(X-e/2)) / e}, #' where f is the `predict()` method associated with the model class, and #' \eqn{\varepsilon}{e} is determined by the `eps` argument. 
#' @template deltamethod #' @template model_specific_arguments #' @template bayesian #' @template equivalence #' @template type #' @template references #' #' @return A `data.frame` with one row per observation (per term/group) and several columns: #' * `rowid`: row number of the `newdata` data frame #' * `type`: prediction type, as defined by the `type` argument #' * `group`: (optional) value of the grouped outcome (e.g., categorical outcome models) #' * `term`: the variable whose marginal effect is computed #' * `dydx`: slope of the outcome with respect to the term, for a given combination of predictor values #' * `std.error`: standard errors computed by via the delta method. #' * `p.value`: p value associated to the `estimate` column. The null is determined by the `hypothesis` argument (0 by default), and p values are computed before applying the `transform` argument. For models of class `feglm`, `Gam`, `glm` and `negbin`, p values are computed on the link scale by default unless the `type` argument is specified explicitly. #' * `s.value`: Shannon information transforms of p values. How many consecutive "heads" tosses would provide the same amount of evidence (or "surprise") against the null hypothesis that the coin is fair? The purpose of S is to calibrate the analyst's intuition about the strength of evidence encoded in p against a well-known physical phenomenon. See Greenland (2019) and Cole et al. (2020). #' * `conf.low`: lower bound of the confidence interval (or equal-tailed interval for bayesian models) #' * `conf.high`: upper bound of the confidence interval (or equal-tailed interval for bayesian models) #' #' See `?print.marginaleffects` for printing options. 
#' #' @examplesIf interactive() || isTRUE(Sys.getenv("R_DOC_BUILD") == "true") #' @examples #' # Unit-level (conditional) Marginal Effects #' mod <- glm(am ~ hp * wt, data = mtcars, family = binomial) #' mfx <- slopes(mod) #' head(mfx) #' #' # Average Marginal Effect (AME) #' avg_slopes(mod, by = TRUE) #' #' #' # Marginal Effect at the Mean (MEM) #' slopes(mod, newdata = datagrid()) #' #' # Marginal Effect at User-Specified Values #' # Variables not explicitly included in `datagrid()` are held at their means #' slopes(mod, newdata = datagrid(hp = c(100, 110))) #' #' # Group-Average Marginal Effects (G-AME) #' # Calculate marginal effects for each observation, and then take the average #' # marginal effect within each subset of observations with different observed #' # values for the `cyl` variable: #' mod2 <- lm(mpg ~ hp * cyl, data = mtcars) #' avg_slopes(mod2, variables = "hp", by = "cyl") #' #' # Marginal Effects at User-Specified Values (counterfactual) #' # Variables not explicitly included in `datagrid()` are held at their #' # original values, and the whole dataset is duplicated once for each #' # combination of the values in `datagrid()` #' mfx <- slopes(mod, #' newdata = datagrid(hp = c(100, 110), #' grid_type = "counterfactual")) #' head(mfx) #' #' # Heteroskedasticity robust standard errors #' mfx <- slopes(mod, vcov = sandwich::vcovHC(mod)) #' head(mfx) #' #' # hypothesis test: is the `hp` marginal effect at the mean equal to the `drat` marginal effect #' mod <- lm(mpg ~ wt + drat, data = mtcars) #' #' slopes( #' mod, #' newdata = "mean", #' hypothesis = "wt = drat") #' #' # same hypothesis test using row indices #' slopes( #' mod, #' newdata = "mean", #' hypothesis = "b1 - b2 = 0") #' #' # same hypothesis test using numeric vector of weights #' slopes( #' mod, #' newdata = "mean", #' hypothesis = c(1, -1)) #' #' # two custom contrasts using a matrix of weights #' lc <- matrix(c( #' 1, -1, #' 2, 3), #' ncol = 2) #' colnames(lc) <- c("Contrast A", 
"Contrast B") #' slopes( #' mod, #' newdata = "mean", #' hypothesis = lc) #' #' @export slopes <- function(model, newdata = NULL, variables = NULL, type = NULL, by = FALSE, vcov = TRUE, conf_level = 0.95, slope = "dydx", wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ...) { dots <- list(...) # very early, before any use of newdata # if `newdata` is a call to `typical` or `counterfactual`, insert `model` scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) # build call: match.call() doesn't work well in *apply() call_attr <- c(list( name = "slopes", model = model, newdata = newdata, variables = variables, type = type, vcov = vcov, by = by, conf_level = conf_level, slope = slope, wts = wts, hypothesis = hypothesis, df = df, eps = eps), list(...)) call_attr <- do.call("call", call_attr) # slopes() does not support a named list of variables like comparisons() checkmate::assert_character(variables, null.ok = TRUE) # slope valid <- c("dydx", "eyex", "eydx", "dyex", "dydxavg", "eyexavg", "eydxavg", "dyexavg") checkmate::assert_choice(slope, choices = valid) # sanity checks and pre-processing model <- sanitize_model(model = model, newdata = newdata, wts = wts, vcov = vcov, calling_function = "marginaleffects", ...) sanity_dots(model = model, calling_function = "marginaleffects", ...) type <- sanitize_type(model = model, type = type, calling_function = "slopes") ############### sanity checks are over # Bootstrap out <- inferences_dispatch( INF_FUN = slopes, model = model, newdata = newdata, vcov = vcov, variables = variables, type = type, conf_level = conf_level, by = by, wts = wts, slope = slope, hypothesis = hypothesis, ...) 
if (!is.null(out)) { return(out) } out <- comparisons( model, newdata = newdata, variables = variables, vcov = vcov, conf_level = conf_level, type = type, wts = wts, hypothesis = hypothesis, equivalence = equivalence, df = df, p_adjust = p_adjust, by = by, eps = eps, numderiv = numderiv, comparison = slope, cross = FALSE, # secret arguments internal_call = TRUE, ...) data.table::setDT(out) attr(out, "vcov.type") <- get_vcov_label(vcov) attr(out, "newdata") <- newdata # recall attr(out, "call") <- call_attr # class data.table::setDF(out) class(out) <- setdiff(class(out), "comparisons") class(out) <- c("slopes", "marginaleffects", class(out)) return(out) } #' Average slopes (aka Average partial derivatives, marginal effects, or trends) #' @describeIn slopes Average slopes #' @export #' avg_slopes <- function(model, newdata = NULL, variables = NULL, type = NULL, by = TRUE, vcov = TRUE, conf_level = 0.95, slope = "dydx", wts = NULL, hypothesis = NULL, equivalence = NULL, p_adjust = NULL, df = Inf, eps = NULL, numderiv = "fdforward", ...) { # order of the first few paragraphs is important # if `newdata` is a call to `typical` or `counterfactual`, insert `model` # should probably not be nested too deeply in the call stack since we eval.parent() (not sure about this) scall <- rlang::enquo(newdata) newdata <- sanitize_newdata_call(scall, newdata, model) # Bootstrap out <- inferences_dispatch( INF_FUN = avg_slopes, model = model, newdata = newdata, vcov = vcov, variables = variables, type = type, conf_level = conf_level, by = by, wts = wts, slope = slope, hypothesis = hypothesis, ...) if (!is.null(out)) { return(out) } out <- slopes( model = model, newdata = newdata, variables = variables, type = type, vcov = vcov, conf_level = conf_level, by = by, slope = slope, wts = wts, hypothesis = hypothesis, equivalence = equivalence, p_adjust = p_adjust, df = df, eps = eps, numderiv = numderiv, ...) 
return(out) } marginaleffects/R/methods_mgcv.R0000644000176200001440000000016614541720224016413 0ustar liggesusers#' @rdname get_coef #' @export get_coef.gam <- function(model, ...) { out <- model$coefficients return(out) } marginaleffects/R/package.R0000644000176200001440000000242414541720224015326 0ustar liggesusers# https://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when#comment20826625_12429344 # 2012 hadley says "globalVariables is a hideous hack and I will never use it" # 2014 hadley updates his own answer with globalVariables as one of "two solutions" #' @importFrom Rcpp evalCpp #' @useDynLib marginaleffects #' @import data.table utils::globalVariables(c( "marginaleffects_contrast_hi", "marginaleffects_contrast_lo", "..bycols", "..to_keep", "..cols", "..idx_by", "..idx_hi", "..cl", "..idx_lo", ".data", "..idx_or", "..idx", "..idx1", "..idx2", "..nonfocal", "rowidcf", "elast", "..tmp", "..variables_grid", "..w", "marginaleffects_lab", "marginaleffects_ter", ".", "tmp_idx", "comparison", "condition_variable", "conf_int", "conf_level", "conf.high", "conf.low", "contrast", "draw", "dydx", "eps", "estimates", "estimate", "formatBicLL", "gn", "group", "logLik", "m", "marginaleffects_function", "marginaleffects_wts_internal", "marginalmean", "model", "N", "p.value", "part", "patterns", "position", "predicted_hi", "predicted_lo", "predicted_or", "predicted", "rowid", "rowidunique", "hi", "lo", "section", "stars_note", "statistic", "statistic1", "std.error", "term", "ticks", "tmp_grp", "comparison_idx", "value", "where", "wts"))marginaleffects/R/methods_robustlmm.R0000644000176200001440000000155514541720224017506 0ustar liggesusers#' @rdname set_coef #' @export set_coef.rlmerMod <- function(model, coefs, ...) { model@beta <- coefs model } #' @rdname get_predict #' @export get_predict.rlmerMod <- function(model, newdata = insight::get_data(model), type = "response", ...) { args <- list(...) 
# some predict methods raise warnings on unused arguments unused <- c("type", "normalize_dydx", "step_size", "numDeriv_method", "conf_level", "conf.level", "internal_call", "return_format", "vcov", "eps", "modeldata") args <- args[setdiff(names(args), unused)] args[["object"]] <- model args[["newdata"]] <- newdata out <- data.frame(rowid = seq_len(nrow(newdata)), estimate = do.call("predict", args)) return(out) } marginaleffects/R/methods_survey.R0000644000176200001440000000415014557752334017027 0ustar liggesusers#' @include get_coef.R #' @rdname get_coef #' @export get_coef.svyolr <- function(model, ...) { out <- insight::get_parameters(model) out <- stats::setNames(out$Estimate, out$Parameter) return(out) } #' @include set_coef.R #' @rdname set_coef #' @export set_coef.svyolr <- function(model, coefs, ...) { # in basic model classes coefficients are named vector idx <- match(names(model$coefficients), names(coefs)) model[["coefficients"]] <- coefs[idx] idx <- match(names(model$zeta), names(coefs)) model[["zeta"]] <- coefs[idx] model } #' @include get_group_names.R #' @rdname get_group_names #' @export get_group_names.svyolr <- function(model, ...) { resp <- insight::get_response(model) if (is.factor(resp)) { out <- levels(resp) } else { out <- unique(resp) } return(out) } #' @include get_predict.R #' @rdname get_predict #' @export get_predict.svyolr <- function(model, newdata = insight::get_data(model), type = "probs", ...) { type <- sanitize_type(model, type, calling_function = "predictions") # hack: 1-row newdata returns a vector, so get_predict.default does not learn about groups if (nrow(newdata) == 1) { hack <- TRUE newdata <- newdata[c(1, 1), , drop = FALSE] newdata$rowid[1] <- -Inf } else { hack <- FALSE } out <- get_predict.default(model, newdata = newdata, type = type, ...) 
# hack out <- out[out$rowid != -Inf, ] return(out) } #' @include sanity_model.R #' @rdname sanitize_model_specific #' @export sanitize_model_specific.svyolr <- function(model, wts = NULL, ...) { if (is.null(wts)) { warning("With models of this class, it is normally good practice to specify weights using the `wts` argument. Otherwise, weights will be ignored in the computation of quantities of interest.", call. = FALSE) } return(model) } #' @include sanity_model.R #' @rdname sanitize_model_specific #' @export sanitize_model_specific.svyglm <- sanitize_model_specific.svyolr marginaleffects/R/github_issue.R0000644000176200001440000000040114541720224016416 0ustar liggesusersgithub_issue <- function() { msg <- insight::format_string( "Please report this error along with a minimal reproducible example using publicly available data on Github: https://github.com/vincentarelbundock/marginaleffects/issues ") } marginaleffects/R/sanitize_conf_level.R0000644000176200001440000000077114541720224017760 0ustar liggesuserssanitize_conf_level <- function(conf_level, ...) { # periods in arg name are bad style because of s3, but we want to accept both because `broom` uses `conf_level` dots <- list(...) 
if ("conf.level" %in% names(dots)) { conf_level <- dots[["conf.level"]] } checkmate::assert( checkmate::check_numeric(conf_level, len = 1), checkmate::check_true(conf_level > 0), checkmate::check_true(conf_level < 1), combine = "and") return(conf_level) } marginaleffects/R/imputation.R0000644000176200001440000000416214541720224016125 0ustar liggesusersprocess_imputation <- function(x, call_attr, marginal_means = FALSE) { insight::check_if_installed("mice") if (inherits(x, "mira")) { x <- x$analyses } else if (inherits(x, "amest")) { x <- x } mfx_list <- vector("list", length(x)) for (i in seq_along(x)) { calltmp <- call_attr calltmp[["model"]] <- x[[i]] # not sure why but this breaks marginal_means on "modeldata specified twice" if (isFALSE(marginal_means)) { calltmp[["modeldata"]] <- get_modeldata( x[[i]], additional_variables = FALSE) } mfx_list[[i]] <- evalup(calltmp) if (i == 1) { out <- mfx_list[[1]] } mfx_list[[i]]$term <- seq_len(nrow(mfx_list[[i]])) class(mfx_list[[i]]) <- c("marginaleffects_mids", class(mfx_list[[i]])) } mipool <- mice::pool(mfx_list) for (col in c("estimate", "statistic", "p.value", "conf.low", "conf.high")) { if (col %in% colnames(out) && col %in% colnames(mipool$pooled)) { out[[col]] <- mipool$pooled[[col]] } else { out[[col]] <- NULL } } if ("df" %in% colnames(mipool$pooled)) { out$df <- mipool$pooled$df } out$std.error <- sqrt(mipool$pooled$t) out <- get_ci( out, vcov = call_attr[["vcov"]], conf_level = call_attr[["conf_level"]], df = mipool$pooled$df) attr(out, "inferences") <- mipool attr(out, "model") <- mice::pool(lapply(mfx_list, attr, "model")) return(out) } #' tidy helper #' #' @noRd #' @export tidy.marginaleffects_mids <- function(x, ...) { if (!"std.error" %in% colnames(x)) { insight::format_error('The output of `marginal_means` does not include a `std.error` column. Some models do not generate standard errors when estimates are backtransformed (e.g., GLM models). 
One solution is to use `type="response"` for those models.') } out <- as.data.frame(x[, c("estimate", "std.error")]) out$term <- seq_len(nrow(out)) return(out) } #' glance helper #' #' @noRd #' @export glance.marginaleffects_mids <- function(x, ...) { data.frame() } marginaleffects/R/sanitize_interaction.R0000644000176200001440000000140414541720224020155 0ustar liggesuserssanitize_cross <- function(cross, variables, model) { # cross: flip NULL to TRUE if there are interactions in the formula # and FALSE otherwise checkmate::assert_flag(cross, null.ok = FALSE) if (isTRUE(cross) && is.null(variables)) { msg <- "When `cross = TRUE`, you must use the `variables` argument to specify which variables should be interacted." insight::format_error(msg) } if (isTRUE(checkmate::check_flag(cross))) { return(cross) } inter <- try(insight::find_interactions(model, flatten = TRUE), silent = TRUE) # variables is length 1 means we don't want an interaction if (length(variables) > 1 && isTRUE(length(inter) > 0)) { return(TRUE) } else { return(FALSE) } } marginaleffects/R/mean_or_mode.R0000644000176200001440000000303314554076764016375 0ustar liggesusers#' find mode, preserve type, and pick an arbitrary value when multi-modal #' https://stackoverflow.com/a/8189441/342331 #' @noRd get_mode <- function(x) { ux <- unique(x) ux[which.max(tabulate(match(x, ux)))] } #################################################################### # The functions below were copied from the `prediction` package. ## # Copyright: Thomas J. 
Leeper 2016-2018 ## # MIT License ## #################################################################### #' Compute the mode or mean of `x` #' @param x extract the mean or the mode of vector or data.frame x depending on its type #' @keywords internal #' @noRd #' @return numeric vector get_mean_or_mode <- function(x) { UseMethod("get_mean_or_mode") } #' @export get_mean_or_mode.default <- function(x) { mean(x) } #' @export get_mean_or_mode.character <- function(x) { get_mode(x) } #' @export get_mean_or_mode.factor <- function(x) { get_mode(x) } #' @export get_mean_or_mode.logical <- function(x) { get_mode(x) } #' @export get_mean_or_mode.data.frame <- function(x) { out <- list() for (n in names(x)) { # variables transformed to factor in formula are assigned a "factor" # TRUE attribute by insight::get_data if (isTRUE(attributes(x)$marginaleffects_variable_class[[n]] == "factor")) { out[[n]] <- get_mean_or_mode.factor(x[[n]]) } else { out[[n]] <- get_mean_or_mode(x[[n]]) } } return(out) } marginaleffects/R/backtransform.R0000644000176200001440000000215114541720224016564 0ustar liggesusersbacktransform <- function(x, transform) { # transform can be a function or a named list of length 1 with a function, but could be NULL if (!is.function(transform)) { if (is.null(transform[[1]])) { return(x) } else { transform <- transform[[1]] } } checkmate::assert_data_frame(x) checkmate::assert_function(transform) cols <- intersect(colnames(x), c("estimate", "conf.low", "conf.high")) draws <- attr(x, "posterior_draws") if (!is.null(draws)) { dim_pre <- dim(draws) draws <- transform(draws) dim_post <- dim(draws) if (!identical(dim_pre, dim_post)) { insight::format_error("The `transform` function must return an object of the same class as its input: a matrix input must return a matrix of the same size, and a vector input must return a vector of the same length.") } } for (col in cols) { x[[col]] <- transform(x[[col]]) } for (col in c("std.error", "statistic")) { x[[col]] <- NULL } 
attr(x, "posterior_draws") <- draws return(x) } marginaleffects/R/bootstrap_boot.R0000644000176200001440000000530314541720224016772 0ustar liggesusersbootstrap_boot <- function(model, INF_FUN, ...) { # attached by `inferences()` conf_type <- attr(model, "inferences_conf_type") checkmate::assert_choice(conf_type, choices = c("perc", "norm", "basic", "bca")) # bootstrap using the original data and call modcall <- insight::get_call(model) modeldata <- get_modeldata(model, additional_variables = FALSE) # evaluate the {marginaleffects} call to get output without inferences() # use ... because arguments are not the same for different {marginaleffects} functions dots <- list(...) dots[["vcov"]] <- FALSE # avoid recursion attr(model, "inferences_method") <- NULL out <- do.call(INF_FUN, c(list(model), dots)) # default confidence level may be implicit in original call, but we need numeric if (is.null(dots[["conf_level"]])) { conf_level <- 0.95 } else { conf_level <- dots[["conf_level"]] } bootfun <- function(data, indices) { d <- data[indices, , drop = FALSE] modcall[["data"]] <- d modboot <- eval(modcall) modboot <- eval(modboot) args <- c(list(modboot, modeldata = d), dots) out <- do.call(INF_FUN, args)$estimate return(out) } args <- list("data" = modeldata, "statistic" = bootfun) args <- c(args, attr(model, "inferences_dots")) args <- args[unique(names(args))] B <- do.call(boot::boot, args) # print.boot prints an ugly nested call B$call <- match.call() # HACK: boot::boot() output is non-standard. There must be a better way! # NG: just compute them manually as the SD of the bootstrap distribution pr <- utils::capture.output(print(B)) pr <- pr[(grep("^Bootstrap Statistics :", pr) + 1):length(pr)] pr <- gsub("std. 
error", "std.error", pr) pr <- paste(pr, collapse = "\n") pr <- utils::read.table(text = pr, header = TRUE) out$std.error <- pr$std.error # extract from weird boot.ci() list (inspired from `broom::tidy.broom` under MIT) ci_list <- lapply(seq_along(B$t0), boot::boot.ci, boot.out = B, conf = conf_level, type = conf_type) pos <- pmatch(conf_type, names(ci_list[[1]])) if (conf_type == "norm") { cols <- 2:3 } else { cols <- 4:5 } ci <- lapply(ci_list, function(x) x[[pos]]) ci <- do.call("rbind", ci)[, cols] # add CI to original {marginaleffects} call if (is.matrix(ci)) { out$conf.low <- ci[, 1] out$conf.high <- ci[, 2] } else { out$conf.low <- ci[1] out$conf.high <- ci[2] } attr(out, "inferences") <- B attr(out, "posterior_draws") <- t(B$t) return(out) } marginaleffects/R/sanitize_vcov.R0000644000176200001440000000243714541720224016622 0ustar liggesuserssanitize_vcov <- function(model, vcov) { # TRUE generates a warning in `insight::get_varcov` for some models if (isTRUE(checkmate::check_flag(vcov))) { return(NULL) } # no vcov matrix for bayesian models if (inherits(model, c("brmsfit", "stanreg", "bart"))) { return(NULL) } # strings should be case-insensitive vcov_strings <- c("stata", "robust", "HC", "HC0", "HC1", "HC2", "HC3", "HC4", "HC4m", "HC5", "HAC", "NeweyWest", "kernHAC", "OPG", "satterthwaite", "kenward-roger") if (isTRUE(checkmate::check_choice(hush(tolower(vcov)), choices = tolower(vcov_strings)))) { idx <- match(tolower(vcov), tolower(vcov_strings)) return(vcov_strings[idx]) } checkmate::assert( checkmate::check_null(vcov), checkmate::check_function(vcov), checkmate::check_matrix(vcov), checkmate::check_formula(vcov), checkmate::check_choice(vcov, choices = vcov_strings)) out <- vcov if (isTRUE(checkmate::check_function(out))) { out <- hush(out(model)) } if (isTRUE(checkmate::check_matrix(out))) { if (ncol(out) != nrow(out)) { stop("The `vcov` matrix must be square.", call. 
= FALSE) } } return(out) }marginaleffects/R/methods_plm.R0000644000176200001440000000131614541720224016245 0ustar liggesusers#' @include sanity_model.R #' @rdname sanitize_model_specific #' @keywords internal sanitize_model_specific.plm <- function(model, ...) { if ("within" %in% model$args$model) { stop('The `plm::predict` function does not appear to support the `newdata` argument when `plm(model="within")`. Therefore, `marginaleffects` cannot support "within" models, even if it supports many other models produced by the `plm` package. You may want to try the `fixest` package instead.', call. = FALSE) } return(model) } #' @rdname sanitize_model_specific sanitize_model_specific.plm <- function(model, ...) { insight::check_if_installed("insight", minimum_version = "0.17.1") return(model) } marginaleffects/R/methods_brglm2.R0000644000176200001440000000114014557752334016653 0ustar liggesusers#' @rdname get_coef #' @export get_coef.brmultinom <- function(model, ...) { out <- insight::get_parameters(model) out <- stats::setNames(out$Estimate, sprintf("%s:%s", out$Response, out$Parameter)) return(out) } #' @include methods_nnet.R #' @rdname get_predict #' @export get_predict.brmultinom <- get_predict.multinom #' @include get_group_names.R #' @include methods_nnet.R #' @rdname get_group_names #' @export get_group_names.bracl <- get_group_names.multinom #' @rdname get_coef #' @export get_coef.bracl <- function(model, ...) { stats::coef(model) } marginaleffects/NEWS.md0000644000176200001440000010362214560042044014505 0ustar liggesusers# News ## 0.18.0 This release represents a major step towards 1.0.0. Some functions are renamed and now raise deprecation warnings. After 1.0.0, the API will become much more stable, and any change will have to be very deliberate with much lead time and deprecation delays. Breaking changes: * `tidy()` no longer takes the average of estimates in the original model object. 
Users who want an aggregate estimate should call the relevant `avg_*()` function, or use the `by` argument explicitly. The previous behavior led to unexpected behavior and increased code complexity a lot. * `summary()` methods are removed. These have never provided any additional information; they just reprinted the output already available with the standard print method. At least the default `summary()` for data frames (which is now triggered on `marginaleffects` object) provides a different view on the content of the object. * `plot_cco()`, `plot_cme()`, and `plot_cap()` were renamed in version 0.9.0, one year ago. They are now fully removed from the package. New: * `datagrid(grid_type = "balanced")` creates a balanced grid with all unique values of categorical predictors. This can be used with `predictions()` to compute marginal means as in the `emmeans` package. * `mvgam` package support (multivariate generalized additive models) Deprecation warnings: * `deltamethod()` has been named `hypotheses()` for a while. We now issue a deprecation warning and it will be removed eventually. * `datagridcf()` will eventually be deprecated and removed from the package. We will raise a warning for at least one year before removing the function. Identical results can be obtained with `datagrid(..., grid_type="counterfactual")` * `marginal_means()` will eventually be deprecated and removed from the package. We will raise a warning for at least one year before removing the function. Identical results can be obtained using the `predictions()` function and the `grid_type="balanced"` argument of `datagrid()`. Examples are in the marginal means vignette on the website. Minor: * Better warning messages for unsupported matrix columns, enhancing user experience and troubleshooting. * Various improvements to documentation. * Typos * Repository hosts model objects for easier testing. 
Bug fixes: * Error on `hypotheses(joint = "string")` for `comparisons()` objects (no result was returned). Thanks to @BorgeJorge for report #981. * Enhanced support for multi-equation Bayesian models with `brms` models. Thanks to @winterstat for report #1006. * Parameter names with spaces could break standard errors. Thanks to @Lefty2021 for report #1005. ## 0.17.0 Breaking changes: * The `comparisons()` now uses "forward contrasts" by default for numeric predictors, instead of "centered contrasts". This can lead to small numerical differences in non-linear models. * The `variables` argument of the `comparisons()` function no longer accepts numeric vectors unless they are of length 2, specifying the low and high contrast values. This is to avoid ambiguity between the two vector version. Users should supply a data frame or a function instead. This is nearly as easy, and removes ambiguity. New supported packages: * `dbarts`: https://cran.r-project.org/package=dbarts * `mvgam`: https://nicholasjclark.github.io/mvgam/ Not available on CRAN yet, but this package maintains its own `marginaleffects` support function. * `rms::Gls`: https://cran.r-project.org/package=rms Misc: * `comparisons()`: The `variables` argument now accepts functions and data frames for factor, character, and logical variables. * Deprecation warning for: `plot_cap()`, `plot_cme()`, and `plot_cco()`. These function names will be removed in version 1.0.0. * `options(modelsummary_factory_default=...)` is respected in Quarto and Rmarkdown documents. Bugs: * `wts` argument now respected in `avg_slopes()` for binary variables. Thanks to @trose64 for report #961 * Custom functions in the `comparison` argument of `comparisons()` did not supply the correct `x` vector length for bayesian models when the `by` argument is used. Thanks to @Sandhu-SS for report #931. 
* Add support for two facet variables (through `facet_grid`) when plotting using `condition` * `comparisons()`: When `variables` is a vector of length two and `newdata` has exactly two columns, there was ambiguity between custom vectors and length two vector of contrasts. Bug reported by C. Rainey on Twitter. * Superfluous warning with `fixest::fenegbin`. ## 0.16.0 Machine learning support: * `tidymodels` package * `mlr3` package Misc: * New vignettes: - Inverse Probability Weighting - Machine Learning - Matching * Add support for `hypotheses()` to `inferences()`. Thanks to @Tristan-Siegfried for code contribution #908. * Support `survival::survreg()`. Thanks to Carlisle Rainey for Report #911. * `column_names` argument in `print.marginaleffects()` to suppress the printed column names at the bottom of the printout. * The function supplied to the `comparison` argument of the `comparisons()` function can now operate on `x` and on `newdata` directly (e.g., to check the number of observations). * More informative errors from `predict()`. Bugs: * Some `gamlss` models generated an error related to the `what` argument. Thanks to @DHLocke for Issue #933 ## 0.15.1 * `hypotheses()`: The `FUN` argument handles `group` columns gracefully. * Native support for `Amelia` for multiple imputation. Documentation: * New section on "Complex aggregations" in the Hypothesis testing vignette. Bug fix: * Results of the `predictions()` function could be inaccurate when (a) running version 0.15.0, (b) `type` is `NULL` or `invlink(link)`, (c) model is `glm()`, and (d) the `hypothesis` argument is non-numeric. Thanks to @strengejacke for report [#903](https://github.com/vincentarelbundock/marginaleffects/issues/903) ## 0.15.0 New: * Conformal prediction via `inferences()` * `hypothesis` argument now accepts multiple string formulas. * The `type` argument now accepts an explicit `invlink(link)` value instead of silently back-transforming. Users are no longer pointed to `type_dictionary`. 
Instead, they should call their function with a bad `type` value, and they will obtain a list of valid types. The default `type` value is printed in the output. This is useful because the default `type` value is `NULL`, so the user often does not explicitly decide. * Allow install with Rcpp 1.0.0 and greater. Support new models: * `survey::svyolr()` Misc: * `inferences(method="simulation")` uses the original point estimate rather than the mean of the simulation distribution. Issue #851. * Better documentation and error messages for `newdata=NULL` * Some performance improvements for `predictions()` and `marginalmeans()` (#880, #882, @etiennebacher). Bug fix: * `newdata="median"` returned mean of binary variables. Thanks to @jkhanson1970 for report #896. ## 0.14.0 Breaking changes: * Row order of the output changes for some objects. Rows are not sorted alphabetically by `term`, `by`, and variables explicitly supplied to `datagrid`. This can affect hypothesis tests computed using the b1, b2, b3, and other indices. * New procedure `numderiv` argument use a different procedure to select the step size used in the finite difference numeric derivative used to compute standard errors: abs(x) * sqrt(.Machine$double.eps). The numerical results may not be exactly identical to previous versions of `marginaleffects`, but the step size should be adequate in a broader variety of cases. Note that users can use the `numderiv` argument for more control on numeric differentiation, as documented. * `bife` models are no longer supported pending investigation in weird results in the tests. Looking for volunteers write more thorough tests. New: * Support: `logistf` package. * Support: `DCchoice` package. * Support: `stats::nls` * `hypotheses()` can now accept raw data frame, which gives a lot of flexibility for custom contrasts and functions. See the Hypothesis vignette for an example. 
* `numderiv` argument allows users to use finite difference (center or forward) or Richardson's method to compute the numerical derivatives used in the calculation of standard errors. Bug fixes: * `inferences()` supports the `cross` argument for `comparisons()` objects. Thanks to Kirill Solovev for report #856. * `splines::bs()` in formulas could produce incorrect results due to weirdness in `stats::model.matrix()`. Thanks to @chiungming for report #831. * `mgcv` with `ocat` are now supported. Thanks to Lorenzo Fabbri for Issue #844. * `quantreg` problem with `rowid` merge did not affect estimates but did not return the full original data. Issue #829. * `get_modeldata()` extracts weights variable when available. * `predictions()` is no longer broken in some `inferences()` calls. Issue #853 * Inaccurate averaging with `comparison=differenceavg` some models where all predictors are categorical. Thanks to Karl Ove Hufthammer for report #865. Misc: * Major refactor to simplify the code base and make maintenance easier. ## 0.13.0 Breaking change: * `glmmTMB`: Standard errors are no longer supported because they may have been erroneous. Follow Issue #810 on Github for developments: https://github.com/vincentarelbundock/marginaleffects/issues/810 New: * `hypothesis` argument accepts wildcards: `hypothesis = "b*=b1"` * `s.value` column in all output: Shannon transforms for p values. See Greenland (2019). * `marginal_means` supports `mira` (`mice` objects). * `comparisons()`: The `variables` arguments now accepts arbitrary numeric vectors of length equal to the number of rows in `newdata`. This allows users to specify fully custom treatment sizes. In the documentation examples, we show how to estimate the difference for a 1 standard deviation shift in a regressor, where the standard deviation is calculated on a group-wise basis. * `comparisons()`: the `variables` argument now accepts "revpairwise", "revsequential", "revreference" for factor and character variables. 
* `comparisons()`: the `comparison` argument now accepts "lift" and "liftavg". Performance: * Computing elasticities for linear models is now up to 30% faster (#787, @etiennebacher). Bug fixes: * Better handling of environments when `newdata` is a function call. Thanks to @jcccf for report #814 and to @capnrefsmmat for the proposed fix using the `rlang` package. * Degrees of freedom mismatch for joint hypothesis tests. Thanks to @snhansen for report #789. ## 0.12.0 Breaking change: * Row order of output has changed for many calls, especially those using the `by` argument. This may break hypothesis tests conducted by indexing `b1`, `b2`, etc. This was necessary to fix Issue #776. Thanks to @marcora for the report. New: * `hypotheses()`: Joint hypothesis tests (F and Chi-square) with the `joint` and `joint_test` arguments. * `vcov.hypotheses` method. * `wts` is now available in `plot_predictions()`, `plot_comparisons()`, and `plot_slopes()`. Bug: * Wrong order of rows in bayesian models with `by` argument. Thanks to @shirdekel for report #782. ## 0.11.2 * `vcov()` and `coef()` methods for `marginaleffects` objects. * Strings in `wts` are accepted with the `by` argument. * `predictions()` and `avg_predictions()` no longer use an automatic backtransformation for GLM models unless `hypothesis` is `NULL`. * `vcov()` can be used to retrieve a full variance-covariance matrix from objects produced by `comparisons()`, `slopes()`, `predictions()`, or `marginal_means()`. * When processing objects obtained using `mice` multiple imputation, the pooled model using `mice::pool` is attached to the `model` attribute of the output. This means that functions like `modelsummary::modelsummary()` will not erroneously report goodness-of-fit statistics from just a single model and will instead appropriately report the statistics for the pooled model. Thanks to @Tristan-Siegfried for PR #740. * More informative error messages on some prediction problems. 
Thanks to @andymilne for Report #751. Performance: * `inferences()` is now up to 17x faster and much more memory-efficient when `method` is `"boot"` or `"rsample"` (#770, #771, @etiennebacher). Bugs: * `brms` models with `nl=TRUE` and a single predictor generated an error. Thanks to @Tristan-Siegfried for Report #759. * `avg_predictions()`: Incorrect group-wise averaging when all predictors are categorical, the `variables` argument is used, and we are averaging with `avg_` or the `by` argument. Thanks to BorgeJorge for report #766. * Bug in `datagrid()` when called inside a user-written function. Thanks to @NickCH-K for report #769 and to @capnrefsmmat for the diagnostics. ## 0.11.1 Breaking change: * Row orders are now more consistent, but may have changed from previous versions. This could affect results from `hypothesis` with `b1`, `b2`, ... indexing. Support new models: * `nlme::lme()` * `phylolm::phylolm()` * `phylolm::phyloglm()` New: * Vignette on 2x2 experimental designs. Thanks to Demetri Pananos. * `comparisons()` accepts data frames with two numeric columns ("low" and "high") to specify fully customizable contrasts. * `datagrid()` gets a new `by` argument to apply grid-making functions within groups. * `plot_*()` gain a `newdata` argument for use with `by`. Bug: * `comparisons(comparison = "lnratioavg")` ignored `wts` argument. Thanks to Demetri Pananos for report #737. * `ordinal::clm()`: incorrect standard errors when location and scale parameters are the same. Thanks to MrJerryTAO for report #718. * Incorrect label for "2sd" comparisons. Thanks to Andy Milne for report #720. * Invalid factor levels in `datagrid()` means `newdata` argument gets ignored. Thanks to Josh Errickson for report #721. * Error in models with only categorical predictors and the `by` argument. Thanks to Sam Brilleman for report #723. * Elasticities are now supported for `ordinal::clm()` models. Thanks to MrJerryTAO for report #729. 
* `glmmTMB` models with zero-inflated components are supported. Thanks to @Helsinki-Ronan and @strengejacke for report #734. ## 0.11.0 Breaking changes: * `type` column is replaced by `type` attribute. * `predictions()` only works with officially supported model types (same list as `comparisons()` and `slopes()`). Renamed arguments (backward compatibility is preserved): * `transform_pre` -> `comparison` * `transform_post` -> `transform` New: * `p_adjust` argument: Adjust p-values for multiple comparisons. * `equivalence` argument available everywhere. Performance: * Much faster results in `avg_*()` functions for models with only categorical predictors and many rows of data, using deduplication and weights instead of unit-level estimates. * Faster predictions in `lm()` and `glm()` models using `RcppEigen`. * Bayesian models with many rows. Thanks to Etienne Bacher. #694 * Faster predictions, especially with standard errors and large datasets. Bugs: * Multiple imputation with `mira` objects was not pooling all datasets. Thanks to @Generalized for report #711. * Support for more models with offsets. Thanks to @mariofiorini for report #705. * Error on `predictions()` with `by` and `wts`. Thanks to Noah Greifer for report #695. * `afex`: some models generated errors. Thanks to Daniel Lüdecke for report #696. * `group` column name is always forbidden. Thanks to Daniel Lüdecke for report #697. * Blank graphs in `plot_comparisons()` with a list in `variables`. * `type="link"` produced an error with some categorical `brms` models. Thanks to @shirdekel for report #703. * Error on `predictions(variables = ...)` for `glmmTMB` models. Thanks to Daniel Lüdecke for report #707. * `by` with user-specified function in `comparison` and factor predictor did not aggregate correctly. Thanks to @joaotedde for report #715. * `ordinal::clm`: Support `cum.prob` and `linear.predictor` prediction types. Thanks to @MrJerryTAO for report #717. 
## 0.10.0 Performance: * 2-4x faster execution for many calls. Thanks to Etienne Bacher. New models supported: * `MCMCglmm::MCMCglmm` * `Rchoice::hetprob` * `Rchoice::ivpml` * Multiple imputation using `mice` and any package which can return a list of imputed data frames (e.g., `Amelia`, `missRanger`, etc.) Plot improvements: * New `by` argument to display marginal estimates by subgroup. * New `rug` argument to display tick marks in the margins. * New `points` argument in `plot_predictions()` to display a scatter plot. * New `gray` argument to plot in grayscale using line types and shapes instead of color. * The `effect` argument is renamed to `variables` in `plot_slopes()` and `plot_comparisons()`. This improves consistency with the analogous `slopes()` and `comparisons()` functions. * The plotting vignette was re-written. Other: * Support multiple imputation with `mice` `mira` objects. The multiple imputation vignette was rewritten. * The `variables_grid` argument in `marginal_means()` is renamed `newdata`. Backward compatibility is maintained. * `avg_*()` returns an informative error when `vcov` is "satterthwaite" or "kenward-roger" * "satterthwaite" and "kenward-roger" are now supported when `newdata` is not `NULL` * Informative error when `hypothesis` includes a `b#` larger than the available number of estimates. * `avg_predictions(model, variables = "x")` computes average counterfactual predictions by subgroups of `x` * `datagrid()` and `plot_*()` functions are faster in datasets with many extraneous columns. * In `predictions(type = NULL)` with `glm()` and `Gam()` we first make predictions on the link scale and then backtransform them. Setting `type="response"` explicitly makes predictions directly on the response scale without backtransformation. * Standard errors now supported for more `glmmTMB` models. * Use the `numDeriv` package for numeric differentiation in the calculation of delta method standard error. 
A global option can now be passed to `numDeriv::jacobian`: - `options(marginaleffects_numDeriv = list(method = "simple", method.args = list(eps = 1e-6)))` - `options(marginaleffects_numDeriv = list(method = "Richardson", method.args = list(eps = 1e-6)))` - `options(marginaleffects_numDeriv = NULL)` * Print: - Print fewer significant digits. - `print.marginaleffects` now prints all columns supplied to `newdata` - Less redundant labels when using `hypothesis` * Many improvements to documentation. Bugfixes: * Standard errors could be inaccurate in models with non-linear components (and interactions) when some of the coefficients were very small. This was related to the step size used for numerical differentiation for the delta method. Issue #684. * `avg_predictions(by =)` did not work when the dataset included a column named `term`. Issue #683. * `brms` models with multivariate outcome collapsed categories in `comparisons()`. Issue #639. * `hypotheses()` now works on lists and in calls to `lapply()`, `purrr::map()`, etc. Issue #660. ## 0.9.0 Breaking changes: * All functions return an `estimate` column instead of the function-specific `predicted`, `comparisons`, `dydx`, etc. This change only affects unit-level estimates, and not average estimates, which already used the `estimate` column name. * The `transform_avg` argument in `tidy()` deprecated. Use `transform_post` instead. * `plot_*(draw=FALSE)` now return the actual variable names supplied to the `condition` argument, rather than the opaque "condition1", "condition2", etc. New models supported: * `blme` package. New features: * New functions: `avg_predictions()`, `avg_comparisons()`, `avg_slopes()` * Equivalence, non-inferiority, and non-superiority tests with the `hypotheses()` function and `equivalence` argument. * New experimental `inferences()` function: simulation-based inferences and bootstrap using the `boot`, `rsample`, and `fwb` package. 
* New `df` argument to set degrees of freedom manually for p and CI. * Pretty `print()` for all objects. * `by` argument - `TRUE` returns average (marginal) predictions, comparisons, or slopes. - Supports bayesian models. * `hypothesis` argument - Numeric value sets the null used in calculating Z and p. - Example: `comparisons(mod, transform_pre = "ratio", hypothesis = 1)` * All arguments from the main functions are now available through `tidy()`, and `summary()`: `conf_level`, `transform_post`, etc. * Bayesian posterior distribution summaries (median, mean, HDI, quantiles) can be customized using global options. See `?comparisons` Renamed functions (backward-compatibility is maintained by keeping the old function names as aliases): * `marginaleffects()` -> `slopes()` * `posteriordraws()` -> `posterior_draws()` * `marginalmeans()` -> `marginal_means()` * `plot_cap()` -> `plot_predictions()` * `plot_cme()` -> `plot_slopes()` * `plot_cco()` -> `plot_comparisons()` Bug fixes: * Incorrect results: In 0.8.1, `plot_*()` the `threenum` and `minmax` labels did not correspond to the correct numeric values. * Fix corner case for slopes when the dataset includes infinite values. * `mlogit` error with factors. * The `vcov` argument now accepts functions for most models. Other: * Removed major performance bottleneck for `slopes()` ## 0.8.1 * `deltamethod()` can run hypothesis tests on objects produced by the `comparisons()`, `marginaleffects()`, `predictions()`, and `marginalmeans()` functions. This feature relies on `match.call()`, which means it may not always work when used programmatically, inside functions and nested environments. It is generally safer and more efficient to use the `hypothesis` argument. * `plot_cme()` and `plot_cco()` accept lists with user-specified values for the regressors, and can display nice labels for shortcut string-functions like "threenum" or "quartile". 
* `posterior_draws`: new `shape` argument to return MCMC draws in various formats, including the new `rvar` structure from the `posterior` package. * `transform_avg` function gets printed in `summary()` output. * `transform_post` and `transform_avg` support string shortcuts: "exp" and "ln" * Added support for `mlm` models from `lm()`. Thanks to Noah Greifer. Bug fixes: * `hypothesis` argument with bayesian models and `tidy()` used to raise an error. * Missing values for some regressors in the `comparisons()` output for `brms` models. ## 0.8.0 Breaking change: * The `interaction` argument is deprecated and replaced by the `cross` argument. This is to reduce ambiguity with respect to the `interaction` argument in `emmeans`, which does something completely different, akin to the difference-in-differences illustrated in the Interactions vignette. 71 classes of models supported, including the new: * `rms::ols` * `rms::lrm` * `rms::orm` New features: * Plots: `plot_cme()`, `plot_cap()`, and `plot_cco()` are now much more flexible in specifying the comparisons to display. The `condition` argument accepts lists, functions, and shortcuts for common reference values, such as "minmax", "threenum", etc. * `variables` argument of the `comparisons()` function is more flexible: - Accepts functions to specify custom differences in numeric variables (e.g., forward and backward differencing). - Can specify pairs of factors to compare in the `variables` argument of the `comparisons` function. * `variables` argument of the `predictions()` function is more flexible: - Accepts shortcut strings, functions, and vectors of arbitrary length. 
* Integrate out random effects in bayesian `brms` models (see Bayesian analysis vignette) New vignettes: * Experiments * Extending marginal effects * Integrating out random effects in bayesian models Bug fixes and minor improvements: * The default value of `conf_level` in `summary()` and `tidy()` is now `NULL`, which inherits the `conf_level` value in the original `comparisons`/`marginaleffects`/`predictions` calls. * Fix typo in function names for missing "lnratioavgwts" * Interactions with `fixest::i()` are parsed properly as categorical variables * For `betareg` objects, inference can now be done on all coefficients using `deltamethod()`. previously only the location coefficients were available. * For objects from `crch` package, a number of bugs have been fixed; standard errors should now be correct for `deltamethod()`, `marginaleffects()`, etc. * Fixed a bug in the `tidy()` function for `glmmTMB` models without random effects, which caused all t statistics to be identical. ## 0.7.1 * New supported model class: `gamlss`. Thanks to Marcio Augusto Diniz. * `marginalmeans()` accepts a `wts` argument with values: "equal", "proportional", "cells". * `by` argument - accepts data frames for complex groupings. - in `marginalmeans` only accepts data frames. - accepts "group" to group by response level. - works with bayesian models. * `byfun` argument for the `predictions()` function to aggregate using different functions. * `hypothesis` argument - The matrix column names are used as labels for hypothesis tests. - Better labels with "sequential", "reference", "pairwise". - new shortcuts "revpairwise", "revsequential", "revreference" * `wts` argument is respected in `by` argument and with `*avg` shortcuts in the `transform_pre` argument. * `tidy.predictions()` and `tidy.marginalmeans()` get a new `transform_avg` argument. * New vignettes: - Unit-level contrasts in logistic regressions. Thanks to @arthur-albuquerque. - Python Numpy models in `marginaleffects`. 
Thanks to timpipeseek. - Bootstrap example in standard errors vignette. ## 0.7.0 Breaking changes: * `by` is deprecated in `summary()` and `tidy()`. Use the same `by` argument in the main functions instead: `comparisons()`, `marginaleffects()`, `predictions()` * Character vectors are no longer supported in the `variables` argument of the `predictions()` function. Use `newdata="fivenum"` or "grid", "mean", or "median" instead. Critical bug fix: * Contrasts with interactions were incorrect in version 0.6.0. The error should have been obvious to most analysts in most cases (weird-looking alignment). Thanks to @vmikk. New supported packages and models: * `survival::clogit` * `biglm`: The main quantities can be computed, but not the delta method standard errors. See https://github.com/vincentarelbundock/marginaleffects/issues/387 New vignette: * Elasticity * Frequently Asked Questions New features: * Elasticity and semi-elasticity using the new `slope` argument in `marginaleffects()`: eyex, dyex, eydx * `datagrid()` accepts functions: `datagrid(newdata = mtcars, hp = range, mpg = fivenum, wt = sd)` * New `datagridcf()` function to create counterfactual datasets. This is a shortcut to the `datagrid()` function with default to `grid_type = "counterfactual"` * New `by` arguments in `predictions()`, `comparisons()`, `marginaleffects()` * New `newdata` shortcuts: "tukey", "grid" * New string shortcuts for `transform_pre` in `comparisons()` * `marginalmeans()` now back transforms confidence intervals when possible. * `vcov` argument string shortcuts are now case-insensitive * The default contrast in `comparisons()` for binary predictors is now a difference between 1 and 0, rather than +1 relative to baseline. * documentation improvements ## 0.6.0 New supported packages and models: * `tidymodels` objects of class `tidy_model` are supported if the fit engine is supported by `marginaleffects`. 
New function: * `deltamethod()`: Hypothesis tests on functions of parameters * `plot_cco()`: Plot conditional contrasts New arguments: * `hypothesis` for hypothesis tests and custom contrasts * `transform_post` in `predictions()` * `wts` argument in `predictions()` only affects average predictions in `tidy()` or `summary()`. New or improved vignettes: * Hypothesis Tests and Custom Contrasts using the Delta Method: https://marginaleffects.com/vignettes/hypothesis.html * Multiple Imputation: https://marginaleffects.com/vignettes/multiple_imputation.html * Causal Inference with the g-Formula: https://marginaleffects.com/vignettes/gcomputation.html (Thanks to Rohan Kapre for the idea) Deprecated or renamed arguments: * `contrast_factor` and `contrast_numeric` arguments are deprecated in `comparisons()`. Use a named list in the `variables` argument instead. Backward compatibility is maintained. * The `transform_post` argument in `tidy()` and `summary()` is renamed to `transform_avg` to disambiguate against the argument of the same name in `comparisons()`. Backward compatibility is preserved. Misc: * `tidy.predictions()` computes standard errors using the delta method for average predictions * Support `gam` models with matrix columns. * `eps` in `marginaleffects()` is now "adaptive" by default: it equals 0.0001 multiplied the range of the predictor variable * `comparisons()` now supports "log of marginal odds ratio" in the `transform_pre` argument. Thanks to Noah Greifer. * New `transform_pre` shortcuts: dydx, expdydx * `tidy.predictions()` computes standard errors and confidence intervals for linear models or GLM on the link scale. ## 0.5.0 Breaking changes: * `type` no longer accepts a character vector. Must be a single string. * `conf.int` argument deprecated. Use `vcov = FALSE` instead. 
New supported packages and models: * `mlogit` * `mhurdle` * `tobit1` * `glmmTMB` New features: * `interaction` argument in `comparisons()` to compute interactions between contrasts (cross-contrasts). * `by` argument in `tidy()` and `summary()` computes group-average marginal effects and comparisons. * `transform_pre` argument can define custom contrasts between adjusted predictions (e.g., log adjusted risk ratios). Available in `comparisons()`. * `transform_post` argument allows back transformation before returning the final results. Available in `comparisons()`, `marginalmeans()`, `summary()`, `tidy()`. * The `variables` argument of the `comparisons()` function accepts a named list to specify variable-specific contrast types. * Robust standard errors with the `vcov` argument. This requires version 0.17.1 of the `insight` package. - `sandwich` package shortcuts: `vcov = "HC3"`, `"HC2"`, `"NeweyWest"`, and more. - Mixed effects models: `vcov = "satterthwaite"` or `"kenward-roger"` - One-sided formula to clusters: `vcov = ~cluster_variable` - Variance-covariance matrix - Function which returns a named squared matrix * `marginalmeans()` allows interactions * Bayesian Model Averaging for `brms` models using `type = "average"`. See vignette on the `marginaleffects` website. * `eps` argument for step size of numerical derivative * `marginaleffects` and `comparisons` now report confidence intervals by default. * New dependency on the `data.table` package yields substantial performance improvements. 
* More informative error messages and warnings * Bug fixes and performance improvements New pages on the `marginaleffects` website: https://marginaleffects.com/ * Alternative software packages * Robust standard errors (and more) * Performance tips * Tables and plots * Multinomial Logit and Discrete Choice Models * Generalized Additive Models * Mixed effects models (Bayesian and Frequentist) * Transformations and Custom Contrasts: Adjusted Risk Ratio Example Argument name changes (backward compatibility is preserved): * Everywhere: - `conf.level` -> `conf_level` * `datagrid()`: - `FUN.factor` -> `FUN_factor` (same for related arguments) - `grid.type` -> `grid_type` ## 0.4.1 New supported packages and models: * `stats::loess` * `sampleSelection::selection` * `sampleSelection::heckit` Misc: * `mgcv::bam` models allow `exclude` argument. * Gam models allow `include_smooth` argument. * New tests * Bug fixes ## 0.4.0 New function: * `comparisons()` computes contrasts Misc: * Speed optimizations * `predictions()` and `plot_cap()` include confidence intervals for linear models * More robust handling of in-formula functions: factor(), strata(), mo() * Do not overwrite user's `ggplot2::theme_set()` call ## 0.3.4 * Bug fixes ## 0.3.3 New supported models: * `mclogit::mclogit` * `robust::lmRob` * `robustlmm::rlmer` * `fixest` confidence intervals in `predictions` Misc: * Support `modelbased::visualisation_matrix` in `newdata` without having to specify `x` explicitly. * `tidy.predictions()` and `summary.predictions()` methods. * Documentation improvements. 
* CRAN test fixes ## 0.3.2 Support for new models and packages: * `brglm2::bracl` * `mclogit::mblogit` * `scam::scam` * `lmerTest::lmer` Misc: * Drop `numDeriv` dependency, but make it available via a global option: options("marginaleffects_numDeriv" = list(method = "Richardson", method.args = list(eps = 1e-5, d = 0.0001))) * Bugfixes * Documentation improvements * CRAN tests ## 0.3.1 documentation bugfix ## 0.3.0 Breaking changes: * `predictions` returns predictions for every observation in the original dataset instead of `newdata=datagrid()`. * `marginalmeans` objects have new column names, as do the corresponding `tidy` and `summary` outputs. New supported packages and models: * `brms::brm` * `rstanarm::stanglm` * `brglm2::brmultinom` * `MASS::glmmPQL` * `aod::betabin` Misc: * `datagrid` function supersedes `typical` and `counterfactual` with the `grid.type` argument. The `typical` and `counterfactual` functions will remain available and exported, but their use is not encouraged. * `posterior_draws` function can be applied to a `predictions` or a `marginaleffects` object to extract draws from the posterior distribution. * `marginalmeans` standard errors are now computed using the delta method. * `predictions` standard errors are now computed using the delta method when they are not available from `insight::get_predicted`. * New vignette on Bayesian models with `brms` * New vignette on Mixed effects models with `lme4` * If the `data.table` package is installed, `marginaleffects` will automatically use it to speed things up. * Contrast definition reported in a separate column of `marginaleffects` output. * Safer handling of the `type` argument. * Comprehensive list of supported and tests models on the website. * Many bug fixes * Many new tests, including several against `emmeans` ## 0.2.0 Breaking change: * `data` argument becomes `newdata` in all functions. 
New supported packages and models: * `lme4:glmer.nb` * `mgcv::gam` * `ordinal::clm` * `mgcv` `marginalmeans`: * New `variables_grid` argument `predictions`: * Support `mgcv` `plot_cap` * New `type` argument Misc: * New validity checks and tests ## 0.1.0 First release. Bravo! Thanks to Marco Avina Mendoza, Resul Umit, and all those who offered comments and suggestions. marginaleffects/MD50000644000176200001440000004430314560154575013734 0ustar liggesusers97da277069772969d2634d781decf7d0 *DESCRIPTION c8c25b767a658920ebb45c84251300d1 *NAMESPACE c345ec0e88e8fd27299d7435967a5a7a *NEWS.md bb1da3e78a3f5dd03af88bb04cafb8ca *R/RcppExports.R 113931fe7bbad00cdea15dd91343a2a4 *R/backtransform.R 215bc2e132dee432c55d0fb0329c6788 *R/bootstrap_boot.R 11067a113f30465feac4810c73711484 *R/bootstrap_fwb.R c332d55375cb7bad8b9faff88f61c158 *R/bootstrap_rsample.R d0da17fc7cb4cc3e844f93cd7757dd40 *R/broom.R 18ea90260881eafcec66c8c58bd046e7 *R/by.R 51285d2ece185609debc1ad7c0dea3a9 *R/ci.R 00f04d03aa97aa7147a1598e916c38f2 *R/comparisons.R 4b349397e2529b4dda7f5af7fb3bbfb4 *R/complete_levels.R 5298a3c6412e495adaeb55c3cb638d51 *R/conformal.R d2eb2b4516cdc86866f53d980d0c94c1 *R/datagrid.R ee1c3f7de16d9d1eb96ef40383919fcc *R/deprecated.R 82c48550b6a695bdf71d0f850ee0eb0b *R/equivalence.R 3c9e0b8d8defb89dfbfa74bea75b8887 *R/get_averages.R fa771a0ded49cd91d500c4be32e0f344 *R/get_coef.R 1d832e0ef8a52ee668b8c27e65417195 *R/get_contrast_data.R 8728467cc0e3f3771ffb530e91eca231 *R/get_contrast_data_character.R 8c70fb880586b634206e4a9c86e1ab10 *R/get_contrast_data_factor.R b6fd7a6f7c4b4e4e6554db17ced1cf23 *R/get_contrast_data_logical.R afdedd2e6a4aefcbdc7cba78ea0bbb37 *R/get_contrast_data_numeric.R c9e3dccd04eea8632c22e13297a1e90d *R/get_contrasts.R 1fe5ddda867059ca42687fc84d13bbb5 *R/get_group_names.R 7f2c9d2ffc00f794324f3661c924a72e *R/get_hypothesis.R 3b2c04720d67dab17ba2b9f1868190f9 *R/get_jacobian.R 08c5777e213c8aa2ce328c4bd3c7136d *R/get_model_matrix.R e548ba7573c9b0c665317bc85dff653d 
*R/get_model_matrix_attribute.R e3f3d2e3c52bfcbaefe7b7903e8ada60 *R/get_modeldata.R e7213c0f025c868bb0897d60790b6dd9 *R/get_predict.R df8f33051612f22d9a8627b87202b5ef *R/get_se_delta.R c1d0b40b25414f1eeb2cbab35d56fd20 *R/get_term_labels.R 1406ee14b1c0f42cae69bc7f23effd1f *R/get_vcov.R 7078102a5eaa8e8d5d49b707411d52a4 *R/github_issue.R ba91a987205e717587704726a59e68a1 *R/hush.R 5094046582b828f0a205472f6faa4187 *R/hypotheses.R 8f1fc5be4fba1aac2bc7c7bf87e0ebff *R/hypotheses_joint.R 1a546877b19ab04ca206d341f0a3c4ab *R/imputation.R 02da1e6e0c0c98fa9f6ddbaf336de121 *R/inferences.R ca9964357eb3917f6691bccbda59fe69 *R/mean_or_mode.R b16f4dd6e06ae13bf66e18a081640de9 *R/methods.R 45da88d5337089b487cde2bb95734a24 *R/methods_MASS.R ee0d2b56a5cb611f02ce49a581dff154 *R/methods_MCMCglmm.R 0c32a2964e96f693ad8b9ad87249479c *R/methods_Rchoice.R 153ec50b032f6000bf8634efb38a5a39 *R/methods_afex.R 9b76b0fb1ece5155f87e02998f70e245 *R/methods_aod.R 1639448040715888da590fdef0eda402 *R/methods_betareg.R 664b234b95e2bbd211c3475f2b473a06 *R/methods_bife.R b405afd1dac4df9c5f2100f25e73347f *R/methods_biglm.R 3656e4bac5cd2c76af7d755ccf1c8ed2 *R/methods_brglm2.R 4ce50f350a2e22ad99f565513f834459 *R/methods_brms.R cf983748c9301ed7455894c9ecdfe654 *R/methods_crch.R 86ac1337e67cb6b405aae13cfbafcef9 *R/methods_dataframe.R 57f8db0a3f536d8d38b1ff8474fca54e *R/methods_dbarts.R 16c6859ad75215a6fae3c401dbc3c383 *R/methods_fixest.R c4e6137c681083ab6363059cc41de8d4 *R/methods_gamlss.R 8dd8fb94a2f0d8536f1c438f59cc9b5e *R/methods_glmmTMB.R 3746a261e273e70d27edddbfbdc7b18c *R/methods_glmx.R d29542a2b4bb5a4d1c7bc7c151eb355f *R/methods_inferences_simulation.R 721e9f301b188f48fc7a5d3c5190ad2d *R/methods_lme4.R de5f408473610b110f14afb6165a74e2 *R/methods_mclogit.R 96bdd95b493e263675e7e131c13af0b3 *R/methods_mgcv.R bdd287c114120dfd86a13790736be008 *R/methods_mhurdle.R d2cf8244bd4de0eca23a15da9e3813a7 *R/methods_mlm.R 8b2928d52d5f5ebd71448b0f8cf6aa52 *R/methods_mlogit.R 969f759880202a3adad0fddcc9a0627c 
*R/methods_mlr3.R 7bc9665f23cbe12b51cccb54c666f097 *R/methods_nlme.R 33e6d249f5c498730c5c47fb8452c90d *R/methods_nnet.R 1ca5985db15036f744428886b9082760 *R/methods_ordinal.R f6f6e2c9e5b0401095d3404f4bdc86fe *R/methods_plm.R d0a4a6b4d611fa455a249d61a0d482f8 *R/methods_pscl.R 45e7bbccb58edb2720a9daa67de702f6 *R/methods_quantreg.R 76406644f1ab215d012f0c24eb5504d5 *R/methods_rms.R 3512e62b3fd5df31858ac52ceff73b30 *R/methods_robustlmm.R 6136062cf0dc2d5d144101d3cf96b4fe *R/methods_rstanarm.R 8d4a45cbf9ee1de2174262096407bc2b *R/methods_sampleSelection.R d6a009aa1aadeff7d5f8af72bcea8120 *R/methods_scam.R d24518f21b90784835bf98d4faeb0e50 *R/methods_stats.R a0c870faee8cfee4b91e62a9bcb3c961 *R/methods_survey.R 04105804c7393e4870f6ef63c85a0ede *R/methods_survival.R 66cb56aaeb95f4f6de1a6cf8548464e7 *R/methods_tidymodels.R 30eec5e50ebb75eee60314d0e3c75249 *R/methods_tobit1.R 576257c245e9b2f490349fbd70f8d218 *R/modelarchive.R 970b67d14246cb1f71b95f90f9f54b93 *R/myTryCatch.R 3df0bda30d2d28df58275caae5ff831c *R/package.R c2e3bc3b6871c223d34897427426b7fe *R/plot.R 86a2bed7ff2587c592c919e6259f72cd *R/plot_build.R 13ca37bab7d8a4fbedcae1472d006585 *R/plot_comparisons.R 30904d661500efd5868bf0ae27a3073a *R/plot_predictions.R c1ea04a6f966fe1fcd4dd563750f849f *R/plot_slopes.R 4d464795da0e9545fe351881ffad23c1 *R/posterior_draws.R 84e64e9b23a7de3babe60e2aea5e76fb *R/predictions.R a7b5d426d9678b34dbfb142889b645e1 *R/print.R f5c51615c1504792626a320c347bd90d *R/recall.R fb6446b4acca895f62e147e2f577edc0 *R/sanitize_comparison.R 8376c26ff6090c9a780f3a5d421e42c7 *R/sanitize_condition.R a3b8b0611f32a8f7f44ce3397b426815 *R/sanitize_conf_level.R afefa9b25eb40cb39cfb1d76d31b03a6 *R/sanitize_hypothesis.R f092223ee4603fe29b5cadcf8a7455cd *R/sanitize_interaction.R f97ab88751d1622f884080980769d206 *R/sanitize_newdata.R faa70a06a3428b56c5dd7b773667673d *R/sanitize_numderiv.R fdc6cef5cbf6ff2229e479c4758fa58a *R/sanitize_type.R bd844ce55ea8189e79ea751fc08b4fd1 *R/sanitize_variables.R 
e40df5bbcc4d88eb52cd261482c5a8fe *R/sanitize_vcov.R 56a607380a56072ec5b4f91f497b5700 *R/sanity.R 6385876a8a17b4a1e32d65af31e41976 *R/sanity_by.R f9e5c5c4f2e2cdb89a0d8f46c786ff8f *R/sanity_dots.R 1fbaa61302a68e91fb50f094e7c7df55 *R/sanity_model.R c3468fc7ad0a1e1d7afbd66058c8776f *R/set_coef.R a7ec0ae417f84bd1b9d1ffd9fcb2fb9a *R/settings.R 1c481d737a365b03dac8ed811f9db15a *R/slopes.R aaef0ac1c9d1ecf3dc5ff56f4d784242 *R/sort.R 5aca9f19a0e16d0dbdca9963f750e99b *R/tinytest.R 8e763870c3f878fef14291ee291dba70 *R/type_dictionary.R 8c3288b68106bd3979351a0e10fb2d0a *R/unpack_matrix_cols.R 273e190b9e9d631f808d36f3749be6d7 *R/utils.R 7210923781c8f70f1a8a151b0da7499a *README.md 7388d57168b866210b551329420431f6 *inst/COPYRIGHTS 94b004e9ab19331880105b90c9e6672a *inst/WORDLIST 6bd8fd8734beaa2b36cc242d2c769f06 *inst/tinytest/_tinysnapshot/df-t.txt 665f4aa0a133e4659b1cc7cd3308da5c *inst/tinytest/_tinysnapshot/df-z.txt 70baf2033461f7eedf3c43f4112fbdce *inst/tinytest/_tinysnapshot/equivalence-avg_comparisons.txt 3496161c86b73a319efd9b75dd20abaf *inst/tinytest/_tinysnapshot/plot_comparisons-2effects.svg 3061991006036fbb57539c9310676522 *inst/tinytest/_tinysnapshot/plot_comparisons-minmax_x.svg 3c2e86f94486c425d13fc3393eff7119 *inst/tinytest/_tinysnapshot/plot_comparisons-rr_titanic.svg 3f441e95c3833c5adb6514b07c160140 *inst/tinytest/_tinysnapshot/plot_predictions-alpha.svg 78a387db8654197070bbb86a98b2affa *inst/tinytest/_tinysnapshot/plot_predictions-gray.svg 4bad8f29017897da83925f0030018f67 *inst/tinytest/_tinysnapshot/plot_predictions.svg 07643558feae9a9be61fbc9423d801c4 *inst/tinytest/_tinysnapshot/plot_predictions_conf_40.svg 32504909fd4c564d4df06642865b3aa6 *inst/tinytest/_tinysnapshot/plot_predictions_conf_99.svg db73a095653b172c2c203274cdadb848 *inst/tinytest/_tinysnapshot/plot_predictions_link.svg d8e91c0fd72f7e00854453d2b426f8b7 *inst/tinytest/_tinysnapshot/plot_predictions_response.svg 216dfa5f254e010800def2222bed8f4b 
*inst/tinytest/_tinysnapshot/plot_predictions_vs_categorical_x_axis.svg 7ddd54c918aa54b0d144484809f6da5f *inst/tinytest/_tinysnapshot/plot_predictions_vs_continuous_x_axis.svg a7bcf5fa0f18f8c92eb985f0b2c00d3f *inst/tinytest/_tinysnapshot/plot_slopes_categorical.svg c1a4eebfe19862ef30857057b3cd2aa8 *inst/tinytest/_tinysnapshot/plot_slopes_continuous.svg b6a99e8363cba0eaddebff83706e8c0f *inst/tinytest/_tinysnapshot/plot_slopes_factor_facets.svg 164973877ac9d76afa303c501a497d6b *inst/tinytest/_tinysnapshot/plot_slopes_two_conditions.svg fcd0a95afa8ec894c9390cf15e28de23 *inst/tinytest/_tinysnapshot/print-comparisons_1focal_dataframe.txt bc4169197997b0f43a20437e2d5a9827 *inst/tinytest/_tinysnapshot/print-comparisons_1focal_datagrid.txt 46b061b216c52e98fd1c4d161162e1ec *inst/tinytest/_tinysnapshot/print-comparisons_by.txt 15323fec9b6cc541a2c44b0165949f84 *inst/tinytest/_tinysnapshot/print-marginal_means.txt 3bef4e71d405fb2c736cdaca295682d4 *inst/tinytest/_tinysnapshot/print-predictions.txt d7c89b0365244a1251df993527d29981 *inst/tinytest/_tinysnapshot/print-predictions_by.txt 5d16143baec7376efc095c1b6d0acad0 *inst/tinytest/_tinysnapshot/print-predictions_datagrid.txt 5a71c83d1c941d8452ad4c4c3bff62f5 *inst/tinytest/_tinysnapshot/print-predictions_newdata.txt ca0be80d8c67118daeddb0e522dc254f *inst/tinytest/_tinysnapshot/summary-comparisons_transform.txt 001ddcff8e2500cffe55d7a975f305c6 *inst/tinytest/_tinysnapshot/summary-hypotheses.txt 8ebc9a0778f58f9cb97728778f7fc835 *inst/tinytest/_tinysnapshot/summary-marginaleffects.txt d1a58fa12cf96a61e27830629eb90a8e *inst/tinytest/_tinysnapshot/summary-marginaleffects_conf_level_20.txt 994cd91459cce7c3841782cd787d6aef *inst/tinytest/_tinysnapshot/summary-marginaleffects_conf_level_90.txt a5273604d61aacbc54a4c366f67337d0 *inst/tinytest/_tinysnapshot/summary-marginalmeans.txt a1edb96786dce79877964973befb158b *inst/tinytest/helpers.R 8be50bd18cd3eecb401005d5b071ed79 *inst/tinytest/test-aaa-warn_once.R 70d21198438187b56663852731de44d9 
*inst/tinytest/test-analytic.R d41d8cd98f00b204e9800998ecf8427e *inst/tinytest/test-backward.R 4954b959f46fe93410c02a68963e6eb7 *inst/tinytest/test-bugfix.R 22f5d5c4c8354d26d1076682090ce2b0 *inst/tinytest/test-by.R 74d47b6df624a4f184f7eb521158e9b2 *inst/tinytest/test-call.R f74fb4fb756063951d405612290eaf39 *inst/tinytest/test-character.R 6fd369f3e4cab1eea4725323fa80da61 *inst/tinytest/test-comparisons-interaction.R a96b4d17ae04851636ae35c566e14ebe *inst/tinytest/test-comparisons.R ca2b8b79bab2adb01cb0b03e3b79ff2c *inst/tinytest/test-complete_levels.R 2e71185815fe2d303d413e4af5de2ff3 *inst/tinytest/test-conf.level.R 11d6fb8d7ee81837e6da6d695cef041c *inst/tinytest/test-contrast.R 8f63c829e316c97c41201952f7a40b40 *inst/tinytest/test-counterfactual.R dd6b1e07de10900a0c9164b5354a56f6 *inst/tinytest/test-datagrid.R 751b1706e28c704b76b4c6b2ea54b9c4 *inst/tinytest/test-df.R 6a528af82291b784f35bcb3181a257fb *inst/tinytest/test-dots.R d80acfa3ab7626baeb09538bb5003c5c *inst/tinytest/test-elasticity.R bc5cc30bd39ff1be3012df8f14184751 *inst/tinytest/test-eps.R e56721df5683790ce5f8e5ac3db23e54 *inst/tinytest/test-equivalence.R 20f478007d59b7f88276a00c46f518ef *inst/tinytest/test-factor.R db789f0465c6da60da3e939f9fcbb70e *inst/tinytest/test-gformula.R 31a48db3cde62e4b2505268ea0aa93ed *inst/tinytest/test-hypotheses.R 2eee293893fd73cf77bb0bea99349cfc *inst/tinytest/test-hypotheses_joint.R f35b2438c1b46d353f8deca9bbb61ce9 *inst/tinytest/test-hypothesis.R 18a8aca8dbbe41629c68ee2be7e11b02 *inst/tinytest/test-inferences.R 457718e2bc013060f8c002bbdeddbc71 *inst/tinytest/test-interaction.R 783caaf14c53c717ab7a084a7feed7c8 *inst/tinytest/test-jss.R 836152b88a3ff648a60811a8bd1cc7b3 *inst/tinytest/test-logical.R aa33706bf381a8ef15ec49ae8c9d0907 *inst/tinytest/test-marginal_means.R 9c63ceb2ce4a786adc13b0e498118bcc *inst/tinytest/test-marginaleffects.R b6a8970f65bf15bb49903fec0fc67ef8 *inst/tinytest/test-misc.R 5818aad1bdc2e5f56018a70a6fc6d47e *inst/tinytest/test-missing.R 
b18568acdea841390264d72c7608867f *inst/tinytest/test-nested.R 883043e49186f5cb135161ed73a38af9 *inst/tinytest/test-newdata.R 6e454e397435f2de0e44c732b05a6cb1 *inst/tinytest/test-p_adjust.R 51e9906d2dec44460bc23995d335e209 *inst/tinytest/test-pkg-AER.R aa158a48b98ace2eb10cc230cf4cf251 *inst/tinytest/test-pkg-Amelia.R 6138ee1d09bb0855100c710169d6c46e *inst/tinytest/test-pkg-DCchoice.R 8bccbef837858d27b710276636e7ee52 *inst/tinytest/test-pkg-MASS.R 9d5926f4ebda0625843b582596d90683 *inst/tinytest/test-pkg-MCMCglmm.R 445e15bdd8b27ebd0a6ffcb12b72e7f8 *inst/tinytest/test-pkg-MatchIt.R 4e460c0b5345e7ba7efc2a3e1dc2578e *inst/tinytest/test-pkg-Rchoice.R f0c2f3f3465c57adb9574ad647b11c0b *inst/tinytest/test-pkg-afex.R 86744219adc4123412b10214987f91af *inst/tinytest/test-pkg-aod.R 60587a562af1846eccda43802a00f1a4 *inst/tinytest/test-pkg-betareg.R fda19bc82023fe616d92713d4d9953cf *inst/tinytest/test-pkg-bife.R 5dfad8a17f275247409947d3031785b9 *inst/tinytest/test-pkg-biglm.R 220638872a14554ff03cb8945996d2e9 *inst/tinytest/test-pkg-blme.R 8d59bfc930532ec884d55bc861abadc5 *inst/tinytest/test-pkg-brglm2.R d1d06e6eeefcd4d129ea27a449ae276c *inst/tinytest/test-pkg-brms-average.R 9b88c181f400962f2918859c16e18ccb *inst/tinytest/test-pkg-brms.R 027d5c7c943d8a9482cca15753d19e9f *inst/tinytest/test-pkg-crch.R 1bd338b7c16ce6d2ea9a3274c4b27aa4 *inst/tinytest/test-pkg-dbarts.R 7038878f47f92ed473a2a40f494a75d8 *inst/tinytest/test-pkg-estimatr.R 4cbb55776ec61a02e61fbfad46b6a386 *inst/tinytest/test-pkg-fixest.R 4cf4e795232f89ffce620a4d6d7ba395 *inst/tinytest/test-pkg-gam.R 93b853fa9b008d8631c87b695f6d3252 *inst/tinytest/test-pkg-gamlss.R 35558730f72932df54936f42d52bc363 *inst/tinytest/test-pkg-geepack.R 1d1abeee1bb420b90429139d256d3cc8 *inst/tinytest/test-pkg-ggeffects.R f9ee3c58d78248c6c07594f5f8dfeef3 *inst/tinytest/test-pkg-glmmTMB.R ee6289b47f26dc809cbdd53a195ce53a *inst/tinytest/test-pkg-glmx.R f31c24f69a7fe0f17469962d3ccec640 *inst/tinytest/test-pkg-ivreg.R da4bb5b598724625153e9c08d8ab3a74 
*inst/tinytest/test-pkg-lme4.R a8f92ad50d2bcbecf667ff2dc6b771e5 *inst/tinytest/test-pkg-lmerTest.R a11741900594686e1581ba211626d01e *inst/tinytest/test-pkg-logistf.R 878bd7ba0ab6bc569c2779c2b444c97a *inst/tinytest/test-pkg-mclogit.R b21f6eab8c07a07d2518138d2bb76d98 *inst/tinytest/test-pkg-mgcv.R 87327ea6782e7492247d1da41209d11a *inst/tinytest/test-pkg-mhurdle.R 617ba94d5d781d45c99f511f3b584f7b *inst/tinytest/test-pkg-mice.R c59995e0a5c45bde16f9fc9bf64358e1 *inst/tinytest/test-pkg-mlogit.R f9331d91288cffb2a977d69c2b0667c3 *inst/tinytest/test-pkg-mlr3verse.R 4c5294f9daf17a5dc7b7281bf43bcb07 *inst/tinytest/test-pkg-mvgam.R 4774d85a3efe66a899908a3b856ca505 *inst/tinytest/test-pkg-nlme.R a78ce1cbc92668e727aeef4b1ea0eb8f *inst/tinytest/test-pkg-nnet.R 6789a223ee01a8d17363b177bbdee2f3 *inst/tinytest/test-pkg-ordinal.R b8668191afc987af8702e529d5f2d217 *inst/tinytest/test-pkg-phylolm.R 866203636c3090d20ad8d508a1419cad *inst/tinytest/test-pkg-plm.R c0663d88a138ceb8b096b5dacaba392b *inst/tinytest/test-pkg-pscl.R 4620d09922a53ab34d97f0245dfa9cf1 *inst/tinytest/test-pkg-quantreg.R cb0078d78ce2d5fdb0595013e5349d4a *inst/tinytest/test-pkg-rms.R cb56fe62cb3e212d63b4f4e6441fdabe *inst/tinytest/test-pkg-robust.R 83606a06fec328dad0897ff1627b1f19 *inst/tinytest/test-pkg-robustbase.R 9425fa1d7123adc306f08b239d6dba80 *inst/tinytest/test-pkg-robustlmm.R 7575f7c81a4849b1de5c2e3b1fbe1b2d *inst/tinytest/test-pkg-rstanarm.R e0189f89a902b5d0a1fefa9a5d9965b5 *inst/tinytest/test-pkg-sampleSelection.R 8e35d1a6bfe133f36f8f3d47259ffbe5 *inst/tinytest/test-pkg-scam.R 8883662bf73592ac5a79273e6f55033a *inst/tinytest/test-pkg-speedglm.R 9fc6a4b64915fff5219d37054ec398a0 *inst/tinytest/test-pkg-stats.R c7aa0ef7a232495b1ee45eaf1e5e5fcf *inst/tinytest/test-pkg-survey.R 48845870c7733eb8c2f3dca1d9ef9d48 *inst/tinytest/test-pkg-survival.R 26918cf6158835d1bcd5a395e69fe3d1 *inst/tinytest/test-pkg-tidymodels.R fa6ceb4441917e70f797bd29811e69ce *inst/tinytest/test-pkg-tobit1.R 6daccf1a3b3383b931ffecabe5e56bac 
*inst/tinytest/test-pkg-truncreg.R 3aa6beb1069a6af68410450b95576f34 *inst/tinytest/test-plot.R 4e49d1b296ddc01dc4732e0cdeb2d2bd *inst/tinytest/test-plot_comparisons.R fb02b27b2a8cb765bda5a0da2eeac746 *inst/tinytest/test-plot_predictions.R 7a73c12cb1ce103c8c04138b5a847b46 *inst/tinytest/test-plot_slopes.R d1e588f68d1e8ea1f7fb4d239ad469a2 *inst/tinytest/test-predict_type.R 6927afa63c64ed5c3fa5a75f5bd63d52 *inst/tinytest/test-predictions.R ae2d91bf466f3f02b0ade5729ae603a0 *inst/tinytest/test-print.R 023f0cbc23d3fd0554f886b0219ebbbe *inst/tinytest/test-rank-deficient.R 011e16034a190259adff805631858be2 *inst/tinytest/test-sanity.R 6f1b87d6a204145fc54e8cde88666e00 *inst/tinytest/test-scope.R 56f7ae3ce2517c73d735eeb787b99b17 *inst/tinytest/test-tinytest.R 5af10ac52131c198d927d2c2cbbe176f *inst/tinytest/test-transform_post.R 16ef3772a283a93c1bc22e89a6bea2ab *inst/tinytest/test-transform_pre.R c89013e4bee74119d36a6932e384f969 *inst/tinytest/test-typical.R 0e0b94f417fac67621e8b164499f6cdc *inst/tinytest/test-utils.R 786a9d4decbbaf9468be0b7dd77877cd *inst/tinytest/test-variables.R 5d778e061d56f046bf13ca4000156544 *inst/tinytest/test-vcov.R eea099812df994a30ec5bc3a1accc041 *inst/tinytest/test-weights.R bcb8957ce587b9f9698eda25063ea844 *man/comparisons.Rd 84668bf4b7fa8fc8e3c6351e1bad4e03 *man/complete_levels.Rd d50d66d613f3f5842139a322764c8e71 *man/datagrid.Rd 26f5dc2b3a87bba04de66e5d373f7ba9 *man/datagridcf.Rd 88f305fbe39fd64a4f0ab4cbf7e098f0 *man/deltamethod.Rd 615e980f40da029afe1a3eb08d4a4c18 *man/expect_margins.Rd a13ad2214b8d6671c34bb48ddec118be *man/expect_predictions.Rd 215fd0c51fa09ca96f6aa35da934803f *man/expect_slopes.Rd 2b0a7cc5ceb04acdb70d81087f214a8a *man/figures/zoo_banner.png b49853d021ae17fe65c659f9c7dcc0f8 *man/get_averages.Rd 0e76aafdeaeca82ce4d82904a2088a66 *man/get_coef.Rd 564e21c7b44d04c5306b076f8a7b8a0c *man/get_group_names.Rd 990b0c1bafd409348c6d043d309b1a53 *man/get_model_matrix.Rd 6e119b6d99193933700f121b1e822a46 *man/get_predict.Rd 
4804d3eb1f99e47e526c464acfb7f8d6 *man/get_varcov_args.Rd 0b7a28b271abbd7981c24f2b4fc8fb5f *man/get_vcov.Rd c17f29041e7dac8e6031c039e4794afb *man/hypotheses.Rd 90fd09055e068cb2f070e3a54fd81d82 *man/inferences.Rd d122f353a64d83e5f1537251d5d30710 *man/marginal_means.Rd 37f2c2cd77e30a3bd7e21236f21f5c5a *man/marginaleffects.Rd 74c860b56c8c296d9b2e58da6c139822 *man/marginalmeans.Rd f5eee91998298d050863f946e8d5573f *man/meffects.Rd 49583df7f6ae6da0cfaef584f73e94e0 *man/plot_comparisons.Rd 5f0fb2b9d5e0d0265cdc9289847a97c7 *man/plot_predictions.Rd ebd737e48779f19f748572bd6f5fcae9 *man/plot_slopes.Rd 281a145bb93056f6201b86f985857c98 *man/posterior_draws.Rd bb6528442188174f54cb4c0abfbb469b *man/posteriordraws.Rd 80b2b7ed09286766563c478faba8d6d8 *man/predictions.Rd 72630997c8164a5318ba112626f86970 *man/print.marginaleffects.Rd 1e18cfe170451992ed72e3a2e6313dee *man/reexports.Rd 71dc6f0bc51ebe27210caeab55aaea88 *man/sanitize_model_specific.Rd ab9750e5b89a16979ddfd2a232241c06 *man/set_coef.Rd 87b0c798913c8f7a40f763b4849755f8 *man/slopes.Rd e84fbde88a4134d4d943cd148c5e0a67 *src/RcppExports.cpp 52bfe4eb70930b6c30ac049945bb3d8a *src/eigen.cpp ff2d6bca48b0cc82868ec8a0ad8091c4 *tests/spelling.R 1816589a93739ed6bb3f576c586b3429 *tests/tinytest.R marginaleffects/inst/0000755000176200001440000000000014541720224014362 5ustar liggesusersmarginaleffects/inst/COPYRIGHTS0000644000176200001440000000354414541720224016006 0ustar liggesusersCopyright (c) 2022, Vincent Arel-Bundock ######################## Functions in `R/methods_gamlss.R` file were adatped from the `gamlss` package for `R`. Copyright (c) 2022, Mikis Stasinopoulos [aut, cre, cph] shared under the GPL3 License. ######################## Functions in the `R/get_hdi.R` file were extracted from the `HDInterval` package for `R`. Copyright (c) 2020, Mike Meredith and John Kruschke, shared under the GPL3 License. 
######################## Some of the methods in the `R/mean_or_mode.R` file were adapted from the `prediction` R package, copyright Thomas J. Leeper, shared under the MIT License (copied below). Tests in the `tests/testhat/test-analytical.R` were adapted from the `margins` R package, copyright Thomas J. Leeper (2018), shared under the MIT License (copied below). Copyright (c) 2016-2018, Thomas J. Leeper. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
marginaleffects/inst/tinytest/0000755000176200001440000000000014560042124016242 5ustar liggesusersmarginaleffects/inst/tinytest/test-pkg-estimatr.R0000644000176200001440000000501414560035476021765 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("estimatr") requiet("emmeans") requiet("margins") requiet("broom") Km <<- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/sem/Kmenta.csv") dat <- mtcars dat$cyl <- factor(dat$cyl) dat <<- dat # lm_lin: no validity mod <- lm_lin(mpg ~ am, ~ hp + cyl, data = dat) expect_slopes(mod) expect_slopes(mod, n_unique = 9) # iv_robust vs. stata stata <- readRDS(testing_path("stata/stata.rds"))$estimatr_iv_robust model <- iv_robust( Q ~ P + D | D + F + A, se_type = "stata", data = Km) mfx <- slopes(model) tid <- tidy(mfx) expect_slopes(model) mfx <- merge(tid, stata) expect_equivalent(mfx$estimate, mfx$dydxstata) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .1) # lm_robust vs. stata vs. emtrends model <- lm_robust(carb ~ wt + factor(cyl), se_type = "HC2", data = dat) stata <- readRDS(testing_path("stata/stata.rds"))$estimatr_lm_robust mfx <- tidy(slopes(model)) mfx$term <- ifelse(mfx$contrast == "6 - 4", "6.cyl", mfx$term) mfx$term <- ifelse(mfx$contrast == "8 - 4", "8.cyl", mfx$term) mfx <- merge(mfx, stata) expect_equivalent(mfx$estimate, mfx$dydxstata) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .1) # emtrends mfx <- slopes(model, newdata = datagrid(cyl = 4, wt = 2, newdata = dat), variables = "wt") em <- emtrends(model, ~wt, "wt", at = list(cyl = 4, wt = 2)) em <- tidy(em) expect_equivalent(mfx$estimate, em$wt.trend, tolerance = .001) expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) # margins does not support standard errors tmp <- mtcars tmp$cyl <- factor(tmp$cyl) mod <- lm_robust(carb ~ wt + cyl, data = tmp, se_type = "stata") mar <- margins(mod, data = head(tmp)) mfx <- slopes(mod, newdata = head(tmp)) expect_true(expect_margins(mfx, mar, 
se = FALSE)) # iv_robust: predictions: no validity # skip_if_not_installed("insight", minimum_version = "0.17.1") model <- iv_robust(Q ~ P + D | D + F + A, se_type = "stata", data = Km) expect_predictions(predictions(model), n_row = nrow(Km)) expect_predictions(predictions(model, newdata = head(Km)), n_row = 6) # lm_robust: marginalmeans predictions: no validity # skip_if_not_installed("insight", minimum_version = "0.17.1") tmp <- mtcars tmp$cyl <- as.factor(tmp$cyl) tmp$am <- as.logical(tmp$am) model <- lm_robust(carb ~ wt + am + cyl, se_type = "stata", data = tmp) expect_predictions(predictions(model), n_row = nrow(tmp)) expect_predictions(predictions(model, newdata = head(tmp)), n_row = 6) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-conf.level.R0000644000176200001440000000237614541720224021410 0ustar liggesuserssource("helpers.R") using("marginaleffects") # conf.level argument changes conf.int size dat <- mtcars dat$cyl <- factor(dat$cyl) mod <- lm(mpg ~ hp + cyl, data = mtcars) mfx95 <- slopes(mod, conf.level = .95) mfx99 <- slopes(mod, conf.level = .99) cmp95 <- comparisons(mod, conf.level = .95) cmp99 <- comparisons(mod, conf.level = .99) pre95 <- predictions(mod, conf.level = .95) pre99 <- predictions(mod, conf.level = .99) expect_true(all(mfx95$conf.low > mfx99$conf.low)) expect_true(all(mfx95$conf.high < mfx99$conf.high)) expect_true(all(cmp95$conf.low > cmp99$conf.low)) expect_true(all(cmp95$conf.high < cmp99$conf.high)) expect_true(all(pre95$conf.low > pre99$conf.low)) expect_true(all(pre95$conf.high < pre99$conf.high)) # tidy() inherits conf_level cmp95 <- comparisons(mod) cmp99 <- comparisons(mod, conf_level = .99) tid95 <- tidy(cmp95) tid99 <- tidy(cmp99) expect_true(all(tid95$conf.low > tid99$conf.low)) expect_true(all(tid95$conf.high < tid99$conf.high)) # conf.low manual mod <- lm(mpg ~ hp, data = mtcars) cmp <- comparisons(mod) critical_z <- qnorm(.025) lb <- cmp$estimate - abs(critical_z) * cmp$std.error ub <- 
cmp$estimate + abs(critical_z) * cmp$std.error expect_equivalent(cmp$conf.low, lb) expect_equivalent(cmp$conf.high, ub) rm(list = ls())marginaleffects/inst/tinytest/_tinysnapshot/0000755000176200001440000000000014554070103021145 5ustar liggesusersmarginaleffects/inst/tinytest/_tinysnapshot/plot_predictions.svg0000644000176200001440000007441214541720224025261 0ustar liggesusers 0 10 20 30 40 100 200 300 hp mpg wt 1.513 2.5425 3.325 3.65 5.424 marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions_vs_continuous_x_axis.svg0000644000176200001440000004747414541720224031642 0ustar liggesusers -20 0 20 40 2 3 4 5 wt mpg cyl 4 6 8 marginaleffects/inst/tinytest/_tinysnapshot/plot_comparisons-minmax_x.svg0000644000176200001440000001004614541720224027102 0ustar liggesusers -20 -10 0 -SD Mean +SD wt Comparison marginaleffects/inst/tinytest/_tinysnapshot/plot_comparisons-rr_titanic.svg0000644000176200001440000002063514541720224027425 0ustar liggesusers 0.90 0.95 1.00 0 20 40 60 Age Adjusted Risk Ratio P(Survived = 1 | Age + 10) / P(Survived = 1 | Age) marginaleffects/inst/tinytest/_tinysnapshot/summary-marginaleffects_conf_level_20.txt0000644000176200001440000000062714541720224031237 0ustar liggesusers Term Contrast Estimate Std. Error z Pr(>|z|) 40.0 % 60.0 % cyl mean(6) - mean(4) -5.968 1.6393 -3.64 <0.001 -6.3830 -5.5523 cyl mean(8) - mean(4) -8.521 2.3261 -3.66 <0.001 -9.1102 -7.9315 hp mean(dY/dX) -0.024 0.0154 -1.56 0.119 -0.0279 -0.0201 Columns: term, contrast, estimate, std.error, statistic, p.value, conf.low, conf.high Type: response marginaleffects/inst/tinytest/_tinysnapshot/summary-marginalmeans.txt0000644000176200001440000000121014541720224026213 0ustar liggesusers Term Value Mean Std. 
Error z Pr(>|z|) S 2.5 % 97.5 % gear 3 21.6 1.60 13.5 <0.001 135.6 18.5 24.8 gear 4 21.1 1.26 16.7 <0.001 205.3 18.6 23.6 gear 5 20.0 1.97 10.1 <0.001 77.8 16.1 23.8 am FALSE 17.4 1.42 12.3 <0.001 113.3 14.7 20.2 am TRUE 24.4 1.25 19.5 <0.001 277.6 21.9 26.8 vs FALSE 17.5 1.01 17.3 <0.001 221.6 15.5 19.4 vs TRUE 24.3 1.19 20.4 <0.001 304.1 22.0 26.7 Results averaged over levels of: gear, am, vs Columns: term, value, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high Type: response marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions_conf_40.svg0000644000176200001440000002030214541720224026556 0ustar liggesusers 12 16 20 24 100 200 300 hp mpg marginaleffects/inst/tinytest/_tinysnapshot/print-comparisons_by.txt0000644000176200001440000000177514541720224026103 0ustar liggesusers Term Contrast gear Estimate Std. Error z Pr(>|z|) S 2.5 % 97.5 % gear mean(4) - mean(3) 3 -7.0969 2.8074 -2.53 0.01148 6.4 -12.5993 -1.5944 gear mean(4) - mean(3) 4 3.9009 1.6136 2.42 0.01562 6.0 0.7384 7.0635 gear mean(4) - mean(3) 5 -9.5681 3.3758 -2.83 0.00459 7.8 -16.1845 -2.9516 gear mean(5) - mean(3) 3 6.4076 1.3640 4.70 < 0.001 18.5 3.7343 9.0810 gear mean(5) - mean(3) 4 6.9300 2.2776 3.04 0.00235 8.7 2.4659 11.3940 gear mean(5) - mean(3) 5 6.2903 1.3712 4.59 < 0.001 17.8 3.6027 8.9779 hp mean(+1) 3 -0.0522 0.0146 -3.59 < 0.001 11.6 -0.0808 -0.0237 hp mean(+1) 4 -0.1792 0.0303 -5.92 < 0.001 28.2 -0.2385 -0.1199 hp mean(+1) 5 -0.0583 0.0126 -4.61 < 0.001 17.9 -0.0830 -0.0335 Columns: term, contrast, gear, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, predicted_lo, predicted_hi, predicted Type: response marginaleffects/inst/tinytest/_tinysnapshot/plot_slopes_two_conditions.svg0000644000176200001440000003635614554070103027370 0ustar liggesusers -0.15 -0.10 -0.05 0.00 0.05 0.10 2 3 4 5 wt Slope factor(am) 0 1 marginaleffects/inst/tinytest/_tinysnapshot/print-comparisons_1focal_dataframe.txt0000644000176200001440000000061514541720224030632 
0ustar liggesusers Term Contrast Estimate Std. Error z Pr(>|z|) S 2.5 % 97.5 % am hp am 1 - 0 5.28 1.08 4.89 <0.001 19.9 3.16 7.39 0 120 am 1 - 0 5.28 1.08 4.89 <0.001 19.9 3.16 7.39 1 120 Columns: rowid, term, contrast, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, predicted_lo, predicted_hi, predicted, am, hp, mpg Type: response marginaleffects/inst/tinytest/_tinysnapshot/print-predictions_datagrid.txt0000644000176200001440000000075514541720224027233 0ustar liggesusers PClass SexCode Estimate Pr(>|z|) S 2.5 % 97.5 % 1st 0 0.4641 0.538 0.9 0.3538 0.578 1st 1 0.9469 <0.001 28.5 0.8735 0.979 2nd 0 0.0663 <0.001 31.4 0.0301 0.140 2nd 1 0.8784 <0.001 27.4 0.7879 0.934 3rd 0 0.1146 <0.001 53.1 0.0740 0.173 3rd 1 0.4497 0.391 1.4 0.3400 0.565 Columns: rowid, estimate, p.value, s.value, conf.low, conf.high, Survived, Age, PClass, SexCode Type: invlink(link) marginaleffects/inst/tinytest/_tinysnapshot/print-marginal_means.txt0000644000176200001440000000065314541720224026023 0ustar liggesusers Term Value Mean Std. 
Error z Pr(>|z|) S 2.5 % 97.5 % gear 3 17.6 0.796 22.16 <0.001 359.1 16.1 19.2 gear 4 14.3 1.886 7.58 <0.001 44.7 10.6 18.0 gear 5 24.2 1.316 18.41 <0.001 249.1 21.7 26.8 Results averaged over levels of: gear Columns: rowid, term, value, gear, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, hp, mpg, wts Type: response marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions_link.svg0000644000176200001440000002077614541720224026302 0ustar liggesusers -10 -5 0 5 10 100 200 300 hp am marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions-gray.svg0000644000176200001440000007534214541720224026224 0ustar liggesusers 0 10 20 30 40 100 200 300 hp mpg wt 1.513 2.5425 3.325 3.65 5.424 marginaleffects/inst/tinytest/_tinysnapshot/plot_slopes_factor_facets.svg0000644000176200001440000004327514541720224027131 0ustar liggesusers gear_fct, 4 - 3 gear_fct, 5 - 3 10 15 20 25 30 35 10 15 20 25 30 35 -2.5 0.0 2.5 -4 -2 0 2 mpg Slope marginaleffects/inst/tinytest/_tinysnapshot/print-predictions_by.txt0000644000176200001440000000052614541720224026062 0ustar liggesusers gear Estimate Std. 
Error z Pr(>|z|) S 2.5 % 97.5 % 3 16.1 0.671 24.0 <0.001 420.8 14.8 17.4 4 24.5 0.750 32.7 <0.001 777.2 23.1 26.0 5 21.4 1.162 18.4 <0.001 248.8 19.1 23.7 Columns: gear, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high Type: response marginaleffects/inst/tinytest/_tinysnapshot/plot_slopes_categorical.svg0000644000176200001440000001113314541720224026567 0ustar liggesusers -0.3 -0.2 -0.1 0.0 0.1 4 6 8 cyl Slope marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions_response.svg0000644000176200001440000002032314541720224027167 0ustar liggesusers 0.0 0.4 0.8 1.2 100 200 300 hp am marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions_conf_99.svg0000644000176200001440000002076514541720224026611 0ustar liggesusers 5 10 15 20 25 100 200 300 hp mpg marginaleffects/inst/tinytest/_tinysnapshot/print-predictions.txt0000644000176200001440000000144014541720224025364 0ustar liggesusers Estimate Std. Error z Pr(>|z|) S 2.5 % 97.5 % 20.9 0.973 21.43 <0.001 336.2 18.95 22.8 20.9 0.973 21.43 <0.001 336.2 18.95 22.8 23.9 0.757 31.56 <0.001 723.9 22.42 25.4 19.6 1.174 16.67 <0.001 204.8 17.26 21.9 16.2 0.671 24.09 <0.001 423.6 14.85 17.5 --- 22 rows omitted. See ?avg_predictions and ?print.marginaleffects --- 26.2 1.562 16.77 <0.001 207.3 23.13 29.3 17.4 1.448 12.01 <0.001 108.0 14.56 20.2 22.6 1.191 18.96 <0.001 264.0 20.25 24.9 13.3 2.110 6.28 <0.001 31.5 9.12 17.4 21.0 0.954 22.05 <0.001 355.5 19.17 22.9 Columns: rowid, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, mpg, hp, gear Type: response marginaleffects/inst/tinytest/_tinysnapshot/summary-hypotheses.txt0000644000176200001440000000032114541720224025572 0ustar liggesusers Term Estimate Std. 
Error z Pr(>|z|) S 2.5 % 97.5 % b3 = b4 2.55 1.98 1.29 0.197 2.3 -1.32 6.43 Columns: term, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high marginaleffects/inst/tinytest/_tinysnapshot/print-comparisons_1focal_datagrid.txt0000644000176200001440000000061514541720224030465 0ustar liggesusers Term Contrast am hp Estimate Std. Error z Pr(>|z|) S 2.5 % 97.5 % am 1 - 0 0 120 5.28 1.08 4.89 <0.001 19.9 3.16 7.39 am 1 - 0 1 120 5.28 1.08 4.89 <0.001 19.9 3.16 7.39 Columns: rowid, term, contrast, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, am, hp, predicted_lo, predicted_hi, predicted, mpg Type: response marginaleffects/inst/tinytest/_tinysnapshot/print-predictions_newdata.txt0000644000176200001440000000045714541720224027076 0ustar liggesusers Estimate Std. Error z Pr(>|z|) S 2.5 % 97.5 % am hp 19.5 0.739 26.4 <0.001 508.8 18.1 21.0 0 120 24.8 0.809 30.7 <0.001 683.5 23.2 26.4 1 120 Columns: rowid, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, am, hp, mpg Type: response marginaleffects/inst/tinytest/_tinysnapshot/df-t.txt0000644000176200001440000000041414541720224022541 0ustar liggesusers Term Contrast Estimate Std. 
Error t Pr(>|t|) S 2.5 % 97.5 % Df hp +1 -0.0682 0.0101 -6.74 <0.001 22.4 -0.0889 -0.0476 30 Columns: term, contrast, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, df Type: response marginaleffects/inst/tinytest/_tinysnapshot/plot_slopes_continuous.svg0000644000176200001440000002143614554070103026525 0ustar liggesusers -0.2 -0.1 0.0 0.1 0.2 2 3 4 5 wt Slope marginaleffects/inst/tinytest/_tinysnapshot/plot_comparisons-2effects.svg0000644000176200001440000004210414541720224026763 0ustar liggesusers hp wt 2 3 4 5 2 3 4 5 -5.0 -4.5 -4.0 -3.5 -3.0 -0.10 -0.05 0.00 0.05 wt Comparison marginaleffects/inst/tinytest/_tinysnapshot/summary-comparisons_transform.txt0000644000176200001440000000030214541720224030026 0ustar liggesusers Term Contrast Estimate Pr(>|z|) S 2.5 % 97.5 % hp +1 2.71 <0.001 Inf 2.69 2.72 Columns: term, contrast, estimate, p.value, s.value, conf.low, conf.high Type: response marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions_vs_categorical_x_axis.svg0000644000176200001440000002332014541720224031671 0ustar liggesusers -20 0 20 40 4 6 8 cyl mpg wt 1.513 2.5425 3.325 3.65 5.424 marginaleffects/inst/tinytest/_tinysnapshot/plot_predictions-alpha.svg0000644000176200001440000005753414541720224026352 0ustar liggesusers 10 20 30 100 200 300 hp mpg wt -SD Mean +SD marginaleffects/inst/tinytest/_tinysnapshot/df-z.txt0000644000176200001440000000040214541720224022544 0ustar liggesusers Term Contrast Estimate Std. Error z Pr(>|z|) S 2.5 % 97.5 % hp +1 -0.0682 0.0101 -6.74 <0.001 35.9 -0.0881 -0.0484 Columns: term, contrast, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high Type: response marginaleffects/inst/tinytest/_tinysnapshot/equivalence-avg_comparisons.txt0000644000176200001440000000077114541720224027406 0ustar liggesusers Term Contrast Estimate Std. 
Error z Pr(>|z|) S 2.5 % 97.5 % p (NonSup) p (NonInf) p (Equiv) hp +1 -0.112 0.0126 -8.92 < 0.001 60.9 -0.137 -0.0874 <0.001 0.831 0.831 qsec +1 -1.382 0.4331 -3.19 0.00141 9.5 -2.231 -0.5336 <0.001 0.998 0.998 Columns: term, contrast, estimate, std.error, statistic, p.value, s.value, conf.low, conf.high, statistic.noninf, statistic.nonsup, p.value.noninf, p.value.nonsup, p.value.equiv Type: response marginaleffects/inst/tinytest/_tinysnapshot/summary-marginaleffects_conf_level_90.txt0000644000176200001440000000063714541720224031247 0ustar liggesusers Term Contrast Estimate Std. Error z Pr(>|z|) 5.0 % 95.0 % cyl mean(6) - mean(4) -5.968 1.6393 -3.64 <0.001 -8.6640 -3.27128 cyl mean(8) - mean(4) -8.521 2.3261 -3.66 <0.001 -12.3469 -4.69480 hp mean(dY/dX) -0.024 0.0154 -1.56 0.119 -0.0494 0.00131 Columns: term, contrast, estimate, std.error, statistic, p.value, conf.low, conf.high Type: response marginaleffects/inst/tinytest/_tinysnapshot/summary-marginaleffects.txt0000644000176200001440000000063714541720224026543 0ustar liggesusers Term Contrast Estimate Std. 
Error z Pr(>|z|) 2.5 % 97.5 % cyl mean(6) - mean(4) -5.968 1.6393 -3.64 <0.001 -9.1806 -2.75473 cyl mean(8) - mean(4) -8.521 2.3261 -3.66 <0.001 -13.0799 -3.96183 hp mean(dY/dX) -0.024 0.0154 -1.56 0.119 -0.0542 0.00616 Columns: term, contrast, estimate, std.error, statistic, p.value, conf.low, conf.high Type: response marginaleffects/inst/tinytest/test-pkg-scam.R0000644000176200001440000000074414541720224021054 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("scam") # no validity set.seed(4) n <- 200 x1 <- runif(n) * 6 - 3 f1 <- 3 * exp(-x1^2) # unconstrained term x2 <- runif(n) * 4 - 1; f2 <- exp(4 * x2) / (1 + exp(4 * x2)) # monotone increasing smooth y <- f1 + f2 + rnorm(n) * .5 dat <- data.frame(x1 = x1, x2 = x2, y = y) mod <- scam(y ~ s(x1, bs = "cr") + s(x2, bs = "mpi"), data = dat) suppressWarnings(expect_slopes(mod)) expect_predictions(predictions(mod)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-geepack.R0000644000176200001440000000427014541720224021526 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("geepack") requiet("emmeans") requiet("broom") # Stata does not replicate coefficients exactly: # xtset Pig Time # xtgee Weight i.Cu, family(poisson) link(identity) corr(ar 1) # geepack::geeglm: marginaleffects vs. 
emtrends data(dietox, package = "geepack") dietox$Cu <- as.factor(dietox$Cu) mf <- formula(Weight ~ Cu * (Time + I(Time^2) + I(Time^3))) model <- suppressWarnings(geeglm(mf, data = dietox, id = Pig, family = poisson("identity"), corstr = "ar1")) expect_slopes(model) # emmeans mfx <- slopes(model, variables = "Time", newdata = datagrid(Time = 10, Cu = "Cu000"), type = "link") em <- suppressMessages(emtrends(model, ~Time, var = "Time", at = list(Time = 10, Cu = "Cu000"))) em <- tidy(em) expect_equivalent(mfx$estimate, em$Time.trend, tolerance = .001) expect_equivalent(mfx$std.error, em$std.error, tolerance = .01) # predictions: geepack::geeglm: no validity data(dietox, package = "geepack") dietox$Cu <- as.factor(dietox$Cu) mf <- formula(Weight ~ Cu * (Time + I(Time^2) + I(Time^3))) model <- suppressWarnings(geeglm(mf, data = dietox, id = Pig, family = poisson("identity"), corstr = "ar1")) pred1 <- predictions(model) pred2 <- predictions(model, newdata = head(dietox)) expect_predictions(pred1, n_row = nrow(dietox)) expect_predictions(pred2, n_row = 6) # TODO: why no support for standard errors? # marginalmeans: geepack::geeglm: vs. 
emmeans data(dietox, package = "geepack") dietox$Cu <- as.factor(dietox$Cu) mf <- formula(Weight ~ Cu + Time + I(Time^2) + I(Time^3)) model <- suppressWarnings(geeglm(mf, data = dietox, id = Pig, family = poisson("identity"), corstr = "ar1")) em <- tidy(emmeans::emmeans(model, ~Cu, df = Inf, at = list(Time = 10)), type = "response") pr <- predictions(model, datagrid(Time = 10, Cu = unique)) expect_equivalent(em$estimate, pr$estimate) expect_equivalent(em$std.error, pr$std.error, tolerance = 1e-5) # TODO: not clear where `emmeans` holds the Time variable # em <- emmeans::emmeans(model, ~Cu, type = "response", df = Inf) # em <- data.frame(em) # expect_equal(mm$estimate, em$emmean) # expect_equal(mm$conf.low, em$asymp.LCL) # expect_equal(mm$conf.high, em$asymp.UCL) rm(list = ls())marginaleffects/inst/tinytest/test-predict_type.R0000644000176200001440000000132014560035476022045 0ustar liggesuserssource("helpers.R") using("marginaleffects") # sanity gives informative error for all the functions dat <- mtcars dat$cyl <- factor(dat$cyl) dat <- dat mod <- lm(mpg ~ hp + cyl, data = dat) expect_error(comparisons(mod, type = "junk"), pattern = "Must be element") expect_error(predictions(mod, type = "junk"), pattern = "Must be element") expect_error(slopes(mod, type = "junk"), pattern = "Must be element") # error: multivariate requiet("pscl") dat2 <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/pscl/bioChemists.csv") model <- hurdle(art ~ phd + fem | ment, data = dat2, dist = "negbin") mfx <- slopes(model, type = "prob") expect_true(all(as.character(0:19) %in% mfx$group)) rm(list = ls())marginaleffects/inst/tinytest/test-predictions.R0000644000176200001440000001571414541720224021700 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("pscl") tmp <- mtcars tmp$am <- as.logical(tmp$am) mod <- lm(mpg ~ hp + wt + factor(cyl) + am, data = tmp) # Issue #580: binary outcome should not be included in marginalmeans calculation dat <- transform( 
mtcars, vs = factor(vs), gear = factor(gear), am = factor(am)) mod <- glm(vs ~ gear + am, data = dat, family = binomial) p <- predictions(mod, newdata = "marginalmeans") expect_equal(nrow(p), 6) # bugfix: counterfactual predictions keep rowid mod <- lm(mpg ~ hp + am, mtcars) pred <- predictions(mod, newdata = datagrid(am = 0:1, grid_type = "counterfactual")) expect_predictions(pred, n_row = 64) expect_true("rowidcf" %in% colnames(pred)) # `variables` argument: character vector p <- predictions(mod, variables = list("am" = 0:1)) expect_equivalent(nrow(p), 64) p <- predictions(mod, variables = list("am" = 0:1), newdata = "mean") expect_equivalent(nrow(p), 2) # `variables` argument: character vector expect_error(predictions(mod, variables = list(2)), pattern = "names") p <- predictions(mod, variables = "am") expect_inherits(p, "predictions") # average prediction with delta method are asymptotically equivalent to back transformed set.seed(1024) N <- 1e5 dat <- data.frame( y = rbinom(N, 1, prob = .9), x = rnorm(N)) mod <- glm(y ~ x, family = binomial, data = dat) p1 <- avg_predictions(mod) # average prediction outside [0,1] p2 <- avg_predictions(mod, type = "link", transform = insight::link_inverse(mod)) # average prediction inside [0,1] expect_equivalent(p1$estimate, p2$estimate, tolerance = .001) expect_equivalent(p1$conf.low, p2$conf.low, tolerance = .01) expect_equivalent(p1$conf.high, p2$conf.high, tolerance = .01) ################ # conf.level # ################ # conf.level argument changes width of interval mod <- lm(mpg ~ hp + am, mtcars) for (L in c(.4, .7, .9, .95, .99, .999)) { nd <- datagrid(model = mod) unknown <- predictions(mod, newdata = nd, conf.level = L, df = insight::get_df(mod)) # known values used Wald known <- predict(mod, newdata = nd, se.fit = TRUE, interval = "confidence", level = L)$fit expect_equivalent(unknown$conf.low, known[, "lwr"]) expect_equivalent(unknown$conf.high, known[, "upr"]) } ################################# # average 
adjusted predictions # ################################# dat <- mtcars dat$w <- 1:32 mod <- lm(mpg ~ hp + am, dat) pre1 <- predictions(mod, by = "am") pre1 <- pre1[order(pre1$am),] pre2 <- predictions(mod) pre2 <- aggregate(estimate ~ am, FUN = mean, data = pre2) expect_equivalent(pre1$estimate, pre2$estimate) ######################################### # weigted average adjusted predictions # ######################################### pre1 <- avg_predictions(mod, wts = mtcars$w) pre2 <- avg_predictions(mod) expect_true(all(pre1$estimate != pre2$estimate)) ###################################### # values against predict benchmark # ###################################### mod <- lm(mpg ~ hp + wt + factor(cyl) + am, data = tmp) nd <- datagrid(model = mod, cyl = c(4, 6, 8)) mm <- predictions(mod, newdata = nd) expect_equivalent(mm$estimate, unname(predict(mod, newdata = nd))) ############################# # size: new data argument # ############################# # `newdata`: mtcars has 32 rows mm <- predictions(mod, newdata = tmp) expect_equivalent(nrow(mm), 32) # `typical`: all factors mm <- predictions(mod, newdata = datagrid(cyl = c(4, 6, 8))) expect_equivalent(nrow(mm), 3) # `typical`: two missing factors mm <- predictions(mod, newdata = datagrid(cyl = 4)) expect_equivalent(nrow(mm), 1) # `typical`: one missing factor mm <- predictions(mod, newdata = datagrid(cyl = c(4, 6))) expect_equivalent(nrow(mm), 2) # `typical`: all logical mm <- predictions(mod, newdata = datagrid(am = c(TRUE, FALSE))) expect_equivalent(nrow(mm), 2) expect_equivalent(length(unique(mm$estimate)), nrow(mm)) # `typical`: missing logical mm <- predictions(mod, newdata = datagrid(am = TRUE)) expect_equivalent(nrow(mm), 1) # exit_file("works interactively") # Issue #496 mod <- lm(mpg ~ factor(vs), data = mtcars) p1 <- predictions(mod, variables = list(vs = 0:1)) p2 <- predictions(mod, variables = list(vs = c("0", "1"))) expect_inherits(p1, "predictions") expect_inherits(p2, "predictions") 
expect_error(predictions(mod, variables = list(vs = "pairwise")), pattern = "pairwise") dat <- mtcars dat$vs <- factor(dat$vs) mod <- lm(mpg ~ vs, data = dat) p1 <- predictions(mod, variables = list(vs = 0:1)) p2 <- predictions(mod, variables = list(vs = c("0", "1"))) expect_inherits(p1, "predictions") expect_inherits(p2, "predictions") expect_error(predictions(mod, variables = list(vs = "pairwise")), pattern = "pairwise") ######################################################################### # some models do not return data.frame under `insight::get_predicted` # ######################################################################### # Issue 514 dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/MatchIt/lalonde.csv") fit <- lm(re78 ~ married + race + age, data = dat) p <- predictions(fit, variables = list(age = c(20, 30)), newdata = dat) expect_equivalent(nrow(p), nrow(dat) * 2) p <- predictions(fit, variables = list(age = c(20, 25, 30)), newdata = dat) expect_equivalent(nrow(p), nrow(dat) * 3) p <- predictions(fit, variables = list(age = "minmax"), newdata = dat) expect_equivalent(nrow(p), nrow(dat) * 2) p <- predictions(fit, variables = list(race = c("black", "hispan")), newdata = dat) expect_equivalent(nrow(p), nrow(dat) * 2) p <- predictions(fit, variables = list(race = c("black", "hispan", "white")), newdata = dat) expect_equivalent(nrow(p), nrow(dat) * 3) expect_error(predictions(fit, variables = list(race = "all"), newdata = dat), pattern = "Check") p <- predictions(fit, newdata = datagrid( race = c("black", "hispan", "white"), grid_type = "counterfactual")) expect_equivalent(nrow(p), nrow(dat) * 3) dat <- transform(mtcars, am = as.logical(am)) mod <- lm(mpg ~ am, dat) p <- predictions(mod, variables = list("am" = TRUE), newdata = dat) expect_equivalent(nrow(p), 32) p <- predictions(mod, variables = "am", newdata = dat) expect_equivalent(nrow(p), 64) # hurdle predictions dat <- 
read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/pscl/bioChemists.csv") mod <- hurdle(art ~ phd + fem | ment, data = dat, dist = "negbin") pred <- predictions(mod, newdata = dat) expect_inherits(pred, "data.frame") expect_true("estimate" %in% colnames(pred)) # Issue #655: Average counterfactual predictions mod <- lm(mpg ~ hp + factor(cyl), data = mtcars) pre <- avg_predictions(mod, variables = "cyl") expect_inherits(pre, "predictions") expect_equivalent(nrow(pre), 3) pre <- avg_predictions(mod, variables = list(cyl = c(4, 6))) expect_inherits(pre, "predictions") expect_equivalent(nrow(pre), 2) pre <- avg_predictions(mod, by = "cyl", newdata = datagrid(cyl = c(4, 6), grid_type = "counterfactual")) expect_inherits(pre, "predictions") expect_equivalent(nrow(pre), 2) rm(list = ls())marginaleffects/inst/tinytest/test-equivalence.R0000644000176200001440000001021514554070103021643 0ustar liggesuserssource("helpers.R") requiet("poorman") requiet("emmeans") requiet("parameters") # exit_file("TODO") mod <- lm(mpg ~ hp + factor(gear), data = mtcars) # predictions() vs. {emmeans}: inf delta <- 1 null <- 20 em <- emmeans(mod, "gear", df = Inf) e1 <- test(em, delta = delta, null = null, side = "noninferiority", df = Inf) e2 <- predictions( mod, newdata = datagrid(gear = unique), equivalence = c(19, 21)) |> poorman::arrange(gear) expect_equivalent(e1$z.ratio, e2$statistic.noninf, tolerance = 1e-6) expect_equivalent(e1$p.value, e2$p.value.noninf) # predictions() vs. {emmeans}: sup e1 <- test(em, delta = 1, null = 23, side = "nonsuperiority", df = Inf) e2 <- predictions( mod, newdata = datagrid(gear = unique), equivalence = c(22, 24)) |> poorman::arrange(gear) expect_equivalent(e1$z.ratio, e2$statistic.nonsup, tol = 1e-6) expect_equivalent(e1$p.value, e2$p.value.nonsup) # predictions() vs. 
{emmeans}: equiv e1 <- test(em, delta = 1, null = 22, side = "equivalence", df = Inf) e2 <- predictions( mod, newdata = datagrid(gear = unique), equivalence = c(21, 23)) |> poorman::arrange(gear) expect_equivalent(e1$p.value, e2$p.value.equiv) # slopes() works; no validity mfx <- slopes( mod, variables = "hp", newdata = "mean", equivalence = c(-.09, .01)) expect_inherits(mfx, "slopes") # two-sample t-test requiet("equivalence") set.seed(1024) N <- 100 dat <- rbind(data.frame(y = rnorm(N), x = 0), data.frame(y = rnorm(N, mean = 0.3), x = 1)) mod <- lm(y ~ x, data = dat) FUN <- function(model, ...) { data.frame(term = "t-test", estimate = coef(model)[2]) } e1 <- tost(dat$y[dat$x == 0], dat$y[dat$x == 1], epsilon = .05) e2 <- hypotheses( mod, FUN = FUN, equivalence = c(-.05, .05), df = e1$parameter) expect_true(e1$tost.p.value > .5 && e1$tost.p.value < .9) expect_equivalent(e1$tost.p.value, e2$p.value.equiv) # GLM vs emmeans mod <- glm(vs ~ factor(gear), data = mtcars, family = binomial) em <- emmeans(mod, "gear", df = Inf) e1 <- test(em, delta = .5, null = 1, side = "noninferiority", df = Inf) e2 <- predictions( mod, type = "link", newdata = datagrid(gear = unique), equivalence = c(.5, 1.5), numderiv = "richardson") |> poorman::arrange(gear) expect_equivalent(e1$emmean, e2$estimate) expect_equivalent(e1$z.ratio, e2$statistic.noninf) expect_equivalent(e1$p.value, e2$p.value.noninf) # avg_*() and hypotheses() tmp <- lm(mpg ~ hp * qsec, data = mtcars) cmp <- avg_comparisons(tmp) |> hypotheses(equivalence = c(-.2, 0)) mfx <- avg_slopes(tmp) |> hypotheses(equivalence = c(-.2, 0)) pre <- avg_predictions(tmp) |> hypotheses(equivalence = c(-.2, 0)) expect_inherits(cmp, "hypotheses") expect_inherits(mfx, "hypotheses") expect_inherits(pre, "hypotheses") if (!requiet("tinysnapshot")) { exit_file("tinysnapshot") } cmp <- avg_comparisons(tmp, equivalence = c(-.1, 0)) expect_snapshot_print(cmp, "equivalence-avg_comparisons") # bug on with call and symbols mod <- lm(mpg ~ hp * vs, 
data = mtcars) x <- avg_slopes(mod, by = "vs", variables = "hp", hypothesis = "pairwise") x <- hypotheses(x, equivalence = c(-.2, .2)) expect_inherits(x, "hypotheses") rm("mod") delta <- log(1.25) mod <<- lm(log(conc) ~ source + factor(percent), data = pigs) rg <- ref_grid(mod) em <- emmeans(rg, "source", at = list(), df = Inf) pa <- pairs(em, df = Inf) mm <- predictions( mod, newdata = datagrid(grid_type = "balanced"), by = "source", hypothesis = "pairwise") e1 <- test(pa, delta = delta, adjust = "none", side = "nonsuperiority", df = Inf) e2 <- hypotheses(mm, equivalence = c(-delta, delta)) expect_equivalent(e1$z.ratio, e2$statistic.nonsup, tol = 1e-6) expect_equivalent(e1$p.value, e2$p.value.nonsup, tol = 1e-6) e1 <- test(pa, delta = delta, adjust = "none", side = "noninferiority", df = Inf) e2 <- hypotheses(mm, equivalence = c(-delta, delta)) expect_equivalent(e1$z.ratio, e2$statistic.noninf, tolerance = 1e-6) expect_equivalent(e1$p.value, e2$p.value.noninf) e1 <- test(pa, delta = delta, adjust = "none", df = Inf) e2 <- hypotheses(mm, equivalence = c(-delta, delta)) expect_equivalent(e1$p.value, e2$p.value.equiv) source("helpers.R") rm(list = ls()) marginaleffects/inst/tinytest/test-df.R0000644000176200001440000000253114560035476017750 0ustar liggesuserssource("helpers.R") requiet("emmeans") requiet("marginaleffects") using("marginaleffects") # TODO: rename dat to df to make sure there's no clash with the internal keyword dat <- mtcars dat$cyl <- as.factor(dat$cyl) dat$am <- as.factor(dat$am) mod <- lm(mpg ~ cyl, data = dat) em <- emmeans(mod, ~ cyl) em <- confint(pairs(em), adjust = "none") |> dplyr::arrange(contrast) cmp29 <- comparisons(mod, df = insight::get_df(mod)) cmpInf <- comparisons(mod) expect_true(all(cmp29$p.value > cmpInf$p.value)) expect_true(all(cmp29$conf.low < cmpInf$conf.low)) mfx29 <- slopes(mod, df = insight::get_df(mod)) mfxInf <- slopes(mod) expect_true(all(mfx29$p.value > mfxInf$p.value)) expect_true(all(mfx29$conf.low < mfxInf$conf.low)) 
# Issue #594 pre29 <- predictions(mod, df = 29) preInf <- predictions(mod) expect_true(all(pre29$p.value > preInf$p.value)) expect_true(all(pre29$conf.low < preInf$conf.low)) # Issue #627: print t instead of z in column names if (!requiet("tinysnapshot")) exit_file("tinysnapshot") using("tinysnapshot") mod <- lm(mpg ~ hp, mtcars) expect_snapshot_print(avg_comparisons(mod), "df-z") expect_snapshot_print(avg_comparisons(mod, df = 30), "df-t") # Issue #754: allow df vector mod <- lm(mpg ~ hp, mtcars) a <- predictions(mod, df = 1:32) b <- predictions(mod, df = 1) expect_equal(sum(a$p.value == b$p.value), 1) rm(list = ls())marginaleffects/inst/tinytest/test-hypotheses_joint.R0000644000176200001440000000535514541720224022753 0ustar liggesuserssource("helpers.R") requiet("car") model <- lm(mpg ~ as.factor(cyl) * hp, data = mtcars) tn <- c("as.factor(cyl)6:hp", "as.factor(cyl)8:hp") # f a <- hypotheses(model, joint = tn, joint_test = "f") b <- car::linearHypothesis(model, tn, test = "F") expect_equal(a$statistic, b[["F"]][2]) expect_equal(a$p.value, b[["Pr(>F)"]][2]) # chi-squared a <- hypotheses(model, joint = tn, joint_test = "chisq") b <- car::linearHypothesis(model, tn, test = "Chisq") expect_equal(a$statistic, b[["Chisq"]][2]) expect_equal(a$p.value, b[["Pr(>Chisq)"]][2]) # numeric indices a <- hypotheses(model, joint = 5:6, joint_test = "f") b <- car::linearHypothesis(model, tn, test = "F") expect_equal(a$statistic, b[["F"]][2]) expect_equal(a$p.value, b[["Pr(>F)"]][2]) a <- hypotheses(model, joint = 2:3, joint_test = "f") b <- car::linearHypothesis(model, c("as.factor(cyl)6", "as.factor(cyl)8"), test = "F") expect_equal(a$statistic, b[["F"]][2]) expect_equal(a$p.value, b[["Pr(>F)"]][2]) # regex indices a = hypotheses(model, joint = "cyl\\)\\d$") expect_equal(a$statistic, 6.11733602323976) a = hypotheses(model, joint = "cyl") expect_equal(a$statistic, 5.70257517421252) # regex: marginaleffects object mod <- glm(am ~ vs + factor(carb), family = binomial, data = 
mtcars) cmp <- avg_comparisons(mod) a <- hypotheses(cmp, joint = "carb") expect_inherits(a, "hypotheses") # marginaleffects objects mod <- glm(am ~ vs + factor(carb), family = binomial, data = mtcars) cmp <- avg_comparisons(mod) a <- hypotheses(cmp, joint_test = "f", joint = TRUE) b <- hypotheses(cmp, joint_test = "f", joint = 2:3) expect_true(a$p.value < b$p.value) expect_true(a$statistic > b$statistic) # Null hypothesis vector mod <- glm(am ~ vs + factor(carb), family = binomial, data = mtcars) a <- hypotheses(mod, joint = 3:4, hypothesis = 1:2) expect_inherits(a, "hypotheses") expect_error(hypotheses(mod, joint = 3:4, hypothesis = 1:4)) # Single parameter mod <- glm(am ~ vs, family = binomial, data = mtcars) a = hypotheses(mod, joint = TRUE) expect_inherits(a, "hypotheses") # Issue #789: incorrect degrees of freedom lmfit <- lm(mpg~as.factor(cyl)*hp, data=mtcars) H <- matrix(0, nrow = length(coef(lmfit)), ncol = 2) H[5, 1] <- H[6, 2] <- 1 hyp <- hypotheses(lmfit, hypothesis = H) h1 <- hypotheses(hyp, joint = TRUE) h2 <- hypotheses(lmfit, joint = 5:6, joint_test = "f") expect_equivalent(h1$df1, h2$df1) expect_equivalent(h1$df2, h2$df2) # Issue #981 model <- lm(mpg ~ as.factor(cyl), data = mtcars) cmp <- avg_comparisons(model) h1 <- hypotheses(cmp, joint = ".*") h2 <- hypotheses(cmp, joint = "cyl") h3 <- hypotheses(cmp, joint = TRUE) expect_equivalent(h1$estimate, h2$estimate) expect_equivalent(h1$estimate, h3$estimate) expect_equivalent(h1$std.error, h2$std.error) expect_equivalent(h1$std.error, h3$std.error) marginaleffects/inst/tinytest/test-factor.R0000644000176200001440000000437614541720224020635 0ustar liggesuserssource("helpers.R") using("marginaleffects") # # # factor before fitting or in formula is the same tmp <- mtcars tmp$cyl <- factor(tmp$cyl) mod1 <- lm(mpg ~ hp + factor(cyl), mtcars) mod2 <- lm(mpg ~ hp + cyl, tmp) mfx1 <- suppressWarnings(slopes(mod1)) mfx2 <- slopes(mod2) expect_equivalent(mfx1$estimate, mfx2$estimate) 
expect_equivalent(mfx1$std.error, mfx2$std.error) # factor on LHS and RHS at the same time. data(housing, package = "MASS") mod <- MASS::polr(Infl ~ Sat + Freq, data = housing, Hess = TRUE) mfx <- suppressMessages(slopes(mod, type = "probs")) expect_inherits(mfx, "marginaleffects") expect_true(all(c("Low", "Medium", "High") %in% mfx$group)) # smart detect factor() in formula requiet("estimatr") model <- lm_robust(carb ~ wt + factor(cyl), se_type = "stata", data = mtcars) k <- slopes(model) expect_true(all(c("dY/dX", "8 - 4") %in% k$contrast)) # factor in formula with incomplete newdata mod <- lm(mpg ~ factor(cyl), data = mtcars) mfx1 <- slopes(mod, newdata = data.frame(cyl = 4)) mfx2 <- slopes(mod, newdata = datagrid(cyl = 4)) expect_equivalent(mfx1[, 1:5], mfx2[, 1:5]) # bugs stay dead: get_data.coxph() with strata() # skip_if_not_installed("insight", minimum_version = "0.17.0") requiet("survival") test1 <- data.frame( time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = c(0, 0, 0, 0, 1, 1, 1)) mod <- coxph(Surv(time, status) ~ x + strata(sex), data = test1, ties = "breslow") nd <- datagrid(sex = 0, newdata = test1) mfx <- slopes(mod, variables = "x", newdata = nd, type = "lp") expect_inherits(mfx, "marginaleffects") # Issue #497 dat <- mtcars dat$cyl <- factor(dat$cyl) dat$cyl <- as.factor(dat$cyl) mod <- lm(mpg ~ cyl, dat) cmp1 <- comparisons(mod, variables = list(cyl = c(6, 4))) cmp2 <- comparisons(mod, variables = list(cyl = c("4", "6"))) cmp3 <- comparisons(mod, variables = list(cyl = dat$cyl[2:3])) expect_inherits(cmp1, "comparisons") expect_inherits(cmp2, "comparisons") expect_inherits(cmp3, "comparisons") # Issue #658 dat <- transform(mtcars, cyl = factor(cyl)) mod <- lm(mpg ~ cyl, mtcars) cmp <- comparisons( mod, variables = list(cyl = "minmax"), transform = function(x) x / 3) expect_inherits(cmp, "comparisons") rm(list = 
ls())marginaleffects/inst/tinytest/test-pkg-DCchoice.R0000644000176200001440000000115314541720224021565 0ustar liggesusers# No validity test whatsoever source("helpers.R") using("marginaleffects") requiet("DCchoice") data(oohbsyn) mod <- oohbchoice(R1 + R2 ~ age + gender | log(BL) + log(BH), data = oohbsyn) # weird result seems to make sense manually p1 <- transform(oohbsyn, BH = 5) p2 <- transform(oohbsyn, BH = 7) p1 <- predict(mod, newdata = p1) p2 <- predict(mod, newdata = p2) bh_comparison <- p2 - p1 expect_true(all(bh_comparison == 0)) slo <- avg_slopes(mod) pre <- predictions(mod, by = "gender") cmp <- comparisons(mod) expect_inherits(cmp, "comparisons") expect_inherits(pre, "predictions") expect_inherits(slo, "slopes") marginaleffects/inst/tinytest/test-comparisons.R0000644000176200001440000000251614541720224021706 0ustar liggesuserssource("helpers.R") using("marginaleffects") # examples from the main documentation mod <- lm(mpg ~ hp, data = mtcars) cmp <- comparisons(mod, variables = list(hp = c(90, 110))) expect_inherits(cmp, "comparisons") # Issue #527 dat <- mtcars dat$new_hp <- 49 * (dat$hp - min(dat$hp)) / (max(dat$hp) - min(dat$hp)) + 1 dat <- dat mod <- lm(mpg ~ log(new_hp) + factor(cyl), data = dat) fdiff <- function(x) data.frame(x, x + 10) cmp1 <- comparisons(mod, variables = list(new_hp = fdiff)) cmp2 <- comparisons(mod, variables = list(new_hp = 10)) expect_equivalent(nrow(cmp1), 32) expect_equivalent(nrow(cmp2), 32) # Issue #720 mod <- lm(mpg ~ hp * qsec, dat = mtcars) cmp <- avg_comparisons(mod, variables = list(hp = "2sd")) expect_equivalent(cmp$contrast, "(x + sd) - (x - sd)") # Issue #622 cross-contrasts mod <- lm(mpg ~ am * factor(cyl), data = mtcars) cmp <- comparisons(mod, variables = c("cyl", "am"), cross = TRUE) expect_equivalent(nrow(cmp), 64) cmp <- avg_comparisons(mod, variables = c("cyl", "am"), cross = TRUE) expect_equivalent(nrow(cmp), 2) # Issue #794 mod <- glm(am ~ hp, data = mtcars, family = binomial()) cmp1 <- 
comparisons(mod, comparison = "lift") cmp2 <- comparisons(mod, comparison = "liftavg") expect_equal(nrow(cmp1), 32) expect_equal(nrow(cmp2), 1) expect_error(comparisons(mod, comparison = "liftr")) rm(list = ls())marginaleffects/inst/tinytest/test-newdata.R0000644000176200001440000000637114541720224020777 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("modelbased") requiet("emmeans") # # this seems deprecated in modelbased in favor of get_datagrid(). Have not investidated yet # # visualisation_matrix() without `x` variable # mod <- lm(mpg ~ hp + factor(cyl), mtcars) # p1 <- predictions(mod, newdata = datagrid(cyl = mtcars$cyl)) # p2 <- predictions(mod, newdata = visualisation_matrix(at = "cyl")) # expect_equivalent(nrow(p1), nrow(p2)) # expect_true(all(c("newdata_adjusted_for", "newdata_at_specs") %in% names(attributes(p2)))) # m1 <- slopes(mod, newdata = datagrid(cyl = mtcars$cyl)) # m2 <- slopes(mod, newdata = visualisation_matrix(at = "cyl")) # expect_equivalent(nrow(m1), nrow(m2)) # expect_true(all(c("newdata_adjusted_for", "newdata_at_specs") %in% names(attributes(m2)))) # shortcut labels dat <- mtcars mod <- glm(vs ~ hp + factor(cyl), family = binomial, data = dat) cmp1 <- comparisons(mod, newdata = "mean") cmp2 <- comparisons(mod, newdata = "median") expect_true(all(cmp1$hp == mean(dat$hp))) expect_true(all(cmp2$hp == stats::median(dat$hp))) expect_true(all(cmp2$estimate != cmp1$estimate)) # newdata = 'marginalmeans' dat <- mtcars dat$gear <- factor(dat$gear) dat$cyl <- factor(dat$cyl) dat$am <- factor(dat$am) mod <- lm(mpg ~ gear + cyl + am, data = dat) cmp <- comparisons(mod, newdata = "marginalmeans", variables = "gear") cmp <- tidy(cmp) emm <- emmeans(mod, specs = "gear") emm <- data.frame(emmeans::contrast(emm, method = "trt.vs.ctrl1")) expect_equivalent(cmp$estimate, emm$estimate) expect_equivalent(cmp$std.error, emm$SE, tolerance = 1e-6) # Issue #624: reserved "group" word in `by` and `newdata` but not in model. 
dat <- transform(mtcars, group = cyl) mod <- lm(mpg ~ hp, data = dat) expect_error(slopes(mod, newdata = dat, by = "group"), pattern = "forbidden") expect_inherits(slopes(mod, newdata = dat, by = "cyl"), "slopes") # the results are numerically correct, but it's a pain to get the exact same # rows as emmeans # # cross contrast: newdata = 'marginalmeans' # dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") # mod <- lm(bill_length_mm ~ species * sex + island + body_mass_g, data = dat) # cmp <- comparisons( # mod, # cross = TRUE, # newdata = "marginalmeans", # variables = list(species = "pairwise", island = "pairwise")) # emm <- emmeans(mod, specs = c("species", "island")) # emm <- data.frame(emmeans::contrast(emm, method = "trt.vs.ctrl1")) # # hack: not sure if they are well aligned # expect_equivalent(sort(cmp$estimate), sort(emm$estimate)) # expect_equivalent(sort(cmp$std.error), sort(emm$SE)) # Issue #814 data(lalonde, package = "MatchIt") if(exists("mdata")) rm(mdata) test <- function() { mdata <- lalonde m0 <- lm(re78 ~ nodegree, data = mdata) comparisons(m0, variables = "nodegree", newdata = subset(mdata, nodegree == 1)) } cmp1 <- test() mdata <- subset(lalonde, married == 1) m0 <- lm(re78 ~ nodegree, data = mdata) cmp2 <- comparisons(m0, variables = "nodegree", newdata = subset(mdata, nodegree == 1)) cmp3 <- test() expect_equal(nrow(cmp1), nrow(subset(lalonde, nodegree == 1))) expect_equal(nrow(cmp2), nrow(subset(lalonde, nodegree == 1 & married == 1))) expect_equal(nrow(cmp3), nrow(subset(lalonde, nodegree == 1))) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-mvgam.R0000644000176200001440000000412214554070103021230 0ustar liggesuserssource("helpers.R") using("marginaleffects") if (!EXPENSIVE) exit_file("EXPENSIVE") if (ON_WINDOWS) exit_file("on windows") if (!minver("base", "4.1.0")) exit_file("R 4.1.0") if (!requiet("mvgam")) exit_file("mvgam not installed") # load a pre-compiled model mod1 <- 
mvgam:::mvgam_example1 # slopes() and tidy() mfx <- slopes(mod1) ti <- tidy(mfx) expect_inherits(ti, "data.frame") expect_true(nrow(ti) == 1) expect_true(ncol(ti) >= 5) expect_true(all(c("term", "estimate", "conf.low") %in% colnames(ti))) # get_predict() with original data preds <- get_predict(mod1) expect_equal(NROW(preds), NROW(mod1$obs_data)) w <- apply(posterior_linpred(mod1, process_error = FALSE), 2, stats::median) x <- get_predict(mod1, type = "link", process_error = FALSE) expect_equivalent(w, x$estimate) # get_predict() with newdata newdat <- mod1$obs_data newdat$season <- rep(1, nrow(newdat)) w <- apply(posterior_linpred(mod1, newdata = newdat, process_error = FALSE), 2, stats::median) x <- get_predict(mod1, type = "link", newdata = newdat, process_error = FALSE) expect_equivalent(w, x$estimate) expect_equal(NROW(x), NROW(newdat)) # expectations vs response predictions() p1 <- suppressWarnings(predictions(mod1, type = "expected")) p2 <- suppressWarnings(predictions(mod1, type = "response")) expected_uncertainty <- p1$conf.high - p1$conf.low response_uncertainty <- p2$conf.high - p2$conf.low expect_true(all(expected_uncertainty < response_uncertainty)) # avg_predictions() ems <- avg_predictions(mod1) expect_equal(NROW(ems), 1) expect_true(all(c("estimate", "conf.low", "conf.high") %in% colnames(ems))) ems <- avg_predictions(mod1, variables = list(season = c(1, 6, 12))) expect_equal(NROW(ems), 3) expect_true(all(c("season", "estimate", "conf.low", "conf.high") %in% colnames(ems))) # latent_N should be an allowed type, but shouldn't work for this model expect_error(predictions(mod1, type = 'latent_N'), '"latent_N" type only available for N-mixture models', fixed = TRUE) marginaleffects/inst/tinytest/test-pkg-robustlmm.R0000644000176200001440000000101414541720224022144 0ustar liggesuserssource("helpers.R") exit_file("Conflict with logistf") if (ON_CI || ON_WINDOWS || ON_OSX) exit_file("local linux only") using("marginaleffects") requiet("robustlmm") 
requiet("emmeans") requiet("broom") requiet("lme4") # no validity mod <- robustlmm::rlmer(Reaction ~ Days + (Days | Subject), sleepstudy, rho.sigma.e = psi2propII(smoothPsi, k = 2.28), rho.sigma.b = chgDefaults(smoothPsi, k = 5.11, s = 10)) expect_predictions(predictions(mod)) expect_slopes(mod, n_unique = 1) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-sanity.R0000644000176200001440000000027714541720224020662 0ustar liggesuserssource("helpers.R") using("marginaleffects") # error: supported model classes model <- mtcars class(model) <- "junk" expect_error(slopes(model), pattern = "not supported") rm(list = ls())marginaleffects/inst/tinytest/test-logical.R0000644000176200001440000000044614541720224020763 0ustar liggesuserssource("helpers.R") using("marginaleffects") # marginaleffects: logical dat <- mtcars dat$am <- as.logical(dat$am) mod <- glm(vs ~ am + mpg, data = dat, family = binomial) mfx <- slopes(mod) expect_inherits(mfx, "marginaleffects") expect_equivalent(nrow(mfx), nrow(dat) * 2) rm(list = ls())marginaleffects/inst/tinytest/test-print.R0000644000176200001440000000253414543166074020516 0ustar liggesuserssource("helpers.R") using("marginaleffects") if (!requiet("tinysnapshot")) exit_file("tinysnapshot") using("tinysnapshot") mod <- lm(mpg ~ hp * factor(gear), mtcars) expect_snapshot_print(predictions(mod), "print-predictions") expect_snapshot_print(predictions(mod, by = "gear"), "print-predictions_by") ## guides()-related error in diffObj. 
Does not seem marginaleffects-related # expect_snapshot_print(comparisons(mod), "print-comparisons") expect_snapshot_print(comparisons(mod, by = "gear"), "print-comparisons_by") # Issue #638: keep datagrid() explicit variables in print dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/Titanic.csv") m <- glm(Survived ~ Age * PClass * SexCode, data = dat, family = binomial) p <- predictions(m, newdata = datagrid(PClass = unique, SexCode = 0:1)) expect_snapshot_print(p, "print-predictions_datagrid") # twitter Kurz request mod <- lm(mpg ~ hp + am, data = mtcars) expect_snapshot_print( comparisons(mod, variables = "am", newdata = data.frame(am = 0:1, hp = 120)), "print-comparisons_1focal_dataframe") expect_snapshot_print( comparisons(mod, variables = "am", newdata = datagrid(am = 0:1, hp = 120)), "print-comparisons_1focal_datagrid") expect_snapshot_print( predictions(mod, newdata = data.frame(am = 0:1, hp = 120)), "print-predictions_newdata") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-mice.R0000644000176200001440000000374014560035476021056 0ustar liggesuserssource("helpers.R") requiet("mice") dat <- iris dat$Sepal.Length[sample(seq_len(nrow(iris)), 40)] <- NA dat$Sepal.Width[sample(seq_len(nrow(iris)), 40)] <- NA dat$Species[sample(seq_len(nrow(iris)), 40)] <- NA dat_mice <- mice::mice(dat, m = 20, printFlag = FALSE, .Random.seed = 1024) mir <- with(dat_mice, lm(Petal.Width ~ Sepal.Length * Sepal.Width + Species)) mod <- lm(Petal.Width ~ Sepal.Length * Sepal.Width + Species, data = dat) mfx1 <- suppressWarnings(avg_slopes(mir, by = "Species")) mfx2 <- avg_slopes(mod, by = "Species") expect_inherits(mfx1, "slopes") expect_equivalent(nrow(mfx1), nrow(mfx2)) # Issue #711 data <- structure(list(id = 1:37, trt = c("soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", 
"arm", "arm", "arm", "arm", "arm"), endp = structure(c(1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L), levels = c("TRUE", "FALSE"), class = "factor")), row.names = c(NA, -37L), class = "data.frame") data$endp <- factor(data$endp, levels = c("TRUE", "FALSE")) data_miss <- data data_miss[c(1, 5, 7, 30), c("endp")] <- NA imp <- suppressWarnings(mice::mice(data_miss, m = 20, method = "pmm", maxit = 50, seed = 1000, print = FALSE)) dat_mice <- complete(imp, "all") fit_logistic <- function(dat) { mod <- glm(endp ~ trt, family = binomial(link = "logit"), data = dat) out <- avg_slopes(mod, newdata = dat) return(out) } mod_imputation <- suppressWarnings(lapply(dat_mice, fit_logistic)) manu <- suppressWarnings(summary(pool(mod_imputation), conf.int = TRUE)) fit <- with(imp, glm(endp ~ trt, family = binomial(link = "logit"))) auto <- suppressWarnings(avg_slopes(fit)) expect_equivalent(auto$estimate, manu$estimate) expect_equivalent(auto$std.error, manu$std.error, tolerance = 1e-6) source("helpers.R")marginaleffects/inst/tinytest/test-pkg-lme4.R0000644000176200001440000002522714554070103020773 0ustar liggesuserssource("helpers.R") if (!EXPENSIVE) exit_file("EXPENSIVE") using("marginaleffects") requiet("margins") requiet("haven") requiet("lme4") requiet("insight") requiet("emmeans") requiet("broom") # satterthwaite (no validity) #skip_if_not_installed("insight", minimum_version = "0.17.1") dat <- mtcars dat$cyl <- factor(dat$cyl) dat <- dat mod <-lme4::lmer(mpg ~ hp + (1 | cyl), data = dat) x <- predictions(mod) y <- predictions(mod, vcov = "satterthwaite") z <- predictions(mod, vcov = "kenward-roger") expect_true(all(x$conf.low != y$conf.low)) expect_true(all(x$conf.low != z$conf.low)) expect_true(all(y$conf.low != z$conf.low)) expect_true(all(x$p.value != y$p.value)) expect_true(all(x$p.value != z$p.value)) expect_true(all(y$p.value != z$p.value)) # kenward-roger adjusts vcov 
but not satterthwaite expect_equivalent(x$std.error, y$std.error) expect_true(all(x$std.error != z$std.error)) expect_true(all(y$std.error != z$std.error)) x <- plot_predictions(mod, condition = "hp", draw = FALSE) y <- plot_predictions(mod, condition = "hp", vcov = "satterthwaite", draw = FALSE) z <- plot_predictions(mod, condition = "hp", vcov = "kenward-roger", draw = FALSE) expect_true(all(x$conf.low != y$conf.low)) expect_true(all(x$conf.low != z$conf.low)) expect_true(all(y$conf.low != z$conf.low)) expect_equivalent(x$std.error, y$std.error) expect_true(all(x$std.error != z$std.error)) expect_true(all(y$std.error != z$std.error)) # comparisons x <- comparisons(mod) y <- comparisons(mod, vcov = "satterthwaite") z <- comparisons(mod, vcov = "kenward-roger") expect_true(all(x$conf.low != y$conf.low)) expect_true(all(x$conf.low != z$conf.low)) expect_true(all(y$conf.low != z$conf.low)) expect_true(all(x$std.error == y$std.error)) expect_true(all(x$std.error != z$std.error)) expect_true(all(y$std.error != z$std.error)) # at the mean (regression test) mfx <- slopes( mod, newdata = datagrid(), vcov = "satterthwaite") expect_inherits(mfx, "marginaleffects") # GLM not supported mod <- glmer(am ~ hp + (1 | cyl), family = binomial, data = dat) expect_error(comparisons(mod, vcov = "satterthwaite"), pattern = "Satter") expect_error(comparisons(mod, vcov = "kenward-roger"), pattern = "Satter") expect_error(predictions(mod, vcov = "satterthwaite"), pattern = "Satter") expect_error(predictions(mod, vcov = "kenward-roger"), pattern = "Satter") # type = "link" w <- predict(mod, type = "link") x <- get_predict(mod, type = "link") y <- get_predict(mod, type = "link", conf.level = .9) z <- get_predicted(mod, predict = "link") expect_equivalent(w, x$estimate) expect_equivalent(w, y$estimate) expect_equivalent(w, as.numeric(z)) # type = "response" w <- predict(mod, type = "response") x <- get_predict(mod, type = "response") y <- get_predict(mod, type = "response", conf.level = .9) 
z <- get_predicted(mod, predict = "expectation") expect_equivalent(w, x$estimate) expect_equivalent(w, y$estimate) expect_equivalent(w, as.numeric(z)) # confidence intervals (weak test) w <- get_predict(mod, conf.level = .95) x <- get_predict(mod, conf.level = .90) expect_true(all(w$conf.low < x$conf.low)) expect_true(all(w$conf.high > x$conf.high)) # no random effects: grand mean w <- predict(mod, re.form = NA, type = "response") x <- get_predict(mod, re.form = NA, type = "response") expect_equivalent(w, x$estimate) # glmer vs. stata vs. emtrends tmp <- read.csv(testing_path("stata/databases/lme4_02.csv")) mod <- glmer(y ~ x1 * x2 + (1 | clus), data = tmp, family = binomial) stata <- readRDS(testing_path("stata/stata.rds"))$lme4_glmer mfx <- merge(avg_slopes(mod), stata) expect_slopes(mod) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .01) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .01) # emtrends mod <- glmer(y ~ x1 + x2 + (1 | clus), data = tmp, family = binomial) mfx <- slopes(mod, variables = "x1", newdata = datagrid(x1 = 0, x2 = 0, clus = 1), type = "link") em <- emtrends(mod, ~x1, "x1", at = list(x1 = 0, x2 = 0, clus = 1)) em <- tidy(em) expect_equivalent(mfx$estimate, em$x1.trend) expect_equivalent(mfx$std.error, em$std.error, tolerance = 1e-5) # grand mean with new data nd <- datagrid(model = mod, clus = NA, x1 = -1:1) w <- predict(mod, newdata = nd, re.form = NA, type = "response") x <- get_predict(mod, newdata = nd, re.form = NA) y <- predictions(mod, newdata = nd, re.form = NA, type = "response") expect_equivalent(w, x$estimate) expect_equivalent(w, y$estimate) #lme4::lmer vs. 
stata tmp <- read.csv(testing_path("stata/databases/lme4_01.csv")) mod <- lme4::lmer(y ~ x1 * x2 + (1 | clus), data = tmp) stata <- readRDS(testing_path("stata/stata.rds"))$lme4_lmer mfx <- merge(avg_slopes(mod), stata) expect_slopes(mod) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .001) # emtrends mod <-lme4::lmer(y ~ x1 + x2 + (1 | clus), data = tmp) mfx <- slopes(mod, variables = "x1", newdata = datagrid(x1 = 0, x2 = 0, clus = 1)) em <- emtrends(mod, ~x1, "x1", at = list(x1 = 0, x2 = 0, clus = 1)) em <- tidy(em) expect_equivalent(mfx$estimate, em$x1.trend) expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) # vs. margins (dydx only) tmp <- read.csv(testing_path("stata/databases/lme4_02.csv")) mod <- lme4::glmer(y ~ x1 * x2 + (1 | clus), data = tmp, family = binomial) res <- slopes(mod, vcov = FALSE) mar <- margins::margins(mod) expect_true(expect_margins(res, mar, tolerance = 1e-2)) tmp <- read.csv(testing_path("stata/databases/lme4_01.csv")) mod <- lme4::lmer(y ~ x1 * x2 + (1 | clus), data = tmp) res <- slopes(mod, vcov = FALSE) mar <- margins::margins(mod) expect_true(expect_margins(res, mar)) # sanity check on dpoMatrix tmp <- read.csv(testing_path("stata/databases/lme4_02.csv")) mod <- lme4::glmer(y ~ x1 * x2 + (1 | clus), data = tmp, family = binomial) k <- slopes(mod, vcov = as.matrix(stats::vcov(mod))) expect_inherits(k, "data.frame") # bug stay dead: tidy without std.error tmp <- read.csv(testing_path("stata/databases/lme4_02.csv")) mod <- lme4::glmer(y ~ x1 * x2 + (1 | clus), data = tmp, family = binomial) tid <- avg_slopes(mod, vcov = FALSE) expect_inherits(tid, "data.frame") expect_equivalent(nrow(tid), 2) # predictions: glmer: no validity tmp <- read.csv(testing_path("stata/databases/lme4_02.csv")) tmp$clus <- as.factor(tmp$clus) tmp <- tmp model <- lme4::glmer(y ~ x1 * x2 + (1 | clus), data = tmp, family = binomial) pred1 <- predictions(model, newdata 
= datagrid()) pred2 <- predictions(model, newdata = head(tmp)) expect_predictions(pred1, n_row = 1) expect_predictions(pred2, n_row = 6) # glmer.nb: marginaleffects vs. emtrends set.seed(101) dd <- expand.grid( f1 = factor(1:3), f2 = LETTERS[1:2], g = 1:9, rep = 1:15, KEEP.OUT.ATTRS = FALSE) dd$x <- rnorm(nrow(dd)) mu <- 5 * (-4 + with(dd, as.integer(f1) + 4 * as.numeric(f2))) dd$y <- rnbinom(nrow(dd), mu = mu, size = 0.5) dd <- dd model <- suppressMessages(glmer.nb(y ~ f1 * f2 + (1 | g), data = dd, verbose = FALSE)) void <- capture.output( expect_slopes(model, n_unique = 2) ) # emtrends mod <- suppressMessages(glmer.nb(y ~ x + (1 | g), data = dd, verbose = FALSE)) mfx <- slopes(mod, variables = "x", newdata = datagrid(g = 2), type = "link") em <- emtrends(mod, ~x, "x", at = list(g = 2)) em <- tidy(em) expect_equivalent(mfx$estimate, em$x.trend) expect_equivalent(mfx$std.error, em$std.error, tolerance = 1e-3) # margins mar <- tidy(margins(mod)) mfx <- avg_slopes(mod) expect_equivalent(mfx$estimate, mar$estimate, tolerance = .0001) expect_equivalent(mfx$std.error, mar$std.error, tolerance = .0001) # population-level # Some contrasts are identical with include_random TRUE/FALSE because on Time has a random effect mod <- suppressMessages(lmer( weight ~ 1 + Time + I(Time^2) + Diet + Time:Diet + I(Time^2):Diet + (1 + Time + I(Time^2) | Chick), data = ChickWeight)) mfx2 <- slopes( mod, newdata = datagrid( Chick = NA, Diet = 1:4, Time = 0:21), re.form = NA) mfx3 <- slopes( mod, newdata = datagrid( Chick = "1", Diet = 1:4, Time = 0:21)) expect_inherits(mfx2, "marginaleffects") expect_inherits(mfx3, "marginaleffects") mfx2$estimate != mfx3$estimate pred2 <- predictions( mod, newdata = datagrid( Chick = NA, Diet = 1:4, Time = 0:21), re.form = NA) pred3 <- predictions( mod, newdata = datagrid( Chick = "1", Diet = 1:4, Time = 0:21)) expect_inherits(pred2, "predictions") expect_inherits(pred3, "predictions") expect_true(all(pred2$estimate != pred3$estimate)) # sattertwhaite tmp 
<- mtcars tmp$cyl <- factor(tmp$cyl) tmp$am <- as.logical(tmp$am) tmp <- tmp mod <-lme4::lmer(mpg ~ hp + am + (1 | cyl), data = tmp) mfx <- slopes(mod, vcov = "kenward-roger") cmp <- comparisons(mod, vcov = "kenward-roger") cmp2 <- comparisons(mod) mfx2 <- slopes(mod) expect_equivalent(mfx$estimate, cmp$estimate) expect_equivalent(mfx$std.error, cmp$std.error, tolerance = .0001) expect_equivalent(attr(mfx, "vcov.type"), "Kenward-Roger") expect_equivalent(attr(cmp, "vcov.type"), "Kenward-Roger") # Issue #436 # e = number of events # n = total dat <- data.frame( e = c( 1, 1, 134413, 92622, 110747, 3625, 35, 64695, 19428, 221, 913, 13, 5710, 121, 1339, 1851, 637, 20, 7, 10, 2508), n = c( 165, 143, 10458616, 5338995, 6018504, 190810, 1607, 2504824, 471821, 5158, 15027, 205, 86371, 1785, 10661, 14406, 4048, 102, 916, 1079, 242715), year = round(runif(21, min = 1, max = 24)), sid = as.factor(1:21)) mod <- glmer( cbind(e, n - e) ~ 1 + year + (1 | sid), data = dat, family = binomial()) p <- predictions( mod, newdata = datagrid( newdata = dat, e = 1, n = 160, year = 1:5, sid = NA), re.form = NA) expect_predictions(p) cmp <- comparisons(mod, variables = "year", newdata = datagrid( newdata = dat, e = 1, n = 160, year = 1:5, sid = NA), re.form = NA) expect_inherits(cmp, "comparisons") # Issue #651: satterthwaite not supported for avg_*() because lmerTest needs a # `data` argument and model matrix, but here we compute the average over several # units of observations. 
d <- sleepstudy d$Cat <- sample(c("A", "B"), replace = TRUE, size = nrow(d)) fit <- lmer(Reaction ~ Days + Cat + (1 | Subject), d) expect_error( avg_comparisons(fit, vcov = "satterthwaite"), pattern = "not supported") expect_error( avg_predictions(fit, vcov = "satterthwaite"), pattern = "not supported") cmp1 <- comparisons(fit, newdata = datagrid(Cat = unique), vcov = "satterthwaite") cmp2 <- comparisons(fit, newdata = datagrid(Cat = unique)) expect_true(all(cmp1$conf.low != cmp2$conf.low)) expect_true(all(cmp1$std.error == cmp2$std.error)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-mgcv.R0000644000176200001440000001321414541720224021061 0ustar liggesuserssource("helpers.R") # if (!EXPENSIVE) exit_file("EXPENSIVE") using("marginaleffects") requiet("mgcv") requiet("emmeans") requiet("tibble") requiet("tsModel") # marginaleffects vs. emtrends set.seed(2) void <- capture.output(dat <- gamSim(1, n = 400, dist = "normal", scale = 2)) void <- capture.output(dat2 <- gamSim(1, n = 2000, dist = "poisson", scale = .1)) dat <- dat dat2 <- dat2 m1 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) m2 <- mgcv::gam(y ~ te(x0, x1, k = 7) + s(x2) + s(x3), data = dat, method = "REML") m3 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3) + ti(x1, x2, k = 6), data = dat, method = "REML") m4 <- mgcv::gam(y ~ s(x0, x1, k = 40) + s(x2) + s(x3), data = dat, method = "REML") m5 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat, method = "REML", select = TRUE) m6 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), sp = c(0.01, -1, -1, -1), data = dat) m7 <- mgcv::gam(y ~ s(x0, sp = .01) + s(x1) + s(x2) + s(x3), data = dat) m8 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), min.sp = c(0.001, 0.01, 0, 10), data = dat) m9 <- mgcv::gam(y ~ s(x0, bs = "cr") + s(x1, bs = "cr") + s(x2, bs = "cr") + s(x3, bs = "cr"), family = poisson, data = dat2, method = "REML") expect_slopes(m1) expect_slopes(m2) expect_slopes(m3) expect_slopes(m4) expect_slopes(m5) expect_slopes(m6) 
expect_slopes(m7) expect_slopes(m8) expect_slopes(m9) # emtrends mfx <- slopes(m1, variables = "x1", newdata = datagrid( x1 = 0, x2 = 0, x3 = 0), type = "link") # TODO: emmeans no longer seems to work # em <- emtrends(m1, specs = ~x1, var = "x1", at = list(x1 = 0, x2 = 0, x3 = 0)) # em <- tidy(em) # expect_equivalent(mfx$estimate, em$x1.trend) # expect_equivalent(mfx$std.error, em$std.error, tolerance = .0001) # predictions: no validity set.seed(2) void <- capture.output(dat <- gamSim(1, n = 400, dist = "normal", scale = 2)) dat <- dat mod <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) pred1 <- predictions(mod) pred2 <- predictions(mod, newdata = head(dat)) expect_predictions(pred1, n_row = nrow(dat)) expect_predictions(pred2, n_row = 6) # Issue #364: predictions confidence intervals for binomial models void <- capture.output( dat <- suppressMessages(gamSim(1, n = 400, dist = "binary", scale = .33))) m <- bam( y ~ s(x0) + s(x1) + s(x2) + s(x3), family = binomial, data = dat, method = "REML" ) p <- predictions(m) expect_true("conf.low" %in% colnames(p)) expect_true("conf.high" %in% colnames(p)) # Issue #363: matrix column in predictors test1 <- function(x, z, sx = 0.3, sz = 0.4) { x <- x * 20 (pi**sx * sz) * (1.2 * exp(-(x - 0.2)^2 / sx^2 - (z - 0.3)^2 / sz^2) + 0.8 * exp(-(x - 0.7)^2 / sx^2 - (z - 0.8)^2 / sz^2)) } n <- 500 x <- runif(n) / 20 z <- runif(n) f <- test1(x, z) y <- f + rnorm(n) * 0.2 df <- tibble::tibble(y, x, z) df <- poorman::mutate( df, x_lags = tsModel::Lag(x, 0:10), L = matrix(0:10, nrow = 1)) b <- mgcv::gam(y ~ s(z) + te(x_lags, L), data = df) mfx <- suppressWarnings(slopes(b)) cmp <- suppressWarnings(comparisons(b)) pre <- predictions(b) expect_inherits(pre, "predictions") expect_inherits(mfx, "marginaleffects") expect_inherits(cmp, "comparisons") # only one regressor since others are matrix columns expect_true(all(mfx$term == "z")) expect_true(all(cmp$term == "z")) expect_error(suppressWarnings(slopes(b, variables = "L")), pattern 
= "no valid") expect_error(suppressWarnings(comparisons(b, variables = "L")), pattern = "no valid") expect_warning(plot_predictions(b, condition = "z"), pattern = "Matrix columns") expect_warning(plot_slopes(b, variables = "L", condition = "z"), pattern = "Matrix columns") # Issue #365: exclude argument changes predictions void <- capture.output( dat <- gamSim(1, n = 400, dist = "normal", scale = 2) ) b <- bam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) p1 <- predictions(b) p2 <- predictions(b, exclude = "s(x3)") expect_true(all(p1$estimate != p2$estimate)) # exclude a smooth requiet("itsadug") set.seed(1024) data(simdat) simdat$Subject <- as.factor(simdat$Subject) model <- bam(Y ~ Group + s(Time, by = Group) + s(Subject, bs = "re"), data = simdat) nd <- datagrid( model = model, Subject = "a01", Group = "Adults") expect_equivalent( predictions(model, newdata = nd)$estimate, predict(model, newdata = nd)[1]) expect_equivalent( predictions(model, newdata = nd, exclude = "s(Subject)")$estimate, predict(model, newdata = nd, exclude = "s(Subject)")[1]) mfx <- slopes(model, newdata = "mean", variables = "Time", type = "link") emt <- suppressMessages(data.frame( emtrends(model, ~Time, "Time", at = list(Time = 1000, Subject = "a01", Group = "Adults")))) expect_equivalent(mfx$estimate, emt$Time.trend, tolerance = 1e-2) expect_equivalent(mfx$std.error, emt$SE, tolerance = 1e-3) # Issue #545 p <- plot_slopes(model, variables = "Time", condition = "Time", draw = FALSE) expect_true(nrow(p) > 1) # Issue #844 df <- transform(mtcars, gear = as.integer(gear)) mod <- gam( gear ~ s(hp) + cyl, data = df, family = ocat(R = 5) ) pre <- avg_predictions(model = mod) slo <- avg_slopes(mod) cmp <- comparisons(mod) expect_inherits(pre, "predictions") expect_inherits(slo, "slopes") expect_inherits(cmp, "comparisons") # Issue #931 simdat$Subject <- as.factor(simdat$Subject) model <- bam(Y ~ Group + s(Time, by = Group) + s(Subject, bs = "re"), data = simdat) low = function(hi, lo, x) { dydx 
<- (hi - lo) / 1e-6 dydx_min <- min(dydx) x[dydx == dydx_min][1] } cmp <- comparisons(model, variables = list("Time" = 1e-6), vcov = FALSE, by = "Group", comparison = low ) expect_inherits(cmp, "comparisons") expect_equal(nrow(cmp), 2) rm(list = ls()) marginaleffects/inst/tinytest/test-pkg-bife.R0000644000176200001440000000316414541720224021035 0ustar liggesuserssource("helpers.R") exit_file("deprecated pending more tests") using("marginaleffects") requiet("bife") mod <- bife(LFP ~ AGE + I(INCH / 1000) + KID1 + KID2 + KID3 | ID, data = psid) dat <- get_modeldata(mod, additional_variables = "KID") dat <- psid <- transform(dat, KID = ifelse(KID1 > 0, 1, 0)) # marginaleffects: bife: no validity expect_slopes(mod) # predictions: bife: no validity # bife does not store the call, so get_call() does not work and get_data() can # only retrieve from model.frame, which is shorter than the full data. pred <- predictions(mod, newdata = dat, type = "response") expect_equivalent(pred$estimate, predict(mod, newdata = dat, type = "response")) expect_predictions(pred, n_row = nrow(dat)) # Issue 809 mod <- bife(LFP ~ KID + AGE + KID * AGE + log(INCH) | ID, data = psid, model = "probit") s1 <- slopes(mod, variables = "AGE", newdata = psid) setorder(s1, rowid) xb <- predict(mod, type = "link", X_new = psid) s2 <- dnorm(xb) * (coef(mod)["AGE"] + coef(mod)["KID:AGE"] * psid$KID) expect_equivalent(s1$estimate, s2, tolerance = 1e-4) s3 <- slopes(mod, variables = "AGE", by = "KID", newdata = psid) s2 <- aggregate(estimate ~ KID, FUN = mean, data = s1) expect_equivalent(s3$estimate, s2$estimate, tolerance = 1e-4) mod <- bife::bias_corr(mod) s1 <- avg_slopes(mod, variables = "AGE", by = "KID", newdata = psid) xb <- predict(mod, type = "link", X_new = psid) psid$s <- dnorm(xb) * (coef(mod)["AGE"] + coef(mod)["KID:AGE"] * psid$KID) s2 <- aggregate(s ~ KID, FUN = mean, data = psid) s2 <- s2[order(s2$KID),] s1 <- s1[order(s1$KID),] expect_equivalent(s1$estimate, s2$s, tolerance = 1e-4) rm(list = 
ls())marginaleffects/inst/tinytest/helpers.R0000644000176200001440000000371314560035476020047 0ustar liggesusersrm(list=ls()) rm(list=ls(.GlobalEnv), envir = .GlobalEnv) EXPENSIVE <- FALSE options("tinysnapshot_device" = "svglite") options("tinysnapshot_tol" = 200) options(marginaleffects_numDeriv = NULL) if (isTRUE(insight::check_if_installed("cmdstanr", quietly = TRUE))) { options("brms.backend" = "cmdstanr") } # libraries requiet <- function(package) { void <- capture.output( pkg_available <- tryCatch(suppressPackageStartupMessages(suppressWarnings(suppressMessages(tryCatch( isTRUE(require(package, warn.conflicts = FALSE, character.only = TRUE)), error = function(e) FALSE )))))) return(pkg_available) } requiet("tinytest") requiet("tinysnapshot") if (isTRUE(suppressMessages(require("tinytest"))) && packageVersion("tinytest") >= "1.4.0") { tinytest::register_tinytest_extension( "marginaleffects", c("expect_slopes", "expect_predictions", "expect_margins")) } # common names of datasets, often assigned to global environment common <- c("dat", "tmp", "d", "k", "mod", "tmp1", "tmp2", "test1", "test2", "threenum") suppressWarnings(rm(list = common, envir = .GlobalEnv)) suppressWarnings(rm(list = common)) # avoids a `timedatectl`` warning Sys.setenv(TZ="America/New_York") # snapshots options(width = 10000) options(digits = 5) ON_CRAN <- !identical(Sys.getenv("R_NOT_CRAN"), "true") ON_GH <- identical(Sys.getenv("R_GH"), "true") ON_CI <- isTRUE(ON_CRAN) || isTRUE(ON_GH) ON_WINDOWS <- isTRUE(Sys.info()[['sysname']] == "Windows") ON_OSX <- isTRUE(Sys.info()[['sysname']] == "Darwin") minver <- function(pkg, ver = NULL) { ins <- try(utils::packageVersion(pkg), silent = TRUE) if (is.null(ver)) { isTRUE(inherits(ins, "try-error")) } else { isTRUE(as.character(ins) >= ver) } } testing_path <- function(x) { wd <- tinytest::get_call_wd() if (isTRUE(wd != "")) { out <- x } else { out <- paste0(wd, "/", x) } out <- gsub("^\\/", "", out) return(out) } 
marginaleffects/inst/tinytest/test-pkg-aod.R0000644000176200001440000000104414541720224020666 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("aod") # betabin: no validity dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/aod/orob2.csv") # character variables should be padded, but I am lazy mod <- betabin(cbind(y, n - y) ~ seed, ~ 1, data = dat) expect_error(slopes(mod), pattern = "support.*character") # factor variables work dat$seed <- factor(dat$seed) mod <- betabin(cbind(y, n - y) ~ seed, ~ 1, data = dat) expect_slopes(mod, n_unique = 1) pre <- predictions(mod) expect_predictions(pre) rm(list = ls())marginaleffects/inst/tinytest/test-contrast.R0000644000176200001440000000674314541720224021214 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("emmeans") # contrast as difference and CI make sense # problem reported with suggested fix by E.Book in Issue 58 dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") dat$large_penguin <- ifelse(dat$body_mass_g > median(dat$body_mass_g, na.rm = TRUE), 1, 0) dat <- dat mod <- glm(large_penguin ~ bill_length_mm + flipper_length_mm + species, data = dat, family = binomial) ti <- avg_slopes(mod) reject_ci <- ti$conf.high < 0 | ti$conf.low > 0 reject_p <- ti$p.value < 0.05 expect_equivalent(reject_ci, reject_p) # bug be dead: all levels appear tmp <- mtcars tmp$am <- as.logical(tmp$am) tmp <- tmp mod <- lm(mpg ~ am + factor(cyl), tmp) mfx = slopes(mod, newdata = datagrid(cyl = c(4, 6))) expect_equivalent(nrow(mfx), 6) # numeric contrasts mod <- lm(mpg ~ hp, data = mtcars) contr1 <- comparisons(mod, variables = list("hp" = 1)) contr2 <- comparisons(mod, variables = list("hp" = "iqr")) contr3 <- comparisons(mod, variables = list("hp" = "minmax")) contr4 <- comparisons(mod, variables = list("hp" = "sd")) contr5 <- comparisons(mod, variables = list("hp" = "2sd")) iqr <- diff(stats::quantile(mtcars$hp, probs = c(.25, .75))) * 
coef(mod)["hp"] minmax <- (max(mtcars$hp) - min(mtcars$hp)) * coef(mod)["hp"] sd1 <- sd(mtcars$hp) * coef(mod)["hp"] sd2 <- 2 * sd(mtcars$hp) * coef(mod)["hp"] expect_equivalent(contr2$estimate, rep(iqr, 32)) expect_equivalent(contr3$estimate, rep(minmax, 32)) expect_equivalent(contr4$estimate, rep(sd1, 32)) expect_equivalent(contr5$estimate, rep(sd2, 32)) # factor glm mod <- glm(am ~ factor(cyl), data = mtcars, family = binomial) pred <- predictions(mod, newdata = datagrid(cyl = mtcars$cyl)) contr <- avg_comparisons(mod) expect_equivalent(contr$estimate[1], pred$estimate[pred$cyl == 6] - pred$estimate[pred$cyl == 4]) expect_equivalent(contr$estimate[2], pred$estimate[pred$cyl == 8] - pred$estimate[pred$cyl == 4]) # emmeans w/ back-transforms is similar to comparisons with direct delta method tol <- 1e-4 dat <- mtcars dat$cyl <- as.factor(dat$cyl) dat <- dat mod <- glm(am ~ cyl, data = dat, family = binomial) # link scale cmp <- comparisons(mod, variables = list(cyl = "pairwise"), type = "link", newdata = datagrid()) emm <- emmeans(mod, specs = "cyl") emm <- emmeans::contrast(emm, method = "revpairwise", df = Inf, adjust = NULL) emm <- data.frame(confint(emm)) expect_equivalent(cmp$estimate, emm$estimate) expect_equivalent(cmp$std.error, emm$SE) expect_equivalent(cmp$conf.low, emm$asymp.LCL) expect_equivalent(cmp$conf.high, emm$asymp.UCL) # response scale cmp <- comparisons(mod, type = "response", newdata = datagrid(), variables = list(cyl = "pairwise")) emm <- emmeans(mod, specs = "cyl") emm <- emmeans::contrast(regrid(emm), method = "revpairwise", df = Inf, adjust = NULL, type = "response", ratios = FALSE) emm <- data.frame(confint(emm)) expect_equivalent(cmp$estimate, emm$estimate, tolerance = tol) expect_equivalent(cmp$std.error, emm$SE, tolerance = tol) expect_equivalent(cmp$conf.low, emm$asymp.LCL, tolerance = tol) expect_equivalent(cmp$conf.high, emm$asymp.UCL, tolerance = tol) # smart contrast labels dat$am <- as.logical(dat$am) dat$cyl <- 
as.factor(dat$cyl) dat$gear <- as.character(dat$gear) dat <- dat mod <- lm(mpg ~ hp + am + cyl + gear, data = dat) cmp1 <- comparisons( mod, newdata = "mean") |> dplyr::arrange(term) expect_equivalent( cmp1$contrast, c("TRUE - FALSE", "6 - 4", "8 - 4", "4 - 3", "5 - 3", "+1")) rm(list = ls())marginaleffects/inst/tinytest/test-character.R0000644000176200001440000000224714541720224021306 0ustar liggesuserssource("helpers.R") using("marginaleffects") # character variable dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") dat$large_penguin <- ifelse(dat$body_mass_g > median(dat$body_mass_g, na.rm = TRUE), 1, 0) dat <- dat mod <- glm(large_penguin ~ bill_length_mm + flipper_length_mm + species, data = dat, family = binomial) mfx <- slopes(mod) tid <- tidy(mfx) expect_true(all(c("bill_length_mm", "flipper_length_mm", "species") %in% mfx$term)) expect_true(all(c("Chinstrap - Adelie", "Gentoo - Adelie") %in% mfx$contrast)) # predictions: missing character levels dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") dat$large_penguin <- ifelse(dat$body_mass_g > median(dat$body_mass_g, na.rm = TRUE), 1, 0) mod <- glm(large_penguin ~ bill_length_mm + flipper_length_mm + species, data = dat, family = binomial) # case 1 pred <- predictions(mod, newdata = datagrid(species = "Chinstrap")) expect_inherits(pred, "data.frame") expect_equivalent(1, nrow(pred)) # case 2 pred <- predictions(mod, newdata = datagrid(species = c("Chinstrap", "Gentoo"))) expect_equivalent(nrow(pred), 2) rm(list = ls())marginaleffects/inst/tinytest/test-missing.R0000644000176200001440000000131214541720224021013 0ustar liggesuserssource("helpers.R") using("marginaleffects") tmp <- mtcars tmp$am <- as.logical(tmp$am) tmp$gear <- as.factor(tmp$gear) for (i in seq_along(tmp)) { tmp[[i]][sample(1:nrow(tmp), 1)] <- NA } tmp <- tmp # original data with NAs do not pose problems in glm and lm. 
mod1 <- lm(hp ~ mpg + drat + wt + gear, data = tmp) mod2 <- glm(vs ~ mpg + drat + wt + gear, data = tmp, family = binomial) expect_inherits(tidy(slopes(mod1)), "data.frame") expect_inherits(tidy(slopes(mod2)), "data.frame") # newdata with NAs do not pose problems in lm. mod <- lm(hp ~ mpg + drat + wt + factor(gear), data = tmp) mfx <- slopes(mod, newdata = datagrid(drat = c(NA, 10))) expect_inherits(tidy(mfx), "data.frame") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-nnet.R0000644000176200001440000001410314560035476021100 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("nnet") requiet("carData") requiet("prediction") # multinom group estimates TitanicSurvival <- "https://vincentarelbundock.github.io/Rdatasets/csv/carData/TitanicSurvival.csv" TitanicSurvival <- read.csv(TitanicSurvival) TitanicSurvival$age3 <- cut( TitanicSurvival$age, include.lowest = TRUE, right = FALSE, dig.lab = 4, breaks = c(0, 25, 50, 80)) m1 <- nnet::multinom(passengerClass ~ sex * age3, data = TitanicSurvival, trace = FALSE) mfx <- slopes( m1, type = "probs", variables = "sex", by = "age3", newdata = datagrid( age3 = c("[0,25)","[25,50)","[50,80]"), grid_type = "counterfactual")) expect_equivalent(nrow(mfx), 9) # multinom basic dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) void <- capture.output( mod <- nnet::multinom(factor(y) ~ x1 + x2, data = dat, quiet = true) ) expect_slopes(mod, type = "probs") # marginaleffects summary dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) void <- capture.output( mod <- nnet::multinom(factor(y) ~ x1 + x2, data = dat, quiet = true) ) s <- avg_slopes(mod, type = "probs") expect_false(anyNA(s$estimate)) expect_false(anyNA(s$std.error)) # multinom vs. 
Stata stata <- readRDS(testing_path("stata/stata.rds"))$nnet_multinom_01 dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) dat$y <- as.factor(dat$y) void <- capture.output( mod <- nnet::multinom(y ~ x1 + x2, data = dat, quiet = true) ) mfx <- avg_slopes(mod, type = "probs") mfx <- merge(mfx, stata, all = TRUE) mfx <- na.omit(mfx) expect_true(nrow(mfx) == 6) # na.omit doesn't trash everything # standard errors match now!! expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .001) # set_coef tmp <- mtcars tmp$cyl <- as.factor(tmp$cyl) void <- capture.output( old <- nnet::multinom(cyl ~ hp + am + mpg, data = tmp, quiet = true) ) b <- rep(0, length(coef(old))) new <- set_coef(old, b) expect_true(all(coef(new) == 0)) b <- rep(1, length(coef(new))) new <- set_coef(old, b) expect_true(all(coef(new) == 1)) # bugfix: nnet single row predictions dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) void <- capture.output( mod <- nnet::multinom(factor(y) ~ x1 + x2, data = dat, quiet = true) ) mfx <- slopes(mod, variables = "x1", newdata = datagrid(), type = "probs") expect_inherits(mfx, "data.frame") expect_equivalent(nrow(mfx), 4) mfx <- slopes(mod, newdata = datagrid(), type = "probs") expect_inherits(mfx, "data.frame") expect_equivalent(nrow(mfx), 8) # predictions with multinomial outcome set.seed(1839) n <- 1200 x <- factor(sample(letters[1:3], n, TRUE)) y <- vector(length = n) y[x == "a"] <- sample(letters[4:6], sum(x == "a"), TRUE) y[x == "b"] <- sample(letters[4:6], sum(x == "b"), TRUE, c(1 / 4, 2 / 4, 1 / 4)) y[x == "c"] <- sample(letters[4:6], sum(x == "c"), TRUE, c(1 / 5, 3 / 5, 2 / 5)) dat <- data.frame(x = x, y = factor(y)) tmp <- as.data.frame(replicate(20, factor(sample(letters[7:9], n, TRUE)))) dat <- cbind(dat, tmp) void <- capture.output({ m1 <- nnet::multinom(y ~ x, dat) m2 <- nnet::multinom(y ~ ., dat) }) # class outcome not supported 
expect_error(predictions(m1, type = "class"), pattern = "type") expect_error(slopes(m1, type = "class"), pattern = "type") # small predictions pred1 <- predictions(m1, type = "probs") pred2 <- predictions(m1, type = "probs", newdata = "marginalmeans") expect_predictions(pred1, n_row = nrow(dat) * 3) expect_predictions(pred2, n_row = 9) # large predictions idx <- 3:5 n_row <- sapply(dat[, idx], function(x) length(unique(x))) n_row <- prod(n_row) * length(unique(dat$y)) expect_error(predictions(m2, type = "probs", newdata = "mean"), pattern = "Cross product") # massive prediction raises error expect_error(predictions(m2, type = "probs"), pattern = "") # bugs stay dead #218 set.seed(42) dat <- data.frame( y = factor(sample(c(rep(4, 29), rep(3, 15), rep(2, 4), rep(1, 2)))), x = factor(sample(c(rep(1, 17), rep(2, 12), rep(2, 12), rep(1, 9)))), z1 = sample(1:2, 50, replace=TRUE), z2=runif(50, 16, 18)) void <- capture.output( model <- nnet::multinom(y ~ x + z1 + z2, data = dat, verbose = FALSE, hessian = TRUE) ) mfx <- slopes(model, type = "probs") expect_inherits(mfx, "marginaleffects") # bug: single row newdata produces vector mod <- nnet::multinom(factor(gear) ~ mpg, data = mtcars, trace = FALSE) p <- predictions(mod, newdata = head(mtcars, 1), type = "latent") expect_equivalent(nrow(p), 3) # Issue #476: binary dependent variable x <- 1:1000 n <- length(x) y1 <- rbinom(n, 10, prob = plogis(-10 + 0.02 * x)) y2 <- 10 - y1 dat <- data.frame(x, y1, y2) dat_long <- tidyr::pivot_longer(dat, !x, names_to = "y", values_to = "count") dat_long <- transform(dat_long, y = factor(y, levels = c("y2", "y1"))) fit_multinom <- nnet::multinom(y ~ x, weights = count, data = dat_long, trace = FALSE) p <- predictions(fit_multinom, newdata = datagrid(x = unique), type = "latent") expect_inherits(p, "predictions") # Issue #482: sum of predicted probabilities mod <- nnet::multinom(factor(cyl) ~ mpg + am, data = mtcars, trace = FALSE) by <- data.frame( by = c("4,6", "4,6", "8"), group = 
as.character(c(4, 6, 8))) p1 <- predictions(mod, newdata = "mean") p2 <- predictions(mod, newdata = "mean", byfun = sum, by = by) p3 <- predictions(mod, newdata = "mean", byfun = mean, by = by) expect_equivalent(nrow(p1), 3) expect_equivalent(nrow(p2), 2) expect_equivalent(nrow(p3), 2) expect_equivalent(sum(p1$estimate[1:2]), p2$estimate[1]) expect_equivalent(mean(p1$estimate[1:2]), p3$estimate[1]) # Issue #788: match with {predictions::prediction} reg <- nnet::multinom(poverty ~ religion + degree + gender, family = multinomial(refLevel = 1), trace = FALSE, data = carData::WVS) p1 <- avg_predictions(reg, variables = list(religion = c("no"), gender = c("male"))) p1 <- p1$estimate p2 <- prediction::prediction(reg , at = list(religion=c("no"), gender=c("male"))) p2 <- colMeans(p2[, grep("^Pr", colnames(p2))]) expect_equivalent(p1, p2, ignore_attr = TRUE) rm(list = ls())marginaleffects/inst/tinytest/test-plot_predictions.R0000644000176200001440000001775114541720224022741 0ustar liggesuserssource("helpers.R") using("marginaleffects") if (!requiet("tinysnapshot")) exit_file("tinysnapshot") if (ON_CI || ON_WINDOWS || ON_OSX) exit_file("local linux only") using("tinysnapshot") requiet("nnet") # Issue #567: threenum and minmax are mixed up dat <- transform(mtcars, am_fct = factor(am)) mod <- lm(wt ~ am_fct * mpg, data = dat) # minmax p1 <- plot_predictions( mod, draw = FALSE, condition = list("am_fct", mpg = "minmax")) p2 <- predictions( mod, newdata = datagrid(mpg = range, am_fct = 0:1)) p2$am_fct <- as.numeric(as.character(p2$am_fct)) data.table::setorder(p2, am_fct, mpg) expect_equivalent(p1$estimate, p2$estimate) p1$condition1 <- as.character(p1$am_fct) p1$condition2 <- as.character(p1$mpg) x <- p1[p1$condition1 == "1" & p1$condition2 == "Min", "estimate"] y <- p2[p2$am_fct == 1 & p2$mpg == 10.4, "estimate"] expect_equivalent(x, y) # threenum threenum <<- c( mean(dat$mpg) - sd(dat$mpg), mean(dat$mpg), mean(dat$mpg) + sd(dat$mpg)) p1 <- plot_predictions( mod, draw = 
FALSE, condition = list("am_fct", mpg = "threenum")) p2 <- predictions( mod, newdata = datagrid(mpg = threenum, am_fct = 0:1)) p2$am_fct <- as.numeric(as.character(p2$am_fct)) data.table::setorder(p2, am_fct, mpg) expect_equivalent(p1$estimate, p2$estimate) # Issue #550 x <- abs(rnorm(100, sd = 5)) + 5 y <- exp(2 + 0.3 * x + rnorm(100, sd = 0.4)) dat <- data.frame(x = x, y = y) dat[["log_x"]] <- log(x) dat[["log_y"]] <- log(y) model <- lm(log(y) ~ 1 + log(x), data = dat) p <- plot_predictions(model, condition = "x", draw = FALSE) expect_false(any(is.na(p$estimate))) expect_equal(nrow(p), 50) # points: alpha mod <- lm(mpg ~ hp * wt * am, data = mtcars) p <- plot_predictions(mod, condition = list("hp", "wt" = "threenum"), points = .5) expect_snapshot_plot(p, "plot_predictions-alpha") # two conditions mod <- lm(mpg ~ hp * wt * am, data = mtcars) p <- plot_predictions(mod, condition = c("hp", "wt")) expect_snapshot_plot(p, "plot_predictions") # gray scale mod <- lm(mpg ~ hp * wt * am, data = mtcars) p <- plot_predictions(mod, condition = c("hp", "wt"), gray = TRUE) expect_snapshot_plot(p, "plot_predictions-gray") # continuous vs. 
categorical x-axis mod <- lm(mpg ~ hp * wt * factor(cyl), mtcars) p <- plot_predictions(mod, condition = c("cyl", "wt")) expect_snapshot_plot(p, "plot_predictions_vs_categorical_x_axis") p <- plot_predictions(mod, condition = c("wt", "cyl")) expect_snapshot_plot(p, "plot_predictions_vs_continuous_x_axis") # conf.level in plots mod <- lm(mpg ~ hp * wt * am, data = mtcars) p1 <- plot_predictions(mod, condition = "hp", conf.level = .99) p2 <- plot_predictions(mod, condition = "hp", conf.level = .4) expect_snapshot_plot(p1, "plot_predictions_conf_99") expect_snapshot_plot(p2, "plot_predictions_conf_40") p1 <- plot_predictions(mod, condition = "hp", conf.level = .99, draw = FALSE) p2 <- data.frame(predict(mod, newdata = p1, se.fit = TRUE)) expect_equivalent(p1$estimate, p2$fit) expect_equivalent(p1$std.error, p2$se.fit, tolerance = 1e-6) expect_equivalent( p1$conf.low, p2$fit - qnorm(.995) * p2$se.fit, tolerance = 1e-6) expect_equivalent( p1$conf.high, p2$fit + qnorm(.995) * p2$se.fit, tolerance = 1e-6) # link vs response mod <- glm(am ~ hp + wt, data = mtcars, family = binomial) p1 <- plot_predictions(mod, condition = "hp", type = "response") p2 <- plot_predictions(mod, condition = "hp", type = "link") expect_snapshot_plot(p1, "plot_predictions_response") expect_snapshot_plot(p2, "plot_predictions_link") # bad condition raises error mod <- lm(mpg ~ hp * wt * am, data = mtcars) expect_error(plot_predictions(mod, condition = c("bad", "wt"))) # Issue #230: glm w/ weights includes confidence intervals mod <- glm(am ~ mpg * cyl, data = mtcars, family = binomial(link = "logit"), weights = carb) p <- plot_predictions(mod, condition = c("mpg", "cyl"), draw = FALSE) expect_true("conf.low" %in% colnames(p)) expect_true("conf.high" %in% colnames(p)) # vcov #skip_if_not_installed("insight", minimum_version = "0.17.1") mod <- lm(mpg ~ hp * wt, data = mtcars) mfx0 <- plot_predictions(mod, condition = "wt", vcov = FALSE, draw = FALSE) mfx1 <- plot_predictions(mod, condition = "wt", 
draw = FALSE) mfx2 <- plot_predictions(mod, condition = "wt", vcov = "HC3", draw = FALSE) mfx3 <- plot_predictions(mod, condition = "wt", vcov = ~cyl, draw = FALSE) expect_false("conf.low" %in% colnames(mfx0)) expect_true(all(mfx1$std.error != mfx2$std.error)) expect_true(all(mfx1$std.error != mfx3$std.error)) expect_true(all(mfx2$std.error != mfx3$std.error)) expect_true(all(mfx1$conf.low != mfx2$conf.low)) expect_true(all(mfx1$conf.low != mfx3$conf.low)) expect_true(all(mfx2$conf.low != mfx3$conf.low)) # multinomial mod <- nnet::multinom(factor(gear) ~ mpg * wt + am, data = mtcars, trace = FALSE) p1 <- plot_predictions(mod, condition = c("mpg", "group"), type = "probs") p2 <- plot_comparisons(mod, variables = "mpg", condition = c("wt", "group"), type = "probs") p3 <- plot_slopes(mod, variables = "mpg", condition = c("wt", "group"), type = "probs") expect_inherits(p1, "gg") expect_inherits(p2, "gg") expect_inherits(p3, "gg") # Issue #498: New features mod <- lm(mpg ~ hp * wt * am, data = mtcars) # condition list must be named or single characters p <- plot_predictions(mod, condition = list("hp", "wt" = c(1.5, 2.5, 3.5), "am" = 0:1)) expect_inherits(p, "gg") p <- plot_predictions(mod, condition = list("hp" = seq(110, 140), "wt" = c(1.5, 2.5, 3.5))) expect_inherits(p, "gg") p <- plot_predictions(mod, condition = list("hp", "wt" = "threenum", "am" = "minmax")) expect_inherits(p, "gg") expect_error( plot_predictions(mod, condition = list(100:110, "wt" = c(1.5, 2.5, 3.5))), pattern = "condition") # backward compatibility dat <- transform(mtcars, am_fct = factor(am)) mod <- lm(wt ~ am_fct * mpg, data = dat) p1 <- plot_predictions( mod, condition = list("am_fct", mpg = "minmax")) expect_inherits(p1, "gg") # Issue #609: `plot_*(draw=FALSE)` returns original column names instead of `condition1` mod <- lm(mpg ~ hp * qsec * factor(gear), data = mtcars) p <- plot_predictions(mod, condition = list("hp", "qsec", "gear")) expect_inherits(p, "gg") p <- plot_predictions(mod, 
condition = c("hp", "qsec", "gear")) expect_inherits(p, "gg") p <- plot_predictions(mod, condition = list("hp", "qsec" = "minmax")) expect_inherits(p, "gg") p <- plot_predictions(mod, condition = list("hp", "qsec" = "minmax", "gear"), draw = FALSE) expect_true("qsec" %in% colnames(p)) p <- plot_comparisons(mod, variables = "hp", condition = list("qsec" = "minmax", "gear"), draw = FALSE) expect_true("qsec" %in% colnames(p)) p <- plot_slopes(mod, variables = "hp", condition = list("qsec" = "minmax", "gear"), draw = FALSE) expect_true("qsec" %in% colnames(p)) # Issue #725: `newdata` argument in plotting functions mod <- lm(mpg ~ hp + am + factor(cyl), mtcars) p1 <- plot_predictions(mod, by = "am", draw = FALSE, newdata = datagrid(am = 0:1, grid_type = "counterfactual")) p2 <- avg_predictions(mod, by = "am", draw = FALSE, newdata = datagrid(am = 0:1, grid_type = "counterfactual")) expect_equivalent(p1$estimate, p2$estimate) expect_equivalent(p1$conf.low, p2$conf.low) p3 <- plot_predictions(mod, by = "am", draw = FALSE) p4 <- avg_predictions(mod, by = "am", draw = FALSE) expect_equivalent(p3$estimate, p4$estimate) expect_equivalent(p3$conf.low, p4$conf.low) expect_true(all(p1$conf.low != p3$conf.low)) p5 <- plot_predictions(mod, condition = "am", draw = FALSE) p6 <- predictions(mod, newdata = datagrid(am = 0:1)) expect_equivalent(p5$estimate, p6$estimate) expect_equivalent(p5$conf.low, p6$conf.low) expect_true(all(p1$conf.low != p5$conf.low)) expect_true(all(p3$conf.low != p5$conf.low)) expect_error(plot_predictions(mod, condition = "am", by = "am")) expect_error(plot_predictions(mod, newdata = mtcars)) # Plot 4 variables in condition using facet_grid mod <- lm(hp~mpg*am*gear*carb, data=mtcars) p <- plot_predictions(mod, condition = list("mpg"=1:5, "am"=0:1, "gear"=1:3, "carb"=1:2)) expect_inherits(p, "gg") suppressWarnings(rm("threenum", .GlobalEnv)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-Rchoice.R0000644000176200001440000000263514541720224021506 0ustar 
liggesuserssource("helpers.R") requiet("haven") requiet("Rchoice") # hetprob() dy/dx dat <<- transform(iris, y = Sepal.Length > median(Sepal.Length)) mod <- hetprob(y ~ Petal.Width * Petal.Length | factor(Species), data = dat, link = "logit") known <- Rchoice::effect(mod)$margins mfx <- avg_slopes(mod) expect_equivalent(sort(mfx$estimate), sort(known[, "dydx"]), tol = .001) expect_equivalent(sort(mfx$std.error), sort(known[, "Std. error"]), tol = .001) # # IV probit model by MLE # # (nwincome is endogenous and heducation is the additional instrument) # dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/AER/PSID1976.csv") # dat$nwincome <- with(dat, (fincome - hours * wage)/1000) # dat$lfp <- as.numeric(dat$participation == "yes") # mod <- ivpml( # lfp ~ education + experience + I(experience^2) + age + youngkids + oldkids + nwincome | # education + experience + I(experience^2) + age + youngkids + oldkids + heducation, # data = dat, # message = FALSE) # # effect(mod, asf = FALSE)$margins %>% data.frame() %>% arrange(rownames(.)) # effect(mod, asf = TRUE)$margins %>% data.frame() %>% arrange(rownames(.)) # avg_slopes(mod) # h <- 1e-4 # dat_lo <- transform(dat, nwincome = nwincome - h / 2) # dat_hi <- transform(dat, nwincome = nwincome + h / 2) # p_lo <- predict(mod, newdata = dat_lo) # p_hi <- predict(mod, newdata = dat_hi) # mean((p_hi - p_lo) / h) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-mhurdle.R0000644000176200001440000000274714541720224021576 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("mhurdle") tol <- 0.001 tol_se <- 0.001 data("Interview", package = "mhurdle") m1 <- mhurdle(shows ~ 0 | linc + smsa + age + educ + size, data = Interview, h2 = TRUE, dist = "n", method = "bhhh") m2 <- mhurdle(shows ~ educ + size | linc | smsa + age, data = Interview, h2 = FALSE, method = "bhhh", corr = TRUE, finalHessian = TRUE) # marginaleffects vs. 
margins (unit-level SEs) set.seed(1024) nd <- Interview[sample(seq_len(nrow(Interview)), 10),] mfx <- slopes(m2, newdata = nd, type = "E") mar <- margins(m2, type = "response", data = nd, unit_ses = TRUE) expect_equivalent(mfx[mfx$term == "linc", "estimate"], as.numeric(mar$dydx_linc), tolerance = tol) expect_equivalent(mfx[mfx$term == "educ", "estimate"], as.numeric(mar$dydx_educ), tolerance = tol) expect_equivalent(mfx[mfx$term == "age", "estimate"], as.numeric(mar$dydx_age), tolerance = tol) expect_equivalent(mfx[mfx$term == "linc", "std.error"], mar$SE_dydx_linc, tolerance = tol_se) expect_equivalent(mfx[mfx$term == "educ", "std.error"], mar$SE_dydx_educ, tolerance = tol_se) expect_equivalent(mfx[mfx$term == "age", "std.error"], mar$SE_dydx_age, tolerance = tol_se) # marginaleffects vs. margins: AME mfx <- avg_slopes(m2, type = "E") mfx <- mfx[match(c("age", "educ", "linc", "size", "smsa"), mfx$term),] mar <- margins(m2) mar <- summary(mar) expect_equivalent(mfx$estimate, mar$AME, tolerance = tol) expect_equivalent(mfx$std.error, mar$SE, tolerance = tol_se) rm(list = ls())marginaleffects/inst/tinytest/test-rank-deficient.R0000755000176200001440000000053714541720224022240 0ustar liggesuserssource("helpers.R") using("marginaleffects") # rank deficient dat <- mtcars dat$gear <- as.factor(dat$gear) dat$cyl <- as.factor(dat$cyl) dat <- dat m <- glm(am ~ gear * cyl, data = dat, family = binomial()) expect_warning(comparisons(m), pattern = "rank deficient") expect_inherits(suppressWarnings(comparisons(m)), "comparisons") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-mlr3verse.R0000644000176200001440000000232214541720224022045 0ustar liggesuserssource("helpers.R") requiet("mlr3verse") requiet("fmeffects") requiet("tidymodels") data("bikes", package = "fmeffects") # fit model task <- as_task_regr(x = bikes, id = "bikes", target = "count") forest <- lrn("regr.ranger")$train(task) # Plot predictions p <- plot_predictions(forest, condition = "temp", type = 
"response", newdata = bikes) expect_inherits(p, "gg") # Centered difference cmp <- avg_comparisons(forest, newdata = bikes) expect_inherits(cmp, "comparisons") # Forward difference cmp <- avg_comparisons( forest, variables = list("temp" = \(x) data.frame(x, x + 1)), newdata = bikes) effects = fme(model = forest, data = bikes, target = "count", feature = "temp", step.size = 1) expect_equivalent(effects$ame, cmp$estimate) # Average effect of a simultaneous change in multiple variables cmp <- avg_comparisons( forest, variables = c("temp", "season", "weather"), cross = TRUE, newdata = bikes) # tidymodels forest_tidy <- rand_forest(mode = "regression") |> set_engine("ranger") |> fit(count ~ ., data = bikes) cmp <- avg_comparisons(forest_tidy, newdata = bikes, type = "numeric") expect_inherits(cmp, "comparisons") marginaleffects/inst/tinytest/test-marginal_means.R0000644000176200001440000000771114560035476022341 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("emmeans") requiet("broom") requiet("insight") # Issue #438: backtransforms allows us to match `emmeans` exactly mod <- glm(vs ~ mpg + factor(cyl), data = mtcars, family = binomial) em <- emmeans(mod, ~cyl, type = "response") mm <- predictions(mod, by = "cyl", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(cyl) expect_equal(data.frame(em)$prob, mm$estimate) expect_equal(data.frame(em)$asymp.LCL, mm$conf.low, tolerance = 1e-5) expect_equal(data.frame(em)$asymp.UCL, mm$conf.high) mod <- glm(breaks ~ wool * tension, family = Gamma, data = warpbreaks) em <- suppressMessages(emmeans(mod, ~wool, type = "response", df = Inf)) mm <- predictions(mod, newdata=datagrid(grid_type="balanced"), by="wool") expect_equal(data.frame(em)$response, mm$estimate) # TODO: 1/eta link function inverts order of CI. Should we clean this up? 
expect_equal(data.frame(em)$asymp.LCL, mm$conf.high) expect_equal(data.frame(em)$asymp.UCL, mm$conf.low) # old tests used to require pre-conversion dat <- mtcars dat$am <- as.logical(dat$am) dat$cyl <- as.factor(dat$cyl) dat$vs <- as.factor(dat$vs) # marginalmeans vs. emmeans: poisson link or response #skip_if_not_installed("emmeans", minimum_version = "1.7.3") # transform -> regrid dat <- mtcars dat$am <- factor(dat$am) dat$cyl <- factor(dat$cyl) mod <- glm(gear ~ cyl + am, data = dat, family = poisson) # link mm <- predictions(mod, newdata=datagrid(grid_type="balanced"), by="cyl", type = "link") |> dplyr::arrange(cyl) em <- tidy(emmeans(mod, specs = "cyl")) expect_equivalent(mm$estimate, em$estimate, tolerance = 1e-5) expect_equivalent(mm$estimate, em$estimate, tolerance = 1e-5) # response mm <- predictions(mod, newdata=datagrid(grid_type="balanced"), by="cyl") |> dplyr::arrange(cyl) em <- tidy(emmeans(mod, specs = "cyl", type = "response")) expect_equivalent(mm$estimate, em$rate) expect_equivalent(mm$p.value, em$p.value) # simple marginal means mod <- lm(mpg ~ cyl + am + hp, dat) em <- broom::tidy(emmeans::emmeans(mod, "cyl")) me <- predictions(mod, newdata=datagrid(grid_type="balanced"), by="cyl") |> dplyr::arrange(cyl) expect_equivalent(me$estimate, em$estimate) expect_equivalent(me$std.error, em$std.error, tolerance = 1e-5) em <- broom::tidy(emmeans::emmeans(mod, "am")) me <- predictions(mod, newdata=datagrid(grid_type="balanced"), by="am") |> dplyr::arrange(am) expect_equivalent(me$estimate, em$estimate) expect_equivalent(me$std.error, em$std.error, tolerance = 1e-5) # interactions # standard errors do not match emmeans mod <- lm(mpg ~ cyl * am, dat) em <- suppressMessages(broom::tidy(emmeans::emmeans(mod, "cyl"))) me <- predictions(mod, newdata=datagrid(grid_type="balanced"), by="cyl") |> dplyr::arrange(cyl) expect_equivalent(me$estimate, em$estimate) em <- suppressMessages(broom::tidy(emmeans::emmeans(mod, "am"))) me <- suppressWarnings(predictions(mod, 
newdata=datagrid(grid_type="balanced"), by="am")) me <- me[order(me$am),] expect_equivalent(me$estimate, em$estimate) # wts mod1 <- lm(vs ~ factor(am) + factor(gear) + factor(cyl), data = mtcars) mod2 <- glm(vs ~ factor(am) + factor(gear) + mpg, data = mtcars, family = binomial) # wts = "cells" em <- data.frame(emmeans(mod1, ~am, weights = "cells")) mm <- predictions(mod1, by = "am") expect_equivalent(mm$estimate, em$emmean) expect_equivalent(mm$std.error, em$SE) # Issue #583 dat <- mtcars dat$am <- factor(dat$am) dat$vs <- factor(dat$vs) dat$cyl <- factor(dat$cyl) mod <- glm(gear ~ cyl + vs + am, data = dat, family = poisson) by <- data.frame( by = c("(4 & 6)", "(4 & 6)", "(8)"), cyl = unique(dat$cyl)) expect_inherits(predictions(mod, newdata=datagrid(grid_type="balanced"), by = by), "predictions") # Issue #620 requiet("nnet") nom <- nnet::multinom(factor(gear) ~ mpg + am * vs, data = mtcars, trace = FALSE) by <- data.frame( carb = c("1", "2", "3", "4", "6", "8"), by = c("1", "2", "3,4,6,8" |> rep(4))) cmp <- comparisons(nom, by = by) expect_equivalent(nrow(cmp), 9) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-Amelia.R0000644000176200001440000000400214560035476021321 0ustar liggesuserssource("helpers.R") requiet("Amelia") dat <- iris dat$Sepal.Length[sample(seq_len(nrow(iris)), 40)] <- NA dat$Sepal.Width[sample(seq_len(nrow(iris)), 40)] <- NA dat$Species[sample(seq_len(nrow(iris)), 40)] <- NA dat_amelia <- Amelia::amelia(dat, m = 20, noms = "Species", p2s = 0) amest <- with(dat_amelia, lm(Petal.Width ~ Sepal.Length * Sepal.Width + Species)) mod <- lm(Petal.Width ~ Sepal.Length * Sepal.Width + Species, data = dat) mfx1 <- suppressWarnings(avg_slopes(amest, by = "Species")) mfx2 <- avg_slopes(mod, by = "Species") expect_inherits(mfx1, "slopes") expect_equivalent(nrow(mfx1), nrow(mfx2)) # Issue #711 data <- structure(list(id = 1:37, trt = c( "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", "soc", 
"soc", "soc", "soc", "soc", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm", "arm"), endp = structure(c( 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L), levels = c("TRUE", "FALSE"), class = "factor")), row.names = c( NA, -37L), class = "data.frame") data$endp <- factor(data$endp, levels = c("TRUE", "FALSE")) data_miss <- data data_miss[c(1, 5, 7, 30), c("endp")] <- NA imp <- suppressWarnings(Amelia::amelia(data_miss, m = 20, noms = c("trt", "endp"), p2s = 0)) dat_amelia <- imp$imputations fit_logistic <- function(dat) { mod <- glm(endp ~ trt, family = binomial(link = "logit"), data = dat) out <- avg_slopes(mod, newdata = dat) return(out) } mod_imputation <- suppressWarnings(lapply(dat_amelia, fit_logistic)) manu <- suppressWarnings(summary(mice::pool(mod_imputation), conf.int = TRUE)) fit <- with(imp, glm(endp ~ trt, family = binomial(link = "logit"))) auto <- suppressWarnings(avg_slopes(fit)) expect_equivalent(auto$estimate, manu$estimate) expect_equivalent(auto$std.error, manu$std.error, tolerance = 1e-5) source("helpers.R")marginaleffects/inst/tinytest/test-plot_slopes.R0000644000176200001440000000737014541720224021717 0ustar liggesuserssource("helpers.R") using("marginaleffects") if (!requiet("tinysnapshot")) exit_file("tinysnapshot") if (ON_CI || ON_WINDOWS || ON_OSX) exit_file("local linux only") using("tinysnapshot") # character predictors dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") dat$large_penguin <- ifelse(dat$body_mass_g > median(dat$body_mass_g, na.rm = TRUE), 1, 0) mod <- glm( large_penguin ~ bill_length_mm * flipper_length_mm + species, family = binomial, data = dat) p <- plot_slopes(mod, variables = "bill_length_mm", condition = "flipper_length_mm", draw = FALSE) expect_inherits(p, "data.frame") expect_equivalent(nrow(p), 50) 
expect_false(anyNA(p$estimate)) # custom values p <- plot_slopes(mod, variables = "bill_length_mm", condition = list("flipper_length_mm" = 10), draw = FALSE) expect_true(p$flipper_length_mm == 10) # vcov mod <- lm(mpg ~ hp * wt, data = mtcars) mfx1 <- plot_slopes(mod, variables = "hp", condition = "wt", draw = FALSE) mfx2 <- plot_slopes(mod, variables = "hp", condition = "wt", vcov = "HC3", draw = FALSE) mfx3 <- plot_slopes(mod, variables = "hp", condition = "wt", vcov = ~cyl, draw = FALSE) expect_true(all(mfx1$std.error != mfx2$std.error)) expect_true(all(mfx1$std.error != mfx3$std.error)) expect_true(all(mfx2$std.error != mfx3$std.error)) expect_true(all(mfx1$conf.low != mfx2$conf.low)) expect_true(all(mfx1$conf.low != mfx3$conf.low)) expect_true(all(mfx2$conf.low != mfx3$conf.low)) # factor effects are plotted in different facets dat <- mtcars dat$gear_fct <- factor(dat$gear) dat$am_log <- as.logical(dat$am) dat <- dat mod <- lm(cyl ~ mpg * gear_fct + am_log, data = dat) p <- plot_slopes(mod, variables = "gear_fct", condition = "mpg") expect_snapshot_plot(p, "plot_slopes_factor_facets") p <- plot_slopes(mod, variables = "am_log", condition = "mpg") expect_inherits(p, "gg") # continuous vs. 
categorical x-axis mod <- lm(mpg ~ hp * wt * factor(cyl), mtcars) p <- plot_slopes(mod, variables = "hp", condition = "cyl") expect_snapshot_plot(p, "plot_slopes_categorical") p <- plot_slopes(mod, variables = "hp", condition = "wt") expect_snapshot_plot(p, "plot_slopes_continuous") # two conditions mod <- lm(mpg ~ hp * wt * am, data = mtcars) p <- plot_slopes(mod, variables = "hp", condition = c("wt", "am")) expect_snapshot_plot(p, "plot_slopes_two_conditions", tol = 500) # Issue #725: `newdata` argument in plotting functions mod <- glm(vs ~ hp + am, mtcars, family = binomial) p1 <- plot_slopes(mod, variables = "hp", by = "am", draw = FALSE, newdata = datagrid(am = 0:1, grid_type = "counterfactual")) p2 <- avg_slopes(mod, variables = "hp", by = "am", newdata = datagrid(am = 0:1, grid_type = "counterfactual")) expect_equivalent(p1$estimate, p2$estimate) expect_equivalent(p1$conf.low, p2$conf.low, tolerance = 1e-6) p3 <- plot_slopes(mod, variables = "hp", by = "am", draw = FALSE) p4 <- avg_slopes(mod, variables = "hp", by = "am", draw = FALSE) expect_equivalent(p3$estimate, p4$estimate) expect_equivalent(p3$conf.low, p4$conf.low) expect_true(all(p1$conf.low != p3$conf.low)) p5 <- plot_slopes(mod, variables = "hp", condition = "am", draw = FALSE) p6 <- slopes(mod, variables = "hp", newdata = datagrid(am = 0:1)) expect_equivalent(p5$estimate, p6$estimate) expect_equivalent(p5$conf.low, p6$conf.low) expect_true(all(p1$conf.low != p5$conf.low)) expect_true(all(p3$conf.low != p5$conf.low)) expect_error(plot_slopes(mod, variables = "hp", condition = "am", by = "am")) expect_error(plot_slopes(mod, variables = "hp", newdata = mtcars)) # Plot 4 variables in condition using facet_grid mod <- lm(mpg ~ hp * drat * factor(am)*carb, data = mtcars) p <- plot_slopes(mod, variables = c("hp", "drat"), condition = list("am", "drat" = 3:5, "hp"=c(10,15), "carb"=c(2,3))) expect_inherits(p, "gg") rm(list = 
ls())marginaleffects/inst/tinytest/test-pkg-logistf.R0000644000176200001440000000153114541720224021573 0ustar liggesuserssource("helpers.R") require("logistf") # logistf: no validity mod <- logistf(am ~ mpg * vs, data = mtcars) slo <- avg_slopes(mod) expect_inherits(slo, "slopes") cmp <- avg_comparisons(mod, variables = list(mpg = "sd")) expect_inherits(cmp, "comparisons") pre <- predictions(mod) expect_inherits(pre, "predictions") # flic: no validity mod <- flic(am ~ mpg * vs, data = mtcars) slo <- avg_slopes(mod) expect_inherits(slo, "slopes") cmp <- avg_comparisons(mod, variables = list(mpg = "sd")) expect_inherits(cmp, "comparisons") pre <- predictions(mod) expect_inherits(pre, "predictions") # flac: no validity mod <- flac(am ~ mpg * vs, data = mtcars) slo <- avg_slopes(mod) expect_inherits(slo, "slopes") cmp <- avg_comparisons(mod, variables = list(mpg = "sd")) expect_inherits(cmp, "comparisons") pre <- predictions(mod) expect_inherits(pre, "predictions")marginaleffects/inst/tinytest/test-pkg-tobit1.R0000644000176200001440000000205214541720224021325 0ustar liggesusersexit_file("tobit1 is not on CRAN") source("helpers.R") using("marginaleffects") if (!require("tobit1")) exit_file("tobit1") requiet("tobit1") requiet("broom") tol <- 0.001 tol_se <- 0.001 charitable$logdon <- log(charitable$donation) - log(25) data("feesadm", package = "tobit1") mod <- tobit1(fees ~ expense + I(expense ^ 2) + region, feesadm) # marginaleffects vs. margins (custom method shipped by tobit1) mfx1 <- slopes(mod, type = "linpred") mfx1 <- tidy(mfx1) mar1 <- margins(mod, what = "linpred") mar1 <- summary(mar1) expect_equivalent(mfx1$estimate, mar1$AME, tolerance = tol) expect_equivalent(mfx1$std.error, mar1$SE, tolerance = tol) mfx2 <- slopes(mod, type = "prob") mfx2 <- tidy(mfx2) mar2 <- margins(mod, what = "prob") mar2 <- summary(mar2) expect_equivalent(mfx2$estimate, mar2$AME, tolerance = tol_se) expect_equivalent(mfx2$std.error, mar2$SE, tolerance = tol_se) # predictions vs. 
built-in mar <- prediction(mod, what = "expvalue") mfx <- predictions(mod, type = "expvalue") expect_equivalent(mar$fitted, mfx$estimate) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-mclogit.R0000644000176200001440000000646514541720224021575 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("mclogit") requiet("MASS") requiet("emmeans") requiet("broom") requiet("splines") # mclogit: no validity data(Transport, package = "mclogit") dat <- Transport void <- capture.output( model <- mclogit(cbind(resp, suburb) ~ distance + cost, data = Transport) ) # type = "link" works expect_slopes(model, type = "link", n_unique = 1) pred <- predictions(model, type = "link") expect_predictions(pred) # mblogit: error on character regressors dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/MASS/housing.csv") dat$x <- rnorm(nrow(dat)) dat$Sat <- factor(dat$Sat) dat <- dat void <- capture.output( mod <- mblogit(Sat ~ Infl + Type + Cont + x, weights = Freq, data = dat) ) expect_predictions(predictions(mod)) expect_error(suppressWarnings(slopes(mod, type = "link")), pattern = "character") # mblogit: works on factor regressors dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/MASS/housing.csv") dat$x <- rnorm(nrow(dat)) dat$Sat <- factor(dat$Sat) dat$Infl <- factor(dat$Infl) dat$Cont <- factor(dat$Cont) dat$Type <- factor(dat$Type) void <- capture.output( mod <- mblogit(Sat ~ Infl + Type + Cont + x, weights = Freq, data = dat)) expect_predictions(predictions(mod)) mfx <- suppressWarnings(slopes(mod, type = "link")) expect_inherits(mfx, "marginaleffects") dat <- "https://github.com/vincentarelbundock/modelarchive/raw/main/data-raw/covid_variants.csv" dat <- read.csv(dat) dat <- dat[dat$variant %in% c("Alpha", "Beta", "Other"), ] dat$variant <- factor(dat$variant, levels = c("Other", "Beta", "Alpha")) dat <- dat void <- capture.output(suppressWarnings( mod <- mblogit( formula = variant ~ ns(collection_date_num, df = 2), 
weights = count, data = dat, from.table = FALSE, dispersion = FALSE, verbose = FALSE, control = mclogit.control(maxit = 100)) )) # response p1 <- predict(mod, type = "response", se.fit = TRUE) p1$fit <- p1$fit[, sort(colnames(p1$fit))] p1$se.fit <- p1$se.fit[, sort(colnames(p1$se.fit))] p2 <- predictions(mod) |> dplyr::arrange(group, rowid) expect_equivalent(p1$fit[160,], p2[p2$rowid == 160, "estimate"]) expect_equivalent(p1$se.fit[160,], p2[p2$rowid == 160, "std.error"], tolerance = .01) expect_equivalent(names(p1$fit[160,]), p2[p2$rowid == 160, "group"]) # link p1 <- predict(mod, type = "link", se.fit = TRUE) p1$fit <- p1$fit[, sort(colnames(p1$fit))] p1$se.fit <- p1$se.fit[, sort(colnames(p1$se.fit))] p2 <- predictions(mod, type = "link") |> dplyr::arrange(group, rowid) expect_equivalent(p1$fit[160,], p2[p2$rowid == 160, "estimate"]) expect_equivalent(p1$se.fit[160,], p2[p2$rowid == 160, "std.error"], tolerance = .001) expect_equivalent(names(p1$fit[160,]), p2[p2$rowid == 160, "group"]) # latent p2 <- predictions(mod, type = "latent") |> dplyr::arrange(group, rowid) p3 <- data.frame( emmeans(mod, ~collection_date_num, by = "variant", at = list(collection_date_num = dat[160, "collection_date_num"]), mode = "latent", level = 0.95)) p3 <- transform(p3, variant = as.character(variant)) p3 <- p3[order(p3$variant),] expect_equivalent(p3$emmean, p2[p2$rowid == 160, "estimate"]) expect_equivalent(p3$SE, p2[p2$rowid == 160, "std.error"], tolerance = 1e-5) expect_equivalent(as.character(p3$variant), p2[p2$rowid == 160, "group"]) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-gamlss.R0000644000176200001440000000730514560035476021430 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("margins") requiet("emmeans") requiet("broom") requiet("gamlss") requiet("titanic") # Beta regression tmp <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/betareg/GasolineYield.csv") tmp$batch <- factor(tmp$batch) dat <<- tmp mod <- gamlss::gamlss(yield 
~ batch + temp, family = "BE", data = dat, trace = FALSE) expect_error(predictions(mod, newdata = head(dat)), pattern = "what. argument") p1 <- predictions(mod, newdata = head(dat), what = "mu") p2 <- predictions(mod, newdata = head(dat), what = "sigma") expect_inherits(p1, "predictions") expect_inherits(p2, "predictions") # EMMeans provides the same results whether regrid = "response" or # regrid = "link" # marginaleffects mfx <- slopes( mod, type = "link", newdata = datagrid(batch = 1), variables = "temp", what = "mu") # emtrends em <- emtrends(mod, ~temp, "temp", at = list("batch" = tmp$batch[1])) em <- data.frame(em) # We do expect that they will be equivalent expect_equivalent(mfx$estimate, em$temp.trend, tolerance = .001) expect_equivalent(mfx$std.error, em$SE, tolerance = .001) # predictions: no validity pred <- suppressWarnings(predictions(mod, what = "mu")) expect_predictions(pred, n_row = nrow(tmp)) pred <- predictions(mod, newdata = datagrid(batch = 1:3, temp = c(300, 350)), what = "mu") expect_predictions(pred, n_row = 6) # marginalmeans: vs. emmeans mm <- predictions(mod, by = "batch", newdata = datagrid(grid_type="balanced"), what = "mu") em <- broom::tidy(emmeans::emmeans(mod, "batch", type = "response")) expect_equivalent(mm$estimate, em$response, tol = 0.001) expect_equivalent(mm$std.error, em$std.error, tolerance = 0.01) # Logistic regression data("titanic_train", package = "titanic") tmp <- titanic_train tmp$Pclass <- as.factor(tmp$Pclass) dat <<- na.omit(tmp) mod <- gamlss::gamlss(Survived ~ Age + Pclass, family = "BI", data = dat, trace = FALSE) # The R-package margins does not provide support to gamlss. 
# Error in tmp[["fit"]] : subscript out of bounds # In addition: Warning message: # In predict.gamlss(model, newdata = out, type = type, se.fit = TRUE, : # se.fit = TRUE is not supported for new data values at the moment # emtrends mfx <- slopes(mod, type = "link", newdata = datagrid(Pclass = "1"), variables = "Age", what = "mu") em <- emtrends(mod, ~Age, "Age", at = list("Pclass" = "1")) em <- tidy(em) expect_equivalent(mfx$estimate, em$Age.trend, tolerance = .001) expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) # predictions: no validity pred <- predictions(mod, what = "mu") expect_predictions(pred, n_row = nrow(na.omit(titanic_train))) pred <- predictions( mod, newdata = datagrid(Pclass = 1:3, Age = c(25, 50)), what = "mu") expect_predictions(pred, n_row = 6) # marginalmeans: vs. emmeans mm <- predictions(mod, by = "Pclass", what = "mu", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(Pclass) mm <- tidy(mm) em <- broom::tidy(emmeans::emmeans(mod, "Pclass", type = "response")) expect_equivalent(mm$estimate, em$response) expect_equivalent(mm$std.error, em$std.error, tolerance = 0.01) # Issue #933 dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv", na.strings = "") dat <- dat |> transform(prop = rBE(nrow(dat), mu = 0.5, sigma = 0.2)) |> na.omit() mod <- gamlss::gamlss( prop ~ sex * body_mass_g + year + re(random = list(~ 1 | species, ~ 1 | island)), family = BE(), data = dat, trace = FALSE) cmp <- avg_comparisons(mod, what = "mu") |> suppressWarnings() expect_inherits(cmp, "comparisons") # end. source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-brglm2.R0000644000176200001440000000453014541720224021313 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("brglm2") requiet("margins") requiet("emmeans") requiet("broom") # brglm2::brglm_fit vs. margins vs. 
emtrends data("endometrial", package = "brglm2", envir = environment()) dat <- endometrial model <- glm(HG ~ NV + PI + EH, family = binomial("probit"), data = dat) model <- update(model, method = "brglm_fit") # probably breaks get_data from environemnt # margins mar <- margins(model) mfx <- slopes(model, newdata = dat) expect_slopes(model, newdata = dat) expect_margins(mar, mfx) # emtrends em <- emtrends(model, ~PI, "PI", at = list(PI = 15, EH = 2, NV = 0)) em <- tidy(em) mfx <- slopes( model, variables = "PI", newdata = datagrid(PI = 15, EH = 2, NV = 0), type = "link") expect_equivalent(mfx$estimate, em$PI.trend) expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) # brglm2::brglm_fit vs. margins sm <- data.frame( freq = c(15, 16, 16, 27, 33, 20, 21, 18, 26, 41, 38, 27, 29, 21, 33, 60, 41, 42), dose = rep(c(0, 10, 33, 100, 333, 1000), 3), observation = rep(1:3, each = 6)) model <- brnb( freq ~ dose + log(dose + 10), data = sm, link = "log", transformation = "inverse", type = "ML") expect_slopes(model, n_unique = 6, newdata = sm) mfx <- suppressWarnings(slopes(model)) mar <- suppressWarnings(margins(model)) expect_margins(mar, mfx) # predictions: brglm2::brglm_fit: no validity data("endometrial", package = "brglm2", envir = environment()) dat <- endometrial model <- glm(HG ~ NV + PI + EH, family = binomial("probit"), data = dat) model <- update(model, method = "brglm_fit") pred1 <- predictions(model) pred2 <- predictions(model, newdata = head(endometrial)) expect_predictions(pred1, n_row = nrow(endometrial)) expect_predictions(pred2, n_row = 6) # brmultinom: no validity data("housing", package = "MASS") mod <- brmultinom(Sat ~ Infl + Type + Cont, weights = Freq, data = housing, type = "ML", ref = 1) expect_slopes(mod, type = "probs") expect_predictions(predictions(mod, type = "probs")) # bracl: no validity data("stemcell", package = "brglm2") dat <- stemcell dat$religion <- as.numeric(dat$religion) mod <- bracl( research ~ as.numeric(religion) + gender, 
weights = frequency, data = dat, type = "ML") expect_predictions(predictions(mod, type = "probs")) expect_slopes(mod, type = "probs") rm(list = ls())marginaleffects/inst/tinytest/test-datagrid.R0000644000176200001440000000645414541720224021135 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("lme4") requiet("fixest") # informative errors expect_error(datagrid(Petal.Length = 4.6), pattern = "inside") # numeric clusters no longer produce a warning; selects mode mod <-lme4::lmer(mpg ~ hp + (1 + drat | cyl), data = mtcars) expect_true(datagrid(model = mod)$cyl == 8) # functions cmp <- comparisons( mod, newdata = datagrid(hp = range, cyl = unique)) expect_equivalent(nrow(cmp), 6) cmp <- comparisons( mod, newdata = datagrid(hp = range)) expect_equivalent(nrow(cmp), 2) p <- predictions( mod, newdata = datagrid(hp = fivenum)) expect_equivalent(nrow(p), 5) nd <- datagrid(newdata = mtcars, hp = range, mpg = fivenum, wt = sd) expect_equivalent(nrow(nd), 10) mod <- glm(am ~ factor(gear), data = mtcars) cmp <- comparisons(mod, newdata = datagrid(am = 0, gear = mtcars$gear)) expect_equivalent(nrow(cmp), 6) cmp <- comparisons(mod, newdata = datagrid(am = unique, gear = max)) expect_equivalent(nrow(cmp), 4) # Issue #721 requiet("haven") m <- marginaleffects:::hush(read_dta("http://www.stata-press.com/data/r15/margex.dta")) if (inherits(m, "data.frame")) { m <- data.frame(m) m$sex <- as.factor(m$sex) mod <- lm(y ~ sex + age + distance, data = m) expect_error( predictions(mod, newdata = datagrid(sex = c("male", "female"))), pattern = "must be one of the factor levels" ) expect_error( predictions(mod, newdata = datagrid(sex = c("male", "femael"))), pattern = "must be one of the factor levels" ) } mod <- lm(mpg ~ qsec + as.factor(gear), data = mtcars) expect_error( predictions(mod, newdata = datagrid(gear = 6)), pattern = "must be one of the factor levels" ) expect_error( comparisons(mod, newdata = datagrid(gear = 6)), pattern = "must be one of the factor levels" 
) # Issue #688 dat <<- transform(mtcars, cyl = factor(cyl)) mod <- lm(mpg ~ hp, data = dat) d <- datagrid(model = mod, by = c("carb", "cyl")) k <- aggregate(cbind(mpg, hp) ~ carb + cyl, data = dat, FUN = mean) expect_equivalent(k$mpg, d$mpg) # Issue 766: categorical predictors + variables arg + avg requiet("Matchit") data('lalonde', package='MatchIt') fit <- lm(re78 ~ race * treat, data = lalonde) a = predict(fit, branewdata = lalonde) b = predictions(fit, newdata = lalonde) expect_equivalent(a, b$estimate) nd = rbind( transform(lalonde, treat = 0), transform(lalonde, treat = 1)) a = predict(fit, newdata = nd) b = predictions(fit, newdata = lalonde, variables = "treat") expect_equivalent(a, b$estimate) a = tapply(predict(fit, newdata = nd), nd$treat, mean) b = avg_predictions(fit, newdata = lalonde, variables = "treat") |> suppressWarnings() expect_equivalent(as.numeric(a), b$estimate) a = predict(fit, newdata = nd) b = predictions(fit, variables = "treat") expect_equivalent(a, b$estimate) a = tapply(predict(fit, newdata = nd), nd$treat, mean) b = avg_predictions(fit, variables = "treat") expect_equivalent(as.numeric(a), b$estimate) a = tapply(predict(fit, newdata = nd), nd$treat, mean) b = predictions(fit, variables = "treat") b = tapply(b$estimate, b$treat, mean) expect_equivalent(a, b) a = as.numeric(tapply(predict(fit, newdata = nd), nd$treat, mean)) b = predictions(fit, variables = "treat", by = "treat") expect_equivalent(a, b$estimate) source("helpers.R") rm(list = ls()) marginaleffects/inst/tinytest/test-tinytest.R0000644000176200001440000000033014560035476021235 0ustar liggesuserssource("helpers.R") using("marginaleffects") dat <- transform(mtcars, gear = factor(gear)) mod <- lm(mpg ~ hp + gear, data = dat) pre <- predictions(mod) expect_slopes(mod) expect_predictions(pre) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-brms-average.R0000644000176200001440000000402314541720224022476 0ustar liggesusers# TODO: high tolerance source("helpers.R") 
exit_file("memory explosion") if (!EXPENSIVE) exit_file("EXPENSIVE") using("marginaleffects") requiet("brms") requiet("insight") void <- capture.output({ m1 <- brm(mpg ~ hp, data = mtcars, silent = 2) m2 <- brm(mpg ~ hp + drat, data = mtcars, silent = 2) m3 <- brm(mpg ~ hp + drat + mo(cyl), data = mtcars, silent = 2) }) # pp_average() vs. predictions() set.seed(1024) p1 <- suppressWarnings(pp_average( m1, m2 = m2, m3 = m3, robust = TRUE, newdata = head(mtcars) )) set.seed(1024) p2 <- suppressWarnings(predictions( m1, m2 = m2, m3 = m3, type = "average", newdata = head(mtcars) )) expect_equivalent(p2$estimate, p1[, "Estimate"]) expect_equivalent(p2$conf.low, p1[, "Q2.5"]) expect_equivalent(p2$conf.high, p1[, "Q97.5"]) # manual vs. comparisons() set.seed(1024) dat_lo <- dat_hi <- head(mtcars) dat_hi$hp <- dat_hi$hp + 10 dat_lo$hp <- dat_lo$hp - 10 avg1 <- suppressWarnings(pp_average( m1, m2 = m2, m3 = m3, summary = FALSE, newdata = dat_lo )) avg2 <- suppressWarnings(pp_average( m1, m2 = m2, m3 = m3, summary = FALSE, newdata = dat_hi )) contr <- avg2 - avg1 cmp1 <- t(apply(contr, 2, quantile, c(.5, .025, .975))) set.seed(1024) cmp2 <- suppressWarnings(comparisons( m1, m2 = m2, m3 = m3, type = "average", variables = list(hp = 20), newdata = head(mtcars) )) expect_equivalent(cmp2$estimate, cmp1[, "50%"], tol = .2) expect_equivalent(cmp2$conf.low, cmp1[, "2.5%"], tol = .2) expect_equivalent(cmp2$conf.high, cmp1[, "97.5%"], tol = .2) # method argument set.seed(1024) cmp1 <- suppressWarnings(comparisons(m1, m2 = m2, m3 = m3, type = "average", method = "posterior_epred", variables = list(hp = 20), newdata = head(mtcars) )) set.seed(1024) cmp2 <- suppressWarnings(comparisons(m1, m2 = m2, m3 = m3, type = "average", variables = list(hp = 20), newdata = head(mtcars) )) expect_true(all(cmp1$estimate != cmp2$estimate)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-ggeffects.R0000644000176200001440000000270514541720224022065 0ustar liggesuserssource("helpers.R") 
using("marginaleffects") # Issue #903 set.seed(123) dat <- data.frame( outcome = rbinom(n = 100, size = 1, prob = 0.35), var_binom = as.factor(rbinom(n = 100, size = 1, prob = 0.2)), var_cont = rnorm(n = 100, mean = 10, sd = 7), groups = sample(letters[1:4], size = 100, replace = TRUE) ) m <- glm(outcome ~ var_binom * var_cont + groups, data = dat, family = binomial() ) d <- structure(list(var_binom = structure(1:2, levels = c("0", "1" ), class = "factor"), var_cont = c(9.24717241397544, 9.24717241397544 ), groups = structure(c(1L, 1L), levels = "b", class = "factor")), class = "data.frame", row.names = c(NA, -2L)) p1 <- predictions( m, newdata = d, by = "var_binom", hypothesis = "pairwise", type = "response", transform = exp ) p2 <- predictions( m, newdata = d, by = "var_binom", hypothesis = "pairwise", type = "invlink(link)", transform = exp ) |> suppressWarnings() p3 <- predictions( m, newdata = d, by = "var_binom", hypothesis = "pairwise", type = NULL, transform = exp ) |> suppressWarnings() # values expect_equal(p1$estimate, 0.9867827, tolerance = 1e-5) expect_equal(p2$estimate, 0.9867827, tolerance = 1e-5) expect_equal(p3$estimate, 0.9867827, tolerance = 1e-5) # warnings expect_warning(predictions( m, newdata = d, by = "var_binom", hypothesis = "pairwise", type = "invlink(link)", transform = exp )) expect_warning(predictions( m, newdata = d, by = "var_binom", hypothesis = "pairwise", transform = exp ))marginaleffects/inst/tinytest/test-inferences.R0000644000176200001440000001127314560035476021503 0ustar liggesuserssource("helpers.R") if (!EXPENSIVE) exit_file("EXPENSIVE") set.seed(1024) R <- 100 mod <- lm(Petal.Length ~ Sepal.Length * Sepal.Width, data = iris) # simulation-based inference x <- mod |> avg_predictions() |> inferences(method = "simulation", R = R) expect_inherits(x, "predictions") x <- mod |> slopes() |> inferences(method = "simulation", R = R) |> head() expect_inherits(x, "slopes") x <- mod |> predictions(vcov = "HC3") |> inferences(method = 
"simulation", R = R) |> head() expect_inherits(x, "predictions") x <- mod |> comparisons() |> inferences(method = "simulation", R = R) |> attr("posterior_draws") expect_inherits(x, "matrix") # {boot} set.seed(1234) x <- mod |> avg_predictions() |> inferences(method = "boot", R = R) expect_inherits(x, "predictions") expect_equivalent(nrow(x), 1) expect_equal(x$std.error, 0.0491, tolerance = 1e-3) # head works set.seed(1234) x <- mod |> slopes() |> inferences(method = "boot", R = R) expect_inherits(head(x), "slopes") expect_equivalent(nrow(x), 300) expect_equivalent(nrow(head(x)), 6) expect_equal(x$std.error[1:3], c(0.2425, 0.2824, 0.2626), tolerance = 1e-3) # avg_ works set.seed(1234) x <- mod |> avg_slopes() |> inferences(method = "boot", R = R) expect_inherits(x, "slopes") expect_equivalent(nrow(x), 2) expect_equal(x$std.error, c(0.0657, 0.1536), tolerance = 1e-3) x <- mod |> predictions(vcov = "HC3") |> inferences(method = "boot", R = R) |> head() expect_inherits(x, "predictions") x <- mod |> comparisons() |> inferences(method = "boot", R = R) |> attr("inferences") expect_inherits(x, "boot") x <- mod |> comparisons(variables = "Sepal.Width", newdata = datagrid(Sepal.Length = range)) |> inferences(method = "boot", R = R) expect_equivalent(nrow(x), 2) x <- mod|> avg_comparisons() |> inferences(method = "simulation", R = R) expect_equivalent(nrow(x), 2) x <- x |> posterior_draws() expect_equivalent(nrow(x), 2 * R) # {rsample} set.seed(1234) x <- mod |> avg_predictions() |> inferences(method = "rsample", R = R) expect_equal(x$conf.low, 3.6692, tolerance = 1e-3) expect_inherits(x, "predictions") x <- mod |> slopes() |> inferences(method = "rsample", R = R) |> head() expect_inherits(x, "slopes") x <- mod |> predictions(vcov = "HC3") |> inferences(method = "rsample", R = R) |> head() expect_inherits(x, "predictions") x <- mod |> comparisons() |> inferences(method = "rsample", R = R) |> attr("inferences") expect_inherits(x, "bootstraps") x <- mod |> comparisons(variables 
= "Sepal.Width", newdata = datagrid(Sepal.Length = range)) |> inferences(method = "rsample", R = R) expect_equivalent(nrow(x), 2) x <- mod |> avg_comparisons() |> inferences(method = "rsample", R = R) |> posterior_draws() expect_equivalent(nrow(x), 2 * R) # fwb no validity check set.seed(1234) x <- mod |> comparisons() |> inferences(method = "fwb", R = R) expect_equivalent(nrow(x), 300) expect_equal(x$std.error[1:3], c(0.0739, 0.0568, 0.0508), tolerance = 1e-3) x <- mod |> avg_comparisons() |> inferences(method = "fwb", R = R) expect_equivalent(nrow(x), 2) # {fwb} error when user supplied its own weightso dat <- transform(mtcars, w = runif(32)) mod <- lm(mpg ~ hp, data = dat) expect_error(inferences(comparisons(mod, wts = "w"), method = "fwb"), pattern = "wts") # Issue #856 tmp <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) cmp <- avg_comparisons(tmp, variables = list(Sepal.Length = 1, Species = "reference"), cross = TRUE) |> inferences(method = "boot", R = 5) |> suppressWarnings() expect_inherits(cmp, "comparisons") expect_equal(nrow(cmp), 2) # Issue #853 m <- glm(am ~ mpg + hp + cyl, data = mtcars, family = binomial) p <- avg_predictions(m, by = "cyl") |> inferences(method = "boot", R = 5) |> suppressWarnings() expect_inherits(p, "predictions") p <- predictions(m, by = "cyl") |> inferences(method = "boot", R = 5) |> suppressWarnings() expect_inherits(p, "predictions") # Issue #851: simulation-based inference use the original estimates, not the mean/median of simulations mod <- glm(vs ~ hp + mpg + am, data = mtcars, family = binomial) cmp1 <- avg_comparisons(mod) cmp2 <- cmp1 |> inferences(method = "simulation", R = 500) expect_equivalent(cmp1$estimate, cmp2$estimate) # mfxplainer bug mod <- lm(mpg ~ hp + cyl, data = mtcars) p <- avg_predictions(mod, by = "cyl") |> inferences(method = "simulation", R = 25) expect_inherits(p, "predictions") # inferences with hypotheses mod <- lm(mpg ~ hp + cyl, data = mtcars) p <- hypotheses(mod, hypothesis = "hp/cyl=1") 
|> inferences(method = "boot", R = 25) |> suppressWarnings() expect_inherits(p, "hypotheses") p <- hypotheses(mod, hypothesis = "hp/cyl=1") |> inferences(method = "simulation", R = 25) expect_inherits(p, "hypotheses") rm(list = ls()) marginaleffects/inst/tinytest/test-pkg-brms.R0000644000176200001440000005610714554070103021076 0ustar liggesusers# TODO: 744, 745, 749 # HPD tests against emmeans, which uses HDI, but our default is ETI # HDI is implemented specifically for these tests # https://github.com/vincentarelbundock/marginaleffects/issues/240 source("helpers.R") using("marginaleffects") # exit_file("not sure why broken") if (!EXPENSIVE) exit_file("EXPENSIVE") if (ON_WINDOWS) exit_file("on windows") if (!minver("base", "4.1.0")) exit_file("R 4.1.0") options("marginaleffects_posterior_interval" = "hdi") requiet("brms") requiet("emmeans") requiet("mice") requiet("broom") requiet("posterior") requiet("data.table") requiet("brmsmargins") tol <- 0.0001 tol_se <- 0.001 # load models brms_numeric <- readRDS("modelarchive/data/brms_numeric.rds") brms_numeric2 <- readRDS("modelarchive/data/brms_numeric2.rds") brms_character <- readRDS("modelarchive/data/brms_character.rds") brms_factor <- readRDS("modelarchive/data/brms_factor.rds") brms_factor_formula <- readRDS("modelarchive/data/brms_factor_formula.rds") brms_interaction <- readRDS("modelarchive/data/brms_interaction.rds") brms_logical <- readRDS("modelarchive/data/brms_logical.rds") brms_epi <- readRDS("modelarchive/data/brms_epi.rds") brms_cumulative_random <- readRDS("modelarchive/data/brms_cumulative_random.rds") brms_monotonic <- readRDS("modelarchive/data/brms_monotonic.rds") brms_monotonic_factor <- readRDS("modelarchive/data/brms_monotonic_factor.rds") brms_vdem <- readRDS("modelarchive/data/brms_vdem.rds") brms_lognormal_hurdle <- readRDS("modelarchive/data/brms_lognormal_hurdle.rds") brms_lognormal_hurdle2 <- readRDS("modelarchive/data/brms_lognormal_hurdle2.rds") brms_binomial <- 
readRDS("modelarchive/data/brms_binomial.rds") brms_mv_1 <- readRDS("modelarchive/data/brms_mv_1.rds") brms_vdem <- readRDS("modelarchive/data/brms_vdem.rds") brms_ordinal_1 <- readRDS("modelarchive/data/brms_ordinal_1.rds") brms_categorical_1 <- readRDS("modelarchive/data/brms_categorical_1.rds") brms_logit_re <- readRDS("modelarchive/data/brms_logit_re.rds") brms_mixed_3 <- readRDS("modelarchive/data/brms_mixed_3.rds") brms_kurz <- readRDS("modelarchive/data/brms_kurz.rds") brms_inhaler_cat <- readRDS("modelarchive/data/brms_inhaler_cat.rds") brms_poisson <- readRDS("modelarchive/data/brms_poisson.rds") brms_issue500 <- readRDS("modelarchive/data/brms_issue500.rds") brms_issue576 <- readRDS("modelarchive/data/brms_issue576.rds") brms_issue639 <- readRDS("modelarchive/data/brms_issue639.rds") brms_issue782 <- readRDS("modelarchive/data/brms_issue782.rds") brms_issue1006 <- readRDS("modelarchive/data/brms_issue1006.rds") # average marginal effects brmsmargins options("marginaleffects_posterior_interval" = "eti") h <- 5e-5 bm <- brmsmargins( brms_numeric, add = data.frame(hp = c(0, 0 + h)), contrasts = cbind("AME MPG" = c(-1 / h, 1 / h)), CI = 0.95, CIType = "ETI") bm <- data.frame(bm$ContrastSummary) mfx <- avg_slopes(brms_numeric) expect_equivalent(mean(posterior_draws(mfx)$draw), bm$M, tolerance = tol) expect_equivalent(mfx$conf.low, bm$LL, tolerance = tol) expect_equivalent(mfx$conf.high, bm$UL, tolerance = tol) options("marginaleffects_posterior_interval" = "hdi") # marginaleffects vs. 
emmeans mfx <- avg_slopes( brms_numeric2, newdata = datagrid(mpg = 20, hp = 100), variables = "mpg", type = "link") em <- emtrends(brms_numeric2, ~mpg, "mpg", at = list(mpg = 20, hp = 100)) em <- tidy(em) expect_equivalent(mfx$estimate, em$mpg.trend) expect_equivalent(mfx$conf.low, em$lower.HPD) expect_equivalent(mfx$conf.high, em$upper.HPD) # tolerance is less good for back-transformed response mfx <- avg_slopes(brms_numeric2, newdata = datagrid(mpg = 20, hp = 100), variables = "mpg", type = "response") em <- emtrends(brms_numeric2, ~mpg, "mpg", at = list(mpg = 20, hp = 100), regrid = "response") em <- tidy(em) expect_equivalent(mfx$estimate, em$mpg.trend, tolerance = .1) expect_equivalent(mfx$conf.low, em$lower.HPD, tolerance = .01) expect_equivalent(mfx$conf.high, em$upper.HPD, tolerance = .1) # brms: cumulative: marginaleffects: no validity expect_slopes(brms_cumulative_random, se = FALSE) # brms: logical regressor mfx <- slopes(brms_logical) expect_inherits(mfx, "marginaleffects") expect_equivalent(nrow(mfx), nrow(attr(mfx, "posterior_draws"))) # predictions: hypothetical group nd <- suppressWarnings(datagrid(model = brms_mixed_3, grp = 4, subgrp = 12)) nd$Subject <- 1000 set.seed(1024) p1 <- predictions(brms_mixed_3, newdata = nd, allow_new_levels = TRUE) set.seed(1024) p2 <- predictions(brms_mixed_3, newdata = nd, allow_new_levels = TRUE, sample_new_levels = "gaussian") set.seed(1024) p3 <- predictions(brms_mixed_3, newdata = nd, allow_new_levels = TRUE, sample_new_levels = "uncertainty") expect_false(any(p1$estimate == p2$estimate)) expect_equivalent(p1, p3) expect_inherits(posterior_draws(p3), "data.frame") # predictions w/ random effects set.seed(123) tmp <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/lme4/sleepstudy.csv") tmp$grp <- sample(1:5, size = 180, replace = TRUE) tmp$cat <- as.factor(sample(1:5, size = 180, replace = TRUE)) tmp$Reaction_d <- ifelse(tmp$Reaction < median(tmp$Reaction), 0, 1) tmp <- tmp |> poorman::group_by(grp) 
|> poorman::mutate(subgrp = sample(1:15, size = poorman::n(), replace = TRUE)) w <- apply(posterior_linpred(brms_mixed_3), 2, stats::median) x <- get_predict(brms_mixed_3, newdata = tmp, type = "link") y <- predictions(brms_mixed_3, type = "link") expect_equivalent(w, x$estimate) expect_equivalent(w, y$estimate) # response w <- apply(posterior_epred(brms_mixed_3), 2, stats::median) x <- get_predict(brms_mixed_3, type = "response") y <- predictions(brms_mixed_3, type = "response") expect_equivalent(w, x$estimate) expect_equivalent(w, y$estimate) # no random effects w1 <- apply(posterior_epred(brms_mixed_3), 2, stats::median) w2 <- apply(posterior_epred(brms_mixed_3, re_formula = NA), 2, stats::median) x <- get_predict(brms_mixed_3, re_formula = NA, type = "response") y <- predictions(brms_mixed_3, re_formula = NA, type = "response") expect_true(all(w1 != w2)) expect_equivalent(w2, x$estimate) expect_equivalent(w2, y$estimate) # brms: cumulative: predictions: no validity set.seed(1024) p1 <- predictions(brms_cumulative_random) p2 <- predictions(brms_cumulative_random, re_formula = NA) expect_true(mean(p1$conf.low < p2$conf.low) > .95) # tolerance expect_true(mean(p1$conf.high > p2$conf.high) > .99) # tolerance expect_warning(predictions(brms_cumulative_random, include_random = FALSE)) # only for lme4 # marginaleffects: ordinal no validity expect_slopes(brms_ordinal_1, se = FALSE) # predict new unit: no validity dat1 <- dat2 <- datagrid(model = brms_epi) dat2$patient <- 9999 set.seed(1024) mfx1 <- slopes(brms_epi, newdata = dat1) set.seed(1024) mfx2 <- slopes(brms_epi, newdata = dat2, allow_new_levels = TRUE) expect_false(any(mfx1$estimate == mfx2$estimate)) # tidy() dat <- mtcars dat$logic <- as.logical(dat$vs) dat$cyl_fac <- as.factor(dat$cyl) dat$cyl_cha <- as.character(dat$cyl) ti <- avg_slopes(brms_factor, newdata = dat) expect_inherits(ti, "data.frame") expect_true(nrow(ti) == 3) expect_true(ncol(ti) >= 5) expect_true(all(c("term", "estimate", "conf.low") %in% 
colnames(ti))) # predictions: no validity # simple pred <- predictions(brms_numeric2, newdata = datagrid(hp = c(100, 120))) expect_predictions(pred, se = FALSE) expect_equivalent(dim(attr(pred, "posterior_draws")), c(2, 2000)) # interaction pred <- predictions(brms_interaction, newdata = datagrid(mpg = c(20, 25))) expect_predictions(pred, se = FALSE) # factor in data frame pred <- predictions(brms_factor, newdata = datagrid()) expect_predictions(pred, se = FALSE) # predictions: prediction vs. expectation vs. include_random # prediction vs. response p1 <- suppressWarnings(predictions(brms_epi, type = "prediction")) p2 <- suppressWarnings(predictions(brms_epi, type = "response")) expect_true(all(p1$conf.low < p2$conf.low)) expect_true(all(p1$conf.high > p2$conf.high)) # re_formula p1 <- predictions(brms_epi, newdata = datagrid(patient = 1)) p2 <- predictions(brms_epi, newdata = datagrid(patient = 1), re_formula = NA) expect_false(p1$estimate == p2$estimate) expect_false(p1$conf.low == p2$conf.low) expect_false(p1$conf.high == p2$conf.high) # predictions vs. emmeans requiet("emmeans") em <- emmeans::emmeans(brms_numeric, ~hp, "hp", at = list(hp = c(100, 120))) em <- data.frame(em) pred <- predictions(brms_numeric, newdata = datagrid(hp = c(100, 120)), type = "link") expect_equivalent(pred$estimate, em$emmean) expect_equivalent(pred$conf.low, em$lower.HPD) expect_equivalent(pred$conf.high, em$upper.HPD) # marginaleffects: no validity expect_slopes(brms_numeric2, se = FALSE) expect_slopes(brms_interaction, se = FALSE) expect_slopes(brms_factor, se = FALSE) # credible intervals and posterior draws tmp <- slopes(brms_factor) expect_true("conf.low" %in% colnames(tmp)) expect_true(all(tmp$estimate > tmp$conf.low)) expect_true(all(tmp$estimate < tmp$conf.high)) expect_false(is.null(attr(tmp, "posterior_draws"))) expect_equivalent(nrow(attr(tmp, "posterior_draws")), nrow(tmp)) # marginaleffects vs. 
emmeans requiet("emmeans") # # known frequentist example to compare syntax # brms_numeric_freq <- glm(am ~ hp, data = mtcars, family = binomial) # slopes(brms_numeric_freq, newdata = datagrid(hp = 147), type = "link") # emmeans::emtrends(brms_numeric_freq, specs = ~hp, var = "hp", at = list(hp = 147)) # one variable: link scale mfx1 <- slopes(brms_numeric, variables = "hp", newdata = datagrid(hp = 110), type = "link") mfx2 <- as.data.frame(emmeans::emtrends(brms_numeric, ~hp, var = "hp", at = list(hp = 110))) expect_equivalent(mfx1$estimate, mfx2$hp.trend) expect_equivalent(mfx1$conf.low, mfx2$lower.HPD) expect_equivalent(mfx1$conf.high, mfx2$upper.HPD) # one variable: response scale mfx1 <- slopes(brms_numeric, variables = "hp", newdata = datagrid(hp = 110)) mfx2 <- as.data.frame(emtrends(brms_numeric, ~hp, var = "hp", at = list(hp = 110), regrid = "response")) expect_equivalent(mfx1$estimate, mfx2$hp.trend, tolerance = .001) expect_equivalent(mfx1$conf.low, mfx2$lower.HPD, tolerance = .001) expect_equivalent(mfx1$conf.high, mfx2$upper.HPD, tolerance = .001) # numeric + factor: numeric variable dat <- datagrid(model = brms_factor, mpg = 25, cyl_fac = 4) mfx1 <- slopes(brms_factor, variables = "mpg", newdata = dat, type = "link") mfx2 <- as.data.frame(emmeans::emtrends(brms_factor, ~mpg, var = "mpg", at = list(mpg = 25, cyl_fac = 4))) expect_equivalent(mfx1$estimate, mfx2$mpg.trend, tolerance = .001) expect_equivalent(mfx1$conf.low, mfx2$lower.HPD, tolerance = .001) expect_equivalent(mfx1$conf.high, mfx2$upper.HPD, tolerance = .001) # numeric + factor: factor dat <- datagrid(model = brms_factor, mpg = 25, cyl_fac = 4) mfx1 <- slopes(brms_factor, variables = "cyl_fac", newdata = dat, type = "link") mfx2 <- emmeans::emmeans(brms_factor, ~ cyl_fac, var = "cyl_fac", at = list(mpg = 25)) mfx2 <- emmeans::contrast(mfx2, method = "revpairwise") mfx2 <- data.frame(mfx2)[1:2,] expect_equivalent(mfx1$estimate, mfx2$estimate, tolerance = .001) expect_equivalent(mfx1$conf.low, 
mfx2$lower.HPD, tolerance = .001) expect_equivalent(mfx1$conf.high, mfx2$upper.HPD, tolerance = .001) # # plot_predictions: no validity # p <- plot_predictions(brms_interaction, condition = c("mpg", "vs")) # vdiffr::expect_doppelganger("brms logit plot_predictions", p) # # # # # plot_predictions: no validity # p <- plot_predictions(brms_interaction, condition = c("mpg", "vs")) # vdiffr::expect_doppelganger("brms logit plot_predictions", p) # factor in formula expect_error(slopes(brms_factor_formula), pattern = "factor") expect_error(predictions(brms_factor_formula), pattern = "factor") # bugs stay dead: factor indexing for posterior draws tmp <- predictions(brms_factor, newdata = datagrid(cyl_fac = 4, mpg = c(10, 20))) expect_inherits(posterior_draws(tmp), "data.frame") # mo() recognized as factor: Issue #220 # marginaleffects mfx1 <- slopes(brms_monotonic) mfx2 <- slopes(brms_monotonic, variable = "carb") expect_error(slopes(brms_monotonic_factor), pattern = "cannot be used") expect_inherits(mfx1, "marginaleffects") expect_inherits(mfx2, "marginaleffects") # comparisons expect_error(comparisons(brms_monotonic_factor), pattern = "cannot be used") contr1 <- avg_comparisons(brms_monotonic) known <- c(sprintf("%s - 1", c(2:4, 6, 8)), "+1") expect_equivalent(contr1$contrast, known) # multivariate outcome beta <- get_coef(brms_mv_1) expect_equivalent(length(beta), 12) mfx <- slopes(brms_mv_1) expect_inherits(mfx, "marginaleffects") pred <- predictions(brms_mv_1) expect_inherits(pred, "predictions") comp <- comparisons(brms_mv_1) expect_inherits(comp, "comparisons") draws <- posterior_draws(mfx) expect_inherits(draws, "data.frame") expect_true(all(c("drawid", "draw", "rowid") %in% colnames(draws))) # categorical outcome mfx <- slopes(brms_categorical_1) expect_inherits(mfx, "marginaleffects") pred <- predictions(brms_categorical_1) expect_inherits(pred, "predictions") comp <- comparisons(brms_categorical_1) expect_inherits(comp, "comparisons") draws <- 
posterior_draws(mfx) expect_inherits(draws, "data.frame") expect_true(all(c("drawid", "draw", "rowid") %in% colnames(draws))) # vignette vdem example p_response <- predictions( brms_vdem, type = "response", newdata = datagrid( party_autonomy = c(TRUE, FALSE), civil_liberties = .5, region = "Middle East and North Africa")) expect_predictions(p_response, se = FALSE) p_prediction <- predictions( brms_vdem, type = "prediction", newdata = datagrid( party_autonomy = c(TRUE, FALSE), civil_liberties = .5, region = "Middle East and North Africa")) expect_predictions(p_prediction, se = FALSE) # bugs stay dead: character regressors used to produce duplicates expect_slopes(brms_character, se = FALSE) mfx <- avg_slopes(brms_character) expect_true(length(unique(ti$estimate)) == nrow(ti)) # warning: vcov not supported expect_warning(slopes(brms_numeric, vcov = "HC3"), pattern = "vcov.*not supported") # Andrew Heiss says that lognormal_hurdle are tricky because the link is # identity even if the response is actually logged # https://github.com/vincentarelbundock/marginaleffects/issues/343 # non-hurdle part: post-calculation exponentiation p1 <- predictions( brms_lognormal_hurdle, newdata = datagrid(lifeExp = seq(30, 80, 10)), transform = exp, dpar = "mu") p2 <- predictions( brms_lognormal_hurdle, newdata = datagrid(lifeExp = seq(30, 80, 10)), dpar = "mu") expect_true(all(p1$estimate != p2$estimate)) eps <- 0.01 cmp1 <- comparisons( brms_lognormal_hurdle, variables = list(lifeExp = eps), newdata = datagrid(lifeExp = seq(30, 80, 10)), comparison = function(hi, lo) (exp(hi) - exp(lo)) / exp(eps), dpar = "mu") cmp2 <- comparisons( brms_lognormal_hurdle, variables = list(lifeExp = eps), newdata = datagrid(lifeExp = seq(30, 80, 10)), comparison = function(hi, lo) exp((hi - lo) / eps), dpar = "mu") expect_true(all(cmp1$estimate != cmp2$estimate)) cmp <- comparisons( brms_lognormal_hurdle2, dpar = "mu", datagrid(disp = c(150, 300, 450)), comparison = "expdydx") 
expect_equivalent(cmp$estimate, c(-0.0464610297239711, -0.0338017059188856, -0.0245881481374242), # seed difference? # c(-0.0483582312992919, -0.035158983842012, -0.0255763979591749), tolerance = .01) # emt <- emtrends(mod, ~disp, var = "disp", dpar = "mu", # regrid = "response", tran = "log", type = "response", # at = list(disp = c(150, 300, 450))) # Issue #432: bayes support for comparison with output of length 1 cmp1 <- comparisons(brms_numeric2, comparison = "difference") cmp2 <- comparisons(brms_numeric2, comparison = "differenceavg") cmp3 <- comparisons(brms_numeric2, comparison = "ratio") cmp4 <- comparisons(brms_numeric2, comparison = "ratioavg") expect_equivalent(nrow(cmp1), 64) expect_equivalent(nrow(cmp2), 2) expect_equivalent(nrow(cmp3), 64) expect_equivalent(nrow(cmp4), 2) # Issue #432: comparisons = conf.low = conf.high because mean() returns a # single number when applied to the draws matrix cmp <- comparisons(brms_binomial, variables = "tx", comparison = "lnoravg") expect_true(all(cmp$estimate != cmp$conf.low)) expect_true(all(cmp$estimate != cmp$conf.high)) expect_true(all(cmp$conf.high != cmp$conf.low)) # Issue #432: posterior_draws() and tidy() error with `comparison="avg"` pd <- posterior_draws(cmp) expect_inherits(pd, "data.frame") expect_equivalent(nrow(pd), 4000) ti <- tidy(cmp) expect_equivalent(nrow(ti), 1) expect_inherits(ti, "data.frame") # hypothesis with bayesian models p1 <- predictions( brms_numeric2, hypothesis = c(1, -1), newdata = datagrid(hp = c(100, 110))) p2 <- predictions( brms_numeric2, hypothesis = "b1 = b2", newdata = datagrid(hp = c(100, 110))) expect_inherits(p1, "predictions") expect_inherits(p2, "predictions") expect_equivalent(nrow(p1), 1) expect_equivalent(nrow(p2), 1) expect_equivalent(p1$estimate, p2$estimate) expect_true(all(c("conf.low", "conf.high") %in% colnames(p1))) expect_true(all(c("conf.low", "conf.high") %in% colnames(p2))) lc <- matrix(c(1, -1, -1, 1), ncol = 2) colnames(lc) <- c("Contrast A", "Contrast 
B") p3 <- predictions( brms_numeric2, hypothesis = lc, newdata = datagrid(hp = c(100, 110))) expect_inherits(p3, "predictions") expect_equivalent(nrow(p3), 2) expect_equivalent(p3$term, c("Contrast A", "Contrast B")) expect_equivalent(p3$estimate[1], -p3$estimate[2]) # `by` argument is supported for predictions() because it is a simple average. # In comparisons(), some transformations are non-collapsible, so we can't just # take the average, and we need to rely on more subtle transformations from # `comparison_function_dict`. p <- predictions( brms_factor, by = "cyl_fac") expect_inherits(p, "predictions") expect_equal(ncol(attr(p, "posterior_draws")), 2000) expect_equal(nrow(p), 3) expect_true(all(c("conf.low", "conf.high") %in% colnames(p))) # `by` data frame to collapse response group by <- data.frame( group = as.character(1:4), by = rep(c("(1,2)", "(3,4)"), each = 2)) p <- predictions( brms_cumulative_random, by = by) expect_equivalent(nrow(p), 2) p <- predictions( brms_cumulative_random, by = by, hypothesis = "reference") expect_equivalent(nrow(p), 1) # # `by` not supported in comparisons() or slopes() # # this is not supported!! 
# expect_error(comparisons(brms_factor, by = "cyl_fac"), pattern = "supported") # expect_error(slopes(brms_factor, by = "cyl_fac"), pattern = "supported") # interaction is same order of magnitude as frequentist # issue reported by Solomon Kurz over Twitter DM dat <- mtcars dat$cyl <- factor(dat$cyl) mod <- lm(mpg ~ am * factor(cyl), data = mtcars) tid <- avg_comparisons(mod, variables = c("cyl", "am"), cross = TRUE) tid.b <- avg_comparisons(brms_kurz, variables = c("cyl", "am"), cross = TRUE) expect_equivalent(tid$estimate, tid.b$estimate, tolerance = 0.1) expect_equivalent(tid$conf.low, tid.b$conf.low, tolerance = 0.2) expect_equivalent(tid$conf.high, tid.b$conf.high, tolerance = 0.2) # issue 445 leftover browser() p <- predictions(mod, by = "am") expect_inherits(p, "predictions") expect_equivalent(nrow(p), 2) # transform works for comparisons() and predictions() p1 <- predictions(brms_poisson, type = "link") p2 <- predictions(brms_poisson, type = "link", transform = exp) expect_equivalent(exp(p1$estimate), p2$estimate) expect_equivalent(exp(p1$conf.low), p2$conf.low) expect_equivalent(exp(p1$conf.high), p2$conf.high) expect_equivalent(exp(attr(p1, "posterior_draws")), attr(p2, "posterior_draws")) p1 <- comparisons(brms_poisson, type = "link") p2 <- comparisons(brms_poisson, type = "link", transform = exp) expect_equivalent(exp(p1$estimate), p2$estimate) expect_equivalent(exp(p1$conf.low), p2$conf.low) expect_equivalent(exp(p1$conf.high), p2$conf.high) expect_equivalent(exp(attr(p1, "posterior_draws")), attr(p2, "posterior_draws")) # byfun by <- data.frame( by = c("1,2", "1,2", "3,4", "3,4"), group = 1:4) p1 <- predictions(brms_cumulative_random, newdata = "mean") p2 <- predictions(brms_cumulative_random, newdata = "mean", by = by) p3 <- predictions(brms_cumulative_random, newdata = "mean", by = by, byfun = sum) expect_equivalent(mean(p1$estimate[1:2]), p2$estimate[1], tolerance = 0.1) expect_equivalent(mean(p1$estimate[3:4]), p2$estimate[2], tolerance = 0.1) 
expect_equivalent(sum(p1$estimate[1:2]), p3$estimate[1], tolerance = 0.1) expect_equivalent(sum(p1$estimate[3:4]), p3$estimate[2], tolerance = 0.1) # Issue #500 p <- plot_predictions(brms_issue500, condition = "z") expect_inherits(p, "gg") # Issue #504: integrate out random effects set.seed(1024) K <<- 100 cmp <- avg_comparisons( brms_logit_re, newdata = datagrid(firm = sample(1e5:2e6, K)), allow_new_levels = TRUE, sample_new_levels = "gaussian") bm <- brmsmargins( k = K, object = brms_logit_re, at = data.frame(x = c(0, 1)), CI = .95, CIType = "ETI", contrasts = cbind("AME x" = c(-1, 1)), effects = "integrateoutRE")$ContrastSummary expect_equivalent(cmp$estimate, bm$Mdn, tolerance = .05) expect_equivalent(cmp$conf.low, bm$LL, tolerance = .05) expect_equivalent(cmp$conf.high, bm$UL, tolerance = .05) # posterior_draws(shape = ) tid <- avg_comparisons(brms_numeric2) pd <- posterior_draws(tid, shape = "DxP") hyp <- brms::hypothesis(pd, "b1 - b2 > 0") expect_inherits(hyp, "brmshypothesis") # posterior::rvar tid <- avg_comparisons(brms_numeric2) rv <- posterior_draws(tid, "rvar") expect_equivalent(nrow(rv), 2) expect_inherits(rv$rvar[[1]], "rvar") # Issue #546 cmp <- comparisons(brms_numeric2, newdata = datagrid()) expect_false(anyNA(cmp$am)) # Issue #576 cmp <- comparisons(brms_issue576) expect_equal(nrow(cmp), 32) cmp <- comparisons(brms_issue576, by = "term") expect_equal(nrow(cmp), 1) cmp <- comparisons(brms_issue576, by = "cyl") expect_equal(nrow(cmp), 3) cmp <- comparisons(brms_issue576, by = "am") expect_equal(nrow(cmp), 2) # Issue #639 pre <- avg_predictions(brms_issue639) cmp <- avg_comparisons(brms_issue639) expect_inherits(pre, "predictions") expect_inherits(cmp, "comparisons") expect_equivalent(nrow(pre), 5) expect_equivalent(nrow(cmp), 5) # Issue #703 pre <- predictions(brms_inhaler_cat, type = "link") expect_inherits(pre, "predictions") cmp <- comparisons(brms_inhaler_cat, type = "link") expect_inherits(cmp, "comparisons") # Issue #751: informative error on 
bad predition expect_error(comparisons(brms_logit_re, newdata = datagrid(firm = -10:8)), pattern = "new.levels") cmp = comparisons(brms_logit_re, newdata = datagrid(firm = -10:8), allow_new_levels = TRUE) expect_inherits(cmp, "comparisons") # Issue #888: posterior_draws() fails for quantile transformation expect_error(predictions( brms_factor, by = "cyl_fac", transform = \(x) ecdf(mtcars$mpg)(x)) |> posterior_draws(), pattern = "matrix input must return") # Issue 1006: predictor is also a response cmp <- avg_comparisons(brms_issue1006) expect_inherits(cmp, "comparisons") cmp <- avg_comparisons(brms_issue1006, variables = list(chl = 1)) expect_inherits(cmp, "comparisons") cmp <- avg_comparisons(brms_issue1006, variables = list(chl = 1)) expect_inherits(cmp, "comparisons") cmp <- avg_comparisons(brms_issue1006, variables = list(chl = 1, age = 1)) expect_inherits(cmp, "comparisons") source("helpers.R") rm(list = ls()) marginaleffects/inst/tinytest/test-pkg-MCMCglmm.R0000644000176200001440000000100514541720224021514 0ustar liggesuserssource("helpers.R") # https://stackoverflow.com/questions/72533745/loading-logistf-breaks-mcmcglmm exit_file("Conflict with logistf") if (!EXPENSIVE) exit_file("EXPENSIVE") requiet("MCMCglmm") mod <- MCMCglmm(mpg ~ hp, random = ~carb, data = mtcars, verbose = FALSE) p <- avg_comparisons(mod, newdata = mtcars) expect_inherits(p, "comparisons") expect_equivalent(nrow(p), 1) p <- avg_predictions(mod, by = "carb", newdata = mtcars) expect_inherits(p, "predictions") expect_equivalent(nrow(p), 6) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-robust.R0000644000176200001440000000037514541720224021447 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("robust") # no validity dat <- mtcars dat$cyl <- factor(dat$cyl) mod <- lmRob(mpg ~ hp + cyl, data = mtcars) expect_slopes(mod, n_unique = 1) expect_predictions(predictions(mod)) rm(list = 
ls())marginaleffects/inst/tinytest/test-hypotheses.R0000644000176200001440000001352214560035476021554 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("car") # When `FUN` and `hypotheses` are `NULL`, `hypotheses()` returns a data.frame of parameters dat <- mtcars mod <- lm(mpg ~ hp + wt + factor(cyl), data = dat) dmm <- hypotheses(mod) expect_inherits(dmm, "data.frame") # Test of equality between coefficients dmm <- hypotheses(mod, "hp = wt") dmc <- car::linearHypothesis(mod, hypothesis = "hp = wt") expect_equivalent(dmm$estimate, attr(dmc, "value")[[1]]) expect_equivalent(dmm$std.error, sqrt(attr(dmc, "vcov")[[1]])) # Non-linear function dmm <- hypotheses(mod, "exp(hp + wt) = 0.1") expect_inherits(dmm, "data.frame") # Robust standard errors dmm <- hypotheses(mod, "hp = wt", vcov = "HC3") expect_inherits(dmm, "data.frame") # b1, b2, ... shortcuts can be used to identify rows in the output of FUN dmm <- hypotheses(mod, "b2 = b3") expect_inherits(dmm, "data.frame") # term names with special characters have to be enclosed in backticks dmm <- hypotheses(mod, "`factor(cyl)6` = `factor(cyl)8`") expect_inherits(dmm, "data.frame") # The `FUN` argument can be used to compute standard errors for fitted values mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) f <- function(x) predict(x, type = "link", newdata = mtcars) p <- hypotheses(mod, FUN = f) expect_inherits(p, "data.frame") expect_true(all(p$std.error > 0)) f <- function(x) predict(x, type = "response", newdata = mtcars) p <- hypotheses(mod, FUN = f) expect_inherits(p, "data.frame") expect_true(all(p$std.error > 0)) # equality between predictions: 1 and 2 equal, 2 and 3 different f <- function(x) predict(x, type = "link", newdata = mtcars) dmm <- hypotheses(mod, FUN = f, hypothesis = "b1 = b2") expect_equivalent(dmm$estimate, 0) dmm <- hypotheses(mod, FUN = f, hypothesis = "b3 = b2") expect_equivalent(dmm$estimate, 1.33154848763268) # named matrix mod <- lm(mpg ~ factor(cyl), data = 
mtcars) hyp <- matrix( c(0, -1, 1, 1/3, 1/3, 1/3), ncol = 2, dimnames = list(NULL, c("H1", "H2"))) del <- hypotheses(mod, hypothesis = hyp) expect_equivalent(del$term, c("H1", "H2")) # two-step to check code where `hypotheses(model)` model is an object not a call mod <- lm(mpg ~ factor(cyl), data = mtcars) cmp <- avg_comparisons(mod) hyp <- hypotheses(cmp, equivalence = c(-10, -5)) expect_inherits(hyp, "hypotheses") # Issue #656 requiet("purrr") reg_list <- list() reg_list[[1]] <- lm(mpg ~ wt + hp, data = mtcars) reg_list[[2]] <- lm(mpg ~ wt + hp + factor(vs), data = mtcars) expect_inherits(hypotheses(reg_list[[1]]), "hypotheses") expect_inherits(hypotheses(reg_list[[2]]), "hypotheses") h <- lapply(reg_list, hypotheses) expect_inherits(h, "list") expect_equivalent(length(h), 2) h <- purrr::map(reg_list, hypotheses) expect_inherits(h, "list") expect_equivalent(length(h), 2) cmp = lapply(reg_list, comparisons) tmp <- lapply(cmp, function(x) hypotheses(x, "b1 = b2")) expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "hypotheses") expect_inherits(tmp[[2]], "hypotheses") tmp <- purrr::map(cmp, hypotheses, hypothesis = "b1 = b2") expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "hypotheses") expect_inherits(tmp[[2]], "hypotheses") mfx = lapply(reg_list, avg_slopes) tmp <- lapply(mfx, function(x) hypotheses(x, "b1 = b2")) expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "hypotheses") expect_inherits(tmp[[2]], "hypotheses") tmp <- purrr::map(mfx, hypotheses, hypothesis = "b1 = b2") expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "hypotheses") expect_inherits(tmp[[2]], "hypotheses") pre = lapply(reg_list, predictions) tmp <- lapply(pre, function(x) hypotheses(x, "b1 = b2")) expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "hypotheses") expect_inherits(tmp[[2]], "hypotheses") tmp <- purrr::map(pre, hypotheses, hypothesis = "b1 = b2") expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "hypotheses") expect_inherits(tmp[[2]], "hypotheses") tmp 
<- purrr::map(reg_list , ~hypotheses(.) |> tidy()) # error in Github version; works in CRAN version expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "tbl_df") expect_inherits(tmp[[2]], "tbl_df") tmp <- purrr::map(reg_list, function(reg) reg |> hypotheses("wt = 0") |> broom::tidy()) expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "tbl_df") expect_inherits(tmp[[2]], "tbl_df") tmp <- purrr::map(reg_list, function(reg) tidy(hypotheses(reg, "wt = 0"))) expect_inherits(tmp, "list") expect_inherits(tmp[[1]], "tbl_df") expect_inherits(tmp[[2]], "tbl_df") # Issue #776: sort before hypothesis load(url("https://github.com/vincentarelbundock/modelarchive/raw/main/data-raw/gusto.rda")) mod = glm( day30 ~ tx * sex + age, family = "binomial", data = gusto) cmp = avg_comparisons( mod, type = "link", variables = list("tx" = "pairwise"), by = "sex" ) x <- hypotheses(cmp, hypothesis = "b4 - b3 = 0") y <- cmp$estimate[4] - cmp$estimate[3] z <- avg_comparisons( mod, type = "link", variables = list("tx" = "pairwise"), by = "sex", hypothesis = "b4 - b3 = 0" ) expect_equal(x$estimate, y) expect_equal(z$estimate, y) # labels dat <- mtcars mod <- lm(mpg ~ hp + wt + factor(cyl), data = dat) hyp <- hypotheses(mod, hypothesis = "b* = b2") known <- c("b1 = b2", "b2 = b2", "b3 = b2", "b4 = b2", "b5 = b2") expect_true(all(hyp$term %in% known)) # hypotheses() applied to {marginaleffects} package objects # commented out because doesn't work in environments because of match.call() # mod <- glm(vs ~ hp + am, data = mtcars, family = binomial) # cmp <- comparisons(mod, by = "am") # dm <- hypotheses(cmp, hypothesis = "b1 = b3") # expect_true("b1=b3" %in% dm$term) # expect_equivalent(nrow(dm), 1) # mfx <- slopes(mod) # dm <- hypotheses(mfx, hypothesis = "b1 = b3") # expect_true("b1=b3" %in% dm$term) # expect_equivalent(nrow(dm), 1) # pre <- predictions(mod, newdata = datagrid()) # dm <- hypotheses(pre, hypothesis = "b1 = 0.05") # expect_true("b1=0.05" %in% dm$term) # 
expect_equivalent(nrow(dm), 1) rm(list = ls()) marginaleffects/inst/tinytest/test-vcov.R0000644000176200001440000000530414560035476020335 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("sandwich") dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/datasets/mtcars.csv") # working but no validity check mod <- lm(mpg ~ hp + drat, data = dat) a <- tidy(slopes(mod)) assign("tmp", vcovHC(mod), envir = .GlobalEnv) mfx <- slopes(mod, vcov = tmp) b <- tidy(mfx) expect_true(all(a$estimate == b$estimate)) expect_true(all(a$std.error != b$std.error)) rm("tmp", envir = .GlobalEnv) # matrix produces different results (no validity) mod <- lm(mpg ~ hp * wt, data = dat) V <- vcovHC(mod) mfx1 <- slopes(mod) mfx2 <- slopes(mod, vcov = V) expect_true(all(mfx1$std.error != mfx2$std.error)) pre1 <- predictions(mod) pre2 <- predictions(mod, vcov = V) expect_true(all(pre1$std.error != pre2$std.error)) cmp1 <- comparisons(mod) cmp2 <- comparisons(mod, vcov = V) expect_true(all(cmp1$std.error != cmp2$std.error)) # marginaleffects strings (no validity) mod <- lm(mpg ~ hp * wt, data = dat) # aliases mfx1 <- slopes(mod, vcov = "HC2") mfx2 <- slopes(mod, vcov = "stata") mfx3 <- slopes(mod, vcov = "HC3") mfx4 <- slopes(mod, vcov = "robust") expect_equivalent(mfx1$std.error, mfx2$std.error) expect_equivalent(mfx3$std.error, mfx4$std.error) # different (no validity) mfx5 <- slopes(mod, vcov = ~ cyl) mfx6 <- slopes(mod, vcov = "HAC") expect_true(all(mfx1$std.error != mfx3$std.error)) expect_true(all(mfx1$std.error != mfx4$std.error)) expect_true(all(mfx1$std.error != mfx5$std.error)) expect_true(all(mfx1$std.error != mfx6$std.error)) expect_true(all(mfx3$std.error != mfx5$std.error)) expect_true(all(mfx3$std.error != mfx6$std.error)) expect_true(all(mfx5$std.error != mfx6$std.error)) # predictions strings (no validity) mod <- lm(mpg ~ hp * wt, data = dat) # aliases pre1 <- predictions(mod, vcov = "HC2") pre2 <- predictions(mod, vcov = "stata") pre3 <- 
predictions(mod, vcov = "HC3") pre4 <- predictions(mod, vcov = "robust") expect_equivalent(pre1$std.error, pre2$std.error) expect_equivalent(pre3$std.error, pre4$std.error) # different (no validity) pre5 <- predictions(mod, vcov = ~ cyl) pre6 <- predictions(mod, vcov = "HAC") expect_true(all(pre1$std.error != pre3$std.error)) expect_true(all(pre1$std.error != pre4$std.error)) expect_true(all(pre1$std.error != pre5$std.error)) expect_true(all(pre1$std.error != pre6$std.error)) expect_true(all(pre3$std.error != pre5$std.error)) expect_true(all(pre3$std.error != pre6$std.error)) expect_true(all(pre5$std.error != pre6$std.error)) # Issue #554 mod <- lm(mpg ~ cyl, data = dat) x <- get_vcov(mod, vcov = sandwich::vcovHC) y <- get_vcov(mod, vcov = "HC3") expect_equivalent(x, y) x <- slopes(mod, vcov = sandwich::vcovHC) y <- slopes(mod, vcov = "HC3") expect_equivalent(x, y) rm(list = ls()) marginaleffects/inst/tinytest/test-pkg-lmerTest.R0000644000176200001440000000271514541720224021730 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("lmerTest") requiet("emmeans") requiet("broom") requiet("margins") # vs. emmeans vs. 
margins dat <- read.csv(testing_path("stata/databases/lme4_02.csv")) mod <-lme4::lmer(y ~ x1 * x2 + (1 | clus), data = dat) # no validity expect_slopes(mod) expect_predictions(predictions(mod)) # emmeans em <- suppressMessages(emmeans::emtrends(mod, ~x1, "x1", at = list(x1 = 0, x2 = 0))) em <- tidy(em) me <- avg_slopes(mod, newdata = datagrid(x1 = 0, x2 = 0, clus = 1)) expect_equivalent(me$std.error[1], em$std.error, tolerance = .01) expect_equivalent(me$estimate[1], em$x1.trend) # margins me <- avg_slopes(mod) ma <- margins(mod) ma <- tidy(ma) expect_equivalent(me$std.error, ma$std.error, tolerance = .0001) expect_equivalent(me$estimate, ma$estimate) # bug: population-level predictions() when {lmerTest} is loaded requiet("lmerTest") mod <- suppressMessages(lmer( weight ~ 1 + Time + I(Time^2) + Diet + Time:Diet + I(Time^2):Diet + (1 + Time + I(Time^2) | Chick), data = ChickWeight)) expect_inherits(predictions(mod, newdata = datagrid(Chick = NA, Diet = 1:4, Time = 0:21), re.form = NA), "predictions") expect_inherits( predictions(mod, newdata = datagrid(Diet = 1:4, Time = 0:21), re.form = NA), "predictions") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-dbarts.R0000644000176200001440000000407214541720224021406 0ustar liggesuserssource("helpers.R") requiet("dbarts") requiet("modeldata") requiet("marginaleffects") dat <- na.omit(modeldata::penguins) # matrix interface not supported y <- as.vector(dat$bill_length_mm) X <- model.matrix(~ ., dat[, -1]) mod <- dbarts::bart( X, y, verbose = FALSE) |> suppressWarnings() expect_error(comparisons(mod, newdata = dat), "bart2") |> suppressWarnings() # formula interface supported mod <- dbarts::bart2( bill_length_mm ~ ., data = dat, keepTrees = TRUE, verbose = FALSE) p <- predictions(mod, by = "species", newdata = dat) expect_inherits(p, "predictions") p <- avg_comparisons(mod, newdata = dat) expect_inherits(p, "comparisons") # Issue 940: Indexing hell options(marginaleffects_posterior_center = mean) data("lalonde", 
package = "MatchIt") fit <- dbarts::bart2(re78 ~ treat + age + educ + race + married + nodegree + re74 + re75, data = lalonde, keepTrees = T, verbose = F) p0 <- predict(fit, newdata = transform(subset(lalonde, treat == 1), treat = 0)) p1 <- predict(fit, newdata = transform(subset(lalonde, treat == 1), treat = 1)) p <- avg_comparisons(fit, variables = "treat", newdata = subset(lalonde, treat == 1)) expect_equal(p$estimate, mean(p1 - p0)) p0 <- predict(fit, newdata = transform(subset(lalonde, treat == 0), treat = 0)) p1 <- predict(fit, newdata = transform(subset(lalonde, treat == 0), treat = 1)) p <- avg_comparisons(fit, variables = "treat", newdata = subset(lalonde, treat == 0)) expect_equal(p$estimate, mean(p1 - p0)) p0 <- avg_comparisons(fit, variables = "treat", newdata = subset(lalonde, treat == 0)) p1 <- avg_comparisons(fit, variables = "treat", newdata = subset(lalonde, treat == 1)) p <- avg_comparisons(fit, variables = "treat", by = "treat") expect_equal(sort(c(p0$estimate, p1$estimate)), sort(p$estimate)) p0 <- avg_predictions(fit, newdata = subset(lalonde, treat == 0)) p1 <- avg_predictions(fit, newdata = subset(lalonde, treat == 1)) p <- avg_predictions(fit, by = "treat") expect_equal(sort(c(p0$estimate, p1$estimate)), sort(p$estimate)) options(marginaleffects_posterior_center = NULL) marginaleffects/inst/tinytest/test-jss.R0000644000176200001440000000044714544120273020152 0ustar liggesuserslibrary(tinytest) # make sure that the JSS data is always hosted at the same link forever dat <- read.csv("https://marginaleffects.com/data/titanic.csv") expect_inherits(dat, "data.frame") dat <- read.csv("https://marginaleffects.com/data/impartiality.csv") expect_inherits(dat, "data.frame") marginaleffects/inst/tinytest/test-plot_comparisons.R0000644000176200001440000000607414541720224022747 0ustar liggesuserssource("helpers.R") if (!requiet("tinysnapshot")) exit_file("tinysnapshot") if (ON_CI || ON_WINDOWS || ON_OSX) exit_file("local linux only") 
using("marginaleffects") mod <- lm(mpg ~ wt * hp, data = mtcars) p <- plot_comparisons(mod, variables = list(hp = "minmax"), condition = "wt", draw = FALSE) expect_equivalent(length(unique(p$estimate)), 50) p <- plot_comparisons(mod, variables = list(hp = "minmax"), condition = "wt") expect_inherits(p, "gg") p <- plot_comparisons(mod, variables = list(hp = "iqr"), condition = "wt") p <- plot_comparisons(mod, variables = list("hp" = c(100, 130)), condition = "wt") expect_inherits(p, "gg") # representative values p <- plot_comparisons(mod, variables = list(hp = "minmax"), condition = list("wt" = "threenum")) expect_snapshot_plot(p, "plot_comparisons-minmax_x") # two effects p <- plot_comparisons(mod, variables = c("hp", "wt"), condition = "wt") expect_snapshot_plot(p, "plot_comparisons-2effects") # bug from examples (revise_get_data() no longer returns a factor attribute) mod <- lm(mpg ~ hp * drat * factor(am), data = mtcars) p <- plot_comparisons(mod, variables = "hp", condition = list("am", "drat" = 3:5), draw = FALSE) expect_inherits(p, "data.frame") # Issue #592 mod <- lm(mpg ~ hp * drat * factor(am), data = mtcars) p <- plot_comparisons(mod, variables = "hp", condition = list("am", "drat" = 3:5)) expect_inherits(p, "gg") # Issue #545: blank graph library(ggplot2) dat_titanic <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/Titanic.csv") mod2 <- glm(Survived ~ Age, data = dat_titanic, family = binomial) p <- plot_comparisons( mod2, variables = list("Age" = 10), condition = "Age", comparison = "ratio") + ylab("Adjusted Risk Ratio\nP(Survived = 1 | Age + 10) / P(Survived = 1 | Age)") expect_snapshot_plot(p, "plot_comparisons-rr_titanic") # Issue #725: `newdata` argument in plotting functions mod <- glm(vs ~ hp + am, mtcars, family = binomial) p1 <- plot_comparisons(mod, variables = "hp", by = "am", newdata = datagrid(am = 0:1, grid_type = "counterfactual"), draw = FALSE) p2 <- avg_comparisons(mod, variables = "hp", by = "am", draw = FALSE, 
newdata = datagrid(am = 0:1, grid_type = "counterfactual")) expect_equivalent(p1$estimate, p2$estimate) expect_equivalent(p1$conf.low, p2$conf.low, tolerance = 1e-6) p3 <- plot_comparisons(mod, variables = "hp", by = "am", draw = FALSE) p4 <- avg_comparisons(mod, variables = "hp", by = "am", draw = FALSE) expect_equivalent(p3$estimate, p4$estimate) expect_equivalent(p3$conf.low, p4$conf.low) expect_true(all(p1$conf.low != p3$conf.low)) p5 <- plot_comparisons(mod, variables = "hp", condition = "am", draw = FALSE) p6 <- comparisons(mod, variables = "hp", newdata = datagrid(am = 0:1)) expect_equivalent(p5$estimate, p6$estimate) expect_equivalent(p5$conf.low, p6$conf.low) expect_true(all(p1$conf.low != p5$conf.low)) expect_true(all(p3$conf.low != p5$conf.low)) expect_error(plot_comparisons(mod, variables = "hp", condition = "am", by = "am")) expect_error(plot_comparisons(mod, variables = "hp", newdata = mtcars)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-betareg.R0000644000176200001440000000334114560035476021547 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("betareg") requiet("margins") requiet("emmeans") requiet("broom") data("GasolineYield", package = "betareg") tmp <- GasolineYield tmp$batch <- factor(tmp$batch) dat <- tmp mod <- betareg::betareg(yield ~ batch + temp, data = dat) # marginaleffects: vs. margins vs. emmeans set.seed(1024) res <- slopes(mod, variables = "temp") mar <- data.frame(margins::margins(mod, unit_ses = TRUE)) expect_true(expect_margins(res, mar, tolerance = 0.1)) # emtrends mfx <- slopes(mod, newdata = datagrid(batch = 1), variables = "temp") em <- suppressWarnings( emtrends(mod, ~temp, "temp", at = list("batch" = tmp$batch[1]))) em <- tidy(em) expect_equivalent(mfx$estimate, em$temp.trend, tolerance = .001) expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) # marginaleffects: vs. 
Stata # stata does not include contrasts stata <- readRDS(testing_path("stata/stata.rds"))[["betareg_betareg_01"]] mfx <- merge(avg_slopes(mod), stata) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .0001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .0001) # predictions: no validity pred <- suppressWarnings(predictions(mod)) expect_predictions(pred, n_row = nrow(GasolineYield)) pred <- predictions(mod, newdata = datagrid(batch = 1:3, temp = c(300, 350))) expect_predictions(pred, n_row = 6) # marginalmeans: vs. emmeans mm <- predictions(mod, type = "link", by = "batch", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(batch) expect_inherits(mm, "predictions") expect_equal(nrow(mm), 10) em <- broom::tidy(emmeans::emmeans(mod, "batch")) expect_equivalent(mm$estimate, em$estimate) expect_equivalent(mm$std.error, em$std.error, tolerance = 0.01) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-robustbase.R0000644000176200001440000000131414541720224022274 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("robustbase") requiet("margins") # lmrob vs. margins data(coleman, package = "robustbase") model <- lmrob(Y ~ ., data=coleman, setting = "KS2014") expect_slopes(model, n_unique = 1) mar <- margins::margins(model, unit_ses = TRUE) mfx <- slopes(model) expect_true(expect_margins(mar, mfx)) # glmrob vs. 
margins data(epilepsy, package = "robustbase") model <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle", control = glmrobMqle.control(tcc= 1.2)) expect_slopes(model) mar <- margins::margins(model, unit_ses = TRUE) mfx <- slopes(model) expect_true(expect_margins(mar, mfx)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-MASS.R0000644000176200001440000002220314560035476020677 0ustar liggesusers# TODO: emtrends not clear what it computes for polr using("marginaleffects") source("helpers.R") requiet("margins") requiet("MASS") requiet("broom") requiet("emmeans") tol <- 0.0001 tol_se <- 0.001 ### marginaleffects # rlm: marginaleffects: vs. margins vs. emmeans dat <- mtcars model <- MASS::rlm(mpg ~ hp + drat, dat) expect_slopes(model, n_unique = 1) # margins mfx <- slopes(model) mar <- margins(model, unit_ses = TRUE) expect_true(expect_margins(mfx, mar, verbose = TRUE, tolerance = tol_se)) # emmeans mfx <- slopes(model, newdata = datagrid(drat = 3.9, hp = 110)) |> dplyr::arrange(term) em1 <- emmeans::emtrends(model, ~hp, "hp", at = list(hp = 110, drat = 3.9)) em2 <- emmeans::emtrends(model, ~drat, "drat", at = list(hp = 110, drat = 3.9)) em1 <- tidy(em1) em2 <- tidy(em2) expect_equivalent(mfx$estimate[2], em1$hp.trend) expect_equivalent(mfx$std.error[2], em1$std.error, tolerance = .002) expect_equivalent(mfx$estimate[1], em2$drat.trend) expect_equivalent(mfx$std.error[1], em2$std.error, tolerance = .002) # glm.nb: marginaleffects: vs. margins vs. 
emmeans model <- suppressWarnings(MASS::glm.nb(carb ~ wt + factor(cyl), data = mtcars)) # margins does not support unit-level standard errors mar <- margins(model) mfx <- slopes(model) expect_margins(mfx, mar, se = FALSE) # margins: standard errors at mean gradient mfx_tid <- avg_slopes(model) mar_tid <- tidy(mar)[, c("term", "estimate", "std.error")] mar_tid <- setNames(mar_tid, c("term", "mar_estimate", "mar_std.error")) tmp <- merge(mfx_tid, mar_tid) expect_equivalent(tmp$estimate, tmp$mar_estimate, tolerance = .0001) expect_equivalent(tmp$std.error, tmp$mar_std.error, tolerance = .001) # emmeans::emtrends mfx <- slopes(model, newdata = datagrid(wt = 2.6, cyl = 4), type = "link") em <- emtrends(model, ~wt, "wt", at = list(wt = 2.6, cyl = 4)) em <- tidy(em) expect_equivalent(mfx$estimate[mfx$term == "wt"], em$wt.trend) expect_equivalent(mfx$std.error[mfx$term == "wt"], em$std.error, tolerance = 1e-3) # emmeans contrasts mfx <- slopes(model, type = "link", newdata = datagrid(wt = 3, cyl = 4)) em <- emmeans(model, specs = "cyl") em <- emmeans::contrast(em, method = "revpairwise", at = list(wt = 3, cyl = 4)) em <- tidy(em) expect_equivalent(mfx$estimate[mfx$contrast == "6 - 4"], em$estimate[em$contrast == "cyl6 - cyl4"]) expect_equivalent(mfx$std.error[mfx$contrast == "6 - 4"], em$std.error[em$contrast == "cyl6 - cyl4"], tolerance = 1e-4) expect_equivalent(mfx$estimate[mfx$contrast == "8 - 4"], em$estimate[em$contrast == "cyl8 - cyl4"]) expect_equivalent(mfx$std.error[mfx$contrast == "8 - 4"], em$std.error[em$contrast == "cyl8 - cyl4"], tolerance = 1e-4) # glm.nb: marginaleffects: vs. 
Stata stata <- readRDS(testing_path("stata/stata.rds"))$mass_glm_nb model <- suppressWarnings( MASS::glm.nb(carb ~ wt + factor(cyl), data = mtcars)) mfx <- avg_slopes(model) stata$contrast <- ifelse(stata$term == "factor(cyl)6", "6 - 4", "") stata$contrast <- ifelse(stata$term == "factor(cyl)8", "8 - 4", stata$contrast) stata$term <- ifelse(grepl("cyl", stata$term), "cyl", stata$term) mfx <- merge(mfx, stata) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .0001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .001) # polr: marginaleffects: vs. Stata # Hess=TRUE otherwise breaks in the test environment via MASS:::vcov() -> update() stata <- readRDS(testing_path("stata/stata.rds"))[["MASS_polr_01"]] dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) mod <- MASS::polr(factor(y) ~ x1 + x2, data = dat, Hess = TRUE) mfx <- avg_slopes(mod, type = "probs") mfx <- merge(mfx, stata) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .01) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .01) expect_slopes(mod, type = "probs") # bugs stay dead: polr with 1 row newdata # Hess=TRUE otherwise breaks in the test environment via MASS:::vcov() -> update() dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) dat$y <- factor(dat$y) mod <- MASS::polr(y ~ x1, data = dat, Hess = TRUE) mfx <- slopes(mod, type = "probs", newdata = datagrid(x1 = 0)) expect_inherits(mfx, "marginaleffects") # marginaleffects vs. 
emmeans #skip_if_not_installed("emmeans", minimum_version = "1.7.1.9") # Hess=TRUE otherwise breaks in the test environment via MASS:::vcov() -> update() dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) dat$y <- factor(dat$y) mod <- MASS::polr(y ~ x1 + x2, data = dat, Hess = TRUE) em <- emmeans::emtrends(mod, ~y, var = "x1", mode = "prob", at = list(x1 = 0, x2 = 0)) em <- tidy(em) mfx <- slopes(mod, newdata = datagrid(x1 = 0, x2 = 0), type = "probs", variables = "x1") expect_equivalent(mfx$estimate, em$x1.trend, tolerance = .01) expect_equivalent(mfx$std.error, em$std.error, tolerance = .01) ### predictions # polr: predictions: no validity mod <- MASS::polr(factor(gear) ~ mpg + factor(cyl), data = mtcars, Hess = TRUE) pred <- suppressMessages(predictions(mod, type = "probs")) expect_predictions(pred) # bugs stay dead expect_true(all(c("rowid", "estimate", "std.error", "group") %in% colnames(pred))) # glm.nb: predictions: no validity model <- suppressWarnings(MASS::glm.nb(carb ~ wt + factor(cyl), data = mtcars)) pred <- predictions(model) expect_predictions(pred, se = FALSE) # rlm: predictions: no validity model <- MASS::rlm(mpg ~ hp + drat, mtcars) pred <- predictions(model) expect_predictions(pred, n_row = nrow(mtcars)) pred <- predictions(model, newdata = head(mtcars)) expect_predictions(pred, n_row = 6) ### marginalmeans # glm.nb: marginalmeans: vs. emmeans dat <- mtcars dat$cyl <- as.factor(dat$cyl) dat$am <- as.logical(dat$am) dat <- dat model <- suppressWarnings(MASS::glm.nb(carb ~ am + cyl, data = dat)) mm <- predictions(model, type = "link", by = "cyl", newdata = datagrid(grid_type = "balanced")) ti <- mm |> dplyr::arrange(cyl) em <- tidy(emmeans::emmeans(model, "cyl")) expect_equivalent(ti$estimate, em$estimate) expect_equivalent(ti$std.error, em$std.error, tolerance = 1e-4) # rlm: marginalmeans: vs. 
emmeans dat <- mtcars dat$cyl <- as.factor(dat$cyl) dat$am <- as.logical(dat$am) model <- MASS::rlm(mpg ~ cyl + am, dat) ti <- predictions(model, by = "cyl", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(cyl) em <- tidy(emmeans::emmeans(model, "cyl")) expect_equivalent(ti$estimate, em$estimate) expect_equivalent(ti$std.error, em$std.error, tolerance = 1e-4) # glmmPQL # glmmPQL: no validity requiet("lme4") # glmmPQL fails when lme4 is not installed tmp <- bacteria tmp$week_bin <- tmp$week > 2 mod <- glmmPQL( y ~ trt + week_bin, random = ~ 1 | ID, family = binomial, verbose = FALSE, data = tmp) expect_slopes(mod, type = "link", n_unique = 1) expect_slopes(mod, type = "response") expect_predictions(predictions(mod)) # emtrends em <- emmeans::emtrends(mod, ~week_bin, "week_bin", at = list(week_bin = 0)) em <- tidy(em) mfx <- slopes(mod, newdata = datagrid(week_bin = 0), type = "link") expect_equivalent(mfx$estimate[3], em$week_bin.trend) expect_equivalent(mfx$std.error[3], em$std.error, tolerance = .01) # bugs stay dead: character regressor with categorical outcome dat <- mtcars dat$cyl <- as.character(dat$cyl) dat <- dat mod <- polr(factor(gear) ~ cyl, data = dat, Hess = TRUE) # not clear why this generates a warning only on CI tid <- suppressMessages(avg_slopes(mod, type = "probs")) expect_equivalent(nrow(tid), 6) # polr: average predictions by group against Stata mod <- polr(factor(gear) ~ hp, data = mtcars, Hess = TRUE) p <- suppressMessages(avg_predictions(mod, type = "probs")) expect_equivalent( p$estimate, c(.4933237, .363384, .1432922), tolerance = tol) expect_equivalent( p$std.error, c(.0867256, .0838539, .0591208), tolerance = tol_se) # Predictive margins Number of obs = 32 # Model VCE : OIM # 1._predict : Pr(gear==1), predict(pr outcome(1)) # 2._predict : Pr(gear==2), predict(pr outcome(2)) # 3._predict : Pr(gear==3), predict(pr outcome(3)) # ------------------------------------------------------------------------------ # | Delta-method # | 
Margin Std. Err. z P>|z| [95% Conf. Interval] # -------------+---------------------------------------------------------------- # _predict | # 1 | .4933237 .0867256 5.69 0.000 .3233448 .6633027 # 2 | .363384 .0838539 4.33 0.000 .1990335 .5277346 # 3 | .1432922 .0591208 2.42 0.015 .0274175 .2591669 # polr: marginalmeans vs. emmeans k <- mtcars k$vs <- as.factor(k$vs) k$am <- as.logical(k$am) mod <- suppressWarnings(MASS::polr(factor(gear) ~ vs + am, data = k, Hess = TRUE)) # TODO: emmeans seems broken at the moment # em <- emmeans(mod, specs = "am", type = "response") # em <- tidy(em) mm <- predictions(mod, by = "am", type = "probs", newdata = datagrid(grid_type = "balanced")) expect_equivalent(nrow(mm), 6) # Issue #896: polr returns mean of binary instead of median mtcars$gear <- as.factor(mtcars$gear) mod <- polr( gear ~ mpg + cyl + vs, data = mtcars, method = "probit", Hess = TRUE) p <- predictions(mod, newdata = "median", type = "probs") expect_true(all(p$vs == 0)) rm(list = ls())marginaleffects/inst/tinytest/test-counterfactual.R0000644000176200001440000000530214541720224022364 0ustar liggesuserssource("helpers.R") using("marginaleffects") # old bug: counterfactual with a single regressor mod <- lm(mpg ~ hp + drat + wt, mtcars) x <- datagrid(model = mod, hp = c(100, 110), grid_type = "counterfactual") expect_equivalent(nrow(x), 64) mod <- lm(mpg ~ hp, mtcars) x <- datagrid(model = mod, hp = c(100, 110), grid_type = "counterfactual") expect_equivalent(nrow(x), 64) # marginal effects does not overwrite counterfactual rowid mod <- glm(am ~ mpg + factor(cyl), data = mtcars, family = binomial) mfx <- slopes(mod, newdata = datagrid(cyl = c(4, 6, 8), grid_type = "counterfactual")) expect_true(all(mfx$rowidcf %in% 1:32)) expect_true(all(mfx$rowid %in% 1:96)) # alternative syntaxes mod <- lm(mpg ~ hp + drat + wt, mtcars) nd1 <- datagrid(wt = 3, hp = c(100, 110), model = mod) nd2 <- datagrid(wt = 3, hp = c(100, 110), drat = 4, newdata = mtcars) x <- slopes(mod, newdata = 
datagrid(wt = 3, hp = c(100, 110))) x$mpg <- NULL # placeholder response in predictions y <- slopes(mod, newdata = nd1) z <- slopes(mod, newdata = nd2) expect_equivalent(x$estimate, y$estimate) expect_equivalent(x$conf.low, y$conf.low, tol = 1e-5) expect_equivalent(x$estimate, z$estimate) expect_equivalent(x$conf.low, z$conf.low, tol = 1e-2) # size mod <- lm(mpg ~ hp + drat + wt, mtcars) expect_equivalent(nrow(datagrid(wt = 2:3, newdata = mtcars, grid_type = "counterfactual")), nrow(mtcars) * 2) expect_equivalent(nrow(datagrid(wt = 2:3, newdata = mtcars)), 2) expect_equivalent(nrow(datagrid(wt = 2:3, model = mod, grid_type = "counterfactual")), nrow(mtcars) * 2) expect_equivalent(nrow(datagrid(wt = 2:3, model = mod)), 2) # warning on bad `at` entry expect_warning(datagrid(newdata = mtcars, at = list("blah" = 0:1), grid_type = "counterfactual")) expect_warning(datagrid(newdata = mtcars, at = list("blah" = 0:1))) # datagrid(): factor, logical, automatic variable tmp <- mtcars tmp$am <- as.logical(tmp$am) tmp$gear <- as.factor(tmp$gear) mod <- lm(mpg ~ hp * wt + am + gear, data = tmp) res <- datagrid( model = mod, hp = c(100, 110), gear = c(3, 4), am = TRUE, grid_type = "counterfactual") expect_inherits(res, "data.frame") expect_equivalent(dim(res), c(128, 6)) # datagrid(): factor, logical, numeric tmp <- mtcars tmp$am <- as.logical(tmp$am) tmp$gear <- as.factor(tmp$gear) res <- datagrid(newdata = tmp) expect_inherits(res, "data.frame") expect_equivalent(dim(res), c(1, 11)) expect_equivalent(sum(sapply(res, is.logical)), 1) expect_equivalent(sum(sapply(res, is.factor)), 1) expect_equivalent(sum(sapply(res, is.numeric)), 9) # typical number of rows mod <- lm(mpg ~ hp * wt, data = mtcars) nd <- datagrid(model = mod, hp = c(100, 110)) expect_equivalent(nrow(slopes(mod, newdata = nd)), 4) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-crch.R0000644000176200001440000000342114560035476021054 0ustar liggesuserssource("helpers.R") using("marginaleffects") 
requiet("crch") requiet("ordinal") dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/crch/RainIbk.csv") q <- unique(stats::quantile(dat$rain, seq(0.1, 0.9, 0.1))) dat$rain_sqrt <- sqrt(dat$rain) dat$sqrtensmean <- apply(sqrt(dat[,grep('^rainfc',names(dat))]), 1, mean) dat$sqrtenssd <- apply(sqrt(dat[,grep('^rainfc',names(dat))]), 1, sd) dat$enssd <- apply(dat[,grep('^rainfc',names(dat))], 1, sd) dat$ensmean <- apply(dat[,grep('^rainfc',names(dat))], 1, mean) dat <<- subset(dat, enssd > 0) # marginaleffects: crch gaussian: no validity model <- crch(sqrt(rain) ~ sqrtensmean + sqrtenssd, data = dat, dist = "gaussian") expect_slopes(model, n_unique = 1, type = "location") # logistic: no validity model <- crch(sqrt(rain) ~ sqrtensmean | sqrtenssd, data = dat, dist = "logistic", left = 0) expect_slopes(model, type = "location", n_unique = 1) mfx <- slopes(model, type = "location", variables = "sqrtensmean") expect_true(!any(mfx$estimate == 0)) mfx <- slopes(model, type = "location", variables = "sqrtenssd") expect_true(all(mfx$estimate == 0)) mfx <- slopes(model, type = "scale", variables = "sqrtensmean") expect_true(all(mfx$estimate == 0)) mfx <- slopes(model, type = "scale", variables = "sqrtenssd") expect_true(!any(mfx$estimate == 0)) # hlxr: no validity mod <- hxlr(rain_sqrt ~ sqrtensmean, data = dat, thresholds = sqrt(q)) expect_slopes(mod, type = "location", n_unique = 1) # predictions: crch gaussian: no validity model <- crch(sqrt(rain) ~ sqrtensmean + sqrtenssd, data = dat, dist = "gaussian") pred1 <- predictions(model, newdata = dat) pred2 <- predictions(model, newdata = head(dat)) expect_predictions(pred1, n_row = nrow(dat)) expect_predictions(pred2, n_row = 6) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-complete_levels.R0000644000176200001440000000116714541720224022534 0ustar liggesuserssource("helpers.R") using("marginaleffects") # padding with interactions dat <- 
read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/ggplot2movies/movies.csv") dat$style <- ifelse(dat$Action == 1, "Action", "Other") dat$style <- ifelse(dat$Comedy == 1, "Comedy", dat$style) dat$style <- ifelse(dat$Drama == 1, "Drama", dat$style) dat$style <- factor(dat$style) dat$certified_fresh <- dat$rating >= 8 dat <- dat[dat$length < 240,] mod <- glm(certified_fresh ~ length * style, data = dat, family = binomial) res <- predictions(mod, type = "response") expect_predictions(res, n_row = nrow(dat), se = FALSE) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-survival.R0000644000176200001440000000705614541720224022007 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("survival") requiet("emmeans") requiet("broom") # Issue #911: survreg support fit <- survreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian, dist='weibull', scale=1) s <- avg_slopes(fit) expect_inherits(s, "slopes") # clogit N <- 10000 ng <- 5000 exd <- data.frame( g = rep(1:ng, each = N / ng), out = rep(0L:1L, N / 2), x = sample(0L:1L, N / 2, prob = c(.8, .2), replace = TRUE)) mod <- clogit( out ~ x + strata(g), method = "exact", data = exd) mfx <- slopes(mod, type = "lp") expect_inherits(mfx, "marginaleffects") cmp <- comparisons(mod, type = "lp") expect_inherits(cmp, "comparisons") pre <- predictions(mod, type = "lp") expect_inherits(pre, "predictions") # coxph vs. 
Stata stata <- readRDS(testing_path("stata/stata.rds"))$survival_coxph_01 test1 <<- data.frame(time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = factor(c(0, 0, 0, 0, 1, 1, 1))) mod <- coxph(Surv(time, status) ~ x + strata(sex), data = test1, ties = "breslow") mfx <- merge(avg_slopes(mod, type = "lp"), stata) expect_slopes(mod, type = "risk", n_unique = 4) expect_equivalent(mfx$estimate, mfx$dydxstata) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = 1e-5) # emtrends em <- emtrends(mod, ~x, "x", at = list(time = 4, status = 1, x = 0, sex = factor(0, levels = 0:1))) em <- tidy(em) mfx <- slopes(mod, variables = "x", type = "lp") expect_equivalent(mfx$estimate[1], em$x.trend) expect_equivalent(mfx$std.error[1], em$std.error, tolerance = 1e-5) # coxph: no validity test2 <<- data.frame(start = c(1, 2, 5, 2, 1, 7, 3, 4, 8, 8), stop = c(2, 3, 6, 7, 8, 9, 9, 9, 14, 17), event = c(1, 1, 1, 1, 1, 1, 1, 0, 0, 0), x = c(1, 0, 0, 1, 0, 1, 1, 1, 0, 0)) mod <- coxph(Surv(start, stop, event) ~ x, test2) expect_slopes(mod, type = "risk", n_unique = 2) # bugs stay dead: conf.level forces get_predicted which doesn't process 'type' test3 <<- data.frame(time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = factor(c(0, 0, 0, 0, 1, 1, 1))) mod <- coxph(Surv(time, status) ~ x + strata(sex), data = test3, ties = "breslow") p1 <- predictions(mod, type = "lp") p2 <- predictions(mod, type = "risk") expect_true(all(p1$estimate != p2$estimate)) # bugs stay dead: numeric vs factor strata #skip_if_not_installed("insight", minimum_version = "0.17.0") stata <- readRDS(testing_path("stata/stata.rds"))$survival_coxph_01 test4 <<- data.frame(time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = factor(c(0, 0, 0, 0, 1, 1, 1))) test5 <<- data.frame(time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = c(0, 0, 
0, 0, 1, 1, 1)) mod1 <- coxph(Surv(time, status) ~ x + strata(sex), data = test4, ties = "breslow") mod2 <- coxph(Surv(time, status) ~ x + strata(sex), data = test5, ties = "breslow") mfx1 <- merge(avg_slopes(mod1, type = "lp"), stata) mfx2 <- merge(avg_slopes(mod2, type = "lp"), stata) expect_equivalent(mfx1$estimate, mfx2$estimate) source("helpers.R") suppressWarnings(rm(list = paste0("test", 1:5), .GlobalEnv)) rm(list = ls())marginaleffects/inst/tinytest/test-call.R0000644000176200001440000000226614541720224020266 0ustar liggesuserssource("helpers.R") # recall captures calls to avoid evaluating twice modd <- lm(mpg ~ hp + factor(gear), data = mtcars) cmp1 <- comparisons(modd) cmp1 <- tidy(cmp1) cmp2 <- tidy(comparisons(modd))[, seq_along(cmp1)] cmp3 <- comparisons(modd) |> tidy() for (col in c("estimate", "std.error", "p.value", "conf.high")) { expect_equivalent(cmp1[[col]], cmp2[[col]]) expect_equivalent(cmp1[[col]], cmp3[[col]]) } suppressWarnings(rm("modd", .GlobalEnv)) suppressWarnings(rm("modd")) # #### Are caught calls roughly twice as fast? 
# long_avg <- function() { # cmp1 <- comparisons(mod) # tidy(cmp1) # } # long_sum <- function() { # cmp1 <- comparisons(mod) # summary(cmp1) # } # long_tid <- function() { # cmp1 <- comparisons(mod) # tidy(cmp1) # } # bench::mark( # long_avg(), # tidy(comparisons(mod)), # comparisons(mod) |> tidy(), # long_tid(), # tidy(comparisons(mod)), # comparisons(mod) |> tidy(), # long_sum(), # comparisons(mod) |> tidy() |> summary(), # I expected this to be faster but not the case # summary(comparisons(mod)), # comparisons(mod) |> summary(), # check = FALSE, # iterations = 25 # ) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-glmmTMB.R0000644000176200001440000002072014560035476021435 0ustar liggesuserssource("helpers.R") exit_file("glmmTMB produces weird SEs") # if (!EXPENSIVE) exit_file("EXPENSIVE") using("marginaleffects") # exit_file("glmmTMB always causes problems") if (ON_CI) exit_file("on ci") # install and test fails on Github requiet("glmmTMB") requiet("emmeans") requiet("broom") data("Owls", package = "glmmTMB") # marginaleffects no validity Owls <- transform(Owls, Nest = reorder(Nest, NegPerChick), NCalls = SiblingNegotiation, FT = FoodTreatment) m0 <- glmmTMB(NCalls ~ (FT + ArrivalTime) * SexParent + offset(log(BroodSize)) + (1 | Nest), data = Owls, ziformula = ~1, family = poisson) expect_slopes(m0) m1 <- glmmTMB(count ~ mined + (1 | site), zi = ~mined, family = poisson, data = Salamanders) expect_slopes(m1) # Binomial model data(cbpp, package = "lme4") m4 <- glmmTMB(cbind(incidence, size - incidence) ~ period + (1 | herd), family = binomial, data = cbpp) expect_slopes(m4) # comparisons vs. 
emmeans # Zero-inflated negative binomial model m2 <- glmmTMB(count ~ spp + mined + (1 | site), zi = ~spp + mined, family = nbinom2, data = Salamanders) co <- comparisons(m2, type = "link", variables = "mined", newdata = datagrid(mined = "no", spp = "GP", site = "VF-1")) em <- tidy(pairs(emmeans(m2, "mined", at = list(spp = "GP", site = "VF-1")))) expect_slopes(m2) expect_equivalent(co$estimate, -1 * em$estimate) expect_equivalent(co$std.error, em$std.error) # Issue reported by email by Olivier Baumais bug <- glmmTMB(count ~ spp + mined, ziformula = ~spp + mined, family = "nbinom2", data = Salamanders) mfx <- slopes(bug) tid1 <- comparisons(bug, comparison = "dydxavg") tid2 <- tidy(slopes(bug)) expect_equivalent(tid1$estimate, tid2$estimate) expect_equivalent(tid1$std.error, tid2$std.error) expect_equivalent(tid1$statistic, tid2$statistic) expect_equivalent(tid1$p.value, tid2$p.value) expect_equivalent(length(unique(abs(tid1$statistic))), 7) bed <- marginaleffects:::modelarchive_data("new_bedford") mzip_3 <- glmmTMB( x ~ cfp + c1 + pfp, ziformula = ~ res + inc + age, family = "nbinom2", data = bed) tid <- avg_slopes(mzip_3, type = "response") |> dplyr::arrange(term) # TODO: half-checked against Stata. Slight difference on binary predictors. 
Stata probably dydx b <- c(-0.0357107397803255, 0.116113581361053, -0.703975123794627, -0.322385169497792, 2.29943403870235, 0.313970669520973) se <- c(0.0137118286464027, 0.335617116221601, 0.333707103584788, 0.0899355981887107, 2.51759246321455, 2.10076503002941) expect_equivalent(b, tid$estimate) expect_equivalent(se, tid$std.error, tolerance = 1e-4) # Hurdle Poisson model m3 <- glmmTMB(count ~ spp + mined + (1 | site), zi = ~spp + mined, family = truncated_poisson, data = Salamanders) expect_slopes(m3) co <- comparisons(m3, type = "link", variables = "mined", newdata = datagrid(mined = "no", spp = "GP", site = "VF-1")) em <- tidy(pairs(emmeans(m3, "mined", at = list(spp = "GP", site = "VF-1")))) expect_slopes(m3) expect_equivalent(co$estimate, -1 * em$estimate) expect_equivalent(co$std.error, em$std.error) # contrast: manual check mod <- glmmTMB(count ~ spp + mined + (1 | site), zi = ~spp + mined, family = nbinom2, data = Salamanders) dat1 <- dat2 <- Salamanders dat1$mined <- "yes" dat2$mined <- "no" cont1 <- predict(mod, type = "response", newdata = dat2) - predict(mod, type = "response", newdata = dat1) cont2 <- comparisons(mod, variables = "mined") expect_equivalent(cont2$estimate, cont1) # informative errors dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/lme4/VerbAgg.csv") dat$woman <- as.numeric(dat$Gender == "F") dat$item <- as.factor(dat$item) mod <- glmmTMB( woman ~ btype + resp + (1 + Anger | item), family = binomial, data = dat) expect_error(predictions(mod, newdata = datagrid(), vcov = "HC3"), pattern = "not supported") expect_inherits(predictions(mod, newdata = datagrid(), vcov = NULL), "predictions") expect_inherits(predictions(mod, newdata = datagrid(), vcov = FALSE), "predictions") expect_inherits(predictions(mod, newdata = datagrid(), vcov = TRUE), "predictions") expect_inherits(predictions(mod, newdata = datagrid(), vcov = insight::get_varcov(mod)), "predictions") # marginalmeans: some validity p <- predictions(mod, type = 
"link", re.form = NA) expect_inherits(p, "predictions") em <- data.frame(emmeans(mod, ~Sex)) mm <- predictions(mod, by = "Sex", newdata = datagrid(grid_type = "balanced"), type = "link", re.form = NA) expect_equivalent(em$emmean, mm$estimate) expect_equivalent(em$SE, mm$std.error) # Issue #466: REML not supported # Problem is that model$fit$par does not include all the parameters when # REML=TRUE, so when we set `set_coef()`, we can't change the fixed effects, # and the predictions are not altered. In turn, this produced 0 standard errors # in `get_se_delta()`. set.seed(42) dat <- do.call("rbind", list( transform(PlantGrowth, trial = "A"), transform(PlantGrowth, trial = "B", weight = runif(30) * weight), transform(PlantGrowth, trial = "C", weight = runif(30) * weight))) colnames(dat)[2] <- "groupid" model_REML <- glmmTMB( weight ~ groupid + trial + (1 | groupid:trial), REML = TRUE, data = dat) expect_error(slopes(model_REML), pattern = "REML") expect_error(comparisons(model_REML), pattern = "REML") expect_error(predictions(model_REML), pattern = "REML") expect_inherits(slopes(model_REML, vcov = FALSE), "marginaleffects") expect_inherits(predictions(model_REML, re.form = NA, vcov = FALSE), "predictions") expect_inherits(predictions(model_REML, vcov = FALSE, re.form = NA), "predictions") # Issue #663 if (!requiet("ordbetareg")) exit_file("ordbetareg") requiet("dplyr") data(pew, package = "ordbetareg") model_data <- dplyr::select( pew, therm, age = "F_AGECAT_FINAL", sex = "F_SEX_FINAL", income = "F_INCOME_FINAL", ideology = "F_IDEO_FINAL", race = "F_RACETHN_RECRUITMENT", education = "F_EDUCCAT2_FINAL", region = "F_CREGION_FINAL", approval = "POL1DT_W28", born_again = "F_BORN_FINAL", relig = "F_RELIG_FINAL", news = "NEWS_PLATFORMA_W28") %>% mutate_at(c("race", "ideology", "income", "approval", "sex", "education", "born_again", "relig"), function(c) { factor(c, exclude = levels(c)[length(levels(c))]) }) |> # need to make these ordered factors for BRMS transform( 
education = ordered(education), income = ordered(income)) model_data$therm_norm <- (model_data$therm - min(model_data$therm)) / (max(model_data$therm) - min(model_data$therm)) mod <- glmmTMB( therm_norm ~ approval + (1 | region), data = model_data, family = ordbeta(), start = list(psi = c(-1, 1))) mfx <- avg_slopes(mod) expect_inherits(mfx, 'slopes') # Issue #707 set.seed(123) n <- 200 d <- data.frame( outcome = rnorm(n), groups = as.factor(sample(c("treatment", "control"), n, TRUE)), episode = as.factor(sample(1:2, n, TRUE)), ID = as.factor(rep(1:10, n / 10)), wt = abs(rnorm(n, mean = 1, sd = 0.1)), sex = as.factor(sample(c("female", "male"), n, TRUE, prob = c(.4, .6)))) mod <- glmmTMB(outcome ~ groups * episode + (1 | ID), data = d, weights = wt) tmp <<- head(d) p <- avg_predictions(mod, variables = "groups", newdata = tmp) expect_inherits(p, "predictions") # Simple prediction standard errors m <- glmmTMB(mpg ~ hp + (1 | carb), data = transform(mtcars, carb = as.character(carb))) p1 <- predictions(m) p2 <- data.frame(predict(m, se.fit = TRUE)) expect_equivalent(p1$estimate, p2$fit) expect_equivalent(p1$std.error, p2$se.fit, tol = 1e-6) exit_file("Issue #810 is not fixed") # Issue #810 m <- glmmTMB(Sepal.Length ~ Sepal.Width, data = iris) p1 <- predictions(m, newdata = iris) |> head() p2 <- data.frame(predict(m, newdata = iris, se.fit = TRUE)) |> head() expect_equivalent(p1$estimate, p2$fit) expect_equivalent(p1$std.error, p2$se.fit, tol = 1e-6) m <- glmmTMB(Sepal.Length ~ Sepal.Width + (1 | Species), data = iris) p1 <- predictions(m, newdata = iris) |> head() p2 <- data.frame(predict(m, newdata = iris, se.fit = TRUE)) |> head() expect_equivalent(p1$estimate, p2$fit) expect_equivalent(p1$std.error, p2$se.fit, tol = 1e-6) m <- glmmTMB(Sepal.Length ~ Sepal.Width + (1 | Petal.Width * Species), data = iris) p1 <- predictions(m, newdata = iris) p2 <- data.frame(predict(m, newdata = iris, se.fit = TRUE)) expect_equivalent(p1$estimate, p2$fit) 
expect_equivalent(p1$std.error, p2$se.fit, tol = 1e-6) source("helpers.R") rm(list = ls()) marginaleffects/inst/tinytest/test-variables.R0000644000176200001440000001117014554070071021317 0ustar liggesuserssource("helpers.R") using("marginaleffects") tmp <- mtcars tmp$gear <- as.factor(tmp$gear) tmp$cyl <- as.factor(tmp$cyl) mod <- lm(mpg ~ gear + cyl + hp, data = tmp) cmp1 <- comparisons(mod, variables = "hp", newdata = head(tmp, 1)) expect_equivalent(cmp1$term, "hp") expect_equivalent(cmp1$contrast, "+1") cmp2 <- comparisons(mod, variables = list("hp" = 1), newdata = head(tmp, 1)) expect_equivalent(cmp1, cmp2) cmp1 <- avg_comparisons( mod, variables = list(gear = "sequential", hp = 10, cyl = "pairwise")) |> dplyr::arrange(term, contrast) cmp2 <- avg_comparisons( mod, variables = list(gear = "sequential", hp = 1, cyl = "pairwise")) |> dplyr::arrange(term, contrast) # known <- c("4 - 3", "5 - 4", "+10", "6 - 4", "8 - 4", "8 - 6") # aggregate refactor gave us new labels known <- c("+10", "4 - 3", "5 - 4", "6 - 4", "8 - 4", "8 - 6") expect_true(all(known %in% cmp1$contrast)) expect_equivalent(cmp1$estimate[6], cmp2$estimate[6] * 10) # informative errors expect_error(suppressWarnings(comparisons(mod, variables = list(gear = "blah"))), pattern = "variables") expect_error(suppressWarnings(comparisons(mod, variables = list(hp = "pairwise"))), pattern = "variables") # regression test: factor in formula and numeric check mod <- lm(mpg ~ factor(cyl), data = mtcars) expect_inherits(comparisons(mod, variables = list(cyl = "pairwise")), "comparisons") expect_error(comparisons(mod, variables = list(cyl = "iqr")), pattern = "element") # Binary variables mod <- glm(am ~ hp + vs, dat = mtcars, family = binomial) cmp3 <- comparisons(mod, variables = list(vs = 1)) expect_inherits(cmp3, "comparisons") # no need to include categorical focal variable when there is only one of them mod <- lm(mpg ~ hp + factor(am) + wt, mtcars) nd <- data.frame(hp = 120, am = 1) 
expect_warning(comparisons(mod, variables = "wt", newdata = nd), pattern = "explicitly") expect_error(suppressWarnings(comparisons(mod, variables = "wt", newdata = nd))) nd <- data.frame(hp = 120, wt = 2.5) cmp <- comparisons(mod, variables = "am", newdata = nd) expect_inherits(cmp, "comparisons") expect_warning(comparisons(mod, newdata = nd), pattern = "is included") expect_error(suppressWarnings(comparisons(mod, newdata = nd), pattern = "is included")) # comparisons() variables = data.frame() mod <- lm(mpg ~ hp, mtcars) comparisons(mod, variables = list(hp = data.frame(mtcars$hp, mtcars$hp + 1:32))) # Issue #757: rev mod <- lm(mpg ~ factor(cyl), mtcars) a <- avg_comparisons(mod, variables = list(cyl = "pairwise")) b <- avg_comparisons(mod, variables = list(cyl = "revpairwise")) expect_equal(a$estimate, -1 * b$estimate) a <- avg_comparisons(mod, variables = list(cyl = "reference")) b <- avg_comparisons(mod, variables = list(cyl = "revreference")) expect_equal(a$estimate, -1 * b$estimate) a <- avg_comparisons(mod, variables = list(cyl = "sequential")) b <- avg_comparisons(mod, variables = list(cyl = "revsequential")) expect_equal(a$estimate, -1 * b$estimate) # Custom vector mod <- lm(mpg ~ hp, mtcars) cmp <- avg_comparisons(mod, variables = list(hp = \(x) data.frame(mtcars$hp, mtcars$cyl)), by = "cyl") expect_equal(length(unique(cmp$estimate)), 3) expect_equal(length(unique(round(cmp$statistic, 5))), 1) # Issue 953: Custom functions or data frames for factors/logical columns requiet("ordinal") dat <- wine |> transform( rating = ordered(ifelse(rating == 5, 1, rating)), temp = as.character(temp) ) mod <- clm(rating ~ response * temp, data = dat) DF = data.frame( lo = dat$temp, hi = ifelse(dat$temp == "cold", "warm", "cold") ) cmp <- comparisons(mod, variables = list(temp = DF)) expect_inherits(cmp, "comparisons") p1 <- predictions(mod) p2 <- predictions(mod, newdata = transform(dat, temp = ifelse(temp == "cold", "warm", "cold"))) expect_equal(p2$estimate - 
p1$estimate, cmp$estimate) cmp <- avg_comparisons(mod, variables = list(temp = DF)) expect_inherits(cmp, "comparisons") expect_equal(nrow(cmp), 4) dat$temp <- dat$temp == "cold" mod <- clm(rating ~ response * temp, data = dat) DF = data.frame( lo = dat$temp, hi = ifelse(dat$temp == FALSE, TRUE, FALSE) ) cmp <- comparisons(mod, variables = list(temp = DF)) expect_inherits(cmp, "comparisons") p1 <- predictions(mod) p2 <- predictions(mod, newdata = transform(dat, temp = ifelse(temp == FALSE, TRUE, FALSE))) expect_equal(p2$estimate - p1$estimate, cmp$estimate) cmp <- avg_comparisons(mod, variables = list(temp = DF)) expect_inherits(cmp, "comparisons") expect_equal(nrow(cmp), 4) DF = \(x) data.frame( lo = x, hi = ifelse(x == FALSE, TRUE, FALSE) ) cmp <- comparisons(mod, variables = list(temp = DF)) expect_inherits(cmp, "comparisons") expect_equal(nrow(cmp), 288) rm(list = ls()) marginaleffects/inst/tinytest/test-weights.R0000644000176200001440000001743114541720224021025 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("survey") # mtcars logit tmp <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/datasets/mtcars.csv") tmp$weights <- tmp$w <- 1:32 dat <- tmp mod <- suppressWarnings(svyglm( am ~ mpg + cyl, design = svydesign(ids = ~1, weights = ~weights, data = dat), family = binomial)) p1 <- avg_predictions(mod, newdata = dat) p2 <- avg_predictions(mod, wts = "weights", newdata = dat) p3 <- avg_predictions(mod, wts = "w", newdata = dat) p4 <- avg_predictions(mod, wts = dat$weights) expect_false(p1$estimate == p2$estimate) expect_false(p1$std.error == p2$std.error) expect_equivalent(p2, p3) expect_equivalent(p2, p4) # by supports weights p1 <- avg_predictions(mod, wts = "weights", newdata = dat) expect_inherits(p1, "data.frame") m1 <- avg_slopes(mod, wts = "weights", newdata = dat, by = "cyl") expect_inherits(m1, "data.frame") c1 <- avg_comparisons(mod, wts = "weights", newdata = dat, by = "cyl") expect_inherits(c1, "data.frame") # wts + 
comparison="avg" set.seed(100) k <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/MatchIt/lalonde.csv") k$w <- rchisq(614, 2) fit <- lm(re78 ~ treat * (age + educ + race + married + re74), data = k, weights = w) cmp1 <- comparisons(fit, variables = "treat", wts = "w") cmp2 <- comparisons(fit, variables = "treat", wts = "w", comparison = "differenceavg") expect_equivalent(cmp2$estimate, weighted.mean(cmp1$estimate, k$w)) # sanity check expect_error(comparisons(mod, wts = "junk"), pattern = "explicitly") expect_error(slopes(mod, wts = "junk"), pattern = "explicitly") # vs. Stata (not clear what SE they use, so we give tolerance) mod <- suppressWarnings(svyglm( am ~ mpg, design = svydesign(ids = ~1, weights = ~weights, data = dat), family = binomial)) tmp <- mod$prior.weights stata <- c(.0441066, .0061046) mfx <- slopes(mod, wts = tmp, by = "term") expect_equivalent(mfx$estimate[1], stata[1], tol = .01) expect_equivalent(mfx$std.error, stata[2], tolerance = 0.002) # Issue #737 requiet("tidyverse") md <- tibble::tribble( ~g, ~device, ~y, ~N, ~p, "Control", "desktop", 12403, 103341L, 0.120020127538925, "Control", "mobile", 1015, 16192L, 0.0626852766798419, "Control", "tablet", 38, 401L, 0.0947630922693267, "X", "desktop", 12474, 103063L, 0.121032766366203, "X", "mobile", 1030, 16493L, 0.0624507366761656, "X", "tablet", 47, 438L, 0.107305936073059, "Z", "desktop", 12968, 102867L, 0.126065696481865, "Z", "mobile", 973, 16145L, 0.0602663363270362, "Z", "tablet", 34, 438L, 0.0776255707762557, "W", "desktop", 12407, 103381L, 0.120012381385361, "W", "mobile", 1007, 16589L, 0.060702875399361, "W", "tablet", 30, 435L, 0.0689655172413793 ) tmp <<- as.data.frame(md) tmp <- as.data.frame(md) fit <- glm(cbind(y, N - y) ~ g * device, data = tmp, family = binomial()) cmp1 <- avg_comparisons(fit, variables = list(g = c("Control", "Z")), wts = "N", newdata = tmp, comparison = "lnratioavg", transform = exp) cmp2 <- predictions(fit, variables = list(g = c("Control", "Z"))) 
|> dplyr::group_by(g) |> dplyr::summarise(estimate = weighted.mean(estimate, N)) |> as.data.frame() expect_equivalent( cmp1$estimate, cmp2$estimate[cmp2$g == "Z"] / cmp2$estimate[cmp2$g == "Control"]) # wts shortcuts are internal-only expect_error( avg_comparisons(fit, variables = "g", wts = "N", comparison = "lnratioavgwts", transform = exp), pattern = "check_choice" ) # lnratioavg = lnratio with `by` cmp1 <- avg_comparisons(fit, variables = "g", by = "device", wts = "N", comparison = "lnratioavg", transform = exp) cmp2 <- avg_comparisons(fit, variables = "g", by = "device", wts = "N", comparison = "lnratio", transform = exp) expect_equivalent(cmp1, cmp2) # lnratioavg + wts produces same results in this particular case, because there are only the g*device predictors cmp1 <- avg_comparisons(fit, variables = "g", by = "device", wts = "N", comparison = "lnratioavg", transform = exp) cmp2 <- avg_comparisons(fit, variables = "g", by = "device", wts = "N", comparison = "lnratioavg", transform = exp) expect_equivalent(cmp1, cmp2) # Issue #865 d = data.frame( outcome = c(0,0,1,0,0,1,1,1,0,0,0,1,0,1,0, 0,0,0,0,1,0,0,1,1,1,0,0,0,1,0,0,0,0,0,0,0, 0,1,0,1,0,0,1,1,0,1,0,1,0,0,1,0,1,0,1,0,1, 1,1,0,0,0,0,0,0,0,1,0,1,0,1,1,1,1,0,1,1,1, 0,0,0,0,1,1,0,0,1,0,1,0,1,0,1,0,1,0,0,1,1,0), foo = c(1,1,1,1,1,1,0,1,1,1,1,1,1,1,0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1, 1,1,1,1,0,1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,1, 1,1,0,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,0,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1), bar = c(1,1,1,0,0,0,1,1,0,0,1,0,1,0,1, 1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,1,1,0,0, 1,1,0,1,1,1,1,1,0,1,1,1,1,0,1,0,0,0,0,0,1, 1,0,0,0,0,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1, 0,1,1,0,1,0,1,1,1,0,1,0,1,1,0,0,1,1,0,1,1,1) ) mod = glm( outcome ~ foo + bar, family = "binomial", data = d ) cmp1 <- avg_comparisons(mod, variables = list(foo = 0:1), type = "response", comparison = "difference") cmp2 <- comparisons(mod, variables = list(foo = 0:1), type = "response", comparison = "differenceavg") 
expect_equivalent(cmp1$estimate, cmp2$estimate) # Issue #870 Guerry <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/HistData/Guerry.csv", na.strings = "") Guerry <- na.omit(Guerry) mod <- lm(Literacy ~ Pop1831 * Desertion, data = Guerry) p1 <- predictions(mod, by = "Region", wts = "Donations") p2 <- predictions(mod, by = "Region") expect_inherits(p1, "predictions") expect_false(any(p1$estimate == p2$estimate)) # brms set.seed(1024) mod <- marginaleffects:::modelarchive_model("brms_numeric2") w <- runif(32) cmp1 <- comparisons(mod, comparison = "differenceavg") cmp2 <- comparisons(mod, wts = w, comparison = "differenceavg") expect_true(all(cmp1$estimate != cmp2$estimate)) # . logit am mpg [pw=weights] # # Iteration 0: log pseudolikelihood = -365.96656 # Iteration 1: log pseudolikelihood = -255.02961 # Iteration 2: log pseudolikelihood = -253.55843 # Iteration 3: log pseudolikelihood = -253.55251 # Iteration 4: log pseudolikelihood = -253.55251 # # Logistic regression Number of obs = 32 # Wald chi2(1) = 8.75 # Prob > chi2 = 0.0031 # Log pseudolikelihood = -253.55251 Pseudo R2 = 0.3072 # # ------------------------------------------------------------------------------ # | Robust # am | Coefficient std. err. z P>|z| [95% conf. interval] # -------------+---------------------------------------------------------------- # mpg | .2789194 .0943021 2.96 0.003 .0940908 .4637481 # _cons | -5.484059 2.066303 -2.65 0.008 -9.533938 -1.434179 # ------------------------------------------------------------------------------ # # . margins, dydx(mpg) # # Average marginal effects Number of obs = 32 # Model VCE: Robust # # Expression: Pr(am), predict() # dy/dx wrt: mpg # # ------------------------------------------------------------------------------ # | Delta-method # | dy/dx std. err. z P>|z| [95% conf. 
interval] # -------------+---------------------------------------------------------------- # mpg | .0441066 .0061046 7.23 0.000 .0321419 .0560714 # ------------------------------------------------------------------------------ source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-transform_post.R0000644000176200001440000000214414541720224022426 0ustar liggesuserssource("helpers.R") using("marginaleffects") # exponentiate acs12 <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/openintro/acs12.csv") acs12$disability <- as.numeric(acs12$disability == "yes") mod <- glm(disability ~ gender + race + married + age, data = acs12, family = binomial) cmp1 <- comparisons( mod, variables = "gender", comparison = "lnratioavg") cmp2 <- comparisons( mod, variables = "gender", comparison = "lnratioavg", transform = exp) expect_equivalent(exp(cmp1$estimate), cmp2$estimate) expect_equivalent(exp(cmp1$conf.low), cmp2$conf.low) expect_equivalent(exp(cmp1$conf.high), cmp2$conf.high) # # argument name deprecation # # aggregate refactor makes thsi possible again # expect_warning(tidy(cmp2, transform = exp)) # expect_warning(summary(cmp2, transform = exp)) # # aggregate refactor deprecates trasnsform_avg # tid1 <- tidy(cmp1) # tid2 <- tidy(cmp1, transform = exp) # expect_equivalent(exp(tid1$estimate), tid2$estimate) # expect_equivalent(exp(tid1$conf.low), tid2$conf.low) # expect_equivalent(exp(tid1$conf.high), tid2$conf.high) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-ivreg.R0000644000176200001440000000255214560035476021255 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("margins") requiet("poorman") requiet("ivreg") # marginaleffects: vs. 
margins data(Kmenta, package = "ivreg") mod <- ivreg::ivreg(Q ~ P * D | D + F + A, data = Kmenta) res <- slopes(mod) mar <- data.frame(margins(mod, unit_ses = TRUE)) expect_true(expect_margins(res, mar, tolerance = .1, verbose = TRUE)) # plot_predictions: bugs stay dead # broke when no conf.low available data(Kmenta, package = "ivreg") mod <- ivreg::ivreg(Q ~ P + D + I(D^2) | D + I(D^2) + F + A, data = Kmenta) expect_inherits(plot_predictions(mod, condition = "D"), "ggplot") # marginaleffects: vs. Stata dat <- read.csv(testing_path("stata/databases/ivreg_ivreg_01.csv")) stata <- readRDS(testing_path("stata/stata.rds"))[["ivreg_ivreg_01"]] mod <- ivreg::ivreg(Q ~ P + D | D + F + A, data = dat) ame <- slopes(mod) |> poorman::group_by(term) |> poorman::summarize(estimate = mean(estimate), std.error = mean(std.error)) |> poorman::inner_join(stata, by = "term") expect_equivalent(ame$estimate, ame$dydxstata, tolerance = 0.0001) # predictions: no validity data(Kmenta, package = "ivreg") mod <- ivreg::ivreg(Q ~ P * D | D + F + A, data = Kmenta) pred1 <- predictions(mod) pred2 <- predictions(mod, newdata = head(Kmenta)) expect_predictions(pred1, n_row = nrow(Kmenta)) expect_predictions(pred2, n_row = 6) rm(list = ls())marginaleffects/inst/tinytest/test-transform_pre.R0000644000176200001440000001260414554070103022227 0ustar liggesusers# TODO: CI: See comment in last test for how the intervals are back transformed source("helpers.R") if (!EXPENSIVE) exit_file("EXPENSIVE") using("marginaleffects") requiet("modelsummary") tol <- .0001 # manual average contrast mod <- glm(am ~ vs + mpg, data = mtcars, family = binomial) cmp1 <- avg_comparisons( mod, variables = list(vs = 0:1), comparison = function(hi, lo) mean(hi - lo)) cmp2 <- avg_comparisons( mod, variables = list(vs = 0:1)) expect_equivalent(cmp1$estimate, cmp2$estimate) expect_equivalent(cmp1$std.error, cmp2$std.error, tolerance = tol) # error when function breaks or returns a bad vector requiet("survey") data(nhanes, 
package = "survey") dat <- setNames(nhanes, tolower(names(nhanes))) dat$female <- dat$riagendr == 2 dat$race <- sprintf("race%s", dat$race) mod <- glm(hi_chol ~ female, data = dat, family = binomial) expect_error(comparisons(mod, comparison = function(x) rep(1, 1234)), pattern = "numeric vector") expect_error(comparisons(mod, comparison = function(hi, lo) head(hi - lo)), pattern = "numeric vector") # univariate vs. Stata # known stata results arr_s <- c(arr.est = 0.94026450, arr.std_err = 0.09584693, arr.ci_l = 0.76998425, arr.ci_h = 1.14820184) ard_s <- c(ard.est = -0.00996557, ard.std_err = 0.01647135, ard.ci_l = -0.04224882, ard.ci_h = 0.02231767) acs12 <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/openintro/acs12.csv") acs12$disability <- as.numeric(acs12$disability == "yes") mod <- glm(disability ~ gender, data = acs12, family = binomial) ard_r <- avg_comparisons(mod, comparison = function(hi, lo) lo - hi) arr_r <- avg_comparisons(mod, comparison = function(hi, lo) mean(lo) / mean(hi)) cols <- c("estimate", "std.error", "conf.low", "conf.high") ard_r <- unlist(ard_r[, cols]) arr_r <- unlist(arr_r[, cols]) expect_equivalent(arr_r[1:2], arr_s[1:2], tolerance = tol) expect_equivalent(ard_r[1:2], ard_s[1:2], tolerance = tol) # multivariate vs. 
Stata # known stata values arr_s <- c(arr.est = 0.80285689, arr.std_err = 0.07496766, arr.ci_l = 0.66858441, arr.ci_h = 0.96409545) ard_s <- c(ard.est = -0.03544519, ard.std_err = 0.01499735, ard.ci_l = -0.06483945, ard.ci_h = -0.00605093) acs12 <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/openintro/acs12.csv") acs12$disability <- as.numeric(acs12$disability == "yes") mod <- glm(disability ~ gender + race + married + age, data = acs12, family = binomial) ard_r <- avg_comparisons(mod, variables = "gender", comparison = function(hi, lo) lo - hi) arr_r <- avg_comparisons(mod, variables = "gender", comparison = function(hi, lo) mean(lo) / mean(hi)) cols <- c("estimate", "std.error", "conf.low", "conf.high") ard_r <- unlist(ard_r[, cols]) arr_r <- unlist(arr_r[, cols]) expect_equivalent(arr_r[1:2], arr_s[1:2], tolerance = tol) expect_equivalent(arr_r[1:2], arr_s[1:2], tolerance = tol) # health insurance vs. Stata # known stata results arr_s <- c(arr.est = 1.04786879, arr.std_err = 0.00976999, arr.ci_l = 1.02889386, arr.ci_h = 1.06719366) ard_s <- c(ard.est = 0.04277614, ard.std_err = 0.00837836, ard.ci_l = 0.02635485, ard.ci_h = 0.05919742) dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/AER/HealthInsurance.csv") dat$health <- as.factor(dat$health) mod <- glm(health ~ insurance + gender + ethnicity + married + age, data = dat, family = binomial) ard_r <- avg_comparisons( mod, variables = "insurance", comparison = function(hi, lo) hi - lo) arr_r <- avg_comparisons( mod, variables = "insurance", comparison = function(hi, lo) mean(hi) / mean(lo)) cols <- c("estimate", "std.error", "conf.low", "conf.high") ard_r <- unlist(ard_r[, cols]) arr_r <- unlist(arr_r[, cols]) expect_equivalent(ard_r[1:2], ard_s[1:2], tolerance = tol) expect_equivalent(arr_r[1:2], arr_s[1:2], tolerance = tol) # Using manual back-transformation cols <- c("estimate", "conf.low", "conf.high") arr_r <- avg_comparisons( mod, variables = "insurance", comparison = 
function(hi, lo) log(mean(hi) / mean(lo)), transform = exp) arr_r <- unlist(arr_r[, cols]) expect_equivalent(arr_r, arr_s[c(1, 3, 4)], tolerance = tol) # bugfix: multiple terms w/ n=1 transform # the function must be applied to each group if it takes a mean or something similar dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/carData/TitanicSurvival.csv") dat$survived <- as.factor(dat$survived) mod <- glm(survived ~ passengerClass + sex, data = dat, family = binomial) cmp <- avg_comparisons(mod, comparison = function(hi, lo) mean(hi - lo)) # bug created duplicate estimates expect_equivalent(length(unique(cmp$estimate)), nrow(cmp)) # TODO: fix eps to make sure slopes() and comparisons() give same result # comparison slope vs slopes() mod <- glm(vs ~ mpg + hp, data = mtcars, family = binomial) mfx1 <- slopes(mod) mfx2 <- comparisons(mod, comparison = "dydx") mfx3 <- slopes(mod, eps = 1e-5) mfx4 <- comparisons(mod, comparison = "dydx", eps = 1e-5) expect_equivalent(mfx1$estimate, mfx2$estimate) expect_equivalent(mfx1$std.error, mfx2$std.error) expect_equivalent(mfx3$estimate, mfx4$estimate) expect_equivalent(mfx3$std.error, mfx4$std.error) # # label ratios: We don't have fancy ratio labels anymore, because +1 is a # # better label when we do centering. 
# mod <- lm(mpg ~ hp + factor(cyl), data = mtcars) # cmp <- comparisons(mod, comparison = "ratio") # expect_true(all(grepl("\\/", cmp$contrast))) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-afex.R0000644000176200001440000000353314560035476021064 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("afex") requiet("emmeans") data(md_12.1, package = "afex") mod <- aov_ez("id", "rt", md_12.1, within = c("angle", "noise")) # no validity expect_slopes(mod) pre <- predictions(mod) expect_inherits(pre, "predictions") cmp <- comparisons(mod) expect_inherits(cmp, "comparisons") # contrasts vs emmeans cmp <- comparisons(mod, variables = "angle", newdata = "marginalmeans") em <- emmeans(mod, ~angle) em <- emmeans::contrast(em, method = "trt.vs.ctrl1") em <- data.frame(em) expect_equal(cmp$estimate, em$estimate) expect_equal(cmp$std.error, em$SE) # predictions vs emmeans pre <- predictions( mod, newdata = datagrid(angle = c("X0", "X4", "X8"), noise = md_12.1$noise)) emm <- emmeans(mod, c("noise", "angle")) emm <- data.frame(emm) expect_equivalent(pre$estimate, emm$emmean) expect_equivalent(pre$std.error, emm$SE) # coefficient matrix (ANOVA on full design) data(obk.long, package = "afex") mod <- suppressMessages(aov_car( value ~ treatment * gender + Error(id/(phase*hour)), data = obk.long, observed = "gender")) em <- data.frame(emmeans(mod, ~ phase)) mm <- predictions(mod, newdata = datagrid(grid_type = "balanced"), by = "phase") expect_equivalent(mm$estimate, em$emmean) expect_equivalent(mm$std.error, em$SE) # data from https://github.com/mattansb/Analysis-of-Factorial-Designs-foR-Psychologists/03 Main and simple effects analysis Phobia <- readRDS("stata/databases/Phobia.rds") mod <- suppressMessages(aov_ez( id = "ID", dv = "BehavioralAvoidance", between = c("Condition", "Gender"), data = Phobia, anova_table = list(es = "pes"))) pre <- predictions(mod) mfx <- slopes(mod) expect_inherits(pre, "predictions") expect_inherits(cmp, "comparisons") 
expect_false(anyNA(pre$std.error)) expect_false(anyNA(cmp$std.error)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-speedglm.R0000644000176200001440000000231114541720224021721 0ustar liggesuserssource("helpers.R") using("marginaleffects") if (!requiet("speedglm")) exit_file("speedglm not on CRAN") requiet("margins") # glm vs. Stata stata <- readRDS(testing_path("stata/stata.rds"))[["stats_glm_01"]] dat <- read.csv(testing_path("stata/databases/stats_glm_01.csv")) mod <- speedglm(y ~ x1 * x2, family = binomial(), data = dat) mfx <- merge(avg_slopes(mod), stata) expect_slopes(mod) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .0001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .0001) # margins: wrong standard errors mfx <- slopes(mod) mar <- margins(mod, unit_ses = TRUE) expect_true(expect_margins(mfx, mar, tolerance = .001)) # lm vs. Stata stata <- readRDS(testing_path("stata/stata.rds"))[["stats_lm_01"]] dat <- read.csv(testing_path("stata/databases/stats_lm_01.csv")) mod <- speedlm(y ~ x1 * x2, data = dat) mfx <- merge(avg_slopes(mod), stata) expect_slopes(mod) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .00001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .0001) # margins: wrong standard errors mfx <- slopes(mod) mar <- margins(mod, unit_ses = TRUE) expect_true(expect_margins(mfx, mar, tolerance = 1e-3)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-rms.R0000644000176200001440000000315414541720224020730 0ustar liggesuserssource("helpers.R") if (ON_CI || ON_WINDOWS || ON_OSX) exit_file("local linux only") using("marginaleffects") requiet("polspline") requiet("rms") requiet("emmeans") requiet("broom") # lmr: marginaleffects vs emtrends model <- rms::lrm(am ~ mpg, mtcars) void <- capture.output({ expect_slopes(model, type = "lp", n_unique = 1) }) mfx <- slopes(model, newdata = data.frame(mpg = 30), type = "lp", eps = 1/1000 * diff(range(mtcars$mpg))) em <- emtrends(model, ~mpg, 
"mpg", at = list(mpg = 30)) em <- tidy(em) expect_equivalent(mfx$estimate, em$mpg.trend) expect_equivalent(mfx$std.error, em$std.error, tolerance = .0001) # predictions: rms: no validity model <- rms::lrm(am ~ mpg, mtcars) pred1 <- predictions(model, type = "lp") pred2 <- predictions(model, type = "lp", newdata = head(mtcars)) expect_predictions(pred1, n_row = 32) expect_predictions(pred2, n_row = 6) # comparisons mod <- ols(mpg ~ hp, mtcars) c1 <- comparisons(mod, type = "lp") expect_inherits(c1, "comparisons") mod <- lrm(am ~ hp, mtcars) c1 <- comparisons(mod, type = "fitted") c2 <- comparisons(mod, type = "lp") expect_inherits(c1, "comparisons") expect_inherits(c2, "comparisons") mod <- lrm(cyl ~ hp, mtcars) c1 <- comparisons(mod, type = "fitted") c2 <- comparisons(mod, type = "lp") expect_inherits(c1, "comparisons") expect_inherits(c2, "comparisons") mod <- orm(cyl ~ hp, mtcars) c1 <- comparisons(mod, type = "fitted") c2 <- comparisons(mod, type = "lp") c3 <- comparisons(mod, type = "mean") expect_inherits(c1, "comparisons") expect_inherits(c2, "comparisons") expect_error(comparisons(mod, vcov = "HC3"), pattern = "supported") rm(list = ls())marginaleffects/inst/tinytest/test-eps.R0000644000176200001440000000234314541720224020136 0ustar liggesuserssource("helpers.R") using("marginaleffects") # eps argument affects results as expected mod <- glm(vs ~ mpg + hp, data = mtcars, family = binomial) nd <- datagrid(model = mod) cmp0 <- slopes(mod, variables = "mpg", newdata = nd) cmp1 <- slopes(mod, variables = "mpg", newdata = nd, eps = 1) expect_true(all(cmp0$estimate != cmp1$estimate)) # adaptive eps should matter for logit but not ols mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) m1 <- slopes(mod, eps = NULL) m2 <- slopes(mod, eps = 1) m3 <- slopes(mod, eps = 1e-4) expect_true(all(m1$estimate != m2$estimate)) expect_true(all(m1$estimate != m2$estimate)) expect_true(all(m3$estimate != m2$estimate)) mod <- lm(am ~ hp + mpg, data = mtcars) m1 <- 
slopes(mod, eps = NULL) m2 <- slopes(mod, eps = 1) m3 <- slopes(mod, eps = 1e-4) expect_equivalent(m1$estimate, m2$estimate) expect_equivalent(m1$estimate, m3$estimate) expect_equivalent(m2$estimate, m3$estimate) # errors and warnings expect_error(slopes(mod, eps = 0)) # Issue #840 df <- causaldata::restaurant_inspections m1 <- glm(Weekend ~ Year, data = df, family = binomial) z <- avg_slopes(m1, variables = "Year", numderiv = "richardson")$statistic expect_equivalent(z, -2.0682935630417, tol = 1e-5) rm(list = ls())marginaleffects/inst/tinytest/test-by.R0000644000176200001440000001663314560035476020001 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("margins") requiet("nnet") tol <- 1e-4 tol_se <- 1e-2 mod1 <- glm(gear ~ cyl + am, family = poisson, data = mtcars) mod2 <- lm(gear ~ cyl + am, data = mtcars) p1 <- predictions(mod1, by = "am") p2 <- predictions(mod2, by = "am") p3 <- predictions(mod2, by = "am", wts = mtcars$wt) expect_true("conf.low" %in% colnames(p1)) expect_true("conf.low" %in% colnames(p2)) expect_equivalent(nrow(p1), nrow(p2)) expect_equivalent(nrow(p1), 2) # use comparison to collapse into averages mod <- glm(gear ~ cyl + am, family = poisson, data = mtcars) x <- avg_comparisons(mod, comparison = "dydx") y <- comparisons(mod, comparison = "dydxavg") expect_equivalent(x$estimate, y$estimate) expect_equivalent(x$std.error, y$std.error, tolerance = 1e-5) x <- avg_comparisons(mod, comparison = "eyex") y <- comparisons(mod, comparison = "eyexavg") expect_equivalent(x$estimate, y$estimate) expect_equivalent(x$std.error, y$std.error, tolerance = 1e-5) x <- avg_comparisons(mod, comparison = "eydx") y <- comparisons(mod, comparison = "eydxavg") expect_equivalent(x$estimate, y$estimate) expect_equivalent(x$std.error, y$std.error, tolerance = 1e-5) x <- avg_comparisons(mod, comparison = "dyex") y <- comparisons(mod, comparison = "dyexavg") expect_equivalent(x$estimate, y$estimate) expect_equivalent(x$std.error, y$std.error, tolerance 
= 1e-5) x <- avg_slopes(mod, slope = "dyex") y <- slopes(mod, slope = "dyexavg") expect_equivalent(x$estimate, y$estimate) expect_equivalent(x$std.error, y$std.error, tolerance = 1e-5) # input sanity check expect_error(slopes(mod, slope = "bad"), pattern = "eyexavg") ##### aggregate() refactor makes this possible again # by is deprecated in `summary()` and `tidy()` # expect_error(summary(comparisons(mod), by = "am"), pattern = "instead") # expect_error(tidy(comparisons(mod), by = "am"), pattern = "instead") # by argument mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) cmp <- comparisons(mod, by = "am", comparison = "lnor") expect_equal(nrow(cmp), 4) # counterfactual margins at() dat <- mtcars dat$cyl <- factor(dat$cyl) mod <- lm(mpg ~ factor(cyl) * hp + wt, data = dat) mar <- margins(mod, at = list(cyl = unique(dat$cyl))) mar <- data.frame(summary(mar)) mfx <- slopes( mod, by = "cyl", newdata = datagrid(cyl = c(4, 6, 8), grid_type = "counterfactual")) expect_equivalent(mfx$estimate, mar$AME) expect_equivalent(mfx$std.error, mar$SE, tolerance = 1e6) # issue #434 by with character precitors dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/AER/Affairs.csv") mod <- glm( affairs ~ children + gender + yearsmarried, family = poisson, data = dat) p <- predictions(mod, by = "children") expect_equivalent(nrow(p), 2) expect_false(anyNA(p$estimate)) # Issue #445: by data frame to collapse response levels mod <- nnet::multinom(factor(gear) ~ mpg + am * vs, data = mtcars, trace = FALSE) expect_error(predictions(mod, type = "probs", by = "response"), pattern = "Character vector") expect_error(predictions(mod, type = "probs", by = mtcars), pattern = "Character vector") p <- predictions(mod, type = "probs", by = "group") expect_equivalent(nrow(p), 3) cmp <- comparisons(mod, type = "probs", by = "group") expect_equivalent(nrow(cmp), 9) by <- data.frame( group = c("3", "4", "5"), by = c("(3,4)", "(3,4)", "(5)")) p1 <- predictions(mod, type = "probs") p2 
<- predictions(mod, type = "probs", by = by) p3 <- predictions(mod, type = "probs", by = by, hypothesis = "sequential") p4 <- predictions(mod, type = "probs", by = by, hypothesis = "reference") p5 <- predictions(mod, type = "probs", by = c("am", "vs", "group")) expect_equivalent(mean(subset(p1, group == "5")$estimate), p2$estimate[2]) expect_equivalent(p3$estimate, diff(p2$estimate)) expect_equivalent(nrow(p4), 1) expect_equivalent(nrow(p5), 12) cmp <- comparisons(mod, type = "probs", by = "am") expect_equivalent(nrow(cmp), 18) cmp <- comparisons( mod, variables = "am", by = by, type = "probs") expect_equivalent(nrow(cmp), 2) cmp <- comparisons( mod, variables = "am", by = by, hypothesis = "sequential", type = "probs") expect_equivalent(nrow(cmp), 1) # Issue #481: warning on missing by categories mod <- nnet::multinom(factor(gear) ~ mpg + am * vs, data = mtcars, trace = FALSE) by <- data.frame( by = c("4", "5"), group = 4:5) expect_warning(comparisons(mod, variables = "mpg", newdata = "mean", by = by)) expect_warning(predictions(mod, newdata = "mean", by = by)) # Issue #589: easy marginalization mod <- lm(mpg ~ factor(gear) + am, mtcars) cmp1 <- comparisons(mod, by = TRUE) cmp2 <- comparisons(mod, by = FALSE) expect_equivalent(nrow(cmp1), 3) expect_equivalent(nrow(cmp2), 96) pre1 <- predictions(mod, by = TRUE) pre2 <- predictions(mod, by = FALSE) expect_equivalent(nrow(pre1), 1) expect_equivalent(nrow(pre2), 32) pre1 <- slopes(mod, by = TRUE) pre2 <- slopes(mod, by = FALSE) expect_equivalent(nrow(pre1), 3) expect_equivalent(nrow(pre2), 96) # marginaleffects poisson vs. 
margins dat <- mtcars mod <- glm(gear ~ cyl + am, family = poisson, data = dat) mfx <- avg_slopes( mod, by = c("cyl", "am"), newdata = datagrid( cyl = unique, am = unique, grid_type = "counterfactual")) |> dplyr::arrange(term, cyl, am) mar <- margins(mod, at = list(cyl = unique(dat$cyl), am = unique(dat$am))) mar <- summary(mar) # margins doesn't treat the binary am as binary automatically expect_equivalent(mfx$estimate[7:12], mar$AME[7:12], tolerance = tol) expect_equivalent(mfx$std.error[7:12], mar$SE[7:12], tolerance = tol_se) # comparisons poisson vs. margins dat <- mtcars dat$cyl <- factor(dat$cyl) dat$am <- as.logical(dat$am) mod <- glm(gear ~ cyl + am, family = poisson, data = dat) mfx <- comparisons( mod, by = c("cyl", "am"), newdata = datagrid( cyl = unique, am = unique, grid_type = "counterfactual")) mfx <- tidy(mfx) mfx <- mfx[order(mfx$term, mfx$contrast, mfx$cyl, mfx$am),] mar <- margins(mod, at = list(cyl = unique(dat$cyl), am = unique(dat$am))) mar <- summary(mar) expect_equivalent(mfx$estimate, mar$AME, tolerance = tol) expect_equivalent(mfx$std.error, mar$SE, tolerance = tol_se) # Issue #715: incorrect grouping with custom `comparison` function dat <- transform(mtcars, vs = vs, am = as.factor(am), cyl = as.factor(cyl)) mod <- lm(mpg ~ qsec + am + cyl, dat) fun <- \(hi, lo) mean(hi) / mean(lo) cmp1 <- comparisons(mod, variables = "cyl", comparison = fun, by = "am") |> dplyr::arrange(am, contrast) cmp2 <- comparisons(mod, variables = "cyl", comparison = "ratioavg", by = "am") |> dplyr::arrange(am, contrast) expect_equivalent(cmp1$estimate, cmp2$estimate) expect_equivalent(cmp1$std.error, cmp2$std.error) expect_equal(nrow(cmp1), 4) # https://stackoverflow.com/questions/75858227/in-rs-marginaleffects-package-why-do-these-two-methods-shows-different-results requiet("dplyr") tmp <- mtcars %>% transform(am = factor(am), cyl = factor(cyl), mpg = mpg) mod <- lm(mpg ~ am * cyl, data = tmp) cmp1 <- avg_comparisons(mod, variables = "am", by = "cyl") |> 
dplyr::arrange(cyl) cmp2 <- comparisons(mod, variables = "am") %>% dplyr::group_by(cyl) %>% dplyr::summarize(estimate = mean(estimate), .groups = "keep") |> dplyr::ungroup() cmp3 <- predictions(mod) |> aggregate(estimate ~ am + cyl, FUN = mean) |> aggregate(estimate ~ cyl, FUN = diff) expect_equivalent(cmp1$estimate, cmp2$estimate) expect_equivalent(cmp1$estimate, cmp3$estimate) rm(list = ls()) marginaleffects/inst/tinytest/test-pkg-plm.R0000644000176200001440000000535614541720224020725 0ustar liggesuserssource("helpers.R") # exit_file("CHECK THIS") using("marginaleffects") requiet("margins") requiet("broom") requiet("plm") tol <- .001 tol_se <- .01 # BDR emergency email about tiny numerical differences dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/plm/Grunfeld.csv") dat$rownames <- NULL dat <<- pdata.frame(dat) pool <- plm(inv ~ value * capital, data = dat, model = "pooling") swamy <- plm( inv ~ value * capital, data = dat, model = "random", variables = "individual") amemiya <- plm( inv ~ value * capital, data = dat, model = "random", random.method = "amemiya", variables = "twoways") walhus <- plm( inv ~ value * capital, data = dat, model = "random", random.method = "walhus", variables = "twoways") ### marginaleffects # pooling vs. Stata stata <- readRDS(testing_path("stata/stata.rds"))$plm_pooling mfx <- merge(avg_slopes(pool), stata) expect_slopes(pool, n_unique = 1) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = tol) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = tol_se) # Swamy-Arora vs. 
Stata stata <- readRDS(testing_path("stata/stata.rds"))$plm_sa mfx <- merge(avg_slopes(swamy), stata) expect_slopes(swamy) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = tol) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = tol_se) # margins mfx <- avg_slopes(swamy) mar <- tidy(margins(swamy)) mfx <- mfx[order(mfx$term),] expect_equivalent(mfx$estimate, mar$estimate, tolerance = tol) expect_equivalent(mfx$std.error, mar$std.error, tolerance = tol_se) # no validity checks expect_slopes(amemiya) # margins avg_slopes(amemiya, type = "link") avg_slopes(amemiya, type = "response") mfx <- avg_slopes(amemiya) mar <- tidy(margins(amemiya)) mfx <- mfx[order(mfx$term),] expect_equivalent(mfx$estimate, mar$estimate, tolerance = tol) expect_equivalent(mfx$std.error, mar$std.error, tolerance = tol_se) expect_slopes(walhus) # margins mfx <- avg_slopes(walhus) mar <- tidy(margins(walhus)) mfx <- mfx[order(mfx$term),] expect_equivalent(mfx$estimate, mar$estimate, tolerance = tol) expect_equivalent(mfx$std.error, mar$std.error, tolerance = tol_se) # # commented out because the dev version of {plm} now has a fully-working predict method # # within error # # within model are not supported by `predict.plm` # stata <- readRDS(testing_path("stata/stata.rds"))$plm_within # mod <- plm(inv ~ value * capital, data = dat, model = "within", variables = "twoways") # expect_error(slopes(mod), pattern = "Unable") ### predictions # predictions: pooling no validity pred1 <- predictions(pool) pred2 <- predictions(pool, newdata = head(dat)) expect_predictions(pred1, n_row = nrow(dat)) expect_predictions(pred2, n_row = 6) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-elasticity.R0000644000176200001440000000347414541720224021527 0ustar liggesuserssource("helpers.R") using("marginaleffects") set.seed(1024) tol <- tolse <- 1e-3 results <- readRDS(testing_path("stata/stata.rds")) # lm() dat <- read.csv(testing_path("stata/databases/stats_lm_01.csv")) 
mod <- lm(y ~ x1 * x2, data = dat) mfx <- slopes(mod, slope = "eyex") expect_inherits(mfx, "slopes") mfx <- avg_slopes(mod, slope = "eyex") sta <- results$stats_lm_elasticity_eyex expect_equivalent(mfx$estimate, sta$dydxstata) expect_equivalent(mfx$std.error, sta$std.errorstata, tolerance = tolse) mfx <- avg_slopes(mod, slope = "eydx", numderiv = "richardson") sta <- results$stats_lm_elasticity_eydx expect_equivalent(mfx$estimate, sta$dydxstata) expect_equivalent(mfx$std.error, sta$std.errorstata, tolerance = tolse) mfx <- avg_slopes(mod, slope = "dyex", numderiv = "richardson") sta <- results$stats_lm_elasticity_dyex expect_equivalent(mfx$estimate, sta$dydxstata) expect_equivalent(mfx$std.error, sta$std.errorstata, tolerance = tolse) # glm() dat <- read.csv(testing_path("stata/databases/stats_glm_01.csv")) mod <- glm(y ~ x1 * x2, data = dat, family = binomial) mfx <- avg_slopes(mod, slope = "eyex", numderiv = "richardson") sta <- results$stats_glm_elasticity_eyex expect_equivalent(mfx$estimate, sta$dydxstata, tolerance = tol) expect_equivalent(mfx$std.error, sta$std.errorstata, tolerance = tolse) mfx <- avg_slopes(mod, slope = "eydx", numderiv = "richardson") sta <- results$stats_glm_elasticity_eydx expect_equivalent(mfx$estimate, sta$dydxstata, tolerance = tol) expect_equivalent(mfx$std.error, sta$std.errorstata, tolerance = tolse) mfx <- avg_slopes(mod, slope = "dyex", numderiv = "richardson") sta <- results$stats_glm_elasticity_dyex expect_equivalent(mfx$estimate, sta$dydxstata, tolerance = 1e-2) expect_equivalent(mfx$std.error, sta$std.errorstata, tolerance = tolse) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-survey.R0000644000176200001440000000165314541720224021466 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("margins") requiet("emmeans") requiet("broom") requiet("survey") # survey: marginaleffects vs. margins vs. 
emtrends data("fpc", package = "survey") svyd <- survey::svydesign( weights = ~weight, ids = ~psuid, strata = ~stratid, fpc = ~Nh, variables = ~ x + nh, data = fpc, nest = TRUE) mod <- survey::svyglm(x ~ nh, design = svyd) res <- slopes(mod) mar <- suppressMessages(data.frame(margins(mod, unit_ses = TRUE))) expect_equivalent(res$estimate, as.numeric(mar$dydx_nh)) expect_equivalent(res$std.error, as.numeric(mar$SE_dydx_nh), tolerance = 0.001) # emtrends em <- emtrends(mod, ~nh, "nh", at = list(nh = 4)) em <- tidy(em) mfx <- slopes(mod, type = "link", newdata = data.frame(nh = 4)) expect_equivalent(mfx$estimate, em$nh.trend, tolerance = .001) # CRAN tolerance expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-nlme.R0000644000176200001440000000415114560035476021071 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("nlme") requiet("emmeans") requiet("broom") dat <<- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/nlme/Ovary.csv") # nlme::gls: marginaleffects vs. 
emtrends model <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), dat, correlation = corAR1(form = ~ 1 | Mare)) mfx <- slopes(model) expect_inherits(mfx, "data.frame") expect_false(any(mfx$estimate == 0 | is.na(mfx$estimate))) expect_false(any(mfx$std.error == 0 | is.na(mfx$std.error))) # emtrends nd <- datagrid(newdata = dat, Time = 1) mfx <- slopes(model, variables = "Time", type = "link", newdata = datagrid(Time = 1)) em <- suppressMessages(emtrends(model, ~Time, "Time", mode = "df.error", at = list(Time = 1))) em <- tidy(em) expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) expect_equivalent(mfx$estimate, em$Time.trend, tolerance = .01) # predictions: nlme::gls: no validity model <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), data = dat, correlation = corAR1(form = ~ 1 | Mare)) pred1 <- predictions(model) pred2 <- predictions(model, newdata = head(dat)) expect_predictions(pred1, n_row = nrow(dat)) expect_predictions(pred2, n_row = 6) # glm: marginalmeans vs emmeans tmp <- dat tmp$categ <- factor(sample(letters[1:5], nrow(tmp), replace = TRUE)) tmp <<- tmp mod <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time) + categ, data = tmp, correlation = corAR1(form = ~ 1 | Mare)) em <- suppressMessages(emmeans(mod, specs = "categ")) em <- tidy(em) mm <- predictions(mod, newdata = datagrid(grid_type = "balanced"), by = "categ") |> dplyr::arrange(categ) expect_equivalent(mm$estimate, em$estimate) expect_equivalent(mm$std.error, em$std.error, tolerance = 1e-5) # issue #99: Support `lme` if (packageVersion("insight") < "0.19.0.12") exit_file("insight version") mod <- lme(distance ~ age + Sex, data = Orthodont, random = ~ 1) mfx <- avg_slopes(mod) cmp <- comparisons(mod) pre <- predictions(mod) expect_inherits(mfx, "slopes") expect_inherits(cmp, "comparisons") expect_inherits(pre, "predictions") source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-AER.R0000644000176200001440000000371014560035476020545 0ustar 
liggesusers# newdata must be explicit otherwise this only works interactively source("helpers.R") using("marginaleffects") requiet("AER") requiet("emmeans") requiet("broom") tol_se <- 1e-4 dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/AER/Affairs.csv") # tobit: marginaleffects vs. Stata stata <- readRDS(testing_path("stata/stata.rds"))$aer_tobit mod1 <- tobit( affairs ~ age + yearsmarried + religiousness + occupation + rating, data = dat) mfx <- merge(tidy(slopes(mod1, newdata = dat)), stata) expect_slopes(mod1, n_unique = 1, newdata = dat) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .00001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = 1e-4) stata <- readRDS(testing_path("stata/stata.rds"))$aer_tobit_right mod2 <- tobit( affairs ~ age + yearsmarried + religiousness + occupation + rating, right = 4, data = dat ) mfx <- merge(tidy(slopes(mod2, newdata = dat)), stata) expect_slopes(mod2, n_unique = 1, newdata = dat) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .1) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .1) # marginaleffects vs. 
emtrends mod <- tobit(affairs ~ age + yearsmarried, data = dat) mfx <- slopes(mod, newdata = datagrid(age = 30, yearsmarried = 5)) em1 <- emmeans::emtrends(mod, ~age, "age", at = list(age = 30, yearsmarried = 5)) em2 <- emmeans::emtrends(mod, ~yearsmarried, "yearsmarried", at = list(age = 30, yearsmarried = 5)) em1 <- tidy(em1) em2 <- tidy(em2) expect_equivalent(mfx$estimate[1], em1$age.trend) expect_equivalent(mfx$std.error[1], em1$std.error, tolerance = .001) expect_equivalent(mfx$estimate[2], em2$yearsmarried.trend) expect_equivalent(mfx$std.error[2], em2$std.error, tolerance = .0002) # predictions: tobit: no validity mod <- AER::tobit( affairs ~ age + yearsmarried + religiousness + occupation + rating, data = dat) pred <- predictions(mod, newdata = dat) expect_predictions(pred, n_row = nrow(dat)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-gam.R0000644000176200001440000000450214560035476020702 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("gam") requiet("emmeans") requiet("broom") # gam: marginaleffects vs. emtrends data(kyphosis, package = "gam") model <- gam::gam(Kyphosis ~ gam::s(Age,4) + Number, family = binomial, data = kyphosis) expect_slopes(model) # emmeans mfx <- slopes(model, newdata = datagrid(Age = 60, Number = 4), variables = "Number", type = "link") em <- emtrends(model, ~Number, "Number", at = list(Age = 60, Number = 4)) em <- tidy(em) expect_equivalent(mfx$estimate, em$Number.trend) # low tolerance only for CRAN Atlas test expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) # gam: predictions: no validity data(kyphosis, package = "gam") model <- gam::gam(Kyphosis ~ gam::s(Age, 4) + Number, family = binomial, data = kyphosis) pred1 <- predictions(model) pred2 <- predictions(model, newdata = head(kyphosis)) expect_predictions(pred1, se = FALSE) expect_predictions(pred2, n_row = 6, se = FALSE) # gam: marginalmeans vs. 
emmeans # TODO: not clear what happens to smooth data(kyphosis, package = "gam") tmp <- kyphosis tmp$categ <- as.factor(sample(letters[1:5], nrow(tmp), replace = TRUE)) model <- gam::gam(Kyphosis ~ gam::s(Age, 4) + Number + categ, family = binomial, data = tmp) # `datagrid()` is smarter than `emmeans()` about integers atlist <- list(Age = round(mean(tmp$Age)), Number = round(mean(tmp$Number))) mm1 <- predictions(model, by = "categ", newdata = datagrid(grid_type = "balanced"), numderiv = "richardson") |> dplyr::arrange(categ) em1 <- data.frame(emmeans(model, specs = "categ", type = "response", at = atlist)) mm1 <- predictions(model, newdata = datagrid( grid_type = "balanced", Age = round(mean(tmp$Age)), Number = round(mean(tmp$Number))), by = "categ") mm2 <- predictions(model, type = "link", by = "categ", newdata = datagrid(grid_type = "balanced"), numderiv = "richardson") |> dplyr::arrange(categ) em2 <- data.frame(emmeans(model, specs = "categ", at = atlist)) expect_equivalent(mm1$estimate, em1$prob) expect_equivalent(mm2$estimate, em2$emmean) expect_equivalent(mm1$conf.low, em1$asymp.LCL, tolerance = 1e-6) expect_equivalent(mm1$conf.high, em1$asymp.UCL, tolerance = 1e-6) expect_equivalent(mm2$conf.low, em2$asymp.LCL, tolerance = 1e-6) expect_equivalent(mm2$conf.high, em2$asymp.UCL, tolerance = 1e-4) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-tidymodels.R0000644000176200001440000000331714541720224022305 0ustar liggesusers source("helpers.R") using("marginaleffects") requiet("tidymodels") dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") dat$large_penguin <- ifelse( dat$body_mass_g > median(dat$body_mass_g, na.rm = TRUE), "yes", "no") dat$large_penguin <- factor(dat$large_penguin, levels = c("yes", "no")) # class mod <- set_engine(logistic_reg(), "glm") |> fit(large_penguin ~ bill_length_mm + flipper_length_mm + species, data = dat) p <- predictions(mod, newdata = dat, type = "prob") expect_inherits(p, 
"predictions") expect_true("std.error" %in% colnames(p)) p <- predictions(mod, newdata = dat, type = "class") expect_inherits(p, "predictions") expect_false("std.error" %in% colnames(p)) mfx <- avg_slopes(mod, newdata = dat, type = "prob") expect_inherits(mfx, "marginaleffects") expect_true(nrow(mfx) > 0) # workflow: engine supported data("bikes", package = "fmeffects") mod <- workflow(count ~ ., linear_reg()) |> fit(data = bikes) |> suppressWarnings() p <- predictions(mod, newdata = bikes, type = "numeric") |> suppressWarnings() expect_inherits(p, "predictions") expect_true("std.error" %in% colnames(p)) mfx <- avg_slopes(mod, newdata = bikes, type = "numeric") |> suppressWarnings() expect_inherits(mfx, "marginaleffects") expect_true(nrow(mfx) > 0) # workflow: engine not supported mod <- workflow(count ~ ., rand_forest(mode = "regression")) |> fit(data = bikes) |> suppressWarnings() p <- predictions(mod, newdata = bikes, type = "numeric") expect_inherits(p, "predictions") expect_false("std.error" %in% colnames(p)) m <- slopes(mod, newdata = bikes, type = "numeric") expect_inherits(m, "slopes") expect_false("std.error" %in% colnames(m)) rm(list = ls())marginaleffects/inst/tinytest/test-nested.R0000644000176200001440000000132114541720224020624 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("fixest") # predictions() call is nested in a function test <- function() { data(mtcars) test_data <- mtcars mod <- feols(mpg ~ hp + factor(cyl), data = test_data) nd <- datagrid(cyl = mtcars$cyl, newdata = test_data) preds <- predictions(mod, newdata = nd) return(preds) } p <- test() expect_inherits(p, "predictions") expect_equivalent(nrow(p), 3) # yet another test <- function() { data(mtcars) test_data <- mtcars mod <- feols(mpg ~ hp + factor(cyl), data = test_data) p <- predictions(mod, variables = "cyl", newdata = test_data) return(p) } p <- test() expect_inherits(p, "predictions") expect_equivalent(nrow(p), 96) rm(list = 
ls())marginaleffects/inst/tinytest/test-pkg-truncreg.R0000644000176200001440000000223014541720224021752 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("truncreg") requiet("margins") # truncreg: no validity check data("tobin", package = "survival") model <- truncreg(durable ~ age + quant, data = tobin, subset = durable > 0) tid <- avg_slopes(model) expect_inherits(tid, "data.frame") expect_equivalent(nrow(tid), 2) expect_false(any(tid$estimate == 0)) expect_false(anyNA(tid$estimate)) expect_false(any(tid$std.error == 0)) expect_false(anyNA(tid$std.error)) # truncreg vs. Stata # numeric differences could be resolved with different tolerance, but # finding the correct threshold by trial and error is difficult on CRAN stata <- readRDS(testing_path("stata/stata.rds"))$truncreg_truncreg_01 data("tobin", package = "survival") model <- truncreg::truncreg(durable ~ age + quant, data = tobin, subset = durable > 0) mfx <- merge(avg_slopes(model), stata) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .0001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .001) # margins mar <- margins(model, unit_ses = TRUE) mfx <- slopes(model) expect_true(expect_margins(mfx, mar, tolerance = .001)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-blme.R0000644000176200001440000000137314541720224021047 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("blme") dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/lme4/sleepstudy.csv") penaltyFn <- function(sigma) dcauchy(sigma, 0, 10, log = TRUE) fm5 <- blmer( Reaction ~ Days + (0 + Days | Subject), data = dat, cov.prior = custom(penaltyFn, chol = TRUE, scale = "log")) fm6 <- blmer( Reaction ~ Days + (1 + Days | Subject), data = dat, cov.prior = NULL, fixef.prior = normal) mod <- bglmer(vs ~ mpg + (1 | gear), data = mtcars, family = binomial) expect_slopes(fm5) expect_slopes(fm6) expect_slopes(mod) pre <- predictions(fm5) expect_predictions(pre) pre <- 
predictions(fm6) expect_predictions(pre) pre <- predictions(mod) expect_predictions(pre) rm(list = ls())marginaleffects/inst/tinytest/test-scope.R0000644000176200001440000000036514541720224020462 0ustar liggesusers# Issue #769 m = glm(am ~ mpg + hp, data = mtcars, family = binomial) get_slopes_at_value = function(m, x) { slopes(m, newdata = datagrid(mpg = x)) } a = get_slopes_at_value(m, .5) b = slopes(m, newdata = datagrid(mpg = .5)) expect_equal(a, b)marginaleffects/inst/tinytest/test-backward.R0000644000176200001440000000000014541720224021111 0ustar liggesusersmarginaleffects/inst/tinytest/test-pkg-fixest.R0000644000176200001440000002307714541720224021437 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("fixest") requiet("data.table") fixest::setFixest_nthreads(1) fixest::setFixest_notes(FALSE) # Issue #375: friendly warning when sandwich fails mod <- feols(y ~ x1 + i(period, treat, 5) | id + period, base_did) hyp <- as.numeric(1:10 %in% 6:10) # not supported expect_warning(hypotheses(mod, hypothesis = hyp, vcov = "HC0"), pattern = "sandwich") # supported d <- hypotheses(mod, hypothesis = hyp, vcov = "HC1") expect_inherits(d, "data.frame") # bugs stay dead: logit with transformations dat <- mtcars dat$gear <- as.factor(dat$gear) dat <- dat mod1 <- suppressMessages(feglm(am ~ mpg + mpg^2 | gear, family = binomial(link = "logit"), data = dat, warn = FALSE)) mod2 <- suppressMessages(feglm(am ~ mpg | gear, family = binomial(link = "logit"), data = dat, warn = FALSE)) mod3 <- suppressMessages(feglm(am ~ mpg + mpg^2 | gear, family = binomial(link = "logit"), data = mtcars, warn = FALSE)) mod4 <- suppressMessages(feglm(am ~ mpg | gear, family = binomial(link = "logit"), data = mtcars, warn = FALSE)) #skip_if_not_installed("fixest", minimum_version = "0.10.2") expect_inherits(insight::get_data(mod1), "data.frame") expect_inherits(insight::get_data(mod2), "data.frame") expect_inherits(insight::get_data(mod3), "data.frame") 
expect_inherits(insight::get_data(mod4), "data.frame") expect_slopes(mod1, pct_na = 62.5) expect_slopes(mod2, pct_na = 62.5) expect_slopes(mod3, pct_na = 62.5) expect_slopes(mod4, pct_na = 62.5) # 20 observations for which we can't compute results mfx <- slopes(mod1, variables = "mpg") expect_inherits(mfx, "marginaleffects") expect_equivalent(nrow(mfx), 12) # fixest::feols vs. Stata requiet("plm") data(EmplUK, package = "plm") stata <- readRDS(testing_path("stata/stata.rds"))$fixest_feols model <- feols(wage ~ capital * output | firm, EmplUK) mfx <- merge(avg_slopes(model), stata) expect_slopes(model) expect_equivalent(mfx$estimate, mfx$estimate) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .00001) # fixest::fepois vs. Stata requiet("plm") data(EmplUK, package = "plm") stata <- readRDS(testing_path("stata/stata.rds"))$fixest_fepois model <- fepois(log(wage) ~ capital * output | firm, EmplUK) mfx <- merge(tidy(slopes(model, type = "link")), stata) expect_slopes(model) expect_equivalent(mfx$estimate, mfx$estimate, tolerance = .000001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .001) # fixest::feols: predictions data(trade, package = "fixest") model <- feols(Euros ~ dist_km | Destination + Origin, data = trade) pred1 <- predictions(model) pred2 <- predictions(model, newdata = head(trade)) expect_predictions(pred1) expect_predictions(pred2, n_row = 6) # numeric cluster variable raises warning fe <- data.frame(unit = 1:25, fe = rnorm(25)) dat <- expand.grid(unit = 1:25, time = 1:50) dat <- merge(dat, fe, by = "unit") dat$x <- rnorm(nrow(dat)) + dat$fe dat$w <- rnorm(nrow(dat)) dat$y <- dat$x + dat$w + dat$x * dat$w + dat$fe + rnorm(nrow(dat), sd = 10) dat <- dat dat2 <- dat dat2$unit <- as.factor(dat2$unit) dat2 <- dat2 mod1 <- feols(y ~ x * w | unit, data = dat) mod2 <- fixest::feols(y ~ x * w | unit, data = dat2) p <- plot_slopes(mod2, variables = "x", condition = "w") expect_inherits(p, "ggplot") # plot_slopes: extracts all 
required data fe <- data.frame(unit = 1:25, fe = rnorm(25)) dat <- expand.grid(unit = 1:25, time = 1:50) dat <- merge(dat, fe, by = "unit") dat$x <- rnorm(nrow(dat)) + dat$fe dat$w <- rnorm(nrow(dat)) dat$y <- dat$x + dat$w + dat$x * dat$w + dat$fe + rnorm(nrow(dat), sd = 10) dat <- dat mod1 <- fixest::feols(y ~ x * w | unit, data = dat) dat2 <- dat dat2$unit <- as.factor(dat2$unit) dat2 <- dat2 mod2 <- fixest::feols(y ~ x * w | unit, data = dat2) k <- plot_slopes(mod2, variables = "x", condition = "w", draw = FALSE) expect_inherits(k, "data.frame") expect_false(anyNA(k$estimate)) expect_false(any(k$estimate == 0)) # predictions: bugs stay dead: Issue #203 dat <- mtcars dat$factor_am = factor(dat$am) dat <- dat m1 <- feols(mpg ~ hp * am, data = dat) m2 <- feols(mpg ~ hp * factor_am, data = dat) m3 <- feols(mpg ~ hp * wt, data = dat) m4 <- feols(mpg ~ i(am, hp), data = dat) m5 <- feglm(am ~ hp | gear, data = dat) pred1 <- predictions(m1) pred2 <- predictions(m2) pred3 <- predictions(m3) pred4 <- predictions(m4) pred5 <- predictions(m5) expect_predictions(pred1) expect_predictions(pred2) expect_predictions(pred3) expect_predictions(pred4) expect_predictions(pred5, se = FALSE) # vdiffr::expect_doppelganger("fixest plot_predictions with i()", # plot_predictions(m4, condition = c("hp", "am"))) # bug stay dead: insight::get_data doesn't get all columns reg <- feols( Sepal.Width ~ Petal.Length | Species | Sepal.Length ~ Petal.Width, data = iris) mfx1 <- slopes(reg, newdata = iris) mfx2 <- slopes(reg) expect_inherits(mfx1, "marginaleffects") expect_inherits(mfx2, "marginaleffects") # bug stays dead dt <- mtcars dt$cyl <- factor(dt$cyl) fit1 <- suppressMessages(feols(mpg ~ 0 | carb | vs ~ am, data = dt)) fit2 <- suppressMessages(feols(mpg ~ cyl | carb | vs ~ am, data = dt)) fit3 <- suppressMessages(feols(mpg ~ 0 | carb | vs:cyl ~ am:cyl, data = dt)) mfx1 <- slopes(fit1) mfx2 <- slopes(fit2) mfx3 <- slopes(fit3) expect_inherits(mfx1, "marginaleffects") expect_inherits(mfx2, 
"marginaleffects") expect_inherits(mfx3, "marginaleffects") # Issue #443: `newdata` breaks when it is a `data.table` dat <- data.table(mtcars) m <- feols(mpg ~ cyl * disp, dat) m1 <- slopes(m) m2 <- slopes(m, newdata = datagrid(disp = 0)) expect_inherits(m1, "marginaleffects") expect_inherits(m2, "marginaleffects") m1 <- comparisons(m) m2 <- comparisons(m, newdata = datagrid(disp = 0)) expect_inherits(m1, "comparisons") expect_inherits(m2, "comparisons") # Issue #458: fixest with data table tmp <- data.table(y = rnorm(10), x = rnorm(10)) model <- feols(y ~ x, tmp) m <- slopes(model) expect_inherits(m, "marginaleffects") # Issue #484: i() converts to factors but was treated as numeric m <- feols(Ozone ~ i(Month), airquality) m <- slopes(m) expect_inherits(m, "marginaleffects") # Issue #493 mod <- feols(vs ~ hp * factor(cyl), data = mtcars) cmp <- comparisons( mod, newdata = datagrid(hp = c(80, 100, 120)), by = "hp") expect_equivalent(nrow(cmp), 9) expect_equivalent(nrow(tidy(cmp)), 9) mod <- feglm(vs ~ hp * factor(cyl), data = mtcars, family = "binomial") cmp <- comparisons( mod, newdata = datagrid(hp = c(80, 100, 120)), by = "hp") # Issue #484: fixest::i() parsing mod1 <- feols(mpg ~ drat + i(cyl, i.gear), data = mtcars) mod2 <- feols(mpg ~ drat + i(cyl, gear), data = mtcars) mod3 <- feols(mpg ~ drat + i(cyl), data = mtcars) mod4 <- feols(mpg ~ drat + i(cyl, wt) + i(gear, i.am), data = mtcars) fun <- function(model) { out <- marginaleffects:::get_modeldata(model) out <- marginaleffects:::get_variable_class(out, compare = "categorical") out <- names(out) return(out) } expect_true(all(c("cyl", "gear") %in% fun(mod1))) expect_true("cyl" %in% fun(mod2)) expect_true("cyl" %in% fun(mod3)) expect_true(all(c("am", "cyl", "gear") %in% fun(mod4))) m <- slopes(mod4) expect_inherits(m, "marginaleffects") expect_true(all(c("am", "cyl", "drat", "gear", "wt") %in% m$term)) # Issue #509 dat <- mtcars dat$mpg[1] <- NA dat <- dat mod <- suppressMessages(feglm(am ~ mpg, family = 
binomial, data = dat)) mfx <- slopes(mod) expect_inherits(mfx, "marginaleffects") expect_equivalent(nrow(mfx), 31) expect_true("mpg" %in% colnames(mfx)) expect_true("am" %in% colnames(mfx)) # Issue #531 mod <- feols(Ozone ~ Wind + i(Month), airquality) mfx <- slopes(mod, variable = "Wind") expect_true(all(mfx$conf.low < mfx$estimate)) expect_true(all(mfx$conf.high > mfx$estimate)) # regression test Issue #232: namespace collision with `rep()` # can't override global binding for `rep()` rep <- data.frame(Y = runif(100) > .5, X = rnorm(100)) mod <- feglm(Y ~ X, data = rep, family = binomial) mfx <- slopes(mod) expect_inherits(mfx, "marginaleffects") # Issue #549 dat <- mtcars dat$mpg[1] <- NA mod <- fepois(hp ~ mpg + am, data = dat) p <- predictions(mod, by = "am") expect_false(anyNA(p$estimate)) expect_false(anyNA(p$std.error)) # Issue #705 data(trade, package = "fixest") mod1 <- fepois(data = trade, Euros ~ 1 | Origin, offset = ~ log(dist_km)) mod2 <- fepois(data = trade, Euros ~ 1 | Origin) mfx1 <- avg_slopes(mod1) mfx2 <- avg_slopes(mod2) expect_inherits(mfx1, "slopes") expect_inherits(mfx2, "slopes") # Issue #727: backtransform predictions mod = fixest::feglm(am ~ hp, data = mtcars, family = binomial) p1 <- avg_predictions(mod) p2 <- avg_predictions(mod, type = "link", transform = mod$family$linkinv) expect_equivalent(p1$estimate, p2$estimate) expect_equivalent(p1$conf.low, p2$conf.low) # Issue #839 mod <- feols(mpg ~ drat | gear, data = mtcars, weights = ~qsec) res <- suppressWarnings(inferences(avg_slopes(mod), method = "boot", R = 20)) expect_inherits(res, "slopes") ## Issue #461 ## commetned out because this seems to be an upstream problem. See issue. 
# gen_data <- function(rows) { # data <- data.table( # x1 = rnorm(rows), # x2 = rnorm(rows), # group1 = rep(1:5, rows/5), # group2 = rep(1:2, rows/2), # group3 = rep(1:20, rows/20) # ) # data[, y := x1*x2*rnorm(rows, 1, 0.1)] # data[, fe := paste0(group1, group2)] # setDF(data) # return(data) # } # data <- gen_data(50020) # model <- feols(y ~ x1*x2 | group1^group2, data) # nd <- datagrid(model = model) # expect_error(slopes(model, newdata = "mean"), "combined") ## Issue #229: works interactively # data(trade) # dat <- trade # mod <- feNmlm(Euros ~ log(dist_km) | Product, data = dat) # expect_slopes(mod, newdata = dat) # environment issue rm(list = ls()) marginaleffects/inst/tinytest/test-p_adjust.R0000644000176200001440000000146614560035476021176 0ustar liggesuserssource("helpers.R") mod <- lm(mpg ~ qsec + hp * factor(cyl), data = mtcars) pre1 <- avg_predictions(mod, by = "cyl") pre2 <- avg_predictions(mod, by = "cyl", p_adjust = "hochberg") expect_true(any(pre1$p.value < pre2$p.value)) expect_true(all(pre1$p.value <= pre2$p.value)) expect_false("conf.low" %in% colnames(pre2)) cmp1 <- avg_comparisons(mod, variables = list(cyl = "pairwise")) cmp2 <- avg_comparisons(mod, variables = list(cyl = "pairwise"), p_adjust = "hochberg") expect_true(any(cmp1$p.value < cmp2$p.value)) expect_true(all(cmp1$p.value <= cmp2$p.value)) expect_false("conf.low" %in% colnames(cmp2)) mfx1 <- avg_slopes(mod) mfx2 <- avg_slopes(mod, p_adjust = "hochberg") expect_true(any(mfx1$p.value < mfx2$p.value)) expect_true(all(mfx1$p.value <= mfx2$p.value)) expect_false("conf.low" %in% colnames(mfx2)) marginaleffects/inst/tinytest/test-interaction.R0000644000176200001440000000512514541720224021667 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("ggplot2") data("diamonds", package = "ggplot2") dat <- diamonds[1:1000, ] dat$cut <- factor(as.character(dat$cut), levels = levels(dat$cut)) dat$color <- factor(as.character(dat$color), levels = levels(dat$color)) dat$clarity <- 
factor(as.character(dat$clarity), levels = levels(dat$clarity)) mod <- lm(price ~ cut * color + clarity + carat, data = dat) cmp1 <- comparisons(mod, variables = c("cut", "color"), cross = TRUE) cmp2 <- comparisons(mod, variables = "cut") expect_equivalent(nrow(subset(cmp1, rowid == 1)), 24) expect_equivalent(nrow(subset(cmp2, rowid == 1)), 4) n_unique <- nrow(unique(subset(cmp2, rowid == 1, "contrast"))) expect_equivalent(n_unique, 4) mod <- lm(mpg ~ hp * drat, mtcars) dm <- hypotheses(mod, "`hp:drat` = drat") expect_inherits(dm, "hypotheses") expect_equivalent(nrow(dm), 1) ## Issue #684 # . use "~/penguins.dta", clear # . encode species, gen(speciesid) # . qui logit large_penguin c.bill_length_mm##c.flipper_length_mm i.speciesid # . margins, dydx(*) # # Average marginal effects Number of obs = 342 # Model VCE: OIM # # Expression: Pr(large_penguin), predict() # dy/dx wrt: bill_length_mm flipper_length_mm 2.speciesid 3.speciesid # # ----------------------------------------------------------------------------------- # | Delta-method # | dy/dx std. err. z P>|z| [95% conf. interval] # ------------------+---------------------------------------------------------------- # bill_length_mm | .0278588 .0059463 4.69 0.000 .0162043 .0395134 # flipper_length_mm | .0104927 .0023708 4.43 0.000 .005846 .0151394 # | # speciesid | # Chinstrap | -.4127852 .0560029 -7.37 0.000 -.5225488 -.3030216 # Gentoo | .0609265 .1073649 0.57 0.570 -.1495048 .2713578 # ----------------------------------------------------------------------------------- # Note: dy/dx for factor levels is the discrete change from the base level. 
dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") dat$large_penguin <- ifelse(dat$body_mass_g > median(dat$body_mass_g, na.rm = TRUE), 1, 0) mod <- glm(large_penguin ~ bill_length_mm * flipper_length_mm + species, data = dat, family = binomial) mfx <- avg_slopes(mod) expect_equivalent(mfx$estimate, c(.0278588, .0104927, -.4127852, .0609265), tol = 1e-4) expect_equivalent(mfx$std.error, c(.0059463, .0023708, .0560029, .1073649), tol = 1e-3) rm(list = ls())marginaleffects/inst/tinytest/test-comparisons-interaction.R0000644000176200001440000000462014541720224024221 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("emmeans") # interaction automatic flip from NULL to useful dat <- mtcars dat$gear <- factor(dat$gear) dat$cyl <- factor(dat$cyl) dat <- dat mod1 <- lm(mpg ~ gear + cyl + wt + gear, data = dat) mod2 <- lm(mpg ~ gear * cyl + wt + gear, data = dat) cmp1 <- comparisons(mod1, newdata = datagrid()) cmp2 <- suppressWarnings(comparisons(mod2, newdata = datagrid(), cross = FALSE)) cmp3 <- suppressWarnings(comparisons(mod2, variables = c("cyl", "gear"), newdata = datagrid(), cross = TRUE)) expect_true("contrast" %in% colnames(cmp1)) expect_true("contrast" %in% colnames(cmp2)) expect_true(all(c("contrast_cyl", "contrast_gear") %in% colnames(cmp3))) # variables must be unnamed vector expect_error(comparisons( mod2, variables = c("cyl" = "ratio", "gear" = "difference"), newdata = datagrid()), pattern = "May not have names") # interaction vs. 
emmeans mod <- lm(mpg ~ factor(am) + factor(cyl) + wt + gear, data = mtcars) cmp <- suppressWarnings(comparisons( mod, variables = list("cyl" = "all", "am" = "all"), newdata = datagrid(), cross = TRUE)) em <- emmeans(mod, c("cyl", "am")) em <- emmeans::contrast(em, method = "revpairwise") em <- data.frame(em) expect_true(all(round(abs(em$estimate), 5) %in% round(abs(cmp$estimate), 5))) expect_true(all(round(abs(em$SE), 4) %in% round(abs(cmp$std.error), 4))) # tidy does not error (no validity) mod <- lm(mpg ~ factor(am) + factor(cyl) + wt + gear, data = mtcars) cmp <- comparisons(mod, variables = c("am", "cyl"), cross = TRUE) tid <- tidy(cmp) expect_true(all(tid$term == "cross")) # `variables` must be specified mod <- lm(mpg ~ factor(am) + factor(cyl) + wt + gear, data = mtcars) cmp <- comparisons(mod, variables = c("am", "cyl"), cross = TRUE) expect_inherits(cmp, "comparisons") expect_error(comparisons(mod, cross = TRUE), pattern = "variables") # interaction (no validity) mod <- lm(mpg ~ factor(am) * factor(cyl) + wt + gear, data = mtcars) # one row only means tidy is same nrows # on some machines I get 21 rows instead of 18, but can't replicate. maybe look into this if I have the energy. Seems minor. 
cmp <- comparisons( mod, variables = list("cyl" = "all", "am" = "all"), newdata = datagrid(), cross = TRUE) expect_true(nrow(cmp) > 17) expect_true(nrow(tidy(cmp)) > 17) # deprecated argument expect_warning(comparisons(mod, interaction = TRUE)) rm(list = ls())marginaleffects/inst/tinytest/test-plot.R0000644000176200001440000000065214541720224020326 0ustar liggesuserssource("helpers.R") using("marginaleffects") if (!requiet("tinysnapshot")) exit_file("tinysnapshot") using("tinysnapshot") # from marginaleffects objects mod <- glm(am ~ hp + wt, data = mtcars) expect_error(plot(predictions(mod)), pattern = "plot_predictions") expect_error(plot(slopes(mod)), pattern = "plot_slopes") expect_error(plot(comparisons(mod)), pattern = "plot_comparisons") source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-biglm.R0000644000176200001440000000141014541720224021212 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("biglm") N <- 1e4 x1 <- rnorm(N) x2 <- rnorm(N) y <- rbinom(size = 1, n = N, prob = plogis(x1 + x2)) dat <- data.frame(y, x1, x2) big <- bigglm(y ~ x1 + x2, data = dat, family = binomial()) small <- glm(y ~ x1 + x2, data = dat, family = binomial()) # vcov not supported expect_warning(comparisons(big), pattern = "not supported") # dydx supported big_m <- comparisons(big, vcov = FALSE) small_m <- comparisons(small, vcov = FALSE) t1 <- tidy(big_m) t2 <- tidy(small_m) expect_equivalent(t1$estimate, t2$estimate) big_m <- slopes(big, type = "link", vcov = FALSE) small_m <- slopes(small, type = "link", vcov = FALSE) t1 <- tidy(big_m) t2 <- tidy(small_m) expect_equivalent(t1$estimate, t2$estimate) rm(list = ls())marginaleffects/inst/tinytest/test-utils.R0000644000176200001440000000152414541720224020507 0ustar liggesuserssource("helpers.R") using("marginaleffects") # classic input expect_equivalent( marginaleffects:::is_binary(1:10), isTRUE(all(1:10 %in% 0:1)) ) expect_equivalent( marginaleffects:::is_binary(0:1), isTRUE(all(0:1 %in% 
0:1)) ) expect_equivalent( marginaleffects:::is_binary(c(0, 0.5, 1)), isTRUE(all(c(0, 0.5, 1) %in% 0:1)) ) # with single values expect_equivalent( marginaleffects:::is_binary(1), isTRUE(all(1 %in% 0:1)) ) expect_equivalent( marginaleffects:::is_binary(2), isTRUE(all(2 %in% 0:1)) ) # with missings / NULL expect_equivalent( marginaleffects:::is_binary(c(0, 0.5, NA, 1)), isTRUE(all(c(0, 0.5, NA, 1) %in% 0:1)) ) expect_equivalent( marginaleffects:::is_binary(NA), isTRUE(all(NA %in% 0:1)) ) expect_equivalent( marginaleffects:::is_binary(NULL), isTRUE(all(NULL %in% 0:1)) ) rm(list = ls()) marginaleffects/inst/tinytest/test-pkg-stats.R0000644000176200001440000001614614560035476021303 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("margins") requiet("broom") requiet("emmeans") requiet("poorman") options(marginaleffects_numDeriv = list(method = "simple", method.args = list(eps = 1e-7))) guerry <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/HistData/Guerry.csv") # glm: marginaleffects set.seed(1024) N <- 1e2 dat <- data.frame(x1 = rnorm(N), x2 = rnorm(N), x3 = rnorm(N), x4 = rnorm(N), e = rnorm(N)) |> transform(y = plogis(x1 + x2 + x3 + x4 + x4 * x4)) |> transform(y = rbinom(N, 1, y)) mod <- glm(y ~ x1 + x2 + x3 * x4, data = dat, family = binomial) res <- slopes(mod, eps = 1e-7) mar <- margins(mod, unit_ses = TRUE, eps = 1e-7) # TODO: bad tolerance? 
for (x in c("x1", "x2", "x3", "x4")) { expect_equivalent(as.numeric(res[res$term == x, "estimate"]), as.numeric(mar[[paste0("dydx_", x)]]), tolerance = 3e-2) expect_equivalent(as.numeric(res[res$term == x, "std.error"]), as.numeric(mar[[paste0("SE_dydx_", x)]]), tolerance = 4e-2) } # predictions pre <- predictions(mod) expect_predictions(pre, se = FALSE) # emmeans comparison # type = "response" works at lower tolerance em <- emmeans::emtrends(mod, ~x2, var = "x2", at = list(x1 = 0, x2 = 0, x3 = 0, x4 = 0)) em <- tidy(em) mfx <- slopes(mod, newdata = datagrid(x1 = 0, x2 = 0, x3 = 0, x4 = 0), variable = "x2", type = "link") expect_equivalent(mfx$estimate, em$x2.trend) expect_equivalent(mfx$std.error, em$std.error, tolerance = 1e-5) # glm vs. Stata: marginaleffects stata <- readRDS(testing_path("stata/stata.rds"))[["stats_glm_01"]] dat <- read.csv(testing_path("stata/databases/stats_glm_01.csv")) mod <- glm(y ~ x1 * x2, family = binomial, data = dat) ame <- merge(avg_slopes(mod, eps = 1e-4), stata) expect_equivalent(ame$estimate, ame$dydxstata, tolerance = 1e-4) expect_equivalent(ame$std.error, ame$std.errorstata, tolerance = 1e-4) # lm vs. Stata: marginaleffects stata <- readRDS(testing_path("stata/stata.rds"))[["stats_lm_01"]] dat <- read.csv(testing_path("stata/databases/stats_lm_01.csv")) mod <- lm(y ~ x1 * x2, data = dat) ame <- merge(avg_slopes(mod, eps = 1e-4), stata) expect_equivalent(ame$estimate, ame$dydxstata, tolerance = 1e-4) expect_equivalent(ame$std.error, ame$std.errorstata, tolerance = 1e-4) # lm with interactions vs. margins vs. 
emmeans: marginaleffects counterfactuals <- expand.grid(hp = 100, am = 0:1) mod <- lm(mpg ~ hp * am, data = mtcars) res <- slopes(mod, variable = "hp", newdata = counterfactuals) mar <- margins(mod, variable = "hp", data = counterfactuals, unit_ses = TRUE) expect_true(expect_margins(res, mar, tolerance = 1e-3)) # emmeans void <- capture.output({ em1 <- suppressMessages(emmeans::emtrends(mod, ~hp, var = "hp", at = list(hp = 100, am = 0))) em2 <- suppressMessages(emmeans::emtrends(mod, ~hp, var = "hp", at = list(hp = 100, am = 1))) em1 <- tidy(em1) em2 <- tidy(em2) }) res <- slopes(mod, variable = "hp", newdata = counterfactuals) expect_equivalent(res$estimate[1], em1$hp.trend) expect_equivalent(res$std.error[1], em1$std.error, tolerance = .001) expect_equivalent(res$estimate[2], em2$hp.trend) expect_equivalent(res$std.error[2], em2$std.error, tolerance = .001) # lm vs. emmeans: marginalmeans dat <- mtcars dat$cyl <- as.factor(dat$cyl) dat$am <- as.logical(dat$am) mod <- lm(mpg ~ hp + cyl + am, data = dat) mm <- predictions(mod, by = "cyl", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(cyl) em <- broom::tidy(emmeans::emmeans(mod, specs = "cyl")) expect_equivalent(mm$estimate, em$estimate) expect_equivalent(mm$std.error, em$std.error, tolerance = 1e-6) mm <- predictions(mod, by = "am", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(am) em <- broom::tidy(emmeans::emmeans(mod, specs = "am")) expect_equivalent(mm$estimate, em$estimate) expect_equivalent(mm$std.error, em$std.error, tolerance = 1e-5) # factors seem to behave differently in model.matrix #skip_if_not_installed("emmeans", minimum_version = "1.7.3") dat <- guerry dat$binary <- dat$Crime_prop > median(dat$Crime_prop) # character variables sometimes break the order mod <- glm(binary ~ Region + MainCity + Commerce, data = dat, family = "binomial") # factor variables are safer dat$Region <- as.factor(dat$Region) dat$MainCity <- as.factor(dat$MainCity) mod <- glm(binary ~ Region + 
MainCity + Commerce, data = dat, family = "binomial") mm <- predictions(mod, type = "link", by = "Region", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(Region) em <- data.frame(emmeans::emmeans(mod, specs = "Region")) expect_equivalent(as.character(mm$Region), as.character(em$Region)) expect_equivalent(mm$estimate, em$emmean, tol = 0.05) # not sure why tolerance is not good expect_equivalent(mm$std.error, em$SE, tol = 0.001) mm <- predictions(mod, type = "link", newdata = datagrid(grid_type = "balanced"), by = "MainCity") |> dplyr::arrange(MainCity) em <- data.frame(emmeans::emmeans(mod, specs = "MainCity", type = "link")) expect_equivalent(as.character(mm$MainCity), as.character(em$MainCity)) expect_equivalent(mm$estimate, em$emmean, tol = 0.01) # not sure why tolerance is not good expect_equivalent(mm$std.error, em$SE, tol = 0.001) mm <- predictions(mod, type = "link", by = "MainCity", newdata = datagrid(grid_type = "balanced"), transform = plogis) |> dplyr::arrange(MainCity) em <- data.frame(emmeans(mod, specs = "MainCity", type = "response")) expect_equivalent(as.character(mm$MainCity), as.character(em$MainCity)) expect_equivalent(mm$estimate, em$prob, tolerance = .01) expect_equivalent(mm$conf.low, em$asymp.LCL, tolerance = .01) expect_equivalent(mm$conf.high, em$asymp.UCL, tolerance = .01) ################################################### # note sure if stats::loess should be supported # ################################################### # vcov(loess) does not exist mod <- loess(mpg ~ wt, data = mtcars) expect_warning(slopes(mod), pattern = "Unable") # loess vs. 
margins mod <- loess(mpg ~ wt, data = mtcars) res <- slopes(mod, vcov = FALSE, newdata = head(mtcars))$estimate mar <- data.frame(margins(mod, data = head(mtcars)))$dydx_wt expect_equivalent(as.numeric(res), as.numeric(mar), tolerance = 1e-3) # loess predictions mod <- loess(mpg ~ wt, data = mtcars) expect_warning(predictions(mod)) pred <- predictions(mod, vcov = FALSE) expect_predictions(pred, se = FALSE) # Issue #548: mlm support mod <- lm(cbind(mpg, cyl) ~ disp + am, data = mtcars) tid <- avg_slopes(mod) expect_inherits(tid, "marginaleffects") expect_equivalent(nrow(tid), 4) # Issue #547: standardize column order mod <- lm(cbind(mpg, cyl) ~ disp + am, data = mtcars) expect_equivalent(colnames(get_predict(mod)), c("rowid", "group", "estimate")) mod <- lm(mpg ~ disp + am, data = mtcars) expect_equivalent(colnames(get_predict(mod)), c("rowid", "estimate")) # Issue #833: Support nls() no validity DNase1 <- subset(datasets::DNase, Run == 1) mod <- nls(density ~ SSlogis(log(conc), Asym, xmid, scal), DNase1) cmp <- avg_comparisons(mod, variables = "conc") expect_inherits(cmp, "comparisons") expect_false(any(is.na(cmp$estimate))) expect_false(any(is.na(cmp$std.error))) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-gformula.R0000644000176200001440000000207314541720224021163 0ustar liggesuserssource("helpers.R") using("marginaleffects") set.seed(1) n <- 10 group_id <- c(rep("A", n / 2), rep("B", n / 2)) x <- runif(n, 0, 10) xA <- x[group_id == "A"] xB <- x[group_id == "B"] yA <- 2 + 2 * xA + rnorm(n / 2, 0, 0.1) yB <- -1 + 3 * xB + rnorm(n / 2, 0, 0.1) simdat <- data.frame(group_id = group_id, x = c(xA, xB), y = c(yA, yB)) simdat$group_id <- as.factor(simdat$group_id) tmp <- simdat model_additive <- lm(y ~ x + group_id, data = tmp) model_interaction <- lm(y ~ x * group_id, data = tmp) simdat_doA <- simdat_doB <- tmp simdat_doA$group_id <- "A" simdat_doB$group_id <- "B" g1 <- mean(predict(model_additive, newdata = simdat_doB)) - 
mean(predict(model_additive, newdata = simdat_doA)) g2 <- mean(predict(model_interaction, newdata = simdat_doB)) - mean(predict(model_interaction, newdata = simdat_doA)) c1 <- avg_comparisons(model_additive, variable = "group_id", newdata = tmp) c2 <- avg_comparisons(model_interaction, variable = "group_id", newdata = tmp) expect_equivalent(g1, c1$estimate) expect_equivalent(g2, c2$estimate) rm(list = ls())marginaleffects/inst/tinytest/test-typical.R0000644000176200001440000000341614541720224021016 0ustar liggesuserssource("helpers.R") using("marginaleffects") # datagrid(x = NA) # numeric nd <- datagrid(newdata = mtcars, mpg = NA, hp = 1:4) expect_equivalent(nrow(nd), 4) expect_true(all(is.na(nd$mpg))) # factor tmp <- mtcars tmp$gear <- factor(tmp$gear) nd <- datagrid(newdata = tmp, gear = NA, hp = 1:4) expect_equivalent(nrow(nd), 4) expect_true(all(is.na(nd$gear))) # unique values tmp <- mtcars tmp$am <- as.logical(tmp$am) mod_int <- lm(mpg ~ am * factor(cyl), tmp) mfx <- slopes(mod_int, newdata = datagrid(cyl = unique), variables = "am") expect_equivalent(nrow(mfx), 3) # typical FUN_* tmp <- mtcars tmp$am <- as.logical(tmp$am) tmp$cyl <- as.factor(tmp$cyl) tmp$gear <- as.character(tmp$gear) typ <- datagrid( newdata = tmp, FUN_character = max, FUN_factor = function(x) sort(x)[1], FUN_numeric = stats::median) expect_equivalent(typ$drat, stats::median(mtcars$drat)) expect_equivalent(typ$cyl, factor("4", levels = c("4", "6", "8"))) expect_equivalent(typ$gear, "5") # all manual mod <- lm(hp ~ mpg, mtcars) nd <- datagrid(model = mod, mpg = 110) expect_inherits(nd, "data.frame") expect_equivalent(nrow(nd), 1) # bugs stay dead: FUN_logical tmp <- mtcars tmp$am <- as.logical(tmp$am) mod <- lm(mpg ~ am * factor(cyl), data = tmp) mfx <- slopes(mod, newdata = datagrid(cyl = unique), variables = "am") expect_inherits(mfx, "marginaleffects") expect_equivalent(nrow(mfx), 3) # errors and warnings dat <- mtcars dat$cyl <- factor(dat$cyl) dat <- dat mod <- lm(hp ~ mpg, dat) 
expect_error(datagrid(), pattern = "are both .NULL") mod <- lm(hp ~ factor(cyl), dat) expect_inherits(datagrid(model = mod, cyl = "4"), "data.frame") expect_error(datagrid(model = mod, cyl = "2"), pattern = "must be one of the factor levels") rm(list = ls())marginaleffects/inst/tinytest/test-marginaleffects.R0000644000176200001440000000104514541720224022477 0ustar liggesuserssource("helpers.R") using("marginaleffects") # marginal effects at the mean mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) mfx1 <- slopes(mod, newdata = datagrid()) mfx2 <- slopes(mod, newdata = "mean") expect_equivalent(mfx1, mfx2) # unsupported arguments mod <- glm(am ~ hp + mpg, data = mtcars, family = binomial) expect_error(slopes(mod, comparison = mean), pattern = "supported") expect_error(slopes(mod, transform = exp), pattern = "supported") expect_error(slopes(mod, cross = TRUE), pattern = "supported") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-pscl.R0000644000176200001440000000747214560035476021110 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("pscl") requiet("emmeans") requiet("broom") requiet("margins") tol <- 0.0001 tol_se <- 0.001 ### marginaleffects # hurdle: set_coef data("bioChemists", package = "pscl") mod1 <- hurdle(art ~ phd + fem | ment, data = bioChemists, dist = "negbin") beta <- stats::setNames(rep(0, length(coef(mod1))), names(coef(mod1))) mod2 <- set_coef(mod1, beta) expect_true(all(coef(mod1) != coef(mod2))) # hurdle: marginaleffects vs margins vs emtrends data("bioChemists", package = "pscl") model <- hurdle(art ~ phd + fem | ment, data = bioChemists, dist = "negbin") mfx1 <- avg_slopes(model, type = "response") mfx2 <- avg_slopes(model, type = "zero") expect_false(any(mfx1$estimate == 0)) expect_false(any(mfx2$estimate == 0)) expect_false(any(mfx1$std.error == 0)) expect_false(any(mfx2$std.error == 0)) expect_inherits(mfx1, "data.frame") expect_inherits(mfx2, "data.frame") # emtrends em <- emtrends(model, ~phd, "phd", at 
= list(fem = "Men", phd = 2), df = Inf) em <- tidy(em) mfx <- slopes(model, newdata = datagrid(fem = "Men", phd = 2), variables = "phd") expect_equivalent(mfx$estimate, em$phd.trend, tolerance = .01) # standard errors do not match # expect_equivalent(mfx$std.error, em$std.error) # margins: standard errors are not supported (all zeros) res <- slopes(model, newdata = head(bioChemists, 2)) mar <- margins(model, data = head(bioChemists, 2), unit_ses = TRUE) expect_equivalent(res$estimate[res$term == "phd"], as.numeric(mar$dydx_phd), tolerance = .0001) expect_equivalent(res$estimate[res$term == "fem"], as.numeric(mar$dydx_femWomen), tolerance = .00001) # bugs stay dead: hurdle with multi-level regressor data("bioChemists", package = "pscl") tmp <- bioChemists tmp$fem <- as.character(tmp$fem) tmp$fem[sample(1:nrow(tmp), 300)] <- "Other" tmp$fem <- as.factor(tmp$fem) model <- hurdle(art ~ phd + fem | ment, data = tmp, dist = "negbin") expect_slopes(model) # marginaleffects: zeroinfl vs. Stata vs. 
emtrends data("bioChemists", package = "pscl") model <- zeroinfl(art ~ kid5 + phd | ment, dist = "negbin", data = bioChemists) # stata stata <- readRDS(testing_path("stata/stata.rds"))$pscl_zeroinfl_01 mfx <- merge(avg_slopes(model), stata) expect_slopes(model) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = 1e-3) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = tol_se) # emtrends mfx <- slopes(model, variables = "phd", newdata = datagrid(kid5 = 2, ment = 7, phd = 2)) em <- emtrends(model, ~phd, "phd", at = list(kid5 = 2, ment = 7, phd = 2)) em <- tidy(em) expect_equivalent(mfx$estimate, em$phd.trend, tolerance = .0001) expect_equivalent(mfx$std.error, em$std.error, tolerance = .01) # margins: does not support standard errors (all zeros) mar <- margins(model, data = head(bioChemists), unit_ses = TRUE) mfx <- avg_slopes(model, variables = c("kid5", "phd", "ment"), newdata = head(bioChemists)) expect_equivalent(sort(summary(mar)$AME), sort(mfx$estimate), tolerance = 1e-3) ### predictions # marginaleffects: zeroinfl: no validity data("bioChemists", package = "pscl") model <- zeroinfl(art ~ kid5 + phd | ment, dist = "negbin", data = bioChemists) pred1 <- predictions(model) pred2 <- predictions(model, newdata = head(bioChemists)) expect_predictions(pred1) expect_predictions(pred2, n_row = 6) ### marginalmeans # zeroinfl: marginalmeans vs. 
emmeans data("bioChemists", package = "pscl") model <- zeroinfl(art ~ kid5 + phd + mar | ment, dist = "negbin", data = bioChemists) # response mm <- predictions(model, by = "mar", newdata = datagrid(grid_type = "balanced")) |> dplyr::arrange(mar) em <- tidy(emmeans(model, specs = "mar", df = Inf)) expect_equivalent(mm$estimate, em$estimate, tol = 0.01) expect_equivalent(mm$std.error, em$std.error, tolerance = .01) rm(list = ls())marginaleffects/inst/tinytest/test-analytic.R0000644000176200001440000002047414541720224021160 0ustar liggesuserssource("helpers.R") using("marginaleffects") # lm quadratic set.seed(1027) f <- y ~ x + I(x^2) truth <- function(x) 1 + 2 * x N <- 100000 dat <- data.frame(x = rnorm(N)) dat$y <- 1 + 1 * dat$x + 1 * dat$x^2 + rnorm(N) mod <- lm(f, dat) nd <- datagrid(newdata = dat, x = c(-2:2)) res <- slopes(mod, newdata = nd) res$truth <- truth(res$x) expect_equivalent(res$estimate, res$truth, tolerance = .01) # lm log set.seed(30) f <- y ~ log(x) truth <- function(x) 1 / x N <- 10000 dat <- data.frame(x = runif(N)) dat$y <- log(dat$x) + rnorm(N) mod <- lm(f, dat) nd <- datagrid(newdata = dat, x = c(1:4)) res <- slopes(mod, newdata = nd) res$truth <- truth(res$x) expect_equivalent(res$estimate, res$truth, tolerance = .01) # logit set.seed(2000) f <- y ~ x beta0 <- 1 beta1 <- .2 truth <- function(x) beta1 * dlogis(beta0 + beta1 * x) N <- 1e5 dat <- data.frame(x = rnorm(N, sd = 3)) dat$y <- rbinom(N, 1, pr = plogis(beta0 + beta1 * dat$x)) mod <- glm(f, data = dat, family = binomial) nd <- datagrid(newdata = dat, x = c(-10:10)) res <- slopes(mod, newdata = nd) res$truth <- truth(res$x) expect_equivalent(res$estimate, res$truth, tolerance = .01) ############################################################################ # golder tests copied from the `margins` github repository on 2021-09-18 # # LICENSE: MIT Thomas J. 
Leeper # ############################################################################ # tests based on formulae from Matt Golder's OLS examples, for numerical accuracy and precision # example data for tests set.seed(1) n <- 25L d <- data.frame(w = rnorm(n), x = rnorm(n), z = rnorm(n)) d[["y"]] <- with(d, w + x + z + w*x + w*z + x*z * w*x*z + rnorm(n)) # set comparison tolerance tol <- 0.001 tol_se <- 0.005 # http://mattgolder.com/wp-content/uploads/2015/05/standarderrors1.png # Golder Interaction Case 1a/1b correct f1.1 <- y ~ x + z + x:z m <- lm(f1.1, data = d) marg <- slopes(m) # ME with respect to x dydx <- coef(m)["x"] + (d$z * coef(m)["x:z"]) sedydx <- sqrt(vcov(m)["x","x"] + (d$z^2 * vcov(m)["x:z","x:z"]) + (2 * d$z * vcov(m)["x","x:z"])) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # ME with respect to z dydz <- coef(m)["z"] + (d$x * coef(m)["x:z"]) sedydz <- sqrt(vcov(m)["z","z"] + (d$x^2 * vcov(m)["x:z","x:z"]) + (2 * d$x * vcov(m)["z","x:z"])) expect_equivalent(as.numeric(marg$estimate[marg$term == "z"]), dydz, tolerance = tol, label = "dy/dz correct") expect_equivalent(sedydz, as.numeric(marg$std.error[marg$term == "z"]), tolerance = tol_se, label = "Var(dy/dz) correct") # Golder Interaction Case 2 correct f1.2 <- y ~ x + z + w + x:z + z:w m <- lm(f1.2, data = d) marg <- slopes(m) dydx <- coef(m)["x"] + (d$z * coef(m)["x:z"]) sedydx <- sqrt(vcov(m)["x","x"] + (d$z^2 * vcov(m)["x:z","x:z"]) + (2 * d$z * vcov(m)["x","x:z"])) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # Golder Interaction Case 3 correct f1.3 <- y ~ x + z + w + x:z + x:w + z:w m <- lm(f1.3, data = d) marg <- slopes(m) 
dydx <- coef(m)["x"] + (d$z * coef(m)["x:z"]) + (d$w * coef(m)["x:w"]) sedydx <- sqrt(vcov(m)["x","x"] + (d$z^2 * vcov(m)["x:z","x:z"]) + (d$w^2 * vcov(m)["x:w","x:w"]) + (2 * d$z * vcov(m)["x","x:z"]) + (2 * d$w * vcov(m)["x","x:w"]) + (2 * d$z * d$w * vcov(m)["x:z","x:w"]) ) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # Golder Interaction Case 4 correct f1.4 <- y ~ x + z + w + x:z + x:w + z:w + x:z:w m <- lm(f1.4, data = d) marg <- slopes(m) dydx <- coef(m)["x"] + (d$z * coef(m)["x:z"]) + (d$w * coef(m)["x:w"]) + (d$z * d$w * coef(m)["x:z:w"]) sedydx <- sqrt(vcov(m)["x","x"] + (d$z^2 * vcov(m)["x:z","x:z"]) + (d$w^2 * vcov(m)["x:w","x:w"]) + (d$z^2 * d$w^2 * vcov(m)["x:z:w","x:z:w"]) + (2 * d$z * vcov(m)["x","x:z"]) + (2 * d$w * vcov(m)["x","x:w"]) + (2 * d$z * d$w * vcov(m)["x","x:z:w"]) + (2 * d$z * d$w * vcov(m)["x:z","x:w"]) + (2 * d$w * d$z^2 * vcov(m)["x:z","x:z:w"]) + (2 * d$z * d$w^2 * vcov(m)["x:w","x:z:w"]) ) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # Golder Quadratic Case 1 correct f2.1 <- y ~ x + I(x^2) m <- lm(f2.1, data = d) marg <- slopes(m) dydx <- coef(m)["x"] + (2 * coef(m)["I(x^2)"] * d$x) sedydx <- sqrt(vcov(m)["x","x"] + (4 * d$x^2 * vcov(m)["I(x^2)","I(x^2)"]) + (4 * d$x * vcov(m)["x","I(x^2)"])) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # Golder Quadratic Case 2 correct f2.2 <- y ~ x + I(x^2) + z m <- lm(f2.2, data = d) marg <- slopes(m) dydx <- coef(m)["x"] + (2 * 
coef(m)["I(x^2)"] * d$x) sedydx <- sqrt(vcov(m)["x","x"] + (4 * d$x^2 * vcov(m)["I(x^2)","I(x^2)"]) + (4 * d$x * vcov(m)["x","I(x^2)"])) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # Golder Quadratic Case 3a/3b correct f2.3 <- y ~ x + I(x^2) + z + x:z m <- lm(f2.3, data = d) marg <- slopes(m) # ME with respect to x dydx <- coef(m)["x"] + (2 * coef(m)["I(x^2)"] * d$x) + (d$z * coef(m)["x:z"]) sedydx <- sqrt(vcov(m)["x","x"] + (4 * d$x^2 * vcov(m)["I(x^2)","I(x^2)"]) + (d$z^2 * vcov(m)["x:z","x:z"]) + (4 * d$x * vcov(m)["x","I(x^2)"]) + (2 * d$z * vcov(m)["x","x:z"]) + (4 * d$x * d$z * vcov(m)["I(x^2)", "x:z"]) ) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # ME with respect to z dydz <- coef(m)["z"] + (d$x * coef(m)["x:z"]) sedydz <- sqrt(vcov(m)["z","z"] + (d$x^2 * vcov(m)["x:z","x:z"]) + (2 * d$x * vcov(m)["z","x:z"])) expect_equivalent(as.numeric(marg$estimate[marg$term == "z"]), dydz, tolerance = tol, label = "dy/dz correct") expect_equivalent(sedydz, as.numeric(marg$std.error[marg$term == "z"]), tolerance = tol_se, label = "Var(dy/dz) correct") # Golder Quadratic Case 4a/4b correct f2.4 <- y ~ x + I(x^2) + z + x:z + I(x^2):z m <- lm(f2.4, data = d) marg <- slopes(m) # ME with respect to x dydx <- coef(m)["x"] + (2 * coef(m)["I(x^2)"] * d$x) + (d$z * coef(m)["x:z"]) + (2 * d$x * d$z * coef(m)["I(x^2):z"]) sedydx <- sqrt( vcov(m)["x","x"] + (4 * d$x^2 * vcov(m)["I(x^2)","I(x^2)"]) + (d$z^2 * vcov(m)["x:z","x:z"]) + (4 * (d$x^2) * (d$z^2) * vcov(m)["I(x^2):z","I(x^2):z"]) + (4 * d$x * vcov(m)["x","I(x^2)"]) + (2 * d$z * vcov(m)["x","x:z"]) + (4 * d$x * d$z * vcov(m)["I(x^2)", "x:z"]) + (4 * d$x 
* d$z * vcov(m)["x","I(x^2):z"]) + (8 * (d$x^2) * d$z * vcov(m)["I(x^2)","I(x^2):z"]) + (4 * d$x * (d$z^2) * vcov(m)["x:z","I(x^2):z"]) ) expect_equivalent(as.numeric(marg$estimate[marg$term == "x"]), dydx, tolerance = tol, label = "dy/dx correct") expect_equivalent(sedydx, as.numeric(marg$std.error[marg$term == "x"]), tolerance = tol_se, label = "Var(dy/dx) correct") # ME with respect to z dydz <- coef(m)["z"] + (d$x * coef(m)["x:z"]) + (d$x^2 * coef(m)["I(x^2):z"]) sedydz <- sqrt(vcov(m)["z","z"] + (d$x^2 * vcov(m)["x:z","x:z"]) + (d$x^4 * vcov(m)["I(x^2):z","I(x^2):z"]) + (2 * d$x * vcov(m)["z","x:z"]) + (2 * (d$x^2) * vcov(m)["z","I(x^2):z"]) + (2 * (d$x^3) * vcov(m)["x:z","I(x^2):z"]) ) expect_equivalent(as.numeric(marg$estimate[marg$term == "z"]), dydz, tolerance = tol, label = "dy/dz correct") expect_equivalent(sedydz, as.numeric(marg$std.error[marg$term == "z"]), tolerance = tol_se, label = "Var(dy/dz) correct") rm(list = ls())marginaleffects/inst/tinytest/test-bugfix.R0000644000176200001440000001123014560035476020637 0ustar liggesuserssource("helpers.R") using("marginaleffects") # Bug stay dead: Issue 55 # Error: Argument 1 must have names. 
# vab: possibly caused by a version of `emmeans` < 1.6.3 dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/palmerpenguins/penguins.csv") dat$large_penguin <- ifelse(dat$body_mass_g > median(dat$body_mass_g, na.rm = TRUE), 1, 0) mod <- glm(large_penguin ~ bill_length_mm + flipper_length_mm + species, data = dat, family = binomial) mfx <- slopes(mod, variables = "species") expect_inherits(mfx, "data.frame") expect_true(nrow(mfx) > 0) expect_true(ncol(mfx) > 0) # Hernan & Robins replication: bug would not detect `as.factor()` in formula() nhefs <- read.csv("https://raw.githubusercontent.com/vincentarelbundock/modelarchive/main/data-raw/nhefs.csv") f <- wt82_71 ~ qsmk + sex + race + age + I(age*age) + factor(education) + smokeintensity + I(smokeintensity*smokeintensity) + smokeyrs + I(smokeyrs*smokeyrs) + as.factor(exercise) + as.factor(active) + wt71 + I(wt71*wt71) + I(qsmk*smokeintensity) fit <- glm(f, data = nhefs) pre <- predictions(fit, newdata = nhefs) mfx <- slopes(fit, newdata = nhefs) cmp <- comparisons(fit, newdata = nhefs) expect_inherits(pre, "predictions") expect_inherits(cmp, "comparisons") expect_inherits(mfx, "marginaleffects") # Issue 372: reserved variable names dat <- mtcars dat$group <- dat$am mod <- lm(mpg ~ group, data = dat) expect_error(comparisons(mod), pattern = "forbidden") mod <- lm(mpg ~ group + hp, data = dat) expect_error(comparisons(mod), pattern = "forbidden") # Issue #556 set.seed(12345) n = 500 x = sample(1:3, n, replace = TRUE) y = rnorm(n) z = ifelse(x + y + rlogis(n) > 1.5, 1, 0) dat = data.frame(x = factor(x), y = y, z = z) dat <- dat m1 = glm(z ~ x + y, family = binomial, data = dat) nd <- datagrid(model = m1, y = seq(-2.5, 2.5, by = 0.25)) p1 <- predictions(m1, newdata = nd, type = "link") p2 <- as.data.frame(predict(m1, newdata = nd, se.fit = TRUE)) expect_equal(p1$estimate, p2$fit) expect_equal(p1$std.error, p2$se.fit) set.seed(12345) n = 60 x = sample(1:3, n, replace = TRUE) z = ifelse(x + rlogis(n) > 1.5, 
1, 0) dat = data.frame(x = factor(x), z = z) dat <- dat m2 = glm(z ~ I(x==2) + I(x==3), family = binomial, data = dat) p1 <- predictions(m2, type = "link") p2 <- predictions(m2, newdata = dat, type = "link") p3 <- as.data.frame(predict(m2, se.fit = TRUE, type = "link")) # exit_file("works locally") expect_equal(p1$estimate, p3$fit) expect_equal(p1$std.error, p3$se.fit) expect_equal(p2$estimate, p3$fit) expect_equal(p2$std.error, p3$se.fit) # Issue #671 dta <- data.frame( lab = sample(0:1, size = 1000, replace = T), age_group = sample(c("old", "young"), size = 1000, replace = TRUE)) mod <- lm(lab ~ age_group, dta) mfx <- avg_slopes(mod) expect_equivalent(nrow(mfx), 1) expect_true("young - old" %in% mfx$contrast) # Issue #697 dat <- data.frame( outcome = rbinom(n = 100, size = 1, prob = 0.35), var_binom = as.factor(rbinom(n = 100, size = 1, prob = 0.2)), var_cont = rnorm(n = 100, mean = 10, sd = 7), group = sample(letters[1:4], size = 100, replace = TRUE), groups = sample(letters[1:4], size = 100, replace = TRUE)) m1 <- glm( outcome ~ var_binom + var_cont + group, data = dat, family = binomial()) expect_error(avg_slopes(m1), pattern = "forbidden") expect_error(avg_slopes(m1, variables = "var_cont"), pattern = "forbidden") m2 <- glm( outcome ~ var_binom + var_cont + groups, data = dat, family = binomial()) expect_inherits(avg_slopes(m2), "slopes") expect_inherits(avg_slopes(m2, variables = "var_cont"), "slopes") # Issue #723 dat <- data.frame( rbind( c(10., 'A', 'AU'), c(20., 'A', 'AU'), c(30., 'A', 'AU'), c(20., 'B', 'AU'), c(30., 'B', 'AU'), c(40., 'B', 'AU'), c(10., 'B', 'NZ'), c(20., 'B', 'NZ'), c(30., 'B', 'NZ'), c(20., 'A', 'NZ'), c(30., 'A', 'NZ'), c(40., 'A', 'NZ') ) ) colnames(dat) <- c('y', 'treatment', 'country') mod <- lm(y ~ treatment * country, dat) cmp <- comparisons(mod, variables = 'treatment', by = 'country') expect_inherits(cmp, "comparisons") expect_equivalent(nrow(cmp), 2) expect_equivalent( cmp$estimate[cmp$country == "AU"], 
coef(mod)["treatmentB"]) expect_equivalent( cmp$estimate[cmp$country == "NZ"], coef(mod)["treatmentB"] + coef(mod)["treatmentB:countryNZ"]) # Issue #1005 d1 <- d2 <- mtcars d2[["horse power"]] <- d2$hp m1 <- lm(mpg ~ hp, data = d1) m2 <- lm(mpg ~ `horse power`, data = d2) p1 <- plot_predictions(m1, condition = 'hp', draw = FALSE) p2 <- plot_predictions(m2, condition = 'horse power', draw = FALSE) expect_equivalent(p1$estimate, p2$estimate) expect_equivalent(p1$std.error, p2$std.error) source("helpers.R") rm(list = ls()) marginaleffects/inst/tinytest/test-pkg-sampleSelection.R0000644000176200001440000000221514541720224023253 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("sampleSelection") dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/sampleSelection/Mroz87.csv") dat$kids <- dat$kids5 + dat$kids618 > 0 dat <<- dat # heckit: se not supported yet mod <- heckit(lfp ~ age + I( age^2 ) + faminc + kids + educ, wage ~ exper + I( exper^2 ) + educ + city, data = dat) mfx <- slopes(mod) expect_inherits(mfx, "marginaleffects") mfx <- slopes(mod, part = "selection", type = "link") expect_inherits(mfx, "marginaleffects") mfx <- slopes(mod, part = "outcome", type = "unconditional") expect_inherits(mfx, "marginaleffects") expect_true(all(is.na(mfx$std.error))) # selection: no validity mod <- selection(lfp ~ age + I( age^2 ) + faminc + kids + educ, wage ~ exper + I( exper^2 ) + educ + city, data = dat) mfx <- slopes(mod) expect_inherits(mfx, "marginaleffects") mfx <- slopes(mod, part = "selection", type = "link") expect_inherits(mfx, "marginaleffects") mfx <- slopes(mod, part = "outcome", type = "unconditional") expect_inherits(mfx, "marginaleffects") source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-rstanarm.R0000644000176200001440000000276514541720224021765 0ustar liggesusers# HPD tests against emmeans, which uses HDI, but our default is ETI # HDI is implemented specifically for these tests # 
https://github.com/vincentarelbundock/marginaleffects/issues/240 source("helpers.R") using("marginaleffects") options("marginaleffects_posterior_interval" = "hdi") requiet("rstanarm") requiet("emmeans") requiet("margins") requiet("broom") if (!require("rstanarm")) exit_file("rstanarm") # after requiet to avoid messages # interactions void <- capture.output( mod <- stan_glm(am ~ hp + mpg * vs, data = mtcars, family = binomial(link = "logit")) ) expect_slopes(mod, se = FALSE) expect_predictions(predictions(mod), se = FALSE) # no interactions void <- capture.output( mod <- stan_glm(am ~ hp + mpg + vs, data = mtcars, family = binomial(link = "logit")) ) # emtrends mfx <- slopes(mod, newdata = datagrid(hp = 110, mpg = 20, vs = 0), variables = "hp", type = "link") em <- emtrends(mod, ~hp, "hp", at = list(hp = 110, mpg = 20, vs = 0)) em <- tidy(em) expect_equivalent(mfx$estimate, em$hp.trend) expect_equivalent(mfx$conf.low, em$lower.HPD, tolerance = 1e-5) expect_equivalent(mfx$conf.high, em$upper.HPD) options("marginaleffects_posterior_interval" = "eti") # # margins: var is all zeroes and dydx don't match precisely # mar <- margins(mod, unit_ses = TRUE, at = list(hp = 110, mpg = 20, vs = 0)) # mfx <- slopes(mod, variables = "hp", at = list(hp = 110, mpg = 20, vs = 0)) # expect_equivalent(mfx$estimate, mar$dydx_hp) # expect_equivalent(mfx$std.error, mar$dydx_hp) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-ordinal.R0000644000176200001440000001244314554070103021556 0ustar liggesuserssource("helpers.R") using("marginaleffects") if (!EXPENSIVE) exit_file("EXPENSIVE") requiet("MASS") requiet("ordinal") dat <- read.csv( "https://vincentarelbundock.github.io/Rdatasets/csv/MASS/housing.csv", stringsAsFactors = TRUE) # marginaleffects: clm: vs. 
MASS known <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq, data = dat, Hess = TRUE) known <- suppressMessages(avg_slopes(known, type = "probs")) unknown <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = dat) unknown <- avg_slopes(unknown) expect_equivalent(unknown$estimate, known$estimate, tolerance = .00001) expect_equivalent(unknown$std.error, known$std.error, tolerance = .00001) # marginaleffects: protect against corner cases # do not convert numeric to factor in formula stata <- readRDS(testing_path("stata/stata.rds"))[["MASS_polr_01"]] dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) mod <- ordinal::clm(factor(y) ~ x1 + x2, data = dat) expect_error(slopes(mod), pattern = "Please convert the variable to factor") # marginaleffects: clm: vs. Stata stata <- readRDS(testing_path("stata/stata.rds"))[["MASS_polr_01"]] dat <- read.csv(testing_path("stata/databases/MASS_polr_01.csv")) dat$y <- factor(dat$y) dat <- dat mod <- ordinal::clm(y ~ x1 + x2, data = dat) mfx <- avg_slopes(mod) mfx <- merge(mfx, stata) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .001) expect_slopes(mod) # Issue 717: no validity data("wine", package = "ordinal") mod <- clm(rating ~ contact + temp, data = wine) p <- predictions(mod, type = "linear.predictor") expect_inherits(p, "predictions") p <- predictions(mod, type = "cum.prob") expect_inherits(p, "predictions") expect_error(predictions(mod, type = "junk"), pattern = "Assertion") p <- avg_slopes(mod, type = "cum.prob") expect_inherits(p, "slopes") # marginaleffects: clm: no validity tmp <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/ordinal/soup.csv") tab26 <- with(tmp, table("Product" = PROD, "Response" = SURENESS)) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) dat26 <- dat26 
m1 <- clm(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logit") m2 <- update(m1, link = "probit") m3 <- update(m1, link = "cloglog") m4 <- update(m1, link = "loglog") m5 <- update(m1, link = "cauchit", start = coef(m1)) expect_slopes(m1, n_unique = 6) expect_slopes(m2, n_unique = 6) expect_slopes(m3, n_unique = 6) expect_slopes(m4, n_unique = 6) expect_slopes(m5, n_unique = 6) # Issue #718: incorrect standard errors when scale and location are the same dat <- transform(mtcars, cyl = factor(cyl), vs2 = vs) mod1 <- clm(cyl ~ hp + vs, # vs has a location effect scale = ~ vs, # vs also has a scale effect data = dat) mod2 <- clm(cyl ~ hp + vs, # vs has a location effect scale = ~ vs2, # vs also has a scale effect data = dat) nd <- subset(dat, select = -cyl) pre1 <- predictions(mod1) pre2 <- predictions(mod2) pre3 <- predict(mod1, newdata = nd, type = "prob", se.fit = TRUE) expect_equivalent(pre1$estimate, pre2$estimate) expect_equivalent(pre1$std.error, pre2$std.error) expect_equivalent(subset(pre1, group == 4)$estimate, pre3$fit[, 1]) expect_equivalent(subset(pre1, group == 4)$std.error, pre3$se.fit[, 1], tol = 1e-4) # Issue #718: incorrect dat <- transform(mtcars, cyl = factor(cyl)) mod <- suppressWarnings(clm(cyl ~ vs + carb, scale = ~ vs, nominal = ~ carb, data = dat)) dat$cyl <- NULL p1 <- predictions(mod) p2 <- suppressWarnings(predict(mod, newdata = dat, se.fit = TRUE)) expect_equivalent(subset(p1, group == 4)$estimate, p2$fit[, 1]) expect_equivalent(subset(p1, group == 4)$std.error, p2$se.fit[, 1], tol = 1e4) expect_equivalent(subset(p1, group == 6)$estimate, p2$fit[, 2]) expect_equivalent(subset(p1, group == 6)$std.error, p2$se.fit[, 2], tol = 1e4) expect_equivalent(subset(p1, group == 8)$estimate, p2$fit[, 3]) expect_equivalent(subset(p1, group == 8)$std.error, p2$se.fit[, 3], tol = 1e4) # Issue #729 dat <- transform(mtcars, cyl = factor( cyl, levels = c(4, 6, 8), labels = c("small", "medium", "large"))) mod <- clm(cyl ~ hp + carb, 
scale = ~vs, data = dat) mfx <- avg_slopes(mod, slope = "eyex") expect_inherits(mfx, "slopes") mfx <- avg_slopes(mod, slope = "dyex") expect_inherits(mfx, "slopes") exit_file("check elasticities") mfx1 <- slopes(mod, variables = "carb", slope = "dydx") mfx2 <- slopes(mod, variables = "carb", slope = "eyex") mfx3 <- slopes(mod, variables = "carb", slope = "eydx") mfx4 <- slopes(mod, variables = "carb", slope = "dyex") expect_equivalent(mfx2$estimate, mfx1$estimate * (mfx1$carb / mfx1$predicted)) expect_equivalent(mfx3$estimate, mfx1$estimate / mfx1$predicted) expect_equivalent(mfx4$estimate, mfx1$estimate * mfx1$carb) mfx1 <- slopes(mod, variables = "carb", slope = "dydx") mfx2 <- slopes(mod, variables = "hp", slope = "eyex") mfx3 <- slopes(mod, variables = "hp", slope = "eydx") mfx4 <- slopes(mod, variables = "hp", slope = "dyex") expect_equivalent(mfx2$estimate, mfx1$estimate * (mfx1$hp / mfx1$predicted)) expect_equivalent(mfx3$estimate, mfx1$estimate / mfx1$predicted) expect_equivalent(mfx4$estimate, mfx1$estimate * mfx1$hp) source("helpers.R") rm(list = ls())marginaleffects/inst/tinytest/test-hypothesis.R0000644000176200001440000001223014560035476021553 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("emmeans") dat <- mtcars dat$carb <- factor(dat$carb) dat$cyl <- factor(dat$cyl) mod <- lm(mpg ~ carb + cyl, dat) # informative errors and warnings tmp <- lm(mpg ~ drat + wt, data = mtcars) expect_error(slopes(tmp, hypothesis = "drat = wt"), pattern = "newdata") expect_error(comparisons(tmp, hypothesis = "drat = wt"), pattern = "newdata") expect_error( slopes(mod, newdata = dat, hypothesis = "pairwise"), pattern = "smaller") expect_warning( slopes(mod, lincom = "pairwise"), pattern = "lincom") tmp <- lm(mpg ~ wt + drat, data = mtcars) expect_error(predictions( tmp, hypothesis = "wt = drat", newdata = datagrid(wt = 2:3)), pattern = "unique row") tmp <- mtcars tmp$gear <- factor(tmp$gear) expect_error( comparisons( lm(mpg ~ gear, tmp), newdata = 
"mean", variables = list(gear = "all"), hypothesis = "gear = 0"), pattern = "indices") expect_error( slopes(mod, newdata = dat, hypothesis = "reference"), pattern = "smaller") expect_error(slopes( mod, newdata = "mean", hypothesis = c(1, 1, 1), variables = "cyl"), pattern = "but has length") # errors expect_error(slopes( mod, newdata = "mean", hypothesis = matrix(rep(1, 6), ncol = 2), variables = "cyl"), pattern = "2 rows") # marginaleffects: hypothesis mfx <- slopes( mod, newdata = "mean", variables = "cyl", hypothesis = "pairwise") expect_inherits(mfx, "marginaleffects") expect_equivalent(nrow(mfx), 1) # contrasts: hypothesis cmp1 <- comparisons( mod, variables = "cyl", newdata = "mean") cmp2 <- comparisons( mod, variables = "cyl", newdata = "mean", hypothesis = "revpairwise") expect_equivalent(diff(cmp1$estimate), cmp2$estimate) # marginaleffects: hypothesis mfx <- slopes( mod, newdata = "mean", variables = "cyl", hypothesis = "pairwise") expect_inherits(mfx, "marginaleffects") expect_equivalent(nrow(mfx), 1) # predictions: hypothesis p1 <- predictions( mod, newdata = datagrid(cyl = c(4, 6)), hypothesis = c(-1, 1)) p2 <- predictions( mod, datagrid(cyl = c(4, 6))) expect_equivalent(p1$estimate, diff(p2$estimate)) lc <- matrix(c( -1, 1, -1, 0 ), ncol = 2) p3 <- predictions( mod, datagrid(cyl = c(4, 6)), hypothesis = lc) expect_inherits(p3, "predictions") expect_true(all(p3$term == "custom")) # hypothesis matrix colnames become labels colnames(lc) <- c("Contrast A", "Contrast B") p3 <- predictions( mod, datagrid(cyl = c(4, 6)), hypothesis = lc) expect_inherits(p3, "predictions") expect_equivalent(p3$term, c("Contrast A", "Contrast B")) # wildcard mm1 <- predictions(mod, by = "cyl", hypothesis = "b* = b1") expect_equal(mm1$term, paste0("b", 1:3, "=b1")) expect_equal(mm1$estimate[1], 0) # marginaleffects: string function mod <- lm(mpg ~ hp + drat, data = mtcars) mfx1 <- slopes( mod, newdata = "mean", hypothesis = "exp(b1 + b2) = 100") mfx2 <- slopes( mod, newdata = 
"mean", hypothesis = "exp(hp + drat) = 100") expect_inherits(mfx1, "marginaleffects") expect_equivalent(mfx1$estimate, mfx2$estimate) expect_equivalent(mfx1$std.error, mfx2$std.error) # predictions: string formulas p1 <- predictions( mod, newdata = datagrid(hp = c(100, 110, 120))) p2 <- predictions( mod, hypothesis = "b1 + b2 + b3 = 10", newdata = datagrid(hp = c(100, 110, 120))) p3 <- predictions( mod, hypothesis = "b1 = b2", newdata = datagrid(hp = c(100, 110, 120))) expect_equivalent(sum(p1$estimate) - 10, p2$estimate) expect_equivalent(p1$estimate[1] - p1$estimate[2], p3$estimate) # pad missing character levels + hypothesis dat <- mtcars dat$cyl <- as.character(dat$cyl) mod <- lm(mpg ~ cyl, data = dat) p <- predictions( mod, hypothesis = "b1 = b2", newdata = datagrid(cyl = c("6", "8"))) expect_inherits(p, "predictions") expect_equivalent(nrow(p), 1) # Issue #439 mod <- lm(mpg ~ factor(cyl) * factor(am), data = mtcars) cmp <- comparisons( mod, variables = "am", by = "cyl", hypothesis = "pairwise") expect_inherits(cmp, "comparisons") expect_equivalent(nrow(cmp), 3) cmp <- comparisons( mod, variables = "am", by = "cyl", hypothesis = "reference") expect_inherits(cmp, "comparisons") expect_equivalent(nrow(cmp), 2) # Issue #559 mod <- lm(mpg ~ hp + drat, data = mtcars) H <- matrix(c(0, 1, -1, 1/3, 1/3, 1/3), ncol = 2) colnames(H) <- c("H1", "H2") dm <- hypotheses(mod, hypothesis = H) expect_equivalent(dm$term, c("H1", "H2")) # Informative error on row mismatch mod <- lm(mpg ~ hp + drat, data = mtcars) expect_error( predictions(mod, newdata = "mean", hypothesis = "b1=b2"), pattern = "hypothesis testing") # Issue #661: remove redundant labels in pairwise comparisons if (!requiet("tinysnapshot")) exit_file("tinysnapshot") using("tinysnapshot") set.seed(123) dat <- transform(iris, dummy = as.factor(rbinom(nrow(iris), 1, prob = c(0.4, 0.6)))) m <- lm(Sepal.Width ~ Sepal.Length * Species + dummy, data = dat) mfx <- slopes(m, variables = "Sepal.Length", by = c("Species", 
"dummy"), hypothesis = "pairwise") expect_true("setosa, 0 - setosa, 1" %in% mfx$term) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-glmx.R0000644000176200001440000000177214541720224021102 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("glmx") requiet("MASS") requiet("margins") # glmx: marginaleffects vs. margins tmp <- data.frame(x = runif(200, -1, 1)) tmp$y <- rnbinom(200, mu = exp(0 + 3 * tmp$x), size = 1) d <- tmp model <- glmx(y ~ x, data = d, family = negative.binomial, xlink = "log", xstart = 0) expect_slopes(model) # margins produces all zeros for se mar <- margins(model, unit_ses = TRUE) mfx <- slopes(model) expect_true(expect_margins(mfx, mar, se = FALSE, tolerance = .001)) # predictions: glmx: no validity check #skip_if_not_installed("insight", minimum_version = "0.17.1") tmp <- data.frame(x = runif(200, -1, 1)) tmp$y <- rnbinom(200, mu = exp(0 + 3 * tmp$x), size = 1) d <- tmp dhead <- head(d) model <- glmx(y ~ x, data = d, family = negative.binomial, xlink = "log", xstart = 0) pred1 <- predictions(model) pred2 <- predictions(model, newdata = dhead) expect_predictions(pred1, n_row = dhead) expect_predictions(pred2, n_row = 6) source("helpers.R")marginaleffects/inst/tinytest/test-pkg-phylolm.R0000644000176200001440000000126614541720224021615 0ustar liggesuserssource("helpers.R") requiet("phylolm") set.seed(123456) tre = rcoal(60) taxa = sort(tre$tip.label) b0 = 0 b1 = 1 x <- rTrait( n = 1, phy = tre, model = "BM", parameters = list(ancestral.state = 0, sigma2 = 10)) y <- b0 + b1 * x + rTrait(n = 1, phy = tre, model = "lambda", parameters = list( ancestral.state = 0, sigma2 = 1, lambda = 0.5)) dat <<- data.frame(trait = y[taxa], pred = x[taxa]) fit <- phylolm(trait ~ pred, data = dat, phy = tre, model = "lambda") pre <- predictions(fit) cmp <- comparisons(fit) mfx <- avg_slopes(fit) expect_inherits(pre, "predictions") expect_inherits(cmp, "comparisons") expect_inherits(mfx, "slopes") source("helpers.R") rm(list = 
ls())marginaleffects/inst/tinytest/test-aaa-warn_once.R0000644000176200001440000000024314541720224022037 0ustar liggesuserssource("helpers.R") requiet("marginaleffects") # factor in formula mod <- lm(mpg ~ hp + factor(cyl), data = mtcars) expect_warning(slopes(mod)) rm(list = ls())marginaleffects/inst/tinytest/test-pkg-MatchIt.R0000644000176200001440000000327014541720224021457 0ustar liggesuserssource("helpers.R") requiet("MatchIt") using("checkmate") gen_X <- function(n) { X <- matrix(rnorm(9 * n), nrow = n, ncol = 9) X[,5] <- as.numeric(X[,5] < .5) X } gen_A <- function(X) { LP_A <- - 1.2 + log(2)*X[,1] - log(1.5)*X[,2] + log(2)*X[,4] - log(2.4)*X[,5] + log(2)*X[,7] - log(1.5)*X[,8] P_A <- plogis(LP_A) rbinom(nrow(X), 1, P_A) } gen_Y_C <- function(A, X) { 2*A + 2*X[,1] + 2*X[,2] + 2*X[,3] + 1*X[,4] + 2*X[,5] + 1*X[,6] + rnorm(length(A), 0, 5) } gen_Y_B <- function(A, X) { LP_B <- -2 + log(2.4)*A + log(2)*X[,1] + log(2)*X[,2] + log(2)*X[,3] + log(1.5)*X[,4] + log(2.4)*X[,5] + log(1.5)*X[,6] P_B <- plogis(LP_B) rbinom(length(A), 1, P_B) } gen_Y_S <- function(A, X) { LP_S <- -2 + log(2.4)*A + log(2)*X[,1] + log(2)*X[,2] + log(2)*X[,3] + log(1.5)*X[,4] + log(2.4)*X[,5] + log(1.5)*X[,6] sqrt(-log(runif(length(A)))*2e4*exp(-LP_S)) } set.seed(19599) n <- 2000 X <- gen_X(n) A <- gen_A(X) Y_C <- gen_Y_C(A, X) Y_B <- gen_Y_B(A, X) Y_S <- gen_Y_S(A, X) d <- data.frame(A, X, Y_C, Y_B, Y_S) mF <- matchit( A ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9, data = d, method = "full", estimand = "ATT") md <- match.data(mF) fit1 <- lm(Y_C ~ A * (X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9), data = md, weights = weights) pre <- avg_predictions(fit1, variables = "A", vcov = ~subclass, newdata = subset(md, A == 1), wts = "weights", by = "A") expect_inherits(pre, "predictions") expect_data_frame(pre, nrows = 2) cmp <- avg_comparisons(fit1, variables = "A", vcov = ~subclass, newdata = subset(md, A == 1), wts = "weights") expect_inherits(cmp, "comparisons") expect_data_frame(pre, nrows = 
2) marginaleffects/inst/tinytest/test-pkg-mlogit.R0000644000176200001440000000653614541720224021431 0ustar liggesusers# WARNING: standard errors are different from nnet::multinom() because stats::vcov gives a very difference matrix. # why `newdata` used to not be supported # here the `newdata` does not include the individual or choice variabls at all, # but we still get a prediction. Impossible to know what order the rows are in, # if `newdata` is balanced, or what group ids to give. `newdata` could be # completely malformed and we would still produce results. I could make strong # assumptions about group id being a multiple of number of rows with some # modulo hacks, but that's bad practice. Example: # nd <- TravelMode[, 3:ncol(TravelMode)] # predict(mod, newdata = head(nd, 12)) source("helpers.R") using("marginaleffects") if (ON_CI) exit_file("on ci") requiet("nnet") requiet("mlogit") requiet("data.table") TravelMode <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/AER/TravelMode.csv") TravelMode$rownames <- NULL # {mlogit} assumes first column is the index mod <- mlogit(choice ~ wait + gcost | income + size, data = TravelMode) # no validity mod <- mlogit(choice ~ wait + gcost | income + size, TravelMode) cmp <- comparisons(mod) pre <- predictions(mod) tid <- tidy(cmp) expect_inherits(cmp, "comparisons") expect_inherits(pre, "predictions") expect_slopes(mod) expect_true("group" %in% colnames(tid)) # error on bad newdata mod <- mlogit(choice ~ wait + gcost | income + size, TravelMode) nd <- head(TravelMode, 5) expect_error(comparisons(mod, newdata = nd), pattern = "number of choices") # mlogit doesn't install on Github actions, so we can't have it in DESCRIPTION, # but if we use the Fishing data, this raises an error in check() # vs. 
nnet::multinom data("Fishing", package = "mlogit") dat <- Fishing Fish <- dfidx(Fishing, varying = 2:9, shape = "wide", choice = "mode") m1 <- mlogit(mode ~ 0 | income, data = Fish) m2 <- nnet::multinom(mode ~ income, data = Fishing, trace = FALSE) # predictions() vs. nnet::multinom() p1 <- predictions(m1) p2 <- predictions(m2, type = "probs") setDT(p1, key = c("rowid", "group")) setDT(p2, key = c("rowid", "group")) expect_equivalent(p1$estimate, p2$estimate, tolerance = 1e-5) expect_true(cor(p1$estimate, p2$estimate) > .98) # comparisons() vs. nnet::multinom() c1 <- comparisons(m1) c2 <- comparisons(m2, type = "probs") setDT(c1, key = c("rowid", "term", "group")) setDT(c2, key = c("rowid", "term", "group")) expect_equivalent(c1$estimate, c2$estimate, tolerance = 1e-5) expect_true(cor(c1$estimate, c2$estimate) > .98) # slopes() vs. nnet::multinom() mfx1 <- slopes(m1) mfx2 <- slopes(m2, type = "probs") setDT(mfx1, key = c("rowid", "term", "group")) setDT(mfx2, key = c("rowid", "term", "group")) expect_equivalent(mfx1$estimate, mfx2$estimate, tolerance = 1e-5) expect_true(cor(mfx1$estimate, mfx2$estimate) > .98) # Issue #551 mod1 <- mlogit(choice ~ wait + gcost | income + size, TravelMode) mfx <- slopes(mod1, variables = c("income", "size")) expect_inherits(mfx, "marginaleffects") TravelMode$dsize <- ifelse(TravelMode$size == "1", 1, 0) mod2 <- mlogit(choice ~ wait + gcost | income + dsize, TravelMode) mfx <- slopes(mod2, variables = c("income", "dsize")) expect_inherits(mfx, "marginaleffects") TravelMode$dsize <- as.factor(TravelMode$dsize) mod3 <- mlogit(choice ~ wait + gcost | income + dsize, TravelMode) mfx <- slopes(mod3, variables = c("income", "dsize")) expect_inherits(mfx, "marginaleffects") rm(list = ls())marginaleffects/inst/tinytest/test-misc.R0000644000176200001440000000045614541720224020305 0ustar liggesuserssource("helpers.R") # important for modelsummary glance tmp <- mtcars tmp$am <- as.logical(tmp$am) mod <- lm(mpg ~ am + factor(cyl), tmp) 
expect_inherits(attr(predictions(mod), "model"), "lm") expect_inherits(attr(comparisons(mod), "model"), "lm") expect_inherits(attr(avg_slopes(mod), "model"), "lm")marginaleffects/inst/tinytest/test-dots.R0000644000176200001440000000063214541720224020317 0ustar liggesuserssource("helpers.R") using("marginaleffects") requiet("lme4") # lme4::lmer mod <-lme4::lmer(mpg ~ hp + (1 | gear), data = mtcars) expect_inherits(slopes(mod), "marginaleffects") expect_warning(slopes(mod, blah = 2), pattern = "Github") # stats::lm mod <- lm(mpg ~ hp, data = mtcars) expect_inherits(slopes(mod), "marginaleffects") expect_warning(slopes(mod, blah = 2), pattern = "Github") rm(list = ls())marginaleffects/inst/tinytest/test-pkg-quantreg.R0000644000176200001440000000316614560035476021771 0ustar liggesuserssource("helpers.R") if (ON_CI || ON_WINDOWS || ON_OSX) exit_file("local linux only") using("marginaleffects") requiet("quantreg") requiet("emmeans") requiet("broom") # marginaleffects: rq: Stata stata <- readRDS(testing_path("stata/stata.rds"))$quantreg_rq_01 model <- suppressWarnings(quantreg::rq(mpg ~ hp * wt + factor(cyl), data = mtcars)) expect_slopes(model) mfx <- merge(avg_slopes(model), stata) expect_equivalent(mfx$estimate, mfx$dydxstata, tolerance = .0001) expect_equivalent(mfx$std.error, mfx$std.errorstata, tolerance = .001) # marginaleffects vs. 
emtrends stata <- readRDS(testing_path("stata/stata.rds"))$quantreg_rq_01 model <- quantreg::rq(mpg ~ hp * wt + factor(cyl), data = mtcars) mfx <- slopes(model, variables = "hp", newdata = datagrid(hp = 110, wt = 2, cyl = 4)) em <- suppressMessages(emtrends(model, ~hp, "hp", at = list(hp = 110, wt = 2, cyl = 4))) em <- tidy(em) expect_equivalent(mfx$estimate, em$hp.trend, tolerance = .00001) expect_equivalent(mfx$std.error, em$std.error, tolerance = .001) # predictions: rq: no validity model <- quantreg::rq(mpg ~ hp * wt + factor(cyl), data = mtcars) pred1 <- predictions(model) pred2 <- suppressWarnings(predictions(model, newdata = head(mtcars))) expect_predictions(pred1, n_row = nrow(mtcars)) expect_predictions(pred2, n_row = 6) expect_equivalent(pred1$estimate, predict(model)) expect_equivalent(pred2$estimate, predict(model, newdata = head(mtcars))) # Issue #829 mod = rq(Sepal.Length ~ Sepal.Width * Petal.Length + Species, tau = .25, data = iris) cmp = comparisons(mod) expect_false(any(is.na(cmp$Species))) expect_false(any(is.na(iris$Species))) rm(list = ls())marginaleffects/inst/WORDLIST0000644000176200001440000000410514541720224015554 0ustar liggesusers’s Aaditya AAP AAPs Adelie Adelies AFAICT aje al AME Anastasios Angelopoulos APM ARD Arel args arxiv arXiv arXivhttp Augusto Avina backtransform backtransformation backtransforming backtransforms bayesian bc bca Beaumais BH Boca bonferroni BorgeJorge Brilleman brms bugfix Bundock Candes Carlisle cdot chisq CMD cme Codecov confounders cran CRC cumprob customclass customizable Customizable cv datagrid deduplication demeaned Demetri dev differenceavg differenceavgwts differencing disp doi Dowd dX DxP dY dydx dydxavg dydxavgwts dyex dyexavg dyexavglnratioavgwts dyexavgwts elasticities emmeans EMMs Ep Epidemiologic eps Errickson estimand Estimand estimands Estimands et ETI ev expdydx expdydxavg expdydxavgwts exponentiate exponentiating expvalue eydx eydxavg eydxavgwts eyex eyexavg eyexavgwts Fabbri fdcenter fdforward 
fdr fivenum Foygel frac fwb Gam Gentoos geoms get_hdi ggeffects github GLMMs grayscale Greifer grey HC HDI Hedeker Heiss Hernán heteroskedasticity Heteroskedasticity hochberg holm hommel http https Hufthammer incrementing interpretability interpretable inv invlink io IPW iqr Isager issuecomment J'VJ jacobian jacobians jacobiansmbox JM JVJ Kapre kenward Kirill Krinsky Kurz kwaa Lakens Leeper Lenth Lifecycle lift liftavg linpred listwise LLC lm lme ln lnor lnoravg lnoravgwts lnratio lnratioavg lnratioavgwts lp Lüdecke Marcio marginalmeans mbox MEM MER methodologists MFX minmax modelbased MrJerryTAO MRP Nafa nd newdata NHEFS numDeriv Numpy NumPyro ORs Ove pairwise Pananos param perc plasminogen poisson ppd pre probs programmatically PxD quartile Rainey Ramdas Randas ratioavg ratioavgwts Raton Rcpp RCTs Rdatasets RDatasets Rdatatable regressand regressors’ reimplement Resul revpairwise revreference revsequential richardson Rina Rmarkdown Robb Rohan rowid rsample rvar satterthwaite Scheel sd semilog sensical sequential softmax Solovev Stata Stata's Stata’s StataCorp streptokinase th threenum Tibshirani tidiers timpipeseek TLDR Tomz TOST tPA tukey Umit underspecified unintuitive untransformed varepsilon vcovHC vincentarelbundock Wittenberg wmean xb XGBoost Xu zlink zprob