merTools/inst/shiny-apps/shinyMer/ui.R

# ShinyMer
shinyUI(fluidPage(
  shiny::titlePanel("Explore your merMod interactively"),
  shiny::sidebarLayout(
    shiny::sidebarPanel(
      shiny::radioButtons("newdataType", "Simulated data scenario",
                          choices = df.choices, selected = NULL),
      # conditionalPanel(condition = "input.newdataType!='orig'",
      #                  selectInput("filter", "Filter",
      #                              choices = names(merMod@frame))
      # ),
      shiny::numericInput("n.sims", label = "Simulations (Max=1,000)",
                          value = 100, min = 1, max = 1000),
      conditionalPanel(condition = "input.conditionedPanels==3",
                       helpText("Here you can compare impact of changing input variables on the outcome variable for selected cases."),
                       selectInput("group", "Group Factor:",
                                   choices = names(ranef(merMod)), selected = NULL),
                       uiOutput("re.ui"),
                       sliderInput("nbin", "Effect Bins", min = 3, max = 10,
                                   value = 4, step = 1),
                       helpText("And modify your fixed effects"),
                       selectInput("fixef", "Fixed Effect:",
                                   choices = all.vars(nobars(formula(merMod)))[-1],
                                   selected = NULL)
      ),
      shiny::numericInput("alpha", label = "Credible Interval (%)",
                          value = 95, min = 0, max = 100),
      shiny::radioButtons("stat", "Measure of central tendency",
                          choices = c("Median" = "median", "Mean" = "mean"),
                          selected = NULL),
      shiny::radioButtons("predMetric", "Prediction metric",
                          choices = c("Linear Predictor" = "linear.prediction",
                                      "Probability" = "probability"),
                          selected = NULL),
      shiny::checkboxInput("resid.var", label = "Include Residual Variation",
                           value = TRUE)
    ),
    shiny::mainPanel(
      shiny::tabsetPanel(type = "tabs",
        shiny::tabPanel("Prediction uncertainty",
                        shiny::h3("Prediction Intervals:"),
                        plotOutput("predPlot"),
                        shiny::h3("All Predictions"),
                        if ("DT" %in% rownames(installed.packages())) {
                          dataTableOutput("dt")
                        } else {
                          # output id must match the server-side output$dt
                          tableOutput("dt")
                        },
                        shiny::downloadButton("downloadData",
                                              "Download predict interval data"),
                        value = 1
        ),
        shiny::tabPanel("Parameters",
                        shiny::h3("Original call"),
                        textOutput("call"),
                        shiny::h3("Fixed Effects"),
                        plotOutput("feplot"),
                        shiny::h3("Group Effects"),
                        plotOutput("replot"),
                        value = 2
        ),
        shiny::tabPanel("Substantive Effect",
                        shiny::h3("Effect Sizes"),
                        plotOutput("gPlot"),
                        shiny::h3("Fixef Effect Impact"),
                        plotOutput("wigglePlot"),
                        value = 3
        ),
        id = "conditionedPanels"
      )
    )
  )
))

merTools/inst/shiny-apps/shinyMer/server.r

#SERVER----
server <- function(input, output){
  output$text1 <- renderText({
    paste("You have selected", input$stat)
  })

  # Build the prediction data set chosen by the user and attach prediction intervals
  predInput <- reactive({
    data <- switch(input$newdataType,
                   "orig" = merMod@frame,
                   "mean" = draw(merMod, type = "average"),
                   "rand" = draw(merMod, type = "random"),
                   "user" = newdata)

    cbind(predictInterval(merMod, newdata = data, level = input$alpha/100,
                          type = input$predMetric,
                          include.resid.var = input$resid.var,
                          n.sims = input$n.sims, stat = input$stat), data)
  })

  if ("DT" %in% rownames(installed.packages())) {
    output$dt <- renderDataTable({
      predInput()
    })
  } else {
    output$dt <- renderTable({
      predInput()
    })
  }

  output$downloadData <- shiny::downloadHandler(
    filename = "predictIntervalResults.csv",
    content = function(file) {
      write.csv(shiny::isolate(predInput()), file)
    }
  )

  output$predPlot <- renderPlot({
    data <- predInput()
    data$x <- factor(seq(1:nrow(data)))
    ggplot(data, aes(x = x, y = fit, ymin = lwr, ymax = upr)) +
      geom_pointrange() +
      theme_bw() +
      theme(axis.text.x = element_blank(),
            panel.grid.major.x = element_blank(),
            panel.grid.minor.x = element_blank(),
            axis.ticks.x = element_blank())
  })

  feData <- reactive({
    data <- FEsim(merMod, n.sims = input$n.sims)
    return(data)
  })

  output$feplot <- renderPlot({
    plotdf <- feData()
    scale <- input$alpha/100
    vartmp <- input$stat
    plotFEsim(plotdf, level = scale, stat = vartmp, sd = TRUE,
              intercept = FALSE)
  })

  reData <- reactive({
    data <- REsim(merMod, n.sims = input$n.sims)
    return(data)
  })

  output$replot <- renderPlot({
    plotdf <- reData()
    scale <- input$alpha/100
    vartmp <- input$stat
    plotREsim(plotdf, level = scale, stat = vartmp, sd = TRUE)
  })

  output$call <- renderPrint({
    merMod@call
  })

  # Data used for the substantive effect panels; capped at 12 cases for display
  reEffInput <- reactive({
    data <- switch(input$newdataType,
                   "orig" = merMod@frame,
                   "mean" = draw(merMod, type = "average"),
                   "rand" = draw(merMod, type = "random"),
                   "user" = newdata)
    if (nrow(data) > 12) {
      warning("Too much data selected, only using top 12 rows.")
      data <- data[1:12, ]
    }
    return(data)
  })

  groupData <- reactive({
    plotdf <- REimpact(merMod, newdata = reEffInput(),
                       groupFctr = input$group, term = input$term,
                       level = input$alpha/100, breaks = input$nbin,
                       type = input$predMetric,
                       include.resid.var = input$resid.var,
                       n.sims = input$n.sims, stat = input$stat)
    plotdf$upr <- qnorm(input$alpha/100) * plotdf$AvgFitSE
    plotdf$lwr <- qnorm(input$alpha/100) * plotdf$AvgFitSE
    plotdf$upr <- plotdf$AvgFit + plotdf$upr
    plotdf$lwr <- plotdf$AvgFit - plotdf$lwr
    plotdf$bin <- factor(plotdf$bin)
    return(plotdf)
  })

  output$gPlot <- renderPlot({
    ggplot(groupData(), aes(x = bin, y = AvgFit, ymin = lwr, ymax = upr)) +
      geom_pointrange() + facet_wrap(~case) + theme_bw() +
      labs(x = "Bin", y = "Value of DV",
           title = "Impact of grouping term for selected case")
  })

  wiggleData <- reactive({
    valLookup <- unique(merMod@frame[, input$fixef])
    if (class(valLookup) %in% c("numeric", "integer")) {
      newvals <- seq(min(valLookup), max(valLookup), length.out = 20)
    } else {
      # use the observed levels, sampling down to 50 if there are too many
      if (length(valLookup) < 50) {
        newvals <- valLookup
      } else {
        newvals <- sample(valLookup, 50)
      }
    }
    plotdf <- wiggle(reEffInput(), input$fixef, values = list(newvals))
    plotdf <- cbind(plotdf, predictInterval(merMod, newdata = plotdf,
                                            type = input$predMetric,
                                            level = input$alpha/100,
                                            include.resid.var = input$resid.var,
                                            n.sims = input$n.sims,
                                            stat = input$stat))
    plotdf$X <- plotdf[, input$fixef]
    plotdf$case <- rep(1:length(newvals), length = nrow(reEffInput()))
    return(plotdf)
  })

  output$re.ui <- renderUI({
    choices <- names(ranef(merMod)[[input$group]])
    selectInput("term", "Group Term:", choices = choices,
                selected = choices[1])
  })

  output$wigglePlot <- renderPlot({
    ggplot(wiggleData(), aes(x = X, y = fit, ymin = lwr, ymax = upr)) +
      geom_pointrange() + facet_wrap(~case) + theme_bw() +
      labs(y = "Simulated Value of DV",
           title = "Impact of selected fixed effect for selected cases.")
  })
}

merTools/inst/shiny-apps/shinyMer/global.R

# Import variables from function
library(ggplot2)
library(shiny)

merMod <<- .shinyMerPar$merMod

if (!is.null(.shinyMerPar$simData)) {
  newdata <<- .shinyMerPar$simData
}

if (!exists("newdata")) {
  df.choices <- c("Model Frame" = "orig",
                  "Random Obs" = "rand",
                  "Average Obs" = "mean")
} else {
  df.choices <- c("User Supplied" = "user",
                  "Model Frame" = "orig",
                  "Random Obs" = "rand",
                  "Average Obs" = "mean")
}
merTools/inst/doc/marginal_effects.html

Using merTools to Marginalize Over Random Effect Levels

Jared Knowles and Carl Frederick

2019-05-12

Marginalizing Random Effects

One of the most common questions about multilevel models is how much influence grouping terms have on the outcome. One way to explore this is to simulate the predicted values of an observation across the distribution of random effects for a specific grouping variable and term. This can be described as “marginalizing” predictions over the distribution of random effects. This allows you to explore the influence of the grouping term and grouping levels on the outcome scale by simulating predictions for each observation across the distribution of effect sizes.

The REmargins() function allows you to do this. Here, we take the example sleepstudy model and marginalize predictions for all of the random effect terms (Subject:Intercept, Subject:Days). By default, the function will marginalize over the quartiles of the expected rank (see expected rank vignette) of the effect distribution for each term.

fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
mfx <- REmargins(merMod = fm1, newdata = sleepstudy[1:10,])
head(mfx)
#>   Reaction Days Subject case grouping_var      term breaks
#> 1   249.56    0     309    1      Subject Intercept      1
#> 2   249.56    0     334    1      Subject      Days      1
#> 3   249.56    0     350    1      Subject Intercept      2
#> 4   249.56    0     330    1      Subject      Days      2
#> 5   249.56    0     308    1      Subject Intercept      3
#> 6   249.56    0     332    1      Subject      Days      3
#>   original_group_level fit_combined upr_combined lwr_combined fit_Subject
#> 1                  308     211.6022     252.5443     177.4033  -40.220047
#> 2                  308     244.4658     283.3303     207.5940   -7.471686
#> 3                  308     237.1407     277.0037     201.7126  -15.712567
#> 4                  308     276.0716     311.4830     239.3424   22.626852
#> 5                  308     256.3923     292.7700     216.6426    3.815614
#> 6                  308     262.6514     295.5489     224.5716   10.306149
#>   upr_Subject lwr_Subject fit_fixed upr_fixed lwr_fixed
#> 1   -4.177672   -74.44036  249.3960  283.5701  215.7122
#> 2   29.913604   -45.58652  251.8809  286.7075  218.0804
#> 3   21.558164   -52.58777  250.4633  288.5282  216.1605
#> 4   59.942004   -14.46200  249.9996  285.5817  216.0925
#> 5   40.481303   -34.47880  252.2277  285.3248  217.0583
#> 6   48.049021   -28.16187  252.2168  286.4449  218.3318

The new data frame output from REmargins contains a lot of information. The first few columns contain the original data passed to newdata. Each observation in newdata is identified by a case number, because the function repeats each observation by the number of random effect terms and number of breaks to simulate each term over. Then the grouping_var and term columns identify which random effect term the row marginalizes over, breaks records which quantile of the expected rank distribution the simulated effect was drawn from, and original_group_level records the group the observation originally belonged to. The remaining fit_, upr_, and lwr_ columns give the simulated point estimates and interval bounds for the combined prediction, the random effect contribution, and the fixed-effect-only prediction.

Summarizing

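One simple way to summarize the marginalized predictions is to average them within each random effect term and expected-rank break. The following is a minimal sketch using base R and the mfx object created above (this example is not part of the original vignette); the column names follow the output printed earlier.

mfx_sum <- aggregate(cbind(fit_combined, fit_Subject) ~ term + breaks,
                     data = mfx, FUN = mean)
mfx_sum[order(mfx_sum$term, mfx_sum$breaks), ]
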
Plotting

Finally, you can plot the results of the marginalization to evaluate the effect of the random effect terms graphically.

ggplot(mfx) + aes(x = breaks, y = fit_Subject, group = case) +
  geom_line() +
  facet_wrap(~term)

merTools/inst/doc/Using_predictInterval.html

Prediction Intervals from merMod Objects

Jared Knowles and Carl Frederick

2019-05-12

Introduction

Fitting (generalized) linear mixed models, (G)LMM, to very large data sets is becoming increasingly easy, but understanding and communicating the uncertainty inherent in those models is not. As the documentation for lme4::predict.merMod() notes:

There is no option for computing standard errors of predictions because it is difficult to define an efficient method that incorporates uncertainty in the variance parameters; we recommend lme4::bootMer() for this task.

We agree that, short of a fully Bayesian analysis, bootstrapping is the gold standard for deriving prediction intervals from a (G)LMM, but the time required to obtain even a respectable number of replications from bootMer() quickly becomes prohibitive when the initial model fit is on the order of hours instead of seconds. The only other alternative we have identified for these situations is to use the arm::sim() function to simulate values. Unfortunately, this only takes variation in the fixed coefficients and residuals into account, and assumes the conditional modes of the random effects are fixed.

We developed the predictInterval() function to incorporate the variation in the conditional modes of the random effects (CMRE, a.k.a. BLUPs in the LMM case) into calculating prediction intervals. Ignoring the variance in the CMRE results in overly confident estimates of predicted values and in cases where the precision of the grouping term varies across levels of grouping terms, creates the illusion of difference where none may exist. The importance of accounting for this variance comes into play sharply when comparing the predictions of different models across observations.

We take the warning from lme4::predict.merMod() seriously, but view this method as a decent first approximation to the full bootstrap analysis for (G)LMMs fit to very large data sets.

Conceptual description

In order to generate a proper prediction interval, a prediction must account for three sources of uncertainty in mixed models:

  1. the residual (observation-level) variance,
  2. the uncertainty in the fixed coefficients, and
  3. the uncertainty in the variance parameters for the grouping factors.

A fourth, uncertainty about the data, is beyond the scope of any prediction method.

As we mentioned above, the arm::sim() function incorporates the first two sources of variation but not the third, while bootstrapping using lme4::bootMer() does incorporate all three sources of uncertainty because it re-estimates the model using random samples of the data.

When inference about the values of the CMREs is of interest, it would be nice to incorporate some degree of uncertainty in those estimates when comparing observations across groups. predictInterval() does this by drawing values of the CMREs from the conditional variance-covariance matrix of the random effects accessible from lme4::ranef(model, condVar=TRUE). Thus, predictInterval() incorporates all of the uncertainty from sources one and two, and part of the variance from source three, but the variance parameters themselves are treated as fixed.

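As a hedged illustration of where this conditional variance information lives (using the fm1 sleepstudy model estimated later in this vignette), the per-level covariance arrays are stored in the "postVar" attribute of the ranef() output:

re <- lme4::ranef(fm1, condVar = TRUE)
pv <- attr(re$Subject, "postVar")
dim(pv)  # 2 x 2 x 18: one covariance matrix per subject for (Intercept, Days)
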
To do this, predictInterval() takes an estimated model of class merMod and, like predict(), a data.frame upon which to make those predictions and:

  1. extracts the fixed and random coefficients
  2. takes n draws from the multivariate normal distribution of the fixed and random coefficients (separately)
  3. calculates the linear predictor for each row in newdata based on these draws, and
  4. optionally incorporates the residual variation (per the arm::sim() function), and,
  5. returns newdata with the lower and upper limits of the prediction interval and the mean or median of the simulated predictions

Currently, the supported model types are linear mixed models and mixed logistic regression models.

The prediction data set can include levels that are not in the estimation model frame. The prediction intervals for such observations only incorporate uncertainty from fixed coefficient estimates and the residual level of variation.

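To make the numbered steps above concrete, here is a schematic sketch of the simulation logic (not the package source). It assumes a fitted lmerMod like the fm1 sleepstudy model estimated below and a hypothetical prediction frame nd with the same columns, and for brevity it omits the random effect draws from step 2.

library(MASS)  # for mvrnorm()

n.sims <- 1000
beta_hat  <- lme4::fixef(fm1)                            # step 1: fixed coefficients
beta_vcov <- as.matrix(vcov(fm1))
beta_sims <- MASS::mvrnorm(n.sims, beta_hat, beta_vcov)  # step 2: multivariate normal draws

X   <- model.matrix(~ Days, data = nd)                   # step 3: linear predictor per draw
eta <- X %*% t(beta_sims)
eta <- eta + rnorm(length(eta), sd = sigma(fm1))         # step 4: residual variation

data.frame(fit = apply(eta, 1, median),                  # step 5: interval summaries
           lwr = apply(eta, 1, quantile, 0.025),
           upr = apply(eta, 1, quantile, 0.975))
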
Comparison to existing methods

What do the differences between predictInterval() and the other methods for constructing prediction intervals mean in practice? We would expect predictInterval() to produce prediction intervals that are wider than those from all methods except the bootMer() method. We would also hope that the prediction point estimates from the other methods fall within the prediction interval produced by predictInterval(). Ideally, the predicted point estimate produced by predictInterval() would fall close to that produced by bootMer().

This section compares the results of predictInterval() with those obtained using arm::sim() and lme4::bootMer() using the sleepstudy data from lme4. These data contain reaction time observations for 10 days on 18 subjects.
The data are sorted such that the first 10 observations are days one through ten for subject 1, the next 10 are days one through ten for subject 2 and so on. The example model that we are estimating below estimates random intercepts and a random slope for the number of days.

Step 1: Estimating the model and using predictInterval()

First, we will load the required packages and data and estimate the model:

set.seed(271828)
data(sleepstudy)
fm1 <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy)
display(fm1)
#> lmer(formula = Reaction ~ Days + (Days | Subject), data = sleepstudy)
#>             coef.est coef.se
#> (Intercept) 251.41     6.82 
#> Days         10.47     1.55 
#> 
#> Error terms:
#>  Groups   Name        Std.Dev. Corr 
#>  Subject  (Intercept) 24.74         
#>           Days         5.92    0.07 
#>  Residual             25.59         
#> ---
#> number of obs: 180, groups: Subject, 18
#> AIC = 1755.6, DIC = 1760.3
#> deviance = 1751.9

Then, calculate prediction intervals using predictInterval(). The predictInterval function has a number of user-configurable options. In this example, we use the original data sleepstudy as the newdata. We pass the function the fm1 model we fit above. We also choose a 95% interval with level = 0.95, though we could choose a less conservative prediction interval. We make 1,000 simulations for each observation with n.sims = 1000. We set the point estimate to be the median of the simulated values, instead of the mean. We ask for the linear predictor back; if we had fit a logistic regression, we could instead have asked for our predictions on the probability scale. Finally, we indicate that we want the predictions to incorporate the residual variance from the model – an option only available for lmerMod objects.

PI.time <- system.time(
  PI <- predictInterval(merMod = fm1, newdata = sleepstudy,
                        level = 0.95, n.sims = 1000,
                        stat = "median", type="linear.prediction",
                        include.resid.var = TRUE)
)

Here is the first few rows of the object PI:

fit upr lwr
251.6679 311.3148 196.4089
271.4794 330.9183 214.2174
292.6803 350.9868 237.7709
311.6967 369.2907 254.2236
331.8317 389.7439 278.1873
350.7461 408.1392 294.8502

The three columns are the median (fit) and limits of the 95% prediction interval (upr and lwr) because we set level=0.95. The following figure displays the output graphically for the first 30 observations.

library(ggplot2);
ggplot(aes(x=1:30, y=fit, ymin=lwr, ymax=upr), data=PI[1:30,]) +
  geom_point() +
  geom_linerange() +
  labs(x="Index", y="Prediction w/ 95% PI") + theme_bw()

Step 1a: Adjusting for correlation between fixed and random effects

The prediction intervals above do not correct for correlations between fixed and random effects. This tends to lead to predictive intervals that are too conservative, especially for existing groups when there is a lot of data on relatively few groups. In that case, a significant portion of the uncertainty in the prediction can be due to variance in the fixed intercept which is anti-correlated with variance in the random intercept effects. For instance, it does not actually matter if the fixed intercept is 5 and the random intercept effects are -2, 1, and 1, versus a fixed intercept of 6 and random intercept effects of -3, 0, and 0. (The latter situation will never be the MLE, but it can occur in this package’s simulations.)

To show this issue, we’ll use the sleep study model, predicting the reaction times of subjects after experiencing sleep deprivation:

Let’s use the model to give an interval for the true average reaction time of a large group of subjects like the first one in the study:

There are two ways to get predictInterval to create less-conservative intervals to deal with this. The first is just to tell it to consider certain fixed effects as fully-known (that is, with an effectively 0 variance.) This is done using the ignore.fixed.effects argument.

The second way is to use an ad-hoc variance adjustment, with the fix.intercept.variance argument. This takes the model’s intercept variance \(\hat\sigma^2_\mu\) and adjusts it to:

\[\hat\sigma^{\prime 2}_\mu = \hat\sigma^2_\mu - \sum_{levels}\frac{1}{\sum_{groups(level)} 1/(\hat\sigma^2_{level}+\hat\sigma^2_{group})}\]

In other words, it assumes the given intercept variance incorporates spurious variance for each level, where each of the spurious variance terms has a precision equal to the sum of the precisions due to the individual groups at that level.

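For example, a hedged sketch of the variance-adjustment option, using the fm1 and sleepstudy objects from above (the argument names follow this text; check args(predictInterval) in your installed version of merTools):

PI_adj <- predictInterval(fm1, newdata = sleepstudy,
                          n.sims = 1000, level = 0.95,
                          include.resid.var = TRUE,
                          fix.intercept.variance = TRUE)
head(PI_adj)
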
A few notes about these two arguments:

Step 2: Comparison with arm::sim()

How does the output above compare to what we could get from arm::sim()?

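The comparison chunk itself is not echoed in this document. A hedged sketch of the idea, assuming the fm1 and sleepstudy objects above, uses the simulated fixed coefficients stored in the fixef slot of the arm::sim() result, with the conditional modes of the random effects held fixed as an offset:

fm1.sims  <- arm::sim(fm1, n.sims = 1000)
fe.sims   <- fm1.sims@fixef                              # 1000 x 2 matrix of (Intercept, Days) draws
re.offset <- predict(fm1) - predict(fm1, re.form = NA)   # conditional modes, held fixed

X        <- model.matrix(~ Days, data = sleepstudy)
yhat.sim <- X %*% t(fe.sims) + re.offset + rnorm(nrow(X) * 1000, sd = sigma(fm1))
PI.arm   <- data.frame(fit = apply(yhat.sim, 1, median),
                       upr = apply(yhat.sim, 1, quantile, 0.975),
                       lwr = apply(yhat.sim, 1, quantile, 0.025))
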
The prediction intervals from arm::sim() are much smaller and the random slopes for Days vary more than they do for predictInterval. Both results are as expected, given the small number of subjects and observations per subject in these data. Because predictInterval() is incorporating uncertainty in the CMREs (but not the variance parameters of the random coefficients themselves), the Days slopes are closer to the overall or pooled regression slope.

Step 3: Comparison with lme4::bootMer()

As quoted above, the developers of lme4 suggest that users interested in uncertainty estimates around their predictions use lme4::bootMer() to calculate them. The documentation for lme4::bootMer() goes on to describe three implemented flavors of bootstrapped estimates:

  1. parametrically resampling both the “spherical” random effects u and the i.i.d. errors \(\epsilon\)
  2. treating the random effects as fixed and parametrically resampling the i.i.d. errors
  3. treating the random effects as fixed and semi-parametrically resampling the i.i.d. errors from the distribution of residuals.

We will compare the results from predictInterval() with each method, in turn.

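The bootstrap chunks themselves are not echoed here; what follows is a hedged sketch of how the three flavors can be requested from lme4::bootMer(). The FUN argument returns the predictions we want intervals for, and nsim is kept small only to bound run time.

mySumm <- function(.) predict(., newdata = sleepstudy, re.form = NULL)

## Method 1: parametric resampling of the random effects u and the errors
boot1 <- lme4::bootMer(fm1, mySumm, nsim = 250, use.u = FALSE, type = "parametric")
## Method 2: random effects treated as fixed, parametric resampling of the errors
boot2 <- lme4::bootMer(fm1, mySumm, nsim = 250, use.u = TRUE, type = "parametric")
## Method 3: random effects treated as fixed, semi-parametric resampling of the residuals
boot3 <- lme4::bootMer(fm1, mySumm, nsim = 250, use.u = TRUE, type = "semiparametric")

# Percentile intervals from the replicates, e.g. for method 1:
PI.boot1 <- t(apply(boot1$t, 2, quantile, c(0.5, 0.025, 0.975)))
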
Step 3a: lme4::bootMer() method 1

The intervals produced by predictInterval, represented in green, cover the point estimates produced by bootMer in every case for these 30 observations. Additionally, in almost every case, the predictInterval encompasses the entire interval presented by bootMer. Here, the estimates produced by bootMer are re-estimating the group terms, but by refitting the model, they are also taking into account the conditional variance of these terms, or theta, and provide tighter prediction intervals than the predictInterval method.

Step 3b: lme4::bootMer() method 2

Here, the results for predictInterval in green again encompass the results from bootMer, but are much wider. The bootMer estimates are ignoring the variance in the group effects, and as such, are only incorporating the residual variance and the variance in the fixed effects – similar to the arm::sim() function.

Step 3c: Comparison to rstanarm

PI.time.stan <- system.time({
  fm_stan <- stan_lmer(Reaction ~ Days + (Days|Subject), data = sleepstudy,
                       verbose = FALSE, open_progress = FALSE, refresh = -1,
                       show_messages=FALSE, chains = 1)
  zed <- posterior_predict(fm_stan)
  PI.stan <- cbind(apply(zed, 2, median), central_intervals(zed, prob=0.95))
})
#> Chain 1: 
#> Chain 1: Gradient evaluation took 0 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1: 
#> Chain 1: 
#> Chain 1: 
#> Chain 1:  Elapsed Time: 6.919 seconds (Warm-up)
#> Chain 1:                3.744 seconds (Sampling)
#> Chain 1:                10.663 seconds (Total)
#> Chain 1:


print(fm_stan)
#> stan_lmer
#>  family:       gaussian [identity]
#>  formula:      Reaction ~ Days + (Days | Subject)
#>  observations: 180
#> ------
#>             Median MAD_SD
#> (Intercept) 251.9    6.3 
#> Days         10.4    1.7 
#> 
#> Auxiliary parameter(s):
#>       Median MAD_SD
#> sigma 25.9    1.5  
#> 
#> Error terms:
#>  Groups   Name        Std.Dev. Corr
#>  Subject  (Intercept) 24           
#>           Days         7       0.07
#>  Residual             26           
#> Num. levels: Subject 18 
#> 
#> Sample avg. posterior predictive distribution of y:
#>          Median MAD_SD
#> mean_PPD 298.5    2.8 
#> 
#> ------
#> * For help interpreting the printed output see ?print.stanreg
#> * For info on the priors used see ?prior_summary.stanreg

PI.stan <- as.data.frame(PI.stan)
names(PI.stan) <- c("fit", "lwr", "upr")
PI.stan <- PI.stan[, c("fit", "upr", "lwr")]
comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI),
                   data.frame(Predict.Method="rstanArm", x=(1:nrow(PI.stan))+0.1, PI.stan))

ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) +
  geom_point() +
  geom_linerange() +
  labs(x="Index", y="Prediction w/ 95% PI") +
  theme_bw() +  theme(legend.position="bottom") +
  scale_color_brewer(type = "qual", palette = 2)

Computation time

Our initial motivation for writing this function was to develop a method for incorporating uncertainty in the CMREs for mixed models estimated on very large samples. Even for models with only modest degrees of complexity, using lme4::bootMer() quickly becomes time-prohibitive because it involves re-estimating the model for each simulation. We have seen how each alternative compares to predictInterval() substantively, but how do they compare in terms of computational time? The table below lists the output of system.time() for all six methods for calculating prediction intervals for merMod objects.

user.self sys.self elapsed
predictInterval() 0.33 0.03 0.36
arm::sim() 0.64 0.00 0.64
lme4::bootMer()-Method 1 5.46 0.00 5.47
lme4::bootMer()-Method 2 5.50 0.00 5.50
lme4::bootMer()-Method 3 5.54 0.00 5.53
rstanarm:predict 11.03 0.02 11.06

For this simple example, we see that arm::sim() is the fastest, nearly five times faster than predictInterval(). However, predictInterval() is nearly six times faster than any of the bootstrapping options via lme4::bootMer. This may not seem like a lot, but consider that the computational time required for bootstrapping is roughly proportional to the number of bootstrapped simulations requested, while predictInterval() is not because it is just a series of draws from various multivariate normal distributions. So the time ratios in the table above represent a lower bound on the computation time ratio of bootstrapping to predictInterval().

Simulation

TBC.

merTools/inst/doc/imputation.html

Analyzing Imputed Data with Multilevel Models and merTools

Jared Knowles

2019-05-12

Introduction

Multilevel models are valuable in a wide array of problem areas that involve non-experimental, or observational data. In many of these cases the data on individual observations may be incomplete. In these situations, the analyst may turn to one of many methods for filling in missing data depending on the specific problem at hand, disciplinary norms, and prior research.

One of the most common cases is to use multiple imputation. Multiple imputation involves fitting a model to the data and estimating the missing values for observations. For details on multiple imputation, and a discussion of some of the main implementations in R, look at the documentation and vignettes for the mice and Amelia packages.

The key difficulty multiple imputation creates for users of multilevel models is that the result of multiple imputation is K replicated datasets corresponding to different estimated values for the missing data in the original dataset.

For the purposes of this vignette, I will describe how to use one flavor of multiple imputation and the function in merTools to obtain estimates from a multilevel model in the presence of missing and multiply imputed data.

Missing Data and its Discontents

To demonstrate this workflow, we will use the hsb dataset in the merTools package which includes data on the math achievement of a wide sample of students nested within schools. The data has no missingness, so first we will simulate some missing data.

data(hsb)

# Create a function to randomly assign NA values

add_NA <- function(x, prob){
  z <- rbinom(length(x), 1, prob = prob)
  x[z==1] <- NA
  return(x)
}

hsb$minority <- add_NA(hsb$minority, prob = 0.05)
table(is.na(hsb$minority))
#> 
#> FALSE  TRUE 
#>  6836   349

hsb$female <- add_NA(hsb$female, prob = 0.05)
table(is.na(hsb$female))
#> 
#> FALSE  TRUE 
#>  6830   355

hsb$ses <- add_NA(hsb$ses, prob = 0.05)
table(is.na(hsb$ses))
#> 
#> FALSE  TRUE 
#>  6821   364

hsb$size <- add_NA(hsb$size, prob = 0.05)
table(is.na(hsb$size))
#> 
#> FALSE  TRUE 
#>  6848   337
# Load imputation library
library(Amelia)
# Declare the variables to include in the imputation data
varIndex <- names(hsb)
# Declare ID variables to be excluded from imputation
IDS <- c("schid", "meanses")
# Impute
impute.out <- amelia(hsb[, varIndex], idvars = IDS, 
                         noms = c("minority", "female"), 
                         m = 5)
#> -- Imputation 1 --
#> 
#>   1  2  3
#> 
#> -- Imputation 2 --
#> 
#>   1  2  3
#> 
#> -- Imputation 3 --
#> 
#>   1  2  3  4
#> 
#> -- Imputation 4 --
#> 
#>   1  2  3  4
#> 
#> -- Imputation 5 --
#> 
#>   1  2  3
summary(impute.out)
#> 
#> Amelia output with 5 imputed datasets.
#> Return code:  1 
#> Message:  Normal EM convergence. 
#> 
#> Chain Lengths:
#> --------------
#> Imputation 1:  3
#> Imputation 2:  3
#> Imputation 3:  4
#> Imputation 4:  4
#> Imputation 5:  3
#> 
#> Rows after Listwise Deletion:  5865 
#> Rows after Imputation:  7185 
#> Patterns of missingness in the data:  13 
#> 
#> Fraction Missing for original variables: 
#> -----------------------------------------
#> 
#>          Fraction Missing
#> schid          0.00000000
#> minority       0.04857342
#> female         0.04940849
#> ses            0.05066110
#> mathach        0.00000000
#> size           0.04690327
#> schtype        0.00000000
#> meanses        0.00000000
# If Amelia is not available, bootstrap resample the data instead
impute.out <- vector(mode = "list", 5)

for (i in 1:5) {
  impute.out[[i]] <- hsb[sample(nrow(hsb), nrow(hsb), replace = TRUE), ]
}

# Declare the variables to include in the imputation data
summary(impute.out)

Fitting and Summarizing a Model List

Fitting a model to the imputed datasets is very similar to fitting a single model:

fmla <- "mathach ~ minority + female + ses + meanses + (1 + ses|schid)"
mod <- lmer(fmla, data = hsb)
if(amelia_eval) {
  modList <- lmerModList(fmla, data = impute.out$imputations)
} else {
  # Use bootstrapped data instead
  modList <- lmerModList(fmla, data = impute.out)
}
#> Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl =
#> control$checkConv, : Model failed to converge with max|grad| = 0.00243706
#> (tol = 0.002, component 1)

The resulting object modList is a list of merMod objects of the same length as the number of imputation datasets. This object is assigned the class of merModList and merTools provides some convenience functions for reporting the results of this object.

Using this, we can directly compare the model fit with missing data excluded to the aggregate from the imputed models:

fixef(mod) # model with dropped missing
#> (Intercept)    minority      female         ses     meanses 
#>   14.042816   -2.701370   -1.182849    1.899924    2.992329
fixef(modList)
#> (Intercept)    minority      female         ses     meanses 
#>   13.964580   -2.501642   -1.197021    1.930981    3.218819
VarCorr(mod) # model with dropped missing
#>  Groups   Name        Std.Dev. Corr  
#>  schid    (Intercept) 1.53619        
#>           ses         0.64937  -0.566
#>  Residual             6.00409
VarCorr(modList) # aggregate of imputed models
#> $stddev
#> $stddev$schid
#> (Intercept)         ses 
#>   1.5252886   0.6712887 
#> 
#> 
#> $correlation
#> $correlation$schid
#>             (Intercept)       ses
#> (Intercept)    1.000000 -0.493787
#> ses           -0.493787  1.000000

If you want to inspect the individual models, or you do not like taking the mean across the imputation replications, you can take the merModList apart easily:

lapply(modList, fixef)
#> $imp1
#> (Intercept)    minority      female         ses     meanses 
#>   13.963960   -2.506025   -1.187511    1.956362    3.176900 
#> 
#> $imp2
#> (Intercept)    minority      female         ses     meanses 
#>   13.965839   -2.476253   -1.207036    1.941314    3.176969 
#> 
#> $imp3
#> (Intercept)    minority      female         ses     meanses 
#>   13.968526   -2.573101   -1.172855    1.934888    3.206586 
#> 
#> $imp4
#> (Intercept)    minority      female         ses     meanses 
#>   13.961077   -2.427355   -1.230145    1.927593    3.245608 
#> 
#> $imp5
#> (Intercept)    minority      female         ses     meanses 
#>   13.963497   -2.525473   -1.187560    1.894750    3.288035

And, you can always operate on any single element of the list:

fixef(modList[[1]])
#> (Intercept)    minority      female         ses     meanses 
#>   13.963960   -2.506025   -1.187511    1.956362    3.176900
fixef(modList[[2]])
#> (Intercept)    minority      female         ses     meanses 
#>   13.965839   -2.476253   -1.207036    1.941314    3.176969

Output of a Model List

print(modList)
#> $imp1
#> Linear mixed model fit by REML ['lmerMod']
#> Formula: mathach ~ minority + female + ses + meanses + (1 + ses | schid)
#>    Data: d
#> 
#> REML criterion at convergence: 46341.7
#> 
#> Scaled residuals: 
#>     Min      1Q  Median      3Q     Max 
#> -3.2121 -0.7269  0.0318  0.7616  2.9230 
#> 
#> Random effects:
#>  Groups   Name        Variance Std.Dev. Corr 
#>  schid    (Intercept)  2.3241  1.5245        
#>           ses          0.4518  0.6722   -0.45
#>  Residual             35.7631  5.9802        
#> Number of obs: 7185, groups:  schid, 160
#> 
#> Fixed effects:
#>             Estimate Std. Error t value
#> (Intercept)  13.9640     0.1736  80.455
#> minority     -2.5060     0.1983 -12.634
#> female       -1.1875     0.1586  -7.489
#> ses           1.9564     0.1207  16.215
#> meanses       3.1769     0.3606   8.810
#> 
#> Correlation of Fixed Effects:
#>          (Intr) minrty female ses   
#> minority -0.318                     
#> female   -0.483  0.015              
#> ses      -0.200  0.140  0.043       
#> meanses  -0.091  0.121  0.023 -0.235
#> 
#> $imp2
#> Linear mixed model fit by REML ['lmerMod']
#> Formula: mathach ~ minority + female + ses + meanses + (1 + ses | schid)
#>    Data: d
#> 
#> REML criterion at convergence: 46354.9
#> 
#> Scaled residuals: 
#>     Min      1Q  Median      3Q     Max 
#> -3.2035 -0.7211  0.0323  0.7596  2.9226 
#> 
#> Random effects:
#>  Groups   Name        Variance Std.Dev. Corr 
#>  schid    (Intercept)  2.3326  1.5273        
#>           ses          0.3909  0.6252   -0.52
#>  Residual             35.8580  5.9882        
#> Number of obs: 7185, groups:  schid, 160
#> 
#> Fixed effects:
#>             Estimate Std. Error t value
#> (Intercept)  13.9658     0.1733  80.570
#> minority     -2.4763     0.1993 -12.427
#> female       -1.2070     0.1584  -7.620
#> ses           1.9413     0.1193  16.273
#> meanses       3.1770     0.3591   8.846
#> 
#> Correlation of Fixed Effects:
#>          (Intr) minrty female ses   
#> minority -0.315                     
#> female   -0.480  0.007              
#> ses      -0.211  0.140  0.038       
#> meanses  -0.096  0.124  0.023 -0.235
#> convergence code: 0
#> Model failed to converge with max|grad| = 0.00243706 (tol = 0.002, component 1)
#> 
#> 
#> $imp3
#> Linear mixed model fit by REML ['lmerMod']
#> Formula: mathach ~ minority + female + ses + meanses + (1 + ses | schid)
#>    Data: d
#> 
#> REML criterion at convergence: 46342.5
#> 
#> Scaled residuals: 
#>     Min      1Q  Median      3Q     Max 
#> -3.2214 -0.7222  0.0331  0.7602  2.9194 
#> 
#> Random effects:
#>  Groups   Name        Variance Std.Dev. Corr 
#>  schid    (Intercept)  2.3567  1.5352        
#>           ses          0.4808  0.6934   -0.46
#>  Residual             35.7520  5.9793        
#> Number of obs: 7185, groups:  schid, 160
#> 
#> Fixed effects:
#>             Estimate Std. Error t value
#> (Intercept)  13.9685     0.1743  80.142
#> minority     -2.5731     0.1987 -12.947
#> female       -1.1729     0.1586  -7.393
#> ses           1.9349     0.1215  15.928
#> meanses       3.2066     0.3613   8.874
#> 
#> Correlation of Fixed Effects:
#>          (Intr) minrty female ses   
#> minority -0.321                     
#> female   -0.481  0.019              
#> ses      -0.208  0.138  0.042       
#> meanses  -0.093  0.123  0.025 -0.229
#> 
#> $imp4
#> Linear mixed model fit by REML ['lmerMod']
#> Formula: mathach ~ minority + female + ses + meanses + (1 + ses | schid)
#>    Data: d
#> 
#> REML criterion at convergence: 46359.6
#> 
#> Scaled residuals: 
#>     Min      1Q  Median      3Q     Max 
#> -3.2097 -0.7245  0.0341  0.7595  2.9047 
#> 
#> Random effects:
#>  Groups   Name        Variance Std.Dev. Corr 
#>  schid    (Intercept)  2.3002  1.5166        
#>           ses          0.4605  0.6786   -0.51
#>  Residual             35.8664  5.9889        
#> Number of obs: 7185, groups:  schid, 160
#> 
#> Fixed effects:
#>             Estimate Std. Error t value
#> (Intercept)  13.9611     0.1729  80.758
#> minority     -2.4274     0.1981 -12.251
#> female       -1.2301     0.1584  -7.765
#> ses           1.9276     0.1213  15.895
#> meanses       3.2456     0.3574   9.082
#> 
#> Correlation of Fixed Effects:
#>          (Intr) minrty female ses   
#> minority -0.317                     
#> female   -0.481  0.010              
#> ses      -0.219  0.140  0.037       
#> meanses  -0.095  0.121  0.022 -0.233
#> 
#> $imp5
#> Linear mixed model fit by REML ['lmerMod']
#> Formula: mathach ~ minority + female + ses + meanses + (1 + ses | schid)
#>    Data: d
#> 
#> REML criterion at convergence: 46360.4
#> 
#> Scaled residuals: 
#>     Min      1Q  Median      3Q     Max 
#> -3.2003 -0.7248  0.0339  0.7606  2.9253 
#> 
#> Random effects:
#>  Groups   Name        Variance Std.Dev. Corr 
#>  schid    (Intercept)  2.3192  1.5229        
#>           ses          0.4721  0.6871   -0.54
#>  Residual             35.8665  5.9889        
#> Number of obs: 7185, groups:  schid, 160
#> 
#> Fixed effects:
#>             Estimate Std. Error t value
#> (Intercept)  13.9635     0.1735  80.473
#> minority     -2.5255     0.1980 -12.756
#> female       -1.1876     0.1589  -7.472
#> ses           1.8948     0.1212  15.637
#> meanses       3.2880     0.3568   9.216
#> 
#> Correlation of Fixed Effects:
#>          (Intr) minrty female ses   
#> minority -0.319                     
#> female   -0.483  0.016              
#> ses      -0.233  0.138  0.043       
#> meanses  -0.097  0.118  0.023 -0.231
summary(modList)
#> Warning in modelFixedEff(modList): Between imputation variance is very
#> small, are imputation sets too similar?
#> [1] "Linear mixed model fit by REML"
#> Model family: 
#> lmer(formula = mathach ~ minority + female + ses + meanses + 
#>     (1 + ses | schid), data = d)
#> 
#> Fixed Effects:
#>             estimate std.error statistic           df
#> (Intercept)   13.965     0.174    80.476 2.178458e+09
#> female        -1.197     0.159    -7.525 4.567037e+05
#> meanses        3.219     0.361     8.910 1.078433e+05
#> minority      -2.502     0.201   -12.425 2.002493e+04
#> ses            1.931     0.121    15.922 2.337436e+05
#> 
#> Random Effects:
#> 
#> Error Term Standard Deviations by Level:
#> 
#> schid
#> (Intercept)         ses 
#>       1.525       0.671 
#> 
#> 
#> Error Term Correlations:
#> 
#> schid
#>             (Intercept) ses   
#> (Intercept)  1.000      -0.494
#> ses         -0.494       1.000
#> 
#> 
#> Residual Error = 5.985 
#> 
#> ---Groups
#> number of obs: 7185, groups: schid, 160
#> 
#> Model Fit Stats
#> AIC = 46369.8
#> Residual standard deviation = 5.985
fastdisp(modList)
#> Warning in modelFixedEff(x): Between imputation variance is very small, are
#> imputation sets too similar?
#> lmer(formula = mathach ~ minority + female + ses + meanses + 
#>     (1 + ses | schid), data = d)
#>             estimate std.error
#> (Intercept)    13.96      0.17
#> female         -1.20      0.16
#> meanses         3.22      0.36
#> minority       -2.50      0.20
#> ses             1.93      0.12
#> 
#> Error terms:
#>  Groups   Name        Std.Dev. Corr  
#>  schid    (Intercept) 1.53           
#>           ses         0.67     -0.45 
#>  Residual             5.99           
#> ---
#> number of obs: 7185, groups: schid, 160
#> AIC = 46369.8---

The standard errors reported for the model list include a correction, Rubin’s correction (see documentation), which adjusts for both the within-imputation and between-imputation variance.

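As a hedged illustration of what that correction does (the exact implementation lives in modelFixedEff(); see its documentation), the fixed effects can be pooled by hand with Rubin’s rules, combining the within-imputation and between-imputation variances:

ests <- sapply(modList, fixef)                       # one column of coefficients per fit
ses  <- sapply(modList, function(m) coef(summary(m))[, "Std. Error"])
m    <- length(modList)

qbar <- rowMeans(ests)                               # pooled point estimates
W    <- rowMeans(ses^2)                              # within-imputation variance
B    <- apply(ests, 1, var)                          # between-imputation variance
Tvar <- W + (1 + 1/m) * B                            # total variance (Rubin, 1987)
data.frame(estimate = qbar, std.error = sqrt(Tvar))
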
Specific Model Information Summaries

modelRandEffStats(modList)
#>                        term    group   estimate   std.error
#> 1 cor_(Intercept).ses.schid    schid -0.4937870 0.040228230
#> 2      sd_(Intercept).schid    schid  1.5252886 0.006762784
#> 3   sd_Observation.Residual Residual  5.9850808 0.004875255
#> 4              sd_ses.schid    schid  0.6712887 0.026988611
modelFixedEff(modList)
#> Warning in modelFixedEff(modList): Between imputation variance is very
#> small, are imputation sets too similar?
#>          term  estimate std.error  statistic           df
#> 1 (Intercept) 13.964580 0.1735254  80.475705 2.178458e+09
#> 2      female -1.197021 0.1590628  -7.525465 4.567037e+05
#> 3     meanses  3.218819 0.3612399   8.910476 1.078433e+05
#> 4    minority -2.501642 0.2013395 -12.424990 2.002493e+04
#> 5         ses  1.930981 0.1212754  15.922281 2.337436e+05
VarCorr(modList)
#> $stddev
#> $stddev$schid
#> (Intercept)         ses 
#>   1.5252886   0.6712887 
#> 
#> 
#> $correlation
#> $correlation$schid
#>             (Intercept)       ses
#> (Intercept)    1.000000 -0.493787
#> ses           -0.493787  1.000000

Diagnostics of List Components

Let’s apply standard diagnostics to each component of our model list.

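A hedged sketch, assuming the modList object from above: standard lme4 tools can be run on each component, for example comparing residual standard deviations and any convergence messages across the imputation fits.

sapply(modList, sigma)
lapply(modList, function(m) m@optinfo$conv$lme4$messages)
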
Model List Generics

ranef(modList)
#> $schid
#>        (Intercept)           ses
#> 1224 -1.399143e-01  0.0870214876
#> 1288 -2.029507e-02  0.0209930730
#> 1296 -1.387502e-01 -0.0089081897
#> 1308  9.060509e-02 -0.0457447758
#> 1317  6.838016e-02 -0.0366742092
#> 1358 -2.818154e-01  0.0923422578
#> 1374 -3.777665e-01  0.1339845395
#> 1433  2.725176e-01 -0.0060950439
#> 1436  2.525151e-01 -0.0222370416
#> 1461 -8.627395e-02  0.1483725160
#> 1462  3.233442e-01 -0.1333918276
#> 1477  4.567923e-02 -0.0432910687
#> 1499 -3.181307e-01  0.0955674324
#> 1637 -1.324880e-01  0.0331225424
#> 1906  4.681832e-02 -0.0137194805
#> 1909 -5.288305e-02  0.0323517862
#> 1942  1.966316e-01 -0.0404801434
#> 1946 -3.218928e-02  0.0721722386
#> 2030 -4.210847e-01  0.0250199864
#> 2208 -6.487192e-03 -0.0113346774
#> 2277  2.602357e-01 -0.2385197863
#> 2305  5.242570e-01 -0.2220979374
#> 2336  1.502060e-01 -0.0252668173
#> 2458  2.619321e-01 -0.0380037269
#> 2467 -2.416925e-01  0.0566945925
#> 2526  4.506364e-01 -0.1258336341
#> 2626  5.163045e-02  0.0327197223
#> 2629  3.403597e-01 -0.1159710875
#> 2639  6.473044e-02 -0.0779492331
#> 2651 -4.029183e-01  0.1139582321
#> 2655  6.596490e-01 -0.0621961389
#> 2658 -2.426720e-01  0.0449809087
#> 2755  1.460620e-01 -0.0819997261
#> 2768 -2.479582e-01  0.0848581743
#> 2771  4.371219e-02  0.0430727123
#> 2818 -4.491828e-03  0.0070114057
#> 2917  1.281104e-01 -0.0882143551
#> 2990  4.588319e-01 -0.0710915178
#> 2995 -2.574655e-01 -0.0069496065
#> 3013 -9.979640e-02  0.0495740012
#> 3020  8.354564e-02 -0.0436842394
#> 3039  2.283791e-01 -0.0096086898
#> 3088 -5.936495e-02 -0.0197989752
#> 3152 -3.574201e-02  0.0510562127
#> 3332 -2.514696e-01  0.0054341215
#> 3351 -3.753715e-01 -0.0329565944
#> 3377  1.119774e-01 -0.1241045407
#> 3427  8.509779e-01 -0.2159736932
#> 3498  1.405609e-02 -0.0601425130
#> 3499 -1.526413e-01 -0.0273713481
#> 3533 -1.978000e-01 -0.0130213455
#> 3610  3.139802e-01 -0.0015413182
#> 3657 -5.009415e-02  0.0910862871
#> 3688  8.387615e-03 -0.0300259707
#> 3705 -4.243127e-01  0.0006643509
#> 3716  7.620130e-02  0.0997516459
#> 3838  4.796080e-01 -0.1452775785
#> 3881 -2.859155e-01  0.0525682107
#> 3967 -5.528114e-02  0.0258997861
#> 3992  8.982978e-02 -0.0669851638
#> 3999 -7.807423e-02  0.0796825045
#> 4042 -1.629962e-01 -0.0013027027
#> 4173 -9.154334e-02  0.0481256404
#> 4223  2.919159e-01 -0.0600704384
#> 4253 -5.245953e-02 -0.1001175394
#> 4292  4.824264e-01 -0.1522695765
#> 4325  4.322369e-02  0.0219917820
#> 4350 -3.022826e-01  0.1152436365
#> 4383 -2.289804e-01  0.0816705223
#> 4410 -8.603032e-02  0.0456840195
#> 4420  1.928974e-01  0.0036406506
#> 4458 -4.177671e-02 -0.0242913321
#> 4511  2.382551e-01 -0.1070728572
#> 4523 -2.528129e-01  0.0607102350
#> 4530  5.363988e-02 -0.0206005365
#> 4642  1.198211e-01  0.0235413874
#> 4868 -2.452905e-01 -0.0323862227
#> 4931 -2.106879e-01  0.0296440221
#> 5192 -2.470670e-01  0.0316924716
#> 5404 -2.158925e-01  0.0305173800
#> 5619 -9.054503e-02  0.0949872291
#> 5640  8.676281e-02  0.0449915016
#> 5650  4.793208e-01 -0.1094774343
#> 5667 -2.832204e-01  0.0809240482
#> 5720  1.187385e-01 -0.0044879297
#> 5761  1.462343e-01  0.0108107176
#> 5762 -9.120342e-02 -0.0121329368
#> 5783 -6.455325e-02  0.0117745684
#> 5815 -1.909420e-01  0.0520751759
#> 5819 -3.427290e-01  0.0567897107
#> 5838 -4.330172e-02  0.0046177020
#> 5937  7.015990e-02 -0.0396161585
#> 6074  3.803140e-01 -0.0948826088
#> 6089  2.679939e-01 -0.0767274494
#> 6144 -2.800829e-01  0.0956555049
#> 6170  3.302820e-01  0.0011430677
#> 6291  2.019763e-01 -0.0138928506
#> 6366  1.969135e-01 -0.0448172553
#> 6397  1.875132e-01 -0.0495921582
#> 6415 -6.722504e-02  0.0665583112
#> 6443 -9.745699e-02 -0.0649203444
#> 6464 -4.715221e-02 -0.0137902909
#> 6469  2.976060e-01 -0.0506326255
#> 6484  9.621267e-02 -0.0539022742
#> 6578  3.299875e-01 -0.0731955878
#> 6600 -2.046091e-01  0.1556103932
#> 6808 -3.354575e-01  0.0366785359
#> 6816  2.075283e-01 -0.0841527359
#> 6897  2.875746e-02  0.0586708937
#> 6990 -3.389251e-01 -0.0032399959
#> 7011  5.528705e-02  0.0536884735
#> 7101 -1.317326e-01 -0.0055698337
#> 7172 -2.069157e-01  0.0215594869
#> 7232 -7.958129e-05  0.0700562217
#> 7276 -9.220686e-02  0.0705751886
#> 7332  4.078184e-02  0.0078698866
#> 7341 -2.906345e-01  0.0166689706
#> 7342  7.117216e-02 -0.0372525742
#> 7345 -2.484831e-01  0.1256428877
#> 7364  2.631941e-01 -0.0759474691
#> 7635  6.735063e-02 -0.0217257133
#> 7688  6.103311e-01 -0.1487600362
#> 7697  1.274315e-01  0.0188445088
#> 7734  5.924014e-02  0.0759096078
#> 7890 -3.085090e-01  0.0055988603
#> 7919 -1.316053e-01  0.1101965917
#> 8009 -2.093561e-01 -0.0450702852
#> 8150  1.005283e-01 -0.0757109662
#> 8165  1.738215e-01 -0.0279922313
#> 8175  1.141732e-01 -0.0476553823
#> 8188 -1.195179e-01  0.0719283103
#> 8193  5.708359e-01 -0.1110466009
#> 8202 -2.154225e-01  0.0568820672
#> 8357  1.779093e-01 -0.0170305658
#> 8367 -7.875384e-01  0.1151473352
#> 8477  7.698661e-02  0.0415470127
#> 8531 -2.223920e-01  0.0896911258
#> 8627 -4.182078e-01  0.0659411775
#> 8628  6.084020e-01 -0.1199329555
#> 8707 -9.115985e-02  0.0405670753
#> 8775 -2.215740e-01  0.0101264628
#> 8800  1.251331e-02  0.0069438812
#> 8854 -6.080997e-01  0.1551850607
#> 8857  2.188157e-01 -0.0623912476
#> 8874  1.881203e-01  0.0103110946
#> 8946 -1.465414e-01  0.0047247713
#> 8983 -1.357560e-01 -0.0179624690
#> 9021 -2.605620e-01  0.0254248923
#> 9104 -1.435123e-02 -0.0125625455
#> 9158 -2.798127e-01  0.1137418200
#> 9198  3.129854e-01 -0.0053483729
#> 9225  2.369962e-02  0.0687819911
#> 9292  2.473993e-01 -0.0701204152
#> 9340 -8.268410e-03  0.0286591632
#> 9347 -3.581963e-02  0.0499939562
#> 9359 -2.904600e-02 -0.0609316001
#> 9397 -4.930650e-01  0.1161492623
#> 9508  9.560210e-02  0.0150385488
#> 9550 -2.461625e-01  0.0770399294
#> 9586 -1.196654e-01 -0.0271003565

Cautions and Notes

Often it is desirable to include aggregate values in the level two or level three part of the model such as level 1 SES and level 2 mean SES for the group. In cases where there is missingness in either the level 1 SES values, or in the level 2 mean SES values, caution and careful thought need to be given to how to proceed with the imputation routine.

merTools/inst/doc/marginal_effects.R

## ----setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide', cache=FALSE----
library(ggplot2); library(knitr); library(merTools)
knitr::opts_chunk$set(
  cache = FALSE,
  comment = "#>",
  collapse = TRUE,
  echo = TRUE
)

## ------------------------------------------------------------------------
fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
mfx <- REmargins(merMod = fm1, newdata = sleepstudy[1:10,])
head(mfx)

## ------------------------------------------------------------------------
ggplot(mfx) + aes(x = breaks, y = fit_Subject, group = case) +
  geom_line() +
  facet_wrap(~term)

merTools/inst/doc/merToolsIntro.html

An Introduction to merTools

Jared Knowles and Carl Frederick

2019-05-12

Introduction

Working with generalized linear mixed models (GLMM) and linear mixed models (LMM) has become increasingly easy with recent advances in the lme4 package. As we have found ourselves using these models more and more within our work, we, the authors, have developed a set of tools for simplifying and speeding up common tasks for interacting with merMod objects from lme4. This package provides those tools.

Illustrating Model Effects

As the complexity of the model fit grows, it becomes harder and harder to interpret the substantive effect of parameters in the model.

Let’s start with a medium-sized example model using the InstEval data provided by the lme4 package. These data represent university lecture evaluations at ETH Zurich made by students. In this data, s is an individual student, d is an individual lecturer, studage is the semester the student is enrolled, lectage is how many semesters back the lecture with the rating took place, dept is the department of the lecture, and y is an integer 1:5 representing the ratings of the lecture from “poor” to “very good”:

library(lme4)
head(InstEval)
#>   s    d studage lectage service dept y
#> 1 1 1002       2       2       0    2 5
#> 2 1 1050       2       1       1    6 2
#> 3 1 1582       2       2       0    2 5
#> 4 1 2050       2       2       1    3 3
#> 5 2  115       2       1       0    5 2
#> 6 2  756       2       1       0    5 4
str(InstEval)
#> 'data.frame':    73421 obs. of  7 variables:
#>  $ s      : Factor w/ 2972 levels "1","2","3","4",..: 1 1 1 1 2 2 3 3 3 3 ...
#>  $ d      : Factor w/ 1128 levels "1","6","7","8",..: 525 560 832 1068 62 406 3 6 19 75 ...
#>  $ studage: Ord.factor w/ 4 levels "2"<"4"<"6"<"8": 1 1 1 1 1 1 1 1 1 1 ...
#>  $ lectage: Ord.factor w/ 6 levels "1"<"2"<"3"<"4"<..: 2 1 2 2 1 1 1 1 1 1 ...
#>  $ service: Factor w/ 2 levels "0","1": 1 2 1 2 1 1 2 1 1 1 ...
#>  $ dept   : Factor w/ 14 levels "15","5","10",..: 14 5 14 12 2 2 13 3 3 3 ...
#>  $ y      : int  5 2 5 3 2 4 4 5 5 4 ...

Starting with a simple model:

m1 <- lmer(y ~ service + lectage + studage + (1|d) + (1|s), data=InstEval)

After fitting the model, we can make use of the first function provided by merTools, fastdisp, which modifies the function arm:::display to more quickly display a summary of the model without calculating the model sigma:

library(merTools)
fastdisp(m1)
#> lmer(formula = y ~ service + lectage + studage + (1 | d) + (1 | 
#>     s), data = InstEval)
#>             coef.est coef.se
#> (Intercept)  3.22     0.02  
#> service1    -0.07     0.01  
#> lectage.L   -0.19     0.02  
#> lectage.Q    0.02     0.01  
#> lectage.C   -0.02     0.01  
#> lectage^4   -0.02     0.01  
#> lectage^5   -0.04     0.02  
#> studage.L    0.10     0.02  
#> studage.Q    0.01     0.02  
#> studage.C    0.02     0.02  
#> 
#> Error terms:
#>  Groups   Name        Std.Dev.
#>  s        (Intercept) 0.33    
#>  d        (Intercept) 0.52    
#>  Residual             1.18    
#> ---
#> number of obs: 73421, groups: s, 2972; d, 1128
#> AIC = 237655

We see some interesting effects. First, our decision to include student and lecturer effects seems justified as there is substantial variance within these groups. Second, there do appear to be some effects by age and for lectures given as a service by an outside lecturer. Let’s look at these in more detail. One way to do this would be to plot the coefficients together in a line to see which deviate from 0 and in what direction. To get a confidence interval for our fixed effect coefficients we have a number of options that represent a tradeoff between coverage and computation time – see confint.merMod for details.

An alternative is to simulate values of the fixed effects from the posterior using the function arm::sim. Our next tool, FEsim, is a convenience wrapper to do this and provide an informative data frame of the results.

feEx <- FEsim(m1, 1000)
cbind(feEx[,1] , round(feEx[, 2:4], 3))
#>      feEx[, 1]   mean median    sd
#> 1  (Intercept)  3.224  3.223 0.020
#> 2     service1 -0.071 -0.071 0.013
#> 3    lectage.L -0.187 -0.186 0.016
#> 4    lectage.Q  0.024  0.024 0.012
#> 5    lectage.C -0.024 -0.024 0.013
#> 6    lectage^4 -0.020 -0.021 0.014
#> 7    lectage^5 -0.039 -0.039 0.015
#> 8    studage.L  0.096  0.096 0.019
#> 9    studage.Q  0.006  0.006 0.016
#> 10   studage.C  0.017  0.017 0.016

We can present these results graphically, using ggplot2:

library(ggplot2)
ggplot(feEx[feEx$term!= "(Intercept)", ]) + 
  aes(x = term, ymin = median - 1.96 * sd, 
      ymax = median + 1.96 * sd, y = median) + 
  geom_pointrange() + 
  geom_hline(yintercept = 0, size = I(1.1), color = I("red")) + 
  coord_flip() + 
  theme_bw() + labs(title = "Coefficient Plot of InstEval Model", 
                    x = "Median Effect Estimate", y = "Evaluation Rating")

However, an easier option is:

plotFEsim(feEx) + 
  theme_bw() + labs(title = "Coefficient Plot of InstEval Model", 
                    x = "Median Effect Estimate", y = "Evaluation Rating")

Random Effects

Next, we might be interested in exploring the random effects. Again, we create a dataframe of the values of the simulation of these effects for the individual levels.

reEx <- REsim(m1)
head(reEx)
#>   groupFctr groupID        term        mean      median        sd
#> 1         s       1 (Intercept)  0.20126170  0.19775791 0.3009069
#> 2         s       2 (Intercept) -0.08231243 -0.05911867 0.3174702
#> 3         s       3 (Intercept)  0.28136487  0.27231003 0.3022773
#> 4         s       4 (Intercept)  0.22734481  0.21679242 0.2961077
#> 5         s       5 (Intercept)  0.06441259  0.06801963 0.3021177
#> 6         s       6 (Intercept)  0.11114881  0.11499146 0.2226471

The result is a dataframe with estimates of the values of each of the random effects provided by the arm::sim() function. groupID represents the identifiable level of the grouping variable for one random effect, term represents whether the simulated values are for an intercept or which slope, and groupFctr identifies which of the (1|x) terms the values represent. To make unique identifiers for each term, we need to use both the groupID and the groupFctr term in case these two variables use overlapping label names for their groups. In this case:

table(reEx$term)
#> 
#> (Intercept) 
#>        4100
table(reEx$groupFctr)
#> 
#>    d    s 
#> 1128 2972

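For example, a unique key per simulated effect can be built by pasting the two columns together (a minimal illustration, not part of the original vignette):

reEx$id <- paste(reEx$groupFctr, reEx$groupID, sep = ":")
length(unique(reEx$id))
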
Most important is producing caterpillar or dotplots of these terms to explore their variation. This is easily accomplished with the dotplot function:

lattice::dotplot(ranef(m1, condVar=TRUE))

However, these graphics do not provide much control over the results. Instead, we can use the plotREsim function in merTools to gain more control over plotting of the random effect simulations.

p1 <- plotREsim(reEx)
p1

The result is a ggplot2 object which can be modified however the user sees fit. Here, we’ve established that most student and professor effects are indistinguishable from zero, but there do exist extreme outliers with both high and low averages that need to be accounted for.

Substantive Effects

A logical next line of questioning is to see how much of the variation in a rating can be caused by changing the student rater and how much is due to the fixed effects we identified above. This is a very difficult problem to solve, but using simulation we can examine the model behavior under a range of scenarios to understand how the model is reflecting changes in the data. To do this, we use another set of functions available in merTools.

The simplest option is to pick an observation at random and then modify its values deliberately to see how the prediction changes in response. merTools makes this task very simple:

example1 <- draw(m1, type = 'random')
head(example1)
#>       y service lectage studage    d    s
#> 71529 2       0       3       6 1306 2903

The draw function takes a random observation from the data in the model and extracts it as a dataframe. We can now do a number of operations to this observation:

# predict it
predict(m1, newdata = example1)
#>    71529 
#> 2.815463
# change values
example1$service <- "1"
predict(m1, newdata = example1)
#>    71529 
#> 2.744619

More interestingly, let’s programmatically modify this observation to see how the predicted value changes if we hold everything but one variable constant.

example2 <- wiggle(example1, varlist = "lectage", 
          valueslist = list(c("1", "2", "3", "4", "5", "6")))

example2
#>        y service lectage studage    d    s
#> 71529  2       1       1       6 1306 2903
#> 715291 2       1       2       6 1306 2903
#> 715292 2       1       3       6 1306 2903
#> 715293 2       1       4       6 1306 2903
#> 715294 2       1       5       6 1306 2903
#> 715295 2       1       6       6 1306 2903

The wiggle function creates a new dataframe of copies of the observation, each with a different value of just one variable. By chaining together wiggle calls, we can see how predictions behave under a number of different scenarios simultaneously, as sketched below.
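
For instance, a second wiggle call layered on top of example2 varies two inputs at once. A sketch, assuming studage takes the levels 2, 4, 6, and 8 in InstEval:

example2b <- wiggle(example2, varlist = "studage", 
          valueslist = list(c("2", "4", "6", "8")))
nrow(example2b) # each of the 6 lectage scenarios is now repeated for each studage value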

example2$yhat <- predict(m1, newdata = example2)

ggplot(example2, aes(x = lectage, y = yhat)) + geom_line(aes(group = 1)) + 
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) + 
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) + 
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)

The result allows us to graphically display the effect of each level of lectage on an observation that is otherwise identical. This is plotted here against a horizontal line representing the mean of the observed ratings, and two finer lines showing plus or minus one standard deviation of the mean.

This is nice, but a single randomly selected observation may not be very representative or meaningful. To address this, we can instead take the average observation:

example3 <- draw(m1, type = 'average')
example3
#>          y service lectage studage    d    s
#> 1 3.205745       0       1       6 1510 2237

Here, the average observation is identified based on either the modal observation for factors or on the mean for numeric variables. Then, the random effect terms are set to the level equivalent to the median effect – very close to 0.
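
As a rough check, REquantile can be used to look up a group whose effect sits near the middle of the distribution. A sketch (the levels returned may differ from those shown above):

REquantile(m1, quantile = 0.5, groupFctr = "s")
REquantile(m1, quantile = 0.5, groupFctr = "d")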

example3 <- wiggle(example3, varlist = "service", 
          valueslist = list(c("0", "1")))
example3$yhat <- predict(m1, newdata = example3)

ggplot(example3, aes(x = service, y = yhat)) + geom_line(aes(group = 1)) + 
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) + 
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) + 
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)

Here we can see that for the average observation, whether the lecture is outside of the home department has a very slight negative effect on the overall rating. Might the individual professor or student have more of an impact on the overall rating? To answer this question we need to wiggle the same observation across a wide range of student or lecturer effects.

How do we identify this range? merTools provides the REquantile function which helps to identify which levels of the grouping terms correspond to which quantile of the magnitude of the random effects:

REquantile(m1, quantile = 0.25, groupFctr = "s")
#> [1] "446"
REquantile(m1, quantile = 0.25, groupFctr = "d")
#> [1] "18"

Here we can see that group level 446 corresponds to the 25th percentile of the effect for the student groups, and level 18 corresponds to the 25th percentile for the instructor group. Using this information we can reassign a specific observation to varying magnitudes of grouping term effects to see how much they might influence our final prediction.

example4 <- draw(m1, type = 'average')
example4 <- wiggle(example4, varlist = "s", 
                      list(REquantile(m1, quantile = seq(0.1, 0.9, .1), 
                                 groupFctr = "s")))

example4$yhat <- predict(m1, newdata = example4)

ggplot(example4, aes(x = reorder(s, -yhat), y = yhat)) + 
  geom_line(aes(group = 1)) + 
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) + 
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) + 
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)

This figure is very interesting because it shows that moving across the range of student effects can have a larger impact on the score than the fixed effects we observed above. That is, getting a “generous” or a “stingy” rater can have a substantial impact on the final rating.
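
The same exercise can be repeated for the instructor effects to compare their reach. A sketch that mirrors the student code above, swapping in the d grouping factor:

example4b <- draw(m1, type = 'average')
example4b <- wiggle(example4b, varlist = "d", 
                      list(REquantile(m1, quantile = seq(0.1, 0.9, .1), 
                                 groupFctr = "d")))
example4b$yhat <- predict(m1, newdata = example4b)
range(example4b$yhat) # compare this spread to the student-effect spread above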

But, we can do even better. First, we can move beyond the average observation by taking advantage of the varList option to draw, which allows us to specify a subset of the data over which to compute the average.

subExample <- list(studage = "2", lectage = "4")
example5 <- draw(m1, type = 'average', varList = subExample)
example5
#>          y service lectage studage    d    s
#> 1 3.087193       0       4       2 1510 2237

Now we have the average observation for students with a student age of 2 and a lecture age of 4. We can then follow the same procedure as before to explore effects within this subsample. Before we do that, let’s fit a slightly more complex model that includes a random slope.

data(VerbAgg)
m2 <- glmer(r2 ~ Anger + Gender + btype + situ +
                (1|id) + (1 + Gender|item), family = binomial, 
              data = VerbAgg)
#> Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl =
#> control$checkConv, : Model failed to converge with max|grad| = 0.0071146
#> (tol = 0.001, component 1)

example6 <- draw(m2, type = 'average', varList = list("id" = "149"))
example6$btype <- "scold"
example6$situ <- "self"


tempdf <- wiggle(example6, varlist = "Gender", list(c("M", "F")))
tempdf <- wiggle(tempdf, varlist = "item", 
                    list(unique(VerbAgg$item)))
tempdf$yhat <- predict(m2, newdata = tempdf, type = "response", 
                       allow.new.levels = TRUE)

ggplot(tempdf, aes(x = item, y = yhat, group = Gender)) + 
  geom_line(aes(color = Gender))+ 
  theme_bw() + ylim(c(0, 1)) + 
  theme(axis.text.x = element_text(angle = 20, hjust=1), 
        legend.position = "bottom") + labs(x = "Item", y = "Probability")

Here we’ve shown that both the item intercept and the Gender slope for item simultaneously affect our predicted value, which is why the two lines of predicted values across the items are not parallel. While we can see this by looking at the summary of the model object (or, for larger models, at the quicker fastdisp from merTools, sketched below), it is not intuitive what that effect looks like across different scenarios. merTools gives us the machinery to investigate this.
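
For reference, a quick display of this random slope model is available via fastdisp, which summarizes the fixed effects and variance components without computing the model sigma. A sketch (output omitted):

fastdisp(m2)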

Uncertainty

The above examples make use of simulation to show the model behavior after changing some values in a dataset. However, until now, we’ve focused on using point estimates to represent these changes. The use of predicted point estimates without incorporating any uncertainty can lead to overconfidence in the precision of the model.

In the predictInterval function, discussed in more detail in another package vignette, we provide a way to incorporate three out of the four types of uncertainty inherent in a model. These are:

  1. Overall model uncertainty
  2. Uncertainty in fixed effect values
  3. Uncertainty in random effect values
  4. Uncertainty in the distribution of the random effects

Sources 1-3 are incorporated in the results of predictInterval, while capturing source 4 would require making use of the bootMer function -- options discussed in greater detail elsewhere. The main advantage of predictInterval is that it is fast: by leveraging the arm::sim() function, it can generate prediction intervals for individual observations from very large models very quickly. And it works a lot like predict:

exampPreds <- predictInterval(m2, newdata = tempdf, 
                              type = "probability", level = 0.8)

tempdf <- cbind(tempdf, exampPreds)

ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr, 
                   group = Gender)) + 
  geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black"))+ 
  theme_bw() + ylim(c(0, 1)) + 
  theme(axis.text.x = element_text(angle = 20), 
        legend.position = "bottom")+ labs(x = "Item", y = "Probability")

Here we can see that there is barely any gender difference in terms of the area covered by the prediction intervals. However, by default, this approach includes the residual variance of the model. If we instead focus just on the uncertainty in the random and fixed effects, we get:

exampPreds <- predictInterval(m2, newdata = tempdf, 
                              type = "probability", 
                              include.resid.var = FALSE, level = 0.8)
tempdf <- cbind(tempdf[, 1:8], exampPreds)

ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr, 
                   group = Gender)) + 
  geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black"))+ 
  geom_line(aes(color = Gender)) + 
  theme_bw() + ylim(c(0, 1)) + 
  theme(axis.text.x = element_text(angle = 20), 
        legend.position = "bottom") + labs(x = "Item", y = "Probability")

Here, more of a difference between the genders emerges, but we can see that the differences are not estimated very precisely.
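
One rough way to quantify this is to compare the average width of the intervals to the gap between the gender point estimates. A sketch, assuming the columns created above:

tempdf$width <- tempdf$upr - tempdf$lwr
tapply(tempdf$width, tempdf$Gender, mean) # average interval width by gender
tapply(tempdf$fit, tempdf$Gender, mean)   # average point estimate by gender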

merTools/inst/doc/Using_predictInterval.Rmd0000644000176200001440000005126213402510755020560 0ustar liggesusers--- title: "Prediction Intervals from merMod Objects" author: "Jared Knowles and Carl Frederick" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Prediction Intervals from merMod Objects} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide', cache=FALSE} library(ggplot2); library(knitr); library(merTools) knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo = TRUE ) rstanarm_eval <- "rstanarm" %in% rownames(installed.packages()) ``` ## Introduction Fitting (generalized) linear mixed models, (G)LMM, to very large data sets is becoming increasingly easy, but understanding and communicating the uncertainty inherent in those models is not. As the documentation for `lme4::predict.merMod()` notes: > There is no option for computing standard errors of predictions because it is > difficult to define an efficient method that incorporates uncertainty in the > variance parameters; we recommend `lme4::bootMer()` for this task. We agree that, short of a fully Bayesian analysis, bootstrapping is the gold-standard for deriving a prediction interval predictions from a (G)LMM, but the time required to obtain even a respectable number of replications from `bootMer()` quickly becomes prohibitive when the initial model fit is on the order of hours instead of seconds. The only other alternative we have identified for these situations is to use the `arm::sim()` function to simulate values. Unfortunately, this only takes variation of the fixed coefficients and residuals into account, and assumes the conditional modes of the random effects are fixed. We developed the `predictInterval()` function to incorporate the variation in the conditional modes of the random effects (CMRE, a.k.a. BLUPs in the LMM case) into calculating prediction intervals. Ignoring the variance in the CMRE results in overly confident estimates of predicted values and in cases where the precision of the grouping term varies across levels of grouping terms, creates the illusion of difference where none may exist. The importance of accounting for this variance comes into play sharply when comparing the predictions of different models across observations. We take the warning from `lme4::predict.merMod()` seriously, but view this method as a decent first approximation the full bootstrap analysis for (G)LMMs fit to very large data sets. ## Conceptual description In order to generate a proper prediction interval, a prediction must account for three sources of uncertainty in mixed models: 1. the residual (observation-level) variance, 2. the uncertainty in the fixed coefficients, and 3. the uncertainty in the variance parameters for the grouping factors. A fourth, uncertainty about the data, is beyond the scope of any prediction method. As we mentioned above, the `arm:sim()` function incorporates the first two sources of variation but not the third , while bootstrapping using `lme4::bootMer()` does incorporate all three sources of uncertainty because it re-estimates the model using random samples of the data. When inference about the values of the CMREs is of interest, it would be nice to incorporate some degree of uncertainty in those estimates when comparing observations across groups. 
`predictInterval()` does this by drawing values of the CMREs from the conditional variance-covariance matrix of the random affects accessible from `lme4::ranef(model, condVar=TRUE)`. Thus, `predictInterval()` incorporates all of the uncertainty from sources one and two, and part of the variance from source 3, but the variance parameters themselves are treated as fixed. To do this, `predictInterval()` takes an estimated model of class `merMod` and, like `predict()`, a data.frame upon which to make those predictions and: 1. extracts the fixed and random coefficients 2. takes `n` draws from the multivariate normal distribution of the fixed and random coefficients (separately) 3. calculates the linear predictor for each row in `newdata` based on these draws, and 4. optionally incorporates the residual variation (per the `arm::sim()` function), and, 5. returns newdata with the lower and upper limits of the prediction interval and the mean or median of the simulated predictions Currently, the supported model types are linear mixed models and mixed logistic regression models. The prediction data set *can* include levels that are not in the estimation model frame. The prediction intervals for such observations only incorporate uncertainty from fixed coefficient estimates and the residual level of variation. ## Comparison to existing methods What do the differences between `predictInterval()` and the other methods for constructing prediction intervals mean in practice? We would expect to see `predictInterval()` to produce prediction intervals that are wider than all methods except for the `bootMer()` method. We would also hope that the prediction point estimate from other methods falls within the prediction interval produced by `predictInterval()`. Ideally, the predicted point estimate produced by `predictInterval()` would fall close to that produced by `bootMer()`. This section compares the results of `predictInterval()` with those obtained using `arm::sim()` and `lme4::bootMer()` using the sleepstudy data from `lme4`. These data contain reaction time observations for 10 days on 18 subjects. The data are sorted such that the first 10 observations are days one through ten for subject 1, the next 10 are days one through ten for subject 2 and so on. The example model that we are estimating below estimates random intercepts and a random slope for the number of days. ###Step 1: Estimating the model and using `predictInterval()` First, we will load the required packages and data and estimate the model: ```{r Prep, message=FALSE, warning=FALSE} set.seed(271828) data(sleepstudy) fm1 <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy) display(fm1) ``` Then, calculate prediction intervals using `predictInterval()`. The `predictInterval` function has a number of user configurable options. In this example, we use the original data `sleepstudy` as the newdata. We pass the function the `fm1` model we fit above. We also choose a 95% interval with `level = 0.95`, though we could choose a less conservative prediction interval. We make 1,000 simulations for each observation `n.sims = 1000`. We set the point estimate to be the median of the simulated values, instead of the mean. We ask for the linear predictor back, if we fit a logistic regression, we could have asked instead for our predictions on the probability scale instead. Finally, we indicate that we want the predictions to incorporate the residual variance from the model -- an option only available for `lmerMod` objects. 
```{r predInt} PI.time <- system.time( PI <- predictInterval(merMod = fm1, newdata = sleepstudy, level = 0.95, n.sims = 1000, stat = "median", type="linear.prediction", include.resid.var = TRUE) ) ``` Here is the first few rows of the object `PI`: ```{r Inspect predInt, results="asis", echo=FALSE} kable(head(PI)) ``` The three columns are the median (`fit`) and limits of the 95% prediction interval (`upr` and `lwr`) because we set `level=0.95`. The following figure displays the output graphically for the first 30 observations. ```{r Inspect predInt 2, fig.width=7, fig.align="center"} library(ggplot2); ggplot(aes(x=1:30, y=fit, ymin=lwr, ymax=upr), data=PI[1:30,]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() ``` #### Step 1a: Adjusting for correlation between fixed and random effects The prediction intervals above do not correct for correlations between fixed and random effects. This tends to lead to predictive intervals that are too conservative, especially for existing groups when there is a lot of data on relatively few groups. In that case, a significant portion of the uncertainty in the prediction can be due to variance in the fixed intercept which is anti-correlated with variance in the random intercept effects. For instance, it does not actually matter if the fixed intercept is 5 and the random intercept effects are -2, 1, and 1, versus a fixed intercept of 6 and random intercept effects of -3, 0, and 0. (The latter situation will never be the MLE, but it can occur in this package's simulations.) To show this issue, we'll use the sleep study model, predicting the reaction times of subjects after experiencing sleep deprivation: ```{r prepSleep, message=FALSE, warning=FALSE} fm1 <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy) display(fm1) ``` Let's use the model to give an interval for the true average body fat of a large group of students like the first one in the study — a 196cm female baseball player: ```{r showSleep, message=FALSE, warning=FALSE} sleepstudy[1,] predictInterval(fm1, sleepstudy[1,], include.resid.var=0) #predict the average body fat for a group of 196cm female baseball players ``` There are two ways to get predictInterval to create less-conservative intervals to deal with this. The first is just to tell it to consider certain fixed effects as fully-known (that is, with an effectively 0 variance.) This is done using the `ignore.fixed.effects` argument. ```{r ShowFat2, message=FALSE, warning=FALSE} predictInterval(fm1, sleepstudy[1,], include.resid.var=0, ignore.fixed.terms = 1) # predict the average reaction time for a subject at day 0, taking the global intercept # (mean reaction time) as fully known predictInterval(fm1, sleepstudy[1,], include.resid.var=0, ignore.fixed.terms = "(Intercept)") #Same as above predictInterval(fm1, sleepstudy[1,], include.resid.var=0, ignore.fixed.terms = 1:2) # as above, taking the first two fixed effects (intercept and days effect) as fully known ``` The second way is to use an ad-hoc variance adjustment, with the `fix.intercept.variance` argument. 
This takes the model's intercept variance $\hat\sigma^2_\mu$ and adjusts it to: $$\hat\sigma\prime^2_\mu = \hat\sigma^2_\mu-\Sigma_{levels}\frac{1}{\Sigma_{groups(level)}1/(\hat\sigma^2_{level}+sigma^2_{group})}$$ In other words, it assumes the given intercept variance incorporates spurious variance for each level, where each of the spurious variance terms has a precision equal to the of the precisions due to the individual groups at that level. ```{r ShowFat3, message=FALSE, warning=FALSE} predictInterval(fm1, sleepstudy[1,], include.resid.var=0, fix.intercept.variance = TRUE) # predict the average reaction time for a subject at day 0,, using an ad-hoc # correction for the covariance of the intercept with the random intercept effects. ``` A few notes about these two arguments: * `fix.intercept.variance=T` is redundant with `ignore.fixed.effects=1`, but not vice versa. * These corrections should NOT be used when predicting outcomes for groups not present in the original data. ### Step 2: Comparison with `arm::sim()` How does the output above compare to what we could get from `arm::sim()`? ```{r arm.Sim, fig.width=7, fig.height=4, fig.align="center"} PI.arm.time <- system.time( PI.arm.sims <- arm::sim(fm1, 1000) ) PI.arm <- data.frame( fit=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.500)), upr=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.975)), lwr=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.025)) ) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="arm::sim()", x=(1:nrow(PI.arm))+0.1, PI.arm)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` The prediction intervals from `arm:sim()` are much smaller and the random slope for days vary more than they do for `predictInterval`. Both results are as expected, given the small number of subjects and observations per subject in these data. Because `predictInterval()` is incorporating uncertainty in the CMFEs (but not the variance parameters of the random coefficients themselves), the Days slopes are closer to the overall or pooled regression slope. ###Step 3: Comparison with `lme4::bootMer()` As quoted above, the developers of lme4 suggest that users interested in uncertainty estimates around their predictions use `lme4::bootmer()` to calculate them. The documentation for `lme4::bootMer()` goes on to describe three implemented flavors of bootstrapped estimates: 1. parametrically resampling both the *"spherical"* random effects *u* and the i.i.d. errors $\epsilon$ 2. treating the random effects as fixed and parametrically resampling the i.i.d. errors 3. treating the random effects as fixed and semi-parametrically resampling the i.i.d. errors from the distribution of residuals. We will compare the results from `predictInterval()` with each method, in turn. #### Step 3a: `lme4::bootMer()` method 1 ```{r bootMer.1, fig.width=7, fig.height=4, fig.align="center"} ##Functions for bootMer() and objects ####Return predicted values from bootstrap mySumm <- function(.) 
{ predict(., newdata=sleepstudy, re.form=NULL) } ####Collapse bootstrap into median, 95% PI sumBoot <- function(merBoot) { return( data.frame(fit = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.5, na.rm=TRUE))), lwr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.025, na.rm=TRUE))), upr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.975, na.rm=TRUE))) ) ) } ##lme4::bootMer() method 1 PI.boot1.time <- system.time( boot1 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=FALSE, type="parametric") ) PI.boot1 <- sumBoot(boot1) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 1", x=(1:nrow(PI.boot1))+0.1, PI.boot1)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` The intervals produced by `predictInterval`, represented in green, cover the point estimates produced by `bootMer` in every case for these 30 observations. Additionally, in almost every case, the `predictInterval` encompasses the entire interval presented by `bootMer`. Here, the estimates produced by `bootMer` are re-estimating the group terms, but by refitting the model, they are also taking into account the conditional variance of these terms, or `theta`, and provide tighter prediction intervals than the `predictInterval` method. ####Step 3b: `lme4::bootMer()` method 2 ```{r bootMer.2, fig.width=7, fig.height=4, fig.align="center"} ##lme4::bootMer() method 2 PI.boot2.time <- system.time( boot2 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=TRUE, type="parametric") ) PI.boot2 <- sumBoot(boot2) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 2", x=(1:nrow(PI.boot2))+0.1, PI.boot2)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` Here, the results for `predictInterval` in green again encompass the results from `bootMer`, but are much wider. The `bootMer` estimates are ignoring the variance in the group effects, and as such, are only incorporating the residual variance and the variance in the fixed effects -- similar to the `arm::sim()` function. #### Step 3c: `lme4::bootMer()` method 3 ```{r bootMer.3, fig.width=7, fig.height=4, fig.align="center"} ##lme4::bootMer() method 3 PI.boot3.time <- system.time( boot3 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=TRUE, type="semiparametric") ) PI.boot3 <- sumBoot(boot3) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 3", x=(1:nrow(PI.boot3))+0.1, PI.boot3)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` These results are virtually identical to those above. 
#### Step 3c: Comparison to rstanarm ```{r echo=FALSE, message=FALSE, eval = rstanarm_eval} library(rstanarm) central_intervals <- function(x, prob) { if (!identical(length(prob), 1L) || prob <= 0 || prob >= 1) stop("'prob' should be a single number greater than 0 and less than 1.", call. = FALSE) alpha <- (1 - prob) / 2 probs <- c(alpha, 1 - alpha) labs <- paste0(100 * probs, "%") out <- t(apply(x, 2L, quantile, probs = probs)) structure(out, dimnames = list(colnames(x), labs)) } ``` ```{r, message=FALSE, fig.width=7, fig.height=4, fig.align="center", eval = rstanarm_eval} PI.time.stan <- system.time({ fm_stan <- stan_lmer(Reaction ~ Days + (Days|Subject), data = sleepstudy, verbose = FALSE, open_progress = FALSE, refresh = -1, show_messages=FALSE, chains = 1) zed <- posterior_predict(fm_stan) PI.stan <- cbind(apply(zed, 2, median), central_intervals(zed, prob=0.95)) }) print(fm_stan) PI.stan <- as.data.frame(PI.stan) names(PI.stan) <- c("fit", "lwr", "upr") PI.stan <- PI.stan[, c("fit", "upr", "lwr")] comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="rstanArm", x=(1:nrow(PI.stan))+0.1, PI.stan)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` ### Computation time Our initial motivation for writing this function was to develop a method for incorporating uncertainty in the CMFEs for mixed models estimated on very large samples. Even for models with only modest degrees of complexity, using `lme4::bootMer()` quickly becomes time prohibitive because it involves re-estimating the model for each simulation. We have seen how each alternative compares to `predictInterval()` substantively, but how do they compare in terms of computational time? The table below lists the output of `system.time()` for all five methods for calculating prediction intervals for `merMod` objects. ```{r, echo=FALSE} if (rstanarm_eval) { times <- rbind(PI.time, PI.arm.time, PI.boot1.time, PI.boot2.time, PI.boot3.time, PI.time.stan)[, 1:3] rownames(times) <- c( "predictInterval()", "arm::sim()", "lme4::bootMer()-Method 1", "lme4::bootMer()-Method 2", "lme4::bootMer()-Method 3", "rstanarm:predict" ) kable(times) } else { times <- rbind(PI.time, PI.arm.time, PI.boot1.time, PI.boot2.time, PI.boot3.time)[, 1:3] rownames(times) <- c( "predictInterval()", "arm::sim()", "lme4::bootMer()-Method 1", "lme4::bootMer()-Method 2", "lme4::bootMer()-Method 3" ) kable(times) } ``` For this simple example, we see that `arm::sim()` is the fastest--nearly five times faster than `predictInterval()`. However, `predictInterval()` is nearly six times faster than any of the bootstrapping options via `lme4::bootMer`. This may not seem like a lot, but consider that the computational time for required for bootstrapping is roughly proportional to the number of bootstrapped simulations requested ... `predictInterval()` is not because it is just a series of draws from various multivariate normal distributions, so the time ratios in the table below represents the lowest bound of the computation time ratio of bootstrapping to `predictInterval()`. ## Simulation TBC. 
merTools/inst/doc/Using_predictInterval.R0000644000176200001440000002067513466137505020253 0ustar liggesusers## ----setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide', cache=FALSE---- library(ggplot2); library(knitr); library(merTools) knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo = TRUE ) rstanarm_eval <- "rstanarm" %in% rownames(installed.packages()) ## ----Prep, message=FALSE, warning=FALSE---------------------------------- set.seed(271828) data(sleepstudy) fm1 <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy) display(fm1) ## ----predInt------------------------------------------------------------- PI.time <- system.time( PI <- predictInterval(merMod = fm1, newdata = sleepstudy, level = 0.95, n.sims = 1000, stat = "median", type="linear.prediction", include.resid.var = TRUE) ) ## ----Inspect predInt, results="asis", echo=FALSE------------------------- kable(head(PI)) ## ----Inspect predInt 2, fig.width=7, fig.align="center"------------------ library(ggplot2); ggplot(aes(x=1:30, y=fit, ymin=lwr, ymax=upr), data=PI[1:30,]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() ## ----prepSleep, message=FALSE, warning=FALSE----------------------------- fm1 <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy) display(fm1) ## ----showSleep, message=FALSE, warning=FALSE----------------------------- sleepstudy[1,] predictInterval(fm1, sleepstudy[1,], include.resid.var=0) #predict the average body fat for a group of 196cm female baseball players ## ----ShowFat2, message=FALSE, warning=FALSE------------------------------ predictInterval(fm1, sleepstudy[1,], include.resid.var=0, ignore.fixed.terms = 1) # predict the average reaction time for a subject at day 0, taking the global intercept # (mean reaction time) as fully known predictInterval(fm1, sleepstudy[1,], include.resid.var=0, ignore.fixed.terms = "(Intercept)") #Same as above predictInterval(fm1, sleepstudy[1,], include.resid.var=0, ignore.fixed.terms = 1:2) # as above, taking the first two fixed effects (intercept and days effect) as fully known ## ----ShowFat3, message=FALSE, warning=FALSE------------------------------ predictInterval(fm1, sleepstudy[1,], include.resid.var=0, fix.intercept.variance = TRUE) # predict the average reaction time for a subject at day 0,, using an ad-hoc # correction for the covariance of the intercept with the random intercept effects. ## ----arm.Sim, fig.width=7, fig.height=4, fig.align="center"-------------- PI.arm.time <- system.time( PI.arm.sims <- arm::sim(fm1, 1000) ) PI.arm <- data.frame( fit=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.500)), upr=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.975)), lwr=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.025)) ) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="arm::sim()", x=(1:nrow(PI.arm))+0.1, PI.arm)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ## ----bootMer.1, fig.width=7, fig.height=4, fig.align="center"------------ ##Functions for bootMer() and objects ####Return predicted values from bootstrap mySumm <- function(.) 
{ predict(., newdata=sleepstudy, re.form=NULL) } ####Collapse bootstrap into median, 95% PI sumBoot <- function(merBoot) { return( data.frame(fit = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.5, na.rm=TRUE))), lwr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.025, na.rm=TRUE))), upr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.975, na.rm=TRUE))) ) ) } ##lme4::bootMer() method 1 PI.boot1.time <- system.time( boot1 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=FALSE, type="parametric") ) PI.boot1 <- sumBoot(boot1) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 1", x=(1:nrow(PI.boot1))+0.1, PI.boot1)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ## ----bootMer.2, fig.width=7, fig.height=4, fig.align="center"------------ ##lme4::bootMer() method 2 PI.boot2.time <- system.time( boot2 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=TRUE, type="parametric") ) PI.boot2 <- sumBoot(boot2) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 2", x=(1:nrow(PI.boot2))+0.1, PI.boot2)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ## ----bootMer.3, fig.width=7, fig.height=4, fig.align="center"------------ ##lme4::bootMer() method 3 PI.boot3.time <- system.time( boot3 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=TRUE, type="semiparametric") ) PI.boot3 <- sumBoot(boot3) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 3", x=(1:nrow(PI.boot3))+0.1, PI.boot3)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ## ----echo=FALSE, message=FALSE, eval = rstanarm_eval--------------------- library(rstanarm) central_intervals <- function(x, prob) { if (!identical(length(prob), 1L) || prob <= 0 || prob >= 1) stop("'prob' should be a single number greater than 0 and less than 1.", call. 
= FALSE) alpha <- (1 - prob) / 2 probs <- c(alpha, 1 - alpha) labs <- paste0(100 * probs, "%") out <- t(apply(x, 2L, quantile, probs = probs)) structure(out, dimnames = list(colnames(x), labs)) } ## ---- message=FALSE, fig.width=7, fig.height=4, fig.align="center", eval = rstanarm_eval---- PI.time.stan <- system.time({ fm_stan <- stan_lmer(Reaction ~ Days + (Days|Subject), data = sleepstudy, verbose = FALSE, open_progress = FALSE, refresh = -1, show_messages=FALSE, chains = 1) zed <- posterior_predict(fm_stan) PI.stan <- cbind(apply(zed, 2, median), central_intervals(zed, prob=0.95)) }) print(fm_stan) PI.stan <- as.data.frame(PI.stan) names(PI.stan) <- c("fit", "lwr", "upr") PI.stan <- PI.stan[, c("fit", "upr", "lwr")] comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="rstanArm", x=(1:nrow(PI.stan))+0.1, PI.stan)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ## ---- echo=FALSE--------------------------------------------------------- if (rstanarm_eval) { times <- rbind(PI.time, PI.arm.time, PI.boot1.time, PI.boot2.time, PI.boot3.time, PI.time.stan)[, 1:3] rownames(times) <- c( "predictInterval()", "arm::sim()", "lme4::bootMer()-Method 1", "lme4::bootMer()-Method 2", "lme4::bootMer()-Method 3", "rstanarm:predict" ) kable(times) } else { times <- rbind(PI.time, PI.arm.time, PI.boot1.time, PI.boot2.time, PI.boot3.time)[, 1:3] rownames(times) <- c( "predictInterval()", "arm::sim()", "lme4::bootMer()-Method 1", "lme4::bootMer()-Method 2", "lme4::bootMer()-Method 3" ) kable(times) } merTools/inst/doc/imputation.R0000644000176200001440000000674313466137510016134 0ustar liggesusers## ----setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide'---- knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo=TRUE, fig.width = 7 ) library(knitr); library(merTools) amelia_eval <- "Amelia" %in% rownames(installed.packages()) amelia_uneval <- !amelia_eval ## ------------------------------------------------------------------------ data(hsb) # Create a function to randomly assign NA values add_NA <- function(x, prob){ z <- rbinom(length(x), 1, prob = prob) x[z==1] <- NA return(x) } hsb$minority <- add_NA(hsb$minority, prob = 0.05) table(is.na(hsb$minority)) hsb$female <- add_NA(hsb$female, prob = 0.05) table(is.na(hsb$female)) hsb$ses <- add_NA(hsb$ses, prob = 0.05) table(is.na(hsb$ses)) hsb$size <- add_NA(hsb$size, prob = 0.05) table(is.na(hsb$size)) ## ----impute, message=FALSE, eval = amelia_eval--------------------------- # Load imputation library library(Amelia) # Declare the variables to include in the imputation data varIndex <- names(hsb) # Declare ID variables to be excluded from imputation IDS <- c("schid", "meanses") # Imputate impute.out <- amelia(hsb[, varIndex], idvars = IDS, noms = c("minority", "female"), m = 5) summary(impute.out) ## ----boot, message=FALSE, eval = amelia_uneval--------------------------- # # Amelia is not available so let's just boostrap resample our data # impute.out <- vector(mode = "list", 5) # # for (i in 1:5) { # impute.out[[i]] <- hsb[sample(nrow(hsb), nrow(hsb), replace = TRUE), ] # } # # # Declare the variables to include in the imputation data # summary(impute.out) ## ------------------------------------------------------------------------ fmla <- 
"mathach ~ minority + female + ses + meanses + (1 + ses|schid)" mod <- lmer(fmla, data = hsb) if(amelia_eval) { modList <- lmerModList(fmla, data = impute.out$imputations) } else { # Use bootstrapped data instead modList <- lmerModList(fmla, data = impute.out) } ## ------------------------------------------------------------------------ fixef(mod) # model with dropped missing fixef(modList) ## ------------------------------------------------------------------------ VarCorr(mod) # model with dropped missing VarCorr(modList) # aggregate of imputed models ## ------------------------------------------------------------------------ lapply(modList, fixef) ## ------------------------------------------------------------------------ fixef(modList[[1]]) fixef(modList[[2]]) ## ------------------------------------------------------------------------ print(modList) ## ------------------------------------------------------------------------ summary(modList) ## ------------------------------------------------------------------------ fastdisp(modList) ## ------------------------------------------------------------------------ modelRandEffStats(modList) modelFixedEff(modList) VarCorr(modList) ## ------------------------------------------------------------------------ modelInfo(mod) ## ------------------------------------------------------------------------ lapply(modList, modelInfo) ## ------------------------------------------------------------------------ summary(modList) ## ------------------------------------------------------------------------ modelFixedEff(modList) ## ------------------------------------------------------------------------ ranef(modList) merTools/inst/doc/imputation.Rmd0000644000176200001440000001327113466135366016456 0ustar liggesusers--- title: "Analyzing Imputed Data with Multilevel Models and merTools" author: "Jared Knowles" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Imputation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide'} knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo=TRUE, fig.width = 7 ) library(knitr); library(merTools) amelia_eval <- "Amelia" %in% rownames(installed.packages()) amelia_uneval <- !amelia_eval ``` ## Introduction Multilevel models are valuable in a wide array of problem areas that involve non-experimental, or observational data. In many of these cases the data on individual observations may be incomplete. In these situations, the analyst may turn to one of many methods for filling in missing data depending on the specific problem at hand, disciplinary norms, and prior research. One of the most common cases is to use multiple imputation. Multiple imputation involves fitting a model to the data and estimating the missing values for observations. For details on multiple imputation, and a discussion of some of the main implementations in R, look at the documentation and vignettes for the `mice` and `Amelia` packages. The key difficulty multiple imputation creates for users of multilevel models is that the result of multiple imputation is K replicated datasets corresponding to different estimated values for the missing data in the original dataset. For the purposes of this vignette, I will describe how to use one flavor of multiple imputation and the function in `merTools` to obtain estimates from a multilevel model in the presence of missing and multiply imputed data. 
## Missing Data and its Discontents To demonstrate this workflow, we will use the `hsb` dataset in the `merTools` package which includes data on the math achievement of a wide sample of students nested within schools. The data has no missingness, so first we will simulate some missing data. ```{r} data(hsb) # Create a function to randomly assign NA values add_NA <- function(x, prob){ z <- rbinom(length(x), 1, prob = prob) x[z==1] <- NA return(x) } hsb$minority <- add_NA(hsb$minority, prob = 0.05) table(is.na(hsb$minority)) hsb$female <- add_NA(hsb$female, prob = 0.05) table(is.na(hsb$female)) hsb$ses <- add_NA(hsb$ses, prob = 0.05) table(is.na(hsb$ses)) hsb$size <- add_NA(hsb$size, prob = 0.05) table(is.na(hsb$size)) ``` ```{r impute, message=FALSE, eval = amelia_eval} # Load imputation library library(Amelia) # Declare the variables to include in the imputation data varIndex <- names(hsb) # Declare ID variables to be excluded from imputation IDS <- c("schid", "meanses") # Imputate impute.out <- amelia(hsb[, varIndex], idvars = IDS, noms = c("minority", "female"), m = 5) summary(impute.out) ``` ```{r boot, message=FALSE, eval = amelia_uneval} # Amelia is not available so let's just boostrap resample our data impute.out <- vector(mode = "list", 5) for (i in 1:5) { impute.out[[i]] <- hsb[sample(nrow(hsb), nrow(hsb), replace = TRUE), ] } # Declare the variables to include in the imputation data summary(impute.out) ``` ## Fitting and Summarizing a Model List Fitting a model is very similar ```{r} fmla <- "mathach ~ minority + female + ses + meanses + (1 + ses|schid)" mod <- lmer(fmla, data = hsb) if(amelia_eval) { modList <- lmerModList(fmla, data = impute.out$imputations) } else { # Use bootstrapped data instead modList <- lmerModList(fmla, data = impute.out) } ``` The resulting object `modList` is a list of `merMod` objects the same length as the number of imputation datasets. This object is assigned the class of `merModList` and `merTools` provides some convenience functions for reporting the results of this object. Using this, we can directly compare the model fit with missing data excluded to the aggregate from the imputed models: ```{r} fixef(mod) # model with dropped missing fixef(modList) ``` ```{r} VarCorr(mod) # model with dropped missing VarCorr(modList) # aggregate of imputed models ``` If you want to inspect the individual models, or you do not like taking the mean across the imputation replications, you can take the `merModList` apart easily: ```{r} lapply(modList, fixef) ``` And, you can always operate on any single element of the list: ```{r} fixef(modList[[1]]) fixef(modList[[2]]) ``` ## Output of a Model List ```{r} print(modList) ``` ```{r} summary(modList) ``` ```{r} fastdisp(modList) ``` The standard errors reported for the model list include a correction, Rubin's correction (see documentation), which adjusts for the within and between imputation set variance as well. ## Specific Model Information Summaries ```{r} modelRandEffStats(modList) modelFixedEff(modList) VarCorr(modList) ``` ### Diagnostics of List Components ```{r} modelInfo(mod) ``` Let's apply this to our model list. ```{r} lapply(modList, modelInfo) ``` ### Model List Generics ```{r} summary(modList) ``` ```{r} modelFixedEff(modList) ``` ```{r} ranef(modList) ``` ## Cautions and Notes Often it is desirable to include aggregate values in the level two or level three part of the model such as level 1 SES and level 2 mean SES for the group. 
In cases where there is missingness in either the level 1 SES values, or in the level 2 mean SES values, caution and careful thought need to be given to how to proceed with the imputation routine. merTools/inst/doc/merToolsIntro.R0000644000176200001440000001532713466137707016571 0ustar liggesusers## ----setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide'---- knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo=TRUE, fig.width = 7 ) library(knitr); library(merTools) ## ------------------------------------------------------------------------ library(lme4) head(InstEval) str(InstEval) ## ------------------------------------------------------------------------ m1 <- lmer(y ~ service + lectage + studage + (1|d) + (1|s), data=InstEval) ## ------------------------------------------------------------------------ library(merTools) fastdisp(m1) ## ------------------------------------------------------------------------ feEx <- FEsim(m1, 1000) cbind(feEx[,1] , round(feEx[, 2:4], 3)) ## ------------------------------------------------------------------------ library(ggplot2) ggplot(feEx[feEx$term!= "(Intercept)", ]) + aes(x = term, ymin = median - 1.96 * sd, ymax = median + 1.96 * sd, y = median) + geom_pointrange() + geom_hline(yintercept = 0, size = I(1.1), color = I("red")) + coord_flip() + theme_bw() + labs(title = "Coefficient Plot of InstEval Model", x = "Median Effect Estimate", y = "Evaluation Rating") ## ------------------------------------------------------------------------ plotFEsim(feEx) + theme_bw() + labs(title = "Coefficient Plot of InstEval Model", x = "Median Effect Estimate", y = "Evaluation Rating") ## ------------------------------------------------------------------------ reEx <- REsim(m1) head(reEx) ## ------------------------------------------------------------------------ table(reEx$term) table(reEx$groupFctr) ## ---- eval=FALSE, echo = TRUE-------------------------------------------- # lattice::dotplot(ranef(m1, condVar=TRUE)) ## ------------------------------------------------------------------------ p1 <- plotREsim(reEx) p1 ## ------------------------------------------------------------------------ example1 <- draw(m1, type = 'random') head(example1) ## ------------------------------------------------------------------------ # predict it predict(m1, newdata = example1) # change values example1$service <- "1" predict(m1, newdata = example1) ## ------------------------------------------------------------------------ example2 <- wiggle(example1, varlist = "lectage", valueslist = list(c("1", "2", "3", "4", "5", "6"))) example2 ## ------------------------------------------------------------------------ example2$yhat <- predict(m1, newdata = example2) ggplot(example2, aes(x = lectage, y = yhat)) + geom_line(aes(group = 1)) + theme_bw() + ylim(c(1, 5)) + geom_hline(yintercept = mean(InstEval$y), linetype = 2) + geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) + geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3) ## ------------------------------------------------------------------------ example3 <- draw(m1, type = 'average') example3 ## ------------------------------------------------------------------------ example3 <- wiggle(example1, varlist = "service", valueslist = list(c("0", "1"))) example3$yhat <- predict(m1, newdata = example3) ggplot(example3, aes(x = service, y = yhat)) + geom_line(aes(group = 1)) + theme_bw() + ylim(c(1, 5)) + geom_hline(yintercept = mean(InstEval$y), linetype = 2) + 
geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) + geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3) ## ------------------------------------------------------------------------ REquantile(m1, quantile = 0.25, groupFctr = "s") REquantile(m1, quantile = 0.25, groupFctr = "d") ## ------------------------------------------------------------------------ example4 <- draw(m1, type = 'average') example4 <- wiggle(example4, varlist = "s", list(REquantile(m1, quantile = seq(0.1, 0.9, .1), groupFctr = "s"))) example4$yhat <- predict(m1, newdata = example4) ggplot(example4, aes(x = reorder(s, -yhat), y = yhat)) + geom_line(aes(group = 1)) + theme_bw() + ylim(c(1, 5)) + geom_hline(yintercept = mean(InstEval$y), linetype = 2) + geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) + geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3) ## ------------------------------------------------------------------------ subExample <- list(studage = "2", lectage = "4") example5 <- draw(m1, type = 'average', varList = subExample) example5 ## ------------------------------------------------------------------------ data(VerbAgg) m2 <- glmer(r2 ~ Anger + Gender + btype + situ + (1|id) + (1 + Gender|item), family = binomial, data = VerbAgg) example6 <- draw(m2, type = 'average', varList = list("id" = "149")) example6$btype <- "scold" example6$situ <- "self" tempdf <- wiggle(example6, varlist = "Gender", list(c("M", "F"))) tempdf <- wiggle(tempdf, varlist = "item", list(unique(VerbAgg$item))) tempdf$yhat <- predict(m2, newdata = tempdf, type = "response", allow.new.levels = TRUE) ggplot(tempdf, aes(x = item, y = yhat, group = Gender)) + geom_line(aes(color = Gender))+ theme_bw() + ylim(c(0, 1)) + theme(axis.text.x = element_text(angle = 20, hjust=1), legend.position = "bottom") + labs(x = "Item", y = "Probability") ## ------------------------------------------------------------------------ exampPreds <- predictInterval(m2, newdata = tempdf, type = "probability", level = 0.8) tempdf <- cbind(tempdf, exampPreds) ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr, group = Gender)) + geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black"))+ theme_bw() + ylim(c(0, 1)) + theme(axis.text.x = element_text(angle = 20), legend.position = "bottom")+ labs(x = "Item", y = "Probability") ## ------------------------------------------------------------------------ exampPreds <- predictInterval(m2, newdata = tempdf, type = "probability", include.resid.var = FALSE, level = 0.8) tempdf <- cbind(tempdf[, 1:8], exampPreds) ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr, group = Gender)) + geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black"))+ geom_line(aes(color = Gender)) + theme_bw() + ylim(c(0, 1)) + theme(axis.text.x = element_text(angle = 20), legend.position = "bottom") + labs(x = "Item", y = "Probability") merTools/inst/doc/marginal_effects.Rmd0000644000176200001440000000445613466135366017563 0ustar liggesusers--- title: "Using merTools to Marginalize Over Random Effect Levels" author: "Jared Knowles and Carl Frederick" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Marginalizing Random Effect Levels} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide', cache=FALSE} library(ggplot2); library(knitr); library(merTools) knitr::opts_chunk$set( cache=FALSE, comment="#>", 
collapse=TRUE, echo = TRUE ) ``` # Marginalizing Random Effects One of the most common questions about multilevel models is how much influence grouping terms have on the outcome. One way to explore this is to simulate the predicted values of an observation across the distribution of random effects for a specific grouping variable and term. This can be described as "marginalizing" predictions over the distribution of random effects. This allows you to explore the influence of the grouping term and grouping levels on the outcome scale by simulating predictions for simulated values of each observation across the distribution of effect sizes. The `REmargins()` function allows you to do this. Here, we take the example `sleepstudy` model and marginalize predictions for all of the random effect terms (Subject:Intercept, Subject:Days). By default, the function will marginalize over the *quartiles* of the expected rank (see expected rank vignette) of the effect distribution for each term. ```{r} fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) mfx <- REmargins(merMod = fm1, newdata = sleepstudy[1:10,]) head(mfx) ``` The new data frame output from `REmargins` contains a lot of information. The first few columns contain the original data passed to `newdata`. Each observation in `newdata` is identified by a `case` number, because the function repeats each observation by the number of random effect terms and number of breaks to simulate each term over. Then the `grouping_var` # Summarizing # Plotting Finally - you can plot the results marginalization to evaluate the effect of the random effect terms graphically. ```{r} ggplot(mfx) + aes(x = breaks, y = fit_Subject, group = case) + geom_line() + facet_wrap(~term) ``` merTools/inst/doc/merToolsIntro.Rmd0000644000176200001440000003615313402510755017076 0ustar liggesusers--- title: "An Introduction to merTools" author: "Jared Knowles and Carl Frederick" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{An Introduction to merTools} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide'} knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo=TRUE, fig.width = 7 ) library(knitr); library(merTools) ``` ## Introduction Working with generalized linear mixed models (GLMM) and linear mixed models (LMM) has become increasingly easy with the advances in the `lme4` package recently. As we have found ourselves using these models more and more within our work, we, the authors, have developed a set of tools for simplifying and speeding up common tasks for interacting with `merMod` objects from `lme4`. This package provides those tools. ## Illustrating Model Effects As the complexity of the model fit grows, it becomes harder and harder to interpret the substantive effect of parameters in the model. Let's start with a medium-sized example model using the `InstEval` data provided by the `lme4` package. These data represent university lecture evaluations at ETH Zurich made by students. 
In this data, `s` is an individual student, `d` is an individual lecturer, `studage` is the semester the student is enrolled, `lectage` is how many semesters back the lecture with the rating took place, `dept` is the department of the lecture, and `y` is an integer 1:5 representing the ratings of the lecture from "poor" to "very good": ```{r} library(lme4) head(InstEval) str(InstEval) ``` Starting with a simple model: ```{r} m1 <- lmer(y ~ service + lectage + studage + (1|d) + (1|s), data=InstEval) ``` After fitting the model we can make use of the first function provided by `merTools`, `fastdisp` which modifies the function `arm:::display` to more quickly display a summary of the model without calculating the model sigma: ```{r} library(merTools) fastdisp(m1) ``` We see some interesting effects. First, our decision to include student and lecturer effects seems justified as there is substantial variance within these groups. Second, there do appear to be some effects by age and for lectures given as a service by an outside lecturer. Let's look at these in more detail. One way to do this would be to plot the coefficients together in a line to see which deviate from 0 and in what direction. To get a confidence interval for our fixed effect coefficients we have a number of options that represent a tradeoff between coverage and computation time -- see `confint.merMod` for details. An alternative is to simulate values of the fixed effects from the posterior using the function `arm::sim`. Our next tool, `FEsim`, is a convenience wrapper to do this and provide an informative data frame of the results. ```{r} feEx <- FEsim(m1, 1000) cbind(feEx[,1] , round(feEx[, 2:4], 3)) ``` We can present these results graphically, using `ggplot2`: ```{r} library(ggplot2) ggplot(feEx[feEx$term!= "(Intercept)", ]) + aes(x = term, ymin = median - 1.96 * sd, ymax = median + 1.96 * sd, y = median) + geom_pointrange() + geom_hline(yintercept = 0, size = I(1.1), color = I("red")) + coord_flip() + theme_bw() + labs(title = "Coefficient Plot of InstEval Model", x = "Median Effect Estimate", y = "Evaluation Rating") ``` However, an easier option is: ```{r} plotFEsim(feEx) + theme_bw() + labs(title = "Coefficient Plot of InstEval Model", x = "Median Effect Estimate", y = "Evaluation Rating") ``` ## Random Effects Next, we might be interested in exploring the random effects. Again, we create a dataframe of the values of the simulation of these effects for the individual levels. ```{r} reEx <- REsim(m1) head(reEx) ``` The result is a dataframe with estimates of the values of each of the random effects provided by the `arm::sim()` function. *groupID* represents the identfiable level for the variable for one random effect, *term* represents whether the simulated values are for an intercept or which slope, and *groupFctr* identifies which of the `(1|x)` terms the values represent. To make unique identifiers for each term, we need to use both the `groupID` and the `groupFctr` term in case these two variables use overlapping label names for their groups. In this case: ```{r} table(reEx$term) table(reEx$groupFctr) ``` Most important is producing caterpillar or dotplots of these terms to explore their variation. This is easily accomplished with the `dotplot` function: ```{r, eval=FALSE, echo = TRUE} lattice::dotplot(ranef(m1, condVar=TRUE)) ``` However, these graphics do not provide much control over the results. 
Instead, we can use the `plotREsim` function in `merTools` to gain more control over plotting of the random effect simulations.

```{r}
p1 <- plotREsim(reEx)
p1
```

The result is a ggplot2 object which can be modified however the user sees fit. Here, we've established that most student and professor effects are indistinguishable from zero, but there do exist extreme outliers with both high and low averages that need to be accounted for.

## Substantive Effects

A logical next line of questioning is to see how much of the variation in a rating can be caused by changing the student rater and how much is due to the fixed effects we identified above. This is a very difficult problem to solve, but using simulation we can examine the model behavior under a range of scenarios to understand how the model is reflecting changes in the data. To do this, we use another set of functions available in `merTools`.

The simplest option is to pick an observation at random and then modify its values deliberately to see how the prediction changes in response. `merTools` makes this task very simple:

```{r}
example1 <- draw(m1, type = 'random')
head(example1)
```

The `draw` function takes a random observation from the data in the model and extracts it as a dataframe. We can now do a number of operations on this observation:

```{r}
# predict it
predict(m1, newdata = example1)
# change values
example1$service <- "1"
predict(m1, newdata = example1)
```

More interestingly, let's programmatically modify this observation to see how the predicted value changes if we hold everything but one variable constant.

```{r}
example2 <- wiggle(example1, varlist = "lectage",
                   valueslist = list(c("1", "2", "3", "4", "5", "6")))
example2
```

The function `wiggle` allows us to create a new dataframe containing copies of the observation in which just one variable is changed across a specified set of values. Chaining together `wiggle` calls, we can see how the variable behaves under a number of different scenarios simultaneously.

```{r}
example2$yhat <- predict(m1, newdata = example2)

ggplot(example2, aes(x = lectage, y = yhat)) + geom_line(aes(group = 1)) +
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) +
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) +
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)
```

The result allows us to graphically display the effect of each level of `lectage` on an observation that is otherwise identical. This is plotted here against a horizontal line representing the mean of the observed ratings, and two finer lines showing plus or minus one standard deviation of the mean.

This is nice, but selecting a random observation is not very satisfying as it may not be very meaningful. To address this, we can instead take the average observation:

```{r}
example3 <- draw(m1, type = 'average')
example3
```

Here, the average observation is identified based on either the modal observation for factors or on the mean for numeric variables. Then, the random effect terms are set to the level equivalent to the median effect -- very close to 0.
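As a quick, illustrative check, we can compare the point prediction for this average observation against the grand mean of the observed ratings; because its random effect levels were chosen to have effects near the median (approximately zero), the two values should be in the same ballpark:

```{r}
# Illustrative check (not essential to the workflow): the prediction for the
# 'average' observation should land near the grand mean of the observed
# ratings, because its random effect levels are set near the median effect.
predict(m1, newdata = example3)
mean(InstEval$y)
```

With this average observation in hand, we can again use `wiggle` to vary a single variable -- in this case `service` -- while holding everything else constant:
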
```{r}
# wiggle the average observation drawn above
example3 <- wiggle(example3, varlist = "service",
                   valueslist = list(c("0", "1")))
example3$yhat <- predict(m1, newdata = example3)

ggplot(example3, aes(x = service, y = yhat)) + geom_line(aes(group = 1)) +
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) +
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) +
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)
```

Here we can see that for the average observation, whether the lecture is given outside of the home department has a very slight negative effect on the overall rating. Might the individual professor or student have more of an impact on the overall rating? To answer this question we need to wiggle the same observation across a wide range of student or lecturer effects. How do we identify this range? `merTools` provides the `REquantile` function, which helps to identify which levels of the grouping terms correspond to which quantile of the magnitude of the random effects:

```{r}
REquantile(m1, quantile = 0.25, groupFctr = "s")
REquantile(m1, quantile = 0.25, groupFctr = "d")
```

Here we can see that group level `r REquantile(m1, quantile = 0.25, groupFctr = "s")` corresponds to the 25th percentile of the effect for the student groups, and level `r REquantile(m1, quantile = 0.25, groupFctr = "d")` corresponds to the 25th percentile for the instructor group. Using this information we can reassign a specific observation to varying magnitudes of grouping term effects to see how much they might influence our final prediction.

```{r}
example4 <- draw(m1, type = 'average')
example4 <- wiggle(example4, varlist = "s",
                   list(REquantile(m1, quantile = seq(0.1, 0.9, .1),
                                   groupFctr = "s")))
example4$yhat <- predict(m1, newdata = example4)

ggplot(example4, aes(x = reorder(s, -yhat), y = yhat)) +
  geom_line(aes(group = 1)) +
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) +
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) +
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)
```

This figure is very interesting because it shows that moving across the range of student effects can have a larger impact on the score than the fixed effects we observed above. That is, getting a "generous" or a "stingy" rater can have a substantial impact on the final rating.

But we can do even better. First, we can move beyond the average observation by taking advantage of the `varList` option to `draw`, which allows us to specify a subset of the data to compute an average for.

```{r}
subExample <- list(studage = "2", lectage = "4")
example5 <- draw(m1, type = 'average', varList = subExample)
example5
```

Now we have the average observation with a student age of 2 and a lecture age of 4. We can then follow the same procedure as before to explore the effects on our subsamples. Before we do that, let's fit a slightly more complex model that includes a random slope.

```{r}
data(VerbAgg)
m2 <- glmer(r2 ~ Anger + Gender + btype + situ + (1|id) + (1 + Gender|item),
            family = binomial, data = VerbAgg)

example6 <- draw(m2, type = 'average', varList = list("id" = "149"))
example6$btype <- "scold"
example6$situ <- "self"

tempdf <- wiggle(example6, varlist = "Gender", list(c("M", "F")))
tempdf <- wiggle(tempdf, varlist = "item", list(unique(VerbAgg$item)))
tempdf$yhat <- predict(m2, newdata = tempdf, type = "response",
                       allow.new.levels = TRUE)

ggplot(tempdf, aes(x = item, y = yhat, group = Gender)) +
  geom_line(aes(color = Gender)) +
  theme_bw() + ylim(c(0, 1)) +
  theme(axis.text.x = element_text(angle = 20, hjust=1),
        legend.position = "bottom") +
  labs(x = "Item", y = "Probability")
```

Here we've shown that the item-level intercept and the item-level gender slope simultaneously affect our predicted value. This results in the two lines of predicted values across the items not being parallel. While we can see this by looking at the model summary (or `fastdisp` from the `merTools` package for larger models), it is not intuitive what that effect looks like across different scenarios. `merTools` has given us the machinery to investigate this.

## Uncertainty

The above examples make use of simulation to show the model behavior after changing some values in a dataset. However, until now, we've focused on using point estimates to represent these changes. The use of predicted point estimates without incorporating any uncertainty can lead to overconfidence in the precision of the model. In the `predictInterval` function, discussed in more detail in another package vignette, we provide a way to incorporate three out of the four types of uncertainty inherent in a model. These are:

1. Overall model uncertainty
2. Uncertainty in fixed effect values
3. Uncertainty in random effect values
4. Uncertainty in the distribution of the random effects

Types 1-3 are incorporated in the results of `predictInterval`, while capturing type 4 would require making use of the `bootMer` function -- options discussed in greater detail elsewhere. The main advantage of `predictInterval` is that it is fast. By leveraging the power of the `arm::sim()` function, we are able to generate prediction intervals for individual observations from very large models very quickly. And, it works a lot like `predict`:

```{r}
exampPreds <- predictInterval(m2, newdata = tempdf,
                              type = "probability", level = 0.8)

tempdf <- cbind(tempdf, exampPreds)

ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr,
                   group = Gender)) +
  geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black")) +
  theme_bw() + ylim(c(0, 1)) +
  theme(axis.text.x = element_text(angle = 20), legend.position = "bottom") +
  labs(x = "Item", y = "Probability")
```

Here we can see that there is barely any difference between the genders in the area covered by the prediction intervals. However, by default, this approach includes the residual variance of the model.
If we instead focus just on the uncertainty of the random and fixed effects, we get: ```{r} exampPreds <- predictInterval(m2, newdata = tempdf, type = "probability", include.resid.var = FALSE, level = 0.8) tempdf <- cbind(tempdf[, 1:8], exampPreds) ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr, group = Gender)) + geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black"))+ geom_line(aes(color = Gender)) + theme_bw() + ylim(c(0, 1)) + theme(axis.text.x = element_text(angle = 20), legend.position = "bottom") + labs(x = "Item", y = "Probability") ``` Here, more difference emerges, but we see that the differences are not very precise. merTools/inst/modelFigure.R0000644000176200001440000000754313402510754015432 0ustar liggesusers# # Not working currently # # # Diagramming a model # library(DiagrammeR) # # # # Get the 'nycflights13' package if not already installed # # install.packages('nycflights13') # # # Get the 'lubridate' package if not already installed # # install.packages('lubridate') # # # Get the latest build of the 'DiagrammeR' package from GitHub # devtools::install_github('rich-iannone/DiagrammeR') # # library("nycflights13") # library("lubridate") # library("DiagrammeR") # library("pipeR") # # # Choose a day from 2013 for NYC flight data # # (You can choose any Julian day, it's interesting to see results for different days) # day_of_year <- 10 # # # Get a data frame of complete cases (e.g., flights have departure and arrival times) # nycflights13 <- # nycflights13::flights[which(complete.cases(nycflights13::flights) == TRUE), ] # # # Generate a POSIXct vector of dates using the 'ISOdatetime' function # # Columns 1, 2, and 3 are year, month, and day columns # # Column 4 is a 4-digit combination of hours (00-23) and minutes (00-59) # date_time <- # data.frame("date_time" = # ISOdatetime(year = nycflights13[,1], # month = nycflights13[,2], # day = nycflights13[,3], # hour = gsub("[0-9][0-9]$", "", nycflights13[,4]), # min = gsub(".*([0-9][0-9])$", "\\1", nycflights13[,4]), # sec = 0, tz = "GMT")) # # # Add the POSIXct vector 'date_time' to the 'nycflights13' data frame # nycflights13 <- cbind(date_time, nycflights13) # # # Select flights only from the specified day of the year 2013 # nycflights13_day <- # subset(nycflights13, # date_time >= ymd('2013-01-01', tz = "GMT") + days(day_of_year - 1) & # date_time < ymd('2013-01-01', tz = "GMT") + days(day_of_year)) # # # Create the 'nodes' data frame where at least one column is named "nodes" or "node_id" # # Column 12 is the 3-letter code for the airport departing from # # Column 13 is for the airport arriving to # # (Option: change df to 'nycflights13_day' and only airports used for the day will be included) # nodes_df <- create_nodes(nodes = unique(c(nycflights13[,12], # nycflights13[,13])), # label = FALSE) # # # The 'edges' data frame must have columns named 'edge_from' and 'edge_to' # # The color attribute is determined with an 'ifelse' statement, where # # column 8 is the minutes early (negative values) or minutes late (positive values) # # for the flight arrival # edges_df <- create_edges(edge_from = nycflights13_day[,12], # edge_to = nycflights13_day[,13], # color = ifelse(nycflights13_day[,8] < 0, # "green", "red")) # # # Set the graph diagram's default attributes for... 
# # # ...nodes # node_attrs <- c("style = filled", "fillcolor = lightblue", # "color = gray", "shape = circle", "fontname = Helvetica", # "width = 1") # # # ...edges # edge_attrs <- c("arrowhead = dot") # # # ...and the graph itself # graph_attrs <- c("layout = circo", # "overlap = false", # "fixedsize = true", # "ranksep = 3", # "outputorder = edgesfirst") # # # Generate the graph diagram in the RStudio Viewer. # # The green lines show flights that weren't late (red indicates late arrivals) # # This graph is for a single day of flights, airports that are unconnected on a # # given day may be destinations on another day # create_graph(nodes_df = nodes_df, edges_df = edges_df, # graph_attrs = graph_attrs, node_attrs = node_attrs, # edge_attrs = edge_attrs, directed = TRUE) %>>% # render_graph(width = 1200, height = 800) merTools/tests/0000755000176200001440000000000013403040357013217 5ustar liggesusersmerTools/tests/testthat-p_z.R0000644000176200001440000000012513402510755015771 0ustar liggesuserslibrary(testthat) library(merTools) test_check("merTools", filter = "^[m-z]") merTools/tests/shinyAppTests/0000755000176200001440000000000013402510755016040 5ustar liggesusersmerTools/tests/shinyAppTests/test-shinyApps.R0000644000176200001440000000357013402510755021123 0ustar liggesusers#------------------------------------------------------------------------------ # ShinyMer #----------------------------------------------------------------------------- ## Data and models n <- 20 x <- y <- rnorm(n) z <- rnorm(n) r <- sample(1:5, size=n, replace=TRUE) d <- data.frame(x,y,z,r) d2 <- expand.grid(a=factor(1:4),b=factor(1:4),rep=1:10) n <- nrow(d2) d2 <- transform(d2,r=sample(1:5, size=n, replace=TRUE), z=rnorm(n)) d2 <- subset(d2,!(a=="4" & b=="4")) fm <- lmer( z ~ a*b + (1|r), data=d2) fm2 <- lmer( z ~ a*b + (1+b|r), data=d2) data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ YEAR + (1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) set.seed(3845) d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), fac2 = LETTERS[10:20], obs=1:25) d$x <- runif(nrow(d)) d$y <- simulate(~ x + fac1 + fac2 + (1 + fac1|grp) + (1|obs), family = binomial, newdata=d, newparams=list(beta = rnorm(16), theta = rnorm(16, 5, 1)))[[1]] subD <- d[sample(row.names(d), 5000),] g1 <- glmer(y ~ x + fac1 + fac2 + (1+fac1|grp) + (1|obs), data = subD, family = 'binomial') g2 <- lmer(y ~ lectage + studage + (1+lectage|d) + (1|dept), data=InstEval) # ---------------------------------------------------- # shinyMer blocks shinyMer(fm) shinyMer(fm2) shinyMer(g1) shinyMer(g2) shinyMer(glmer3LevSlope) shinyMer(glmer3LevSlope, simData = glmer3LevSlope@frame[22:25,]) merTools/tests/comparisons/0000755000176200001440000000000013402510755015557 5ustar liggesusersmerTools/tests/comparisons/wheelReinvention.R0000644000176200001440000000700113402510755021225 0ustar liggesusers#What exactly are the values spit out by fitted(sim(merMod), merMod)??? #Does it produce what we are trying to produce with predictInterval? #The answer is no because it doesn't re-sample the RFX (I am fairly certain) #The following does show the arm::sim() side-by-side with our method. 
It looks #like the effect of the differences is that our within-group regression lines are #closer to the population average regression line. library(arm); library(abind); library(mvtnorm); set.seed(95371) data(sleepstudy) model <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy) #Take 5 simulations test.sim <- sim(model, 1000) #Display simulated coefs #coef(test.sim) ranef.array <- coef(test.sim)$ranef[[1]] fixef.matrix <- coef(test.sim)$fixef #Expand and permute fixefs to conform to ranefs for elementwise addition dim(ranef.array) dim(fixef.matrix) #For now do it by hand fixef.array <- array(data = rep(fixef.matrix, dim(ranef.array)[2]), dim = c(dim(fixef.matrix), dim(ranef.array)[2])) fixef.array <- aperm(fixef.array, c(1,3,2)) dim(fixef.array) combo.array <- fixef.array+ranef.array #Extract model.matrix for our own multiplication model.matrix <- lFormula(model@call, data=model@frame)$X dim(model.matrix) expanded.combo.array <- combo.array[,model@flist[[1]],] expanded.combo.array <- aperm(expanded.combo.array, c(3,2,1)) #Matrix multiplication with arrays ??? ##The following yeilds a 180x180 matrix for each prediction because I have ##multiplied ALL obs by ALL possible group coefficients myCalc.large <- abind( lapply(1:dim(expanded.combo.array)[3], function(i) model.matrix %*% expanded.combo.array[,,i]), along=3) ##I only need the diagonal of each 180x180 matrix so we get the value of ##the values for obs i and the coefficients for obs i myCalc <- abind(lapply(1:dim(myCalc.large)[3], function(x) diag(myCalc.large[,,x])), along=2) isTRUE(all.equal(myCalc,fitted(test.sim, model), check.attributes=FALSE)) ##Compare with predictInterval() checkPI <- predictInterval(model, sleepstudy, nsim = 1000, predict="link")$yhat myCalc.lwr <- apply(myCalc,1,function(x) as.numeric(quantile(x, .025))) myCalc.fit <- apply(myCalc,1,function(x) as.numeric(quantile(x, .500))) myCalc.upr <- apply(myCalc,1,function(x) as.numeric(quantile(x, .975))) checkPI.lwr <- apply(checkPI,1,function(x) as.numeric(quantile(x, .025))) checkPI.fit <- apply(checkPI,1,function(x) as.numeric(quantile(x, .500))) checkPI.upr <- apply(checkPI,1,function(x) as.numeric(quantile(x, .975))) plot.data <- rbind( data.frame(model="Arm.sim", x=(1:180)-.15, lwr=myCalc.lwr, fit=myCalc.fit, upr=myCalc.upr), data.frame(model="predictInterval", x=(1:180)+.15, lwr=checkPI.lwr, fit=checkPI.fit, upr=checkPI.upr)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=model, position="dodge"), data=plot.data) + geom_point() + geom_linerange() library(dplyr) calc.sigma <- sqrt(1/rgamma(1000, 0.5*lme4:::df.residual.merMod(model), 0.5*getME(model, "devcomp")$cmp[["pwrss"]])) sim.sigma <- test.sim@sigma data.frame( model=c(rep("ARM",1000),rep("Carl",1000)), sigma=c(sim.sigma, calc.sigma) ) %>% qplot(sigma, color=model, data=., geom="density") merTools/tests/testthat-a_p.R0000644000176200001440000000012313402510755015736 0ustar liggesuserslibrary(testthat) library(merTools) test_check("merTools", filter = "^[a-m]") merTools/tests/timings/0000755000176200001440000000000013402510755014674 5ustar liggesusersmerTools/tests/timings/predictSpeed.R0000644000176200001440000001474513402510755017445 0ustar liggesuserslibrary(microbenchmark) # ClassFilter <- function(x) inherits(get(x), 'lm' ) & !inherits(get(x), 'gl set.seed(101) # Small lmerSlope1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) bench1 <- microbenchmark( predict(lmerSlope1, newdata = sleepstudy[1:100,]), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 100, 
stat = 'median', include.resid.var = TRUE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 100, stat = 'mean', include.resid.var = TRUE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 100, stat = 'median', include.resid.var = FALSE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 1000, stat = 'median', include.resid.var = TRUE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.8, n.sims = 1000, stat = 'median', include.resid.var = TRUE), times = 10, unit = "s" ) bench2 <- microbenchmark( predict(lmerSlope1, newdata = sleepstudy[1:100,]), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 100, stat = 'median', include.resid.var = TRUE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 200, stat = 'mean', include.resid.var = TRUE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 400, stat = 'median', include.resid.var = FALSE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 800, stat = 'median', include.resid.var = TRUE), predictInterval(lmerSlope1, newdata = sleepstudy[1:100,], level = 0.9, n.sims = 1600, stat = 'median', include.resid.var = TRUE), times = 10, unit = "s" ) # Medium d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:100) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)))[[1]] g1 <- lmer(y~fac1+(1|grp), data=d) bench3 <- microbenchmark(predictInterval(g1, newdata = d[1:100, ], level = 0.9, n.sims = 50, stat = 'mean', include.resid.var = TRUE), predictInterval(g1, newdata = d[1:200, ], level = 0.9, n.sims = 50, stat = 'mean', include.resid.var = TRUE), predictInterval(g1, newdata = d[1:400, ], level = 0.9, n.sims = 50, stat = 'mean', include.resid.var = TRUE), predictInterval(g1, newdata = d[1:800, ], level = 0.9, n.sims = 50, stat = 'mean', include.resid.var = TRUE), times = 10, unit = "s") # Large g2 <- lmer(y ~ lectage + studage + (1+lectage|d) + (1|dept), data=InstEval) d2 <- InstEval[1:1000, ] bench4 <- microbenchmark(predictInterval(g2, newdata = d2[1:100, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), predictInterval(g2, newdata = d2[1:200, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), predictInterval(g2, newdata = d2[1:400, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), predictInterval(g2, newdata = d2[1:800, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), times = 10, unit = "s") d2 <- d2[order(d2$d, d2$dept),] bench5 <- microbenchmark(predictInterval(g2, newdata = d2[1:100, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), predictInterval(g2, newdata = d2[1:200, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), predictInterval(g2, newdata = d2[1:400, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), predictInterval(g2, newdata = d2[1:800, ], level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE), times = 10, unit = "s") g3 <- lmer(y ~ lectage + studage + (1|s) + (1+lectage|d) + (1|dept), data=InstEval) g2 <- lmer(y ~ lectage + studage + (1+lectage|d) + (1|dept), data=InstEval) p2 <- profvis({ predictInterval(g2, level = 0.9, newdata = InstEval[1:100,], n.sims = 7500, stat = 'mean', include.resid.var = TRUE) }) # View it with: p2 library(doParallel) cl <- makeCluster(4) 
registerDoParallel(cl, 4) zzz <- predictInterval(g3, level = 0.9, newdata = InstEval, n.sims = 7500, stat = 'mean', include.resid.var = TRUE, .parallel = TRUE) # set.seed(101) # d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), # obs=1:50) # d$y <- simulate(~fac1+(1|grp),family = binomial, # newdata=d, # newparams=list(beta=c(2,-1,3,-2,1.2), theta=c(.33)))[[1]] # subD <- d[sample(row.names(d), 1200),] # g1 <- glmer(y~fac1+(1|grp), data=subD, family = 'binomial') # d$fitted <- predict(g1, d) # # # outs <- predictInterval(g1, newdata = d, level = 0.95, n.sims = 500, # stat = 'mean', include.resid.var = FALSE, # type = 'linear.prediction') # # # g2 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) # d1 <- InstEval[1:100, ] # merTools/tests/timings/Compare_bootMer_KF.R0000644000176200001440000004233313402510755020461 0ustar liggesusers# #This file compares the prediction interval from the "Correct" bootmer method # #to our quick and dirty method to see how they differ. # # #I renamed some things to ease my typing # # #Prep R#### # #rm(list=ls()) # library(lme4) # library(arm) # library(mvtnorm) # library(dplyr) # library(tidyr) # library(ggplot2) # library(knitr) # library(RPushbullet) # # set.seed(51315) # data(InstEval) # data(sleepstudy) # data(VerbAgg) # # #Cannonical Models for testing purposes # ##1) Sleepstudy eg - lmer with random slope and random intercept # m1.form <- Reaction ~ Days + (Days | Subject) # m1.df <- sleepstudy # m1.new.df <- m1.df # ##2) Verbal Aggression eg - lmer (could to glmer(logit)) with 2 levels # m2.form <- r2 ~ (Anger + Gender + btype + situ)^2 + (1|id) + (1|item) # m2.df <- VerbAgg # m2.new.df <- m2.df # ##3) Verbal Aggression eg - glmer(logit) with 2 levels # m3.form <- r2 ~ (Anger + Gender + btype + situ)^2 + (1|id) + (1|item) # m3.df <- VerbAgg # m3.new.df <- m3.df # ##4) Instructer Evaluation et - lmer cross-classified, with two different random slopes # m4.form <- y ~ studage + lectage + service + dept + (studage|s) + (lectage|d) # m4.df <- InstEval # m4.new.df <- m4.df # # #Step 1: Our method #### # # predictInterval <- function(model, newdata, level = 0.95, nsim=1000, stat="median", predict.type="link"){ # # #Depends # # require(mvtnorm) # # require(lme4) # # #Prep Output # # outs <- newdata # # # # #Sort out all the levels # # reTerms <- names(ngrps(model)) # # n.reTerms = length(reTerms) # # # # ##The following 3 lines produce a matrix of linear predictors created from the fixed coefs # # betaSim <- rmvnorm(nsim, mean = fixef(model), sigma = as.matrix(vcov(model))) # # newdata.modelMatrix <- lFormula(formula = model@call, data=newdata)$X # # fixed.xb <- newdata.modelMatrix %*% t(betaSim) # # # # ##Draw from random effects distributions for each level and merge onto newdata # # reSim <- NULL # # for (j in seq_along(reTerms)) { # # group=reTerms[j] # # reMeans <- array(ranef(model)[[group]]) # # reMatrix <- attr(ranef(model, condVar=TRUE)[[group]], which = "postVar") # # reSim[[group]] <- data.frame(rownames(reMeans), matrix(NA, nrow=nrow(reMeans), ncol=nsim)) # # colnames(reSim[[group]]) <- c(group, paste("sim", 1:nsim, sep="")) # # for (k in 1:nrow(reMeans)) { # # lvl = rownames(reMeans)[k] # # reSim[[group]][k,2:ncol(reSim[[group]])] <- rmvnorm(nsim, mean=as.matrix(reMeans[k,]), sigma=as.matrix(reMatrix[,,k])) # # } # # cnames <- colnames(reSim[[group]]) # # reSim[[group]] <- merge(newdata, reSim[[group]], by=group, all.x=TRUE) # # reSim[[group]] <- as.matrix(reSim[[group]][,setdiff(cnames, group)]) # # } # # # # #Calculate yhat as 
sum of components # # yhat <- fixed.xb + apply(simplify2array(reSim), c(1,2), sum) # # # # #Output prediction intervals # # if (stat=="median") { # # outs$fit <- apply(yhat,1,function(x) as.numeric(quantile(x, .5))) # # } # # if (stat=="mean") { # # outs$fit <- apply(yhat,1,mean) # # } # # outs$upr <- apply(yhat,1,function(x) as.numeric(quantile(x, 1 - ((1-level)/2)))) # # outs$lwr <- apply(yhat,1,function(x) as.numeric(quantile(x, ((1-level)/2)))) # # if (predict.type=="response") { # # outs$fit <- model@resp$family$linkinv(outs$fit) # # outs$upr <- model@resp$family$linkinv(outs$upr) # # outr$lwr <- model@resp$family$linkinc(outs$lwr) # # } # # #Close it out # # return(outs) # # } # # #Step 2: Unit Test Function#### # predictInterval.test <- function(model.form, model.df, model.type="lmer", # predict.df, predict.type="link", stat="median", # idvar=NULL, nSims=1000, ...) { # require(lme4); require(dplyr); require(tidyr); require(ggplot2); # ##Estimate model # if (model.type=="lmer") { # modelEstimation.time <- system.time( # m1 <- lmer(model.form, data=model.df, ...) # ) # } # if (model.type=="glmer") { # modelEstimation.time <- system.time( # m1 <- glmer(model.form, data=model.df, ...) # ) # } # if (model.type=="blmer") { # modelEstimation.time <- system.time( # m1 <- blmer(model.form, data=model.df, ...) # ) # } # if (model.type=="bglmer") { # modelEstimation.time <- system.time( # m1 <- bglmer(model.form, data=model.df, ...) # ) # } # ##If it does not have one, add unique identifier to predict.df # if (is.null(idvar)) { # predict.df$.newID <- paste("newID", rownames(predict.df), sep="") # } # ##Functions for bootMer() and objects # ####Return predicted values from bootstrap # mySumm <- function(.) { # predict(., newdata=predict.df, re.form=NULL, type=predict.type) # } # ####Collapse bootstrap into median, 95% PI # sumBoot <- function(merBoot, stat) { # if (stat=="median") { # fit = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.5, na.rm=TRUE))) # } # if (stat=="mean") { # fit = apply(merBoot$t, 2, function(x) mean(x, na.rm=TRUE)) # } # return( # data.frame(merBoot$data, # fit = fit, # lwr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.025, na.rm=TRUE))), # upr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.975, na.rm=TRUE))), # colNAs = apply(merBoot$t, 2, function(x) sum(is.na(x))) # ) # ) # } # ##Bootstrap # ####Method 1: parametric, re-estimate BLUPS # cat("\n Bootstrap Method 1") # boot1.time <- system.time( # boot1 <- bootMer(m1, mySumm, nsim=nSims, # use.u=FALSE, type="parametric", # .progress = "txt", PBargs=list(style=3)) # ) # ####Method 2: parametric, conditional on estimated BLUPS # cat("\n Bootstrap Method 2") # boot2.time <- system.time( # boot2 <- bootMer(m1, mySumm, nsim=nSims, # use.u=TRUE, type="parametric", # .progress = "txt", PBargs=list(style=3)) # ) # ####Method 3: semiparametric (draw from resid), conditional on estimated BLUPS # cat("\n Bootstrap Method 3") # boot3.time <- system.time( # boot3 <- bootMer(m1, mySumm, nsim=nSims, # use.u=TRUE, type="semiparametric", # .progress = "txt", PBargs=list(style=3)) # ) # ##Our Method # kf.time <- system.time( # kf.method <- predictInterval(m1, predict.df) # ) # ##Compare Times # compare.time <- rbind(modelEstimation.time, kf.time, boot1.time, boot2.time, boot3.time) # # ##Summarize and compare results # boot1.sum <- sumBoot(boot1, stat=stat) # boot2.sum <- sumBoot(boot2, stat=stat) # boot3.sum <- sumBoot(boot3, stat=stat) # # eval <- merge(predict.df, boot1.sum) %>% # 
rename(boot1.fit=fit, boot1.lwr=lwr, boot1.upr=upr) # eval <- merge(eval, boot2.sum) %>% # rename(boot2.fit=fit, boot2.lwr=lwr, boot2.upr=upr) # eval <- merge(eval, boot3.sum) %>% # rename(boot3.fit=fit, boot3.lwr=lwr, boot3.upr=upr) # eval <- merge(eval, kf.method) %>% # rename(KF.fit=fit, KF.lwr=lwr, KF.upr=upr) # # #Check if nrow(eval) still equals nrow(predict.df) because it should # if (nrow(eval)!=nrow(predict.df)) { # stop("Something happened when merging bootstrap summaries together ...") # } # # ##Add lmer yhats (predict.merMod) on there # eval$with.u <- predict(m1, newdata=predict.df, re.form=NULL, type=predict.type) # eval$no.u <- predict(m1, newdata=predict.df, re.form=NA, type=predict.type) # # ##Create summary statistics # piCoveragePct <- function(ref.upr, ref.lwr, new.upr, new.lwr) { # pct <- ifelse(ref.upr < new.lwr | ref.lwr > new.upr, 0, # ifelse(ref.upr < new.upr & ref.lwr > new.lwr, 1, # ifelse(ref.upr > new.upr & ref.lwr < new.lwr, (new.upr-new.lwr)/(ref.upr-ref.lwr), # ifelse(ref.upr < new.upr, (ref.upr-new.lwr)/(ref.upr-ref.lwr), # ifelse(ref.lwr > new.lwr, (new.upr-ref.lwr)/(ref.upr-ref.lwr), NA))))) # return(pct) # } # ###Pct of other PI covered by KF PI # eval$boot1.coverage <- piCoveragePct(eval$boot1.upr, eval$boot1.lwr, eval$KF.upr, eval$KF.lwr) # eval$boot2.coverage <- piCoveragePct(eval$boot2.upr, eval$boot2.lwr, eval$KF.upr, eval$KF.lwr) # eval$boot3.coverage <- piCoveragePct(eval$boot3.upr, eval$boot3.lwr, eval$KF.upr, eval$KF.lwr) # ###Does KF PI contain point estimates # eval$contain.boot1 <- piCoveragePct(eval$boot1.fit, eval$boot1.fit, eval$KF.upr, eval$KF.lwr) # eval$contain.boot2 <- piCoveragePct(eval$boot2.fit, eval$boot2.fit, eval$KF.upr, eval$KF.lwr) # eval$contain.boot3 <- piCoveragePct(eval$boot3.fit, eval$boot3.fit, eval$KF.upr, eval$KF.lwr) # eval$contain.with.u <- piCoveragePct(eval$with.u , eval$with.u , eval$KF.upr, eval$KF.lwr) # eval$contain.no.u <- piCoveragePct(eval$no.u , eval$no.u , eval$KF.upr, eval$KF.lwr) # ###Point estimate "bias" # eval$distance.boot1 <- eval$KF.fit - eval$boot1.fit # eval$distance.boot2 <- eval$KF.fit - eval$boot2.fit # eval$distance.boot3 <- eval$KF.fit - eval$boot3.fit # eval$distance.with.u <- eval$KF.fit - eval$with.u # eval$distance.no.u <- eval$KF.fit - eval$no.u # # ##Close it out # return( # list( # compareTimes=compare.time, # bootstraps = list(boot1, boot2, boot3), # kf.method = kf.method, # model=m1, # evalData = eval # ) # ) # } # # #Step 2b: Post processing predictInterval.test() # PIE.graphics <- function(eval.df, response, grouping.factors, seed=314) { # require(dplyr); require(tidyr); require(ggplot2); require(grid); require(gridExtra); # ####Data prep # set.seed(seed) # eval$random <- runif(nrow(eval)) # Eval <- eval %>% # mutate( # random = row_number(random) # ) %>% # gather(var, value, starts_with("boot"), starts_with("KF")) %>% # mutate( # simMethod = sub("[[:punct:]][0-9A-Za-z]*", "", var), # stat = sub("[0-9A-Za-z]*[[:punct:]]", "", var) # ) %>% # select(-var) %>% # spread(stat, value) %>% # group_by(.newID) %>% # arrange(simMethod) %>% # mutate( # x=row_number(simMethod), # x=(x-mean(x))/10 # ) %>% # group_by(simMethod) %>% # mutate( # x=row_number(random)+x # ) # # ####Direct Comparison Plot # Eval.small <- arrange(Eval, random) %>% filter(random<=30) %>% arrange(x) # p1 <- ggplot(aes(y=fit,x=x, color=simMethod), data=Eval.small) + # geom_point(size=I(3)) + # geom_linerange(aes(ymax=upr, ymin=lwr), size=I(1)) + # geom_point(shape="with.u", color="black", size=I(4), # 
data=summarize(group_by(Eval.small, .newID), x=mean(x), fit=mean(with.u))) + # geom_point(shape="no.u", color="black", size=I(4), # data=summarize(group_by(Eval.small, .newID), x=mean(x), fit=mean(no.u))) + # theme_bw() + # labs(x="Index", y="Prediction Interval", title="95% Prediction interval by method for 30 random obs") + # scale_x_discrete(breaks=1:30, labels=Eval.small$.newID[Eval.small$simMethod=="KF"]) + # theme(axis.text.x = element_text(angle=90)) # # #####Distribution of fitted values # mean.statment <- paste("mean(",response,")", sep="") # p2 <- ggplot(aes(x=fit), data=Eval) + # geom_density(aes(color=simMethod)) + # geom_vline(x=mean(unlist(Eval[,response]))) + # geom_density(data=summarize_( # group_by_(Eval, grouping.factors), # fit = mean.statment), color="black") + # theme_bw() + # labs(x = "Estimate", y="Density", # title="Average point estimates by prediction type \n (vertical black line is sample grand mean, \n black density is distribution of sample group means)") # # #####Bar Graph of KF PI containing other point estimates # p3 <- eval %>% select(starts_with("contain.")) %>% # gather(simMethod, value) %>% # mutate( # simMethod = sub("[[:alpha:]]*.", "", simMethod) # ) %>% # group_by(simMethod) %>% # summarize( # value=100*mean(value) # ) %>% # qplot(value, x=simMethod, fill=simMethod, data=., geom="bar", position="dodge", stat="identity") + # labs(y="Percent", title="Percent of observations in which our P.I. contains other point estimates") + # theme_bw() # # ##Summarizing Bias # SD <- sd(eval[,response]) # bias.data <- eval %>% select(starts_with("distance.")) %>% # gather(simMethod, value) %>% # mutate( # simMethod = sub("[[:alpha:]]*.", "", simMethod), # value = value/SD, # ymax = max(density(value)$y), # xmin = min(value) # ) %>% # group_by(simMethod) %>% # summarize( # mean.Distance = round(mean(value),4), # mad.Distance = round(mad(value),2), # ymax = max(ymax), # xmin = min(xmin) # ) # bias.xmin <- min(bias.data$xmin, na.rm=TRUE) # bias.xmax <- 0.2 * bias.xmin # bias.ymax <- max(bias.data$ymax, na.rm=TRUE) # bias.ymin <- .5 * bias.ymax # bias.data <- bias.data[,1:3] # # p4a <- eval %>% select(starts_with("distance.")) %>% # gather(simMethod, value) %>% # mutate( # simMethod = sub("[[:alpha:]]*.", "", simMethod), # value = value/SD # ) %>% # qplot(x=value, color=simMethod, data=., geom="density") + # labs(x="Standard deviations of response variable") + # theme_bw() # # p4b <- tableGrob(bias.data, show.rownames = FALSE) # p4 <- arrangeGrob(p4a, p4b, ncol=1, main="Distribution of distance from KF point estimates to other point estimates") # # # ##Summarizing Bias # coverage.data <- eval %>% # select(ends_with(".coverage")) %>% # gather(simMethod, value) %>% # mutate( # simMethod = gsub(".coverage","", simMethod, fixed=TRUE), # TotalCoverage=value==1, # Cov.90to100 = value>0.9 & value < 1, # Cov.80to90 = value>0.8 & value <= 0.9, # Cov.50to80 = value>0.5 & value <= 0.8, # Cov.0to50 = value>0 & value <= 0.5, # ZeroCoverage= value==0 # ) %>% # group_by(simMethod) %>% # summarize( # TotalCoverage= round(100*sum(TotalCoverage)/n(),1), # Cover.90to100 = round(100*sum(Cov.90to100)/n(),1), # Cover.80to90 = round(100*sum(Cov.80to90)/n(),1), # Cover.50to80 = round(100*sum(Cov.50to80)/n(),1), # Cover.0to50 = round(100*sum(Cov.0to50)/n(),1), # ZeroCoverage = round(100*sum(ZeroCoverage)/n(),1) # ) # p5a <- eval %>% # select(ends_with(".coverage")) %>% # gather(simMethod, value) %>% # mutate( # simMethod = gsub(".coverage","", simMethod, fixed=TRUE) # ) %>% # ggplot(data=., 
aes(x=100*value, fill=simMethod)) + # geom_bar(aes(y=300*(..count..)/sum(..count..)), binwidth=5) + # facet_wrap(~simMethod, ncol=3) + # labs(y="Percent of Observations", x="Coverage Percentage") + # theme_bw() # p5b <- tableGrob(coverage.data, show.rownames = FALSE) # p5 <- arrangeGrob(p5a, p5b, ncol=1, main="Distribution of PI coverage percentages") # # ##Wrap-up # return( # list( # CompareRandomObs=p1, # FitDistributions=p2, # PointEstimateCoverage=p3, # BiasSummary=p4, # CoverageSummary=p5 # ) # ) # } # # PIE.graphics(cannonical.1$evalData, response="Reaction", grouping.factors = "Subject") # # # #Step 3: Summarize and Compare Results#### # #debug(predictInterval.test) # cannonical.1 <- predictInterval.test(model.form = m1.form, model.df = m1.df, predict.df = m1.new.df, nSims=2500) # pbPost("note", title="Finished Cannonical Model 1", body=paste(kable(cannonical.1$compareTimes[,1:3]), collapse ="\n")) # cannonical.2 <- predictInterval.test(model.form = m2.form, model.df = m2.df, predict.df = m2.new.df, nSims=2500) # pbPost("note", title="Finished Cannonical Model 2", body=paste(kable(cannonical.2$compareTimes[,1:3]), collapse ="\n")) # cannonical.4 <- predictInterval.test(model.form = m4.form, model.df = m4.df, predict.df = m4.new.df, nSims=2500) # pbPost("note", title="Finished Cannonical Model 4", body=paste(kable(cannonical.4$compareTimes[,1:3]), collapse ="\n")) # cannonical.3 <- predictInterval.test(model.form = m3.form, model.df = m3.df, model.type="glmer", # predict.df=m3.new.df, predict.type="response", nSims=2500) # pbPost("note", title="Finished Cannonical Model 3", body=paste(kable(cannonical.3$compareTimes[,1:3]), collapse ="\n")) # # # # save.image() # # #Step 4: checking math with a fine-toothed comb #### # ##Pull pieces to create toy example # KF <- cannonical.1$kf.method # model <- cannonical.1$model # nsim=5 merTools/tests/timings/test_fastdisp.R0000644000176200001440000000147413402510755017701 0ustar liggesusers# Test fastdisplay set.seed(51315) library(lme4); library(arm) # Sleepstudy lmerSlope1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) ############################################### context("Fast display") ################################################ test_that("fastdisp pulls out a list", { # hack to avoid console output {sink("NUL"); zz <- fastdisp(lmerSlope1); sink()} expect_is(zz, "list") expect_identical(names(zz), c("call", "t.value", "coef", "se", "ngrps", "AIC", "n")) }) test_that("fastdisp speed is good", { {sink("NUL"); t1 <- system.time(force(fastdisp(lmerSlope1)))["elapsed"]; sink()} {sink("NUL"); t2 <- system.time(force(display(lmerSlope1)))["elapsed"]; sink()} expect_lt(t1, t2) expect_lt(t1, 0.1) }) merTools/tests/testthat.R0000644000176200001440000000007413014440402015174 0ustar liggesuserslibrary(testthat) library(merTools) test_check("merTools") merTools/tests/testthat/0000755000176200001440000000000013466261516015072 5ustar liggesusersmerTools/tests/testthat/test-substEff.R0000644000176200001440000001265213461456457017766 0ustar liggesusers# Test substantive effects library(lme4) set.seed(157) # Test all user parameters for REimpact---- context("Test all user parameters for REimpact") test_that("REimpact parameters are respected", { skip_on_cran() g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) #Warning is about %dopar% call in predictInterval zed <- suppressWarnings(REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", n.sims = 50, include.resid.var = TRUE)) expect_identical(names(zed), c("case", "bin", 
"AvgFit", "AvgFitSE", "nobs")) zed2 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "s", n.sims = 50, include.resid.var = TRUE) expect_equal(nrow(zed), 3 * nrow(InstEval[9:12, ])) expect_false(all(zed$AvgFit == zed2$AvgFit)) expect_false(all(zed$AvgFitSE == zed2$AvgFitSE)) expect_identical(names(zed2), c("case", "bin", "AvgFit", "AvgFitSE", "nobs")) zed <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", breaks = 5, n.sims = 50, include.resid.var = TRUE) expect_equal(nrow(zed), 5 * nrow(InstEval[9:12, ])) }) test_that("REimpact respects passed values for predictInterval", { skip_on_cran() skip_on_travis() d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:30), obs=1:100) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)))[[1]] subD <- d[sample(row.names(d), 1000),] g1 <- lmer(y ~ fac1 + (1|grp), data=subD) zed <- REimpact(g1, newdata = subD[23:25, ], groupFctr = "grp", breaks = 5, include.resid.var = FALSE, n.sims = 100, level = 0.8) zed2 <- REimpact(g1, newdata = subD[23:25, ], groupFctr = "grp", breaks = 5, n.sims = 500, include.resid.var = TRUE, level = 0.99) # expect_true(all(zed2$AvgFitSE > zed$AvgFitSE)) expect_true(!all(zed2$AvgFit > zed$AvgFit)) expect_identical(names(zed), c("case", "bin", "AvgFit", "AvgFitSE", "nobs")) expect_identical(names(zed2), c("case", "bin", "AvgFit", "AvgFitSE", "nobs")) }) # Test for slopes, intercepts, and combinations---- context("Test for slopes, intercepts, and combinations") test_that("Multiple terms can be accessed", { skip_on_cran() data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ YEAR + HEIGHT + (1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) # This is the same issue of zero mean zero variance in the predict interval call zed1 <- suppressWarnings(REimpact(glmer3LevSlope, newdata = grouseticks[5, ], groupFctr = "BROOD", term = "HEIGHT", n.sims = 500, include.resid.var = FALSE, breaks = 4, type = "probability")) # This is the same issue of zero mean zero variance in the predict interval call zed2 <- suppressWarnings(REimpact(glmer3LevSlope, newdata = grouseticks[5, ], groupFctr = "BROOD", term = "Intercept", n.sims = 500, include.resid.var = FALSE, breaks = 4, type = "probability")) # This is the same issue of zero mean zero variance in the predict interval call zed4 <- suppressWarnings(REimpact(glmer3LevSlope, newdata = grouseticks[5, ], groupFctr = "LOCATION", n.sims = 500, include.resid.var = FALSE, breaks = 4)) expect_true(all(zed4$AvgFit < zed2$AvgFit)) expect_true(all(zed4$AvgFit < zed1$AvgFit)) expect_false(identical(zed1, zed2)) expect_false(identical(zed1, zed2)) # No longer an error after revision 0.2.3 # expect_error(zed3 <- suppressWarnings(REimpact(glmer3LevSlope, newdata = grouseticks[5, ], groupFctr = "BROOD", # n.sims = 500, # include.resid.var = FALSE, breaks = 4)), "Must specify which") # Don't think we need this ... 
it throws an subsetting error expect_error(zed5 <- suppressWarnings(REimpact(glmer3LevSlope, newdata = grouseticks[5, ], groupFctr = "LOCATION", term = "HEIGHT", n.sims = 500, include.resid.var = FALSE, breaks = 4)), "undefined columns selected") }) # Custom breaks---- context("Custom breaks") test_that("Custom breakpoints can be set", { skip_on_cran() g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) zed <- REimpact(g1, newdata = InstEval[9, ], breaks = c(0, 10, 50, 90, 100), groupFctr = "d", n.sims = 50, include.resid.var = TRUE) zed2 <- REimpact(g1, newdata = InstEval[9, ], breaks = c(1, 20, 40, 60, 80, 100), groupFctr = "d", n.sims = 50, include.resid.var = TRUE) zed3 <- REimpact(g1, newdata = InstEval[9, ], breaks = 5, groupFctr = "d", n.sims = 50, include.resid.var = TRUE) expect_false(nrow(zed) == nrow(zed2)) expect_gt(sd(zed$nobs), sd(zed2$nobs)) expect_gt(mean(zed$nobs), mean(zed2$nobs)) expect_equal(zed3$nobs, zed2$nobs, tolerance = .05) }) merTools/tests/testthat/test-REmargins.R0000644000176200001440000000127413466135366020070 0ustar liggesusers# Test REmargins set.seed(51315) fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) # context("Test random effect marginalization works") # mfx <- REmargins(merMod = fm1, newdata = sleepstudy[1:10,]) # test_that("Text marginalized effects object has the correct dimensions", { skip_on_travis() skip_on_cran() # mfx has a row for each unique combo of row in newdata, breaks, grouping_var, and term expect_equal(nrow(mfx), 10 * length(unique(mfx$breaks)) * length(unique(mfx$grouping_var)) * length(unique(mfx$term))) expect_equal(ncol(mfx), 17) }) # ggplot(out_w) + aes(x = obs, y = fit_Subject) + # geom_line() + # facet_wrap(~case) merTools/tests/testthat/test-expectedRank.R0000644000176200001440000000656613466135366020627 0ustar liggesusers# Test expected rank #Using 2 of sample models from test_merExtract.R set.seed(51315) library(lme4) # Sleepstudy m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) # Wackier example data(Orthodont,package = "nlme") Orthodont$nsex <- as.numeric(Orthodont$Sex == "Male") Orthodont$nsexage <- with(Orthodont, nsex*age) m3 <- lmer(distance ~ age + (0 + age + nsex|Subject), data=Orthodont) # two grouping factors data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") form <- TICKS ~ YEAR + HEIGHT +(HEIGHT|BROOD) + (1|INDEX) m4 <- glmer(form, family="poisson",data=grouseticks, control = glmerControl(optimizer="Nelder_Mead", optCtrl=list(maxfun = 1e5))) form <- TICKS ~ YEAR + HEIGHT + (0 + HEIGHT|BROOD) + (1|INDEX) m5 <- glmer(form, family="poisson",data=grouseticks, control = glmerControl(optimizer="Nelder_Mead", optCtrl=list(maxfun = 1e5))) #Custom Expectation Functions expect_correct_dim <- function(merMod, groupFctr=NULL, term=NULL) { if (is.null(groupFctr)) { n.levels <- nrow(ranef(merMod)[[1]]) } else { n.levels <- nrow(ranef(merMod)[[groupFctr]]) } ER <- expectedRank(merMod, groupFctr, term) testthat::expect_true(nrow(ER) == n.levels & ncol(ER) == 7 & all(colnames(ER)[6:7] == c("ER", "pctER")) & class(ER) == "data.frame") } ############################################### # Testing expected rank---- context("Testing expected rank") ############################################### test_that("expectedRank parameters work and dont work as intended", { expect_correct_dim(m1) expect_correct_dim(m1, groupFctr="Subject") expect_correct_dim(m1, 
term="(Intercept)") expect_correct_dim(m1, groupFctr="Subject", term="(Intercept)") expect_correct_dim(m2, term="(Intercept)") expect_correct_dim(m2, term="Days") expect_correct_dim(m3, groupFctr="Subject", term="age") expect_correct_dim(m3, groupFctr="Subject", term="nsex") expect_correct_dim(m4, groupFctr="BROOD", term="(Intercept)") expect_correct_dim(m4, groupFctr="INDEX", term="(Intercept)") expect_correct_dim(m5, groupFctr="BROOD") expect_correct_dim(m5, groupFctr="INDEX") # expect_error(expectedRank(m4), "Must specify which grouping factor when there are more than one") # expect_error(expectedRank(m4, groupFctr="BROOD"), "Must specify which random coefficient when there are more than one per selected grouping factor") # expect_error(expectedRank(m3, groupFctr="Subject"), "Must specify which random coefficient when there are more than one per selected grouping factor") # expect_error(expectedRank(m3, term="int"), "undefined columns selected") }) test_that("Ranks have the correct range", { numGrps <- nrow(ranef(m1)[[1]]) expect_true(max(expectedRank(m1)$ER) <= numGrps) expect_true(min(expectedRank(m1)$ER) >= 1) expect_equal(cor(expectedRank(m1)$ER, rank(ranef(m1)[[1]])), 0.99, tolerance = .01) }) test_that("Percentile ranks have the correct range", { expect_true(max(expectedRank(m1)$pctER) <= 100) expect_true(min(expectedRank(m1)$pctER) >= 0) }) merTools/tests/testthat/test-merModList.R0000644000176200001440000001237513466135366020264 0ustar liggesusers# test merModList functions #Do merModList objects get built and work---- context("Do merModList objects get built and work") old_warn <- getOption("warn") options(warn = -1) set.seed(432422) test_that("simple cases work", { # skip_on_cran() library(blme) d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:100) split <- sample(x = LETTERS[9:15], size = nrow(d), replace=TRUE) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)))[[1]] out <- split(d, split) rm(split) # TODO change tolerances g1 <- lmerModList(formula = y~fac1+(1|grp), data=out, control= lmerControl(check.conv.grad = .makeCC("warning", tol= 2e-3))) expect_is(g1, "merModList") g2 <- blmerModList(formula = y~fac1+(1|grp), data=out, control= lmerControl(check.conv.grad = .makeCC("warning", tol= 2e-3))) expect_is(g2, "merModList") expect_false(class(g1[[1]]) == class(g2[[1]])) split <- sample(x = LETTERS[1:20], size = nrow(InstEval), replace=TRUE) out <- split(InstEval, split) rm(split) g1 <- lmerModList(formula = y ~ lectage + studage + (1|d) + (1|dept), data=out, control= lmerControl(check.conv.grad = .makeCC("warning", tol = 1e-2))) expect_is(g1, "merModList") }) test_that("print methods work for merModList", { # skip_on_cran() d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:100) split <- sample(x = LETTERS[9:15], size = nrow(d), replace=TRUE) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)))[[1]] out <- split(d, split) rm(split); g1 <- lmerModList(formula = y~fac1+(1|grp), data=out, control= lmerControl(check.conv.grad = .makeCC("warning", tol= 1e-2))); {sink("NUL"); zz <- print(g1); sink()} expect_is(zz, "list") zz <- summary(g1) expect_is(zz, "summary.merModList") }) # Numerical accuracy of merModList print method---- context("Numerical accuracy of merModList print method") test_that("print method for merModList works in general case", { # skip_on_cran() data(grouseticks) grouseticks$HEIGHT <- 
scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) form <- TICKS_BIN ~ HEIGHT +(1 + HEIGHT|BROOD) + (1|YEAR) modDat <- vector(5, mode="list") for(i in 1:length(modDat)){ modDat[[i]] <- grouseticks[sample(x=1:nrow(grouseticks), size = nrow(grouseticks), replace=FALSE),] } g1 <- glmerModList(formula = form, data = modDat, family = "binomial", control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e6), check.conv.grad = .makeCC("warning", tol= 1e-2))) g1T <- glmer(form, family = "binomial", data = grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e6), check.conv.grad = .makeCC("warning", tol= 1e-2))) expect_equal(VarCorr(g1)$stddev$BROOD, attr(VarCorr(g1T)$BROOD, "stddev"), tolerance = 0.0001) expect_equal(VarCorr(g1)$stddev$YEAR, attr(VarCorr(g1T)$YEAR, "stddev"), tolerance = 0.0001) expect_equal(VarCorr(g1)$correlation$BROOD, attr(VarCorr(g1T)$BROOD, "corre"), tolerance = 0.0001) expect_equal(VarCorr(g1)$correlation$YEAR, attr(VarCorr(g1T)$YEAR, "corre"), tolerance = 0.0001) form <- TICKS_BIN ~ HEIGHT +(1|BROOD) g1 <- glmerModList(formula = form, data = modDat, family = "binomial", control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e6), check.conv.grad = .makeCC("warning", tol= 1e-2))) g1T <- glmer(form, family = "binomial", data = grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e6), check.conv.grad = .makeCC("warning", tol= 1e-2))) expect_equal(VarCorr(g1)$stddev$BROOD, attr(VarCorr(g1T)$BROOD, "stddev"), tolerance = 0.0001) expect_equal(VarCorr(g1)$stddev$YEAR, attr(VarCorr(g1T)$YEAR, "stddev"), tolerance = 0.0001) expect_equal(VarCorr(g1)$correlation$BROOD, attr(VarCorr(g1T)$BROOD, "corre"), tolerance = 0.0001) expect_equal(VarCorr(g1)$correlation$YEAR, attr(VarCorr(g1T)$YEAR, "corre"), tolerance = 0.0001) }) #ICC function---- context("ICC function") test_that("ICC function works", { ICC1 <- ICC(outcome = "Reaction", group = "Subject", data = sleepstudy) expect_is(ICC1, "numeric") expect_equal(ICC1, 0.3948896, tol = .001) }) options(warn= old_warn) merTools/tests/testthat/test-helpers.R0000644000176200001440000000777313466135366017655 0ustar liggesusers# Test helper functions set.seed(51315) # Trimming data frame---- context("Trimming data frame") test_that("Trimming results in correct size", { data(InstEval) trimDat <- merTools:::trimModelFrame(InstEval) expect_gt(nrow(InstEval), nrow( merTools:::trimModelFrame(InstEval))) expect_equal(nrow(trimDat), 4065) cbpp$obs <- 1:nrow(cbpp) d1 <- cbpp d1$y <- d1$incidence / d1$size gm2 <- glmer(y ~ period + (1 | herd), family = binomial, data = d1, nAGQ = 9, weights = d1$size) trimDat <- merTools:::trimModelFrame(gm2@frame) expect_is(trimDat, "data.frame") expect_equal(nrow(trimDat), 18) }) test_that("Trimming does not corrupt order", { tmp <- InstEval[1:10, ] trimDat <- merTools:::trimModelFrame(InstEval) trimDat <- rbind(tmp, trimDat) expect_lt(nrow(trimDat), nrow(tmp) + nrow(InstEval)) row.names(tmp) <- NULL row.names(trimDat) <- NULL expect_identical(tmp, trimDat[1:10, ]) }) # subBoot and Theta---- context("subBoot and Theta") test_that("Can extract theta from a fit model", { set.seed(404) d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:100) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)))[[1]] subD <- d[sample(row.names(d), 1000),] g1 <- lmer(y~fac1+(1|grp), 
data=subD) g1b <- lm(y ~ fac1, data = subD) expect_equal(thetaExtract(g1), 0.2085, tolerance = .05) expect_error(thetaExtract(g1b)) z1 <- subBoot(g1, 500, FUN = thetaExtract, R = 10) expect_is(z1, "data.frame") expect_equal(nrow(z1), 11) expect_equal(ncol(z1), 2) }) # Test formula Build----- context("Test formula build") test_that("Formula works for additive functions", { n <- 20 x <- y <- rnorm(n) z <- rnorm(n) r <- sample(1:5, size=n, replace=TRUE) d <- data.frame(x,y,z,r) d2 <- expand.grid(a=factor(1:4),b=factor(1:4),rep=1:10) n <- nrow(d2) d2 <- transform(d2,r=sample(1:5, size=n, replace=TRUE), z=rnorm(n)) d2 <- subset(d2,!(a=="4" & b=="4")) fm <- lmer( z ~ a + b + (1|r), data=d2) expect_is(merTools:::formulaBuild(fm), "formula") expect_identical(merTools:::formulaBuild(fm), as.formula("z ~ a + b")) }) test_that("Formula works for interactions", { n <- 200 x <- y <- rnorm(n) z <- rnorm(n) r <- sample(1:5, size=n, replace=TRUE) d <- data.frame(x,y,z,r) d2 <- expand.grid(a=factor(1:4),b=factor(1:4), c = factor(1:4), rep=1:10) n <- nrow(d2) d2 <- transform(d2,r=sample(1:5, size=n, replace=TRUE), z=rnorm(n)) d2 <- subset(d2,!(a=="4" & b=="4")) d2$x <- rnorm(nrow(d2)) fm <- lmer( z ~ a * b + c + (1|r), data=d2) expect_is(merTools:::formulaBuild(fm), "formula") expect_identical(merTools:::formulaBuild(fm), as.formula("z ~ a * b + c")) fm <- lmer( z ~ a * b * c + (1|r), data=d2) expect_is(merTools:::formulaBuild(fm), "formula") expect_identical(merTools:::formulaBuild(fm), as.formula("z ~ a * b * c")) fm <- lmer( z ~ a * b * c + x + I(x^2) + (1 + c|r), data=d2) expect_is(merTools:::formulaBuild(fm), "formula") expect_identical(merTools:::formulaBuild(fm), as.formula("z ~ a * b * c + x + I(x^2)")) }) test_that("Build model matrix produces matrices of the right size", { d <- expand.grid(fac1 = LETTERS[1:5], grp = letters[11:20], obs = 1:50) d$y <- simulate(~fac1 + (1 | grp), family = binomial, newdata = d, newparams = list( beta = c(2,-1,3,-2,1.2), theta = c(.33)), seed =634)[[1]] subD <- d[sample(row.names(d), 1200), ] g1 <- glmer(y~fac1+(1|grp), data=subD, family = 'binomial') d$fitted <- predict(g1, d) mm <- merTools:::buildModelMatrix(g1, newdata = d, which = "full") expect_is(mm, "matrix") expect_equal(dim(mm), c(2500, 15)) } ) merTools/tests/testthat/test-merExtract.R0000644000176200001440000001101313402510755020274 0ustar liggesusers# ----------------------------------------------------- # Test framework includes tests for multiple intercepts and # multiple slopes to ensure extraction of random effects # works in these scenarios #------------------------------------------------------- set.seed(51315) library(lme4) data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") # Build out models form <- TICKS ~ YEAR + HEIGHT +(1|BROOD) + (1|INDEX) + (1|LOCATION) glmer3Lev <- glmer(form, family="poisson",data=grouseticks, control = glmerControl(optimizer="Nelder_Mead", optCtrl=list(maxfun = 1e5))) # GLMER 3 level + slope form <- TICKS ~ YEAR + HEIGHT +(1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="poisson",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) # Sleepstudy lmerSlope1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) # Wackier example data(Orthodont,package="nlme") Orthodont$nsex <- as.numeric(Orthodont$Sex=="Male") Orthodont$nsexage <- with(Orthodont, nsex*age) lmerSlope2 <- lmer(distance ~ age + (0 + age + nsex|Subject), 
data=Orthodont) ############################################### # Extract Random Effects from merMod---- context("Extract Random Effects from merMod") ################################################ test_that("REextract pulls out a data frame", { expect_is(REextract(lmerSlope2), "data.frame") expect_is(REextract(glmer3LevSlope), "data.frame") expect_is(REextract(glmer3Lev), "data.frame") expect_is(REextract(lmerSlope1), "data.frame") }) test_that("REextract issues error with non merMod objects", { expect_error(REextract(lm(Reaction ~ Days, sleepstudy))) expect_error(REextract(glm(TICKS ~ YEAR + HEIGHT, family="poisson", data=grouseticks))) }) test_that("REextract gets correct dimensions", { expect_equal(ncol(REextract(glmer3Lev)), 4) expect_equal(ncol(REextract(lmerSlope1)), 6) expect_equal(ncol(REextract(lmerSlope2)), 6) expect_equal(ncol(REextract(glmer3LevSlope)), 6) expect_equal(nrow(REextract(glmer3Lev)), 584) expect_equal(nrow(REextract(lmerSlope1)), 18) expect_equal(nrow(REextract(lmerSlope2)), 27) expect_equal(nrow(REextract(glmer3LevSlope)), 584) }) # Check names # Check numerics ############################################### # Fixed effect estimates from posterior---- context("Fixed effect estimates from posterior") ################################################ test_that("FEsim produces data.frames", { expect_is(FEsim(lmerSlope1, n.sims=100), "data.frame") expect_is(FEsim(lmerSlope2, n.sims=100), "data.frame") expect_is(FEsim(glmer3Lev, n.sims=100), "data.frame") expect_is(FEsim(glmer3LevSlope, n.sims=100), "data.frame") }) test_that("n.sims changes simulation results", { expect_false(identical(FEsim(lmerSlope1, n.sims = 1000), FEsim(lmerSlope1, n.sims = 10))) }) # numeric checks ############################################### # Random effect estimates from posterior---- context("Random effect estimates from posterior") ################################################ test_that("REsim produces data.frames", { expect_is(REsim(lmerSlope1, n.sims=100), "data.frame") expect_is(REsim(lmerSlope2, n.sims=100), "data.frame") expect_is(REsim(glmer3Lev, n.sims=100), "data.frame") expect_is(REsim(glmer3LevSlope, n.sims=100), "data.frame") }) ############################################### # RMSE estimates---- context("RMSE estimates") ################################################ test_that("RMSE produces correct variable types", { expect_is(RMSE.merMod(lmerSlope1), "numeric") expect_is(RMSE.merMod(lmerSlope2), "numeric") expect_is(RMSE.merMod(lmerSlope1, scale = TRUE), "numeric") expect_is(RMSE.merMod(lmerSlope2, scale = TRUE), "numeric") }) test_that("RMSE respects scale parameter", { expect_false(identical(RMSE.merMod(lmerSlope1), RMSE.merMod(lmerSlope1, scale = TRUE))) expect_false(identical(RMSE.merMod(lmerSlope2), RMSE.merMod(lmerSlope2, scale = TRUE))) expect_lt(RMSE.merMod(lmerSlope2, scale = TRUE), RMSE.merMod(lmerSlope2)) expect_lt(RMSE.merMod(lmerSlope1, scale = TRUE), RMSE.merMod(lmerSlope1)) }) merTools/tests/testthat/test-plots.R0000644000176200001440000000125613466135366017342 0ustar liggesusers# Test plotting functions # Plot functions return gg objects? 
---- context("Plot functions return gg objects?") test_that("Prediction intervals work for simple linear example", { skip_on_cran() d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:100) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)))[[1]] subD <- d[sample(row.names(d), 1000),] g1 <- lmer(y~fac1+(1|grp), data=subD) FE1 <- FEsim(g1) p1 <- plotFEsim(FE1) expect_is(p1, "gg") p1 <- plotREsim(REsim(g1)) expect_is(p1, "gg") }) merTools/tests/testthat/test-merData.R0000644000176200001440000005445713466135366017571 0ustar liggesusers# ----------------------------------------------------- #------------------------------------------------------- set.seed(51315) library(lme4) data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") # Build out models form <- TICKS ~ YEAR + HEIGHT +(1|BROOD) + (1|LOCATION) + (1|INDEX) glmer3Lev <- glmer(form, family="poisson",data=grouseticks, control = glmerControl(optimizer="Nelder_Mead", optCtrl=list(maxfun = 1e5))) # GLMER 3 level + slope form <- TICKS ~ YEAR + HEIGHT +(1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="poisson",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) # GLMER 2 level # data(VerbAgg) # fmVA <- glmer(r2 ~ Anger + Gender + btype + situ + # (1|id) + (1|item), family = binomial, data = # VerbAgg) # Sleepstudy lmerSlope1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) # Wackier example data(Orthodont,package="nlme") Orthodont$nsex <- as.numeric(Orthodont$Sex=="Male") Orthodont$nsexage <- with(Orthodont, nsex*age) lmerSlope2 <- lmer(distance ~ age + (0 + age + nsex|Subject), data=Orthodont) ############################################### #Sanitize Names---- context("Sanitize Names") ################################################ test_that("Sanitize names renames variables in data.frame", { badMod <- lmer(distance ~ factor(Sex) + (0 + age + nsex|Subject), data=Orthodont, control = lmerControl(check.conv.grad = .makeCC("warning", tol= 8e-3))) expect_false(identical(names(badMod@frame), names(merTools:::sanitizeNames(badMod@frame)))) expect_is(merTools:::sanitizeNames(badMod@frame), "data.frame") expect_identical(names(merTools:::sanitizeNames(badMod@frame))[2], "Sex") expect_identical(names(badMod@frame)[2], "factor(Sex)") }) ############################################### #Strip Attributes---- context("Strip attributes") ################################################ test_that("Attributes can be stripped from data.frame", { full <- names(attributes(lmerSlope1@frame)) redu <- names(attributes(merTools:::stripAttributes(lmerSlope1@frame))) redu2 <- names(attributes(merTools:::stripAttributes(glmer3LevSlope@frame))) expect_true(length(full) > length(redu)) expect_true(all(redu %in% full)) expect_true(all(redu %in% c("names", "row.names", "class"))) expect_true(all(redu2 %in% c("names", "row.names", "class"))) }) ############################################### #Random Observation---- context("Random observation") ################################################ test_that("A random observation can be sampled from a merMod", { data1 <- draw(glmer3Lev, type = 'random') data2 <- draw(lmerSlope2, type = 'random') data3 <- draw(lmerSlope1, type = 'random') data4 <- draw(glmer3LevSlope, type = 'random') expect_equal(nrow(data1), 1) expect_equal(nrow(data2), 1) expect_equal(nrow(data3), 1) 
expect_equal(nrow(data4), 1) expect_equal(ncol(data1), 6) expect_equal(ncol(data2), 4) expect_equal(ncol(data3), 3) expect_equal(ncol(data4), 6) expect_identical(names(data1), names(glmer3Lev@frame)) expect_identical(names(data2), names(lmerSlope2@frame)) expect_identical(names(data3), names(lmerSlope1@frame)) expect_identical(names(data4), names(glmer3LevSlope@frame)) expect_false(identical(names(attributes(data1)), names(attributes(glmer3Lev@frame)))) expect_false(identical(names(attributes(data2)), names(attributes(lmerSlope2@frame)))) expect_false(identical(names(attributes(data3)), names(attributes(lmerSlope1@frame)))) expect_false(identical(names(attributes(data4)), names(attributes(glmer3LevSlope@frame)))) expect_false("formula" %in% names(attributes(data1))) expect_false("formula" %in% names(attributes(data2))) expect_false("formula" %in% names(attributes(data3))) expect_false("formula" %in% names(attributes(data4))) }) test_that("Random observation preserves factor levels", { data1 <- draw(glmer3Lev, type = 'random') data2 <- draw(lmerSlope2, type = 'random') data3 <- draw(lmerSlope1, type = 'random') data4 <- draw(glmer3LevSlope, type = 'random') expect_true(length(levels(data1$YEAR)) > length(unique(data1$YEAR))) expect_true(length(levels(data1$BROOD)) > length(unique(data1$BROOD))) expect_true(length(levels(data1$LOCATION)) > length(unique(data1$LOCATION))) expect_true(length(levels(data4$YEAR)) > length(unique(data4$YEAR))) expect_true(length(levels(data4$BROOD)) > length(unique(data4$BROOD))) expect_true(length(levels(data4$LOCATION)) > length(unique(data4$LOCATION))) # test levels are correct levels as well }) ############################################### #Collapse frame---- context("Collapse frame") ################################################ test_that("Collapsing a dataframe results in single row", { data1 <- merTools:::collapseFrame(Orthodont) data2 <- merTools:::collapseFrame(grouseticks) expect_equal(length(data1), length(Orthodont)) expect_equal(length(data2), length(grouseticks)) expect_equal(nrow(data1), 1) expect_equal(nrow(data2), 1) expect_equal(data1$distance, mean(Orthodont$distance)) expect_equal(data1$distance, mean(Orthodont$distance)) expect_equal(data1$age, mean(Orthodont$age)) expect_equal(data1$nsex, mean(Orthodont$nsex)) expect_equal(data1$nsexage, mean(Orthodont$nsexage)) expect_equal(data2$TICKS, mean(grouseticks$TICKS)) expect_equal(data2$HEIGHT, mean(grouseticks$HEIGHT)) expect_equal(data2$cHEIGHT, mean(grouseticks$cHEIGHT)) expect_equal(data2$meanTICKS, mean(grouseticks$meanTICKS)) }) ############################################### context("Subset by a list") ################################################ test_that("Data can be subset by a list", { list11 <- list("Sex" = "Male") list12 <- list("Sex" = "Male", "Subject" = "M05") list13 <- list("Sex" == "Male") list14 <- list("Sex" == "Male", "Subject" == "M05") list15 <- list("Sex" = "Male", "Subject" == "M05") data11 <- merTools:::subsetList(Orthodont, list11) data12 <- merTools:::subsetList(Orthodont, list12) expect_error(merTools:::subsetList(Orthodont, list13)) expect_error(merTools:::subsetList(Orthodont, list14)) expect_error(merTools:::subsetList(Orthodont, list15)) list21 <- list("YEAR" = "95") list22 <- list("LOCATION" = "32", "BROOD" = "503") data21 <- merTools:::subsetList(grouseticks, list21) data22 <- merTools:::subsetList(grouseticks, list22) expect_equal(length(data11), length(Orthodont)) expect_equal(length(data21), length(grouseticks)) expect_equal(length(data12), 
length(Orthodont)) expect_equal(length(data22), length(grouseticks)) expect_equal(nrow(data11), 64) expect_equal(nrow(data21), 117) expect_equal(nrow(data12), 4) expect_equal(nrow(data22), 0) }) ############################################### #Super factor ---- context("Super factor") ################################################ test_that("Unobserved factor levels can be respected", { fac1 <- factor(c("502", "503")) fac1a <- superFactor(fac1, fullLev = unique(grouseticks$BROOD)) fac2 <- factor(c("M16", "M02", "M05")) fac2a <- superFactor(fac2, fullLev = unique(Orthodont$Subject)) expect_false(identical(levels(fac1), levels(fac1a))) expect_false(identical(levels(fac2), levels(fac2a))) expect_true(identical(levels(grouseticks$BROOD), levels(fac1a))) expect_true(identical(levels(Orthodont$Subject), levels(fac2a))) expect_equal(length(levels(fac1a)), 118) expect_equal(length(levels(fac2a)), 27) }) test_that("SuperFactor handles new factor levels correctly", { fac1 <- factor(c("999", "888")) fac1a <- superFactor(fac1, fullLev = unique(grouseticks$BROOD)) fac2 <- factor(c("Z16", "Z02", "Z05")) fac2a <- superFactor(fac2, fullLev = unique(Orthodont$Subject)) expect_false(identical(levels(fac1), levels(fac1a))) expect_false(identical(levels(fac2), levels(fac2a))) expect_false(identical(levels(grouseticks$BROOD), levels(fac1a))) expect_false(identical(levels(Orthodont$Subject), levels(fac2a))) expect_equal(length(levels(fac1a)), length(levels(grouseticks$BROOD)) + 2) expect_equal(length(levels(fac2a)), length(levels(Orthodont$Subject)) + 3) expect_true(identical(levels(fac1a)[1:118], levels(grouseticks$BROOD))) expect_true(identical(levels(fac2a)[1:27], levels(Orthodont$Subject))) }) ############################################### #Shuffle---- context("Shuffle") ################################################ test_that("Data can be shuffled", { expect_equal(nrow(Orthodont), nrow(merTools:::shuffle(Orthodont))) expect_equal(ncol(Orthodont), ncol(merTools:::shuffle(Orthodont))) expect_equal(nrow(grouseticks), nrow(merTools:::shuffle(grouseticks))) expect_equal(ncol(grouseticks), ncol(merTools:::shuffle(grouseticks))) }) ############################################### #Find RE Quantiles---- context("Find RE Quantiles") ################################################ test_that("Errors and messages are issued", { expect_error(REquantile(glmer3Lev, 23, groupFctr = "BROOD")) expect_warning(REquantile(glmer3Lev, .23, groupFctr = "BROOD", term = "Cat")) expect_error(REquantile(glmer3Lev, .23, groupFctr = "Cat")) expect_error(REquantile(glmer3Lev, c(23, .56, .75), "BROOD")) expect_error(REquantile(glmer3Lev, c(.23, 56, .75), "BROOD")) expect_error(REquantile(glmer3Lev, c(.23, .56, 75), "BROOD")) expect_error(REquantile(glmer3Lev, c(.23, .56, 107), "BROOD")) expect_error(REquantile(glmer3Lev, c(-2, .56, .7), "BROOD")) expect_message(REquantile(lmerSlope1, .25, groupFctr = "Subject")) expect_warning(REquantile(lmerSlope2, c(.24), "Subject")) expect_warning(REquantile(lmerSlope2, c(.24), "Subject", term = "Cat")) }) # what to do without intercepts (REquantile(lmerSlope2), c(.24), "Subject") # test_that("Quantiles are returned correctly", { # myRE <- ranef(glmer3Lev)[["BROOD"]] # myRE <- myRE[order(myRE[, "(Intercept)"]), ,drop = FALSE] # rownames(myRE)[floor(23 / nrow(myRE)*100)] # # # }) ############################################### #Test observation wiggle---- context("Test observation wiggle") ################################################ test_that("Row and column lengths are correct -- 
single_wiggle", { data1 <- grouseticks[5:9, ] data1a <- wiggle(data1, var = "BROOD", values = list(c("606", "602", "537"))) data1b <- wiggle(data1a, var = "YEAR", values = list(c("96", "97"))) data2 <- grouseticks[3, ] data2a <- wiggle(data2, var = "BROOD", values = list(c("606", "602", "537"))) data2b <- wiggle(data2a, var = "YEAR", values = list(c("96", "97"))) data3 <- grouseticks[12:14, ] data3a <- wiggle(data3, var = "BROOD", values = list(c("606"))) data3b <- wiggle(data3a, var = "YEAR", values = list(c("96", "97"))) expect_equal(nrow(data1), 5) expect_equal(nrow(data1a), 15) expect_equal(nrow(data1b), 30) expect_equal(nrow(data2), 1) expect_equal(nrow(data2a), 3) expect_equal(nrow(data2b), 6) expect_equal(nrow(data3), 3) expect_equal(nrow(data3a), 3) expect_equal(nrow(data3b), 6) expect_equal(length(data1), length(data1a)) expect_equal(length(data1a), length(data1b)) expect_equal(length(data2), length(data2a)) expect_equal(length(data2a), length(data2b)) expect_equal(length(data3), length(data3a)) expect_equal(length(data3a), length(data3b)) data4 <- wiggle(data3, var = "BROOD", values = list(REquantile(glmer3Lev, quantile = c(0.25, 0.5, 0.75), group = "BROOD"))) expect_true(all(table(as.character(data4$BROOD), as.character(data4$INDEX)) ==1)) }) test_that("Values are placed correctly -- single_wiggle", { data1 <- grouseticks[5:9, ] data1a <- wiggle(data1, var = "BROOD", list(values = c("606", "602", "537"))) data1b <- wiggle(data1a, var = "YEAR", values = list(c("96", "97"))) data2 <- grouseticks[3, ] data2a <- wiggle(data2, var = "BROOD", values = list(c("606", "602", "537"))) data2b <- wiggle(data2a, var = "YEAR", values = list(c("96", "97"))) data3 <- grouseticks[12:14, ] data3a <- wiggle(data3, var = "BROOD", values = list(c("606"))) data3b <- wiggle(data3a, var = "YEAR", values = list(c("96", "97"))) data4 <- Orthodont[15, ] data4a <- wiggle(data4, var = "age", values = list(c(10, 11, 12))) data4b <- wiggle(data4a, var = "Sex", values = list(c("Male", "Female"))) expect_false(any(unique(data1$BROOD) %in% unique(data1a$BROOD))) expect_false(any(unique(data1$BROOD) %in% unique(data1b$BROOD))) expect_false(any(unique(data1a$YEAR) %in% unique(data1b$YEAR))) expect_false(any(unique(data2$BROOD) %in% unique(data2a$BROOD))) expect_false(any(unique(data2$BROOD) %in% unique(data2b$BROOD))) expect_false(any(unique(data2a$YEAR) %in% unique(data2b$YEAR))) expect_false(any(unique(data3$BROOD) %in% unique(data3a$BROOD))) expect_false(any(unique(data3$BROOD) %in% unique(data3b$BROOD))) expect_false(any(unique(data3a$YEAR) %in% unique(data3b$YEAR))) expect_true(all(unique(data1a$BROOD) %in% c("606", "602", "537"))) expect_true(all(unique(data1b$BROOD) %in% c("606", "602", "537"))) expect_true(all(unique(data2a$BROOD) %in% c("606", "602", "537"))) expect_true(all(unique(data2b$BROOD) %in% c("606", "602", "537"))) expect_true(all(unique(data3a$BROOD) %in% c("606"))) expect_true(all(unique(data3b$BROOD) %in% c("606"))) expect_true(all(unique(data4a$age) %in% c(10, 11, 12))) expect_true(all(unique(data4b$age) %in% c(10, 11, 12))) expect_true(all(!unique(data1a$YEAR) %in% c("96", "97"))) expect_true(all(unique(data1b$YEAR) %in% c("96", "97"))) expect_true(all(!unique(data2a$YEAR) %in% c("96", "97"))) expect_true(all(unique(data2b$YEAR) %in% c("96", "97"))) expect_true(all(!unique(data3a$YEAR) %in% c("96", "97"))) expect_true(all(unique(data3b$YEAR) %in% c("96", "97"))) expect_true(all(unique(data4a$Sex) %in% c("Male", "Female"))) expect_true(all(unique(data4b$Sex) %in% c("Male", "Female"))) }) 
test_that("we can use wiggle for multiple variables", { data1 <- grouseticks[5:9, ] data1a <- wiggle(data1, var = c("BROOD", "YEAR"), list(c("606", "602", "537"), c("96", "97"))) data3 <- grouseticks[12:14, ] data3a <- wiggle(data3, var = c("BROOD", "YEAR"), list(c("606"), c("96", "97"))) data4 <- Orthodont[15, ] data4a <- wiggle(data4, var = c("age", "Sex"), list(c(10, 11, 12), c("Male", "Female"))) # tests 1 -- row and columns expect_equal(nrow(data1a), nrow(data1) * 3 * 2) expect_equal(nrow(data3a), nrow(data3) * 1 * 2) expect_equal(nrow(data4a), nrow(data4) * 3 * 2) expect_equal(ncol(data1a), ncol(data1)) expect_equal(ncol(data3a), ncol(data3)) expect_equal(ncol(data4a), ncol(data4)) # tests 2 -- values expect_false(any(unique(data1$BROOD) %in% unique(data1a$BROOD))) expect_false(any(unique(data1$YEAR) %in% unique(data1a$YEAR))) expect_true(all.equal(sort(as.character(unique(data1a$BROOD))), c("537", "602", "606"))) expect_true(all.equal(sort(as.character(unique(data1a$YEAR))), c("96", "97"))) expect_false(any(unique(data3$BROOD) %in% unique(data3a$BROOD))) expect_false(any(unique(data3$YEAR) %in% unique(data3a$YEAR))) expect_true(all.equal(sort(as.character(unique(data3a$BROOD))), "606")) expect_true(all.equal(sort(as.character(unique(data3a$YEAR))), c("96", "97"))) expect_true(all(unique(data4a$age) %in% 10:12)) expect_true(all(unique(data4a$Sex) %in% (c("Female", "Male")))) expect_true(all.equal(sort(unique(data4a$age)), 10:12)) expect_true(all.equal(sort(as.character(unique(data4a$Sex))), c("Female", "Male"))) }) ############################################### #Test average observation extraction---- context("Test average observation extraction") ################################################ test_that("Returns a single row", { data1 <- draw(glmer3Lev, type = 'average') data1a <- draw(glmer3LevSlope, type = 'average') data2 <- draw(lmerSlope1, type = 'average') expect_equal(nrow(data1), 1) expect_equal(nrow(data1a), 1) expect_equal(nrow(data2), 1) }) test_that("Warnings and errors are correct", { expect_message(draw(lmerSlope1, type = 'average')) expect_warning(draw(lmerSlope2, type = 'average')) mylist2 <- list("YEAR" = "97", "LOCATION" = "16") expect_warning(draw(glmer3LevSlope, type = 'average', varList = mylist2)) mylist3 <- list("YEAR" = "97", "LOCATION" = c("16", "56")) expect_warning(draw(glmer3LevSlope, type = 'average', varList = mylist3)) }) test_that("Subsets work", { mylist1 <- list("YEAR" = "97") data1 <- draw(glmer3LevSlope, type = 'average', varList = mylist1) data1a <- draw(glmer3LevSlope, type = 'average') expect_false(identical(data1, data1a)) expect_equal(data1$TICKS, mean(grouseticks$TICKS[grouseticks$YEAR == "97"])) expect_equal(data1a$TICKS, mean(grouseticks$TICKS)) mylist2 <- list("YEAR" = "97", "LOCATION" = "16") expect_warning(draw(glmer3LevSlope, type = 'average', varList = mylist2), "less than 20 rows, averages may be problematic") mylist3 <- list("YEAR" = "97", "LOCATION" = c("16", "56")) expect_warning(draw(glmer3LevSlope, type = 'average', varList = mylist3), "fewer than 3 rows, computing global average instead") }) test_that("Nested specifications work", { library(ggplot2) mod1 <- lmer(sleep_total ~ bodywt + (1|vore/order), data=msleep) data1 <- draw(mod1, "random") expect_is(data1, "data.frame") data2 <- draw(mod1, "average") expect_is(data2, "data.frame") mylist1 <- list("vore" = "carni") mylist2 <- list("order" = "Cetacea") data1 <- draw(mod1, "random", varList = mylist1) expect_is(data1, "data.frame") expect_identical(as.character(data1$vore), 
"carni") data1 <- draw(mod1, "random", varList = mylist2) expect_is(data1, "data.frame") expect_identical(as.character(data1$order), "Cetacea") data1 <- suppressWarnings(draw(mod1, "average", varList = mylist1)) expect_is(data1, "data.frame") expect_identical(as.character(data1$vore), "carni") data1 <- suppressWarnings(draw(mod1, "average", varList = mylist2)) expect_is(data1, "data.frame") expect_identical(as.character(data1$order), "Cetacea") fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) data1 <- suppressWarnings(draw(fm1, type = "average", varList = list("Subject" = "308"))) expect_is(data1, "data.frame") expect_identical(as.character(data1$Subject), "308") }) test_that("findFormFuns works", { #Replicable toy data set.seed(72167) play <- data.frame( a = runif(1000), b = rnorm(1000), c = rbinom(1000, 1, .35), d = rpois(1000, 2) ) play$d <- factor(play$d, labels = LETTERS[seq_along(unique(play$d))]) play$y <- play$a + 0.5*play$b + 2*play$c -1.8*(play$d=="B") + .43*(play$d == "C") + runif(100, 0, .35) play$grp <- factor(sample(x = paste("Group", 1:43), size = 1000, replace = TRUE)) statmode <- function(x){ z <- table(as.vector(x)) m <- names(z)[z == max(z)] if (length(m) == 1) { return(m) } return(".") } trueMeans <- merTools:::collapseFrame(play) #Estimate toy models ##. Scenario 1: I() s1 <- lmer(y ~ a + b + I(b^2) + c + d + (1|grp), data=play) expect_equal(findFormFuns(s1)[names(trueMeans)], trueMeans) expect_equal(findFormFuns(s1)$b^2, findFormFuns(s1)$`I(b^2)`) expect_length(findFormFuns(s1), 7L) ##. Scenario 2: log and no regular a s2 <- lmer(y ~ log(a) + b + c + d + (1|grp), data=play) expect_warning(findFormFuns(s2)) expect_false(suppressWarnings(findFormFuns(s2)$`log(a)` == log(trueMeans$a))) expect_silent(findFormFuns(s2, origData = play)) expect_equal(findFormFuns(s2, origData = play)$`log(a)`, log(trueMeans$a)) ##. Scenario 3: 2 continuous interaction with * s3 <- lmer(y ~ a*b + c + d + (1|grp), data=play) expect_equal(findFormFuns(s3)[names(trueMeans)], trueMeans) expect_length(findFormFuns(s3), 6L) ##. Scenario 4: 2 continuous interaction with : s4 <- lmer(y ~ a:b + c + d + (1|grp), data=play) expect_equal(findFormFuns(s4)[names(trueMeans)], trueMeans) expect_length(findFormFuns(s4), 6L) ##. Scenario 5: 1 cont 1 cat interaction with * s5 <- lmer(y ~ a + c + b*d + (1|grp), data = play) expect_equal(findFormFuns(s5)[names(trueMeans)], trueMeans) expect_length(findFormFuns(s5), 6L) ##. Scenario 6: 1 cont 1 cat interaction with : s6 <- lmer(y ~ a + c + b:d + (1|grp), data = play) expect_equal(findFormFuns(s6)[names(trueMeans)], trueMeans) expect_length(findFormFuns(s6), 6L) ##. Scenario 7: 2 cat interaction with * s7 <- lmer(y ~ a + b + c*d + (1|grp), data = play) expect_equal(findFormFuns(s7)[names(trueMeans)], trueMeans) expect_length(findFormFuns(s7), 6L) ##. Scenario 8: 2 cat interaction with : s8 <- lmer(y ~ a + b + c:d + (1|grp), data = play) expect_equal(findFormFuns(s8)[names(trueMeans)], trueMeans) expect_length(findFormFuns(s8), 6L) ##. Scenario 9: function in random slope s9 <- lmer(y ~ a + b + c + d + (1 + sqrt(abs(b))|grp), data = play) expect_equal(findFormFuns(s9)[names(trueMeans)], trueMeans) expect_equal(findFormFuns(s9)$`sqrt(abs(b))`, sqrt(abs(trueMeans$b))) expect_length(findFormFuns(s9), 7L) ##. 
Scenario 10: two columns in I with no main effects s10 <- lmer(y ~ I(log(a) + b^3) + c + d + (1|grp), data=play) expect_warning(findFormFuns(s10)) expect_false(suppressWarnings(findFormFuns(s10)$`I(log(a) + b^3)`) == log(trueMeans$a) + trueMeans$b^3) expect_silent(findFormFuns(s10, origData = play)) expect_equal(findFormFuns(s10, origData = play)$`I(log(a) + b^3)`, log(trueMeans$a) + trueMeans$b^3) ##. Test that draw, draw.merMod and averageObs accept origData and issue warning if appropriate expect_warning(averageObs(s10)) expect_silent(averageObs(s10, origData = play)) expect_warning(merTools:::draw.merMod(s10, type = "average")) expect_silent(merTools:::draw.merMod(s10, origData = play, type = "average")) expect_silent(merTools:::draw.merMod(s10, type = "random")) }) merTools/tests/testthat/test-subboot.R0000644000176200001440000000423313466135366017654 0ustar liggesusers# test subboot set.seed(51315) library(lme4) # Sleepstudy lmerSlope1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) ############################################### # Extract theta---- context("Extract theta using subBoot") ################################################ test_that("extract theta produces a vector", { expect_is(thetaExtract(lmerSlope1), "numeric") expect_equal(length(thetaExtract(lmerSlope1)), 3) }) test_that("thetaExtract throws errors for non-merMod objects", { expect_error(thetaExtract(lmerSlope1@frame)) m1 <- lm(mpg ~ disp + hp, data = mtcars) expect_error(thetaExtract(m1)) }) ############################################### # subBoot---- context("subBoot") ################################################ test_that("subBoot produces correct output", { skip_on_cran() # Subbooot returns errors here out1 <- subBoot(lmerSlope1, n = 100, FUN = thetaExtract, R = 100) expect_is(out1, "data.frame") expect_equal(ncol(out1), 4) expect_equal(nrow(out1), 101) out2 <- subBoot(lmerSlope1, n = 100, FUN = function(x) getME(x, "fixef"), R = 100) expect_is(out2, "data.frame") expect_equal(ncol(out2), 3) expect_equal(nrow(out2), 101) }) context("subBoot glmer models") d <- expand.grid(fac1 = LETTERS[1:5], grp = letters[11:20], obs = 1:50) d$y <- simulate(~fac1 + (1 | grp), family = binomial, newdata = d, newparams = list( beta = c(2,-1,3,-2,1.2), theta = c(.33)), seed =634)[[1]] subD <- d[sample(row.names(d), 1200), ] g1 <- glmer(y~fac1+(1|grp), data=subD, family = 'binomial') test_that("subBoot produces correct glmer output", { out1 <- subBoot(g1, n = 1000, FUN = thetaExtract, R = 10) expect_is(out1, "data.frame") expect_equal(ncol(out1), 2) expect_equal(nrow(out1), 11) # out2 <- subBoot(g1, n = 500, FUN = function(x) getME(x, "fixef"), R = 10) expect_is(out2, "data.frame") expect_equal(ncol(out2), 6) expect_equal(nrow(out2), 11) }) merTools/tests/testthat/test-predict.R0000644000176200001440000011557213466135366017642 0ustar liggesusers set.seed(51315) #Prediction intervals cover for simulated problems---- context("Prediction intervals cover for simulated problems") test_that("Prediction intervals work for simple linear example", { skip_on_travis() skip_on_cran() d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:100) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)), seed = 4548)[[1]] subD <- d[sample(row.names(d), 1000),] g1 <- lmer(y~fac1+(1|grp), data=subD) d$fitted <- predict(g1, d) #This suppresses the warning about no parallel backend registered outs <- suppressWarnings( predictInterval(g1, newdata = d, level = 0.9, 
n.sims = 1000, seed = 468, stat = 'mean', include.resid.var = TRUE) ) outs <- cbind(d, outs); outs$coverage <- FALSE outs$coverage <- outs$fitted <= outs$upr & outs$fitted >= outs$lwr expect_true(all(outs$coverage)) expect_lt(abs(mean(outs$fit - outs$fitted)), .0005) expect_lt(abs(mean(outs$fit - outs$y)), .01) rm(outs) }) test_that("Prediction intervals work for simple GLMM example", { skip_on_travis() skip_on_cran() set.seed(101) d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:50) d$y <- simulate(~fac1+(1|grp),family = binomial, newdata=d, newparams=list(beta=c(2,-1,3,-2,1.2), theta=c(.33)), seed =634)[[1]] subD <- d[sample(row.names(d), 1200),] g1 <- glmer(y~fac1+(1|grp), data=subD, family = 'binomial') d$fitted <- predict(g1, d) outs <- predictInterval(g1, newdata = d, level = 0.9, n.sims = 500, stat = 'mean', include.resid.var = TRUE, type = 'linear.prediction', seed = 4563) outs <- cbind(d, outs); outs$coverage <- FALSE outs$coverage <- outs$fitted <= outs$upr & outs$fitted >= outs$lwr expect_true(all(outs$coverage)) expect_lt(abs(mean(outs$fit - outs$fitted)), .1) expect_lt(abs(mean(outs$fit - outs$y)), 2) outs2 <- predictInterval(g1, newdata = d, level = 0.95, n.sims = 500, stat = 'mean', include.resid.var = FALSE, type = 'probability') expect_false(identical(outs, outs2)) expect_true(max(outs2$fit) <= 1) expect_true(min(outs2$fit) >= 0) expect_true(max(outs2$lwr) <= 1) expect_true(min(outs2$lwr) >= 0) expect_true(max(outs2$upr) <= 1) expect_true(min(outs2$upr) >= 0) expect_false(max(outs$fit) <= 1) # expect_true(min(outs$fit) < 0) expect_false(max(outs$lwr) <= 1) expect_false(min(outs$lwr) >= 0) expect_false(max(outs$upr) <= 1) rm(outs) }) test_that("Prediction interval respects user input", { skip_on_travis() skip_on_cran() set.seed(101) d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:25) d$y <- simulate(~fac1+(1|grp),family = gaussian, newdata=d, newparams=list(beta=c(2,1,3,4,7), theta=c(.25), sigma = c(.23)), seed =463)[[1]] subD <- d[sample(row.names(d), 1000),] g1 <- lmer(y~fac1+(1|grp), data=subD) d$fitted <- predict(g1, d) outs1 <- predictInterval(g1, newdata = d, level = 0.8, n.sims = 500, stat = 'mean', include.resid.var = TRUE, seed=643) outs2 <- predictInterval(g1, newdata = d, level = 0.95, n.sims = 500, stat = 'mean', include.resid.var = TRUE, seed=643) outs1a <- predictInterval(g1, newdata = d, level = 0.8, n.sims = 1500, stat = 'mean', include.resid.var = TRUE, seed=643) outs2a <- predictInterval(g1, newdata = d, level = 0.95, n.sims = 1500, stat = 'mean', include.resid.var = TRUE, seed=643) outs3 <- predictInterval(g1, newdata = d, level = 0.8, n.sims = 500, stat = 'mean', include.resid.var = FALSE, seed=643) outs3b <- predictInterval(g1, newdata = d, level = 0.8, n.sims = 500, stat = 'median', include.resid.var = FALSE, seed=643) outs3c <- predictInterval(g1, newdata = d[1, ], level = 0.8, n.sims = 500, stat = 'median', include.resid.var = FALSE, seed=643) expect_gt(median(outs2$upr - outs1$upr), 0.1) expect_gt(median(outs2a$upr - outs1a$upr), 0.1) expect_lt(median(outs3$upr - outs1$upr), -.2) expect_lt(median(outs3b$upr - outs1a$upr), -.2) expect_lt(mean(outs1$upr - outs1$lwr), mean(outs2$upr - outs2$lwr)) expect_lt(mean(outs1$upr - outs1$lwr), mean(outs1a$upr - outs1a$lwr)) expect_lt(mean(outs2$upr - outs2$lwr), mean(outs2a$upr - outs2a$lwr)) expect_false(median(outs3$fit) == median(outs3b$fit)) expect_equal(nrow(outs3c), 1) }) # Prediction works for all combinations of slopes and intercepts---- context("Prediction works for all 
combinations of slopes and intercepts") test_that("Predict handles unused and subset of factor levels", { skip_on_cran() skip_on_travis() set.seed(101) moddf <- InstEval[sample(rownames(InstEval), 10000), ] g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=moddf) d1 <- InstEval[1:100, ] outs1 <- suppressWarnings(predictInterval(g1, newdata = d1, level = 0.8, n.sims = 500, stat = 'mean', include.resid.var = TRUE, seed = 4632)) d2 <- rbind(d1, InstEval[670:900,]) outs1a <- suppressWarnings(predictInterval(g1, newdata = d2, level = 0.8, n.sims = 500, stat = 'mean', include.resid.var=TRUE, seed = 4632)[1:100,]) expect_is(outs1, "data.frame") expect_is(outs1a, "data.frame") expect_equal(nrow(outs1), 100) expect_equal(nrow(outs1a), 100) g2 <- lmer(y ~ lectage + studage + (1+lectage|d) + (1|dept), data=moddf) d2 <- InstEval[670:900,] outs1a <- suppressWarnings(predictInterval(g2, newdata = d2, level = 0.8, n.sims = 500, stat = 'mean', include.resid.var=TRUE, seed = 4632)) expect_is(outs1a, "data.frame") expect_equal(nrow(outs1a), 231) }) rm(list = ls()) test_that("Prediction intervals work for multiple parameters per level", { skip_on_travis() skip_on_cran() data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ YEAR + HEIGHT +(1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) #In the call below we are getting warnings because our call to mvtnorm::rmvnorm #is shotting a warning when mean and sigma of multivariate distribution are #zero using the method="chol outs1 <- suppressWarnings(predictInterval(glmer3LevSlope, newdata = grouseticks[1:10,])) expect_is(outs1, "data.frame") }) test_that("Prediction works for random slopes not in fixed", { skip_on_travis() skip_on_cran() data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ YEAR + (1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) zNew <- grouseticks[1:10,] #In the call below we are getting warnings because our call to mvtnorm::rmvnorm #is shotting a warning when mean and sigma of multivariate distribution are #zero using the method="chol outs1 <- suppressWarnings(predictInterval(glmer3LevSlope, newdata = zNew)) expect_is(outs1, "data.frame") # Message may not be necessary any more # expect_message(predictInterval(glmer3LevSlope, newdata = zNew)) }) # Test for new factor levels---- context("Test for new factor levels") test_that("Prediction intervals work with new factor levels added", { skip_on_travis() skip_on_cran() data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ YEAR + HEIGHT +(1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) zNew <- grouseticks[1:10,] zNew$BROOD <- 
as.character(zNew$BROOD) zNew$BROOD[1:9] <- "100" zNew$BROOD[10] <- "101" #In the call below we are getting warnings because our call to mvtnorm::rmvnorm #is shotting a warning when mean and sigma of multivariate distribution are #zero using the method="chol outs1 <- suppressWarnings(predictInterval(glmer3LevSlope, newdata = zNew)) expect_is(outs1, "data.frame") expect_warning(predictInterval(glmer3LevSlope, newdata = zNew)) }) test_that("Prediction works for factor as a random slope not in fixed", { skip_on_travis() skip_on_cran() data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ HEIGHT +(1 + YEAR|BROOD) + (1|LOCATION) #Suppressing warning for known degenerate model below glmer3LevSlope <- suppressWarnings(glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5)))) zNew <- grouseticks[1:10,] zNew$BROOD <- as.character(zNew$BROOD) zNew$BROOD[1:9] <- "100" zNew$BROOD[10] <- "101" expect_warning(predictInterval(glmer3LevSlope, newdata = zNew), "Currently, predictions for these values are based only on the") outs1 <- suppressWarnings(predictInterval(glmer3LevSlope, newdata = zNew)) zNew <- grouseticks[1:10,] outs2 <- predictInterval(glmer3LevSlope, newdata = zNew) expect_is(outs1, "data.frame") expect_is(outs2, "data.frame") expect_identical(dim(outs1), dim(outs2)) }) # Numeric accuracy---- context("Numeric accuracy") # Cases # new factor level for group term test_that("Median of prediction interval is close to predict.lmer for single group models", { set.seed(2311) fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) truPred <- predict(fm1, newdata = sleepstudy) newPred <- predictInterval(fm1, newdata = sleepstudy, n.sims = 500, level = 0.9, stat = c("median"), include.resid.var = FALSE, seed = 4563) expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/50) fm1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) truPred <- predict(fm1, newdata = sleepstudy) newPred <- predictInterval(fm1, newdata = sleepstudy, n.sims = 1500, level = 0.9, stat = c("median"), include.resid.var = FALSE, seed = 9598) expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/100) }) test_that("Median of PI is close to predict.lmer for complex group models", { skip_on_cran() skip_on_travis() set.seed(101) moddf <- InstEval[sample(rownames(InstEval), 10000), ] g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=moddf) d1 <- moddf[1:200, ] newPred <- predictInterval(g1, newdata = d1, level = 0.8, n.sims = 500, stat = 'median', include.resid.var = FALSE, seed = 4563) truPred <- predict(g1, newdata = d1) expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/100) rm(list=ls()) }) test_that("Median of PI is close to predict.glmer for basic and complex grouping", { skip_on_cran() skip_on_travis() set.seed(8496) d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:8), fac2 = LETTERS[12:19], obs=1:20) d$x <- runif(nrow(d)) d$y <- simulate(~ x + fac1 + fac2 + (1 + fac1|grp) + (1|obs), family = binomial, newdata=d, newparams=list(beta = rnorm(13), theta = rnorm(16, 5, 1)), seed = 4563)[[1]] subD <- d[sample(row.names(d), 1500),] # TOO SLOW g1 <- glmer(y ~ x + fac1 + fac2 + (1+fac1|grp) + (1|obs), data = subD, family = 'binomial', control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) truPred <- predict(g1, subD) 
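  # Descriptive note (editorial): the next call compares simulation-based median
  # predictions from predictInterval() against predict() on the linear-predictor
  # scale; suppressWarnings() covers the known mvtnorm::rmvnorm (method = "chol")
  # warning discussed in comments elsewhere in this file.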
newPred <- suppressWarnings(predictInterval(g1, newdata = subD, level = 0.95, n.sims = 2000, stat = 'median', include.resid.var = FALSE, type = 'linear.prediction', seed = 3252)) expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/20) # This test fails currently # g1 <- glmer(y ~ x + fac2 + (1 + fac1|grp) + (1|obs), data = subD, family = 'binomial') # truPred <- predict(g1, subD, type = "response") # newPred <- predictInterval(g1, newdata = subD, level = 0.8, n.sims = 500, # stat = 'median', include.resid.var = FALSE, # type = 'probability') # expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/20) rm(list = ls()) }) test_that("Prediction intervals work with new factor levels added", { skip_on_cran() skip_on_travis() data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ YEAR + HEIGHT +(1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) zNew <- grouseticks zNew$BROOD <- as.character(zNew$BROOD) zNew$BROOD[1:99] <- "100" zNew$BROOD[100] <- "101" #In the call below we are getting warnings because our call to mvtnorm::rmvnorm #is shotting a warning when mean and sigma of multivariate distribution are #zero using the method="chol newPred <- suppressWarnings(predictInterval(glmer3LevSlope, newdata = zNew, level = 0.95, n.sims = 500, stat = 'median', include.resid.var = TRUE, seed = 4563)) truPred <- predict(glmer3LevSlope, newdata = zNew, allow.new.levels = TRUE) expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/40) }) test_that("Prediction intervals work with slope not in fixed effects and data reordered", { skip_on_travis() skip_on_cran() data(grouseticks) grouseticks$HEIGHT <- scale(grouseticks$HEIGHT) grouseticks <- merge(grouseticks, grouseticks_agg[, 1:3], by = "BROOD") grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) # GLMER 3 level + slope form <- TICKS_BIN ~ YEAR + (1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks, control = glmerControl(optimizer="bobyqa", optCtrl=list(maxfun = 1e5))) zNew <- grouseticks zNew$BROOD <- as.character(zNew$BROOD) zNew$BROOD[1:99] <- "100" zNew$BROOD[100] <- "101" zNew <- zNew[, c(10, 9, 8, 7, 1, 2, 3, 4, 5, 6, 10)] #In the call below we are getting warnings because our call to mvtnorm::rmvnorm #is shotting a warning when mean and sigma of multivariate distribution are #zero using the method="chol newPred <- suppressWarnings(predictInterval(glmer3LevSlope, newdata = zNew, level = 0.95, n.sims = 500, stat = 'median', include.resid.var = TRUE, seed = 4563)) truPred <- predict(glmer3LevSlope, newdata = zNew, allow.new.levels = TRUE) expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/20) }) # Special cases - rank deficiency---- context("Special cases - rank deficiency") test_that("Prediction intervals are accurate with interaction terms and rank deficiency", { skip_on_travis() skip_on_cran() set.seed(54656) n <- 20 x <- y <- rnorm(n) z <- rnorm(n) r <- sample(1:5, size=n, replace=TRUE) d <- data.frame(x,y,z,r) d2 <- expand.grid(a=factor(1:4),b=factor(1:4),rep=1:10) n <- nrow(d2) d2 <- transform(d2,r=sample(1:5, size=n, replace=TRUE), z=rnorm(n)) d2 <- subset(d2,!(a=="4" & b=="4")) fm <- lmer( z ~ a*b + 
(1|r), data=d2) expect_is(predictInterval(fm, newdata = d2[1:10, ]), "data.frame") newPred <- predictInterval(fm, newdata = d2, level = 0.8, n.sims = 1500, stat = 'median', include.resid.var = FALSE, seed = 2342) truPred <- predict(fm, newdata = d2) expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/15) fm2 <- lmer( z ~ a*b + (1+b|r), data=d2) #In the call below we are getting warnings because our call to mvtnorm::rmvnorm #is shotting a warning when mean and sigma of multivariate distribution are #zero using the method="chol newPred <- suppressWarnings(predictInterval(fm2, newdata = d2, level = 0.8, n.sims = 1000, stat = 'median', include.resid.var = FALSE)) truPred <- predict(fm2, newdata = d2) expect_is(newPred, "data.frame") expect_equal(mean(newPred$fit - truPred), 0, tolerance = sd(truPred)/10) }) # Test the simResults---- context("Test the simResults") test_that("simResults option behaves", { skip_on_cran() m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) preds1 <- predictInterval(m1, newdata = sleepstudy[1:5, ]) preds2 <- predictInterval(m1, newdata = sleepstudy[1:5, ], returnSims = TRUE) expect_null(attr(preds1, "sim.results")) expect_is(attr(preds2, "sim.results"), "matrix") out <- attr(preds2, "sim.results") expect_equal(ncol(out), 1000) expect_equal(nrow(out), 5) m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) preds1 <- predictInterval(m1, newdata = sleepstudy[1:5, ], returnSims = TRUE, which = "random", seed = 23151) preds2 <- predictInterval(m1, newdata = sleepstudy[1:5, ], which = "fixed", returnSims = TRUE, seed = 23151) preds3 <- predictInterval(m1, newdata = sleepstudy[1:5, ], which = "all", returnSims = TRUE, seed = 23151) preds4 <- predictInterval(m1, newdata = sleepstudy[1:5, ], returnSims = TRUE, seed = 23151) preds1b <- predictInterval(m1, newdata = sleepstudy[1:5, ], returnSims = TRUE, which = "random", seed = 23151, include.resid.var = FALSE) preds2b <- predictInterval(m1, newdata = sleepstudy[1:5, ], which = "fixed", returnSims = TRUE, seed = 23151, include.resid.var = FALSE) preds3b <- predictInterval(m1, newdata = sleepstudy[1:5, ], which = "all", returnSims = TRUE, seed = 23151, include.resid.var = FALSE) preds4b <- predictInterval(m1, newdata = sleepstudy[1:5, ], returnSims = TRUE, seed = 23151, include.resid.var = FALSE) expect_is(attr(preds1, "sim.results"), "matrix") expect_gt(abs(mean(attr(preds1, "sim.results") - attr(preds2, "sim.results"))), 200) expect_is(attr(preds2, "sim.results"), "matrix") expect_gt(abs(mean(attr(preds4, "sim.results") - attr(preds1, "sim.results"))), 200) expect_gt(abs(mean(attr(preds4, "sim.results") - attr(preds1, "sim.results"))), abs(mean(attr(preds1, "sim.results") - attr(preds2, "sim.results")))) expect_is(attr(preds3, "sim.results"), "list") expect_is(attr(preds4, "sim.results"), "matrix") expect_is(attr(preds1b, "sim.results"), "matrix") expect_is(attr(preds2b, "sim.results"), "matrix") expect_is(attr(preds3b, "sim.results"), "list") expect_is(attr(preds4b, "sim.results"), "matrix") # Check that samples are wider for include.resid.var = TRUE expect_gt(quantile(attr(preds1, "sim.results"), probs = 0.9) - quantile(attr(preds1b, "sim.results"), probs = 0.9), 20) expect_lt(quantile(attr(preds1, "sim.results"), probs = 0.1) - quantile(attr(preds1b, "sim.results"), probs = 0.1), -20) expect_gt(quantile(attr(preds2, "sim.results"), probs = 0.9) - quantile(attr(preds2b, "sim.results"), probs = 0.9), 20) expect_lt(quantile(attr(preds2, "sim.results"), probs = 0.1) - quantile(attr(preds2b, 
"sim.results"), probs = 0.1), -20) expect_gt(quantile(attr(preds4, "sim.results"), probs = 0.9) - quantile(attr(preds4b, "sim.results"), probs = 0.9), 15) expect_lt(quantile(attr(preds4, "sim.results"), probs = 0.1) - quantile(attr(preds4b, "sim.results"), probs = 0.1), -15) }) # Test out of sample predictions---- context("Test out of sample predictions") test_that("predictInterval makes predictions without observed outcome", { skip_on_travis() skip_on_cran() possNames <- expand.grid(letters,LETTERS) possNames <- paste(possNames[, 1], possNames[, 2]) newFac <- sample(possNames, 32) modData <- data.frame( y = rnorm(500), x = rnorm(500), team_name = sample(newFac, 500, replace = TRUE) ) modData$y[251:500] <- rep(NA, 250) m0 <- lmer(y ~ x + (1|team_name), data = modData[1:250,]) #In the calls below we are getting warnings because our call to mvtnorm::rmvnorm #is shotting a warning when mean and sigma of multivariate distribution are #zero using the method="chol testPreds1 <- suppressWarnings(predictInterval(m0, newdata = modData[, c(3, 2, 1)])) testPreds2 <- suppressWarnings(predictInterval(m0, newdata = modData[1:250, c(2, 3, 1)])) testPreds3 <- suppressWarnings(predictInterval(m0, newdata = modData[251:500,])) expect_is(testPreds1, "data.frame") expect_is(testPreds2, "data.frame") expect_is(testPreds3, "data.frame") }) # Input validation checks---- context("Input validation checks") test_that("dplyr objects are successfully coerced", { skip_on_cran() set.seed(101) data(sleepstudy) m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) predData <- sleepstudy %>% group_by(Subject) %>% dplyr::summarise(Days = mean(Days)) expect_warning(predictInterval(m1, newdata = predData), regexp = "newdata is tbl_df or tbl object from dplyr package", all=FALSE) #Suppress the warning that we tested for above preds2 <- suppressWarnings(predictInterval(m1, newdata = predData, n.sims=2000)) expect_is(preds2, "data.frame") predData2 <- as.data.frame(predData) preds1 <- predictInterval(m1, newdata = predData2, n.sims=2000) expect_true(sum(preds1$fit - preds2$fit) > -50 & sum(preds1$fit - preds2$fit) < 50) }) # Model type warnings for non-binomial GLMM---- context("Model type warnings for non-binomial GLMM") test_that("Warnings issued", { skip_on_cran() d <- expand.grid(fac1=LETTERS[1:5], grp=factor(1:10), obs=1:50) d$y <- simulate(~fac1+(1|grp),family = poisson, newdata=d, newparams=list(beta=c(2,-1,3,-2,1.2), theta=c(.33)), seed = 5636)[[1]] g1 <- glmer(y~fac1+(1|grp), data=d, family = 'poisson') expect_warning(predictInterval(g1, newdata = d[1:100,])) rm(list = ls()) }) # Test Parallel---- context("Test Parallel") test_that("parallelization does not throw errors and generates good results", { skip_on_cran() skip_on_travis() library(foreach) set.seed(1241) #TODO reign in memory usage and cpu time here m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) predA <- predictInterval(m1, newdata = m1@frame, n.sims = 2200, seed = 54, include.resid.var = FALSE, stat = "median") predB <- predictInterval(m1, newdata = m1@frame, n.sims = 1750, seed = 54, include.resid.var = FALSE, stat = "median") expect_equal(mean(predA$fit - predB$fit), 0 , tolerance = .2) predA <- predictInterval(m1, newdata = m1@frame, n.sims = 2500, seed = 2141, include.resid.var = FALSE) predB <- predictInterval(m1, newdata = m1@frame, n.sims = 1500, seed = 2141, include.resid.var = FALSE) expect_equal(mean(predA$fit - predB$fit), 0 , tolerance = .01) moddf <- InstEval[sample(rownames(InstEval), 5000),] g1 <- lmer(y ~ lectage + studage + (1|d) + 
(1|s), data=moddf) predA <- predictInterval(g1, newdata = g1@frame, n.sims = 2500, seed = 2141, include.resid.var = FALSE) predB <- predictInterval(g1, newdata = g1@frame, n.sims = 1500, seed = 2141, include.resid.var = FALSE) expect_equal(mean(predA$fit - predB$fit), 0 , tolerance = .01) predA <- predictInterval(g1, newdata = g1@frame[1:499,], n.sims = 2500, seed = 2141, include.resid.var = TRUE) predB <- predictInterval(g1, newdata = g1@frame[1:501,], n.sims = 2500, seed = 2141, include.resid.var = TRUE) expect_equal(mean(predA$fit[1:499] - predB$fit[1:499]), 0 , tolerance = .002) detach("package:foreach", character.only=TRUE) }) context("Test returning predict interval components") # Test the option to return different predictInterval components m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) form <- TICKS_BIN ~ YEAR + HEIGHT +(1 + HEIGHT|BROOD) + (1|LOCATION) + (1|INDEX) data(grouseticks) grouseticks$TICKS_BIN <- ifelse(grouseticks$TICKS >=1, 1, 0) grouseticks$YEAR <- as.numeric(grouseticks$YEAR) grouseticks$HEIGHT <- grouseticks$HEIGHT - 462.2 glmer3LevSlope <- glmer(form, family="binomial",data=grouseticks) test_that("Output is correct dimensions", { pred1 <- predictInterval(m1, which = "random") pred2 <- predictInterval(m2, which = "fixed") pred3 <- predictInterval(m2, which = "all") expect_equal(nrow(pred1), nrow(pred2)) expect_equal(ncol(pred1), 3) expect_equal(ncol(pred2), 3) expect_equal(ncol(pred3), 5) expect_equal(nrow(pred3), 180*3) expect_equal(nrow(pred1), 180) expect_false(nrow(pred3) == nrow(pred2)) expect_true(nrow(pred3) > nrow(pred2)) pred1 <- predictInterval(m1, which = "random", stat = "mean") pred2 <- predictInterval(m2, which = "fixed", stat = "mean") pred3 <- predictInterval(m2, which = "all", stat = "mean") expect_equal(nrow(pred1), nrow(pred2)) expect_equal(ncol(pred1), 3) expect_equal(ncol(pred2), 3) expect_equal(ncol(pred3), 5) expect_equal(nrow(pred3), 180*3) expect_equal(nrow(pred1), 180) expect_false(nrow(pred3) == nrow(pred2)) expect_true(nrow(pred3) > nrow(pred2)) pred1 <- suppressWarnings(predictInterval(glmer3LevSlope, which = "random", type = "linear.prediction")) pred2 <- suppressWarnings(predictInterval(glmer3LevSlope, which = "random", type = "probability")) pred3 <- suppressWarnings(predictInterval(glmer3LevSlope, which = "all", type = "linear.prediction")) pred4 <- suppressWarnings(predictInterval(glmer3LevSlope, which = "all", type = "probability")) expect_equal(nrow(pred1), nrow(pred2)) expect_equal(nrow(pred3), nrow(pred4)) expect_equal(ncol(pred1), 3) expect_equal(ncol(pred2), 3) expect_equal(ncol(pred3), 5) expect_equal(ncol(pred3), 5) expect_true(mean(pred2$fit) > mean(pred1$fit)) expect_equal(mean(pred2$fit), 0.5, tol = 0.01) expect_equal(mean(pred1$fit), 0.00, tol = 0.05) expect_equal(mean(pred4$fit), mean(grouseticks$TICKS_BIN), tol = 0.15) expect_false(mean(pred4$fit) == mean(pred3$fit)) expect_equal(nrow(pred1), 403) expect_equal(nrow(pred2), 403) expect_equal(nrow(pred3), 403*5) expect_equal(nrow(pred4), 403*5) }) test_that("Compare random, fixed, include-resid", { predInt1 <- predictInterval(m1) predInt1$width <- predInt1[, 2] - predInt1[, 3] predInt2 <- predictInterval(m1, which = "random") predInt2$width <- predInt2[, 2] - predInt2[, 3] predInt3 <- predictInterval(m1, which = "fixed") predInt3$width <- predInt3[, 2] - predInt3[, 3] predInt4 <- predictInterval(m1, which = "all") predInt4$width <- predInt4[, 3] - predInt4[, 4] predInt1b <- predictInterval(m1, 
include.resid.var = FALSE) predInt1b$width <- predInt1b[, 2] - predInt1b[, 3] predInt2b <- predictInterval(m1, which = "random", include.resid.var = FALSE) predInt2b$width <- predInt2b[, 2] - predInt2b[, 3] predInt3b <- predictInterval(m1, which = "fixed", include.resid.var = FALSE) predInt3b$width <- predInt3b[, 2] - predInt3b[, 3] predInt4b <- predictInterval(m1, which = "all", include.resid.var = FALSE) predInt4b$width <- predInt4b[, 3] - predInt4b[, 4] # These should be fairly close now since residual variance is the biggest expect_true(!all(predInt1$width > predInt2$width)) expect_true(!all(predInt3$width > predInt2$width)) expect_true(!all(predInt4$width[predInt4$effect == "combined"] > predInt1$width)) expect_true(!all(predInt4$width[predInt4$effect == "combined"] > predInt2$width)) expect_true(!all(predInt4$width[predInt4$effect == "combined"] > predInt3$width)) # expect_true(all(predInt1b$width > predInt2b$width)) expect_true(all(predInt3b$width != predInt2b$width)) expect_true(all(predInt4b$width[predInt4b$effect == "combined"] > predInt2b$width)) expect_true(!all(predInt4b$width[predInt4b$effect == "combined"] > predInt1b$width)) # Fits expect_true(all(predInt1$fit > predInt2$fit)) expect_true(all(predInt3$fit > predInt2$fit)) expect_true(all(predInt1$upr > predInt1b$upr)) expect_true(all(predInt1$lwr < predInt1b$lwr)) expect_true(all(predInt2$upr > predInt2b$upr)) expect_true(all(predInt2$lwr < predInt2b$lwr)) expect_true(all(predInt2$upr > predInt2b$upr)) expect_true(all(predInt3$lwr < predInt3b$lwr)) expect_true(all(predInt4$upr > predInt4b$upr)) expect_true(all(predInt4$lwr < predInt4b$lwr)) predInt1 <- predictInterval(m2) predInt1$width <- predInt1[, 2] - predInt1[, 3] predInt2 <- predictInterval(m2, which = "random") predInt2$width <- predInt2[, 2] - predInt2[, 3] predInt3 <- predictInterval(m2, which = "fixed") predInt3$width <- predInt3[, 2] - predInt3[, 3] predInt4 <- predictInterval(m2, which = "all") predInt4$width <- predInt4[, 3] - predInt4[, 4] predInt1b <- predictInterval(m2, include.resid.var = FALSE) predInt1b$width <- predInt1b[, 2] - predInt1b[, 3] predInt2b <- predictInterval(m2, which = "random", include.resid.var = FALSE) predInt2b$width <- predInt2b[, 2] - predInt2b[, 3] predInt3b <- predictInterval(m2, which = "fixed", include.resid.var = FALSE) predInt3b$width <- predInt3b[, 2] - predInt3b[, 3] predInt4b <- predictInterval(m2, which = "all", include.resid.var = FALSE) predInt4b$width <- predInt4b[, 3] - predInt4b[, 4] # These should be fairly close now since residual variance is the biggest variance expect_true(!all(predInt1$width > predInt2$width)) expect_true(!all(predInt3$width > predInt2$width)) expect_true(!all(predInt4$width[predInt4$effect == "combined"] > predInt1$width)) expect_true(!all(predInt4$width[predInt4$effect == "combined"] > predInt2$width)) expect_true(!all(predInt4$width[predInt4$effect == "combined"] > predInt3$width)) # expect_true(all(predInt1b$width > predInt2b$width)) expect_true(all(predInt3b$width != predInt2b$width)) expect_true(all(predInt4b$width[predInt4b$effect == "combined"] > predInt2b$width)) expect_true(!all(predInt4b$width[predInt4b$effect == "combined"] > predInt1b$width)) # Fits expect_true(all(predInt1$fit > predInt2$fit)) expect_true(all(predInt3$fit > predInt2$fit)) expect_true(all(predInt1$upr > predInt1b$upr)) expect_true(all(predInt1$lwr < predInt1b$lwr)) expect_true(all(predInt2$upr > predInt2b$upr)) expect_true(all(predInt2$lwr < predInt2b$lwr)) expect_true(all(predInt2$upr > predInt2b$upr)) 
expect_true(all(predInt3$lwr < predInt3b$lwr)) expect_true(all(predInt4$upr > predInt4b$upr)) expect_true(all(predInt4$lwr < predInt4b$lwr)) }) test_that("Default is set to all effects", { predInt1 <- predictInterval(m1, seed = 8231) predInt2 <- predictInterval(m1, which = "full", seed = 8231) expect_identical(predInt1, predInt2) predInt1 <- predictInterval(m1, seed = 8231, include.resid.var = TRUE) predInt2 <- predictInterval(m1, which = "full", seed = 8231, include.resid.var = TRUE) expect_identical(predInt1, predInt2) predInt1 <- predictInterval(m1, seed = 8231, include.resid.var = TRUE) predInt2 <- predictInterval(m1, which = "all", seed = 8231, include.resid.var = TRUE) expect_equal(mean(predInt1$fit - predInt2$fit[predInt2$effect == "combined"]), 0, tol = 0.14) expect_equal(mean(predInt1$lwr - predInt2$lwr[predInt2$effect == "combined"]), 0, tol = 0.1) expect_equal(mean(predInt1$upr - predInt2$upr[predInt2$effect == "combined"]), 0, tol=0.1) }) # Test nested effect specifications---- context("Test nested effect specifications") test_that("Nested effects can work", { skip_on_cran() library(ggplot2) mod1 <- lmer(sleep_total ~ bodywt + (1|vore/order), data=msleep) msleep$combn <- paste(msleep$vore, msleep$order, sep = "__") mod2 <- lmer(sleep_total ~ bodywt + (1|combn) + (1|vore), data=msleep) #Suppressing warnings we already tested (coerce tbl and new levels) predInt1 <- suppressWarnings(predictInterval(merMod=mod1, newdata=msleep, seed = 548, n.sims = 2000, include.resid.var = FALSE, stat = "median", level = 0.8)) predInt2 <- suppressWarnings(predictInterval(merMod=mod2, newdata=msleep, seed = 548, n.sims = 2000, include.resid.var = FALSE, stat = "median", level = 0.8)) expect_is(predInt1, "data.frame") expect_is(predInt2, "data.frame") expect_equal(mean(predInt1[,1] - predInt2[,1]), 0, tol = sd(predInt1[,1])/20) expect_equal(mean(predInt1[,2] - predInt2[,2]), 0, tol = sd(predInt1[,2])/10) expect_equal(mean(predInt1[,3] - predInt2[,3]), 0, tol = sd(predInt1[,3])/20) }) context("Interactions without intercepts") sleepstudy$Test <- rep(sample(c(TRUE, FALSE), length(unique(sleepstudy$Subject)), replace = TRUE), each = 10) m1 <- lmer(Reaction ~ Days:Test + (0 + Days | Subject), data = sleepstudy) sleepstudy$cars <- sleepstudy$Days*3 m2 <- lmer(Reaction ~ cars:Test + (0 + Days | Subject), data = sleepstudy) m3 <- lmer(Reaction ~ cars:Test + (1 | Subject), data = sleepstudy) m4 <- lmer(Reaction ~ cars:Test + (0 + cars | Subject), data = sleepstudy) test_that("Models with cross-level interaction and no random intercept work", { preds1 <- predictInterval(m1) expect_equal(nrow(preds1), 180) expect_equal(ncol(preds1), 3) expect_message(predictInterval(m1)) preds1 <- predictInterval(m1, newdata = sleepstudy[1:10, ], level = 0.9, n.sims = 500, include.resid.var = FALSE, fix.intercept.variance = TRUE) expect_equal(nrow(preds1), 10) expect_equal(ncol(preds1), 3) preds1 <- predictInterval(m1, newdata = sleepstudy[1:10, ], level = 0.9, n.sims = 500, include.resid.var = FALSE, ignore.fixed.terms = TRUE) expect_equal(nrow(preds1), 10) expect_equal(ncol(preds1), 3) preds2 <- predictInterval(m1, newdata = sleepstudy[1:10, ], level = 0.9, n.sims = 500, include.resid.var = FALSE, ignore.fixed.terms = FALSE) expect_equal(nrow(preds2), 10) expect_equal(ncol(preds2), 3) expect_false(any(preds1$fit == preds2$fit)) rm(preds1, preds2) preds1 <- predictInterval(m2) expect_equal(nrow(preds1), 180) expect_equal(ncol(preds1), 3) truPred <- predict(m2) expect_equal(mean(preds1$fit - truPred), 0, tolerance = 
sd(truPred)/100) # preds1 <- predictInterval(m3) expect_equal(nrow(preds1), 180) expect_equal(ncol(preds1), 3) truPred <- predict(m3) expect_equal(mean(preds1$fit - truPred), 0, tolerance = sd(truPred)/100) # preds1 <- predictInterval(m4) expect_equal(nrow(preds1), 180) expect_equal(ncol(preds1), 3) truPred <- predict(m4) expect_equal(mean(preds1$fit - truPred), 0, tolerance = sd(truPred)/100) }) m1 <- lmer(Reaction ~ 0 + Days + Days:Subject + (1 | Days), data = sleepstudy) test_that("Models with cross-level interaction and no random intercept work", { preds1 <- predictInterval(m1) expect_equal(nrow(preds1), 180) expect_equal(ncol(preds1), 3) expect_warning(predictInterval(m1, fix.intercept.variance = TRUE)) preds1 <- predictInterval(m1, newdata = sleepstudy[1:10, ], level = 0.9, n.sims = 500, include.resid.var = FALSE, ignore.fixed.terms = TRUE) expect_equal(nrow(preds1), 10) expect_equal(ncol(preds1), 3) truPred <- predict(m1, newdata = sleepstudy[1:10,]) expect_equal(mean(preds1$fit - truPred), 0, tolerance = sd(truPred)/100) # This is less close preds1 <- predictInterval(m1, newdata = sleepstudy[1:50, ], level = 0.9, n.sims = 500, include.resid.var = FALSE, ignore.fixed.terms = FALSE) expect_equal(nrow(preds1), 50) expect_equal(ncol(preds1), 3) truPred <- predict(m1, newdata = sleepstudy[1:50,]) expect_equal(mean(preds1$fit - truPred), 0, tolerance = sd(truPred)/25) expect_warning({ preds1 <- predictInterval(m1, newdata = sleepstudy[1:50, ], level = 0.9, n.sims = 500, include.resid.var = FALSE, fix.intercept.variance = TRUE) }) expect_failure({ expect_equal(mean(preds1$fit - truPred), 0, tolerance = sd(truPred)/100) }) }) merTools/tests/testthat/test-seeds.R0000644000176200001440000000266113466135366017305 0ustar liggesusers# 'seed' options in draw, REsim, FEsim, predictInterval and subBoot---- context("'seed' options in draw, REsim, FEsim, predictInterval and subBoot") test_that("Equivalent seeds return equivalent results", { fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) d1a <- draw(fm1, type="random", seed=1234) d2 <- draw(fm1, type="random", seed=456) d1b <- draw(fm1, type="random", seed=1234) r1a <- REsim(fm1, 25, seed=1234) r2 <- REsim(fm1, 25, seed=456) r1b <- REsim(fm1, 25, seed=1234) f1a <- FEsim(fm1, 25, seed=1234) f2 <- FEsim(fm1, 25, seed=456) f1b <- FEsim(fm1, 25, seed=1234) # TODO - subboot now returns warnings and needs to be checked p1a <- predictInterval(fm1, newdata=sleepstudy[1:10,], seed=1234) p2 <- predictInterval(fm1, newdata=sleepstudy[1:10,], seed=456) p1b <- predictInterval(fm1, newdata=sleepstudy[1:10,], seed=1234) s1a <- subBoot(fm1, n = 160, FUN = thetaExtract, R = 20, seed=1234) s2 <- subBoot(fm1, n = 160, FUN = thetaExtract, R = 20, seed=456) s1b <- subBoot(fm1, n = 160, FUN = thetaExtract, R = 20, seed=1234) expect_identical(d1a, d1b) expect_identical(r1a, r1b) expect_identical(f1a, f1b) expect_identical(p1a, p1b) expect_identical(s1a, s1b) expect_false(identical(d1a, d2)) expect_false(identical(r1a, r2)) expect_false(identical(f1a, f2)) expect_false(identical(p1a, p2)) expect_false(identical(s1a, s2)) }) merTools/NAMESPACE0000644000176200001440000000533413466135366013317 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(VarCorr,merModList) S3method(draw,merMod) S3method(fastdisp,merMod) S3method(fastdisp,merModList) S3method(fixef,merModList) S3method(print,merModList) S3method(print,summary.merModList) S3method(ranef,merModList) S3method(summary,merModList) export(FEsim) export(ICC) export(REcorrExtract) 
export(REextract) export(REimpact) export(REmargins) export(REquantile) export(REsdExtract) export(REsim) export(RMSE.merMod) export(averageObs) export(bglmerModList) export(blmerModList) export(draw) export(expectedRank) export(fastdisp) export(findFormFuns) export(glmerModList) export(lmerModList) export(modelFixedEff) export(modelInfo) export(modelRandEffStats) export(plotFEsim) export(plotREsim) export(predictInterval) export(randomObs) export(shinyMer) export(subBoot) export(superFactor) export(thetaExtract) export(wiggle) import(arm) import(dplyr) import(ggplot2) import(lme4) importFrom(abind,abind) importFrom(arm,sim) importFrom(blme,bglmer) importFrom(blme,blmer) importFrom(broom,tidy) importFrom(foreach,"%dopar%") importFrom(foreach,foreach) importFrom(methods,as) importFrom(methods,is) importFrom(mvtnorm,rmvnorm) importFrom(shiny,actionButton) importFrom(shiny,checkboxInput) importFrom(shiny,downloadButton) importFrom(shiny,downloadHandler) importFrom(shiny,em) importFrom(shiny,eventReactive) importFrom(shiny,fluidPage) importFrom(shiny,h3) importFrom(shiny,isolate) importFrom(shiny,mainPanel) importFrom(shiny,numericInput) importFrom(shiny,observeEvent) importFrom(shiny,plotOutput) importFrom(shiny,radioButtons) importFrom(shiny,reactive) importFrom(shiny,reactiveValues) importFrom(shiny,renderPlot) importFrom(shiny,renderPrint) importFrom(shiny,runApp) importFrom(shiny,shinyApp) importFrom(shiny,sidebarLayout) importFrom(shiny,sidebarPanel) importFrom(shiny,strong) importFrom(shiny,tabPanel) importFrom(shiny,tabsetPanel) importFrom(shiny,textOutput) importFrom(shiny,titlePanel) importFrom(stats,AIC) importFrom(stats,as.formula) importFrom(stats,delete.response) importFrom(stats,df) importFrom(stats,formula) importFrom(stats,getCall) importFrom(stats,logLik) importFrom(stats,median) importFrom(stats,model.frame) importFrom(stats,model.matrix) importFrom(stats,na.omit) importFrom(stats,na.pass) importFrom(stats,pnorm) importFrom(stats,pt) importFrom(stats,qnorm) importFrom(stats,quantile) importFrom(stats,reformulate) importFrom(stats,reshape) importFrom(stats,residuals) importFrom(stats,rgamma) importFrom(stats,rnorm) importFrom(stats,runif) importFrom(stats,sd) importFrom(stats,terms) importFrom(stats,vcov) importFrom(stats,weighted.mean) importFrom(utils,packageVersion) merTools/NEWS.md0000644000176200001440000001715213466135366013177 0ustar liggesusers# NEWS ## merTools 0.5.0 ### New Features - `subBoot` now works with `glmerMod` objects as well - `reMargins` a new function that allows the user to marginalize the prediction over breaks in the distribution of random effect distributions, see `?reMargins` and the new `reMargins` vignette (closes #73) ### Bug fixes - Fixed an issue where known convergence errors were issuing warnings and causing the test suite to not work - Fixed an issue where models with a random slope, no intercept, and no fixed term were unable to be predicted (#101) - Fixed an issue with shinyMer not working with substantive fixed effects (#93) ## merTools 0.4.2 ### New Features - Parallel fitting of `merModLists` is now supported using the `future.apply` package and the `future_lapply` functions, optionally - Reduced package installation surface by eliminating unnecessary packages in the `Suggests` field ### Bug fixes - Fixed a bug (#94) where `predictInterval()` would return a data.frame of the wrong dimensions when predicting a single row of observations for a `glm` - Fixed a bug (#96) related to `rstanarm` dependencies in the package vignette - Switched 
from `dontrun` to `donttest` for long-running examples (CRAN compliance) - Fixed and made more clear the generics applying to `merModList` objects (#92) ## merTools 0.4.1 ### New Features - Standard errors reported by `merModList` functions now apply the Rubin correction for multiple imputation ### Bug fixes - Contribution by Alex Whitworth (@alexWhitworth) adding error checking to plotting functions - The vignettes have been shortened and unit tests reorganized to facilitate Travis-CI builds and reduce CRAN build burden ## merTools 0.4.0 ### New Features - Added vignette on using multilevel models with multiply imputed data - Added `fixef` and `ranef` generics for `merModList` objects - Added `fastdisp` generic for `merModList` - Added `summary` generic for `merModList` - Added `print` generic for `merModList` - Documented all generics for `merModList` including examples and a new imputation vignette - Added `modelInfo` generic for `merMod` objects that provides simple summary stats about a whole model ### Bug Fixes - Fix bug that returned NaN for `std.error` of a multiply imputed `merModList` when calling `modelRandEffStats` - Fixed bug in `REimpact` where some column names in `newdata` would prevent the prediction intervals from being computed correctly. Users will now be warned. - Fixed bug in `wiggle` where the documentation incorrectly stated the arguments to the function and did not describe the function correctly ## merTools 0.3.1 - Update the `readme.rmd` to package graphics with the R package, per CRAN ## merTools 0.3.0 - Improve handling of formulas. If the original `merMod` has functions specified in the formula, the `draw` and `wiggle` functions will check for this and attempt to respect these variable transformations. Where this is not possible a warning will be issued. Most common transformations are respected as long as the original variable is passed untransformed to the model. - Change the calculations of the residual variance. Previously residual variance was used to inflate both the variance around the fixed parameters and around the predicted values themselves. This was incorrect and resulted in overly conservative estimates. Now the residual variance is appropriately only used around the final predictions - Rebuilt the readme.md to include new information about new features - New option for `predictInterval` that allows the user to return the full interval, the fixed component, the random component, or the fixed and each random component separately for each observation - Fixed a bug with slope+intercept random terms that caused a miscalculation of the random component - Add comparison to `rstanarm` to the Vignette - Make `expectedRank` output more `tidy`-like and allow the function to calculate expected rank for all terms at once - Note, this breaks the API by changing the names of the columns in the output of this function - Remove tests that test for timing to avoid issues with R-devel JIT compiler - Remove `plyr` and replace with `dplyr` - Fix issue #62 `varList` will now throw an error if `==` is used instead of `=` - Fix issue #54 `predictInterval` did not include random effects in calculations when `newdata` had more than 1000 rows and/or user specified `parallel=TRUE`. Note: fix was to disable the `.paropts` option for `predictInterval` ... user can still specify for *temporary* backward compatibility but this should be either removed or fixed in the permanent solution. 
- Fix issue #53 about problems with `predictInterval` when only specific levels of a grouping factor are in `newdata` with the colon specification of interactions - Fix issue #52 ICC wrong calculations ... we just needed to square the standard deviations that we pulled ## merTools 0.2.1 - Fix dependency on `lme4` to ensure compatibility with latest changes. ## merTools 0.2 ### Bug fixes - Coerce `dplyr` `tbl` and `tbl_df` objects to data.frames when they are passed to `predictInterval` and issue a warning - Try to coerce other data types passed to `newdata` in `predictInterval` before failing if coercion is unsuccessful - Numeric stabilization of unit tests by including seed values for random tests - Fix handling of models with nested random effect terms (GitHub #47) - Fix vignette images ### New Functionality - Substantial performance enhancement for `predictInterval` which includes better handling of large numbers of parameters and simulations, performance tweaks for added speed (~10x), and parallel backend support (currently not optimized) - Add support for `probit` models and limited support for other `glmm` link functions, with warning (still do not know how to handle sigma parameter for these) - Add ability for user-specified seed for reproducibility - Add support for `blmer` objects from the `blme` package - Add a `merModList` object for lists of `merMod` objects fitted to subsets of a dataset, useful for imputation or for working with extremely large datasets - Add a `print` method for `merModList` to mimic output of `summary.merMod` - Add a `VarCorr` method for `merModList` - Add new package data to demonstrate replication from selected published texts on multilevel modeling using different software (1982 High School and Beyond Survey data) ### Other changes - Changed the default `n.sims` for the `predictInterval` function from 100 to 1,000 to give better coverage and reflect performance increase - Changed the default for `level` in `predictInterval` to be 0.8 instead of 0.95 to reflect that 0.95 prediction intervals are more conservative than most users need ### Future changes - For the next release (1.0) we are considering a permanent switch to C++ RMVN sampler courtesy of Giri Gopalan 's excellent FastGP ## merTools 0.1 - Initial release ### New Functions - Provides `predictInterval` to allow prediction intervals from `glmer` and `lmer` objects - Provides `FEsim` and `REsim` to extract distributions of model parameters - Provides `shinyMer` an interactive `shiny` application for exploring `lmer` and `glmer` models - Provides `expectedRank` function to interpret the ordering of effects - Provides `REimpact` to simulate the impact of grouping factors on the outcome - Provides `draw` function to allow user to explore a specific observation - Provides `wiggle` function for user to build a simulated set of counterfactual cases to explore merTools/data/0000755000176200001440000000000013402510754012770 5ustar liggesusersmerTools/data/hsb.rda0000644000176200001440000010616113402510754014241 0ustar liggesusersBZh91AY&SY xFIPh.m-fv36eݧ;Ͳmcfmbv3clQX m.P7N3 PAU7 *|T +fͶښsmi*fΚig-l1ųl$ZnKFDm[muFmYٛf-k[cmY5ڶ,4l͙5> PR).|lٓenu͙i uZmӈB-1L-2Z2,ՙUOh@ *RzF& !Bd4i!SiLm=S=CCPhj 4A DAa2ddɐЍMSy# 7jz`DJ?M=I觩S4 0>#RoJl1QV,R "0 !`A AHyJ0cH1#D` ADEE@QbU ,ED-*UEDZ"Q%{>/6yd2&@bFtOD*|ދ波T;8"ɀWk -'Tv? 
[binary contents of merTools/data/hsb.rda (bzip2-compressed R data) omitted]
"/@'gKŭۮ D&2$ #6oZmK)eFd'$dd#*5ڎv6R{ ˬ{zLaB]x=\f\E:B̒y0C&Q8- ڠTz Ǐ7JiZӸ\a``tP<^Hr/OCc4b$S (YZ"{5N=͔w&U?r'& ;|MaErU|@?ސz{b1__}V^5۬as[ymk =>)P<.9RD VUAGWb}k%rqYϦjNЄ''}&x3*auo(I?-"2~?S/Mߙ| 6 G.6 %ªVeo@ }FkoY&IħR hzg{W+.ub@PRNpV,a7AeKҁ 꽐(䔒 (*C_U?ehժy0 e{0$zh:z,}81i\]^7swWᵁ4P}h2\3# gʕn V2ls  'g`ڛ}qQ()LI =˻ɝ0Ndg&^=~6% i~gʵs4HdXJ_,sV6$Q86yN-arXbr}~5:&͕Bp@ٳ$n1"Pn*6?.p!merTools/R/0000755000176200001440000000000013466135366012274 5ustar liggesusersmerTools/R/shinyMer.R0000644000176200001440000000410513402510754014201 0ustar liggesusers#' Launch a shiny app to explore your merMod interactively #' #' \code{shinyMer} launches a shiny app that allows you to interactively #' explore an estimated merMod using functions from \code{merTools}. #' #' @param merMod An object of class "merMod". #' #' @param simData A data.frame to make predictions from (optional). If #' NULL, then the user can only make predictions using the data in #' the frame slot of the merMod object. #' #' @param pos The position of the environment to export function arguments to. #' Defaults to 1, the global environment, to allow shiny to run. #' #' @return A shiny app #' #' @import ggplot2 #' @importFrom shiny shinyApp #' @importFrom shiny fluidPage #' @importFrom shiny titlePanel #' @importFrom shiny sidebarLayout #' @importFrom shiny sidebarPanel #' @importFrom shiny radioButtons #' @importFrom shiny numericInput #' @importFrom shiny checkboxInput #' @importFrom shiny actionButton #' @importFrom shiny mainPanel #' @importFrom shiny tabsetPanel #' @importFrom shiny tabPanel #' @importFrom shiny h3 #' @importFrom shiny textOutput #' @importFrom shiny plotOutput #' @importFrom shiny downloadButton #' @importFrom shiny em #' @importFrom shiny reactiveValues #' @importFrom shiny eventReactive #' @importFrom shiny observeEvent #' @importFrom shiny reactive #' @importFrom shiny renderPrint #' @importFrom shiny renderPlot #' @importFrom shiny isolate #' @importFrom shiny renderPrint #' @importFrom shiny downloadHandler #' @importFrom shiny strong #' @importFrom shiny runApp #' @export shinyMer <- function(merMod, simData = NULL, pos = 1) { envir = as.environment(pos) if(exists("simData")){ expParm <- function(x, y) assign(".shinyMerPar", list("merMod" = x, "simData" = y), envir = envir) expParm(x = merMod, y = simData) } else{ expParm2 <- function(x) assign(".shinyMerPar", list("merMod" = x, "simData" = NULL), envir = envir) expParm2(x = merMod) } appDir <- system.file("shiny-apps", "shinyMer", package = "merTools") shiny::runApp(appDir, display.mode = "normal") } merTools/R/merExtract.R0000644000176200001440000001577313460722176014545 0ustar liggesusers#' @title Extracts random effects #' @name REextract #' @description Extracts random effect terms from an lme4 model #' @param merMod a merMod object from the lme4 package #' @return a data frame with the following columns #' \describe{ #' \item{groupFctr}{The name of the grouping factor associated with the random effects} #' \item{groupID}{The level of the grouping factor associated with the random effects} #' \item{'term'}{One column per random effect, the name is derived from the merMod} #' \item{'term'_se}{One column per random effect, the name is derived from the merMod} #' } #' @examples #' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' rfx <- REextract(m2) #' #Note the column names #' head(rfx) #' @export REextract <- function(merMod){ stopifnot(inherits(merMod, "merMod")) out <- lme4::ranef(merMod, condVar = TRUE) lvlNames <- names(out) reDims <- length(out) tmp.out <- 
vector("list", reDims) for(i in c(1:reDims)){ tmp.out[[i]] <- out[[i]] tmp.out[[i]]$groupFctr <- lvlNames[i] tmp.out[[i]]$groupID <- row.names(out[[i]]) if(ncol(out[[i]]) > 1){ tmp.out.se <- apply(attr(out[[i]], which = "postVar"), 3, function(x) sqrt(diag(x))) tmp.out.se <- as.data.frame(t(tmp.out.se)) colnames(tmp.out.se) <- paste0(names(out[[i]]), "_se") tmp.out[[i]] <- cbind(tmp.out[[i]], tmp.out.se) } else { tmp.out.se <- sapply(attr(out[[i]], which = "postVar"), sqrt) names(tmp.out.se) <- paste0(names(out[[i]]), "_se") tmp.out[[i]] <- cbind(tmp.out[[i]], tmp.out.se) names(tmp.out[[i]])[4] <- paste0(names(out[[i]]), "_se") } } dat <- dplyr::bind_rows(tmp.out) # reorg output dat <- dat[, c("groupFctr", "groupID", names(dat)[!names(dat) %in% c("groupFctr", "groupID")])] return(dat) } #' Simulate random effects from merMod #' \code{REsim} simulates random effects from merMod object posterior distributions #' @param merMod a merMod object from the lme4 package #' @param n.sims number of simulations to use #' @param oddsRatio logical, should parameters be converted to odds ratios? #' @param seed numeric, optional argument to set seed for simulations #' @importFrom arm sim #' @import lme4 #' @return a data frame with the following columns #' \describe{ #' \item{\code{groupFctr}}{Name of the grouping factor} #' \item{\code{groupID}}{Level of the grouping factor} #' \item{\code{term}}{Name of random term (intercept/coefficient)} #' \item{\code{mean}}{Mean of the simulations} #' \item{\code{median}}{Median of the simulations} #' \item{\code{sd}}{Standard deviation of the simulations, \code{NA} if \code{oddsRatio=TRUE}} #' } #' @details Use the Gelman sim technique to build empirical Bayes estimates. #' Uses the sim function in the arm package #' @examples #' require(lme4) #' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' re2 <- REsim(m2, 25) #' head(re2) #' @export REsim <- function(merMod, n.sims = 200, oddsRatio = FALSE, seed=NULL){ stopifnot(inherits(merMod, "merMod")) if (!is.null(seed)) set.seed(seed) else if (!exists(".Random.seed", envir = .GlobalEnv)) runif(1) mysim <- arm::sim(merMod, n.sims = n.sims) reDims <- length(mysim@ranef) tmp.out <- vector("list", reDims) names(tmp.out) <- names(mysim@ranef) for(i in c(1:reDims)){ zed <- apply(mysim@ranef[[i]], c(2, 3), function(x) as.data.frame(x) %>% dplyr::summarise_all(.funs = c("mean", "median", "sd"))) zed <- bind_rows(zed) zed$X1 <- rep(dimnames(mysim@ranef[[i]])[[2]], length(dimnames(mysim@ranef[[i]])[[3]])) zed$X2 <- rep(dimnames(mysim@ranef[[i]])[[3]], each = length(dimnames(mysim@ranef[[i]])[[2]])) tmp.out[[i]] <- zed; rm(zed) tmp.out[[i]]$groupFctr <- names(tmp.out)[i] tmp.out[[i]]$X1 <- as.character(tmp.out[[i]]$X1) tmp.out[[i]]$X2 <- as.character(tmp.out[[i]]$X2) } dat <- do.call(rbind, tmp.out) dat$groupID <- dat$X1; dat$X1 <- NULL dat$term <- dat$X2; dat$X2 <- NULL dat <- dat[, c("groupFctr", "groupID", "term", "mean", "median", "sd")] rownames(dat) <- NULL if(oddsRatio == TRUE){ dat$median <- exp(dat$median) dat$mean <- exp(dat$mean) dat$sd <- NA # don't know how to do SE of odds ratios currently return(dat) } else{ return(dat) } } #' Simulate fixed effects from merMod #' \code{FEsim} simulates fixed effects from merMod object posterior distributions #' @param merMod a merMod object from the lme4 package #' @param n.sims number of simulations to use #' @param oddsRatio logical, should parameters be converted to odds ratios? 
#' @param seed numeric, optional argument to set seed for simulations #' @importFrom arm sim #' @import lme4 #' @return a data frame with the following columns #' \describe{ #' \item{\code{term}}{Name of fixed term (intercept/coefficient)} #' \item{\code{mean}}{Mean of the simulations} #' \item{\code{median}}{Median of the simulations} #' \item{\code{sd}}{Standard deviation of the simulations, \code{NA} if \code{oddsRatio=TRUE}} #' } #' @details Use the Gelman sim technique to build fixed effect estimates and #' confidence intervals. Uses the sim function in the arm package #' @examples #' require(lme4) #' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' fe2 <- FEsim(m2, 25) #' head(fe2) #' @export FEsim <- function(merMod, n.sims = 200, oddsRatio=FALSE, seed=NULL){ stopifnot(inherits(merMod, "merMod")) if (!is.null(seed)) set.seed(seed) else if (!exists(".Random.seed", envir = .GlobalEnv)) runif(1) mysim <- arm::sim(merMod, n.sims = n.sims) means <- apply(mysim@fixef, MARGIN = 2, mean) medians <- apply(mysim@fixef, MARGIN = 2, median) sds <- apply(mysim@fixef, MARGIN =2, sd) dat <- data.frame(term = names(means), mean = means, median = medians, sd = sds, row.names=NULL) if(oddsRatio == TRUE){ dat$median <- exp(dat$median) dat$mean <- exp(dat$mean) dat$sd <- NA # don't know how to do SE of odds ratios currently return(dat) } else{ return(dat) } } #' @title Estimate the Root Mean Squared Error (RMSE) for a lmerMod #' @name RMSE.merMod #' @description Extract the Root Mean Squared Error for a lmerMod object #' @param merMod a lmerMod object from the lme4 package #' @param scale logical, should the result be returned on the scale of #' response variable standard deviations? #' @import lme4 #' @return a numeric which represents the RMSE #' @examples #' require(lme4) #' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' RMSE.merMod(m2) #' @export RMSE.merMod <- function(merMod, scale = FALSE){ stopifnot(inherits(merMod, "lmerMod") || inherits(merMod, "blmerMod")) # Express RMSE as percentage of dependent variable standard deviation dvSD <- sd(merMod@frame[, 1]) RMSE <- sqrt(mean(residuals(merMod)^2)) if(scale == TRUE){ return(RMSE/dvSD) } else{ return(RMSE) } } merTools/R/helpers.R0000644000176200001440000002755313466135366014075 0ustar liggesusers#Helpers # Function to take only rows that form distinct levels of factors # Need to figure out how to build a model matrix better. trimModelFrame <- function(data){ # Identify numerics nums <- sapply(data, is.numeric) vars <- names(nums[!nums == TRUE]) dataList <- vector(mode = "list", length = length(vars)) names(dataList) <- vars for(i in vars){ dataList[[i]] <- data[!duplicated(data[, i]), ,drop=FALSE] } newdat <- do.call(rbind, dataList) newdat <- newdat[!duplicated(newdat),] return(newdat) } # FROM LME4 residDF.merMod <- function(object) { npar <- length(object@beta) + length(object@theta) + object@devcomp[["dims"]][["useSc"]] nobs <- nrow(object@frame) ## TODO: how do we feel about counting the scale parameter ??? return(nobs - npar) } # from ARM as.matrix.VarCorr easyVarCorr <- function (varc, useScale, digits){ # VarCorr function for lmer objects, altered as follows: # 1. specify rounding # 2. print statement at end is removed # 3. reMat is returned # 4. 
last line kept in reMat even when there's no error term sc <- attr(varc, "sc")[[1]] if(is.na(sc)) sc <- 1 # recorr <- lapply(varc, function(el) el@factors$correlation) recorr <- lapply(varc, function(el) attr(el, "correlation")) #reStdDev <- c(lapply(recorr, slot, "sd"), list(Residual = sc)) reStdDev <- c(lapply(varc, function(el) attr(el, "stddev")), list(Residual = sc)) reLens <- unlist(c(lapply(reStdDev, length))) reMat <- array('', c(sum(reLens), 4), list(rep('', sum(reLens)), c("Groups", "Name", "Variance", "Std.Dev."))) reMat[1+cumsum(reLens)-reLens, 1] <- names(reLens) reMat[,2] <- c(unlist(lapply(reStdDev, names)), "") # reMat[,3] <- format(unlist(reStdDev)^2, digits = digits) # reMat[,4] <- format(unlist(reStdDev), digits = digits) reMat[,3] <- fround(unlist(reStdDev)^2, digits) reMat[,4] <- fround(unlist(reStdDev), digits) if (any(reLens > 1)) { maxlen <- max(reLens) corr <- do.call("rbind", lapply(recorr, function(x, maxlen) { x <- as(x, "matrix") # cc <- format(round(x, 3), nsmall = 3) cc <- fround (x, digits) cc[!lower.tri(cc)] <- "" nr <- dim(cc)[1] if (nr >= maxlen) return(cc) cbind(cc, matrix("", nr, maxlen-nr)) }, maxlen)) colnames(corr) <- c("Corr", rep("", maxlen - 1)) reMat <- cbind(reMat, rbind(corr, rep("", ncol(corr)))) } # if (!useScale) reMat <- reMat[-nrow(reMat),] if (useScale<0) reMat[nrow(reMat),] <- c ("No residual sd", rep("",ncol(reMat)-1)) return (reMat) } #' Count the number of random effect terms #' @source From lme4 package #' @keywords internal reTermCount <- function(model){ sum(unlist(lapply(as.list(VarCorr(model)), function(x) sqrt(length(x))))) } #' Get names of random effect terms in a model object #' @param model a merMod object with random effect terms #' @return a data.frame with rows for each term with columns naming the grouping #' term and the effect type #' @keywords internal reTermNames <- function(model){ tmp <- NA for(i in 1:length(names(ngrps(model)))){ cons <- names(ngrps(model))[i] vars <- paste(cons, unlist(dimnames(VarCorr(model)[[i]])[1]), sep = "-") tmp <- c(tmp, vars) } tmp <- na.omit(tmp) tmp <- t(as.data.frame(strsplit(tmp, "-"))) row.names(tmp) <- NULL colnames(tmp) <- c("group", "effect") tmp <- as.data.frame(tmp) tmp$group <- as.character(tmp$group) tmp$effect <- as.character(tmp$effect) return(tmp) } #' Clean formula #' @description a function to modify the formula for a merMod object to create #' a model matrix with all predictor terms in both the group level and fixed #' effect level #' @param model a merMod object from lme4 #' @return a formula object #' @keywords internal formulaBuild <- function(model){ slopeFX <- setdiff(all.vars(model@call$formula), names(ngrps(model))) missVar <- setdiff(slopeFX, all.vars(nobars(model@call$formula))) newForm <- nobars(model@call$formula) if(length(missVar > 0)){ newForm <- paste(Reduce(paste, deparse(newForm)), paste(missVar, collapse = " +"), sep = " + ") } newForm <- as.formula(newForm) return(newForm) } ##' Random Effects formula only ##' @param f a model formula ##' @param response logical, should the result include the response ##' @return a formula ##' @keywords internal reOnly <- function(f,response=FALSE) { response <- if (response && length(f)==3) f[[2]] else NULL reformulate(paste0("(", vapply(findbars(f), safeDeparse, ""), ")"), response=response) } safeDeparse <- function(x, collapse=" ") paste(deparse(x, 500L), collapse=collapse) #' Build model matrix #' @description a function to create a model matrix with all predictor terms in #' both the group level and fixed effect 
level #' @param model a merMod object from lme4 #' @param newdata a data frame to construct the matrix from #' @param character which matrix to return,default is full matrix with fixed and #' random terms, other options are "fixed" and "random" #' @source Taken from predict.merMod in lme4 #' @import lme4 #' @keywords internal buildModelMatrix <- function(model, newdata, which = "full"){ X <- getME(model, "X") X.col.dropped <- attr(X, "col.dropped") if (is.null(newdata)) { newdata <- model@frame } RHS <- formula(substitute(~R, list(R = RHSForm(formula(model, fixed.only=TRUE))))) Terms <- terms(model,fixed.only=TRUE) mf <- model.frame(model, fixed.only = FALSE) isFac <- vapply(mf, is.factor, FUN.VALUE = TRUE) isFac[attr(Terms,"response")] <- FALSE orig_levs <- if (length(isFac)==0) NULL else lapply(mf[isFac],levels) # Suppress warnings about non-factors classified as factors # These are false alarms related to grouping terms mfnew <- suppressWarnings(model.frame(delete.response(Terms), newdata, na.action="na.pass", xlev=orig_levs) ) X <- model.matrix(RHS, data=mfnew, contrasts.arg=attr(X,"contrasts")) offset <- 0 # rep(0, nrow(X)) tt <- terms(model) if (!is.null(off.num <- attr(tt, "offset"))) { for (i in off.num) offset <- offset + eval(attr(tt,"variables")[[i + 1]], newdata) } fit.na.action <- attr(mfnew,"na.action") if(is.numeric(X.col.dropped) && length(X.col.dropped) > 0) { X <- X[, -X.col.dropped, drop=FALSE] } re.form <- reOnly(formula(model)) # RE formula only newRE <- mkNewReTrms(object = model, newdata = newdata, re.form, na.action="na.pass", allow.new.levels = TRUE) reMat <- t(as.matrix(newRE$Zt)) reMat <- as.matrix(reMat) colnames(reMat) <- rownames(newRE$Zt) mm <- cbind(X, reMat) if(which == "full"){ return(mm) } else if(which == "fixed"){ return(X) } else if(which == "random"){ return(reMat) } } #' Calculate the intraclass correlation using mixed effect models #' #' @param outcome a character representing the variable of the outcome #' @param group a character representing the name of the grouping term #' @param data a data.frame #' @param subset an optional subset #' #' @return a numeric for the intraclass correlation #' @export #' @import lme4 #' @examples #' data(sleepstudy) #' ICC(outcome = "Reaction", group = "Subject", data = sleepstudy) ICC <- function(outcome, group, data, subset=NULL){ fm1 <- as.formula(paste(outcome, "~", "1 + (1|", group, ")")) if(length(table(data[, outcome])) == 2){ nullmod <- glmer(fm1, data = data, subset = subset, family = 'binomial') } else { nullmod <- lmer(fm1, data = data, subset = subset) } between <- as.numeric(attr(VarCorr(nullmod)[[1]], "stddev")) within <- arm::sigma.hat(nullmod)$sigma$data ICC <- between^2 / (within^2 + between^2) return(ICC) } #' Utility function to make RE terms objects #' @param object a model object #' @param newdata a data.frame to build RE terms for #' @param re.form a random effect formula to simulate, generated by #' \code{\link{reOnly}} #' @param na.action an object describing how NA values should be handled in newdata #' @param allow.new.levels logical, should new levels be allowed in factor variables #' @return a random effect terms object for a merMod #' @import lme4 #' @keywords internal mkNewReTrms <- function(object, newdata, re.form=NULL, na.action=na.pass, allow.new.levels=FALSE) { if (is.null(newdata)) { rfd <- mfnew <- model.frame(object) } else { mfnew <- model.frame(delete.response(terms(object, fixed.only=TRUE)), newdata, na.action=na.action) if(packageVersion("lme4") < "1.1.9"){ old <- TRUE } 
else{ old <- FALSE } if (old) { rfd <- na.action(newdata) if (is.null(attr(rfd,"na.action"))) attr(rfd,"na.action") <- na.action } else { newdata.NA <- newdata if (!is.null(fixed.na.action <- attr(mfnew,"na.action"))) { newdata.NA <- newdata.NA[-fixed.na.action,] } tt <- delete.response(terms(object,random.only=TRUE)) ## need to let NAs in RE components go through -- they're handled downstream rfd <- model.frame(tt,newdata.NA,na.action=na.pass) if (!is.null(fixed.na.action)) attr(rfd,"na.action") <- fixed.na.action } } if (inherits(re.form, "formula")) { ## DROP values with NAs in fixed effects if (length(fit.na.action <- attr(mfnew,"na.action")) > 0) { newdata <- newdata[-fit.na.action,] } ## note: mkReTrms automatically *drops* unused levels # rfd = model frame ReTrms <- mkReTrms(findbars(re.form[[2]]), rfd) ## update Lambdat (ugh, better way to do this?) ReTrms <- within(ReTrms, Lambdat@x <- unname(getME(object,"theta")[Lind])) # if (!allow.new.levels && any(vapply(ReTrms$flist, anyNA, NA))) stop("NAs are not allowed in prediction data", " for grouping variables unless allow.new.levels is TRUE") ns.re <- names(re <- ranef(object)) nRnms <- names(Rcnms <- ReTrms$cnms) if (!all(nRnms %in% ns.re)) stop("grouping factors specified in re.form that were not present in original model") new_levels <- lapply(ReTrms$flist, function(x) levels(factor(x))) ## fill in/delete levels as appropriate re_x <- Map(function(r,n) levelfun(r,n,allow.new.levels=allow.new.levels), re[names(new_levels)], new_levels) re_new <- lapply(seq_along(nRnms), function(i) { rname <- nRnms[i] if (!all(Rcnms[[i]] %in% names(re[[rname]]))) stop("random effects specified in re.form that were not present in original model") re_x[[rname]][,Rcnms[[i]]] }) re_new <- unlist(lapply(re_new, t)) } Zt <- ReTrms$Zt attr(Zt, "na.action") <- attr(re_new, "na.action") <- attr(mfnew, "na.action") list(Zt=Zt, b=re_new, Lambdat = ReTrms$Lambdat) } #' Parse merMod formulas #' @keywords internal RHSForm <- function(form,as.form=FALSE) { rhsf <- form[[length(form)]] if (as.form) reformulate(deparse(rhsf)) else rhsf } #' Parse merMod levels #' @keywords internal levelfun <- function(x,nl.n,allow.new.levels=FALSE) { if (!all(nl.n %in% rownames(x))) { if (!allow.new.levels) stop("new levels detected in newdata") newx <- as.data.frame(matrix(0, nrow=length(nl.n), ncol=ncol(x), dimnames=list(nl.n, names(x)))) newx[rownames(x),] <- x x <- newx } if (!all(r.inn <- rownames(x) %in% nl.n)) { x <- x[r.inn,,drop=FALSE] } return(x) } merTools/R/merTools-package.r0000644000176200001440000000574513402510754015653 0ustar liggesusers#' merTools: Provides methods for extracting and exploring results from merMod #' objects in the lme4 package. #' #' The merTools package contains convenience tools for extracting useful #' information from and exploring the implications of merMod objects created by #' the lme4 package. These convenience functions are especially useful for #' merMod objects that take a long time to estimate due to their complexity or #' because they are estimated on very large samples. 
#' #' See the vignettes for usage examples #' #' @section merMod extraction/utility functions: #' #' \itemize{ #' \item \code{\link{fastdisp}} #' \item \code{\link{superFactor}} #' \item \code{\link{REextract}} #' \item \code{\link{REsim}} #' \item \code{\link{FEsim}} #' \item \code{\link{RMSE.merMod}} #' \item \code{\link{thetaExtract}} #' \item \code{\link{REquantile}} #' } #' #' @section merMod exploration functions: #' #' \itemize{ #' \item \code{\link{plotREsim}} #' \item \code{\link{plotFEsim}} #' \item \code{\link{draw}} #' \item \code{\link{wiggle}} #' \item \code{\link{subBoot}} #' \item \code{\link{predictInterval}} #' \item \code{\link{expectedRank}} #' \item \code{\link{REimpact}} #' \item \code{\link{shinyMer}} #' } #' #' @name merTools #' @docType package #' @aliases merTools merTools-package NULL #' A subset of data from the 1982 High School and Beyond survey used as examples for HLM software #' @description A key example dataset used for examples in the HLM software manual. #' Included here for use in replicating HLM analyses in R. #' @format A data frame with 7,185 observations on the following 8 variables. #' \describe{ #' \item{\code{schid}}{a numeric vector, 160 unique values} #' \item{\code{mathach}}{a numeric vector for the performance on a standardized math assessment} #' \item{\code{female}}{a numeric vector coded 0 for male and 1 for female} #' \item{\code{ses}}{a numeric measure of student socio-economic status} #' \item{\code{minority}}{a numeric vector coded 0 for white and 1 for non-white students} #' \item{\code{schtype}}{a numeric vector coded 0 for public and 1 for private schools} #' \item{\code{meanses}}{a numeric, the average SES for each school in the data set} #' \item{\code{size}}{a numeric for the number of students in the school} #' } #' @details The data file used for this presentation is a subsample from the #' 1982 High School and Beyond Survey and is used extensively in #' Hierarchical Linear Models by Raudenbush and Bryk. It consists of 7,185 students #' nested in 160 schools. #' @source Data made available by UCLA Institute for Digital Research and Education #' (IDRE) online: \url{https://stats.idre.ucla.edu/other/hlm/hlm-mlm/introduction-to-multilevel-modeling-using-hlm} #' @references Stephen W. Raudenbush and Anthony S. Bryk (2002). Hierarchical #' Linear Models: Applications and Data Analysis Methods (2nd ed.). SAGE. #' @examples #' data(hsb) #' head(hsb) "hsb" merTools/R/parallel.R0000644000176200001440000000140013402510754014172 0ustar liggesusers# Parallel functions #' Set up parallel environment #' #' @return Nothing setup_parallel <- function() { if (!requireNamespace("foreach", quietly = TRUE)) { # EXCLUDE COVERAGE START stop("foreach package required for parallel future operation", call. = FALSE) # EXCLUDE COVERAGE END } if (foreach::getDoParWorkers() == 1) { # EXCLUDE COVERAGE START warning("No parallel backend registered", call. = TRUE) # EXCLUDE COVERAGE END } } # if (.parallel) { # setup_parallel() # # i <- seq_len(n) # fe_call <- as.call(c(list(quote(foreach::foreach), i = i), .paropts)) # fe <- eval(fe_call) # # result <- foreach::`%dopar%`(fe, do.ply(i)) # } else { # result <- loop_apply(n, do.ply) # } merTools/R/REmargins.R0000644000176200001440000002234713466135366014316 0ustar liggesusers#' Calculate the predicted value for each observation across the distribution #' of the random effect terms. 
#' #' \code{REmargins} calculates the average predicted value for each row of a #' new data frame across the distribution of \code{\link{expectedRank}} for a #' merMod object. This allows the user to make meaningful comparisons about the #' influence of random effect terms on the scale of the response variable, #' for user-defined inputs, and accounting for the variability in grouping terms. #' #' The function predicts the response at every level in the random effect term #' specified by the user. Then, the expected rank of each group level is binned #' to the number of bins specified by the user. Finally, a weighted mean of the #' fitted value for all observations in each bin of the expected ranks is #' calculated using the inverse of the variance as the weight -- so that less #' precise estimates are downweighted in the calculation of the mean for the bin. #' Lastly, a standard error for the bin mean is calculated. #' #' @param merMod An object of class merMod #' #' @param newdata a data frame of observations to calculate group-level differences #' for #' #' @param groupFctr The name of the grouping factor over which the random #' coefficient of interest varies. This is the variable to the right of the #' pipe, \code{|}, in the [g]lmer formula. This parameter is optional; if not #' specified, the calculation is performed for the first effect listed #' by \code{ranef}. If the length is > 1 then the combined effect of all #' listed groups will be calculated and marginalized over co-occurrences of those #' groups if desired. #' #' @param term The name of the random coefficient of interest. This is the #' variable to the left of the pipe, \code{|}, in the [g]lmer formula. Partial #' matching is attempted on the intercept term so the following character #' strings will all return rankings based on the intercept (\emph{provided that #' they do not match the name of another random coefficient for that factor}): #' \code{c("(Intercept)", "Int", "intercep", ...)}. #' #' @param breaks an integer representing the number of bins to divide the group #' effects into, the default is 4. #' @param .parallel logical, should parallel computation be used, the default is FALSE #' #' @param ... additional arguments to pass to \code{\link{predictInterval}} #' #' @return A data.frame with a row for each unique combination of a case (row #' of \code{newdata}) and the grouping factor, term, and break: #' \describe{ #' \item{...}{The columns of the original data taken from \code{newdata}} #' \item{case}{The row number of the observation from newdata. Each row in newdata will be 
Each row in newdata will be #' repeated for all unique levels of the grouping_var, term, and breaks.} #' \item{grouping_var}{The grouping variable the random effect is being marginalized over.} #' \item{term}{The term for the grouping variable the random effect is being marginalized over.} #' \item{breaks}{The ntile of the effect size for \code{grouping_var} and \code{term}} #' \item{original_group_level}{The original grouping value for this \code{case}} #' \item{fit_combined}{The predicted value from \code{predictInterval} for this case simulated #' at the Nth ntile of the expected rank distribution of \code{grouping_var} and \code{term}} #' \item{upr_combined}{The upper bound of the predicted value.} #' \item{lwr_combined}{The lower bound of the predicted value.} #' \item{fit_XX}{For each grouping term in newdata the predicted value is decomposed into its #' fit components via predictInterval and these are all returned here} #' \item{upr_XX}{The upper bound for the effect of each grouping term} #' \item{lwr_XX}{The lower bound for the effect of each grouping term} #' \item{fit_fixed}{The predicted fit with all the grouping terms set to 0 (average)} #' \item{upr_fixed}{The upper bound fit with all the grouping terms set to 0 (average)} #' \item{lwr_fixed}{The lower bound fit with all the grouping terms set to 0 (average)} #' } #' #' #' @references #' Gatz, DF and Smith, L. The Standard Error of a Weighted Mean Concentration. #' I. Bootstrapping vs other methods. \emph{Atmospheric Environment}. #' 1995;11(2)1185-1193. Available at #' \url{http://www.sciencedirect.com/science/article/pii/135223109400210C} #' #' Cochran, WG. 1977. Sampling Techniques (3rd Edition). Wiley, New York. #' #' @seealso \code{\link{expectedRank}}, \code{\link{predictInterval}} #' @importFrom stats reshape #' @examples #' fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' mfx <- REmargins(merMod = fm1, newdata = sleepstudy[1:10,]) #' \donttest{ #' # You can also pass additional arguments to predictInterval through REimpact #' g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) #' margin_df <- REmargins(g1, newdata = InstEval[20:25, ], groupFctr = c("s"), #' breaks = 4) #' margin_df <- REmargins(g1, newdata = InstEval[20:25, ], groupFctr = c("d"), #' breaks = 3) #' } #' @export REmargins <- function(merMod, newdata = NULL, groupFctr = NULL, term = NULL, breaks = 4, .parallel = FALSE, ...){ # Validate inputs if (is.null(groupFctr)) { # If the user doesn't tell us which term to use, we take the first term groupFctr <- names(ranef(merMod))[1] } if (is.null(newdata)) { # If the user doesn't give us data, we take the whole dataset # TODO - how does performance scale to a large number of observations? 
newdata <- merMod@frame } # If the user doesn't tell us what term to use, we take all the terms if (is.null(term)) { term <- names(ranef(merMod)[[groupFctr]]) # Sub out intercept term[term == "(Intercept)"] <- "Intercept" } # This is a rough way to break the ER distribution into quantiles brks <- ceiling(seq(1, 100, by = 100/breaks)) # Fallback so we always take a 99th percentile effect (for the maximum) if (!99 %in% brks) { brks <- c(brks, 99) } # Inputs are validated - now we get the effect distribution # Generate the expected rank distribution ER_DF <- expectedRank(merMod, groupFctr = groupFctr, term = term) # With many effects there is a lot of duplication - drop duplicated pctER ER_DF <- ER_DF[!duplicated(ER_DF[, c("groupFctr", "term", "pctER")]), ] # Now we create a data frame to capture the factor levels of each groupFctr that # correspond to the right break in the expectedRank distribution of the random # effect grouping factor and term par_df <- expand.grid("grouping_var" = groupFctr, "term" = term, "breaks" = 1:breaks) # Keep only factor levels that have effects at the margins # Need to match closest value here # Find N closest values # Drop duplicates # For each combination build an index of candidate rows/effect levels # Then choose the level that has the most precise estimate within a # tolerance of the effect size for (trm in term) { for (i in seq_along(brks)) { # Compute each terms distance from the break rank_dist <- abs(ER_DF$pctER[ER_DF$term == trm] - brks[i]) # Get the index for the rank that minimizes the distance # TODO - how to break ties here? tmp <- which(rank_dist %in% rank_dist[order(rank_dist)][1]) # Store the result in the par_df object par_df$groupLevel[par_df$breaks == i & par_df$term == trm] <- ER_DF$groupLevel[tmp] } } # Get ready to expand the data sim_data <- as.data.frame(lapply(newdata, rep, each = nrow(par_df))) # sim_data now repeats each row of newdata by the number of rows in par_df # case labels the rows with an integer for later mapping sim_data$case <- rep(1:nrow(newdata), each = nrow(par_df)) sim_data <- cbind(sim_data, par_df) sim_data$original_group_level <- sim_data[, groupFctr] sim_data[, groupFctr] <- sim_data$groupLevel sim_data$groupLevel <- NULL # # Maybe strongly recommend parallel here? if (.parallel & requireNamespace("foreach", quietly = TRUE)) { # TODO use future here setup_parallel() out <- predictInterval(merMod, newdata = sim_data, which = "all", ...) out_w <- stats::reshape(out, direction = "wide", idvar = "obs", timevar = "effect", v.names = c("fit", "upr", "lwr"), sep = "_") out_w$obs <- NULL sim_data <- cbind(sim_data, out_w) } else if ( .parallel & !requireNamespace("foreach", quietly = TRUE)) { warning("foreach package is unavailable, parallel computing not available") } else { out <- predictInterval(merMod, newdata = sim_data, which = "all", ...) out_w <- stats::reshape(out, direction = "wide", idvar = "obs", timevar = "effect", v.names = c("fit", "upr", "lwr"), sep = "_") out_w$obs <- NULL sim_data <- cbind(sim_data, out_w) } # Case is the number of the row in newdata # obs is the variance among the selected random effects to marginalize over # So we want to collapse by case if we can return(sim_data) } merTools/R/subBoot.R0000644000176200001440000000626513466135366014045 0ustar liggesusers#' Extract theta parameters from a merMod model #' @description A convenience function that returns the theta parameters for a #' \code{\link{merMod}} object. 
#' @param merMod a valid merMod object #' #' @return a vector of the covariance, theta, parameters from a \code{\link{merMod}} #' @seealso merMod #' @export #' @examples #' (fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)) #' thetaExtract(fm1) #(a numeric vector of the covariance parameters) thetaExtract <- function(merMod){ stopifnot(class(merMod) %in% c("lmerMod", "glmerMod", "blmerMod", "bglmerMod")) return(merMod@theta) } #' Bootstrap a subset of an lme4 model #' #' @param merMod a valid merMod object #' @param n the number of rows to sample from the original data #' in the merMod object, by default will resample the entire model frame #' @param FUN the function to apply to each bootstrapped model #' @param R the number of bootstrap replicates, default is 100 #' @param seed numeric, optional argument to set seed for simulations #' @param warn logical, if TRUE, warnings from lmer will be issued, otherwise they will be suppressed #' default is FALSE #' @return a data.frame of parameters extracted from each of the R replications. #' The original values are appended to the top of the matrix. #' @details This function allows users to estimate parameters of a #' large merMod object using bootstraps on a subset of the data. #' @examples #' \donttest{ #' (fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)) #' resultMatrix <- subBoot(fm1, n = 160, FUN = thetaExtract, R = 20) #' } #' @export subBoot <- function(merMod, n = NULL, FUN, R = 100, seed = NULL, warn = FALSE){ if (missing(n)) { n <- nrow(merMod@frame) } if (!warn) { message("Warnings set to off by default, not all submodels may have converged.") } resultMat <- matrix(FUN(merMod), nrow = 1) tmp <- matrix(data=NA, nrow=R, ncol=ncol(resultMat)) resultMat <- rbind(resultMat, tmp); rm(tmp) if (!is.null(seed)) set.seed(seed) else if (!exists(".Random.seed", envir = .GlobalEnv)) runif(1) for(i in 1:R){ rows <- as.numeric(row.names(merMod@frame)) mysamp <- as.character(sample(rows, n, replace=TRUE)) # http://proteo.me.uk/2013/12/fast-subset-selection-by-row-name-in-r/ newdata <- merMod@frame[match(mysamp, rownames(merMod@frame)),] # Only for lmerMod if (!warn) { suppressWarnings({ if (class(merMod) == "lmerMod") { tmpMod <- lmer(formula(merMod), data = newdata) } else if (class(merMod) == "glmerMod") { tmpMod <- glmer(formula(merMod), data = newdata, family = merMod@call$family) } }) } else { if (class(merMod) == "lmerMod") { tmpMod <- lmer(formula(merMod), data = newdata) } else if (class(merMod) == "glmerMod") { tmpMod <- glmer(formula(merMod), data = newdata, family = merMod@call$family) } } resultMat[i + 1, ] <- FUN(tmpMod) } resultMat <- data.frame(param=resultMat) resultMat$replicate <- c("original", 1:R) return(resultMat) } merTools/R/merList.R0000644000176200001440000003773313420161276014040 0ustar liggesusers# Imputation functions #' Extract model information from a merMod #' #' @param object a merMod object #' #' @return Simple summary information about the object, number #' of observations, number of grouping terms, AIC, and residual standard deviation #' @export #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' modelInfo(mod[[1]]) #' lapply(mod, modelInfo) modelInfo <- function(object){ ngrps <- lapply(object@flist, function(x) length(levels(x))) out <- data.frame("n.obs" = getME(object, "devcomp")$dims["n"], "n.lvls" = length(ngrps), "AIC" = 
AIC(object), "sigma" = sigma(object)) row.names(out) <- NULL return(out) } # Functions to extract standard deviation of random effects from model #' Extract the standard deviation of the random effects from a merMod object #' #' @param model an object that inherits from class merMod #' #' @return a numeric vector for standard deviations of the random effects #' @export #' @examples #' fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' REsdExtract(fm1) REsdExtract <- function(model){ out <- unlist(lapply(VarCorr(model), attr, "stddev")) return(out) } #' Extract the correlations between the slopes and the intercepts from a model #' #' @param model an object that inherits from class merMod #' #' @return a numeric vector of the correlations among the effects #' @export #' @examples #' fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' REcorrExtract(fm1) REcorrExtract <- function(model){ out <- unlist(lapply(VarCorr(model), attr, "corre")) return(min(unique(out))) } #' Extract data.frame of random effect statistics from merMod List #' #' @param modList a list of multilevel models #' #' @return a data.frame #' @import dplyr #' @export #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' modelRandEffStats(mod) modelRandEffStats <- function(modList){ effList <- lapply(modList, tidy, effects = "ran_pars") effList <- do.call(rbind, effList) out <- effList %>% group_by(term, group) %>% summarize(est = mean(estimate), std.error = sd(estimate)) %>% rename(estimate = est) return(as.data.frame(out)) } #' Extract averaged fixed effect parameters across a list of merMod objects #' #' @param modList an object of class merModList #' @param ... additional arguments to pass to \code{\link{tidy}} #' #' @return a data.frame of the averaged fixed effect parameters #' @details The Rubin correction for combining estimates and standard errors from #' Rubin (1987) is applied to adjust for the within and between imputation variances. #' @export #' @importFrom broom tidy #' @import dplyr #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' modelFixedEff(mod) modelFixedEff <- function(modList, ...){ fixEst <- lapply(modList, tidy, effects = "fixed", ...) 
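  # fixEst is a list with one tidy data.frame of fixed-effect estimates per
  # component model; stack it row-wise so the Rubin-style pooling below can
  # group estimates and standard errors by term across the model list.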
fixEst <- do.call(rbind, fixEst) # Collapse # Rubin correction, get length of list ml <- length(modList) # Get between and within imputation variance, apply total correction # Calculate degree of freedom correction rubin <- fixEst %>% group_by(term) %>% mutate(mean_est = mean(estimate)) %>% mutate(est_ss = (estimate - mean_est)^2) %>% summarize(estimate = mean(estimate), within_var = mean(std.error), # compute within imputation variance between_var = mean(est_ss)) %>% # estimate the between imputation variance mutate(std.error = within_var + ((1 + 1/ml)*between_var), df = (ml-1)* (1 + within_var/((1 + 1/ml)*between_var))^2) # apply rubins total variance correction # add fallback if (any((((1 + 1/ml)*rubin$between_var)^2) < 0.000000001)) { warning("Between imputation variance is very small, are imputation sets too similar?") } # DEPRECATED method # out <- fixEst %>% dplyr::group_by(term) %>% # dplyr::summarize(estimate = mean(estimate), # std.error = mean(std.error)) rubin$statistic <- rubin$estimate / rubin$std.error rubin <- rubin %>% dplyr::select(term, estimate, std.error, statistic, df) return(as.data.frame(rubin)) } #' Extract fixed-effects estimates for a merModList #' #' @inheritParams lme4::fixef #' @return a named, numeric vector of fixed-effects estimates. #' @details Extract the estimates of the fixed-effects parameters from a list of #' fitted \code{merMod} models. Takes the mean of the individual \code{fixef} #' objects for each of the component models in the \code{merModList}. #' @export #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' fixef(mod) fixef.merModList <- function(object, add.dropped = FALSE, ...){ Reduce(`+`, lapply(object, fixef)) / length(object) } #' Extract random-effects estimates for a merModList #' #' @inheritParams lme4::ranef #' @return a named, numeric vector of random-effects estimates. #' @details Extract the estimates of the random-effects parameters from a list of #' fitted \code{merMod} models. Takes the mean of the individual \code{ranef} #' objects for each of the component models in the \code{merModList}. 
#' @export #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' ranef(mod) ranef.merModList <- function(object, ...){ levels <- getME(object[[1]], "n_rfacs") re <- vector(length = levels, mode = "list") for(i in seq_along(1:levels)){ # <- Reduce(`+`, lapply(object, ranef)[i]) / length(object) re[i] <- lapply(Reduce(`+`, lapply(object, ranef)[1]), function(x) x/length(object)) } names(re) <- names(ranef(object[[1]])) return(re) } #' Extract the variances and correlations for random effects from a merMod list #' @inheritParams lme4::VarCorr #' @param rdig the number of digits to round to, integer #' @return a list with two elements "stddev" and "correlation" for the standard #' deviations and correlations averaged across models in the list #' @export #' @import lme4 #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' VarCorr(mod) VarCorr.merModList <- function(x, sigma = 1, rdig = 3L){ modList <- x ngrps <- length(VarCorr(modList[[1]])) errorList <- vector(mode = 'list', length = ngrps) corrList <- vector(mode = 'list', length = ngrps) for(i in 1:ngrps){ subList <- lapply(modList, function(x) VarCorr(x)[[i]]) if(all(dim(subList[[1]]) == c(1, 1))){ subList <- mean(sqrt(unlist(subList))) errorList[[i]] <- subList names(errorList) <- "Intercept" corrList[[i]] <- matrix(1) dimnames(corrList[[i]]) <- list("(Intercept)", "(Intercept)") } else { errorList[[i]] <- apply(simplify2array(lapply(subList, attr, "stddev")), 1, mean) corrList[[i]] <- apply(simplify2array(lapply(subList, attr, "corre")), 1:2,mean) } } for(i in 1:length(errorList)){ if(is.null(names(errorList[[i]]))){ names(errorList[[i]]) <- "(Intercept)" } } for(i in 1:length(corrList)){ if(is.null(names(corrList[[i]])) & is.null(dim(corrList[[i]]))){ names(corrList[[i]]) <- "(Intercept)" } } names(errorList) <- names(ranef(modList[[1]])) names(corrList) <- names(ranef(modList[[1]])) return(list("stddev" = errorList, "correlation" = corrList)) } utils::globalVariables(c("term", "estimate","std.error")) #' Print the results of a merMod list #' #' @param object a modList of class merModList #' @param ... 
additional arguments #' #' @return summary content printed to console #' @export #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' print(mod) summary.merModList <- function(object, ...){ modList <- object args <- eval(substitute(alist(...))) if("digits" %in% names(args)){ digits <- args$digits } else{ digits <- 3 } summ.ml <- list() summ.ml$len <- length(modList) summ.ml$form <- modList[[1]]@call summ.ml$method <- summary(modList[[1]])$methTitle summ.ml$family <- summary(modList[[1]])$family summ.ml$fe <- modelFixedEff(modList) dimnames(summ.ml$fe)[[1]] <- summ.ml$fe$term # pfround(summ.ml$fe[, -1], digits) summ.ml$ngrps <- length(VarCorr(modList[[1]])) summ.ml$errorList <- VarCorr(modList)$stddev summ.ml$corrList <- VarCorr(modList)$correlation # lapply(errorList, pfround, digits) summ.ml$residError <- mean(unlist(lapply(modList, function(x) attr(VarCorr(x), "sc")))) summ.ml$ngrps <- lapply(modList[[1]]@flist, function(x) length(levels(x))) summ.ml$modn <- getME(modList[[1]], "devcomp")$dims["n"] summ.ml$mAIC <- mean(unlist(lapply(modList, AIC))) summ.ml$moDsigma.hat <- mean(unlist(lapply(modList, sigma))) class(summ.ml) <- "summary.merModList" return(summ.ml) } #' Summarize a merMod list #' #' @param x a modList of class merModList #' @param ... additional arguments #' #' @return a summary object of model information #' @export #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' summary(mod) print.merModList <- function(x, ...){ out <- lapply(x, sum.mm) # class(out) <- "summary.merModList" return(out) } #' Print the summary of a merMod list #' #' @param x a summary of amerModList object #' @param ... 
additional arguments #' #' @return summary content printed to console #' @export print.summary.merModList <- function(x, ...){ summ.ml <- x args <- eval(substitute(alist(...))) if("digits" %in% names(args)){ digits <- args$digits } else{ digits <- 3 } print(summ.ml$method) cat("Model family: ", summ.ml$family) cat("\n") print(summ.ml$form) cat("\nFixed Effects:\n") pfround(summ.ml$fe[, -1], digits) cat("\nRandom Effects:\n") cat("\nError Term Standard Deviations by Level:\n") for(i in 1:length(summ.ml$errorList)){ cat("\n") cat(names(summ.ml$errorList[i])) cat("\n") if(is.null(names(summ.ml$errorList[[i]]))){ names(summ.ml$errorList[[i]]) <- "(Intercept)" } pfround(summ.ml$errorList[[i]], digits = digits) cat("\n") } # lapply(errorList, pfround, digits) cat("\nError Term Correlations:\n") for(i in 1:length(summ.ml$corrList)){ cat("\n") cat(names(summ.ml$corrList[i])) cat("\n") if(is.null(names(summ.ml$corrList[[i]]))){ names(summ.ml$corrList[[i]]) <- "(Intercept)" } pfround(summ.ml$corrList[[i]], digits = digits) cat("\n") } # lapply(corrList, pfround, digits) cat("\nResidual Error =", fround(summ.ml$residError, digits), "\n") cat("\n---Groups\n") cat(sprintf("number of obs: %d, groups: ", summ.ml$modn)) cat(paste(paste(names(summ.ml$ngrps), summ.ml$ngrps, sep = ", "), collapse = "; ")) cat("\n") cat("\nModel Fit Stats") cat(sprintf("\nAIC = %g", round(summ.ml$mAIC, 1))) cat("\nResidual standard deviation =", fround(summ.ml$moDsigma.hat, digits), "\n") } #' Apply a multilevel model to a list of data frames #' #' @param formula a formula to pass through compatible with merMod #' @param data a list object with each element being a data.frame #' @param parallel logical, should the models be run in parallel? Default FALSE. If so, #' the `future_lapply` function from the `future.apply` package is used. See #' details. #' @param ... additional arguments to pass to the estimating function #' @rdname merModList #' #' @details Parallel computing is provided by the `futures` package, and its #' extension the `future.apply` package to provide the `future_lapply` function #' for easy parallel computations on lists. To use this package, simply register #' a parallel backend using the `plan()` function from `futures` - an example #' is to use `plan(multisession)` #' #' @return a list of fitted merMod objects of class merModList #' @export #' @examples #' sim_list <- replicate(n = 10, #' expr = sleepstudy[sample(row.names(sleepstudy), 180),], #' simplify=FALSE) #' fml <- "Reaction ~ Days + (Days | Subject)" #' mod <- lmerModList(fml, data = sim_list) #' summary(mod) lmerModList <- function(formula, data, parallel = FALSE, ...){ if(parallel) { if (requireNamespace("future.apply", quietly=TRUE)) { ml <- future.apply::future_lapply(data, function(d) lmer(formula, data = d, ...)) } warning("Parallel set but future.apply not available. Running sequentially.") ml <- lapply(data, function(d) lmer(formula, data = d, ...)) } else { ml <- lapply(data, function(d) lmer(formula, data = d, ...)) } class(ml) <- "merModList" return(ml) } #' Apply a Bayesian multilevel model to a list of data frames #' #' @inheritParams lmerModList #' @rdname merModList #' @return a merModList #' @importFrom blme blmer #' @export blmerModList <- function(formula, data, parallel = FALSE, ...){ if(parallel) { if (requireNamespace("future.apply", quietly=TRUE)) { ml <- future.apply::future_lapply(data, function(d) blmer(formula, data = d, ...)) } warning("Parallel set but future.apply not available. 
Running sequentially.") ml <- lapply(data, function(d) blmer(formula, data = d, ...)) } else { ml <- lapply(data, function(d) blmer(formula, data = d, ...)) } class(ml) <- "merModList" return(ml) } #' Apply a generalized linear multilevel model to a list of data frames #' #' @inheritParams lmerModList #' @rdname merModList #' @return a merModList #' @export glmerModList <- function(formula, data, parallel = FALSE, ...){ if(parallel) { if (requireNamespace("future.apply", quietly=TRUE)) { ml <- future.apply::future_lapply(data, function(d) glmer(formula, data = d, ...)) } warning("Parallel set but future.apply not available. Running sequentially.") ml <- lapply(data, function(d) glmer(formula, data = d, ...)) } else { ml <- lapply(data, function(d) glmer(formula, data = d, ...)) } class(ml) <- "merModList" return(ml) } #' Apply a Bayesian generalized linear multilevel model to a list of data frames #' #' @inheritParams lmerModList #' @rdname merModList #' @return a merModList #' @importFrom blme bglmer #' @export bglmerModList <- function(formula, data, parallel = FALSE, ...){ if(parallel) { if (requireNamespace("future.apply", quietly=TRUE)) { ml <- future.apply::future_lapply(data, function(d) bglmer(formula, data = d, ...)) } warning("Parallel set but future.apply not available. Running sequentially.") ml <- lapply(data, function(d) bglmer(formula, data = d, ...)) } else { ml <- lapply(data, function(d) bglmer(formula, data = d, ...)) } class(ml) <- "merModList" return(ml) } merTools/R/merSubstEff.R0000644000176200001440000001446513402510754014642 0ustar liggesusers#' Calculate the weighted mean of fitted values for various levels of #' random effect terms. #' #' \code{REimpact} calculates the average predicted value for each row of a #' new data frame across the distribution of \code{\link{expectedRank}} for a #' merMod object. This allows the user to make meaningful comparisons about the #' influence of random effect terms on the scale of the response variable, #' for user-defined inputs, and accounting for the variability in grouping terms. #' #' The function predicts the response at every level in the random effect term #' specified by the user. Then, the expected rank of each group level is binned #' to the number of bins specified by the user. Finally, a weighted mean of the #' fitted value for all observations in each bin of the expected ranks is #' calculated using the inverse of the variance as the weight -- so that less #' precise estimates are downweighted in the calculation of the mean for the bin. #' Finally, a standard error for the bin mean is calculated. #' #' @param merMod An object of class merMod #' #' @param newdata a data frame of observations to calculate group-level differences #' for #' #' @param groupFctr The name of the grouping factor over which the random #' coefficient of interest varies. This is the variable to the right of the #' pipe, \code{|}, in the [g]lmer formula. This parameter is optional, if not #' specified, it will perform the calculation for the first effect listed #' by \code{ranef}. #' #' @param term The name of the random coefficient of interest. This is the #' variable to the left of the pipe, \code{|}, in the [g]lmer formula. Partial #' matching is attempted on the intercept term so the following character #' strings will all return rankings based on the intercept (\emph{provided that #' they do not match the name of another random coefficient for that factor}): #' \code{c("(Intercept)", "Int", "intercep", ...)}. 
#' #' @param breaks an integer representing the number of bins to divide the group #' effects into, the default is 3; alternatively it can specify breaks from 0-100 #' for how to cut the expected rank distribution #' #' @param ... additional arguments to pass to \code{\link{predictInterval}} #' #' @return A data.frame with all unique combinations of the number of cases, rows #' in the newdata element, and number of bins: #' \describe{ #' \item{case}{The row number of the observation from newdata.} #' \item{bin}{The ranking bin for the expected rank, the higher the bin number, #' the greater the expected rank of the groups in that bin.} #' \item{AvgFitWght}{The weighted mean of the fitted values for case i in bin k} #' \item{AvgFitWghtSE}{The standard deviation of the mean of the fitted values #' for case i in bin k.} #' \item{nobs}{The number of group effects contained in that bin.} #' } #' #' @details This function uses the formula for variance of a weighted mean #' recommended by Cochran (1977). #' #' @references #' Gatz, DF and Smith, L. The Standard Error of a Weighted Mean Concentration. #' I. Bootstrapping vs other methods. \emph{Atmospheric Environment}. #' 1995;11(2)1185-1193. Available at #' \url{http://www.sciencedirect.com/science/article/pii/135223109400210C} #' #' Cochran, WG. 1977. Sampling Techniques (3rd Edition). Wiley, New York. #' #' @seealso \code{\link{expectedRank}}, \code{\link{predictInterval}} #' #' @examples #' #For a one-level random intercept model #' m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) #' m1.er <- REimpact(m1, newdata = sleepstudy[1, ], breaks = 2) #' #For a one-level random intercept model with multiple random terms #' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' #ranked by the random slope on Days #' m2.er1 <- REimpact(m2, newdata = sleepstudy[1, ], #' groupFctr = "Subject", term="Days") #' #ranked by the random intercept #' m2.er2 <- REimpact(m2, newdata = sleepstudy[1, ], #' groupFctr = "Subject", term="int") #' \donttest{ #' # You can also pass additional arguments to predictInterval through REimpact #' g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) #' zed <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", n.sims = 50, #' include.resid.var = TRUE) #' zed2 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "s", n.sims = 50, #' include.resid.var = TRUE) #' zed3 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", breaks = 5, #' n.sims = 50, include.resid.var = TRUE) #' } #' @export REimpact <- function(merMod, newdata, groupFctr=NULL, term = NULL, breaks = 3, ...){ if(missing(groupFctr)){ groupFctr <- names(ranef(merMod))[1] } lvls <- unique(merMod@frame[, groupFctr]) zed <- as.data.frame(lapply(newdata, rep, length(lvls))) zed[, groupFctr] <- rep(lvls, each = nrow(newdata)) zed[, "case"] <- rep(seq(1, nrow(newdata)), times = length(lvls)) outs1 <- cbind(zed, predictInterval(merMod, newdata = zed, ...)) outs1$var <- outs1$upr - outs1$lwr outs1$lwr <- NULL; outs1$upr <- NULL ranks <- expectedRank(merMod, groupFctr = groupFctr, term = term) ranks <- ranks[, c(2, 7)] outs1 <- merge(ranks, outs1, by.x = "groupLevel", by.y = groupFctr); rm(ranks) weighted.var.se <- function(x, w, na.rm=FALSE) # Computes the variance of a weighted mean following Cochran 1977 definition { if (na.rm) { w <- w[i <- !is.na(x)]; x <- x[i] } n = length(w) xWbar = weighted.mean(x,w,na.rm=na.rm) wbar = mean(w) out = 
n/((n-1)*sum(w)^2)*(sum((w*x-wbar*xWbar)^2)-2*xWbar*sum((w-wbar)*(w*x-wbar*xWbar))+xWbar^2*sum((w-wbar)^2)) return(out) } # bin pctER somehow outs1$bin <- cut(outs1$pctER, breaks = breaks, labels = FALSE, include.lowest = TRUE) bySum <- function(x){ AvgFit <- weighted.mean(x$fit, 1/x$var) AvgFitSE <- weighted.var.se(x$fit, 1/x$var) nobs <- length(x$fit) return(c(AvgFit, AvgFitSE, nobs)) } outs1 <- outs1[order(outs1$case, outs1$bin),] wMeans <- by(outs1, INDICES = list(outs1$case, outs1$bin), bySum) ids <- expand.grid(unique(outs1$case), unique(outs1$bin)) wMeans <- cbind(ids, do.call(rbind, wMeans)) names(wMeans) <- c("case", "bin", "AvgFit", "AvgFitSE", "nobs") return(wMeans) } merTools/R/merExpectedRank.R0000644000176200001440000001532413444576002015475 0ustar liggesusers#' Calculate the expected rank of random coefficients that account for #' uncertainty. #' #' \code{expectedRank} calculates the expected rank and the percentile expected #' rank of any random term in a merMod object. A simple ranking of the estimated #' random effects (as produced by \code{\link[lme4]{ranef}}) is not satisfactory #' because it ignores any amount of uncertainty. #' #' Inspired by Lingsma et al. (2010, see also Laird and Louis 1989), #' expectedRank sums the probability that each level of the grouping factor is #' greater than every other level of the grouping factor, similar to a #' two-sample t-test. #' #' The formula for the expected rank is: #' \deqn{ExpectedRank_i = 1 + \sum \phi((\theta_i - \theta_k) / \sqrt(var(\theta_i)+var(\theta_k))} #' where \eqn{\phi} is the standard normal distribution function, \eqn{\theta} #' is the estimated random effect and \eqn{var(\theta)} is the posterior #' variance of the estimated random effect. We add one to the sum so that the #' minimum rank is one instead of zero so that in the case where there is no #' overlap between the variances of the random effects (or if the variances are #' zero), the expected rank equals the actual rank. The ranks are ordered such #' that the winners have ranks that are greater than the losers. #' #' The formula for the percentile expected rank is: #' \deqn{100 * (ExpectedRank_i - 0.5) / N_grps} #' where \eqn{N_grps} is the number of grouping factor levels. The percentile #' expected rank can be interpreted as the fraction of levels that score at or #' below the given level. #' #' NOTE: \code{expectedRank} will only work under conditions that \code{lme4::ranef} #' will work. One current example of when this is \emph{not} the case is for #' models when there are multiple terms specified per factor (e.g. uncorrelated random #' coefficients for the same term, e.g. #' \code{lmer(Reaction ~ Days + (1 | Subject) + (0 + Days | Subject), data = sleepstudy)}) #' #' @param merMod An object of class merMod #' #' @param groupFctr An optional character vector specifying the name(s) the grouping factor(s) #' over which the random coefficient of interest varies. This is the #' variable to the right of the pipe, \code{|}, in the [g]lmer formula. #' This parameter is optional. If none is specified all terms will be returned. #' #' @param term An optional character vector specifying the name(s) of the random coefficient of interest. This is the #' variable to the left of the pipe, \code{|}, in the [g]lmer formula. 
Partial #' matching is attempted on the intercept term so the following character #' strings will all return rankings based on the intercept (\emph{provided that #' they do not match the name of another random coefficient for that factor}): #' \code{c("(Intercept)", "Int", "intercep", ...)}. #' #' @return A data.frame with the following five columns: #' \describe{ #' \item{groupFctr}{a character representing name of the grouping factor} #' \item{groupLevel}{a character representing the level of the grouping factor} #' \item{term}{a character representing the formula term for the group} #' \item{estimate}{effect estimate from \code{lme4::ranef(, condVar=TRUE)}).} #' \item{std.error}{the posterior variance of the estimate random effect #' (from \code{lme4::ranef(, condVar=TRUE)}); named "\code{term}"_var.} #' \item{ER}{The expected rank.} #' \item{pctER}{The percentile expected rank.} #' } #' #' @references #' Laird NM and Louis TA. Empirical Bayes Ranking Methods. \emph{Journal of #' Education Statistics}. 1989;14(1)29-46. Available at #' \url{http://www.jstor.org/stable/1164724}. #' #' #' Lingsma HF, Steyerberg EW, Eijkemans MJC, et al. Comparing and #' ranking hospitals based on outcome: results from The Netherlands Stroke Survey. #' \emph{QJM: An International Journal of Medicine}. 2010;103(2):99-108. #' doi:10.1093/qjmed/hcp169 #' #' @examples #' #For a one-level random intercept model #' m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) #' (m1.er <- expectedRank(m1)) #' #' #For a one-level random intercept model with multiple random terms #' m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' #ranked by the random slope on Days #' (m2.er1 <- expectedRank(m2, term="Days")) #' #ranked by the random intercept #' (m2.er2 <- expectedRank(m2, term="int")) #' #' \donttest{ #' #For a two-level model with random intercepts #' m3 <- lmer(y ~ service * dept + (1|s) + (1|d), InstEval) #' #Ranked by the random intercept on 's' #' (m3.er1 <- expectedRank(m3, groupFctr="s", term="Intercept")) #' } #' @export expectedRank <- function(merMod, groupFctr=NULL, term=NULL) { #Count random terms in merMod n.rfx <- lme4::getME(merMod, "k") n.rfac <- lme4::getME(merMod, "n_rfac") rfx <- lme4::ranef(merMod, condVar=TRUE) if(!is.null(groupFctr)){ groupFctr <- groupFctr } else{ groupFctr <- names(rfx) } out <- data.frame(groupFctr = NA, groupLevel = NA, term = NA, estimate = NA, std.error = NA, ER = NA, pctER = NA) for(i in groupFctr){ rfx.names <- rownames(rfx[[i]]) n.grps <- length(rfx.names) n.terms <- length(rfx[[i]]) if(!is.null(term)){ termIdx <- term } else{ termIdx <- names(rfx[[i]]) } for(j in termIdx){ if (all(grepl("[iI]nt[a-z]*", j)) & is.na(match(j, names(rfx[[i]])))) { j <- "(Intercept)" } term.idx <- grep(j, names(rfx[[i]]), fixed=TRUE) theta <- rfx[[i]][,term.idx] var.theta <- attr(rfx[[i]], which="postVar")[term.idx, term.idx, 1:n.grps] #Calculate Expected Rank which is the sum of the probabilities that group i is greater than all #other groups j (assuming normal distribution of random effects) ER <- pctER <- rep(NA, n.grps) for (k in 1:n.grps) { ER[k] <- 1 + sum(pnorm((theta[k]-theta[-k]) / sqrt(var.theta[k] + var.theta[-k]))) } #Calculated percentile expected rank ... the version of the formula I am using is #the percentage of groups that are ranked **equal to or less than** the selected #group ... 
if we just wanted percentage ranked less than then remove the 0.5 pctER <- round(100 * (ER - 0.5) / n.grps) tmp <- data.frame(groupFctr = i, groupLevel = rfx.names, term = j, estimate = theta, std.error = var.theta, ER = ER, pctER = pctER) out <- rbind(out, tmp) } } out <- out[-1, ] # Avoid parentheses in parameter names out$term <- gsub("(Intercept)", "Intercept", out$term, fixed = TRUE) return(out) } merTools/R/merPredict.R0000644000176200001440000005544613466135366014533 0ustar liggesusers#' Predict from merMod objects with a prediction interval #' @description This function provides a way to capture model uncertainty in #' predictions from multi-level models fit with \code{lme4}. By drawing a sampling #' distribution for the random and the fixed effects and then estimating the fitted #' value across that distribution, it is possible to generate a prediction interval #' for fitted values that includes all variation in the model except for variation #' in the covariance parameters, theta. This is a much faster alternative than #' bootstrapping for models fit to medium to large datasets. #' @param merMod a merMod object from lme4 #' @param newdata a data.frame of new data to predict #' @param which a character specifying what to return, by default it returns the #' full interval, but you can also select to return only the fixed variation or #' the random component variation. If full is selected the resulting data.frame #' will be \code{nrow(newdata) * number of model levels} long #' @param level the width of the prediction interval #' @param n.sims number of simulation samples to construct #' @param stat take the median or mean of simulated intervals #' @param type type of prediction to develop #' @param include.resid.var logical, include or exclude the residual variance for #' linear models #' @param returnSims logical, should all n.sims simulations be returned? #' @param seed numeric, optional argument to set seed for simulations #' @param fix.intercept.variance logical; should the variance of the intercept #' term be adjusted downwards to roughly correct for its covariance with the #' random effects, as if all the random effects are intercept effects? #' @param ignore.fixed.terms a numeric or string vector of indexes or names of #' fixed effects which should be considered as fully known (zero variance). This #' can result in under-conservative intervals, but for models with random effects #' nested inside fixed effects, holding the fixed effects constant intervals may #' give intervals with closer to nominal coverage than the over-conservative #' intervals without this option, which ignore negative correlation between the #' outer (fixed) and inner (random) coefficients. #' @param .parallel, logical should parallel computation be used, default is FALSE #' @param .paropts, -NOT USED: Caused issue #54- a list of additional options passed into the foreach function #' when parallel computation is enabled. This is important if (for example) your #' code relies on external data or packages: use the .export and .packages arguments #' to supply them so that all cluster nodes have the correct environment set up #' for computing. 
#' @return a data.frame with three columns: #' \describe{ #' \item{\code{fit}}{The center of the distribution of predicted values as defined by #' the \code{stat} parameter.} #' \item{\code{lwr}}{The lower prediction interval bound corresponding to the quantile cut #' defined in \code{level}.} #' \item{\code{upr}}{The upper prediction interval bound corresponding to the quantile cut #' defined in \code{level}.} #' } #' If returnSims = TRUE, then the individual simulations are attached to this #' data.frame in the attribute \code{sim.results} and are stored as a matrix. #' @details To generate a prediction interval, the function first computes a simulated #' distribution of all of the parameters in the model. For the random, or grouping, #' effects, this is done by sampling from a multivariate normal distribution which #' is defined by the BLUP estimate provided by \code{ranef} and the associated #' variance-covariance matrix for each observed level of each grouping terms. For #' each grouping term, an array is build that has as many rows as there are levels #' of the grouping factor, as many columns as there are predictors at that level #' (e.g. an intercept and slope), and is stacked as high as there are number of #' simulations. These arrays are then multiplied by the new data provided to the #' function to produce a matrix of yhat values. The result is a matrix of the simulated #' values of the linear predictor for each observation for each simulation. Each #' grouping term has such a matrix for each observation. These values can be added #' to get the estimate of the fitted value for the random effect terms, and this #' can then be added to a matrix of simulated values for the fixed effect level to #' come up with \code{n.sims} number of possible yhat values for each observation. #' #' The distribution of simulated values is cut according to the interval requested #' by the function. The median or mean value as well as the upper and lower bounds #' are then returned. These can be presented either on the linear predictor scale #' or on the response scale using the link function in the \code{merMod}. #' @note \code{merTools} includes the functions \code{subBoot} and \code{thetaExtract} #' to allow the user to estimate the variability in \code{theta} from a larger #' model by bootstrapping the model fit on a subset, to allow faster estimation. 
#' @export #' @import lme4 #' @importFrom abind abind #' @importFrom mvtnorm rmvnorm #' @importFrom foreach %dopar% #' @importFrom foreach foreach #' @examples #' m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) #' regFit <- predict(m1, newdata = sleepstudy[11, ]) # a single value is returned #' intFit <- predictInterval(m1, newdata = sleepstudy[11, ]) # bounded values #' # Can do glmer #' d1 <- cbpp #' d1$y <- d1$incidence / d1$size #' gm2 <- glmer(y ~ period + (1 | herd), family = binomial, data = d1, #' nAGQ = 9, weights = d1$size) #' regFit <- predict(gm2, newdata = d1[1:10, ]) #' # get probabilities #' regFit <- predict(gm2, newdata = d1[1:10, ], type = "response") #' intFit <- predictInterval(gm2, newdata = d1[1:10, ], type = "probability") #' intFit <- predictInterval(gm2, newdata = d1[1:10, ], type = "linear.prediction") predictInterval <- function(merMod, newdata, which=c("full", "fixed", "random", "all"), level = 0.8, n.sims = 1000, stat=c("median","mean"), type=c("linear.prediction", "probability"), include.resid.var=TRUE, returnSims = FALSE, seed=NULL, .parallel = FALSE, .paropts = NULL, fix.intercept.variance = FALSE, #This does NOT work with random slope models ignore.fixed.terms = NULL) { if(missing(newdata)){ newdata <- merMod@frame } if(any(c("data.frame") != class(newdata))){ if(any(c("tbl_df", "tbl") %in% class(newdata))){ newdata <- as.data.frame(newdata) warning("newdata is tbl_df or tbl object from dplyr package and has been coerced to a data.frame") } else{ newdata <- as.data.frame(newdata) } } predict.type <- match.arg(type, c("linear.prediction", "probability"), several.ok = FALSE) stat.type <- match.arg(stat, c("median","mean"), several.ok = FALSE) which.eff <- match.arg(which, c("full", "fixed", "random", "all"), several.ok = FALSE) if (!is.null(seed)) set.seed(seed) else if (!exists(".Random.seed", envir = .GlobalEnv)) runif(1) ##First: check if it is a GLMM or NLMM and draw from sigma distribution or incorporate scale parameter if GLMM merMod.devcomp <- getME(merMod, "devcomp") if (merMod.devcomp$dims[["GLMM"]] == 0 & merMod.devcomp$dims[["NLMM"]] == 0) { sigmahat <- sqrt(1/rgamma(n.sims, 0.5 * residDF.merMod(merMod), 0.5 * merMod.devcomp$cmp[["pwrss"]])) if (predict.type=="probability") { predict.type="linear.prediction" warning(" Asking for predictions on the probability scale makes no sense, resetting predict.type to linear.prediction", call.=FALSE) } } else if (merMod.devcomp$dims[["GLMM"]] == TRUE & merMod@resp$family$family == "binomial" & merMod@resp$family$link %in% c("logit", "probit")) { sigmahat <- rep(1,n.sims) } else { warning(" Prediction for NLMMs or GLMMs that are not mixed binomial regressions is not tested. Sigma set at 1.") sigmahat <- rep(1,n.sims) } newdata.modelMatrix <- buildModelMatrix(model= merMod, newdata = newdata) # When there is no fixed effect intercept but there is a group level intercept # We need to do something! 
re.xb <- vector(getME(merMod, "n_rfacs"), mode = "list") names(re.xb) <- names(ngrps(merMod)) for (j in names(re.xb)){ reMeans <- as.matrix(ranef(merMod)[[j]]) reMatrix <- attr(ranef(merMod, condVar = TRUE)[[j]], which = "postVar") # OK, let's knock out all the random effects we don't need if (j %in% names(newdata)){ # get around if names do not line up because of nesting obslvl <- unique(as.character(newdata[, j])) alllvl <- rownames(reMeans) keep <- intersect(obslvl, alllvl) } else { obslvl <- colnames(newdata.modelMatrix) alllvl <- rownames(reMeans) keep <- intersect(obslvl, alllvl) } # Add switch if no random groups are observed to avoid indexing errors, # we burn 1 sample of 1 group of all coefficients that will eventually # be multiplied by zero later on if (length(keep) > 0 & !identical(keep, alllvl)) { reMeans <- reMeans[keep, , drop=FALSE] dimnames(reMatrix)[[3]] <- alllvl reMatrix <- reMatrix[, , keep, drop = FALSE] } else if (length(keep) > 0 & identical(keep, alllvl)){ dimnames(reMatrix)[[3]] <- alllvl # dimnames(reMeans)[[2]] <- j # we need to get the variable name into this ojbect reMatrix <- reMatrix[, , keep, drop = FALSE] } else{ reMeans <- reMeans[1, , drop=FALSE] reMatrix <- reMatrix[, , 1, drop = FALSE] } tmpList <- vector(length = nrow(reMeans), mode = "list") for (k in 1:nrow(reMeans)){ meanTmp <- reMeans[k, ] names(meanTmp) <- NULL matrixTmp <- as.matrix(reMatrix[, , k]) tmpList[[k]] <- as.matrix(mvtnorm::rmvnorm(n= n.sims, mean=meanTmp, sigma=matrixTmp, method = "chol")) } REcoefs <- sapply(tmpList, identity, simplify="array") # rm(tmpList) dimnames(REcoefs) <- list(1:n.sims, attr(reMeans, "dimnames")[[2]], attr(reMeans, "dimnames")[[1]] ) if (j %in% names(newdata)) { # get around if names do not line up because of nesting tmp <- cbind(as.data.frame(newdata.modelMatrix), var = newdata[, j]) tmp <- tmp[, !duplicated(colnames(tmp))] keep <- names(tmp)[names(tmp) %in% dimnames(REcoefs)[[2]]] if (length(keep) == 0) { keep <- grep(dimnames(REcoefs)[[2]], names(tmp), value = TRUE) } if (length(keep) == 0) { tmp <- cbind(model.frame(subbars(formula(merMod)), data = newdata), var = newdata[, j]) keep <- grep(dimnames(REcoefs)[[2]], names(tmp), value = TRUE) } if ( length(keep) == 0) { # Add in an intercept for RE purposes tmp <- cbind(as.data.frame(newdata.modelMatrix), var = newdata[, j]) tmp <- tmp[, !duplicated(colnames(tmp))] tmp <- cbind(data.frame(1), tmp) names(tmp)[1] <- "(Intercept)" keep <- "(Intercept)" } tmp <- tmp[, c(keep, "var"), drop = FALSE] tmp[, "var"] <- as.character(tmp[, "var"]) colnames(tmp)[which(names(tmp) == "var")] <- names(newdata[, j, drop = FALSE]) if (all(grepl(":", keep))) { # Strip out the interaction after keep <- unique(gsub("(.*):.*", "\\1", keep)) } } else { tmp <- as.data.frame(newdata.modelMatrix) tmp <- tmp[, !duplicated(colnames(tmp))] # deduplicate columns because # column names can be duplicated to account for multiple effects # but we've already reconciled all the effects tmp$var <- names(tmp[keep])[max.col(tmp[keep])] #changed alllvl to keep in #this line re: issue #53 where newdata doesn't have all levels of rfx in #nested specification (with ":") so this just takes the subset of alllvl #that are specified in model keep <- names(tmp)[names(tmp) %in% dimnames(REcoefs)[[2]]] tmp <- tmp[, c(keep, "var"), drop = FALSE] tmp[, "var"] <- as.character(tmp[, "var"]) colnames(tmp)[which(names(tmp) == "var")] <- j } ####################### ################ tmp.pred <- function(data, coefs, group){ new.levels <- unique(as.character(data[, 
group])[!as.character(data[, group]) %in% dimnames(coefs)[[3]]]) msg <- paste(" The following levels of ", group, " from newdata \n -- ", paste0(new.levels, collapse=", "), " -- are not in the model data. \n Currently, predictions for these values are based only on the \n fixed coefficients and the observation-level error.", sep="") if(length(new.levels > 0)){ warning(msg, call.=FALSE) } yhatTmp <- array(data = NA, dim = c(nrow(data), dim(coefs)[1])) colIdx <- ncol(data) - 1 colLL <- length(1:colIdx) if(colLL > dim(coefs)[2]) { # copy over coefs_new <- array(NA, dim = c(dim(coefs)[1], colLL, dim(coefs)[3])) dimnames(coefs_new)[c(1, 3)] <- dimnames(coefs)[c(1, 3)] dimnames(coefs_new)[[2]] <- rep(dimnames(coefs)[[2]], dim(coefs_new)[2]) for (k in 1:colLL) { coefs_new[, k, 1:dim(coefs)[3]] <- coefs[, 1, 1:dim(coefs)[3]] } coefs <- coefs_new } for(i in 1:nrow(data)){ lvl <- as.character(data[, group][i]) if(!lvl %in% new.levels){ yhatTmp[i, ] <- as.numeric(data[i, 1:colIdx]) %*% t(coefs[, 1:colIdx, lvl]) } else{ # 0 out the RE for these new levels yhatTmp[i, ] <- rep(0, colIdx) %*% t(coefs[, 1:colIdx, 1]) } } rownames(yhatTmp) <- rownames(data) rm(data) return(yhatTmp) } ######################### #### if(nrow(tmp) > 1000 | .parallel) { if (requireNamespace("foreach", quietly=TRUE)) { if(.parallel){ setup_parallel() } tmp2 <- split(tmp, (1:nrow(tmp) %/% 500)) #TODO: Find optimum splitting factor tmp2 <- tmp2[lapply(tmp2,length) > 0] fe_call <- as.call(c(list(quote(foreach::foreach), i = seq_along(tmp2), .combine = 'rbind'))) fe <- eval(fe_call) re.xb[[j]] <- foreach::`%dopar%`(fe, tmp.pred(data = tmp2[[i]], coefs = REcoefs[, keep, , drop = FALSE], group = j)) rm(tmp2) } else { warning("foreach package is unavailable, parallel computing not available") re.xb[[j]] <- tmp.pred(data = tmp, coefs = REcoefs[, keep, , drop = FALSE], group = j) } } else{ re.xb[[j]] <- tmp.pred(data = tmp, coefs = REcoefs[, keep, , drop = FALSE], group = j) } rm(tmp) } rm(REcoefs) # TODO: Add a check for new.levels that is outside of the above loop # for now, ignore this check if (include.resid.var==FALSE) { # if (length(new.levels)==0) sigmahat <- rep(1, n.sims) # else { # include.resid.var=TRUE # warning(" \n Since new levels were detected resetting include.resid.var to TRUE.") # } } # fixed.xb is nrow(newdata) x n.sims ##Calculate yhat as sum of the components (fixed plus all groupling factors) fe.tmp <- fixef(merMod) vcov.tmp <- as.matrix(vcov(merMod)) # Detect if an intercept is present # TODO - is this reliable if (is.na(names(attr(VarCorr(merMod)[[j]],"stddev")["(Intercept)"]))) { fix.intercept.variance <- FALSE message("No intercept detected, setting fix.intercept.variance to FALSE") } # If intercept is not in fixed terms if (!"(Intercept)" %in% names(fixef(merMod)) && fix.intercept.variance) { # TODO - decide if this is an error or if we should allow it to continue with warning warning("No fixed-effect intercept detected. Variance adjustment may be unreliable.") } if (fix.intercept.variance) { #Assuming all random effects include intercepts. 
intercept.variance <- vcov.tmp[1,1] groupsizes <- ngrps(merMod) for(j in names(groupsizes)){ #for every group of random e groupExtraPrecision <- 0 groupVar <- (attr(VarCorr(merMod)[[j]],"stddev")["(Intercept)"])^2 reMatrix <- attr(ranef(merMod, condVar = TRUE)[[j]], which = "postVar") for (eff in 1:dim(reMatrix)[3]) { term <- 1/(reMatrix[1,1,eff] + groupVar) if (term > 0) { groupExtraPrecision <- groupExtraPrecision + term } else { warning("fix.intercept.variance got negative precision; better turn it off.") } } intercept.variance <- intercept.variance - 1/groupExtraPrecision } if (intercept.variance < 0) { warning("fix.intercept.variance got negative variance; better turn it off.") } ratio <- intercept.variance/vcov.tmp[1,1] prec.tmp <- solve(vcov.tmp) prec.tmp[1,1] <- prec.tmp[1,1] / ratio vcov.tmp[1,] <- vcov.tmp[1,] * ratio vcov.tmp <- solve(prec.tmp, tol=1e-50) } if (!is.null(ignore.fixed.terms)) { prec.tmp <- solve(vcov.tmp) for (term in ignore.fixed.terms) { prec.tmp[term,term] <- prec.tmp[term,term] * 1e15 } vcov.tmp <- solve(prec.tmp, tol=1e-50) } if(n.sims > 2000 | .parallel){ if(.parallel){ setup_parallel() } i <- 1:n.sims fe_call <- as.call(c(list(quote(foreach::foreach), i = i, .combine = 'rbind'))) fe <- eval(fe_call) betaSim <- foreach::`%dopar%`(fe, mvtnorm::rmvnorm(n = 1, mean = fe.tmp, sigma = vcov.tmp, method = "chol")) } else { betaSim <- abind::abind(lapply(1:n.sims, function(x) mvtnorm::rmvnorm(n = 1, mean = fe.tmp, sigma = vcov.tmp, method = "chol")), along=1) } # Pad betaSim colnames(betaSim) <- names(fe.tmp) rownames(betaSim) <- 1:n.sims newdata.modelMatrix <- buildModelMatrix(merMod, newdata = newdata, which = "fixed") if (ncol(newdata.modelMatrix) > ncol(betaSim)) { pad <- matrix(rep(0), nrow = nrow(betaSim), ncol = ncol(newdata.modelMatrix) - ncol(betaSim)) if(ncol(pad) > 0){ message("Fixed effect matrix has been padded with 0 coefficients for random slopes not included in the fixed effects and interaction terms.") } colnames(pad) <- setdiff(colnames(newdata.modelMatrix), colnames(betaSim)) betaSim <- cbind(betaSim, pad) keep <- intersect(colnames(newdata.modelMatrix), colnames(betaSim)) newdata.modelMatrix <- newdata.modelMatrix[, keep] betaSim <- betaSim[, keep] } re.xb$fixed <- newdata.modelMatrix %*% t(betaSim) ###### if(which.eff == "full"){ yhat <- Reduce('+', re.xb) } else if(which.eff == "fixed"){ yhat <- Reduce('+', re.xb["fixed"]) } else if(which.eff == "random"){ re.xb["fixed"] <- NULL yhat <- Reduce('+', re.xb) } else if(which.eff == "all"){ yhat <- Reduce('+', re.xb) N <- nrow(newdata) if (include.resid.var==TRUE){ for(i in 1:length(re.xb)){ re.xb[[i]] <- abind::abind(lapply(1:n.sims, function(x) rnorm(N, re.xb[[i]][, x], sigmahat[x])), along=2) } } pi.comps <- re.xb } rm(re.xb) N <- nrow(newdata) outs <- data.frame("fit" = rep(NA, N), "upr" = rep(NA, N), "lwr" = rep(NA, N)) upCI <- 1 - ((1-level)/2) loCI <- ((1-level)/2) if (include.resid.var==TRUE){ yhat <- abind::abind(lapply(1:n.sims, function(x) rnorm(N, yhat[,x], sigmahat[x])), along = 2) } # Output prediction intervals if (stat.type == "median") { outs[, 1:3] <- t(apply(yhat, 1, quantile, prob = c(0.5, upCI, loCI), na.rm=TRUE)) } if (stat.type == "mean") { outs$fit <- apply(yhat, 1, mean, na.rm=TRUE) outs[, 2:3] <- t(apply(yhat, 1, quantile, prob = c(upCI, loCI), na.rm=TRUE)) } if (predict.type == "probability") { if(nrow(outs) == 1) { outs <- t(apply(outs, 2, merMod@resp$family$linkinv)) } else { outs <- apply(outs, 2, merMod@resp$family$linkinv) } } ############################## # 
Construct observation predictors for each component of the model ########################## if(which.eff == "all"){ if(returnSims == TRUE){ allSims <- pi.comps } for(i in 1:length(pi.comps)){ if( stat.type == "median"){ pi.comps[[i]] <- t(apply(pi.comps[[i]], 1, quantile, prob = c(0.5, upCI, loCI), na.rm=TRUE)) pi.comps[[i]] <- as.data.frame(pi.comps[[i]]) names(pi.comps[[i]]) <- c("fit", "upr", "lwr") } if(stat.type == "mean"){ tmp <- pi.comps[[i]] pi.comps[[i]] <- data.frame("fit" = rep(NA, N), "upr" =NA, "lwr" = NA) pi.comps[[i]]$fit <- apply(tmp, 1, mean, na.rm=TRUE) pi.comps[[i]][, 2:3] <- t(apply(tmp, 1, quantile, prob = c(upCI, loCI), na.rm=TRUE)) } if (predict.type == "probability") { pi.comps[[i]] <- apply(pi.comps[[i]], 2, merMod@resp$family$linkinv) pi.comps[[i]] <- as.data.frame(pi.comps[[i]]) names(pi.comps[[i]]) <- c("fit", "upr", "lwr") } } componentOut <- dplyr::bind_rows(pi.comps, .id="effect") outs <- cbind(data.frame("effect" = "combined"), outs) outs <- suppressWarnings(bind_rows(outs, componentOut)) outs$obs <- rep(1:N, nrow(outs) %/% N) rm(pi.comps) } #Close it out if(returnSims == FALSE){ return(as.data.frame(outs)) } else if(returnSims == TRUE){ outs <- as.data.frame(outs) if(which.eff == "all"){ attr(outs, "sim.results") <- allSims } else{ attr(outs, "sim.results") <- yhat } return(outs) } } ## TODO: Finish exporting so that all returns the individual predictions for # each random effect separately merTools/R/merFastDisplay.R0000644000176200001440000001205513466135366015351 0ustar liggesusers#' Display model fit summary of x or x like objects, fast #' #' Faster than the implementation in the arm package because it avoids refitting #' #' @title fastdisp: faster display of model summaries #' @param x a model object #' @param ... additional arguments to pass to \code{arm::\link[arm]{display}} #' including number of digits #' @details The time saving is only noticeable for large, time-consuming (g)lmer #' fits. #' @import arm #' @return A printed summary of a x object #' @examples #' \donttest{ #' #Compare the time for displaying this modest model #' require(arm) #' m1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) #' system.time(display(m1)) #' system.time(fastdisp(m1)) #' } #' @seealso \code{\link[arm]{display}} #' @rdname fastdisp #' @export fastdisp fastdisp <- function (x, ...) { UseMethod("fastdisp", x) } #' @rdname fastdisp #' @importFrom stats df pt #' @export fastdisp.merMod <- function (x, ...) 
{ .local <- function (x, digits = 2, detail = FALSE) { out <- NULL out$call <- x@call print(out$call) fcoef <- fixef(x) useScale <- getME(x, "devcomp")$dims["useSc"] corF <- vcov(x)@factors$correlation coefs <- cbind(fcoef, corF@sd) if (length(fcoef) > 0) { if (!useScale) { coefs <- coefs[, 1:2, drop = FALSE] out$z.value <- coefs[, 1]/coefs[, 2] out$p.value <- 2 * pnorm(abs(out$z.value), lower.tail = FALSE) coefs <- cbind(coefs, `z value` = out$z.value, `Pr(>|z|)` = out$p.value) } else { out$t.value <- coefs[, 1]/coefs[, 2] coefs <- cbind(coefs, `t value` = out$t.value) } dimnames(coefs)[[2]][1:2] <- c("coef.est", "coef.se") if (detail) { pfround(coefs, digits) } else { pfround(coefs[, 1:2], digits) } } out$coef <- coefs[, "coef.est"] out$se <- coefs[, "coef.se"] cat("\nError terms:\n") vc <- easyVarCorr(VarCorr(x), useScale = useScale, digits) print(vc[, c(1:2, 4:ncol(vc))], quote = FALSE) out$ngrps <- lapply(x@flist, function(x) length(levels(x))) is_REML <- isREML(x) llik <- logLik(x, REML = is_REML) out$AIC <- AIC(llik) # out$deviance <- deviance(refitML(x)) out$n <- getME(x, "devcomp")$dims["n"] # Dhat <- -2 * (llik) # pD <- out$deviance - Dhat # out$DIC <- out$deviance + pD cat("---\n") cat(sprintf("number of obs: %d, groups: ", out$n)) cat(paste(paste(names(out$ngrps), out$ngrps, sep = ", "), collapse = "; ")) cat(sprintf("\nAIC = %g", round(out$AIC, 1))) # cat(round(out$DIC, 1)) # cat("\ndeviance =", fround(out$deviance, 1), "\n") if (useScale < 0) { out$sigma.hat <- sigma(x) cat("overdispersion parameter =", fround(out$sigma.hat, 1), "\n") } return(invisible(out)) } .local(x, ...) } #' @rdname fastdisp #' @export fastdisp.merModList <- function(x, ...){ .local <- function (x, digits = 2, detail = FALSE) { out <- NULL useScale <- getME(x[[1]], "devcomp")$dims["useSc"] #useScale <- TRUE out$call <- x[[1]]@call listFE <- modelFixedEff(x) row.names(listFE) <- listFE$term out$t.value <- listFE$statistic out$coef <- listFE$estimate out$se <- listFE$std.error listRE <- modelRandEffStats(x) out$ngrps <- lapply(x[[1]]@flist, function(x) length(levels(x))) is_REML <- isREML(x[[1]]) llik <- lapply(x, logLik, REML = is_REML) out$AIC <- mean(unlist(lapply(llik, AIC))) out$n <- round(mean(unlist(lapply(lapply(lapply(x, getME, "devcomp"), "[[", "dims"), "[", 2))), 0) # round to nearest integer print(out$call) if (!detail) { pfround(listFE[, 2:3], digits) } else { listFE$p.value <- 2 * pt(abs(listFE$statistic), listFE$df, lower.tail = FALSE) pfround(listFE[, 2:6], digits) } cat("\nError terms:\n") vc <- easyVarCorr(VarCorr(x[[1]]), useScale = useScale, digits) # Resort the output of the random effect summary listRE <- listRE[grep("cor_", listRE$term, invert=TRUE), ] resid <- listRE[listRE$group == "Residual", ] listRE <- listRE[listRE$group != "Residual", ] listRE <- rbind(listRE, resid) # vc[, 3] <- as.character(round(listRE$estimate^2, digits = digits)) vc[, 4] <- as.character(round(listRE$estimate, digits = digits)) print(vc[, c(1:2, 4:ncol(vc))], quote = FALSE) cat("---\n") cat(sprintf("number of obs: %d, groups: ", out$n)) cat(paste(paste(names(out$ngrps), out$ngrps, sep = ", "), collapse = "; ")) cat(sprintf("\nAIC = %g", round(out$AIC, 1))) cat("---\n") # cat(round(out$DIC, 1)) # cat("\ndeviance =", fround(out$deviance, 1), "\n") if (useScale < 0) { out$sigma.hat <- sigma(x) cat("overdispersion parameter =", fround(out$sigma.hat, 1), "\n") cat("---\n") } return(invisible(out)) } .local(x, ...) 
} merTools/R/merPlots.R0000644000176200001440000001524713402510754014221 0ustar liggesusers#' @title Plot the results of a simulation of the random effects #' @name plotREsim #' @description Plot the simulated random effects on a ggplot2 chart. Points that #' are distinguishable from zero (i.e. the confidence band based on \code{level} #' does not cross the red line) are highlighted. Currently, the plots are ordered #' according to the grouping factor. #' @param data a data.frame generated by \code{\link{REsim}} with simulations of #' the random effects of a \code{\link{merMod}} #' @param level the width of the confidence interval #' @param stat a character value indicating the variable name in data of the #' midpoint of the estimated interval, e.g. "mean" or "median" #' @param sd a logical indicating whether or not to plot error bars around #' the estimates (default is TRUE). Calculates the width of the error bars #' based on \code{level} and the variable named "sd" in \code{data} #' @param sigmaScale a numeric value to divide the estimate and the standard #' deviation by in the case of doing an effect size calculation #' @param oddsRatio logical, should the parameters be converted to odds ratios #' before plotting #' @param labs logical, include the labels of the groups on the x-axis #' @param facet Accepts either logical (\code{TRUE}) or \code{list} to specify which #' random effects to plot. If \code{TRUE}, facets by both \code{groupFctr} and \code{term}. #' If \code{list} selects the panel specified by the named elements of the list #' @return a ggplot2 plot of the coefficient effects #' @examples #' fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' (p1 <- plotREsim(REsim(fm1))) #' #Plot just the random effects for the Days slope #' (p2 <- plotREsim(REsim(fm1), facet= list(groupFctr= "Subject", term= "Days"))) #' @export #' @import ggplot2 plotREsim <- function(data, level = 0.95, stat = "median", sd = TRUE, sigmaScale = NULL, oddsRatio = FALSE, labs = FALSE, facet= TRUE){ # error checking plot_sim_error_chks(type= "RE", level= level, stat= stat, sd= sd, sigmaScale= sigmaScale, oddsRatio= oddsRatio, labs= labs, facet= facet) # check for faceting facet_logical <- is.logical(facet) if (!facet_logical) { data <- data[data$groupFctr == facet[[1]] & data$term == facet[[2]], ] } if(!missing(sigmaScale)){ data[, "sd"] <- data[, "sd"] / sigmaScale data[, stat] <- data[, stat] / sigmaScale } data[, "sd"] <- data[, "sd"] * qnorm(1-((1-level)/2)) data[, "ymax"] <- data[, stat] + data[, "sd"] data[, "ymin"] <- data[, stat] - data[, "sd"] data[, "sig"] <- data[, "ymin"] > 0 | data[, "ymax"] < 0 hlineInt <- 0 if(oddsRatio == TRUE){ data[, "ymax"] <- exp(data[, "ymax"]) data[, stat] <- exp(data[, stat]) data[, "ymin"] <- exp(data[, "ymin"]) hlineInt <- 1 } data <- data[order(data[,"groupFctr"], data[,"term"], data[,stat]),] rownames(data) <- 1:nrow(data) data[,"xvar"] <- factor(paste(data$groupFctr, data$groupID, sep=""), levels=unique(paste(data$groupFctr,data$groupID, sep="")), ordered=TRUE) if(labs == TRUE){ xlabs.tmp <- element_text(face = "bold", angle=90, vjust=.5) } else { data[,"xvar"] <- as.numeric(data[,"xvar"]) xlabs.tmp <- element_blank() } p <- ggplot(data, aes_string(x = "xvar", y = stat, ymax = "ymax", ymin = "ymin")) + geom_hline(yintercept = hlineInt, color = I("red"), size = I(1.1)) + geom_point(color="gray75", alpha=1/(nrow(data)^.33), size=I(0.5)) + geom_point(data=subset(data, sig==TRUE), size=I(3)) + labs(x = "Group", y = "Effect Range", title = "Effect Ranges") + 
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = xlabs.tmp, axis.ticks.x = element_blank()) if (sd) { p <- p + geom_pointrange(alpha = 1/(nrow(data)^.33)) + geom_pointrange(data=subset(data, sig==TRUE), alpha = 0.25) } # check facet if (facet_logical) { return(p + facet_grid(term ~ groupFctr, scales = "free_x")) } else { return(p) } } #' @title Plot the results of a simulation of the fixed effects #' @name plotFEsim #' @description Plot the simulated fixed effects on a ggplot2 chart #' @param data a data.frame generated by \code{\link{FEsim}} with simulations of #' the fixed effects of a \code{\link{merMod}} #' @param level the width of the confidence interval #' @param stat a character value indicating the variable name in data of the #' midpoint of the estimated interval, e.g. "mean" or "median" #' @param sd logical, indicating whether or not to plot error bars around #' the estimates (default is TRUE). Calculates the width of the error bars #' based on \code{level} and the variable named "sd" in \code{data} #' @param intercept logical, should the intercept be included, default is FALSE #' @param sigmaScale a numeric value to divide the estimate and the standard #' deviation by in the case of doing an effect size calculation #' @param oddsRatio logical, should the parameters be converted to odds ratios #' before plotting #' @return a ggplot2 plot of the coefficient effects #' @examples #' fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' (p1 <- plotFEsim(FEsim(fm1))) #' @export #' @import ggplot2 plotFEsim <- function(data, level=0.95, stat = "median", sd = TRUE, intercept = FALSE, sigmaScale = NULL, oddsRatio = FALSE){ # error checking plot_sim_error_chks(type= "FE", level= level, stat= stat, sd= sd, sigmaScale= sigmaScale, oddsRatio= oddsRatio, labs= TRUE, facet= TRUE) if(!missing(sigmaScale)){ data[, "sd"] <- data[, "sd"] / sigmaScale data[, stat] <- data[, stat] / sigmaScale } if(intercept == FALSE){ data <- data[data$term != "(Intercept)", ] } data[, "sd"] <- data[, "sd"] * qnorm(1-((1-level)/2)) data[, "ymax"] <- data[, stat] + data[, "sd"] data[, "ymin"] <- data[, stat] - data[, "sd"] hlineInt <- 0 if(oddsRatio == TRUE){ data[, "ymax"] <- exp(data[, "ymax"]) data[, stat] <- exp(data[, stat]) data[, "ymin"] <- exp(data[, "ymin"]) hlineInt <- 1 } xvar <- "term" data$term <- as.character(data$term) data$term <- factor(data$term , levels = data[order(data[, stat]), 1]) p <- ggplot(aes_string(x = xvar, y = stat, ymax = "ymax", ymin = "ymin"), data = data) + geom_hline(yintercept = hlineInt, color = I("red")) + geom_point(size=I(3)) + coord_flip() + theme_bw() if (sd) { p <- p + geom_errorbar(width = 0.2) } p } merTools/R/zzz.R0000644000176200001440000001141613402510754013243 0ustar liggesusers# Global variables utils::globalVariables(c(".shinyMerPar", "sig", "sigma", "Lind", "group", "est", "mean_est", "est_ss", "within_var", "between_var", "statistic")) #' @importFrom methods as is #' @importFrom stats AIC as.formula formula logLik median model.matrix na.omit #' pnorm qnorm quantile residuals rgamma rnorm sd vcov weighted.mean delete.response #' model.frame na.pass reformulate runif terms getCall #' @importFrom utils packageVersion zzz <- function(){ # Nothing } #' Title #' #' @param object a merMod object #' @param correlation optional p value #' @param use.hessian logical #' @param ... 
additional arguments to pass through #' #' @return a summary of the object sum.mm <- function(object, correlation = (p <= getOption("lme4.summary.cor.max")), use.hessian = NULL, ...) { if (length(list(...)) > 0) { ## FIXME: need testing code warning("additional arguments ignored") } ## se.calc: hess.avail <- (!is.null(h <- object@optinfo$derivs$Hessian) && nrow(h) > length(getME(object,"theta"))) if (is.null(use.hessian)) use.hessian <- hess.avail if (use.hessian && !hess.avail) stop("'use.hessian=TRUE' specified, but Hessian is unavailable") resp <- object@resp devC <- object@devcomp dd <- devC$dims ## cmp <- devC$cmp useSc <- as.logical(dd[["useSc"]]) sig <- sigma(object) ## REML <- isREML(object) famL <- famlink(resp = resp) p <- length(coefs <- fixef(object)) vc <- as.matrix(vcov(object, use.hessian = use.hessian)) stdError <- sqrt(diag(vc)) coefs <- cbind("Estimate" = coefs, "Std. Error" = stdError) if (p > 0) { coefs <- cbind(coefs, (cf3 <- coefs[,1]/coefs[,2]), deparse.level = 0) colnames(coefs)[3] <- paste(if(useSc) "t" else "z", "value") if (isGLMM(object)) # FIXME: if "t" above, cannot have "z" here coefs <- cbind(coefs, "Pr(>|z|)" = 2*pnorm(abs(cf3), lower.tail = FALSE)) } llAIC <- llikAIC(object) ## FIXME: You can't count on object@re@flist, ## nor compute VarCorr() unless is(re, "reTrms"): varcor <- VarCorr(object) # use S3 class for now structure(list(methTitle = methTitle(dd), objClass = class(object), devcomp = devC, isLmer = is(resp, "lmerResp"), useScale = useSc, logLik = llAIC[["logLik"]], family = famL$family, link = famL$link, ngrps = ngrps(object), coefficients = coefs, sigma = sig, vcov = vcov(object, correlation = correlation, sigm = sig), varcor = varcor, # and use formatVC(.) for printing. AICtab = llAIC[["AICtab"]], call = object@call, residuals = residuals(object,"pearson",scaled = TRUE), fitMsgs = fetch.merMod.msgs(object), optinfo = object@optinfo ), class = "summary.merMod") } #' Find link function family #' #' @param object a merMod object #' @param resp the response vector #' #' @return the link function and family famlink <- function(object, resp = object@resp) { if(is(resp, "glmResp")) resp$family[c("family", "link")] else list(family = NULL, link = NULL) } ##' Extract all warning msgs from a merMod object ##' ##' @param x a merMod object fetch.merMod.msgs <- function(x) { ## currently only those found with 'X' : aX <- attributes(x@pp$X) wmsgs <- grep("^msg", names(aX)) if(any(has.msg <- nchar(Xwmsgs <- unlist(aX[wmsgs])) > 0)) Xwmsgs[has.msg] else character() } ##' Extract all warning msgs from a merMod object ##' @param type check a fixed or random effect ##' @inheritParams plotREsim plot_sim_error_chks <- function(type= c("FE", "RE"), level = 0.95, stat = c("mean", "median"), sd = TRUE, sigmaScale = NULL, oddsRatio = FALSE, labs = FALSE, facet= TRUE) { if (level <= 0 | level >= 1) stop("level must be specified as a numeric in (0,1).") stat <- match.arg(stat, several.ok= FALSE) if (!is.logical(sd)) stop("sd must be a logical expression.") if (!is.null(sigmaScale) && !is.logical(sigmaScale)) stop("sigmaScale must be a logical expression.") if (!is.logical(oddsRatio)) stop("oddsRatio must be a logical expression.") if (!is.logical(labs)) stop("labs must be a logical expression.") if (!is.logical(facet)) { if(any(c(!is.list(facet), is.null(names(facet)), names(facet) != c("groupFctr", "term")))) stop("facet must be either a logical expression or a named list.") } } merTools/R/merData.R0000644000176200001440000004136413402510754013770 0ustar liggesusers#' 
@title Clean up variable names in data frames #' @name sanitizeNames #' @description Strips out transformations from variable names in data frames #' @param data a data.frame #' @return a data frame with variable names cleaned to remove factor() construction sanitizeNames <- function(data){ badFac <- grep("factor\\(", names(data)) for(i in badFac){ names(data)[i] <- gsub("factor\\(", "", names(data)[i]) names(data)[i] <- gsub("\\)", "", names(data)[i]) } row.names(data) <- NULL return(data) } #' @title Remove attributes from a data.frame #' @name stripAttributes #' @description Strips attributes off of a data frame that come with a merMod model.frame #' @param data a data.frame #' @return a data frame with variable names cleaned to remove all attributes except for #' names, row.names, and class stripAttributes <- function(data){ attr <- names(attributes(data)) good <- c("names", "row.names", "class") for(i in attr[!attr %in% good]){ attr(data, i) <- NULL } return(data) } #' @title Draw a single observation out of an object matching some criteria #' @name draw #' @description Draw is used to select a single observation out of an R object. #' Additional parameters allow the user to control how that observation is #' chosen in order to manipulate that observation later. This is a generic #' function with methods for a number of objects. #' @param object the object to draw from #' @param type what kind of draw to make. Options include random or average #' @param varList a list specifying filters to subset the data by when making the #' draw #' @param seed numeric, optional argument to set seed for simulations, ignored if type="average" #' @param ... additional arguments required by certain methods #' @return a data.frame with a single row representing the desired observation #' @details In cases of tie, ".", may be substituted for factors. #' @export draw #' @rdname draw draw <- function(object, type = c("random", "average"), varList = NULL, seed = NULL, ...){ UseMethod("draw") } #' @title Draw an observation from a merMod object #' @rdname draw #' @method draw merMod #' @export #' @import lme4 #' @examples #' fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' # Random case #' draw(fm1, type = "random") #' # Average #' draw(fm1, type = "average") #' # Subset #' draw(fm1, type = "average", varList = list("Subject" = "308")) #' draw.merMod <- function(object, type = c("random", "average"), varList = NULL, seed = NULL, ...){ type <- match.arg(type, c("random", "average"), several.ok = FALSE) if(type == 'random'){ out <- randomObs(object, varList, seed) return(out) } else if(type == 'average'){ out <- averageObs(object, varList, ...) return(out) } } #' @title Select a random observation from model data #' @name randomObs #' @description Select a random observation from the model frame of a merMod #' @param merMod an object of class merMod #' @param varList optional, a named list of conditions to subset the data on #' @param seed numeric, optional argument to set seed for simulations #' @return a data frame with a single row for a random observation, but with full #' factor levels. See details for more. 
#' @details Each factor variable in the data frame has all factor levels from the #' full model.frame stored so that the new data is compatible with predict.merMod #' @export randomObs <- function(merMod, varList, seed = NULL){ if(!missing(varList)){ data <- subsetList(merMod@frame, varList) } if (!is.null(seed)) set.seed(seed) else if (!exists(".Random.seed", envir = .GlobalEnv)) runif(1) out <- data[sample(1:nrow(data), 1),] chars <- !sapply(out, is.numeric) for(i in names(out[, chars])){ out[, i] <- superFactor(out[, i], fullLev = unique(merMod@frame[, i])) } out <- stripAttributes(out) return(out) } #' @title Collapse a dataframe to a single average row #' @name collapseFrame #' @description Take an entire dataframe and summarize it in one row by using the #' mean and mode. #' @param data a data.frame #' @return a data frame with a single row #' @details Each character and factor variable in the data.frame is assigned to the #' modal category and each numeric variable is collapsed to the mean. Currently if #' mode is a tie, returns a "." collapseFrame <- function(data){ chars <- !sapply(data, is.numeric) chars <- names(data[, chars, drop = FALSE]) nums <- sapply(data, is.numeric) nums <- names(data[, nums, drop = FALSE]) numDat <- apply(data[, nums, drop = FALSE], 2, mean) statmode <- function(x){ z <- table(as.vector(x)) m <- names(z)[z == max(z)] if (length(m) == 1) { return(m) } return(".") } charDat <- apply(data[, chars, drop = FALSE], 2, statmode) cfdata <- cbind(as.data.frame(t(numDat)), as.data.frame(t(charDat))) cfdata <- cfdata[, names(data)] return(cfdata) } #' @title Subset a data.frame using a list of conditions #' @name subsetList #' @description Split a data.frame by elements in a list #' @param data a data.frame #' @param list a named list of splitting conditions #' @return a data frame with values that match the conditions in the list subsetList <- function(data, list){ if("logical" %in% unlist(lapply(list, class))){ stop("List is improperly formatted. Try using only `=` instead of `==` in subsets") } for(i in names(list)){ data <- split(data, data[, i]) data <- data[[list[[i]]]] data <- as.data.frame(data) } return(data) } #' \code{findFormFuns} used by \link[merTools]{averageObs} to calculate proper #' averages #' #' The purpose is to properly derive data for the average observation in the #' data by being 'aware' of formulas that contain interactions and/or function #' calls. For example, in the old behavior, if the formula contained a square #' term specified as \code{I(x^2)}, we were returning the mean of x{^2} not the #' square of mean(x). #' #' @param merMod the merMod object from which to draw the average observation #' @param origData (default=NULL) a data frame containing the original, #' untransformed data used to call the model. This MUST be specified if #' the original variables used in formula function calls are NOT present #' as 'main effects'. #' #' @return a data frame with a single row for the average observation, but with full #' factor levels. See details for more. 
#' #' @export findFormFuns <- function(merMod, origData = NULL) { form <- getCall(merMod)$formula form.rhs <- delete.response(terms(form)) modFrame <- model.frame(merMod) if (identical(modFrame, origData)) { origData = NULL } modFrame.tt <- terms(modFrame) #This part is a bit kludgy but should work modFrame.labels <- unique(unlist(strsplit(attr(modFrame.tt, "term.labels"), split = ":", fixed = TRUE))) modFrame.resp <- setdiff(rownames(attr(modFrame.tt, "factors")), unique(unlist(strsplit(colnames(attr(modFrame.tt, "factors")), split = ":", fixed = TRUE)))) modFrame <- modFrame[, c(modFrame.resp, modFrame.labels)] #Scan RHS of formula labels for parens -> exit if clean paren_terms <- grepl("[()]", c(modFrame.resp, modFrame.labels)) if (!any(paren_terms)) { if(is.null(origData)){ out <- collapseFrame(modFrame) } else{ out <- collapseFrame(origData) } return(out) } else { rhs.vars <- all.vars(form.rhs) #Warning if functions are detected but neither MAIN EFFECTS NOR DATA are supplied if (is.null(origData)) { if (!all(rhs.vars %in% modFrame.labels)) { warning(paste("\n\n Functions detected in formula without user supplied data", " or main effects of affected variables so returning means of", " transformed variables.\n", " Make sure that this is appropriate or supply untransformed", " data using the 'origData' argument. See ?merTools::findFormFuns", sep = "\n")) out <- collapseFrame(modFrame) return(out) } else { #Functions Detected and Main Effects Present out <- collapseFrame(modFrame) for (i in which(paren_terms)) { out[1,i] <- eval(parse(text = colnames(out)[i]), envir = out[, rhs.vars]) } return(out) } } else { #Functions Detected and Not All Main Effects Present ... but Data supplied out <- collapseFrame(modFrame) outData <- collapseFrame(origData) for (i in which(paren_terms)) { out[1,i] <- eval(parse(text = colnames(out)[i]), envir = outData) } return(out) } } } #' @title Find the average observation for a merMod object #' @name averageObs #' @description Extract a data frame of a single row that represents the #' average observation in a merMod object. This function also allows the #' user to pass a series of conditioning argument to calculate the average #' observation conditional on other characteristics. #' @param merMod a merMod object #' @param varList optional, a named list of conditions to subset the data on #' @param origData (default=NULL) a data frame containing the original, #' untransformed data used to call the model. This MUST be specified if #' the original variables used in formula function calls are NOT present #' as 'main effects'. #' @param ... not used currently #' @return a data frame with a single row for the average observation, but with full #' factor levels. See details for more. #' @details Each character and factor variable in the data.frame is assigned to the #' modal category and each numeric variable is collapsed to the mean. Currently if #' mode is a tie, returns a "." Uses the collapseFrame function. 
#' @export averageObs <- function(merMod, varList = NULL, origData = NULL, ...){ if(!missing(varList)){ if (is.null(origData)) { data <- subsetList(merMod@frame, varList) } else { data <- subsetList(origData, varList) } if(nrow(data) < 20 & nrow(data) > 2){ warning("Subset has less than 20 rows, averages may be problematic.") } if(nrow(data) <3){ warning("Subset has fewer than 3 rows, computing global average instead.") if (is.null(origData)) { data <- merMod@frame } else { data <- origData } } } else{ if (is.null(origData)) { data <- merMod@frame } else { data <- origData } } out <- findFormFuns(merMod, origData = data) reTerms <- names(ngrps(merMod)) if(any(reTerms %in% names(varList))){ reTerms <- reTerms[!reTerms %in% names(varList)] } if(length(reTerms) > 0){ for(i in 1:length(reTerms)){ out[, reTerms[i]] <- REquantile(merMod = merMod, quantile = 0.5, groupFctr = reTerms[[i]]) out[, reTerms[i]] <- as.character(out[, reTerms[i]]) } } chars <- !sapply(out, is.numeric) for(i in names(out[, chars])){ out[, i] <- try(superFactor(out[, i], fullLev = unique(merMod@frame[, i])), silent = TRUE) } out <- stripAttributes(out) out <- out[, names(merMod@frame)] return(out) } #' @title Create a factor with unobserved levels #' @name superFactor #' @description Create a factor variable and include unobserved levels #' for compatibility with model prediction functions #' @param x a vector to be converted to a factor #' @param fullLev a vector of factor levels to be assigned to x #' @return a factor variable with all observed levels of x and all levels #' of x in fullLev #' @export #' @examples #' regularFactor <- c("A", "B", "C") #' regularFactor <- factor(regularFactor) #' levels(regularFactor) #' # Now make it super #' newLevs <- c("D", "E", "F") #' regularFactor <- superFactor(regularFactor, fullLev = newLevs) #' levels(regularFactor) # now super superFactor <- function(x, fullLev){ x <- as.character(x) if("factor" %in% class(fullLev)){ fullLev <- unique(levels(fullLev)) } unobsLev <- unique(x)[!unique(x) %in% fullLev] x <- factor(x, levels = c(fullLev, unobsLev), labels = c(fullLev, unobsLev)) return(x) } #' @title Randomly reorder a dataframe #' @name shuffle #' @description Randomly reorder a dataframe by row #' @param data a data frame #' @return a data frame of the same dimensions with the rows reordered #' randomly shuffle <- function(data){ return(data[sample(nrow(data)),]) } # wiggle for a single variable (var) and single set of changing values (values) single_wiggle <- function(data, var, values) { tmp.data <- data data <- do.call("rbind", replicate(length(values), data, simplify= FALSE)) data[, var] <- rep(values, each = nrow(tmp.data)) if(any(class(tmp.data[, var]) %in% c("factor", "ordered"))){ data[, var] <- superFactor(data[, var], fullLev = levels(tmp.data[, var])) } return(data) } #' @title Assign an observation to different values #' @name wiggle #' @description Creates a new data.frame with copies of the original observation, #' each assigned to a different user-specified value of a variable. Allows the #' user to look at the effect on predicted values of changing either a single variable #' or multiple variables. #' @param data a data frame with one or more observations to be reassigned #' @param varlist a character vector specifying the name(s) of the variable to adjust #' @param valueslist a list of vectors with the values to assign to var #' @return a \code{data.frame} with each row assigned to the one of the new variable combinations. 
#' All variable combinations are returned, eg wiggling two variables with 3 and 4 variables #' respectively will return a new dataset with \code{3 * 4 = 12} observations. #' @details If the variable specified is a factor, then wiggle will return it #' as a character. #' @export #' @examples #' data(iris) #' wiggle(iris[3,], varlist = "Sepal.Width", valueslist = list(c(1, 2, 3, 5))) #' wiggle(iris[3:5,], "Sepal.Width", valueslist = list(c(1, 2, 3, 5))) #' wiggle(iris[3,], c("Sepal.Width", "Petal.Length"), list(c(1,2,3,5), c(3,5,6))) #' wiggle(iris[3:5,], c("Sepal.Width", "Petal.Length"), list(c(1,2,3,5), c(3,5,6))) wiggle <- function(data, varlist, valueslist) { if (length(varlist) != length(valueslist)) stop("varlist and valueslist must be equi-length.") n_var <- length(varlist) if (n_var == 1) { return(single_wiggle(data, varlist[[1]], valueslist[[1]])) } else { temp <- single_wiggle(data, varlist[[1]], valueslist[[1]]) temp <- split(temp, f= varlist[[1]]) varlist <- varlist[-1]; valueslist <- valueslist[-1] return(do.call("rbind", lapply(temp, wiggle, varlist= varlist, valueslist= valueslist))) } } #' @title Identify group level associated with RE quantile #' @name REquantile #' @description For a user specified quantile (or quantiles) of the random effect #' terms in a merMod object. This allows the user to easily identify the observation #' associated with the nth percentile effect. #' @param merMod a merMod object with one or more random effect levels #' @param quantile a numeric vector with values between 0 and 100 for quantiles #' @param groupFctr a character of the name of the random effect grouping factor to extract #' quantiles from #' @param term a character of the random effect to extract for the grouping factor #' specified. Default is the intercept. #' @return a vector of the level of the random effect grouping term that corresponds #' to each quantile #' @export #' @examples #' fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' REquantile(fm1, quantile = 0.25, groupFctr = "Subject") #' REquantile(fm1, quantile = 0.25, groupFctr = "Subject", term = "Days") REquantile <- function(merMod, quantile, groupFctr, term = "(Intercept)"){ if(any(quantile > 1 | quantile < 0)){ stop("Quantiles must be specified on the range 0-1") } myRE <- ranef(merMod)[[groupFctr]] if(is.null(myRE)){ stop("Random effect group name not found. Please respecify grouping factor.") } myRE.tmp <- try(myRE[order(myRE[, term]), ,drop = FALSE], silent = TRUE) if(class(myRE.tmp) != "data.frame"){ term1 <- names(myRE)[1] myRE.tmp <- try(myRE[order(myRE[, term1]), ,drop = FALSE], silent = TRUE) warning(paste0(term, " not found in random effect terms. 
Returning first term, ", term1,", for grouping factor, ", groupFctr, ", instead.")) } myRE <- myRE.tmp; myRE.tmp <- NULL nobs <- nrow(myRE) if(nobs < 20){ message("Number of observations < 20, random effect quantiles may not be well-defined.") } obsnum <- floor(quantile * nobs) return(rownames(myRE)[obsnum]) } merTools/vignettes/0000755000176200001440000000000013466137707014104 5ustar liggesusersmerTools/vignettes/Using_predictInterval.Rmd0000644000176200001440000005126213402510755021046 0ustar liggesusers--- title: "Prediction Intervals from merMod Objects" author: "Jared Knowles and Carl Frederick" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Prediction Intervals from merMod Objects} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide', cache=FALSE} library(ggplot2); library(knitr); library(merTools) knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo = TRUE ) rstanarm_eval <- "rstanarm" %in% rownames(installed.packages()) ``` ## Introduction Fitting (generalized) linear mixed models, (G)LMM, to very large data sets is becoming increasingly easy, but understanding and communicating the uncertainty inherent in those models is not. As the documentation for `lme4::predict.merMod()` notes: > There is no option for computing standard errors of predictions because it is > difficult to define an efficient method that incorporates uncertainty in the > variance parameters; we recommend `lme4::bootMer()` for this task. We agree that, short of a fully Bayesian analysis, bootstrapping is the gold-standard for deriving a prediction interval predictions from a (G)LMM, but the time required to obtain even a respectable number of replications from `bootMer()` quickly becomes prohibitive when the initial model fit is on the order of hours instead of seconds. The only other alternative we have identified for these situations is to use the `arm::sim()` function to simulate values. Unfortunately, this only takes variation of the fixed coefficients and residuals into account, and assumes the conditional modes of the random effects are fixed. We developed the `predictInterval()` function to incorporate the variation in the conditional modes of the random effects (CMRE, a.k.a. BLUPs in the LMM case) into calculating prediction intervals. Ignoring the variance in the CMRE results in overly confident estimates of predicted values and in cases where the precision of the grouping term varies across levels of grouping terms, creates the illusion of difference where none may exist. The importance of accounting for this variance comes into play sharply when comparing the predictions of different models across observations. We take the warning from `lme4::predict.merMod()` seriously, but view this method as a decent first approximation the full bootstrap analysis for (G)LMMs fit to very large data sets. ## Conceptual description In order to generate a proper prediction interval, a prediction must account for three sources of uncertainty in mixed models: 1. the residual (observation-level) variance, 2. the uncertainty in the fixed coefficients, and 3. the uncertainty in the variance parameters for the grouping factors. A fourth, uncertainty about the data, is beyond the scope of any prediction method. 
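To make these sources concrete, the sketch below shows how they could be combined by hand for a single observation from the sleep study model used later in this vignette. This is an illustration only: object names such as `fm` and `obs` are ours, the chunk is not evaluated, and `predictInterval()` wraps this logic (and more) internally.

```{r uncertaintySketch, eval=FALSE}
library(lme4)
fm <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
obs <- sleepstudy[1, ]   # one observation for an existing Subject
n.sims <- 1000

# source 2: uncertainty in the fixed coefficients
betas <- MASS::mvrnorm(n.sims, mu = fixef(fm), Sigma = as.matrix(vcov(fm)))

# (part of) source 3: uncertainty in this Subject's conditional modes, drawn from
# the conditional variance-covariance matrix stored in the "postVar" attribute
re <- ranef(fm, condVar = TRUE)$Subject
j <- which(rownames(re) == as.character(obs$Subject))
us <- MASS::mvrnorm(n.sims, mu = unlist(re[j, ]),
                    Sigma = attr(re, "postVar")[, , j])

# source 1: the residual (observation-level) variance
eps <- rnorm(n.sims, mean = 0, sd = sigma(fm))

# combine on the linear predictor scale and summarize
X <- c(1, obs$Days)   # intercept and Days for this observation
sims <- as.vector(betas %*% X) + as.vector(us %*% X) + eps
quantile(sims, probs = c(0.025, 0.5, 0.975))
```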
As we mentioned above, the `arm::sim()` function incorporates the first two sources of
variation but not the third, while bootstrapping using `lme4::bootMer()` does incorporate
all three sources of uncertainty because it re-estimates the model using random samples of
the data.

When inference about the values of the CMREs is of interest, it would be nice to incorporate
some degree of uncertainty in those estimates when comparing observations across groups.
`predictInterval()` does this by drawing values of the CMREs from the conditional
variance-covariance matrix of the random effects accessible from
`lme4::ranef(model, condVar=TRUE)`. Thus, `predictInterval()` incorporates all of the
uncertainty from sources one and two, and part of the variance from source 3, but the
variance parameters themselves are treated as fixed.

To do this, `predictInterval()` takes an estimated model of class `merMod` and, like
`predict()`, a data.frame upon which to make those predictions and:

1. extracts the fixed and random coefficients
2. takes `n` draws from the multivariate normal distribution of the fixed and random
   coefficients (separately)
3. calculates the linear predictor for each row in `newdata` based on these draws, and
4. optionally incorporates the residual variation (per the `arm::sim()` function), and,
5. returns `newdata` with the lower and upper limits of the prediction interval and the
   mean or median of the simulated predictions

Currently, the supported model types are linear mixed models and mixed logistic regression
models.

The prediction data set *can* include levels that are not in the estimation model frame. The
prediction intervals for such observations only incorporate uncertainty from fixed
coefficient estimates and the residual level of variation.

## Comparison to existing methods

What do the differences between `predictInterval()` and the other methods for constructing
prediction intervals mean in practice? We would expect `predictInterval()` to produce
prediction intervals that are wider than all methods except for the `bootMer()` method. We
would also hope that the prediction point estimate from other methods falls within the
prediction interval produced by `predictInterval()`. Ideally, the predicted point estimate
produced by `predictInterval()` would fall close to that produced by `bootMer()`.

This section compares the results of `predictInterval()` with those obtained using
`arm::sim()` and `lme4::bootMer()` using the sleepstudy data from `lme4`. These data contain
reaction time observations for 10 days on 18 subjects. The data are sorted such that the
first 10 observations are days one through ten for subject 1, the next 10 are days one
through ten for subject 2, and so on. The example model below includes random intercepts and
a random slope for the number of days.

### Step 1: Estimating the model and using `predictInterval()`

First, we will load the required packages and data and estimate the model:

```{r Prep, message=FALSE, warning=FALSE}
set.seed(271828)
data(sleepstudy)
fm1 <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy)
display(fm1)
```

Then, calculate prediction intervals using `predictInterval()`. The `predictInterval`
function has a number of user configurable options. In this example, we use the original
data `sleepstudy` as the newdata. We pass the function the `fm1` model we fit above. We also
choose a 95% interval with `level = 0.95`, though we could choose a less conservative
prediction interval. 
We make 1,000 simulations for each observation, `n.sims = 1000`. We set the point estimate
to be the median of the simulated values, instead of the mean. We ask for the linear
predictor back; if we had fit a logistic regression, we could instead have asked for our
predictions on the probability scale. Finally, we indicate that we want the predictions to
incorporate the residual variance from the model -- an option only available for `lmerMod`
objects.

```{r predInt}
PI.time <- system.time(
  PI <- predictInterval(merMod = fm1, newdata = sleepstudy,
                        level = 0.95, n.sims = 1000,
                        stat = "median", type="linear.prediction",
                        include.resid.var = TRUE)
)
```

Here are the first few rows of the object `PI`:

```{r Inspect predInt, results="asis", echo=FALSE}
kable(head(PI))
```

The three columns are the median (`fit`) and limits of the 95% prediction interval (`upr`
and `lwr`) because we set `level=0.95`. The following figure displays the output graphically
for the first 30 observations.

```{r Inspect predInt 2, fig.width=7, fig.align="center"}
library(ggplot2);
ggplot(aes(x=1:30, y=fit, ymin=lwr, ymax=upr), data=PI[1:30,]) +
  geom_point() +
  geom_linerange() +
  labs(x="Index", y="Prediction w/ 95% PI") + theme_bw()
```

#### Step 1a: Adjusting for correlation between fixed and random effects

The prediction intervals above do not correct for correlations between fixed and random
effects. This tends to lead to predictive intervals that are too conservative, especially
for existing groups when there is a lot of data on relatively few groups. In that case, a
significant portion of the uncertainty in the prediction can be due to variance in the fixed
intercept which is anti-correlated with variance in the random intercept effects. For
instance, it does not actually matter if the fixed intercept is 5 and the random intercept
effects are -2, 1, and 1, versus a fixed intercept of 6 and random intercept effects of -3,
0, and 0. (The latter situation will never be the MLE, but it can occur in this package's
simulations.)

To show this issue, we'll use the sleep study model, predicting the reaction times of
subjects after experiencing sleep deprivation:

```{r prepSleep, message=FALSE, warning=FALSE}
fm1 <- lmer(Reaction ~ Days + (Days|Subject), data=sleepstudy)
display(fm1)
```

Let's use the model to give an interval for the true average reaction time on day 0 for a
large group of subjects like the first one in the study -- subject 308:

```{r showSleep, message=FALSE, warning=FALSE}
sleepstudy[1,]
predictInterval(fm1, sleepstudy[1,], include.resid.var=0)
#predict the average reaction time for subjects like subject 308 on day 0
```

There are two ways to get predictInterval to create less-conservative intervals to deal with
this. The first is just to tell it to consider certain fixed effects as fully-known (that
is, with an effectively 0 variance). This is done using the `ignore.fixed.terms` argument. 
```{r showSleep2, message=FALSE, warning=FALSE}
predictInterval(fm1, sleepstudy[1,], include.resid.var=0,
                ignore.fixed.terms = 1)
# predict the average reaction time for a subject at day 0, taking the global intercept
# (mean reaction time) as fully known
predictInterval(fm1, sleepstudy[1,], include.resid.var=0,
                ignore.fixed.terms = "(Intercept)") #Same as above
predictInterval(fm1, sleepstudy[1,], include.resid.var=0,
                ignore.fixed.terms = 1:2)
# as above, taking the first two fixed effects (intercept and days effect) as fully known
```

The second way is to use an ad-hoc variance adjustment, with the `fix.intercept.variance`
argument. This takes the model's intercept variance $\hat\sigma^2_\mu$ and adjusts it to:

$$\hat\sigma\prime^2_\mu = \hat\sigma^2_\mu-\Sigma_{levels}\frac{1}{\Sigma_{groups(level)}1/(\hat\sigma^2_{level}+\sigma^2_{group})}$$

In other words, it assumes the given intercept variance incorporates spurious variance for
each level, where each of the spurious variance terms has a precision equal to the sum of
the precisions due to the individual groups at that level.

```{r showSleep3, message=FALSE, warning=FALSE}
predictInterval(fm1, sleepstudy[1,], include.resid.var=0,
                fix.intercept.variance = TRUE)
# predict the average reaction time for a subject at day 0, using an ad-hoc
# correction for the covariance of the intercept with the random intercept effects.
```

A few notes about these two arguments:

* `fix.intercept.variance=T` is redundant with `ignore.fixed.terms=1`, but not vice versa.
* These corrections should NOT be used when predicting outcomes for groups not present in
the original data.

### Step 2: Comparison with `arm::sim()`

How does the output above compare to what we could get from `arm::sim()`?

```{r arm.Sim, fig.width=7, fig.height=4, fig.align="center"}
PI.arm.time <- system.time(
  PI.arm.sims <- arm::sim(fm1, 1000)
)

PI.arm <- data.frame(
  fit=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.500)),
  upr=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.975)),
  lwr=apply(fitted(PI.arm.sims, fm1), 1, function(x) quantile(x, 0.025))
)

comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI),
                   data.frame(Predict.Method="arm::sim()", x=(1:nrow(PI.arm))+0.1, PI.arm))

ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method),
       data=comp.data[c(1:30,181:210),]) +
  geom_point() +
  geom_linerange() +
  labs(x="Index", y="Prediction w/ 95% PI") +
  theme_bw() +  theme(legend.position="bottom") +
  scale_color_brewer(type = "qual", palette = 2)
```

The prediction intervals from `arm::sim()` are much smaller and the random slopes for Days
vary more than they do for `predictInterval`. Both results are as expected, given the small
number of subjects and observations per subject in these data. Because `predictInterval()`
is incorporating uncertainty in the CMREs (but not the variance parameters of the random
coefficients themselves), the Days slopes are closer to the overall or pooled regression
slope.

### Step 3: Comparison with `lme4::bootMer()`

As quoted above, the developers of lme4 suggest that users interested in uncertainty
estimates around their predictions use `lme4::bootMer()` to calculate them. The
documentation for `lme4::bootMer()` goes on to describe three implemented flavors of
bootstrapped estimates:

1. parametrically resampling both the *"spherical"* random effects *u* and the i.i.d.
errors $\epsilon$
2. treating the random effects as fixed and parametrically resampling the i.i.d. errors
3. 
treating the random effects as fixed and semi-parametrically resampling the i.i.d. errors from the distribution of residuals. We will compare the results from `predictInterval()` with each method, in turn. #### Step 3a: `lme4::bootMer()` method 1 ```{r bootMer.1, fig.width=7, fig.height=4, fig.align="center"} ##Functions for bootMer() and objects ####Return predicted values from bootstrap mySumm <- function(.) { predict(., newdata=sleepstudy, re.form=NULL) } ####Collapse bootstrap into median, 95% PI sumBoot <- function(merBoot) { return( data.frame(fit = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.5, na.rm=TRUE))), lwr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.025, na.rm=TRUE))), upr = apply(merBoot$t, 2, function(x) as.numeric(quantile(x, probs=.975, na.rm=TRUE))) ) ) } ##lme4::bootMer() method 1 PI.boot1.time <- system.time( boot1 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=FALSE, type="parametric") ) PI.boot1 <- sumBoot(boot1) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 1", x=(1:nrow(PI.boot1))+0.1, PI.boot1)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` The intervals produced by `predictInterval`, represented in green, cover the point estimates produced by `bootMer` in every case for these 30 observations. Additionally, in almost every case, the `predictInterval` encompasses the entire interval presented by `bootMer`. Here, the estimates produced by `bootMer` are re-estimating the group terms, but by refitting the model, they are also taking into account the conditional variance of these terms, or `theta`, and provide tighter prediction intervals than the `predictInterval` method. ####Step 3b: `lme4::bootMer()` method 2 ```{r bootMer.2, fig.width=7, fig.height=4, fig.align="center"} ##lme4::bootMer() method 2 PI.boot2.time <- system.time( boot2 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=TRUE, type="parametric") ) PI.boot2 <- sumBoot(boot2) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 2", x=(1:nrow(PI.boot2))+0.1, PI.boot2)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` Here, the results for `predictInterval` in green again encompass the results from `bootMer`, but are much wider. The `bootMer` estimates are ignoring the variance in the group effects, and as such, are only incorporating the residual variance and the variance in the fixed effects -- similar to the `arm::sim()` function. 
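As a rough numeric check on what the figures in Steps 2, 3a, and 3b show, we can compare the
average width of the 95% intervals produced so far (the objects below were all created in
the preceding chunks):

```{r intervalWidths}
# Average 95% interval width by method, using the objects computed above
c(predictInterval = mean(PI$upr - PI$lwr),
  arm.sim = mean(PI.arm$upr - PI.arm$lwr),
  bootMer.1 = mean(PI.boot1$upr - PI.boot1$lwr),
  bootMer.2 = mean(PI.boot2$upr - PI.boot2$lwr))
```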
#### Step 3c: `lme4::bootMer()` method 3 ```{r bootMer.3, fig.width=7, fig.height=4, fig.align="center"} ##lme4::bootMer() method 3 PI.boot3.time <- system.time( boot3 <- lme4::bootMer(fm1, mySumm, nsim=250, use.u=TRUE, type="semiparametric") ) PI.boot3 <- sumBoot(boot3) comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="lme4::bootMer() - Method 3", x=(1:nrow(PI.boot3))+0.1, PI.boot3)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` These results are virtually identical to those above. #### Step 3c: Comparison to rstanarm ```{r echo=FALSE, message=FALSE, eval = rstanarm_eval} library(rstanarm) central_intervals <- function(x, prob) { if (!identical(length(prob), 1L) || prob <= 0 || prob >= 1) stop("'prob' should be a single number greater than 0 and less than 1.", call. = FALSE) alpha <- (1 - prob) / 2 probs <- c(alpha, 1 - alpha) labs <- paste0(100 * probs, "%") out <- t(apply(x, 2L, quantile, probs = probs)) structure(out, dimnames = list(colnames(x), labs)) } ``` ```{r, message=FALSE, fig.width=7, fig.height=4, fig.align="center", eval = rstanarm_eval} PI.time.stan <- system.time({ fm_stan <- stan_lmer(Reaction ~ Days + (Days|Subject), data = sleepstudy, verbose = FALSE, open_progress = FALSE, refresh = -1, show_messages=FALSE, chains = 1) zed <- posterior_predict(fm_stan) PI.stan <- cbind(apply(zed, 2, median), central_intervals(zed, prob=0.95)) }) print(fm_stan) PI.stan <- as.data.frame(PI.stan) names(PI.stan) <- c("fit", "lwr", "upr") PI.stan <- PI.stan[, c("fit", "upr", "lwr")] comp.data <- rbind(data.frame(Predict.Method="predictInterval()", x=(1:nrow(PI))-0.1, PI), data.frame(Predict.Method="rstanArm", x=(1:nrow(PI.stan))+0.1, PI.stan)) ggplot(aes(x=x, y=fit, ymin=lwr, ymax=upr, color=Predict.Method), data=comp.data[c(1:30,181:210),]) + geom_point() + geom_linerange() + labs(x="Index", y="Prediction w/ 95% PI") + theme_bw() + theme(legend.position="bottom") + scale_color_brewer(type = "qual", palette = 2) ``` ### Computation time Our initial motivation for writing this function was to develop a method for incorporating uncertainty in the CMFEs for mixed models estimated on very large samples. Even for models with only modest degrees of complexity, using `lme4::bootMer()` quickly becomes time prohibitive because it involves re-estimating the model for each simulation. We have seen how each alternative compares to `predictInterval()` substantively, but how do they compare in terms of computational time? The table below lists the output of `system.time()` for all five methods for calculating prediction intervals for `merMod` objects. 
```{r, echo=FALSE} if (rstanarm_eval) { times <- rbind(PI.time, PI.arm.time, PI.boot1.time, PI.boot2.time, PI.boot3.time, PI.time.stan)[, 1:3] rownames(times) <- c( "predictInterval()", "arm::sim()", "lme4::bootMer()-Method 1", "lme4::bootMer()-Method 2", "lme4::bootMer()-Method 3", "rstanarm:predict" ) kable(times) } else { times <- rbind(PI.time, PI.arm.time, PI.boot1.time, PI.boot2.time, PI.boot3.time)[, 1:3] rownames(times) <- c( "predictInterval()", "arm::sim()", "lme4::bootMer()-Method 1", "lme4::bootMer()-Method 2", "lme4::bootMer()-Method 3" ) kable(times) } ``` For this simple example, we see that `arm::sim()` is the fastest--nearly five times faster than `predictInterval()`. However, `predictInterval()` is nearly six times faster than any of the bootstrapping options via `lme4::bootMer`. This may not seem like a lot, but consider that the computational time for required for bootstrapping is roughly proportional to the number of bootstrapped simulations requested ... `predictInterval()` is not because it is just a series of draws from various multivariate normal distributions, so the time ratios in the table below represents the lowest bound of the computation time ratio of bootstrapping to `predictInterval()`. ## Simulation TBC. merTools/vignettes/imputation.Rmd0000644000176200001440000001327113466135366016744 0ustar liggesusers--- title: "Analyzing Imputed Data with Multilevel Models and merTools" author: "Jared Knowles" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Imputation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide'} knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo=TRUE, fig.width = 7 ) library(knitr); library(merTools) amelia_eval <- "Amelia" %in% rownames(installed.packages()) amelia_uneval <- !amelia_eval ``` ## Introduction Multilevel models are valuable in a wide array of problem areas that involve non-experimental, or observational data. In many of these cases the data on individual observations may be incomplete. In these situations, the analyst may turn to one of many methods for filling in missing data depending on the specific problem at hand, disciplinary norms, and prior research. One of the most common cases is to use multiple imputation. Multiple imputation involves fitting a model to the data and estimating the missing values for observations. For details on multiple imputation, and a discussion of some of the main implementations in R, look at the documentation and vignettes for the `mice` and `Amelia` packages. The key difficulty multiple imputation creates for users of multilevel models is that the result of multiple imputation is K replicated datasets corresponding to different estimated values for the missing data in the original dataset. For the purposes of this vignette, I will describe how to use one flavor of multiple imputation and the function in `merTools` to obtain estimates from a multilevel model in the presence of missing and multiply imputed data. ## Missing Data and its Discontents To demonstrate this workflow, we will use the `hsb` dataset in the `merTools` package which includes data on the math achievement of a wide sample of students nested within schools. The data has no missingness, so first we will simulate some missing data. 
```{r} data(hsb) # Create a function to randomly assign NA values add_NA <- function(x, prob){ z <- rbinom(length(x), 1, prob = prob) x[z==1] <- NA return(x) } hsb$minority <- add_NA(hsb$minority, prob = 0.05) table(is.na(hsb$minority)) hsb$female <- add_NA(hsb$female, prob = 0.05) table(is.na(hsb$female)) hsb$ses <- add_NA(hsb$ses, prob = 0.05) table(is.na(hsb$ses)) hsb$size <- add_NA(hsb$size, prob = 0.05) table(is.na(hsb$size)) ``` ```{r impute, message=FALSE, eval = amelia_eval} # Load imputation library library(Amelia) # Declare the variables to include in the imputation data varIndex <- names(hsb) # Declare ID variables to be excluded from imputation IDS <- c("schid", "meanses") # Imputate impute.out <- amelia(hsb[, varIndex], idvars = IDS, noms = c("minority", "female"), m = 5) summary(impute.out) ``` ```{r boot, message=FALSE, eval = amelia_uneval} # Amelia is not available so let's just boostrap resample our data impute.out <- vector(mode = "list", 5) for (i in 1:5) { impute.out[[i]] <- hsb[sample(nrow(hsb), nrow(hsb), replace = TRUE), ] } # Declare the variables to include in the imputation data summary(impute.out) ``` ## Fitting and Summarizing a Model List Fitting a model is very similar ```{r} fmla <- "mathach ~ minority + female + ses + meanses + (1 + ses|schid)" mod <- lmer(fmla, data = hsb) if(amelia_eval) { modList <- lmerModList(fmla, data = impute.out$imputations) } else { # Use bootstrapped data instead modList <- lmerModList(fmla, data = impute.out) } ``` The resulting object `modList` is a list of `merMod` objects the same length as the number of imputation datasets. This object is assigned the class of `merModList` and `merTools` provides some convenience functions for reporting the results of this object. Using this, we can directly compare the model fit with missing data excluded to the aggregate from the imputed models: ```{r} fixef(mod) # model with dropped missing fixef(modList) ``` ```{r} VarCorr(mod) # model with dropped missing VarCorr(modList) # aggregate of imputed models ``` If you want to inspect the individual models, or you do not like taking the mean across the imputation replications, you can take the `merModList` apart easily: ```{r} lapply(modList, fixef) ``` And, you can always operate on any single element of the list: ```{r} fixef(modList[[1]]) fixef(modList[[2]]) ``` ## Output of a Model List ```{r} print(modList) ``` ```{r} summary(modList) ``` ```{r} fastdisp(modList) ``` The standard errors reported for the model list include a correction, Rubin's correction (see documentation), which adjusts for the within and between imputation set variance as well. ## Specific Model Information Summaries ```{r} modelRandEffStats(modList) modelFixedEff(modList) VarCorr(modList) ``` ### Diagnostics of List Components ```{r} modelInfo(mod) ``` Let's apply this to our model list. ```{r} lapply(modList, modelInfo) ``` ### Model List Generics ```{r} summary(modList) ``` ```{r} modelFixedEff(modList) ``` ```{r} ranef(modList) ``` ## Cautions and Notes Often it is desirable to include aggregate values in the level two or level three part of the model such as level 1 SES and level 2 mean SES for the group. In cases where there is missingness in either the level 1 SES values, or in the level 2 mean SES values, caution and careful thought need to be given to how to proceed with the imputation routine. 
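As one illustration (a sketch of a single option, not a general recommendation), the group
mean can be recomputed from the imputed level-1 values within each completed data set before
fitting, so that `meanses` stays consistent with the imputed `ses` values:

```{r groupMeanSketch}
# Recompute each school's mean ses inside every completed data set before refitting.
# The names imp_list and modListGM are illustrative only.
imp_list <- if (amelia_eval) impute.out$imputations else impute.out
imp_list <- lapply(imp_list, function(d) {
  d$meanses <- ave(d$ses, d$schid, FUN = function(x) mean(x, na.rm = TRUE))
  d
})
modListGM <- lmerModList(fmla, data = imp_list)
modelFixedEff(modListGM)
```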
merTools/vignettes/marginal_effects.Rmd0000644000176200001440000000445613466135366020051 0ustar liggesusers--- title: "Using merTools to Marginalize Over Random Effect Levels" author: "Jared Knowles and Carl Frederick" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Marginalizing Random Effect Levels} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide', cache=FALSE} library(ggplot2); library(knitr); library(merTools) knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo = TRUE ) ``` # Marginalizing Random Effects One of the most common questions about multilevel models is how much influence grouping terms have on the outcome. One way to explore this is to simulate the predicted values of an observation across the distribution of random effects for a specific grouping variable and term. This can be described as "marginalizing" predictions over the distribution of random effects. This allows you to explore the influence of the grouping term and grouping levels on the outcome scale by simulating predictions for simulated values of each observation across the distribution of effect sizes. The `REmargins()` function allows you to do this. Here, we take the example `sleepstudy` model and marginalize predictions for all of the random effect terms (Subject:Intercept, Subject:Days). By default, the function will marginalize over the *quartiles* of the expected rank (see expected rank vignette) of the effect distribution for each term. ```{r} fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) mfx <- REmargins(merMod = fm1, newdata = sleepstudy[1:10,]) head(mfx) ``` The new data frame output from `REmargins` contains a lot of information. The first few columns contain the original data passed to `newdata`. Each observation in `newdata` is identified by a `case` number, because the function repeats each observation by the number of random effect terms and number of breaks to simulate each term over. Then the `grouping_var` # Summarizing # Plotting Finally - you can plot the results marginalization to evaluate the effect of the random effect terms graphically. ```{r} ggplot(mfx) + aes(x = breaks, y = fit_Subject, group = case) + geom_line() + facet_wrap(~term) ``` merTools/vignettes/merToolsIntro.Rmd0000644000176200001440000003615313402510755017364 0ustar liggesusers--- title: "An Introduction to merTools" author: "Jared Knowles and Carl Frederick" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{An Introduction to merTools} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, message=FALSE, warning=FALSE, results='hide'} knitr::opts_chunk$set( cache=FALSE, comment="#>", collapse=TRUE, echo=TRUE, fig.width = 7 ) library(knitr); library(merTools) ``` ## Introduction Working with generalized linear mixed models (GLMM) and linear mixed models (LMM) has become increasingly easy with the advances in the `lme4` package recently. As we have found ourselves using these models more and more within our work, we, the authors, have developed a set of tools for simplifying and speeding up common tasks for interacting with `merMod` objects from `lme4`. This package provides those tools. ## Illustrating Model Effects As the complexity of the model fit grows, it becomes harder and harder to interpret the substantive effect of parameters in the model. 
Let's start with a medium-sized example model using the `InstEval` data provided by the `lme4` package. These data represent university lecture evaluations at ETH Zurich made by students. In this data, `s` is an individual student, `d` is an individual lecturer, `studage` is the semester the student is enrolled, `lectage` is how many semesters back the lecture with the rating took place, `dept` is the department of the lecture, and `y` is an integer 1:5 representing the ratings of the lecture from "poor" to "very good": ```{r} library(lme4) head(InstEval) str(InstEval) ``` Starting with a simple model: ```{r} m1 <- lmer(y ~ service + lectage + studage + (1|d) + (1|s), data=InstEval) ``` After fitting the model we can make use of the first function provided by `merTools`, `fastdisp` which modifies the function `arm:::display` to more quickly display a summary of the model without calculating the model sigma: ```{r} library(merTools) fastdisp(m1) ``` We see some interesting effects. First, our decision to include student and lecturer effects seems justified as there is substantial variance within these groups. Second, there do appear to be some effects by age and for lectures given as a service by an outside lecturer. Let's look at these in more detail. One way to do this would be to plot the coefficients together in a line to see which deviate from 0 and in what direction. To get a confidence interval for our fixed effect coefficients we have a number of options that represent a tradeoff between coverage and computation time -- see `confint.merMod` for details. An alternative is to simulate values of the fixed effects from the posterior using the function `arm::sim`. Our next tool, `FEsim`, is a convenience wrapper to do this and provide an informative data frame of the results. ```{r} feEx <- FEsim(m1, 1000) cbind(feEx[,1] , round(feEx[, 2:4], 3)) ``` We can present these results graphically, using `ggplot2`: ```{r} library(ggplot2) ggplot(feEx[feEx$term!= "(Intercept)", ]) + aes(x = term, ymin = median - 1.96 * sd, ymax = median + 1.96 * sd, y = median) + geom_pointrange() + geom_hline(yintercept = 0, size = I(1.1), color = I("red")) + coord_flip() + theme_bw() + labs(title = "Coefficient Plot of InstEval Model", x = "Median Effect Estimate", y = "Evaluation Rating") ``` However, an easier option is: ```{r} plotFEsim(feEx) + theme_bw() + labs(title = "Coefficient Plot of InstEval Model", x = "Median Effect Estimate", y = "Evaluation Rating") ``` ## Random Effects Next, we might be interested in exploring the random effects. Again, we create a dataframe of the values of the simulation of these effects for the individual levels. ```{r} reEx <- REsim(m1) head(reEx) ``` The result is a dataframe with estimates of the values of each of the random effects provided by the `arm::sim()` function. *groupID* represents the identfiable level for the variable for one random effect, *term* represents whether the simulated values are for an intercept or which slope, and *groupFctr* identifies which of the `(1|x)` terms the values represent. To make unique identifiers for each term, we need to use both the `groupID` and the `groupFctr` term in case these two variables use overlapping label names for their groups. In this case: ```{r} table(reEx$term) table(reEx$groupFctr) ``` Most important is producing caterpillar or dotplots of these terms to explore their variation. 
This is easily accomplished with the `dotplot` function: ```{r, eval=FALSE, echo = TRUE} lattice::dotplot(ranef(m1, condVar=TRUE)) ``` However, these graphics do not provide much control over the results. Instead, we can use the `plotREsim` function in `merTools` to gain more control over plotting of the random effect simulations. ```{r} p1 <- plotREsim(reEx) p1 ``` The result is a ggplot2 object which can be modified however the user sees fit. Here, we've established that most student and professor effects are indistinguishable from zero, but there do exist extreme outliers with both high and low averages that need to be accounted for. ## Subtantive Effects A logical next line of questioning is to see how much of the variation in a rating can be caused by changing the student rater and how much is due to the fixed effects we identified above. This is a very difficult problem to solve, but using simulation we can examine the model behavior under a range of scenarios to understand how the model is reflecting changes in the data. To do this, we use another set of functions available in `merTools`. The simplest option is to pick an observation at random and then modify its values deliberately to see how the prediction changes in response. `merTools` makes this task very simple: ```{r} example1 <- draw(m1, type = 'random') head(example1) ``` The `draw` function takes a random observation from the data in the model and extracts it as a dataframe. We can now do a number of operations to this observation: ```{r} # predict it predict(m1, newdata = example1) # change values example1$service <- "1" predict(m1, newdata = example1) ``` More interesting, let's programatically modify this observation to see how the predicted value changes if we hold everything but one variable constant. ```{r} example2 <- wiggle(example1, varlist = "lectage", valueslist = list(c("1", "2", "3", "4", "5", "6"))) example2 ``` The function `wiggle` allows us to create a new dataframe with copies of the variable that modify just one value. Chaining together `wiggle` calls, we can see how the variable behaves under a number of different scenarios simultaneously. ```{r} example2$yhat <- predict(m1, newdata = example2) ggplot(example2, aes(x = lectage, y = yhat)) + geom_line(aes(group = 1)) + theme_bw() + ylim(c(1, 5)) + geom_hline(yintercept = mean(InstEval$y), linetype = 2) + geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) + geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3) ``` The result allows us to graphically display the effect of each level of `lectage` on an observation that is otherwise identical. This is plotted here against a horizontal line representing the mean of the observed ratings, and two finer lines showing plus or minus one standard deviation of the mean. This is nice, but selecting a random observation is not very satisfying as it may not be very meaningful. To address this, we can instead take the average observation: ```{r} example3 <- draw(m1, type = 'average') example3 ``` Here, the average observation is identified based on either the modal observation for factors or on the mean for numeric variables. Then, the random effect terms are set to the level equivalent to the median effect -- very close to 0. 
```{r}
example3 <- wiggle(example3, varlist = "service",
                   valueslist = list(c("0", "1")))
example3$yhat <- predict(m1, newdata = example3)

ggplot(example3, aes(x = service, y = yhat)) + geom_line(aes(group = 1)) +
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) +
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) +
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)
```

Here we can see that for the average observation, whether the lecture is given outside of the home department has a very slight negative effect on the overall rating. Might the individual professor or student have more of an impact on the overall rating? To answer this question we need to wiggle the same observation across a wide range of student or lecturer effects. How do we identify this range? `merTools` provides the `REquantile` function, which helps to identify which levels of the grouping terms correspond to which quantile of the magnitude of the random effects:

```{r}
REquantile(m1, quantile = 0.25, groupFctr = "s")
REquantile(m1, quantile = 0.25, groupFctr = "d")
```

Here we can see that group level `r REquantile(m1, quantile = 0.25, groupFctr = "s")` corresponds to the 25th percentile of the effect for the student groups, and level `r REquantile(m1, quantile = 0.25, groupFctr = "d")` corresponds to the 25th percentile for the instructor group. Using this information we can reassign a specific observation to varying magnitudes of grouping term effects to see how much they might influence our final prediction.

```{r}
example4 <- draw(m1, type = 'average')
example4 <- wiggle(example4, varlist = "s",
                   list(REquantile(m1, quantile = seq(0.1, 0.9, .1),
                                   groupFctr = "s")))
example4$yhat <- predict(m1, newdata = example4)

ggplot(example4, aes(x = reorder(s, -yhat), y = yhat)) + geom_line(aes(group = 1)) +
  theme_bw() + ylim(c(1, 5)) +
  geom_hline(yintercept = mean(InstEval$y), linetype = 2) +
  geom_hline(yintercept = mean(InstEval$y) + sd(InstEval$y), linetype = 3) +
  geom_hline(yintercept = mean(InstEval$y) - sd(InstEval$y), linetype = 3)
```

This figure is very interesting because it shows that moving across the range of student effects can have a larger impact on the score than the fixed effects we observed above. That is, getting a "generous" or a "stingy" rater can have a substantial impact on the final rating.

But we can do even better. First, we can move beyond the average observation by taking advantage of the `varList` option to `draw`, which allows us to specify a subset of the data to compute the average for.

```{r}
subExample <- list(studage = "2", lectage = "4")
example5 <- draw(m1, type = 'average', varList = subExample)
example5
```

Now we have the average observation with a student age of 2 and a lecture age of 4. We can then follow the same procedure as before to explore the effects within our subsamples. Before we do that, let's fit a slightly more complex model that includes a random slope.
```{r}
data(VerbAgg)
m2 <- glmer(r2 ~ Anger + Gender + btype + situ + (1|id) + (1 + Gender|item),
            family = binomial, data = VerbAgg)
example6 <- draw(m2, type = 'average', varList = list("id" = "149"))
example6$btype <- "scold"
example6$situ <- "self"

tempdf <- wiggle(example6, varlist = "Gender", list(c("M", "F")))
tempdf <- wiggle(tempdf, varlist = "item", list(unique(VerbAgg$item)))
tempdf$yhat <- predict(m2, newdata = tempdf, type = "response",
                       allow.new.levels = TRUE)

ggplot(tempdf, aes(x = item, y = yhat, group = Gender)) +
  geom_line(aes(color = Gender)) +
  theme_bw() + ylim(c(0, 1)) +
  theme(axis.text.x = element_text(angle = 20, hjust = 1),
        legend.position = "bottom") +
  labs(x = "Item", y = "Probability")
```

Here we've shown that the intercept and the gender slope for `item` simultaneously affect our predicted value, which is why the two lines of predicted values across the items are not parallel. While we can see this by looking at the summary of the model object (or, for larger models, at the output of `fastdisp` in the `merTools` package), it is not intuitive what that effect looks like across different scenarios. `merTools` has given us the machinery to investigate this.

## Uncertainty

The above examples make use of simulation to show the model behavior after changing some values in a dataset. However, until now, we've focused on using point estimates to represent these changes. The use of predicted point estimates without incorporating any uncertainty can lead to overconfidence in the precision of the model. In the `predictInterval` function, discussed in more detail in another package vignette, we provide a way to incorporate three out of the four types of uncertainty inherent in a model. These are:

1. Overall model uncertainty
2. Uncertainty in fixed effect values
3. Uncertainty in random effect values
4. Uncertainty in the distribution of the random effects

Types 1-3 are incorporated in the results of `predictInterval`, while capturing type 4 would require making use of the `bootMer` function -- options discussed in greater detail elsewhere. The main advantage of `predictInterval` is that it is fast. By leveraging the power of the `arm::sim()` function, we are able to generate prediction intervals for individual observations from very large models very quickly. And it works a lot like `predict`:

```{r}
exampPreds <- predictInterval(m2, newdata = tempdf, type = "probability",
                              level = 0.8)
tempdf <- cbind(tempdf, exampPreds)

ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr, group = Gender)) +
  geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black")) +
  theme_bw() + ylim(c(0, 1)) +
  theme(axis.text.x = element_text(angle = 20), legend.position = "bottom") +
  labs(x = "Item", y = "Probability")
```

Here we can see that there is barely any difference between the genders in the area covered by the potential prediction intervals. However, by default, this approach includes the residual variance of the model.
If we instead focus just on the uncertainty of the random and fixed effects, we get:

```{r}
exampPreds <- predictInterval(m2, newdata = tempdf, type = "probability",
                              include.resid.var = FALSE, level = 0.8)
tempdf <- cbind(tempdf[, 1:8], exampPreds)

ggplot(tempdf, aes(x = item, y = fit, ymin = lwr, ymax = upr, group = Gender)) +
  geom_ribbon(aes(fill = Gender), alpha = I(0.2), color = I("black")) +
  geom_line(aes(color = Gender)) +
  theme_bw() + ylim(c(0, 1)) +
  theme(axis.text.x = element_text(angle = 20), legend.position = "bottom") +
  labs(x = "Item", y = "Probability")
```

Here, more of a difference between the genders emerges, but we can also see that those differences are not estimated very precisely.
merTools/README.md0000644000176200001440000004730113466135366013357 0ustar liggesusers[![Travis-CI Build Status](https://travis-ci.org/jknowles/merTools.png?branch=master)](https://travis-ci.org/jknowles/merTools)
[![Coverage Status](https://coveralls.io/repos/jknowles/merTools/badge.svg?branch=master)](https://coveralls.io/r/jknowles/merTools?branch=master)
[![Github Issues](http://githubbadges.herokuapp.com/jknowles/merTools/issues.svg)](https://github.com/jknowles/merTools/issues)
[![Pending Pull-Requests](http://githubbadges.herokuapp.com/jknowles/merTools/pulls.svg?style=flat)](https://github.com/jknowles/merTools/pulls)
[![CRAN\_Status\_Badge](http://www.r-pkg.org/badges/version/merTools)](https://cran.r-project.org/package=merTools)
[![Downloads](http://cranlogs.r-pkg.org/badges/merTools)](https://cran.r-project.org/package=merTools)
[![Downloads](http://cranlogs.r-pkg.org/badges/grand-total/merTools)](https://cran.r-project.org/package=merTools)
[![Research software impact](http://depsy.org/api/package/cran/merTools/badge.svg)](http://depsy.org/package/r/merTools)

# merTools

A package for getting the most out of our multilevel models in R

by Jared E. Knowles and Carl Frederick

Working with generalized linear mixed models (GLMM) and linear mixed models (LMM) has become increasingly easy with advances in the `lme4` package. As we have found ourselves using these models more and more within our work, we, the authors, have developed a set of tools for simplifying and speeding up common tasks for interacting with `merMod` objects from `lme4`. This package provides those tools.
## Installation

``` r
# development version
library(devtools)
install_github("jknowles/merTools")

# CRAN version
install.packages("merTools")
```

## Recent Updates

### merTools 0.5.0

#### New Features

- `subBoot` now works with `glmerMod` objects as well
- `reMargins`: a new function that allows the user to marginalize the prediction over breaks in the distribution of the random effects; see `?reMargins` and the new `reMargins` vignette (closes \#73)

#### Bug fixes

- Fixed an issue where known convergence errors were issuing warnings and causing the test suite to not work
- Fixed an issue where models with a random slope, no intercept, and no fixed term were unable to be predicted (\#101)
- Fixed an issue with shinyMer not working with substantive fixed effects (\#93)

### merTools 0.4.1

#### New Features

- Standard errors reported by `merModList` functions now apply the Rubin correction for multiple imputation

#### Bug fixes

- Contribution by Alex Whitworth (@alexWhitworth) adding error checking to plotting functions

### merTools 0.4.0

#### New Features

- Added vignette on using multilevel models with multiply imputed data
- Added `fixef` and `ranef` generics for `merModList` objects
- Added `fastdisp` generic for `merModList`
- Added `summary` generic for `merModList`
- Added `print` generic for `merModList`
- Documented all generics for `merModList` including examples and a new imputation vignette
- Added `modelInfo` generic for `merMod` objects that provides simple summary stats about a whole model

#### Bug Fixes

- Fix bug that returned NaN for `std.error` of a multiply imputed `merModList` when calling `modelRandEffStats`
- Fixed bug in `REimpact` where some column names in `newdata` would prevent the prediction intervals from being computed correctly. Users will now be warned.
- Fixed bug in `wiggle` where the documentation incorrectly stated the arguments to the function and did not describe the function correctly

See [NEWS.md](https://github.com/jknowles/merTools/blob/master/NEWS.md) for more details.

## Shiny App and Demo

The easiest way to demo the features of this package is to use the bundled Shiny application, which launches a number of the metrics here to aid in exploring the model. To do this:

``` r
library(merTools)
m1 <- lmer(y ~ service + lectage + studage + (1|d) + (1|s), data=InstEval)
shinyMer(m1, simData = InstEval[1:100, ]) # just try the first 100 rows of data
```

![](man/figures/README-predPanel.png)

On the first tab, the function presents the prediction intervals for the data selected by the user, which are calculated using the `predictInterval` function within the package. This function calculates prediction intervals quickly by sampling from the simulated distribution of the fixed effect and random effect terms and combining these simulated estimates to produce a distribution of predictions for each observation. This allows prediction intervals to be generated from very large models where the use of `bootMer` would not be feasible computationally.

![](man/figures/README-effPanel.png)

On the next tab the distribution of the fixed effect and group-level effects is depicted on confidence interval plots. These are useful for diagnostics and provide a way to inspect the relative magnitudes of various parameters. This tab makes use of four related functions in `merTools`: `FEsim`, `plotFEsim`, `REsim` and `plotREsim`, which are available to be used on their own as well.
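For reference, here is a minimal sketch (not part of the original README) of calling those four functions directly on the fitted model outside of the app; it uses only the model `m1` fit above, and the Plotting section below demonstrates these functions in more detail.

``` r
# Direct use of the simulation and plotting helpers outside the Shiny app
feSims <- FEsim(m1, n.sims = 100)   # simulate fixed effects
plotFEsim(feSims)                   # coefficient plot
reSims <- REsim(m1, n.sims = 100)   # simulate random effects
plotREsim(reSims)                   # caterpillar plot
```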
![](man/figures/README-substPanel.png)

On the third tab are some convenient ways to show the influence or magnitude of effects by leveraging the power of `predictInterval`. For each case (up to 12) in the selected data, the user can view the impact of changing either one of the fixed effects or one of the grouping-level terms. Using the `REimpact` function, each case is simulated with the model's prediction as if all else were held equal while the observation is moved through the distribution of the fixed effect or the random effect term. This is plotted on the scale of the dependent variable, which allows the user to compare the magnitude of effects across variables, and also between models on the same data.

## Predicting

Standard prediction looks like this:

``` r
predict(m1, newdata = InstEval[1:10, ])
#>        1        2        3        4        5        6        7        8 
#> 3.146337 3.165212 3.398499 3.114249 3.320686 3.252670 4.180897 3.845219 
#>        9       10 
#> 3.779337 3.331013
```

With `predictInterval` we obtain predictions that are more like the standard objects produced by `lm` and `glm`:

``` r
#predictInterval(m1, newdata = InstEval[1:10, ]) # all other parameters are optional
predictInterval(m1, newdata = InstEval[1:10, ], n.sims = 500, level = 0.9,
                stat = 'median')
#>         fit      upr       lwr
#> 1  3.015857 5.088929 1.1835562
#> 2  3.277143 5.220196 1.1038519
#> 3  3.404557 5.350846 1.3090942
#> 4  3.108511 5.314549 0.9256501
#> 5  3.260811 5.420831 1.2343590
#> 6  3.150673 5.267239 1.3318446
#> 7  4.085517 6.192887 2.1149662
#> 8  3.776922 5.715385 1.7600005
#> 9  3.799624 6.045041 1.7959515
#> 10 3.195235 5.180454 1.2971043
```

Note that `predictInterval` is slower because it is computing simulations. It can also return all of the simulated `yhat` values as an attribute of the prediction object itself. `predictInterval` makes heavy use of the `sim` function from the `arm` package to draw the distributions of the parameters of the model. It then combines these simulated values to create a distribution of the `yhat` for each observation.

### Inspecting the Prediction Components

We can also explore the components of the prediction interval by asking `predictInterval` to return specific components of the prediction interval.
``` r predictInterval(m1, newdata = InstEval[1:10, ], n.sims = 200, level = 0.9, stat = 'median', which = "all") #> effect fit upr lwr obs #> 1 combined 3.18738014 4.966371 1.126030 1 #> 2 combined 2.97373166 5.126738 1.274230 2 #> 3 combined 3.27899702 5.362678 1.472948 3 #> 4 combined 3.23788384 5.020504 1.050771 4 #> 5 combined 3.37136338 5.350912 1.242096 5 #> 6 combined 3.15899583 5.217095 1.331035 6 #> 7 combined 4.14067417 6.187147 2.068142 7 #> 8 combined 4.02432057 6.067216 1.654789 8 #> 9 combined 3.77403216 5.554346 1.964592 9 #> 10 combined 3.42735845 5.296553 1.435939 10 #> 11 s 0.07251608 1.918014 -2.089567 1 #> 12 s 0.08247714 1.953635 -1.810187 2 #> 13 s 0.09157851 2.184732 -1.943005 3 #> 14 s 0.13788161 1.811599 -1.622534 4 #> 15 s 0.07322001 1.741112 -2.165038 5 #> 16 s -0.11882131 1.735864 -2.302783 6 #> 17 s 0.19512517 2.245456 -1.630585 7 #> 18 s 0.17986892 2.064228 -1.743939 8 #> 19 s 0.42961647 2.089356 -1.536597 9 #> 20 s 0.41084777 2.124038 -1.681811 10 #> 21 d -0.16574871 1.846935 -2.142487 1 #> 22 d -0.05194920 1.839777 -1.897692 2 #> 23 d 0.09294099 2.062341 -1.811622 3 #> 24 d -0.27500494 1.470227 -2.026380 4 #> 25 d 0.10836089 1.758614 -1.613323 5 #> 26 d -0.10553477 2.057018 -1.928175 6 #> 27 d 0.58243006 2.712166 -1.427938 7 #> 28 d 0.24593391 2.142436 -1.421031 8 #> 29 d 0.01724017 2.472836 -1.853576 9 #> 30 d -0.19182347 1.693597 -2.412778 10 #> 31 fixed 3.16933865 5.219839 1.287274 1 #> 32 fixed 3.16287615 5.140116 1.524180 2 #> 33 fixed 3.29291541 4.902726 1.382934 3 #> 34 fixed 3.01686447 5.285364 1.248745 4 #> 35 fixed 3.30761049 5.106185 1.420678 5 #> 36 fixed 3.32362576 4.872431 1.557399 6 #> 37 fixed 3.27480918 5.680335 1.374587 7 #> 38 fixed 3.47316648 5.063170 1.595717 8 #> 39 fixed 3.33332336 5.208318 1.435965 9 #> 40 fixed 3.27800249 5.158261 1.463540 10 ``` This can lead to some useful plotting: ``` r library(ggplot2) plotdf <- predictInterval(m1, newdata = InstEval[1:10, ], n.sims = 2000, level = 0.9, stat = 'median', which = "all", include.resid.var = FALSE) plotdfb <- predictInterval(m1, newdata = InstEval[1:10, ], n.sims = 2000, level = 0.9, stat = 'median', which = "all", include.resid.var = TRUE) plotdf <- dplyr::bind_rows(plotdf, plotdfb, .id = "residVar") plotdf$residVar <- ifelse(plotdf$residVar == 1, "No Model Variance", "Model Variance") ggplot(plotdf, aes(x = obs, y = fit, ymin = lwr, ymax = upr)) + geom_pointrange() + geom_hline(yintercept = 0, color = I("red"), size = 1.1) + scale_x_continuous(breaks = c(1, 10)) + facet_grid(residVar~effect) + theme_bw() ``` ![](man/figures/README_unnamed-chunk-8-1.png) We can also investigate the makeup of the prediction for each observation. ``` r ggplot(plotdf[plotdf$obs < 6,], aes(x = effect, y = fit, ymin = lwr, ymax = upr)) + geom_pointrange() + geom_hline(yintercept = 0, color = I("red"), size = 1.1) + facet_grid(residVar~obs) + theme_bw() ``` ![](man/figures/README_unnamed-chunk-9-1.png) ## Plotting `merTools` also provides functionality for inspecting `merMod` objects visually. The easiest are getting the posterior distributions of both fixed and random effect parameters. 
``` r
feSims <- FEsim(m1, n.sims = 100)
head(feSims)
#>          term        mean      median         sd
#> 1 (Intercept)  3.22450825  3.22391563 0.01814137
#> 2    service1 -0.07020093 -0.07020791 0.01288904
#> 3   lectage.L -0.18513512 -0.18608254 0.01616639
#> 4   lectage.Q  0.02471446  0.02512454 0.01087328
#> 5   lectage.C -0.02594511 -0.02425488 0.01300243
#> 6   lectage^4 -0.01880190 -0.01887871 0.01410205
```

And we can also plot this:

``` r
plotFEsim(FEsim(m1, n.sims = 100), level = 0.9, stat = 'median', intercept = FALSE)
```

![](man/figures/README_FEsimPlot-1.png)

We can also quickly make caterpillar plots for the random-effect terms:

``` r
reSims <- REsim(m1, n.sims = 100)
head(reSims)
#>   groupFctr groupID        term        mean      median        sd
#> 1         s       1 (Intercept)  0.21962903  0.26429668 0.3113619
#> 2         s       2 (Intercept) -0.04134078 -0.03064871 0.2922675
#> 3         s       3 (Intercept)  0.31819925  0.32744181 0.3530303
#> 4         s       4 (Intercept)  0.21088441  0.22023284 0.3176695
#> 5         s       5 (Intercept)  0.02441805 -0.02929245 0.3350150
#> 6         s       6 (Intercept)  0.10534748  0.12763830 0.2284094
```

``` r
plotREsim(REsim(m1, n.sims = 100), stat = 'median', sd = TRUE)
```

![](man/figures/README_reSimplot-1.png)

Note that `plotREsim` highlights group levels that have a simulated distribution that does not overlap 0 – these appear darker. The lighter bars represent grouping levels that are not distinguishable from 0 in the data.

Sometimes the random effects can be hard to interpret, and not all of them are meaningfully different from zero. To help with this, `merTools` provides the `expectedRank` function, which provides the percentile ranks for the observed groups in the random effect distribution, taking into account both the magnitude and uncertainty of the estimated effect for each group.

``` r
ranks <- expectedRank(m1, groupFctr = "d")
head(ranks)
#>   groupFctr groupLevel      term   estimate  std.error       ER pctER
#> 2         d          1 Intercept  0.3944919 0.08665152 835.3005    74
#> 3         d          6 Intercept -0.4428949 0.03901988 239.5363    21
#> 4         d          7 Intercept  0.6562681 0.03717200 997.3569    88
#> 5         d          8 Intercept -0.6430680 0.02210017 138.3445    12
#> 6         d         12 Intercept  0.1902940 0.04024063 702.3410    62
#> 7         d         13 Intercept  0.2497464 0.03216255 750.0174    66
```

A nice feature of `expectedRank` is that you can return the expected rank for all factors simultaneously and use them:

``` r
ranks <- expectedRank(m1)
head(ranks)
#>   groupFctr groupLevel      term    estimate  std.error       ER pctER
#> 2         s          1 Intercept  0.16732800 0.08165665 1931.570    65
#> 3         s          2 Intercept -0.04409538 0.09234250 1368.160    46
#> 4         s          3 Intercept  0.30382219 0.05204082 2309.941    78
#> 5         s          4 Intercept  0.24756175 0.06641699 2151.828    72
#> 6         s          5 Intercept  0.05232329 0.08174130 1627.693    55
#> 7         s          6 Intercept  0.10191653 0.06648394 1772.548    60

ggplot(ranks, aes(x = term, y = estimate)) +
  geom_violin(fill = "gray50") +
  facet_wrap(~groupFctr) +
  theme_bw()
```

![](man/figures/README_unnamed-chunk-13-1.png)

## Effect Simulation

It can still be difficult to interpret the results of LMM and GLMM models, especially the relative influence of varying parameters on the predicted outcome. This is where the `REimpact` and the `wiggle` functions in `merTools` can be handy.
``` r impSim <- REimpact(m1, InstEval[7, ], groupFctr = "d", breaks = 5, n.sims = 300, level = 0.9) #> Warning: executing %dopar% sequentially: no parallel backend registered impSim #> case bin AvgFit AvgFitSE nobs #> 1 1 1 2.797430 2.900363e-04 193 #> 2 1 2 3.263396 6.627139e-05 240 #> 3 1 3 3.551957 5.770126e-05 254 #> 4 1 4 3.841343 6.469439e-05 265 #> 5 1 5 4.236372 2.100511e-04 176 ``` The result of `REimpact` shows the change in the `yhat` as the case we supplied to `newdata` is moved from the first to the fifth quintile in terms of the magnitude of the group factor coefficient. We can see here that the individual professor effect has a strong impact on the outcome variable. This can be shown graphically as well: ``` r ggplot(impSim, aes(x = factor(bin), y = AvgFit, ymin = AvgFit - 1.96*AvgFitSE, ymax = AvgFit + 1.96*AvgFitSE)) + geom_pointrange() + theme_bw() + labs(x = "Bin of `d` term", y = "Predicted Fit") ``` ![](man/figures/README_reImpactplot-1.png) Here the standard error is a bit different – it is the weighted standard error of the mean effect within the bin. It does not take into account the variability within the effects of each observation in the bin – accounting for this variation will be a future addition to `merTools`. ## Explore Substantive Impacts Another feature of `merTools` is the ability to easily generate hypothetical scenarios to explore the predicted outcomes of a `merMod` object and understand what the model is saying in terms of the outcome variable. Let’s take the case where we want to explore the impact of a model with an interaction term between a category and a continuous predictor. First, we fit a model with interactions: ``` r data(VerbAgg) fmVA <- glmer(r2 ~ (Anger + Gender + btype + situ)^2 + (1|id) + (1|item), family = binomial, data = VerbAgg) #> Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = #> control$checkConv, : Model failed to converge with max|grad| = 0.0505464 #> (tol = 0.001, component 1) ``` Now we prep the data using the `draw` function in `merTools`. Here we draw the average observation from the model frame. We then `wiggle` the data by expanding the dataframe to include the same observation repeated but with different values of the variable specified by the `var` parameter. Here, we expand the dataset to all values of `btype`, `situ`, and `Anger` subsequently. ``` r # Select the average case newData <- draw(fmVA, type = "average") newData <- wiggle(newData, varlist = "btype", valueslist = list(unique(VerbAgg$btype))) newData <- wiggle(newData, var = "situ", valueslist = list(unique(VerbAgg$situ))) newData <- wiggle(newData, var = "Anger", valueslist = list(unique(VerbAgg$Anger))) head(newData, 10) #> r2 Anger Gender btype situ id item #> 1 N 20 F curse other 149 S3WantCurse #> 2 N 20 F scold other 149 S3WantCurse #> 3 N 20 F shout other 149 S3WantCurse #> 4 N 20 F curse self 149 S3WantCurse #> 5 N 20 F scold self 149 S3WantCurse #> 6 N 20 F shout self 149 S3WantCurse #> 7 N 11 F curse other 149 S3WantCurse #> 8 N 11 F scold other 149 S3WantCurse #> 9 N 11 F shout other 149 S3WantCurse #> 10 N 11 F curse self 149 S3WantCurse ``` The next step is familiar – we simply pass this new dataset to `predictInterval` in order to generate predictions for these counterfactuals. Then we plot the predicted values against the continuous variable, `Anger`, and facet and group on the two categorical variables `situ` and `btype` respectively. 
``` r plotdf <- predictInterval(fmVA, newdata = newData, type = "probability", stat = "median", n.sims = 1000) plotdf <- cbind(plotdf, newData) ggplot(plotdf, aes(y = fit, x = Anger, color = btype, group = btype)) + geom_point() + geom_smooth(aes(color = btype), method = "lm") + facet_wrap(~situ) + theme_bw() + labs(y = "Predicted Probability") ``` ![](man/figures/README_substImpactPredict-1.png) ## Marginalizing Random Effects ``` r # get cases case_idx <- sample(1:nrow(VerbAgg), 10) mfx <- REmargins(fmVA, newdata = VerbAgg[case_idx,], breaks = 4, groupFctr = "item", type = "probability") ggplot(mfx, aes(y = fit_combined, x = breaks, group = case)) + geom_point() + geom_line() + theme_bw() + scale_y_continuous(breaks = 1:10/10, limits = c(0, 1)) + coord_cartesian(expand = FALSE) + labs(x = "Quartile of item random effect Intercept for term 'item'", y = "Predicted Probability", title = "Simulated Effect of Item Intercept on Predicted Probability for 10 Random Cases") ``` ![](man/figures/README_unnamed-chunk-15-1.png) merTools/MD50000644000176200001440000001572113466261516012406 0ustar liggesusers59a6537f3ce642ac25a1005eabccaafa *DESCRIPTION f59daead9a743ae358bc6ff64abca286 *NAMESPACE 3cf8d88bc9083223cd8ad21ce5460c62 *NEWS.md 19976b47bd39bddc0621f6f03ea1ab6a *R/REmargins.R f9204b51d09458fbb2de5027d2b4b139 *R/helpers.R 5c6e5bd09b317e118fb0f87f761cc859 *R/merData.R a0b528265aa194d1a5b3fecaf4c26efc *R/merExpectedRank.R 8ae4a144ed7c737221338a25c77765a5 *R/merExtract.R d696c91b731fe1191d4397f8c4345a4b *R/merFastDisplay.R 41974a3079eb3556f91ba1975e5fe9ae *R/merList.R b28095e35d7232adeef24c3370243bd3 *R/merPlots.R 0defea43eba6619e126a8df9d820eecc *R/merPredict.R b684684fba2f7f13409d6acf932a4c00 *R/merSubstEff.R a220d568399fa042b391e1f057d96d75 *R/merTools-package.r c64605701f8d56610b61e5593692d543 *R/parallel.R 7a84b2ab664d6ef0cbb426f354f35466 *R/shinyMer.R 249d5bc46b09f70c3590a06022a96535 *R/subBoot.R ee01cea9fea42a0597bb4c0ef8007248 *R/zzz.R 23172082ace972d864213e770f74c799 *README.md ee51129e7bbc12ce0dc9e7bb05f50e45 *build/vignette.rds e9ce80f364a542ce71cd162e057b16b9 *data/hsb.rda 4d7a3807632cf90f261383558f2e0721 *inst/doc/Using_predictInterval.R de5827000664458dd0c8f961856d5098 *inst/doc/Using_predictInterval.Rmd 4c237be6e06895be4e85c660f9a4fdf8 *inst/doc/Using_predictInterval.html 21b497b1d841a95c9bbc064a663aeef6 *inst/doc/imputation.R 2bed9a9c50f740d8b3e0b54e4644741c *inst/doc/imputation.Rmd 845a9309d332d0ca8115d7726499351f *inst/doc/imputation.html 1909f66782eebe0a777711b7b9f90da3 *inst/doc/marginal_effects.R ddaad6cd7aa2e85fe1fe7bf586a4c7d2 *inst/doc/marginal_effects.Rmd 7bafba112be46d6d22e9ebaf1938f505 *inst/doc/marginal_effects.html 8298a0aac30f20469f3a57b2e42995a1 *inst/doc/merToolsIntro.R fbf603a99c8bbdf4b6122154297cbe71 *inst/doc/merToolsIntro.Rmd 8ce236685eb5874e48e7f8a22492d7cb *inst/doc/merToolsIntro.html 4f5c6129e5f6bd6fffbef1d32e0fc51c *inst/modelFigure.R 555e67056f7e2c551d18c6490cfc686c *inst/shiny-apps/shinyMer/global.R f67b03d9eebf083be42385bc8a10ab8b *inst/shiny-apps/shinyMer/server.r a50977e8bf85edfac425a97f5f6429ac *inst/shiny-apps/shinyMer/ui.R 2b61a23ae216727b66e113dccf5b376a *man/FEsim.Rd 6e1147b29c64e4d248af20a54ceeb4c3 *man/ICC.Rd ec248230255c8dc25e230ad58c579077 *man/REcorrExtract.Rd db49e2fffe572ce808cdde1d59237b69 *man/REextract.Rd ac3256939db92e69d024a4b59698a97c *man/REimpact.Rd fde99ed2066725e9eba4a0deef26b773 *man/REmargins.Rd 5dd7526b3caf10f371cf1e8317cc02f8 *man/REquantile.Rd 2b6bc63259aa87dd6bede15371aba762 *man/REsdExtract.Rd 
1adcec00d78bc6b13c54a648d5667c9f *man/REsim.Rd 7487deb25bf6487e5a1ce07489d7c0f6 *man/RHSForm.Rd 870e2299f4ae5e8bc18d39fc1f3b9858 *man/RMSE.merMod.Rd 6f53b36ad9dd64aaf15e1d1abf679d8e *man/VarCorr.merModList.Rd 94a9a828f4dc0c374caee13adaa4e445 *man/averageObs.Rd 70dad71ffb63028545c029ed3ad8c433 *man/buildModelMatrix.Rd 570d05c72e4d261add4683b833820e29 *man/collapseFrame.Rd a68ad5a878fb67aafd394f6303a0d991 *man/draw.Rd 9456c9781cd82582328bb61a7620ecf0 *man/expectedRank.Rd a46487294d4c4301371c8dca21991264 *man/famlink.Rd d94b66444aae4b36b3ea5b48cf21ab84 *man/fastdisp.Rd eb728f48e18515a89115567df137a145 *man/fetch.merMod.msgs.Rd c02a7b5bd20bb05fa6cec5d50370395c *man/figures/README-effPanel.png 9a27395cee4f457849ee890fbf72d1ad *man/figures/README-predPanel.png 288d424225edbd816bd3f1ff45448088 *man/figures/README-substPanel.png 808efc1838c67048fc20798f8529de93 *man/figures/README_FEsimPlot-1.png e7e0ab10e17ff3b744b4607db7f5659e *man/figures/README_reImpactplot-1.png ec87c3fdbbe58c5d1593b505fee070c0 *man/figures/README_reSimplot-1.png 9fbe228e12fdfceefecfc7ff39d5d6c9 *man/figures/README_substImpactPredict-1.png 2f991ddb35535953f2c0f244f41c0211 *man/figures/README_unnamed-chunk-13-1.png 35aa6341eecca35e3536d50688bbda8b *man/figures/README_unnamed-chunk-15-1.png 7842778fbbff3da19623a4167c33c4e4 *man/figures/README_unnamed-chunk-8-1.png 82ccddd228181ba21295be4a20262818 *man/figures/README_unnamed-chunk-9-1.png d00d521ea411e2270bc5dfe36f2608c4 *man/findFormFuns.Rd 819cd68623135f05d72fb44f6d8ecb1e *man/fixef.merModList.Rd a9db5e89327138411e4b1814cca8fb97 *man/formulaBuild.Rd df35a4b5d948a3694488d41e938e3a9d *man/hsb.Rd 53d7b2877cad1723807a81ccd20eaf62 *man/levelfun.Rd 5725eb39e1ced8574d6b745c1af9b05a *man/merModList.Rd 87d7dd0260eebf0a67efbcd865294427 *man/merTools.Rd 5cdb29d956b993cc7b57778741ec9db4 *man/mkNewReTrms.Rd 79be1adfc0b812d661bbc8c9d037da9d *man/modelFixedEff.Rd 51de5315617ade84ef9769a085596a78 *man/modelInfo.Rd eb8603602209eed655adb435d89c8f59 *man/modelRandEffStats.Rd 28c578b4d92de5ffff1ebf3ddb79f7f8 *man/plotFEsim.Rd d45cf803ae6a9fc08c6632d580137690 *man/plotREsim.Rd 7996508cf29dea004b970286fdb3f7f5 *man/plot_sim_error_chks.Rd 405e71cedefcd3aa584e097e75c40a20 *man/predictInterval.Rd 6279fcb06adf1c307bdb019d6b6eae5b *man/print.merModList.Rd c70a1b85ec78607047579165fee0755f *man/print.summary.merModList.Rd 89d819b3250665bd2061d125341c535b *man/randomObs.Rd 40c3a8536bcb866d49a482c42849d663 *man/ranef.merModList.Rd f5be2c217ab6557becfd175cc52fb250 *man/reOnly.Rd 4f03e502ec29b01d8bfc6ee0ba74e4e4 *man/reTermCount.Rd 114a69cb9d7b96b509bd4931b49eeccb *man/reTermNames.Rd e5a22dfb62ad46bad794b06ed97a657e *man/sanitizeNames.Rd 035f913fa0f25c8abd634e2a8135abb6 *man/setup_parallel.Rd a5f722b3a10524c82dd5444674802c1b *man/shinyMer.Rd 734cb5f36814ffe7c425bb644b1026ef *man/shuffle.Rd fd60f4f99a47ce3df53c1ddea31d99c6 *man/stripAttributes.Rd b59847ab9d24538fd94bd37b3edfa4f9 *man/subBoot.Rd ce9300559709e02ef7f445913236c54f *man/subsetList.Rd 5a4f76622028345265baa259c293c16c *man/sum.mm.Rd a82f73a0b77fca87d6680c86fd60807a *man/summary.merModList.Rd 429b355cb556792e506536481bf3c779 *man/superFactor.Rd c13b5796f90b35e851bf9cb7a2b9b823 *man/thetaExtract.Rd aee235f367c8fe66f42633bb7a800ca2 *man/wiggle.Rd 31589c401f5d01fcade1a07f8b3c503d *tests/comparisons/wheelReinvention.R 654d5633f6a43e5ef1ea62b1a5519d68 *tests/shinyAppTests/test-shinyApps.R f7cbcec42c2b969ab6935ed0816cd332 *tests/testthat-a_p.R 878acc3df7fd4fc38249aefb179c5f8f *tests/testthat-p_z.R e914f85312e1fefdbaa425c59cb31d16 *tests/testthat.R 
df44ded1611b04b970ee7edb3443480e *tests/testthat/test-REmargins.R
184f051752d5587afccc8755488e8bc0 *tests/testthat/test-expectedRank.R
a4985cdeb855f03d903cf2311e21956f *tests/testthat/test-helpers.R
8dbdffcf8ddc966438c5460824cad374 *tests/testthat/test-merData.R
e154d28845408e0db7b8efa0980f49d0 *tests/testthat/test-merExtract.R
f4aeca136890cf668ff90a756d5792a6 *tests/testthat/test-merModList.R
3f621ba99f8e34d2725129263a278bb8 *tests/testthat/test-plots.R
d7e8ae3eb50f9af9bc47f80285b560ef *tests/testthat/test-predict.R
842389d6c9d0f2e85d1a98cfaeeabdd3 *tests/testthat/test-seeds.R
bccd03a5c2901e67b6c6cfac74f54810 *tests/testthat/test-subboot.R
6bb02ad5cfe63720f86e20746f19bf3c *tests/testthat/test-substEff.R
c8f1ef8ff533cb55a01e9cbca6c90027 *tests/timings/Compare_bootMer_KF.R
3115188a8cd9ff0057fca4f13897ab79 *tests/timings/predictSpeed.R
a0a2667f7a63f4bb09f221f494ea325d *tests/timings/test_fastdisp.R
de5827000664458dd0c8f961856d5098 *vignettes/Using_predictInterval.Rmd
2bed9a9c50f740d8b3e0b54e4644741c *vignettes/imputation.Rmd
ddaad6cd7aa2e85fe1fe7bf586a4c7d2 *vignettes/marginal_effects.Rmd
fbf603a99c8bbdf4b6122154297cbe71 *vignettes/merToolsIntro.Rmd
merTools/build/0000755000176200001440000000000013466137707013173 5ustar liggesusersmerTools/build/vignette.rds0000644000176200001440000000053213466137707015532 0ustar liggesusers
merTools/DESCRIPTION0000644000176200001440000000232513466261516013600 0ustar liggesusersPackage: merTools
Title: Tools for Analyzing Mixed Effect Regression Models
Version: 0.5.0
Authors@R: c(
    person(c("Jared", "E."), "Knowles", email = "jknowles@gmail.com", role = c("aut", "cre")),
    person("Carl", "Frederick", email="carlbfrederick@gmail.com", role = c("aut")),
    person("Alex", "Whitworth", email="whitworth.alex@gmail.com", role = c("ctb")))
Description: Provides methods for extracting results from mixed-effect model
    objects fit with the 'lme4' package. Allows construction of prediction
    intervals efficiently from large scale linear and generalized linear
    mixed-effects models.
Depends: R (>= 3.0.2), arm, lme4 (>= 1.1-11), methods
Suggests: testthat, knitr, rmarkdown, parallel, nlme, future.apply, rstanarm, Amelia, DT
Imports: dplyr, mvtnorm, foreach, shiny, abind, ggplot2, blme, broom,
License: GPL (>= 2)
LazyData: true
VignetteBuilder: knitr
RoxygenNote: 6.1.1
Encoding: UTF-8
BugReports: https://www.github.com/jknowles/merTools
NeedsCompilation: no
Packaged: 2019-05-13 00:52:24 UTC; jknow
Author: Jared E. Knowles [aut, cre],
  Carl Frederick [aut],
  Alex Whitworth [ctb]
Maintainer: Jared E.
Knowles <jknowles@gmail.com>
Repository: CRAN
Date/Publication: 2019-05-13 12:30:06 UTC
merTools/man/0000755000176200001440000000000013466135366012646 5ustar liggesusersmerTools/man/buildModelMatrix.Rd0000644000176200001440000000125113460722176016374 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{buildModelMatrix}
\alias{buildModelMatrix}
\title{Build model matrix}
\source{
Taken from predict.merMod in lme4
}
\usage{
buildModelMatrix(model, newdata, which = "full")
}
\arguments{
\item{model}{a merMod object from lme4}

\item{newdata}{a data frame to construct the matrix from}

\item{which}{which matrix to return; default is the full matrix with fixed
and random terms, other options are "fixed" and "random"}
}
\description{
A function to create a model matrix with all predictor terms in both the
group level and fixed effect level
}
\keyword{internal}
merTools/man/ranef.merModList.Rd0000644000176200001440000000210413460722176016276 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merList.R
\name{ranef.merModList}
\alias{ranef.merModList}
\title{Extract random-effects estimates for a merModList}
\usage{
\method{ranef}{merModList}(object, ...)
}
\arguments{
\item{object}{an object of a class of fitted models with random effects,
typically a \code{\linkS4class{merMod}} object.}

\item{...}{some methods for these generic functions require additional
arguments.}
}
\value{
a named, numeric vector of random-effects estimates.
}
\description{
Extract random-effects estimates for a merModList
}
\details{
Extract the estimates of the random-effects parameters from a list of
fitted \code{merMod} models. Takes the mean of the individual \code{ranef}
objects for each of the component models in the \code{merModList}.
}
\examples{
sim_list <- replicate(n = 10,
                      expr = sleepstudy[sample(row.names(sleepstudy), 180),],
                      simplify=FALSE)
fml <- "Reaction ~ Days + (Days | Subject)"
mod <- lmerModList(fml, data = sim_list)
ranef(mod)
}
merTools/man/REquantile.Rd0000644000176200001440000000223613460722176015204 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merData.R
\name{REquantile}
\alias{REquantile}
\title{Identify group level associated with RE quantile}
\usage{
REquantile(merMod, quantile, groupFctr, term = "(Intercept)")
}
\arguments{
\item{merMod}{a merMod object with one or more random effect levels}

\item{quantile}{a numeric vector with values between 0 and 1 for the
quantiles}

\item{groupFctr}{a character of the name of the random effect grouping
factor to extract quantiles from}

\item{term}{a character of the random effect to extract for the grouping
factor specified. Default is the intercept.}
}
\value{
a vector of the level of the random effect grouping term that corresponds
to each quantile
}
\description{
Identify, for a user-specified quantile (or quantiles) of the random effect
terms in a merMod object, the associated group level. This allows the user
to easily identify the observation associated with the nth percentile
effect.
}
\examples{
fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
REquantile(fm1, quantile = 0.25, groupFctr = "Subject")
REquantile(fm1, quantile = 0.25, groupFctr = "Subject", term = "Days")
}
merTools/man/mkNewReTrms.Rd0000644000176200001440000000142513460722176015350 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{mkNewReTrms}
\alias{mkNewReTrms}
\title{Utility function to make RE terms objects}
\usage{
mkNewReTrms(object, newdata, re.form = NULL, na.action = na.pass,
  allow.new.levels = FALSE)
}
\arguments{
\item{object}{a model object}

\item{newdata}{a data.frame to build RE terms for}

\item{re.form}{a random effect formula to simulate, generated by
\code{\link{reOnly}}}

\item{na.action}{an object describing how NA values should be handled in
newdata}

\item{allow.new.levels}{logical, should new levels be allowed in factor
variables}
}
\value{
a random effect terms object for a merMod
}
\description{
Utility function to make RE terms objects
}
\keyword{internal}
merTools/man/reOnly.Rd0000644000176200001440000000062013460722176014376 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{reOnly}
\alias{reOnly}
\title{Random Effects formula only}
\usage{
reOnly(f, response = FALSE)
}
\arguments{
\item{f}{a model formula}

\item{response}{logical, should the result include the response}
}
\value{
a formula
}
\description{
Random Effects formula only
}
\keyword{internal}
merTools/man/figures/0000755000176200001440000000000013466135366014312 5ustar liggesusersmerTools/man/figures/README_unnamed-chunk-15-1.png0000644000176200001440000002354513466135366021164 0ustar liggesusers
merTools/man/figures/README_unnamed-chunk-13-1.png0000644000176200001440000001514513466135366021157 0ustar liggesusers
merTools/man/figures/README-predPanel.png0000644000176200001440000014231413402510754017656 0ustar liggesusers
merTools/man/figures/README-substPanel.png0000644000176200001440000015360313402510754020067 0ustar liggesusers
qLYQlrWR~KGFF^u}7XK~Z *&Npcԟl$UeQejg&aUJJo2ɓ'x'/Vo~Z SmNxqH)z4^Z!NQU5^vfBV室$(@C"םUmN][8:A[)4^vpT7GݎJodx"1>puƋ)k;*XQU58&yU$Fgm[e[\fφ^HPtjC| g;z*LU Qugwo mٲ[͗$y_)PULJR8 6c!>GU~sx<]II Qw6 /*&Npcڎ`1sT7G!êܕU o>K{K[|[$snQpuƋE(k8 GU~sx<]II Qd >ๅ4^L(q6oWd'*oϝX+w%%72<Ds\[8:A玺DOCL9j<w த$F(2|QUL-uǔ-1e*oϝX+w%%72<Dy: oNz'TԹ☲2xU]q*w%ƫ_wTT9:!)k;*XQU58>rWR~KᇶGt 椸cN{6[%)[-P" <񅘂SxO?)åz/8;s}uX6C;☲2xUeQq*w%%72%e}d'tE>tkl7Le.*LᒽS:d»x|86D2v۞ >:1pmN:=ztyd"Fk䙓c rGXp5XpߵAeْȽ?Vxn[wę=RMf0!d=֢OP?6}aDBd7QL6vM'eqxDXǨ̖=J& P/=_~s<\'rq%femǍʝ0_].C@?J(`GYh DV38p{.^2M۞<:qlۭ+N#X^{_\|j6)Xͻ;ѧPڪB/:Fe6r]{vgg۶m%޻Pjb\C.켛0;-Yjե C YnnңDWƋu"&Npcڎ`1sT7M۶lK\~$W7ύ#wMfz.KzP0b`G)m3:"ٛE8nSm\xS@^ unAסh<Ԟ:w'z ƫB cś|ޚBtH( ,&)3' qЖ-eAd/exC^ZP4j/|PkP /~]ˠjs~Y Q /2K2=PuxJ3,0h0OͯǢ?w ]P{,ZSW2<DyᇀZQtjCPc@(2<-:S]GLU Qdx@[u1CƫB ҿ|xK>srpHv_x 7[W pzs ֽ+]5h{׺]zR]GUƫi*$NJ7v<31pﻟ| ۆ9\k>v۞ YG'"? C[.:.ޮu+]W/ i>TTxU WNi*$RsL YI/m k=Ñ[dymԹ]wv{Ww[GMW%u4^un]|U0WDh^?y$?M۞<:ql; Uxl:|ҭ2|PAԹ]W>Y ShhW5xUH+'puCSvTL9j<q|V室$F(2|QUL-uǔc,*oCU+)I _wTs G]'81emGe 9aUJJodxrw-[-ܢ2xn8 6ѿ▐}eQem'ߒfѾ\._UK2X-EL vX :|CmX8 [8:!)k;*aI=ʿh_~sxYIdj&d~ؑoI tS|D]*qH1ЖyNJyT@^ unAסhAs[KW%-E)S.e &_M ohm KrqKU^,Hx*<uaUJR8 6c!>GU~sxGU~sx<]II-Y^}CʵaeiyO v7烯w.կS=VovU|exIm9 [8:!)k;*,K[B26*w%%鷤>\!g~w['ܢ2xn85PT~sxYIxXfx}f+TT9:!)k;*n'36*w%%72<xYOD^: oNz'TGkO:puCSvTT׹k<q|V$xU @^OJޜO*& z*muǔc,*oCU+)I Y2[]-uG)b,*oCזQI Q2|ٿ,QUL-uaIͩvL9j<qLųnG%D^<222{禧RexqQ uaU-urwc,*oCo^QI-Qs'OjddDx-e/9;r}n8 6c!>GU~sx<]II-QK~Zm9mWFU1>puCSv[|Id*oCU+)Id^_7tϹEaSp}nSv ^ZCzUeQq*w%%72<#/Yz+Ko W k^aXL7:unAס\7^v0S]+j4^— _ݻmb_y?\W/BEOJ7(oNx1emGe 9sgz&25B@Vxgw=L[PUL-u$/z'4^ 9pg=L$F>_#;br}n8}@ғQD2ܙL$FdC.ԹUQ qLY;`1sT7G!êܕߜes=#3z?g:Gvx8 [8:!)k;*XQU58>rWR~s'׬/'p/~]b8 [8:!)k;*XQU58>rWR~sv~zg\&"HdebӋ~6^q4^ 9aUJJoo͚"!ֿҟu@ޜ7YL{}$un넇;Sd'A㥈T׹k<q|V$xUpoZݔ^OJޜOd)xܕQ ?/S8 /EUeQq*w%%7~dqnQwpuÝggY l$UeE~?OFh~# WĻx|puCSvTL9j<q|V室$F~dm? -j:.-.Npcڎ`1sT7G!êܕ§ he*ī%RYsๅ☲2xUeQq*w%%72<#(uI~`Ж-`Idz s G],CPc(ÏK{K[ռD_BJOs\[8:!)k;*XQU58>rWR~K!s$K ܢ2xn<)kcuqcTU58>rWR~#@TĹEeQ8bXpxCS28yXe0W>|[]-u0L$.ǔL9j<q|V室$F(ŹEeQ qLYQlpuCSvTL9j<q|V室$F(2|QUL-uǔc,*oCU+)I _wTs G]'81emGe 9aUJJodx"UQ qLYQlK%z6d>]{\`!~eOBp>}p7hn͝gގyXz+ZZ󹦵[m nP:|,sW@u x5xUH%AxLLwd?QȵlŠ|׀,[g\gqT+e~%D3!+Xy𙩽j{+f!VԹ]P{h<Ԙi*xt[rux ^[.|,,o Ke]ZVvG s 5FTx1xUH}+ ':V!du0P˭+ϜޢW ry:(>ے?o=T s 5FTx1xUHߥ_rMW-Krbg?5~t]o ~PR xU g _5unA]4uF>ZJ]<JdTxW4^EԹ]P{h<Ԙi* ]P{,ZSW2<DyT?|BU1+I☲2xUeQq*w%%D^<22? 
{禧RuGU1>puCSvTL9j<q|V室$(9ɓ'cb522_B;☲2xUeQq*w%%D~hhH'/Yk)d\[8:!)k;*XQU58>rWR~#Ȋg7'=]IQ qLYQlGU~sx<]II  +._-uǔc,*oCU+)Id"ๅ☲2xUeQq*w%%7~cdM#zIɃGd ֦'(Cw2xn8 6c!>GU~sx<]II]sPPٴ?==[f)+]< [8:!)k;*XQU58>rWR~smFwG6]mU>_udFDp̑7 Ȧk7lhj4#=#A&Zۦ6lؗ~^}IA2|EǗs G]'81emGe 9aUJJo2Ȧ*I~Ӑ,:cUtxߚpA?Y\Ov,n_-q#fk46c+]< [8:!)k;*XQU58>rWR~s>Aoex&uKz[nRy`#и$9re[e4L Pjdu1Cƫk[(칶?Ygh篦  hK3pЖ-|IEȻ U$oNpcڎ`1sT7G!êܕ\~^%2m ܱ|.|}>|`~j\*kjqpZ8|@fmB{ dxAU1>puCSvTL9j<q|V室$,ÛKQ]z{>$zwuùEyXtJ7G]'81emGe 9aUJJo2|ww_u[UzBz>Uuǔc,*oCU+)I2|EaUP8 6c!>GU~sx<]II  +._-uǔc,*oCU+)Id"ๅ☲2xUeQq*w%%72ܕ-uǔc,*uCU+4^ɛ U$oNpcڎ`1sT7G!êܕe'O/^K!UQ qLYQlrWR~#@Tn3Ж L^HPtjCP驴ƫjw}h-mP%y_)PULJR8 6c!>GU~sx|J]Dz>UI Q5h} uaUJR8&?wD驴e 9jL[{L}$oNp%T|2xt9hNx3Eqg38ej&72<D/2xn넟C"B|28yXuGU1>puCSvTL9j<q|V室$F(2|QUL-uǔc,*oCU+)I ysS>ܕ-uǔc,*uC߲e7(xU @뎪brW7G]'81emGe 9$^~p[ ?XYL v7e41]`wlj><[Z UQ qL*4^ 9+ߒgBYm߂dҿ뎪br}n8T@{Kcî7G!ܕ}[Dapjcy>o,Yڈͱ|gqB-TeI6?`6R Ie*gD- [8:!)k#4g)z7G!T[CIॉޖ\ީ3\Gqb=f}lWn,,}pk'Kfp5HCv2>MU$2xnVpcڎ`y=|_7GQDFh~KfIi"I~`lA oow H<&G&ΌZ 񭽣<$u"&Npcڎ`1sT7G!êܕߜgޖ|^s<7vfjbE𙱾mJυw6}6QU -~ٖC-ٳus G]'81emGe 9aUJJo)~>&[/ď;?X|L^fڅwO}"<4:6wo mٲ; }@^ unAסh<Ԟ꺬5W)ԉi*$ЈjwmB{$I^!"z'TT9:!Z=卬 6^q2<ܙ᥁FFh~#@T3,-mA|Wms AU1>puÝÀCB6OO$SgQ3iܕUqd܂br}n넟Óڬ5^KF(NêܕE;☲2xx36*w%%72<D뎪br}n8 6c!>GU~sx<]II Qd\[8:!)k;*XQU58>rWR~KeWW߽y0.e_l_}"g7'=]IQ qLYQlܕQ qLYQlO R_W] *&Npcڎ`1sT7Gǟ-/S#4I_|p5>Zp)d{&_=_ȍwx͟W'2|QUL-u6׍!)SU~sxޠbrWR~K#?Ĉg6?q2K?gzB/z"h,(g J$/w2xn'1em,8mF!=$UeQq*w%%2ccE%^؏;6x%r^ux@#[?g ~Ы. _wTs G]'81emGe 9aUJJo|>B#\ ТKqݦ>ٶ۞/ͅ7^HPtjCPc(˚;^0' .e_l_kЖ-eAdoސzo|CU19-] qLQYkK%z6]S>QqpcbGCUU2%;vO<iYrNLLLOO;ȶ_bi i w~wb#dsIj<A?ßK%Yݹ+gyf/̝>}gc|7^~_P%%ߴ?M/>f˽x\'߿;X}l ?%mwDO?JN>}{wO>|ɓΡ۬{2~.L"]ב"_yY|lwQ$g.$%ЖoEtLCe$K2v%Q?wSeɷm ꟏ݡ%ؙ MemDmi,&3/{~w{O>K7@K>kpIÇ]g0sM*DNz_z饑ãxD"2|֐0w^M]P& 驩'NIt`wdjAb'R,dx a'R`qdx) ?=uC'p zi=7&OSzS'Ot{R!ؙ'.tzG>|.1?x]wDI{왣:xk޽o\{ 84#O <dX ?a? |t葽CGLJ|>?~i=zG::Ծ۟dx)ҟxfow?ȃ>_ <<_s#/ً Q=7}wK>y gKٷ_^~ou43S#==^_=w$GP <Vjh|#āb׎|ϟ{.#yՉ"xH_{O- ?Yzhdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx Qdx) ?m]zfGmCz C ţg<׆u)7JROǞ?8\ ?)Wɦ55U9ҿ|C3zXADzkrgSqsH[0Ɛ8ß|o%B|:^ f.T%f̍_̯. ,N[m7g%ԾBiǪ{w.ix8Z'OC:o*lXn݆Μymtm7Eܡm7mupQnÝOṷ0F$ rBGop>w|DtydxSжo 凋5_k G+Thfi?ݎ. wn-~. m"Z>;cޚ=ӚM+ \y}W QB7o_zcY>xMF֯nV;2=9>lj~upwWaLX]fSѺ h 2D\˰ZW wWf[ Ń]Yb_Pwezͽ͝js7&ڎf' _r=Yױ}uߑ`2 [GTJ=޿&_w0>Y_{UӼcY[Q6^aDz[J!rMM֫~oM.>ϯR}>֕5; G- 1$wvЛߥ𩲪LN;WW:Z7tS4GG)mM{^WzY`,u-J%6fi&"+_xu=n`CX}O@LrnHmatooHߪҝw%}ǹE+tQBe=*\r;40}ddطug-_@cH7XQ -8jz^ m\"5kuV/%}=_XMH,XsR/{M̌l]5l[iGБf_@c3u_^h9T |+Kߋ__9˿喌CM ="sE?!4tGx-^|aҾ.4Ç ^j53'̃o͗nGqےվ"n[-J0PڼJ-px42<D0Z Г5s)UdܿqUz;ۖEww%m=H9B>_3o,jS[rkۗ}J`íz0ڙ`6Ժ}xʬ_ &/S O}^Rߞojjw'$d:%O;s@kߣB7mT_n=f} ~ru^Zn?X<8w[XhIi"I֔[Ae+--++Gʹu&@a y8_'W>Hߏkϵ_o|]}>}6LDvfAI7}x!O2<4"2<D(2<D(2<D(2<D(2<D(2<(˝GFF{=~Z ~Jɓn1y/^K!O2АxO^R oR[{ _c,pǕǼٔq1;v"g/>} dxYAKD\YC?]K?nplo6CT(Qy2< wn}!p^\j6Mf烹/}HZZۯ|XrHҏ̪Fwj[ϊfޣ4o;͖=y[+͇4oEV.=Dx4 dxYAp{ࣿ<8?9n}dnIS񯿿gIrՂkmד"KX`tY>Vʸp\Hj"vU?;3]m&έlO@ xqٟfw_=k;~n2;vo/ $#7w6pm2wheϾ?o]QuχV. ?n}g^sZ}{ˊձmGGNd5{e4f%e-߁r8~( ?iM^#z,2</2~'O>=?׶ɓ?B{斟/>['nʷ=8;К CrɻZ}37?=6-w`/wvx_sj%eI?_>Y~aU`tYZ}ֿ2o/|?_rۉVeo֚Yި(oñ樂Wt52pv~|CRDK7kM,~N}w`p oͦ7n7=Y\Ob̵wUڏm/tE8[&~ Xa}.q_*?ȞzK7zpiywoV>셳}W-w_͇f}879}2+^8=Xaٳvӛޟ 6wO9?5/҇E.դZ{WAݟg/NggcI[K 6Ïlҧ"u$̑mV].˛œkY! n+ w uf[zn6,iu1`7^MR7rO/9{ʺlr͟[!dSzII ?n}`Sg":Wlfܟyv^{'Xyzvt4{;e;&HU+Gv?sm0{mӓmT+_1 hp=xhGTRϳģ ndZcNGW+_b.TXsy>{oڧm6 _.-˽j_ %.A4,?zRZj/$07NKJ/P?筥ssVo}9强ԯg_n|ϗm̬/;+»Do7X`/uڇ }A7n-ܭ6]mI%ت?YO`MM{¿ ɑA7$B[oKj#B ?q7'7%n?rD^fz"@ ߥ5^44ڟ[fw1^·gǿ{Y}bvЭ-? w}g?zosϾr;?ZGd~zs^fxp ;toof4o7ݽv7 ]'/i`Be{ܙ[}?vwVm]ZVOЗ Ϝ Dn ;7}eO};?glٲ?OeC[.gNn[ƂkennyG_@px=[ïd. gg~X2 pΞ [ }g{>o}j/l|K"wߺ;ݲ X[õ-2< mm4s=}›;WWf/.<{y͎~]~j?Y[Z.P~Sw}p;|bcӡ:;`8E\? 
KxUɳ.̾uz?9ӓz+ٷfӷg/?{c㯝ffOLN~@gg~r,:9=9=x=[ϟ?ZPP HPUHGFFN<{Nx-S ?==-Cདy/^K$K_ GKK|d^͙ |VYʖp^͉`&g=iݪxhb@0 2<4$O2|gg$#B& 3@(O2 ~==tzޒqhCOS9=xPU @?ȋ/>Igώ"z~X/L`@,Pc^~PU @?^t~W[$䳭疯 (m;O=2 dx"cin)|퓜"yO$1|X;?7zGzxD+}S|n=_U2<Dᱴ']zR'I&=whpM{yF/L`@C?o|M/̀ߥ |9xK$1|v{2[xDno?l;>C/̀gO>'-uhف{|^Oفv )3@(2gS}NOU񹮮ćz%$QGh܍_(pcg{#Ea"oON%,C /|>ǥLeU2<D_K訞+ʰQFDꕨFMQ-2L)O[eU2<DTJN82lz%j5h2laoONyR`R/#8"@?T9rDOձcT]7l!R?.$)laK 9x{rʓ2;:dxjoǏ멺do9+OHqhgyR e 3x{rʓ2;:dxj /Suɛ'{_%D~\8J}4qolZG=9IbsT~bp{|._=0.mD{_p?3a_ ݽVi㣽-k' oo:h;:ѿpQ3eؓKKz|0owWtVMtS27 )Y~loW}]NyrNޠG'I&N#oK 3x{r+GoOI%V,T~%(v:A/ۏ>|<}jBMUwBrܒ`#kڦv!^"y4|`t{sxOx >,`oھbws>pwpIq[3p: /OD~T]>|XOՕ'{2"RD$>VO՛'xBʩʓɁ%Q(̀j5i7\ƞfB}*- -wWמ(%q#̗j ?o L ohm/UŠi %myW$o?zz-=UW ʈH5RMTyR'-˿z$詺J֌F}{yRF}L2>L0lٱޖKW\]zFL]( ̿Y!+V7w'A;sh5w7zKp.J~T]T]y7(#"JIRIґ7O*m|T]bk"G=>|XOՕ'eO:Q_^̀B ߥoV԰Iݪ_Q-M}#S{fe{EM n٩>YSH7W.|gpmC~o΃%.g)SuE6ʈHuh_=UoTI Su%LOՕb@[vru *$Y2X.K>SV .k̙ͿK$/ۏ+9Suޠ+QQGl!gzR5Qԯʯ詺 I'7KqD߇_Dt6ÿu__/ȋ')=UWa+Q'I&IGzs2lz%j5h͓2<`F=IQ;:TUo>;v$se<)C^G:-d [eqۓS!!H2U N 'e+QQGlaK 3x{rʓ2;ĖzIQ o ɓk)dx(FWQ&A (Özf'ev-2D^Ή_u}7XK~Z )ʰQFDꕨqԨIB6ʰ^G=9IbK$(QK~Z )ʰQFDꕨqԨIB6ʰ^G=9IbK$  +(I"J8j$h!eR/Ì#ޞ ^FqDb;U2lz%$QGlaK 9x{rʓ2;Ėzf@U  +/o?z(FWQ&A (Özf'ev-2#2<3Tۏ+ʰQFDꕨFMQ-2L)O[eU2<3Tۏ+ʰQFDꕨFMQ-2L)O[eUwWyt8ns"jRz*%2lz%j5h2laoONyR`R/#8rO|~mKޏC[zKp)fxgOR"o?z(FWNu4 ZF0'<)CClaT\e{Tt{pĞ_wW]\P\=UWa+QQGlaK 3x{rʓ2;ĖzIƑ7L&CG@my3Q͓2DꕨqԨIB6ʰ^G=9IbK$lW]/}ɾ.@͑y3Q͓2DꕨqԨIB6ʰ^G=9IbK$Y]u#ߺ%sYK=xuAF6OWQ&A (Özf'ev-2#?xu ]0So7@y3Q͓2DꕨqԨIB6ʰ^G=9IbK$]d<ÿ#?|pN`+QQ#laK 3u@QF;ĖzI@V/9wAϔpex~#R?.j5hTBF-Ì#ޞ ^FqDf DH"/MÿZ~䧞)COՕeVzN<)CHK%SϧA$;'m,<2lZ:(#bK 3@fgϾ~ׇ>tz}]vۏ#e9{!?| ē2CtG'ǪCF-Ìt<)CClddgowߔȋDz.\ o?SѣG Dz3u|EʐzN<)CHK%SϧA$;<E*Sɸ> Ѩeȑ8'ev-2̀@VewȋOs)yzN JS//JS׉'{Ó2T"?|IRɓQ=U?{C4j&r;IbLMMISϧ *$###xL^%Kx-G|{ʕ~hI^+=U?{)C~: |xRzqhX<‡!|(㭷ޒ2䧞OG $(#bK$(Q?y7/^KmWΚ߈/wU)=U?Ru"YOʐJ䧞O:IJw4??-]uɸteR9P78.6c|̀B   A`y#ɋ{Eʇ2LVAH5M􏠅 : 0㈷'<)CP'jǎRiH2Uo_$sC$y~<9'>I"J8jJ䧞~qo}[Ri0(I //KS%"2#2<D#WШ2lz%j5h;LJRuI"bQ[Jz~|h'xBʐz~<9. 
(L2Ewʇ2~~Mʐz~8(WQ&A +eO=_'q9)C~4qԐʓg!?|]p\Zʐz~RINPۥ LJ7 D;DFM.rZuɮQʐz~\tG&ÁS<2lZG 9^ʐzNy{>>H2OhdG׏ʇ2ORuo|Cʐz~\t:IJw4 OXxR e|(FLϧDtIzNRI{r\ߤ 逪:ooۏތ·2m&eO=_?]QIxDŽ[=_'zoqԐ_!?|7*i28"@?lzկۏ#e詺 9@׉z~\t:IJw4:uJꔟz~<ēN O(!Á> œJ|(逪#Tg'z~ =UW>[oIS׏'EFMTRuB6sUYϧDtRT<'PU2<DT|Hz(I.TtGӻnS~o]ʐzN(#BS)1(%R/:^Fq(Ïrpn`8SSRA{*ݽK?X0O$ƽ"d#uQO$%2lz%$)d>ǓJdKS eo|L)O逪B ;23ܽB"|hos0g'ZUr2<ෆ/GOOPFDꕨqԐIsv2"R 3u@QF;ĖzIQ ѱkT0-t4X;c2;rM-݃Sr֦pWҿIDs{־-V۷Fg7D62Sa+Q(OHkXʰ^G=9IbK$(y+4%^6c-NΜH,IUIDxd#ײ]R@aEk@kKqL 6T([ (/sZrjwI45cޥ):Xo{p {)C~I"J8j!h!eR/ÌFP^FqB۵PZ=\JKL ohm'y\B5AT6LWMm93Хf-R" śҽT~iŪLEwg˖-Ré͓2DꕨqԨCB6ʰ^G:(#bK$( ^9`XoK>|698>1/ekaP{ItDl(n5wËu)YE!)(fbU}* ZuVג/>J;j觠B| .9s8Ir2l!RDFMQ-28)O[e$Gid)zjx{{-w;2*9U=sr,z+Ç*:yT۵)^Ko?>|~yᗺK)=$[ß$8 kȓgJS&6OWQFA (Özf5ꀢv-2D'?g,?}뾯Ģ1$(I"J8j$h!eR/Ì#ޞ ^FqD7C2+{n5rX4}$9E6OWQ&A (Özf'ev-2#2<D97Rtz腼*ySuE6ʈHuԨIB6ʰ^=9IbK 3@(}+OnjzeMo>ВW$o?z(FWQ&A (Özf'ev-2#2<D9OnNep?~D^'S%o?z(FWNu4 ZF0'<)CClaT,o۟3oo:<Nn,Tۏ+ʰQFDꕨFMQ-2L)O[eU2<D9V5)h/?]dٲCFGOe(#"J8j$h!eR/Ì#ޞ ^FqD(gG>?5>;{-_|JgK]ٓۏ+ʰQFDꕨFMQ-2L)O[eUdB.Q,h+r}z`pEWewW}dI^+=UWa+QQGlaK 3x{rʓ2;ĖzIQ ԩW_}L" :wwutP'2Wk\gW=;{Ἲ~>dN ;N=UoNu4 :F0㈷'<)CCld%G^|I)ӲDf,v+}} ho|.Y f6-]s;lrrŠ>m@e|soj>͏*5TJx3Q͓2DꕨqԨIB6ʰ^G=9IbK$(Q衇~;k~Xf pkTz AYm+2Yj>jp/'INQP(zstԨI6ʰ^G=9N*$;w<|p[뮓iYoi|k@w{X)_/o4S;Zsvq~w/$(v9=UoNu4 :F0㈷'<)CCld%{9p@ww5\swȴ,ѷTؽbE{ :bV>Z;61>V[(+,??/?퀾N ѣGT9:Ij$deR/Ì'W=9p7nSƀPUH}ٝ;wJt?|鑑}MsۗN^(}~j;{g%zs !g̋w7m(ٻGM4j 'gǏS$QG`@(ÖzfTq\lTe'N ٳGһI}\exqal_5_|_/L^?!ySuIT]y7$zK}FMqm [eȑ馛T]ypQ=UoQ *$O~1i5@cp>ОO}ϔ^7UŽ"ySuEoQO/'#驺doxRF]j"Gʟ 7" * e}:ݓzYEI^+=UW_bs5=UW!Ro5u4 O*p႞+^lF=O^x_dxz' \f g"KIJ>ꩺJ=UWr2"RouԨIx2\_O;\ԻD6^=ē .詔U2<D9ßgx=.DOeDHS)QQGH}UǓœzq 2;Ė#2<D瞼׫>sdoJ$ȑ#z6ʈH=8j$POz<9(wG(^ m Cld e?ܺ#Sog $aN=5u4XuQG:(#bK$&~u'p\{߸m'7VI5_}N/QO6ʈpFM“zP8jE[e$G2S^}2o,w?A1~v]7޸k対ۯe5I멺Iѧu4 OR+lkԃbQ(ʈ`R/#8J=z}⋓Se6XhRW⇻; *Ov,͝1︲)w̓-͛@m=f4Im$QGlaK 3u@QF;ĖzIQ C=G.?eַ$ ;N~<>^rޮU`ÛG,Ԋ L{%iSTA$]z85yRptԨIB6ʰ^G:(#bK$(Q߹srDf 3x_{ށ15,̓gF7w67/TWWdx:>0;3鐼B4%\ΟHuQ-2LhE[eUD~Ϟ=a%6JWh5\W[ {&NJ]+YgSM$yqH~T]]eD^G:-d [eqԨ2"!H2eg}vΝ*Oѷtm^=<7fjx{|ѩz2׵3%??ݗggO>~>pȋdT]Q2"RD$5h2la"oONyR`R/ *$'N޳gw)R .Y2|[˷6~?_hi|֒o1\SN驺 eD^G:-d [eqۓS!!H2eӧOKh?vѣG^xYpnf~zV,[30;{-w+'O՝wީ2lz%2x4 ZFP'<)CClaTexhH.3|[~|vO>ǟ~9\IE}{/ߥd׮]z(FW"HU&A (Özj45v-2̀exqa_4V-e^ZXeGv.~sk/_P7Z#<dd1TĀRaT,M|>? CF. 
H * YMLg>}çs7|~7xAx |/k/<'^ճ3 ({S0Q3 dxrO=eӧư^Z ᭧8sOYJ&=QⰞ?g̀SF*Ճ%PU @ ]k֬?SY=TÃz {m:O&r0<ћN/̀eo~zNLyNOUIǞ;}_^⏮9R'I&=_\&3z d"cn/h^OU2<D9$]_O߭S$GwOz=~2屿e^ K)3@(gkaO*}z/*w)x_5w%:Ib4۷ A~2}ߕU/̀e|_n?_?{}S> O8?ܽ#z ΍ K'9P{ܰ2Px * Y?]zrvvת|JOV /HUOO(&zzI8(WNu4ho}{(޻o%Uk0'o ObK 3@(gٙ=?O\5mm;Su@esϷd8(WNu4h.3pj0'{v2\H 3@(ggxOO?UŽi=UWa+QQGlaK 3x{rʓ2;ĖzIQ ?Xȕ4tNKUT}jaEWHEQlnihWֳC_~t\/D^ϥj||\Oe(#"JIR&A (Öz&r'ev-2̀B 3܊pR%WHEm%Upn}ۇ?ssq߽V I^T]Q2"RDFMQ-28)O[e$Gie`Is{־3S|sgqLVڔ5]ۮ%+cS}r{08B9]ѩ6l9/o|FG15W.).pyow=w}+$)ʰyRH5u4 ZF0㈷'<)CCldYRt CXoK8\(N]RQY^skL )[aؑk>*'L-n-xUWS(#FK%2K:;_Xf1}[$9E6OWQ&A (Özf'ev-2×Ȓr4򛃫z]3%a>IYl.Hek3Q~YV k ȽH=s-(m \e[~_6B/ywS}[$9E6OWQ&A (Özf'ev-2#z[]l+]n(T?ך+ X+_/U3Q>d 8󍿼g۝۾w|^(I"J8j$h!eR/Ì#ޞ:ztS *8ït_W߇/}bgsfeN]7+t>~B~^=ed ooVjWe>ϝ NTۏ+ʰQFDꕨFMQ-2Lɩsz *$Ј]ploξ?-4o}[6_GOe(#"J8j$h!eR/Ì#ޞbs7@(g^L߻~2Tm?fggFo[z)\D^ܧS%o?z(FWNu4 O*9z+-b"oON-[LOŖzf@U Q2o?\ WC`gp%ZGOe(#"JIR&I%ΝSuŀ~PLH}@=zTOՕ'eSƀf@U Q2S_oaG>я\us]7 _}z5-ɋ{E.2lrNʓ!R!j5hTyf=UW ([ŌԹsT]yRƲetV/t'eIeNs|;>οY"/ϞDT]Qٳz<"Nu4 O*ԡeKȑ򗿬ʓ2<M“N * Y?Wxg.첦O=^MK^ ꩺ -ܢʓ!R!j5hTwSOʖA1(ɕgʈ_}U=UW$ Q..79{gտ(WAʰI詺dowG&O)lc@&{"-ȓ2'eO^$G2S^}2oH%GXۜ(E _ \5vmz< ONM<IRFA'P6GɞH c I%$(Q?z}/NNNO%6[$!Š|.߼9>5۱<,:cW6r~`amggSSaz o֙(֦\u:h_/>ܪw~{o+zyzwm$)9las4쉴pl!5}d:MvY˛ ?+!]/U(i>\lDy\2Ï^^AO)ʩG9lc@&{"-͓2;ĖzIQ sÇ:%6fsyS;Z-D6eV}KKe \fC[??g^^A$[z85yRptԨIB6ʰ^G:(#bK$(Q߳gρ;CeIjPes\n-+v ٔ?P۔>=pھ`,7ᣏ e Ϗ֜9[Mȋ۩=UW~+OeD^:Ij$h!eR/DFP^PUH}ٝ;wJt?|鑑};2n.nⱾ<>5}y>|`wU>YmB^dIJ߇o/>|pbg>Wܵ£ꓷKeɱi$/ۏ+ʰQFDꕨqԨIB6ʰ^G=9IbK$(Q?q={$OԷ%Y?ɛyq?*y<=UW#=UW ʈHuԨIB6ʰ^:(#bK 3(ß>}ZBcǎ=*"h 2?~-]'\'>۞7U /R%o?z(FWNu4 ZF0'<)CClaTexhH28{/NŽ"ySuE6ʈH5u4 ZF0㈷'<)CCld Y?}ӗO9wZo>tZT?{ySuE6ʈHuԨIB6ʰ^=9IbK 3@(guٷwo>W$o?z(FWQ&A (Özf'ev-2#2<D95潟+?8Ir2l!RDFMQ-28)O[e$Gdxr'ć> Ͼ'Mpe<)C^G:-d [eqۓS!!H2,Ͼ{ͻ㻯y'!"N 'e+QQGlaK 3x{rʓ2;ĖzIe -$(I"J8j$h!eR/Ì#ޞ ^FqD(w/y7g~o-E8Ir2l!RDFMQ-28)O[e$G2S^}2o,JZ  }zzȭjyf|xtB r/gJ8Ir2l!RDFMQ-28)O[e$G2ѣG_|qrrR~ʴ,ѷ٬=1PhUDRzD[텋uK])=UWa+Q'I:-d [eۓS!!0 2C=O9rD~Xf k;; }pή|uԙ3cW6rͲȭjs?k.F/ vbm>Ž"ySuE6ʈH5u4 ZF0㈷'<)CCld%;w<\Nls/o{Ae7c*( LH.W7dȭ _fY*?;[Q"/ۏ+ʰQFDꕨFMQ-2L)O[eUD~Ϟ=a%6[)Z)-EҼt3fF7w67/T/U)$T_ᯮx8JDGOe(#"JIR&A (Öz&r'ev-2̀B ܹSx)###6Fiᇻ[ {&NJ]+b]^ۧO tTzoOK$/ۏ+ʰQFDꕨqԨIB6ʰ^G=9IbK$(Q?q={$OԷg]zQ4=hxs+E̙cj6Řx5sjIӃICo MI7Z99ڃ76{xԡ(Ăġ i`DE@( _05,>^ϼk3w}fa7cV[uXܿ޴ǁ+;ֶ5 /}COn=FMxrec$̓V#E en!ܞJ$pA4ZPB~ȑC᳦o39ܻ+DD\{{nȓ(+#a$QU x iLjuTNbD3Q n]=ѥx~ltt_^$14'1{A e c$ύXGN;1py>V!|.Q C Lkh[ud[MNر1py&eN'GDD)?=;ׯ3W،eɜ9sGgH "ɶ}1py>G,'"Jed{_uQ {n[Sȶ!Cs̓:&1ƈuTiӦMqr@U֘(RGቈR.%$C+uTj x iLjuTɂ?>b(|?P351QɓJD4/ch0Oꨪ<4c:T*'1 (RGz>hΝ7ڵ ӧOˇ=j}2ڲ2|¦/7_owE .\m-^⥾kw{q"j?b 1IBUyXG,(MbU=[cHß8qbǎǎꫯpD /ٳr-=?JT߸?;mudW\ȯ[w"×14H' uTjBch1bq{*c{Ǐ۟6|<@wwm'bW[diw}h~{VsaגuV>ipsKv.YricbS:Wn럱;ϒo|Sq.uWoM<+6\|+Wvu7>qGͷ$kEDmT1FlL5@}=sK; ;l(wv7Ǐ6^84}5W#>=4' S~dch0O.ZMSHc -l?2ʊ14H' uTjBch1bq{*c#DD6o<:׿ 77.l?2ʊ14H' IU&)1f#ܞJ$pA4Z(R( Ɋ'b PQ×E{x%cI]GU[p1V='b PQ×E{x%yPGU&p1ƈuTՂbĜ9sdU ቈRK${x%cIIU&`]kLjuTNb3gdU ቈRm xTٳg(+'1\2tTjֵyXGܞJ$ \]eAyBA <e$ӧeN0O.ZMC3[nOr yXP-gDDW%<iPbG=~n\nEܧue$pyPGU&`]kLjuTNbD3Qቈ_tY.z1=d~_(+#a$QU x iLjuTNbD3QeOO6VZ={(+#auj5O!14*\Ph1bA=<QfZk$75kec$̓*\MSHc 3O?y/=wq>Y_pv?cO>fOޗ}?g_χO>H_Z XP J9ܞ XPǂbODtu?H =_:5Olx~闿-y]ҟd_~_ Of}\j‚fT5&,(cA'" LLLlr9Gy ^{g]wM䆌>2 d޵$QM J̘jnOjpXPቈob| /˜cPM,(D401AhY OqPMPiXPb&Up{jrÂbODDDDy_8W;Z49s>/# ቈ(yp?hf`ODDDD|W_]O&:uדIENDB`merTools/man/figures/README_substImpactPredict-1.png0000644000176200001440000002511013466135366022003 0ustar liggesusersPNG  IHDRMR/SPLTE:f:f:f8?b?b333::::f::f::::=_??b??b?MMMMMnMMMnMabb?bbb?bbbff:fff:fffffffnMMnMnnMnnnnnwց???َMMMnMnM::::fbٟ٫nMnnnȫfې۶?ȎMٟ҄bٽٟٽې:ېnvmfȎې۶ pHYsod IDATxݸUgC2 /Z . 
[hHM$?aɒ,ٲ%z<ϙ#[׃2A!P(kP(kP(kP(kP(kʅ\au@@s R\@hT4WP*(U v/U\Z~rI]ЖRf_\Cvٟ+I oi~ݯE oO׿KR@/ɃG SWC^W){7bvՐ𻿓Uݶ>@JЮr(A@_ T5q]%mր6KR@I I"*Z*F_(~~U= C4WP*(U *J@sЕ @^,ϭ-StJ,Tn.hD%]Pw4KFt @]Ј.K%uw @#.hD%]Pw4KFt @]Ј.K%uw @#.hD%] ,(rذCaQP!װ%K @c3| B(7] :;4Kv hDOIZCe۰ 5Ft2l-<hc@#T {*wa @hlarYPۥ0lUZK @cdFtI *KFtynhFty>P!r7hj h4Ec驖O%]ʕ{g3%\*{r @h,JVPCܥ+>O#T.En]P(8evY4O6]r>E\Aw4K_^w @|U+n/йRQf_Ц_ݯ@]P.N'uw @ӑ @WRWX6^]Ok>f˻ @GEq:THP.ny_iP˥} @TT^ԇeaG @4K'Y6,_ir6,薯H1|pC @KK; 6uc"%rOPiP:Ʋ8m @,YlT3XXQPiPje=ip @ @ ߓL,lBF%5Xր=g($`U!Tf1;4OxL$,+wijZFÕߴ ePtpÆЖ[>PC!vˇIS @!v9;T:4EK[ @;#zS @5*HIO]5R#KQw(*w'#tŀmTse<h鞧 ua @zk2@a rmТITn|zqiiZjY І<]g b6)aߡ(>,h^=tJ%Ϙ. ru6g2VA]w8\|T=m5=SBb,>U*Ui6=c ,WRd̀a!k}nWRk ӣώӯ=e? y1GwlkT)|X!eҏj53;d@*0%o>Z^| 7=*/|$8f)`#oF:\.PxL ')T(|Rѓ":/6PA#׉RY}]s:ƥ=w@9%LaM&ɣ^Z P >5FBS,|<97@Fg R{sp[3ZƧ ߡE8,S|:*4;x9y7sӴ٥Pʺ7o80?@ʝFPL[u̘7?=;@{vOIK1n |h{<*Ӑi SCib߽$uh{֮Bkqߡ:ph5){@gb#egUE;_Xb Nϒh j=ێ-Wk'=98/ʀНno>u ..ie^ro@I)zpp[J!IS,5sBӶ3_@[ 5WQ4fLhm{N˝-=;8VU^Tsbgg>J*0l)}.\*Q69:@S71RQݜ}}ǢjICWg҅xnހeaƼ‰5TҞF~e=|tN}1Ҏh1]Ŝx[pJ;@I߭KTvP6n`%9 -vrv~PKO @Y-4=zҎhU|.zA7Su),tO67G j%.5GmIsnPcﰮm4$Is˴yg-r/x*y\nx\)6n{J5k&la%蘵p/A] !ÀKuwg\YEc7 iyQ/h/OlL\V5G j|aHMkz]vvNd:e-B}AjxTq.5ӗT?ne;kb^4camPqH4vҴI.*T)W#P&iM@9TT߭RſxFhT@eMvY?| |`pP-EM7-xgPȊuU@R: aXg6sgdW7n\.]ck3gh8벗%g ͫ߼}pW7<&Je P|UL'|{՛yy-\d@JBWK.a + k:]?V')( \zVM!m]#я%4Ӫtr|aUOKtp{j@my}u?muݩڀVg:yÐRt;;C.Zjr(UU7u -h6 85 6غa3H[gd&HrycZo6e_ǀmPOK8[7^:ټ+]"L.hLC|:^r.ۨ.*htt}f;4Po٥uiP8Jh |惚ttibR@ z:UyN3i1!vrxZt!::m&|PΧ]P!uUjPo>ٕpV(Ivh{V| Ђ=-֌ # w̳'W׫\ քzT ]Ϝ-ny2G@IE3@o-_1YML/4dMp%)YAye57{@^O՗Igۓ/UUɂ5CXqi_[Pm՟5 d˥w΂$N&xh=0Z{챴99VrTwJPW閞D'NMmr(ڞڠݍKOSA_uU]m@O: Md@+>6{,ZeZGdPOT'"n =p.^P^/E`G!{Xj<͚#F/(gjET mNG$J@S7~zvttSB9P_h{Zws|+m'[ʑO)S%|J{P7?-/>~Z}z,j>E{|>N?Oo>#hVU} s*owtёFiGuTHFxgQЗs/>zDJ}5ƿ5)F?#O;.uʭ՛KPQz*mOACTO\dxLX`J?ǻ hroQ 2jԵ *=l6Oπ6wd @z I~ͅ 8~ mJP>s˩h`@Y')3ZGut4Э2sK3{ Pyu}G!5 j,W 5U(0c s"u|0=1PÄd+6oY3.$ `ϔ@T@3WfoxYGA+:VaG/ZcvxP$]G?E~HePʸ^}M΃J>u h( ЀiFO*V'Vo'S!@UG͓hGAU_yb-g$w.FVw-AOϝTo uec Ѐi2W?vis4fmvYwPwJ})F?w>Z,6:I?p7s>^ س^L= o>py.|Ѐu4Mڇ>Zs,2,ʫh!K,G:e@;#Gw 1쵬!6ɺac/x @#Y2:.DP_1 R4 mlLYŏ߆!!4R3mt$v:höeS!KO|,"@eK6!l"$qF9JI^zj?@]1le'{=H^v_͌W~=Xޣ>afCbw&Ɛʶ ([vm|Ɉ.AvWa  VOel\jn@J6([v Y;6jE, )%ak'*]N\vZ~QzQh ]Oe I}~*1zxUҺZGz2C@VPφfrCfOCNݰ uM!ԋ!TN Lf5& Pn0d䨴&\1ݗCq&s@m/|Lpx*U+x&;˪Oy~q@5*\mP݀\1S:MtxzЩ/Sޱ@n%4Okb\z63ti.cP\61v.9c_7΃qN ߗnͅ@љJ;IP6<죧 (_z6gJ|\.(U$eù>pMoNrpVJ.#ֆQ=k/ޜ2aƀFE{Tҙ=eGtY (j+g#J;AzBʇ7L;"n).@uJ ) Jq *|JPK [9\*A3c@Ce۰w2M7 uT =N'|%ڠ#-e)1Q^(աtcrt*x. j'%:ȌU*eIԀ*M xKe<\nѰ@+>Oo'p.z2HfYh\tU'x8n v[ Y. 
P'^Tw5!:JڠT' m Tz73>w^`CnOpma5hc#a@ Z3:iA:1@(#r }?{8z.hЎ1"[tݬ/ Z >&v Y h,h3 @[ (JneTb;+: }駋峣{JO[hݫ$5=z\Rz~gʋOK?W*(6M>V&9L ϟ%/I?^ӗ(OЇcoe萗gzЋOӲViM_y?6v]#M]M'4]Z)EKP3-nxBo2ڰ3y\J7cWE;t JaMẠ̊́ ZE"<;{~6t2Mp6Ǣx'/efҬX5T{wa:]]fZGwE n*Uy #[q54r~+IDAT#IvuEfl7kV`ˇ)&@6dT/1J {J{n&@|~Vl M_35Ͽ:@^}!{n4'i m+T 2=KTi ~>d$CIK{g&=Z-{?\)Oׅ۠Uzd;u9Z4}09I;!,״(͊>>W(SOC䇓;C=@$3bN= Ҟ%s݀ZLpm:å=@{7%Ѝi2HSiPQ@WUqgOiPk4݁yȥfOiP㻞-#wJ`܊>h0W6[FLZj+P7KfhR;]YֆM.w Qߡ3T ErJ J?Y  :'`bV%J hizPI(R._ @@z'uN`H(Si4˙* [0vFt9@ L[g@#Pέ2nQAPڒ!F(O;CtNFtyc]AGiLB3Ftrߛ:chD9Kϭ<`@]@ZfĨL4LUF2Sg,2@o KGCp6|t7~ |; e?b2T|W@ uJ2\zj ^%z_9Z6UV9Ј.#*EG4J]¹eF3@N%e ܈[I4X ]JӶ]0J6:WihD qiEӫVRg,2 )9y~* T|}Mn<'Ј.Eu.i,Ҷ}gG˭Xѥ@~ݩFtB<7]N­5Y hDJ6s pmW@*C0;t+ zfp4KmP7+Jy8{C(:97]t`h6go@#lFt9PplFtPMsd p(H@*C0;W PRP?ߡP 2~ SCL6]Pw4KFt @]Ј.K%uw @#.hD%]Pw4KFt @]Ј.K%uw @#.hD%Е @^ 3GYܤNtY,nR':,y7@!(((((zӲ|=ŇGG|yt,ˇ] "BH%yhr>so>{T^|ȧKO˧J*~dC\q>y_~ӺI钨}sɆVSO?y_|vY{uyVFr:Bj /zyvyỏ|vTl9dC\|J7+s/*_K:Bj ^!Vqvܳ#{˙dC\)[M_W 鲬;XgC\\A^*UkV!B.F@@@@@@@uvp;-OZoǩobuzqnM}}p,_oTso=Jփh}uPz;3ͫߐw<~K=ͫR@my__ ;w_!jk$NFؤVf:[RUkY4hTPZWx h$UEhE) "j's(fٕ*EhIzP(б"mOVC@G/PPPPPPP EpA:IENDB`merTools/man/figures/README_unnamed-chunk-9-1.png0000644000176200001440000001410213466135366021074 0ustar liggesusersPNG  IHDRMR/;PLTE:f:::f?b?b333::::????b??b???MMMMMnMMMnyMnMnMnMbb?bbb?bdMMffnMMnMnnMnnnnnnoSM????bَMMMnMMnM:bٟٽ٢MnMnnMȫf?yMȎMȎnٟbٽٟٽې:nfȎې۶a pHYsodIDATx {uuRߖmTܦHC9*K\tr U&ɥʱ5^!0s?.9 , )Jqu7LPJuR]T@)PJu5r +U58zx0@ (@U P`Tu0@:Q^?qx{@y:Z[i~=wxˏ.(zpr<*+-m'Dbs=Ļ3,s/~w@O캁j PCոAOY۵L -|q=.m(-3-Rֽh@/w-}:2l{n7'$e;=뱻-ݮ1Voסt >Lz%u?{IC2cP{, ԺᝤhM*U P(@UjT3ϕ T6@vC4m-MJ"܀2IJ,87fJ,87WG=hej rOBpn@' P($v Ó( hpԴr@cNƠ:d Z@71xN.~/ݎ9wn>{9`\{<A\z'5 s5S40W3 @c\z'{LwN39lRN PRNLƥ4jvܭwPf2.%QsO%871Ds,^ip)t53u~>uG?,oz'a;8@2ԦIRP]K 7/m_ktC:7xe$iٺjoGJGZvW@׭܀޾8^Us;̫huo$rXsz8t]\[G ?9:m><~SwhG'PE> @#O΃_j ~"dC== |;=tCcP{5#DQd~pwggj))9/9*TfN-=į[Jj& ރ@kVD}o;f2v\؅B5p h[$U P(@U P`Tu0@:U P(@U Pvܪ7=`zP(@U P`Tu0@:U P(@U P`Tu0@:U P+7-bN.~w<[vZJhyB46!' myi&~} ZwϤ|$u ^8츼]0/zP`z@zf3I8h`@G5?!tjg@ К@{eYځ.+&nnvP&i'I @jlvPT?Ew?g^ϕ T+@L/[kzPSM&If@OƇMcP^ZARʪ PS M4'ꥃ9Qo:QsP]@&i'I @jlvP&i'I @jlvP&i'I @jlvP&i'I @jlvP&i'M h<,p?-`zA:U P(@U P`Tu0@:U P(@U P`Tu0@:U @o_f77I;h@{ @уZ׿GONǎ>` *LZV]^YuMځ9ʹP cqP`zߤjzkރVf77ڛz;? @#=n[A 4;T*8vuW׵uf@;Pu-} PZ?d[pcBF@WYmc( hå>9iUPW'~l^{+Óٿ P<8 ѡxʰt`=hħCp+c!'n *DEGf+uG=U-R#8Bϭ '֑ σÝxL wOԋ9 N10i/P,cľ@bD G;?3kJXp/U.ΔU%wz?sza1h{zR h$4Tp~@'Hg4/} P sN6;(@MN 4gZLR4,>g&i'e jlvPf&i'I @}!>܀2IJ,87fJ,87WG=h@=v?87ֿa泗uMN}vlڅfځUCn58q;t`W&鎌4i#L7Og>sx 6/sh=]3?'5IաWuWf77I;(@MNk^wPzPc$56I;(@MN.4zZTwp~@`szcЄs8Ml&i';?D3!>`Tupv@OݽˏHpn@ *4><q3ӹgW.Vg"+ovTspn@' P vt\fJ&8O d 1ќh fp1꾚I҃.6'BisX@I&r56' j&/FL)'ꂃMjΨlA6 ~^T:xSۤtv:?(@=7 a6n@&fJEQܤovdv{=į#^kVqzgP&@~]PyCͳc58z`pe ?xz;;> @#;_j ~,PPap1hctt_VcP{5#D%P?IRq߾8K9>b8{r7?QosT @GXչ,~i]CΉ!^hb5"[Ԗu  h`U{Y|ub9'B@'GgK 9h$O ?I@GAAS cA[sǺ&i'I @jlvP&i'I @jlvP&i'I @V͹gw;nzдA:U P(@U P`Tu0@:U P(@U P`Tu0@:U к@l]a6qsjZq[ځ-2w6BsP@˪܎;GR;[0;]I#܃z;:g>sP@G5?j3?'W$`Tu0@:U P(@Uh58~\6b"b;<+0w^%Xa\RT(J.R :0׫M/? 
C`#еk"8|WN7O{g_{w[lgA<ѧB)Ug3kvN| }H.}3^.gޔ^ W_㷽W{yae#ev<ޮQ|u?lp7}(6O{H1ƿ/_z4:L<prAGe럍F;֌vVFrRFM~x6l oa)Yzcpj8l+[?~`yBoai6z-MΌsZp$JuR]T@)PJuR]T@=7w~U]ִ꯾X!K-:u(zt~?yﻓwX~'қlԽ~bg3A_|zP{}W)eԡ^tF)#C:@ݫ8| PP|tgP{ee*瓤$i:Q, <ίA'_ON3 1)R (J.R (J.R 3#IENDB`merTools/man/figures/README-effPanel.png0000644000176200001440000013551213402510754017466 0ustar liggesusersPNG  IHDRM)ƎsRGBgAMA a pHYsodIDATx^l]y ۠Ep{"m;MRw TS+b |144DO TаJ k09;b27ǰq:K9(5ӏČ„!aG.m՜*i}>/\g]k..88|>88|Coժ`[\=ӑ[s9uaFn]iyxrz><׮\|^6/8K uuj[|?>ּY%̝+gN==y?=r @6{g7LAa` w}rת֕O]֥39̩;eMjn]9ܓEG\V sO*}J1(){LGN]o=<~H d4/5Ň#Z7nh?3ڷ{>ڴ~>o\9ϛظt깹|*<վ\Lbgܕ=KqY1߽{*&*Yv(v37L5 C{`/ ]s3˷I=,I{㊊l~6m|Iu{7ϴŝㅽN>OƭKgR 19WݹYLxϕƼ|@JDtb5w6V9{pmo+\>ck6o,([Y_lvh>qXݣ L=f;(6MbĕѾL߇Z]}ʂ#&(K?)tOf1hOU\YIH?x<7Du| >_~"LZE{v_ڽt‚0uK$ͮtvuki1/(2~ʮG+d7 ŝ..;w湧W=_>͢MsAC/ |O5ۦnYظtz>XFv5`ήVu6,+wݮz ~]Hz9fgNm]z֝/HiR~}jf _xSsWU8¤%/ƮEֶsl:~w%I7_PM_tvuڗPk&߼жQw>2rF|e{ ~q|~rl/S=^_}~=%Go¤Nu̓'+8+^+.{f1Qڝl>3::@]bGZ^vήV^_{>߭]g¤=zv;QKwne_Z}頳6mM y3;ί$n6^S5cS•+zWFײYDTwI`";f%=_C>`/_ݰ|_ +=50,|>88l7]ܼySd]6wC[oW=P)v}}6tF]6ڟOU@uЩγnp|_P2`c|>@w >~|O1| >` ;p? D.W_Z%{𭯴}jծW.{ H Ocw;_w*]u{W|[U֫_Y3KoՍwS_2IUSc8"Rgז[_\hxo~7o5/yA>k7{߿jot< =?;nܸ٤7[>_o™]UIfcXo]r[7.~To4oܽW.6^L׾s;o|o|rg/6x3/+s;7οo|/?lf^}zuH7g֫w~no[/};QsX{|o,_s|.R)֕S}u_w-bNx D]ͅKm^u|Mu_?Toqo2zFU_GUqk'|X\ETw7-3q߸'u뿧zϓP,k_|J{ _y;^~@gŗe߿r} _x,#kJ]#s3o7E_[s^VY,~o޹U#Ryܫ~Mo_)>b2r~~\b׮|u׾y:*'mY߻_)~oHI3NgKk<_=pWSV~o{+^~K魹_TWo}b1lqW_i4S7r>Q|:޽jW }[7s~gl\=?wa(fZ*>?)^s}JXmeO}?/.8;wͽܿw{g_^n}nay_~7ެFRZoI;_/yܛΖ^)p5>ٸ{~Gq-o,,\l_XL¥:?o|+ɸ^<i~~s]Y^~c_~}gϟA}|7]<_  ky}mё'&o^m;\:УfqbdW.A>xۗ܃{03`lfxɹ+>v葑'GGGN/U|R/N9rU̳GFF^.G dV~"j=։#J8&E*|^JOs䇽2wZ}Řjr6wqo |>d;_8F0άϏ__i>Ͽ2?yR_ޓW0]G҂L؎R㱧'ZK+:Q̜\Ero,i8G'o|>d>0 2 1vxِbGmMpz~M||~}k;nfJ>|rf􇏵ȧ˽Wg.`;p?U>ūՏ=yO秋W?=9WWwn?|k$cghTcͫT|Y壀@;p?َx|>0c|>@w7J %8b> Z0| 766^W^yE߯mVտTb5~zٜ<9ln.*){>W_}ŋ_{*ۦחuek [_Y^YWb.ߩ՚h-Mm#_U&^_hL``PK/}(]e|bc~{qr|nummyfym5yij' U7wFkMF1$Ec:bK> |> /^|lj߾sڕL1U9``P[o+vBbD5?31-V̼Yi8}zfIe 2FOnӓVsYށ}>w}|M{UvoH>Z%;4aC>|0;Zgȑ';VN:>rjbޞ`;|>@߹zke3jn]{>07ό|j(2WF?2r՛ε cGFF]8Vq{?񏌎~kմ/R#IK꒪- _W^Q|lB͝E^;ybn [w^;x᠋T߹rfɳW6j;W>>rR"KF7Wܸ!?q8UvjQun= o꫗.]ykVe۬6'_kM緇ҽsՇ7OR5:9{? _zFʶ)\Z9??!h,c51RFCյj?qɂ=b9 B92<{Io.އ{'Mڅ1yy1,R1?X_>9ZXZ/+EMH(=e>6ҡ}?=$|󏏞)_kHPt!a3aP[o޿Kukm<^_lηeVsFoO+~/1?ߜC|ظrl|>@߸|w`@q|>@`_|> }>` /oll민_e۬6O'OL/Uɾ6+ӭ6A}zҥ~[QʶYmN4WKRՅDki|ƌDחZӭe4'&&Uejs|fr}~ZsƜ/5}>^z?mV';yس&J߾4=bQ_j4B&fV2UVʼnFkh.6[˫Ņ=A}ŋ/+JW6+ӓ뷗ON3eUsqmQX{ŪfYL:_ \(t󀵕ɉy9g>VudRʝOrN\%'.]whGquVc)]|s\W[}ssתDNg3t}\x>=99j5&[+W["ྰ3qruW.?{yM~΍KϞp{/i.zSϿ6wVs7T|}W5*[uJ3gKn^%;wRQ[T_*٭+WEδ?ϗ3ܼTG[ԤtѦ3l\9;vum#X6^8s|9+0پ9vJ.J+$Klܸpuk~6jU ig^+y6?.~WٗʮVig򹧎*)z:/Z>]'|سUnqUg4/˿5^칫f277 ]vegq\uBe/{qƕֹSg/˷w=Ny9L0l?HꋩLumڬLgL>Kg7{dw⹺s|5RJ*;/l蘭ZJz>]~~\]g$c|p_ D.3.ϝ?uVFs¹zϴn\>/+\jjܹrUsg/\q33lZ_TT2]F̹sg*}K\\tm*mc-:Z# =[W/\hj@7]>|sŐ^4J |>@lok-έ 7/_qEvۿJW;sh-ƒ2-|K͛{ܼz aֵ+WUz6nw wnz꼳 A`> |`P믿+~MY~JE[Ӎp`Pꫯ^tV |4kskz2?\cbbk故DkiHMO43-H*jskFsb1^֎Ws/Kt &XRk5Zh,Oϭ-L6WWk˫kK3Orie9ʼn M*nWW=A}ŋ/+JW6kւhO.we[r}3E|~铓e?H+YEB,/4'ONs/[/j__8]s__x}}1>=j__>ݘoN;ZUJGe9(~?lΌO/iӭVcr{5hLLo%| }ݷ~7T{Wewk U`8`Pg.-,,,^ 0t`_}>|>@`_|> }H'[nZlbخK(:n4ǎumQ7[cyاbg>}|~KjkLŀh'O D߾kƥScy3GFFF?rµ;e;7>2:rg0L%^߽uqd ,^xp39sI9 ?|W?g.'?sOVWr@狸Ll>3zj}y|kϿzn ?wǟ?kn^;m}56r9{RWc+j_U՛"|qg}d% |='P?*scsO9̙ >>0}g5{SWύUIz˽m˻.QN5a+瞊W>>fk>sϜrGӿv'^)\B{L^~lynnxUW=vb?<`GGGX\}{yXsCʼnAv0~Gc##Ggo^j<622 +O+GFFNFO|J^3+#mo+'G&;/N^?=:;cGo/O]JO/SNL<;l 2m?\"?\cWxWwbce~'gfN׭k<1:ls{Ťu?|Q|`_K o޹rЉ_-jF?>xѓ'.^uƋQ Z4j`_|>}>` / >|`P;\z ADۻ[w}`PC`M\A}+W:vj>||> |8 p0صx߼h|tjC@u|;_gJ翢w|qTeCρzȸyf`ɟI`jáͣ>Vc.>Fwwox05Y6 ,1lYx@>IuQ05Y6 ,1lY>׾X 5Y6 ,1lYo> ptg,j>?\=f Y,VcXy՘ ,>_! 
ej1s|>6k8:dlXY`5Vc._zvU p]f8:dlXY`5y9fW"yj}SS+ه |>UXY`5|},d9{O>tXvx_Ҽώ:|x̢nf±rl Fh7Q a߲Q}v|@N痷7gn{.~!6]}aUuX[Wawଲj1s|>@N~W>vg,j>?\=flXY`5Vc. >βY`5f՘|~X`{> 'aC8f՘Vc9`51 |Y,VcXy՘ ,kl-V`83Y6 ,1lYWZ'?<:2rDkayXEX-jtJ|~$ '[+s^[mX\j >βY`5f՘|~X`{Bf2O5;?֖g=22zt=X;zTn%ΗOxT1ڬl_ /(PDkm}qPfҋ ǚGrbɹRl|0! ej1s>v0ޥ{Vzr飉?:|{}>ڬ,Eie\K'h<'C]+7>{ؑ'&w:6aC8f՘Vc9`51 }~_9L|iOϯ-5_G}9/&ҏq?ӥWxo_ggf&5fս81:f8:dlXY`5Vc.^_?;חW?=S u^/qoVV2~6z䉧L K/N<=^/}aC8f՘Vc9`51 }>;|~~u;?ptg,j>?\=f!{>βY`5f՘|~X`{> 'aC8f՘Vc9`51 |Y,VcXy8TPi&Աs|>@NNM]䶸>uС٪UχY6 ,0>>?;lYy睫W^8h{w{˳廱C?;tT|!m& S`u],i>;\'OIn%[_$ ?&Q_/! {VY`5f՘ѾUja51 _r8h>ϖbloToWlXY`5a?՘ ,$=n,VcXy՘ ,0n,VcXy՘ ,>_! ej1sz _h|ܽ͗S v k8:dlXY`5Vc.p?|z͋/޸q.>_! ej1syW_\xKo~獋ͷ>wn>_! ej1sw y7.΍?`?ptg,j>?\=f~?~ /|o¥K߬z} G,p1 ?\=f Y,VcXy՘ ,dW^],i>Y,VcXy՘ ,dW"yj>βY`5f!Yڇ{ݧ1lYr G,p1 <{ |1 |_?;5u}>uСY7tjM޺S}-|vg,jþC1lYd|~y|m_/Ħ/,}yun,VcXy՘ ,rĎ[χY6 ,1lYn,VcXy՘ ,rf8:dlXY`5Vc. >βY`5f՘|~X`{WJnŀe3Y6 ,1lYWFF,߾xldXsđskU,NL6z̛&:3Y6 ,1lY{c噣3=zznё#'Zrɫrꥮs'F+s5H#%_isg##ϿPM<ĶKӶzNɏ=R`N?Xzf>aC8f՘Vc9`51 }gǛ͓'?/ڑIҟ/5{z"o^9]Kc`daONΦ9/ki˷77 C^N>,75~02ǚ b G,p1 \(<5u]Ϗk>}|]w:Гo󕖇OGUQ<I$ ʲ~|5~0eO?\=fa/|Ĩ\t/^?z䉧Ov}qtxKGGG?tzTֺo @\wO~jTH:*~}&)/P`nF`?ptg,j>?\=f!χʼnѣC>βY`5f՘|~X`{> 'aC8f՘Vc9`51 |Y,VcXy՘ ,rf8:dlXYxhe|~71 |Y,Vcy#6SS+ٗSuCM_D #`Q}!|vg,jCcVc./^?kU6͉FaZ'O>}y111=̄|bY]hNNO֕oԘZ痷˗ώ>]Blz}e녹w| ej1s>^g?;66/~Qʶo诵skk3FcIEKƢo^[^^]__[)2۷g? v |>βY`5f՘|~X`{Bfŗ_~Y_Qʶ)Llί/>qzfA/7fURn''[ K E`Hy>p8f՘Vc9`51 }[o%{/Ze̳ Ko5N6'ijq?ytc~9ytk|0! ej1s>w}|M{U?ptg,j>?\=f!x3Y6 ,1lY?ptg,j>?\=f|0! ej1s|>@N G,p1 ^6| ej1s>W_t>7ZmڜoF5}dח+EBsrb|~{mqRcF`~~v>! ej1s>^mlZ9??!89><3\][^^]__[Y,+Wo^Gp?βY`5f՘|~X`{Bf/}K2UMޗgfs~}3 b~~quydkai>Y!?=C8f՘Fnȓ71 }[o%{K/)V6KS~Ji7| ejC9`oc2w}~7U{UY_"_f*Q;tp3ٱCIf7&jOM_DAQ}`r++)_^dTa$2~~U vj71 <亟?5{}vV>~ӟ+2sSoTna7p1 βY`5f՘|~X`{> '3χY6 ,1lY$Ϗ㧯 >vg,j>?\=f<|>βY`5f՘|~X`{> 'aC8f՘Vc9`51 |Y,VcXy՘ ,rf8:dlXY`5Vc. >βY`5f՘|~X`{> 'aC8f՘Vc9`51 CWJ\l|0! ej1s>yldzJ5Z*c'F&v)tuM0aC8f՘Vc9`51 }~iO.[Ǟxb̳GdOϩG},|JG hDc_p~rJ1ѧ饸cGyb1qͫlG yY,VcXy՘ ,9tC^–/N8="89zz~Ѷwfu"?V`twǎN/{˫ٙ ^i>{;Ub? B{>βY`5f՘|~X`{B~Zht_[j<6>/J.Pɲ J:Ow\`3Y6 ,1lYO>}ixE3jNtn_#'Z]~bdS^_d T2+Uy8f8:dlXPiMj:a썹`{Bfw>vjz%ÇͺЦofpߨwW>eDnlСf><&eE]?$~>YeոK<瀽1lY~>@N珍.n,WKHNL*+}n,VcXy՘ ,r*?[){'%Wv| ej1s|>@Nߓ0lXY`5Vc.χY6 ,1lY?ptg,j>?\=f!x_yʶYmNT^znn`y?X\=Xm6{\bm9?ptg,j>?\=f!W/^^^mRRfkrb|fI}}y111=Յ-勚fMfXXlw-'חf'32 -L<{VeZ˝|0! ej1s>^g?;66/~Qʶ鸟_qoggfo/Nϭ-L6WזWזվ_h]FcIuKF1B1rii]J:93Y6 ,1lY/^/+JW6>~8= WON˚-wW2pe1[j\&&v>βY`5f՘|~X`{Bf[o+oɹfFӧc[ӁӓVsٸ|sbӋEǛ |0! ej1s>w}|M{U?ptg,j>?\=f!x3Y6 ,1lY?ptg,j>?\=f|0! ej1s|>@N G,p1 ?\: |Y,|~v1{c.f>cc_W*&xdJf/椝^[hU?a|J˜N~cPh7Q{Mgn;{C`꫗.]ҌZmڜhڐ/NL,^[oN7ZӭuL79۫ ɉRg~mqqK0-XXP7bo}6(ߍ.~ ?TlV}ccSp?v_V|c7oY|?^ml3{/lbjђY_k5k˫kK3Ww;b1O9Ʌ^?;\ܫ9<%Y l}*leY?iawp1 βY`5f՘|~X`{Bf{|ME*{X_m4}a7p1 @N G,p1 ?\=f|0! ej1s|>@N G,p1 ?\=f|0! ej1s|>@N G,p1 βYHW!\Njy Ls7,dKKK׮]ַUuʼnJvlӱβY>H3>?<39`o߬_oW~h|_L盭ɉ%Jf-.JW֗f'3E\4͉jVJr||U\0 M=4!¤RcrA$ed30JrZv}|pS}å/جTgggKQ1Wh7Q NRpw3̿WRRW|yffQ.Ze+][l^[Eғskk3WɉN<챉Nj۽SX+jז`|GT:ˊ~JgO}ءcs2՗͗/l Dh U vF}Vj0$y{c.f/]3Oo~ջwĴ_ _gu\O V''tL\B/~&0_ Qؙ6hwo/LL.Vf^ʺ)o9Uv5E?>C8f՘Vc9`51 9}|oR73͉VNz9yQN6[͉gKo5N6K>rkIjkњVm5m:Urrҧ[u'|Yeibm^Nf\UgOlXY`5Vc.ׯ|pf8:dlXY`5Vc.<Y,VcXy՘ ,rf8:dlXY`5Vc. >βY`5f՘|~X`{> 'aC8f՘Vc9`51 |Y,VcXy՘ ,rf8:dlXY`5Vc. >βY`5f՘|~X`{> 'aC8f՘Vc9`51 |Y,VcXy՘ ,rf8:dlXY`5Vc. 
>βY`5f՘|~X`{> 'aC8f՘Vc9`51 |qhΑ0|B5,sX`{> '{gWrt)A6 8<Vc.ɞSͪQءC 7?;v1(fBg|i8HTZ{C`Å:<%Y l}*DUVr?7J%z+u@Uj<sdOU~c-|S$egǢWFu5 >vg,j>?\=f=$a7p1 βY`5f՘|~X`{¶}]]h{&YmC >vg,j>?\=fa>;| ej1s|>C>vg,j>?\=fЁχY6 ,1lYC6CU AjC 2/|P) V# , 5ppo>un p\ggY``cSc!tX{s6F*|wlzo8LX?zFYzS @/B}`:46[#$ @HW#  Rjk{pp|>o?3[MsG]r"t%ppagvſ?$_R볳eÅ㟭<`q?v~~!|><|>88|>88|>88|>88;7>q^Ygo*:n4NJpmQ7[cyG;W=9̹+7<9zr jH:>,~b@4aGFKo\m6*!=9rlKg9222EA۳.S9̙Kuc/=S֕51P׾u3(^p6.k^踳Nj+g'^Qgj淸|]35erEM詹:|k_ʎ7.(L)1:.cG9Ӻr{D;?mS~~ɶ/+*F|}|~+瞊W+\=ԑ{7>5:||{~~a?{En&>x[wzm5rʹ_%5y MX6UW=U0l[W'?Z;}_/s}KuϟP#ct|>͹򊝯||٧ʟ1T=? |}B0|>88|>88|>88|>88|ء6SSe^ԙS[@ٱJmEL|iۆlڦXj@sƏB ]+6~Ȑ< |>d%_m2nׯ˝]uF5x7%hi<xH@V:|fO-|{XO}pmڂ^GYIn{7ʚ/k :}t6ʱ}G 0t<f |8.|C{m !pph 9|88p|>8ƒO| x>$/ : cvx 8C>@r ~{olm >`>uQr_~}jtş9t-<>`SXJp|>WfNJ{cUSp|>8C>@m`ݿ.|>-_8 |>-_8 |>-_>/|\m`b{%|G[ؿr88p > |G[ؿr88p}Gzv>-J|G[w(m>>`Wm`ƾ:pph /bO|G[BWSZv >i# {%|G[B|^"Gm`7j:: > |G[BmSpTSKpxw*5rq(B |>-}">`8)J*7O{С5pph 𐰵ߍbk:Ev-/oI> +} %S4:|~ڕ|Jm|>-C:n޾w&А <|{|h_jm7@o|>-C^tTRNiA}5nBfӨDjmC]Jm|>-CN~ޭXG4tWRZڕֵ(̧QI C6@፱Jm|>--jzm]q},WRM|hJDTQؽ; k(.|ho&)+V88pBjFzԆ3iaZ>k/R^EQMkM$ӂa"b퍱=~Crrm8藯Q+s_2pԚ:VͤϚ6B'RӨR]>Њq2n2zS裏wmDe*!zkenzkDWZ8cϗ&u1Tt;nVD56|xPi4;콬<\^ 97>*W ZioOMQw ߮.,L}"lTAh+nYK @|><(zF]ݽ(jô7yW|>~C G[F'.k^7|gS f:}b>t'+_LXv>+UfdB {{e)q?pEq6)ƔD2}4C$F7?}nJ}~~+CjF-b?DzΙFS[hsj{{{)|# -jP+hR-vի֊>;ߩ!R'HoӤF)X #/"-z-|w3boͿYwfA1EoˊP&-Gϗ@mva&t|@iw&E] Ҷ*s] #^^~>_wQ\UN6Tx55!{!._|ĉO$kG[⣘EĞF*]i \#Ռ(w5}U5-z"mֺ=|S|9=nUj@۷߭C%+j{VP>&}ÇeS>;=ONœ 8d&5QyMI :]5|Pg+@H~>ZIi*lwz-Sj9/K4ʗ#|ᝰjΘZjJZǞlbvPŋr\5>ȏ"wj)jxt*鴙\>EYM=3鱩0QiiDRgJl'mME_-}}-٥ϯϼ54 =nV6i[G4eOE-6%Fj-T{BM'Ur?3?=eO}EW-㯍1K1%8cMhZѫ=2q:dD &a-dN=>p%qRږ|Ս9WQ>`?R{'O//c^ESt$mj;U]w&XވBUIը)a?OT2|>-|si#|G[}]>-t̲vB](㗄p)po1DW|Z IM.(\5rSQ;-WRNۮ^]pg,i{QVS^l{Oʝj'TMZ|K roʨ^]]n*D>_QEWGFFԔϗϯMG[9RĨ-fKj}vBDAM&QEQ'Jz6ZxTkBQK8:.uMEx`B{[>_1*eݵ7 i >`_ݦR8߯wkbqSxSVtQF!B r4Viv'uv.m~V5v2r[|E>?ͳpw>t}r;{16͞WIN ; gt,d2&z-.$jcիc䭅y7ecf k!>_(m{R2uƴB:`w>`hTL[+4iwFhss^H oWE%&)8ʓ vH˷oB;s=я~TZ^5??o}ϯВ#6|G[yREf[(7R&rSĹI EQ8cጏn gDd߸Z*v| .'MuꦼZeM4`w>-.D1DDSXGFu#DSeʪʥlʸW6[M!2g, 2*wF9'N>_>|H/KfЇ3SKc"=<2[̟UiM-֚q mDFn<"( u k㇤Bɲ%7覢췄 2r*WT. ѕ&Mtwg/\Ԯb'X|5ڒtI3֊! *jr3TiGXvVTI၊;:#(y1-M%6BMiEy7ܻ8u22To /1jW-33Nc"=<2=Ot(jie M'CeG22wF 1Tz Q7-/ 5ϷoM}v*i5oTJF4槔z}9> E'E4>؝χ7Ly03igoJU$MJ0[f[. !4OTXp7%\hzFMO/>؎]7O>dujCU8|ZMEtgအm!i3DwLI? 
#kEm,ֵ!52.5V]n::#$lvYX7CbwIhjJ08*i ] [+rr|k>k_$> 8xtЩTYP#iRm=a@Sy6iG5˞:λؕA qoT:)NuB" 35tR%7cp]jڱP>_Q5jj.cuFiVHv)ԇ:pRQЊj9tTFqJYˈ4+ "k _D貖MDiT6iE׈uRQ5ww9EFxHdgTGZTڨ!-D-X q3pML2jrSIQ +BB" LQхXt3@Y^oڄ۱PR٪LI tC%䁝vJJw3Xhgo@Mb!˸(dD=i1|12.MC[DFIi.k!@@kSmu4 elM{EQ7PS8Gfm˞.2DFآK(o-$췻$؝IBIr5CȮ>Vmm&m<:QM=$x'b ]vv Z5{S :E3n&Qf^FDS1v4#v|u1+u+)dw]2nZK;c-TQhzk?ձqh/'t9H* Y8ctO Lq*T&'0dڛq^rc(^QJZ_eIZ^ĵ&Ǫ1f+VQc.VV3Be f7=M&*P:M) G7^Z]ȇ?>O?]mm?yy|>GGJr=/IҦI.3%.sƽnB'iV 5ef2FɨiY ׈hZĄEum Eڜ+)^'|J*Fy7=JNk$[zi5gԺwFֽ[i%Өdh=i1*՟ZM42PYZYkȤB_ݵL$YjutxHI Y2]ddw3KnwXg\hz*kJE5U (J\z%dTF1iF5ʤK"m]eyq73ŰOGEҢ(jg|x=d7>_FW>-8!>>6h{#} =:%:ƥM6]榴n2B:λLXEq6itbTF:i&F#=[)K leM-5{|#iT#N}vT5g1{gԺ5?AK̬koJF4ilETk%(J'CQI#Qt T6iwl=z5ʤ͔Z1fH']ঢ.G"1I2-T(*HOw)  w3FT5ΨKQZx5%0hT2b!/T+)N^+騦^㌢pEϗ3;}}F%(K_`vСSScWp%:Uj`5db+K$t HE5#aT7bfdRRxhZG.PJG5˙Z1MJzHJj 2 9[Eiݲ9UkwIjn '|kAS'8kȘ7n o ']G[|~FETAMY6X+Py7}@IUbk52N5C(oT&ϩTSHW]8)j uy`DLUI3!(6rpSŞBBe!\Zh*g kCDp'tt1%]r{_KސD |>볳Vp%:USoeB=kßQ>25HE5EYR ׁҢA -aqLցqE-oa]&2i2EVMGNqz{eG?m_Pb1KY`!~JxFC |Pi* !iWZ;_tEKqR- I ȵJκf;[EYJ@E5%QWZckTD,{%"MEeݴPrSMffݗ3_D>Eq1\8_551"'~6~󁦕״r;3um >gt(,5~Du֦kH:FϵQPkeOGW4LJI+-js gjD"If~y%+$͔^fO̙nghͩ$d.ի|i=~jj$Jja@GIg$\eaR,fQ4N %Q 3YV9@!4<2M-* wՒnRQ)+:/Q:ʇeGy6 O(!#q챴}2N*ʪjFR•IRG5TED ͞K3: DC+Wu-MD-_+H5""8ZDD>[4ǿ#o- :tHS u͟>|>n٩]tLt Q uĴ;MSWycu[n#*tYQQ WWѸ3yEQT:-R=|-]n(+$͔^ޢmO 5cM(ltSSIiT=vZĨb?g(mEkTPΨPB]WQR%U@g\| эjD-ֆcW rhQEHGkd_%|5Uz.E(%$\lT^ѕnYI{׈½rgI2gHP*NǺK1刎F!ѕ(EY6g|MȘGH3aMSESÅ3}//O_rhW_Nq^=Ťr˨{-| |w+VС*_*_ENeZJ !|TiBR%ͦP2w5uƧa@Ȼ^12N::-RQum7ѬETtE@]!.7_{__o_گ ^e*ф!0)-𹶆>F3PWh 4 פѽ):ZDꅓMJGU2ҎNFcJC鎼pRT̤κĶ>:̇/}q4򱊶2Ҋ"z%FZž5BwTQQTQoV&.'5JIz ׻LX>P.3.Kd}>w΋ğY g2j(ZxHLI 9@ϣ.T.t2겖V#RW\e ͯv/*䮲Zhf0amM4E |4=(JBG>0EQ DS#QvE<6ClWLb:y4-W~# i!mOjOҴj:re!]Tx]/Wـ>_c Fr?`oa3h^zΦ=Pt>%"*hJ'qe<0eʒFu MFYw'LEYIC*S_^dio;Az^]*iZɄU_ -K vD*PҶ5\H(`-!Qu*WT& _Qt9IۥU.l'5*-Tȸ@8gCF'GRB2  ]`ܔ2pSV]H(Iu bՔpǺT %FB( 8)$,Rbjzf@(MRMʻYQbwބ|SavAEXb|ڔLϼDO0eÐ]s5bO_}!*ӷ!|;ٟY}C0*k|T>l|>Ppn_GJ툞%Nԫl\ 'k+ DLJKGڸ1ֽʨ+yQK(5I3r.\#T/Ҵ{h7r˒gt vD*PRhυƣd}=]h*$Wpg$-|E>; "/ҤO ALzџAjzf*Uf-#92'WB^WQy3*(bT6*p+: L1)L$exPSI*)\#J =Jʹ2-:}bZ3b2O!"ECD{,ĀZeñOr3}iqu =0;r6y}=}}|>j@4;b5ӌuL3FXE-C1.WVْh;7· ӺחKq^T*U&khBZo9ɔI7p֫f;DnGBt4+|i(;<2rqDâ;FB0+mϗG?z{GyD{ow-Чrs{WfpJmk8뭅ZHZ(:)tu33*vҁ tk\*#^ 5SUZ2%)?glK\~~…  wE3%F׸)lXYRd֪<½N t|E Tޟb5=OZh{Ni k Q–GI%Uy&||%ǹeY66C=-Z1>!46r<'W,?ŧy]5cvkUܔP2'pthou)TK(XC(Be HY 育!j.SH3i3B2߳~Nin\D>x_Nغ), #o!jems|=*|?+裏8q>߷c%;||l[zחX+t7E_P!pJ'M4c({ KIGF& RUZ2c-|Ϯ9ŵĶ쮐pW4Sb QmvqFg2E)(QP>tPޣv-jqtY.~4d@]0O$(cYPƟ;5󢤛BWBUniz%5: |.UFQ(2K򊩈Ǚ6E9bӽȧ1%!j, 1`~kooquG"BKTlԨM#FB˺˙k|S/~Z2׷WM{uOCrڐrG|-ot 8#uYRy~gҙiF2&ʈ£Bå݌^c]D)tEϤ٢Kl,zi;^?RvWH+)1JB(BK'Bx+E+ 55PQWp^p(;YNQbtUcDT঴#)C}z$SgDN[IWv3B~^Q).!^ˆz4-lK%QnMJ|cB1*^"Ԛ!t, 1`~2ټg+{˥ E3=i>[4E fYGWw} #Sk >_|.So/j%QQ>1| ?fb7O^x / -/TBtk##QT5"DJTJfdTHE4]"*tBXG4QMwq3i={/\A.eh7YN?hiYBp^p(1Ybt4`Hk\gI.ψj*y GuWM9 G#;a^+##LB(錛6kBQt؝qLfO1iE٢bBty]e||~TYi3=vԑOMp|tfdDTYQӆw4au}!^}u_T_ѲU&!}ĉ:ԾXt1_],"oo(I(騼!4$k~⤛zH[Ԩ ^wE/N7yu[~gW/%n+\κEoRRg&kE{C]㼛Q삈zQ΄N<<}s3&M}|snqCs:CxϗZym4D~RiCo*pS*Sy)chjڴYM"橕klz-ʺq<_@O]k,S< X3f|fzVP+ƛ^QF& ֳ>reDK#:by QSoWS5*жn{6E e"f|6x> |>>7{wn>g孛n'lK]z27N+nja;!yԛETFãWQJ͓zS3 $ !#78=bT[t]+ݖכz?1O6vӕmW~Kud M3;/K5~z:2yk czQ!fm[CG>?v>_ZoW?XxGh?޶ʨoam9-λ,uȮ.6k1.Bt5\oGNZyfq׽<]R1`~\z#: fIZq3-p QI[˽NȸޙZo*NiV^(J}KK+j[p6'մwz%w6 O<4tw\~wh-T2!(mvTI #!4zΫ3“8Q“H(KQ,lCB[~=Qy޿CEt/U9k=GD&MJ/k"M^^G XZn yk!<׹f*)kwn>"?N(׎/~PB_ǤS~G~Dǥg~я~ WƿS?S7_ʻ^ew^(,\MM6k1.Bt56eM{==4]c=++ke~;+n1HF#Z v5˒)QW쩈&|rFQ"RM鲿@*j.t^XM 5U,fնa7]¨16{/\yI^0lqH|Ď!" 
->@)OQU?Nk?:k-lU#;)WSvFxG7=Jx6kQC/}]>?|쒰N3AV/"&%k/5:^;FD,- )[O,{6{@vEhЇ>/>_e%,Qq^B]=}nxiocdh=DS+ ѝޢѣ??baX͚!!%ÙG~f\*Xb|"o^1m ZVuMk!<5WFME`(zx|~lRM 5%QTSIt$Bӫ#GW)Jrj}׎F&&f.ith"IkvբLtfYRVt+bZ,K\śG:ZDei3K(G5a$ש}kM^>2ϗm/}O|c=}k.QkUСɟaԚ!v,E޼byE-{wZ'GV2jH{eݴvSx36TPpF[b1Ew3(M/kn`?ןM^_g|ao-PQPUӶ!tVW.^i#mJ5%ԔpTSFQMf % ke~2TW}BA7Cl]M;DL{Zh eY"T3 jj~WJ8L$F9Oi GdhGj/XK{^I8Qx6kE).hʞTc[uT7w v遾* Wm!dXsoU#t9mUJK餛B)FQRG5=JH˔#>r%3_~ _W~=q{k5 Q|wSQ2-{utb@;|w&I8:DFKH g\ $B;qg25*|r{{gVN^~@>|ypb4}\O<տF"^9_3^//{b?GNZ~i_3.[)ECDrrewbZb] ՚.3֊uPj3+Ew&W*,ucYڝ/Pƕu \u_ @MJ m]MʨO`!W8]C>Ɔu: Ѷ)R޽EiX\tTB YbiӶJ؁bJi {uG5fSJP%$%^xt{x__*vj|NO {EgZ:b Q&h-cRXؤ]|w&pF8cBUTBѽI@ͨwEv¸YKغ`||se| >~[aشl˽__I eMsewY[)E|f$S|D2]ejB]nе(KJQ6r(tPSeBEE2$"/ጶ{rw9WXG2t*kwbNWb䍛- bkؒAjW2`)^_ !{ݾ¢jt|E 5.SܖϷVT}]Mg-YeJ:ƳYؽRZ^QMҨ)ԥO>Y}[=tZWrMLӓ]z-j5j8SXHFƤM@E "2kʒsgs;Q˯UC'k8JJwI8F9Mi3Z}]?裲>_Ç?n';|yif_SAM8v eiS"WH3f2ݻ,Dw3D(+[]BQc$ԢoMH8Z(4U$31f;ʜTFMWJ zBGA-f9r, VQqAګV Ee\i-M_%.Nk RsG;|n`rWwR"Wt^n*]N)!^[!am.o a5)oֽkDPI7fS .wS2 E St_")=FϳEFgjy}D䝩i(Zp5"2kʒKx*hYXHֈt(;3!jDeMZY2FM=fΖ",LDRBM[8LJ2ʌ BW;i!N{tQEy]FZ%T#MK eJAeV- iՔ!ƽ-T(v nޓ-w٩jm0|e PpwJ ] Ñ+#\\5VC?'>}}oΧaT};rt"R.<*ƚI &y )s秨wuDӦBM ']f%e<SItDFُ~z zm=v$|dot|}*"!nKT>˅V.6kq>_5L`TM騷/EQgY]EBVLWRTL{cqwƳ9#Qbb!]w(o.mb*pvZ@ImVT4.!B"ἢq^2N**.iG兓"MUIkD8 aҦtJw&آkkv

>`x>U<~;/,W֊:7;#܌uRBFWZ.|O?0}O>j[Zn:zxF]Q^"R.<*5uTtW ͨ2uYG:+*U伢pB]wtc]""eaQ=gQqӗ؂~zl=v73w2|nn_ A;wůꯖ{θi-[QV|E9IE7|*bd9IGT(;7-tQԞ'5޴)(錢9MZhQL ΛPZQƴ.VFHLe&(+4k2.mVєҢ.{QWx{bhjjvG3^i]Եq^h3i4){"=Bt}|>p[NrZBM EhǨc5etEi+k|pd?,3gQ} MҌ$\CzZ*ՅM.qƣ+MFWHI3*anѴ]H(Epc&]1*!Q_z$q9|ٿ)j~5{eo;o ;.QB]uv|K秢(J"Bn;:!YoſMdd&+uLд󊶝λ)|"b=B1*VT2uڬ '`WS1{Ӧ3z j*H 5=JI$T;.sA iV5夣i3B(-M 'qBڋ_X+2Ad"6k]g&Qϳ+CTWWQM##u4 M(.9ai!Wk#2/} @wShBE'%%<|uWQM#u4 MI;|:G  2n*v|EwE:XTBڽ"2fj^I] Mfdtig1W3eO=O9_f-)I2g+Kᦨ[fJ=Iy!6!/o6kBQhδ EspBXBnzPS]:EJ P6ޘٙRBmwrՔwINDFQeFG&Qe%HڸFw_eT>Y&yx =C-ũcppSX6*VR8"tw|Noڧ…w uluQSlU)XT'l+#!ܫL9nE4](CqyGSFB8ɠ(mgMFNie49w6?5>|?g쟭v-F@1i&U/(jN3i{M4UTo9Im})B5.Vuj^7wP0kË ] Qeٹɔ˽(j9)LH;iv K[=UɃ 9zoϷvPtFrim0n l哣)|ꙸ:H9jTwuLqF(*PSH(#GĄzᤣme${)UYh*^Qړ<yGSnFBD"XŠihilTгr݂+aw?~Ջ?^Z"W T $"bZkH(#-X&t2#hl*-tE}) 7+Pyj]1D1Qu2[Оx\ڽFM wYfIKA887۽;Y6NZ_ldשRugl=g QNJ=.JDٿkl+ST˔Qv4.I2uz<iWZ8f?gԐ;|;bk/wS>e΋gܔUXFM2njLj.=%b (S=VQX_\*ЧýƟ W@ZI7FDH9|d%)K B$yz7-"#"D=I`̰3>_"E>]vj 'd%,?ՙ61P'eWFxh M1%7UE4>4HQ϶2EuLu iG(ptj(3"-Ҍ[Sc6Nl'ۊ+ZݔVDݍi2AQ]v m2nJKkUS51PWϪ.0^=˄?.Δ#녚ʹi7k"P.S˗=&Ty{ppee%'#LB;*_.']??X6YMwG]"Pk!tRy'jn*hq4ΧDe9x8ވQ2E5+IjGzMM'Q;&vo_d>_&Yٞ\B(/ !|YU k6 EY2W PZL$2jBc]cB+/E Ukx^zQ͈LJZd>0>_'i _e1JZц<2:*]QV"Pk}Ig iR„v>jWvSĜ礇w3eO%=$30i .A٢kv6 إ.a5я~}k>_FZrNwš͹VDJFѣg H(*p>j" '52z+ *7kݴVE9eAdEНI9MG|}BQh=q=LIu #ß2J|>(jnYۧO7); j:jEufY|J\vttb62:$Bif|~)5|kO~RFږp ABF5ˡ{wF5IkB_hByET^)ΨLZőwSZO`_'Qv¨f,p22Q6S3!|CFoBBG3E4oV^f^>j1tH(>I}tFZ'BWJ`ꤛqRMM4LW@'ŧ$9׺73t=H3qIl=p]|mַN |?i|iE#-W都ܵFZ;`%NiqRH MQ,fD tFHGENDs!|}~K_KW :$ye" |4EZe:hFFgMwU>@Hët4#\tе,t{DykEuE;ShZSpk![Ov9`>O?]mI>_~ .|y{5S?22b/-o/m/.mN!!{c3Q^I5UeB=2*PFBc);vBXj{ZgM xvu6)e_tg{Iy1m)>_:}[^d1ʕNJʸ&)M"ꅵ)-H35m!B7= {.'%?s?WJ%aC?WF~^>_[KQܵ6{!-WUyyP,*,-T^ gT]i^h3Q`i32&;cv |>aO}o|yLcT뱩WS!jha31Pk"*ham"X>slA%Q5i X֏].}~=/mϗ{2v>|x_|Q@ziQJkJQygq3wS]·Y)(4ݻ]v?ϷL=81l=0tUWI}vBKE5C8eAtE \B[HZ3%JuJl`٥Ϗr̼l|# eN8.z|i{uyE",zqt$HT2ΤM-kʈ=)&Sf{88/uAk߻5tw9#%,ԫ>_! ;ֆ+iԴ.g1i^8ij LuJ$.}~s:#J̼'?){>_mآ<x0qAp8\8/gd{}ZHj[oݪ&5Xi4cs,?H3$c'Od|% ߢcMy9`;V[$9wړe=zpzN mYYMrRyetijm>gl^l~~Rќssc'a9?Ib>9P?9X*~B{i1Qi1]6`zwN؎9A:^O yǣStettrM-9?3͑"Ewx*~(??j+v Csbs3vQ?=Xз -6ūr~(Gn16'M@wv`S;m"x~ .ys~e믿r!tq=c"/8`zQ7=|] iߢ[lԱOr~)ȃ=hzϘ~t"/<ԧRwںE,/Gֿ\93>%Y~2Hs.-v֓ce?᝶%,_[tg}7霟yl{.3rCXcu쉃zL8~[۷$ks&KztƆ^E؎si|d)珝\Swj3澛hR|ͽ0θy߱qS{Oվ`kr?zoIٓt쏔~:hO;m"l{eȏGk?vSwj3澛"9Ӄ15Z 2/3Ӗve} }[nC)9?:{˙ia*r>{˽߿u0MPϴcۏw321LR[.hNO['pۇj_|=)z:eO>b[S |<9R=n{7}Weh37NkL1,1GOtthrK }[#O9GO1"aw~2ms;4iʴtewQ 錜7^ x+s~J=RtE.ػ$?:NkLڋrcȘN9t A8K_9iȏVw1ilL쟙f高~vNO$KuƗ>Yr~N/{47g63zL[/StoQW^du"G#\r>v˟OVߢi7TW37ڿߣiۜ+#\r>v߿#?qEe5ӱ'z9l\,9`;9? Fyߢq/r~wqY$|m|\97N O1[ruYDwqY o><Xr>vjǏ\p;ǘGRh r>6r>@mC~˜ߠb,uZӺӎ"o2,F؎lk #'ȞYZv gB؎5W$eo14̧a|ːB؎M=<E!l{_r>v7q!_r>v=׼˓B9rr>@!WN9`;VaSp|9?98r>@qޜpq|ceO&r>p|,9?\(9 |X.r>vB9rr>@۱jr>p|Cr>vj+J9`;|!l\99 |=L巉-p|.n𰻛-p|.[M\m+'p?w?Orr>@j \99 |pr>vWN9`;rv\9! 4>G`;|9C`;|>ݷ`9OsSߧݮ?nf|>͜꧚ݾ{SOG%9O4~pmݽ"r>_]ݝx9C`;|9C`;|9C`+}\X.IENDB`merTools/man/figures/README_unnamed-chunk-8-1.png0000644000176200001440000001721013466135366021076 0ustar liggesusersPNG  IHDRMR/PLTE:f:f?b?b?b333::::f????b???bb?MMMMMnMMMnMbb?bbb?bbbbffnMMnMnnMnnnnn???bbَMMMnMnMn:fbb?ٟٽ٫nMnnMȫf?ٽȎMٟbٽٟٽې:nfȎېP pHYsodIDATx {ƕ4Ic*^gVQ⤎rҪ[m"lJjF5Xsycux8z9B6(`e]((-JAR"(- ;hb,c K;v,ڞK#ںK#ںK#ںK#ںK#ںK#ںK#8@<؏ym\uǺ<j0׷2%iw MNl0{CЈ9Vt=C%4QzE+@/'<{Ep˜ɟž!/gx:%A'.;14OX ]LrR+@G\|zx*;oq?ޟG_<,dS*@N'GC$P*:WSL'dJ@`0 @{ƧזcN=bޛ ЫPcЙf M,yZ(w@+xgk]#N~:͌o䱖:, a1Vhb,hb,hb,hb,hb,&4Vb[ŷ8`T0cM7>VCJ@fTdE@fTdE@fTdE@fTdE@fTdE@fTdE@fTde?7KB@# Pc}u4~fA@ PI( лo_-dJ,@Lyv/>]2w̛| a,b,>9@C(}3% TdeNa<vX6@?YL h* ŲZ 2@%AXKoJ,@-YbP[,* "XzTd2!~4ljI( զ P,M'AXhVC@ Pc! 
Bl@/9¡xр*N;r&@+?"@5?}`u$έ*m-I@[Bt!$ʗ_V.lsN)J4/m-ud,d@@54XT#ڼjhN"+mI@5F4x'unPՈ6$#:(jufRP I@[,J@ h!з/!>Ȁj ̀^ ΀j ~6W2J@ 5Oŭ]ܴ?zuyL_3-oVuAGOo::')LK\_϶:v j!2SyՁP6g_Ћ{LR-nW摀n8WWan579m؉ ҈1Pw49m؉ h,,d@@5-t^3J@]jᔏ, '׵p]V@Ule @֠zy**A)z<- lXs@A>}?Z^PvhM*z?܉ _#h:@Wrys[# {Ш7oen ^twVy3t'2Ơ޶{FUmCAo?@u_YF[M\_mo=t0MmZ3;Ze2o'ǩiZf[6ȠS\f_ [+g@7TL/(tM#u2E/] skME*ZL+"tOhFo!U1 "~-j@@5egh̗&&9i[hՀj f=>{FPҎA|1h"jD@wP2$)DT#; ƈ$nvT+mAM.K?PP7Ћ]N<)zrVD%b ɰ;EPQXs% f[C@7ЋƠoOy5B (](q*1hދ[m^f?TXB\Ռ1 ꢹ/ڒ4sي+-tQ 7mЈCrǀ<5jZl@OOՀNo&,"рNth$T+eULt.t@}PZh+cP_dP),@1w{k&ɗp.-nP&@ @cf*5AD@; v g3I l[,buxebykvUgڳ؃wv;wjD@E@ &IIf"Tc+zT*@3\ft4yVHiK@53; s1>񾝫MeUB (gTc@@@y48Ɨ~-]$έ`@WGo&<ȇgnǽE6k-ulnwߓx΀^оv^i]Wgw6"}Fk8PZެIRέ4X0/SWϠwggԳPgk$' @&@sP|p'Tnf"&aйE@ hY`g3t Lc1tL@ hɀj hF4XP`Ph(% 1 "`PJ@Tc@@E@5"oؠR,[!e=h)ZF4XP`Ph(% 1 "`PJ@!g@@54XT#,J@jD@7 (isu$X4tg>Ab@"  :4'|h~ї=OҘ6'VǀoAjޅqNj6AŽ: vyOކWđ^^3mG/wZ h5@M@#\YrOƻƋFv,!PpdezxX@$<_ X@$*]$*]$*]$*]$*]$K/m Mh, dP41F_޵9(*=(E(\jA @'/OucPj]A)h{PA)0izaEh:UL\7KP_+h@c P,~t5 P,t5 P,t5 P,t5 P,t5 P,t5 P,o_ ?El@c P,o6ID,@L}ZlA Yb=_|<h]稈4SU],@=7,^i 5m {6@%AXY<JTdeDX@$hI$"AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AX)vK!XAfԮJXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTj6@%AXTF"JW* "JW* "JW* "JW* "JW* "JW*  Pc} 5VN`Oz hyF`Ͳ*j6@@bwРXq$k}>}q4/A$}jP* *p<+w @G]VNl6Lb&[f pK/ȸzr 2j$ilMV+X/Հ^s}=2 lkg>G:k)cPa$,~hVC@ PcY{PZ~dy^$%X1hYN c@j6@e[f*i-^@;,3X&TcM@?QX@fI]>FhMy;չh۱_͚tqr"5&_m59߱~:}q]h5lw{PWlkg3 Ybx5IB!}; h5V=طƻ Pc<\KXɗ4B@H(Vz@( Śxˎ"@$+$ X@$+2& H(_eZj6@%AXE (1@,l@@5lJ,@nTcktLAfTd5[fe W* Ś=ɰ;EPjČAa<4 H(iW.X@$˼̔n׼ɻQcid$}*X@],@=$"AX6@GO$"AX&@K|b YbLx* P^tfW* Zy*lJ,@<^2u5 Pe&)h@c P' PI(r4Es!fTd%+^e틣&*F@+-gkE^x߱X ƺZK6 y+Į_LEMX oǝO~h(kc,Pchq*h"xmӂ8GͿtmVAu6Acb8xu1AKjF̊w*qu/fe Ҷڰh_AֿMsn4XWYd/HIR!^I ]$4](4 H(y *͛1Ff@{yUd q~'$"AXYrX@$hOl@c P,\ū PI(y4 H(U PI(U PI(U PI(U PI(U PI(U PI(VJ@XƪSsNߒXOէXt "#@c9(E'JAR"(-=^!8ޝjbug\RfI/x/׏Wޝjbug7}o_W:XY,zy=>;TM,cibK@< r UbK@]X$'K4}Q f,˥^uP+K$E@)hP ZE@)huMM-T4hRdʲ_m,T}MoE{ҟ?$6T &'t?viD@S>Ͽoqħl"hoQh*?Ns6JŋRuTIdM&̔>ʲW(JAR"(-JAR"(-JARryZUIENDB`merTools/man/figures/README_FEsimPlot-1.png0000644000176200001440000001316013466135366020036 0ustar liggesusersPNG  IHDRMR/PLTE:f:f333::::f:MMMMMnMMMnMnMMffnMMnMnnMnnnnnnnMMMnMnMnnn:f۶nMnnnȫȫfȎMȎnȎې:nfȎȫېwT pHYsod;IDATx wיDe-QHj[1IczhQ/~ݍ$Bh c qj^/Y9,Aa ( A0hM/Z%6a& e"'0 A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4D}#'0lx,5xj'"hÆ&AakLGO` z>zttVW7GѨִk[rvw3a{'iVH^NG?6gs_?*o_4W݉@}q?ݻ47,NEsn R崽8ZH@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h A'ރtӆ;Ltܼ ǁLQЫ% KJR16"hAE|AГgU:8.mƥҮk6AA Ag3?8mq)=s:7v*w.hf.!~^qi+4XA XAA6m :xvM@PAǥxl]u 5@ݳrzeW=:{V\ooAW|ꚏu7A5zr%AA xsKo#ht +t 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h Zl A' e"~ffR5) zɣ%-|4Mda f&U{=zZZOh;04u,rmWiZPݶtN FT)T ep+f^t:ѝ:ٸ{]ܿ(u]oTЦ>] qm h+hM}(o[8;A*EuӞ ԇk+:nA8 AsVТ^DoAqsa29W]峹_X}-Pǽaw? A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j NJ?KE3& ]StQSy%AA! 
v"-9}փ"|]I@y h5"0MPśڎm!"mƃ7Bմ֕` Ϯ :AmY>r#4v|^O>hFO$h ouC%vaH@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4{'[/ǍWa&_n/'h Eo~x~{4`" 7AHaMoHIMwH3/oymA$,M~0S6+2kUѾ :Ti<'ia*+9.}ƥ弶^< >lY+;8M?]yZWk|z9x흻v|~K5xڎ%x=ċJv|v|rԘsu.FHۖضxnqg0Amd]vtu)CK'ͻxeW= 0.f?껫r o>uڎ 6;Iʝ&_vaڎw?l^C;A71a'h}ؾ ڽ_aH@PAއ헠Os7nؾ A{ 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"Acx74X^?S!p>IdݾB1zZ:O&"a5UkJl.\U%Qkf ^E]YLV.ޛW@t +hGw :}qs0]zR7Wa+6 \ݬ0]uܻK{4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h|۷Wiu T/m~p:uŝ* HG!hQU|AtmC)*G!hճYDi;.l^i|mI)Ho;8Lv o;@ =7S+ )(An;n9=#htv.CD h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4qStHT]U+" A||k]]̅"Ӻx? A5|4n?]Q{k>^B9,%h)xԕtw?47lC@s6|іtO;[PAs־H3 :FP [Kҳ]k OGOmII''/*VhScN&W4_^ŷŲ{/V!h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$h &4A j 5@D h"AA 4H@PM$\g^ƯЁ쾆!h"::3 kV+T悮nZ4t`;aZ!萇!hUː5I?//FG<: *4_.:0Z2GJwl;ժjU=h;ni9XX?. _ϭ:XA0jZO8j:t j}6s{4vt^q[Yl.eskUA0 AjҸWǿ}q<@.ѽ+ i+YrWA{K]wqY=Х;4uTgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>ۉ[ӟ06|7u|&g l׬;}[[hٖY&5_Xm2[52S٢v=X/ԭzS(Oݪk,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5| '[6chVaU/fl8`j@?2 bpfˆ{ G݇jjNFoAϞOvA{3X8rݼ]Aop{ŁySʢe.3]YV 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5xvc< 7]h6uUT]~ 4p a@>x}c,Z5|h>V 4EϢUgj@O4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢy~}Y?lp{Ug@&xq-z1Xz Zsˆ{ zϢ[!,ZugѪhu!~-z1X:?@olp{UgѪh@Yj,Zk^`ѪAYZ#г~fl8`@7?gOfl8`jC|iQ|8gˆ{  t^s|Sy`o`oCV 4E[,ZugѪh#E5oxϖ ,Z|h>V}3E~1E^fgѪh/h/hջh@YY_gѪY:/Ћ͔ĢUoϢ;NFcˆ{ _AI,Z$)EϢ9~\tpoy8>X!(¢[/ֹjλul8`Ѫh@Yj,ZL,ZB}>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V3w-Nuا}J79TQ0ɽZthNu OR Aҍz ng^ǨϢUgѪh@Yj,Z5|h>V 4EϢo -z1X8E3[6ch!\JL?MPh^j-q|:{:9_3vA{3XZoo4P Vk^f=Oe.xѧbXZc=|Fz!@),Z|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,ZmV 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@{5ndzY'Zv .݇b[Ofl2`zS^?O5uURzA ]5ghN@/߿uuaj@ߎ=[!@UM9|qV.-TŁ^+h{ ._aq?,ajׯ$EϢUgѪh@Yj,Z5|h>V 4EϢUgѪh@Yj,Zm0+YBPY5\6B5\6B,Pǩ3lcF:=]Zuͫ:gkwWmq.߷`)vY_ O[[EjQ~Nv觧jx/?MvV?ğYv:i3{VϠּnUp^tqTzEmSJ>?ݛG]ϛmq:XAzL]:ilvk~lcF:=lcF4eQ|׹q/O?0\? ?_wǿupUsk t:yN u]i}OzŴ:?.WuߦV3jU᯽o=;_4C?>)=wqxuw1VU=st)z@WIi'ӓၼ:G3Y>Um=ɣ1Еf~-4r,&? hxyXqc+hThVzs t;CY# ]0̜Oú$͓*Ac+$i t|iLEx*j:=lcF:=lcF:=lf1IENDB`merTools/man/figures/README_reSimplot-1.png0000644000176200001440000007317013466135366020161 0ustar liggesusersPNG  IHDRHc pHYsod IDATx{l8, .IhCAɚƂS)4aU]!hE6QM#?+l)IR{DJ" n!b\{<2{yg|>n><D"0 0Lr`a}Xa& aga$aaxaIBXa& aga$aa_u#vau;Kcp[-as|!J\LߖI_^һG˨\pZ0 0q?_YPASz2f+a)x3vkfs رcۖ;>{ u;tR@f: jЏ+oTcw{v󯫮^y郞^7maTMl0 1@D"irZY9k%SvM]`mmx=:EwNyw0 $#"}G} UNI_큲;oK8OICY4~0lv;5Um[~r@ٴ}MaNbpݣPv*n-29^'ZE"*ayPVVYvgYUS$9T ꪫwXWKWY;p˪~X&<uuuݥKSOi80 0GumO5!nkihU*fj:*}rۊ~0;\^n޼kW^T~;_JWNi eO~Cزo vݣPUTjD"a$/ƓePyuSt˾dȗw8k?%E%9>uKu;aWU+=[/U~osq{߲eWI 0ŝX=g(Ek|^m499IWv(irf?ީ?0 3H p9j4ƹqdW8WY&Dw4Yt5EPZ֕jj+[ڔ.4LtkYU0 hH$lpaqDX SRݎ5a-xsZ[wa)l^Gga<-xaIB؂ga$aaxaIBXa& I5k 3裏-<&hG_7ވ#0 ⩧#abHt v3 0L0 0I <0 $!, 0 3 0L0 0I <0 $!,m\9q 0Laga$옄yDJ0̥y߆Hџ5)0>-x~to.0̤yߏ_}xу-xU|a@wvֵfr5([4Qۙʢm5 n'aKٞz)ga$aaxaIBXa& aga$aa@$Y*s篫~p lxg?j͚5^=0|G hG4x|S]J_V'c#G"Ē5kּίSOnE~S[Omea&Vxf?;w|뗔3LHDEv2  U&`&`gf:N`Е={V(履~x0^x&A4ȼp =`0/ƍ ^;]+= ("ჃL):::-R"ܙ3gxv3  W+ރa|]yC>1L,!5{(_y챗KxZK.< G=ZZZ?Ϥ4}g:fך5k/xJ04gñյpB8{lFFF xJk׮aOGqyzy]urφ;qk_ ?&&&bx&p=<.m[ͯ cۣ> [n8Nag$Ai( N$OHO=G3L}Hp8<111992IrLk.^ppԩSBwZya ѵ݇T4g&! 
GcLx/ߪ~= øѭc@Uzs<3LZuW E%/_`$ $+U}͸1 }g>oC3qùE4g!Q"{{{[ZZp=444<<7 }'K}bb‹]Xd%9{(٤|WW9j6[0F{#~0Be-/~NO.^'|'`CXQ\vX ޭ~ tuu3g(ө;1IN_q `>`Pd|ag}ѮkS ̗K0A%&hټn;̣]&o˗/}>Nq/rͯ~GicΞ=KIw CX}h҄a:_ pXtwww###Vn>Lb8`gEBzW#3$࿢iс/^ xH8FE?wʊ ?`AA+AԔxA<-oGw1H… a<3+_ \1}hh䇇[ZZ?-bACIqqW:Qsܣba0I2JYP<9r?z[kƨF̝;݇acFt8Uk-ᑑu2'FEtŽ~p>-0Y$Ҋ;1u/О0]J,U BZiG0:35#hvxLZ]QYCCI<'3IQ'wvv∦88*Z CaJѱ+:16(###<* 'fW8<;~WE' *vQo-I@cwEwwvM$: n)zƌ{---W^U oIh&c3?S+VQM|Erڵkկ0!Q,%ҘSiYFuv)N |qCX1Y0BWun:Nx/6Ϧ<L`jڰ162ͥ7ưJ8Fu_Nze=v?ӈY܍l?;%\F_py*㢞.Ë'⺸oj]ǘy7q8Qr˗qԋ+~) 0/f5=ADX Dm8=4ɲnX<[,<JOd6W=ssʕ+$]TndQQ/^ꎘ,k[uF]]t\vWFtoQv2gϞr `Uus<:s*0}.UYw4mn-d`Jn^ǯҿccc7ɏe``| FE,t{zzDm֊7z+}.ʲS12ڕL\TVϵӦY}X{Qn}Te^ZƲk׮eh 03/ 5uKHo| <svYOkr&z ncvܩx:l٢lT@ zGE!cB:4 :FȀ6J~ B===&Kۋ10%Gzu%9:? LĕW ]Weł,OQ15N#pw1KlGtbUXUWZ`P*tE/B< yvN4E(nXrHilk3_\e0:uw;KǎV?-K7 dd[:ޝƫ3mk]^W1lsf2eў"K1&Q^D0*V\7]Y*JCBy1],Y7ɻuDNUp񾶫Қkwbէ[~2~&jKiAM{֋FJ* :;;ւ$9]b<DcOeuT {I T,x)>4{ؖ[F-WǎniX#G-cL:3lnѥ6rwttHw*###4h8u] vsM\ԙŋ]sXg^sY *婭ⰦHuu/ltQuQ]B%ڎC@[OsmOt[WN UerQp݊b<ޱ1jC]'yVaO?Ĩ/ݷz0;V徱Dp႓B;wwbWƷxW|Ie ϟ?B_C8qҥtCEI%e@3n7p3Čym:M]ݯ]"g`B0 8P^^2bwޱ}.zI1:t/`q,P9XzP(77.mt0Q\NNLfggS=;;N`:ɚEW XFF+%gu+yǝD]ݎollT,He֭eee&b̝[%|M>a%3]OT^IøקĴOk(ĵ>v)hf@D҉|*ff/=#LjqQս#Mb7ٽ{wccu~AhD$Ԩnc"ѡBwtt,-؋aqˤ]Pm 61]iJLZ^vImWnŋ̞=܅>p>-Ъ2 eb̿n:'%k^欄nFMY0]J熫tn(2ù5 t kXV62G߁mX c~EG5 Z'Rڌ wwTtzm'>@NL6u6L1gӃM_#WF֤hrc7 mWQŊN`ڒsS^Ȉ7^KNLL|G{0`OZ*nKjiW 9pv__6Nd/]T˗/_|,p\l2٤)%UOOꫝ[cq;h4ԥd3m<ňw1]+w]V1_~llW̤ϾrԳ'?:9'VȪCxbКpX/NW]]{n{w2? ׎$?g^99= /J;Y޻ 4BQg2d, IDATTJPhk}zU`:qdO5g nLnyDݯ^㰠/.yٳ_ hsw9RΌ7 n۲wu֨,Y5g ;;;EҰ+*NlNbXO]ޒx,ՠj{vڵ)))T3N. 144dtC0'onn6O?SAe_vM$uKP&sV)P4''GSUWڎhG>O+###YYY04GGGɔڥ|sӍYFO.^b=Ç8Kw=--㸆 ^WRk}W廝;weZJ,n#V<\W W?W]PGG`cT,W__0  uww"TW.㥱@)*>)]uBۤ+xI+] w7|@Ut⣏>'Qwoy`W6 jϒ;_9]z+#8ImߺuĄҥKuݧmt-鏟8/W=BˣJyyy4|s!OijD;FS__3ḇ1i*266޾b 2Ƕ#7otaeU<Jw%=,aU֑Uf6mrʎ߿\%yVLC4*3-/RDyp u۬獷(w]kc/&VfcVZAUtl#Sԛs\ZZ‚3hr5O*?ܹ.tQc.]gI7nX[[kv*OG)mX(f\1cd.ActT$dK)mX*Ok&]8$Nؾ}{OOn^Km|&k \Svw}$xwqң]@T#.J{UUUii%#fV_b֫W[b43_4:L88S(w;LtTBxn8FN!Ony_?++?33Hp89gڂr*ڪjbM$$}v.cCG(Y˪"w6j5U-|$Q[;::JJJT^WLVVqy׽񥥥eE=s7ݴiǣ߲eˉ'h} ҟ"t] ^mI>> )mR.(R=ڵkXommuAuGGFJQW.^(KOF]$(NFNxufڷD=)1ٳ~a8b>#N)}xxX4$Pkiڳ¥*6wxk׮yZ۷/^wuGHKKCw-ldW>Av6_8TJ;Xanpkw̱q/z/brru2N݄@ 0111k֬H$]AА2[\'qWȑ#| NYRSSaHx1^sS>S't|߻w+ױDVV֍7WR|TF*R]pmgEGoVaeI999k֬ٳgG"id;AFX08N1KUg sθ'%։?~m___JJJjjjffٳSRRRRR'&&&&&&''񘴴7o]rcd<̙3]7n88rH$:666::ZVVk.,~7K5~^*j<ܣv$5bSWBԍXZIm}sـybm쵠@Kc8ʲ2ڥrZifݾmQ%#9߮Q)..޺us0>nŊ/^tH+Vot=\z?f+,,ԞkwO:5JfN+xw2;:;;Gʋk^}뭷>ׯggg 233'&&{ƍ̂ᴴzFMv)ZLz6mz뭷6mҲi&ݪ5 *"`kɡvtt`L6$*j&QHՇwє=zsnWPPyf) nݽ\P9b1,]?̙3UC㢷G878~ӧ;::io7ollF"7ob? !qIV^kj0nR8֒3ڵk1)**z']_ː+Vn#ڍsuAV8ִ7oLMU3Ly)׾freed#O?l7 <ӯ\_XN &䂡1Uw se>|{ƌ{W ztG;S4+Vʕ+Aaxbm`II ~&RVrˢm߭XA[E@[ ו8rnG`3Ԛ{篫^yAu^)ym=aPLs>}ԞUFg[LٳKJJ:::1n.##)$~xx ߸q# .]{i"^H%lb}2}SGSU,L߳k@ccc&.zs;>r,Z詧}QѵuMv+[ܕ+WO3K{|߸zƍΜ9U1{j}?^Pme_wWqeCكͧ}PbG. И\' *3@:@&(@KgT[))Tx*, ~`%PFzoq z&@ == ܊Df@6@.@?@乪ٹ Wb<4_lv|Խ 54ohأX%DmX`| L8')O-%QwuV8Y0ÍYcxe< 0:@E2BB,@6]*S4+aC˖(Z(1Gg )X@v4F0jh g߸q#s7Sfd$[8]xvxfs#@*)97'-{6f\( 3z{6zz/@\ Z2b2g2-T `'){j|%gCaO+XWPme`s,JE%Kԟ7_/;;S ;$V/AK \ CCW Нigڵp̙A+CC<P\( [ʕ  A8 +V@g'|1<ÀWgٙ 0LfLNNݚ5kΜ9s22&''g͙|pZx0xpw7,XKۿb ѯ:'Q1/h@m?(nY !ŋy()`!CA _PP088RTb FjK#~?r%\~ve .}'Os--EEEpO/Ci?looo>mmm?Cv6twʕ00p ,Xcc@K@kql.<.1D/btb0 [>@c|.~y\}C3? Fs挎 dd9x{33at4 333k޼[Vd 9EE(*遜1^r<|PH_lZ_EjR|q_]i6Pgǟ|Fĉ-[<{ IDATyR*)zLXJ8uvn9{,qxJ{oooVVVFFF$tLuD"陙7o*))ٸq#Dt1N{(HM.\"6t=v$qeJbm|EjaIv\*aby$DK=deea:͛7ߨNƂ tb>'kV4^m_n2_T_?්;+Jz5?C=dƶ6|sޕ*ϟϮp8]RSS+hJJ k?>җ/xnx^^]GltwNuk7j_1KsիWS3g,]ԩS#UהT|Sĉmjkk?쳘zJJ=OJc(:XiC\Q2###K.%ǔl\gde{Ev6ciI︹oU?;yw4Տ?y+|)WrnMo/}ŏ? 
`M١YfONNΞ=X-[~{w%T!??_L' 6J n{g0, >~-7o?$ǾR^AAmP˗/S0Zׯ_`WW(؄ Hfoywc`m߾]ژufT_tgee"p R;`lwy \rE4%c{Ƶ#O뿪#?|OWm7Km.; l(_߭w5Cف&dAl{YgΟ?_{hamΝ;(],xCzݿ gddܺu oܸ1k֬p8<99OSoGw1n^}+:UI7 nѫA= $T YSGc=VG$ҙWG~xi?*((}57̯V|f핬E۾[W\;m0~d|츲~Yu Ν;^;{w]~====--- )33u}bbbrr2;;6/X,YXNq0㲺Y3qlj*{{{.^z=ŋԟѣG^:99"dee=֭7Fwdx k˓gggcĢp8G@i{ZF=׉iE%Ct-lrk/_U?~ql555q0榥7Ow' Eq1?|ɓ'Jo&8 C>\z5&/V`0H OU-hѢm6%%ɝN̝;_s̝;W< 5^k(++K^`J魂/I{ ͞E~ʧ57|3^n ^CٓH0u^J1]̓I/\ 8.\|E 5gΜ\YOKK Sӱh<|_w5;AQiҜ@ 3"77Hi\'9{,h* 㫖D B8ys ~u.ۺu+^n;6]D^Lx39_۳iHDQuؗWo448 &sXmmx_xֵkJkϽ|7 ExVV듓iiiyyy###7nW{npd 8texw!QǑrਮxWdw_7*EPUm0_vKCqFoqB`j6zNNdS;`O??O,ZF|WW`)ٳcΞ=k5]hnŊ}}}nBS8;;֭[P(===''gll =*й,5tL|{y{d^dN%5Qɦ1GtiW4srrz衕+W&z=lxt8x W-.6%Ā%KBٿ͠l&ygo^u[~yܬ]C2Rz: u".& ?ׯwE+zqq1z7N=FaիW`xx8 y[[[u=AQrWHzI_Mʴپp5Ň7rkMpQQ QΝ a9D;IB4v`' L^(vw]GmZ[[o.\:_M7ҥKR֔hfff$Ș7jm9o͞Aɏ*{q4RuԨxSrxsHWW… {lllٸͦ7o#m?bu ;;{ƍ~0Ou#̙ C]d&i_#d-Pj{8F?k88qW{z&SS<(Νڵ>t(\o޼yC7>>?zyqmƨoKO'6 .q <^^{ɻn^+kcccqIL{)ݜB U6drrr0Tt_Y999[ d,|PJ['dpemHΝSɃ߹s}ݡP(--m۶mĦ !ugYOگ-Fktwe]]] T/nw $r\ܛe#`B~GΝ;22;S([rrr͛Gd)*$^ǸwFGJ>¥KWdiNa ѕcIyzwRFKT]kSt9erFzNNӴ6(ؒ7-^h?`ir1 %14$y#^eQ\uu_z%Wztisi1Lh Zsĉ70Fq1H;{lcc(F+zwݫm?rtdOڌX}ddvQŹ ̣L~eŋ$@\8]mo]Xgvk)|믛X֮wg#f6Gp cll Z9qʕ+o;,8"{ujGzUFґR]L1ThA] ey 1,ڤ &%+rss1չ~^^AQ=^_y@ Z3vNul"u㵜;wμ"NtNG GWobRDV=##Cl.7?ui^JKK`zu4}" xB6,hHzJ9/jT@ݎfH6{wpB;nnd||\ۓ55V{%1b+#Gu5 ߵX43GžcH3揇:J044DnvJD}aCCC6WkG*>6E}C'<<, 6o===E׬yEs/ƿxKr6cε#O뿪#?|OWmW K/gZQcMWܹW_t;&ۧ fD ڵ2Lar sЀ\qwttݻݻ}v{T7:eΜ9Fp;ޝb;1O=;;[ ?KK~YYYh͋۵}~~!6{U*EV|WPE۾[~fu;JwW ~YUS+ Hldpmm-4Manw ^U+*8}yl]T裏\y۷ŋ]4ESnyKBۍvbx)ӞNڏcyyy&] w)D[II q]Wrru4)vu˿SJ;^ZǕPswV|4g`ΝsS,ǷJ 1p_8\ھ7lWUPVY\u۱n:O83gΠДw uV;bnSln ?ȓ*czz3h#s67;;[,.IMQ233%Újad/f.>o<@:m? q) +..nOb&/M&O[,eUM;BmM},8I1j.kpk׮9mUj]y9.*mWj-z#< +VhgQNeE]W\]ɖt ?:թzZ8ѩ.ZYLXѺ`]_`AwwՁI긝ƣ`#$v ̴3"N(;6l;VWg_WM(//zn1`oܪƳ1Z.ڈu2fn^!NVicvv6\IQqgffj' #''i }4d͋-1)/////h401ҪLe{hp|kU.[Mkjj2Lۭt|mw 5idrLUý$kV-u2%nu q\8G-&MiRUqhMyN. 
:m:>Dv'ϼu7Zt}^Wj6@YVﮠwܑS|;VWܼtR1@C]uN.6q>Ш%Vk%Tlwݬ1WJ6xo{)Y˪"?n) l6_M﮵ŝW"Ο?ب~b5*uÆjQw{Pcǎ KT6on~I'E1NŬ9Q+OÕ^n9X`I,8wU ŋ6gee0b$999.=.\؞??oPӤ$kmQw_1e{`Æ +HuL߰)fAv?sxKpr5܍Bbz%(.S__>x^Z J7j}CO»Em --755}.> !&8t3ρAbt@\ZmXqfQs3Eg1M܂[ߚ)TEFeWz{{Ņv."=-y|~~> =PŊ8pb3FַZFsEh#w3 ~YUS$Dj+*k#|*TO?RQs>FW 8Ҵ_4*/F5)bXN|g[KI!v]+ ,]Iy9mE4:fbL8?0u7Q/KT}N}`Dkb!WXz5~P+|m^|ˈ|>\$]WPTwt6CuaΝ0w\[dX]";;;77zNl[b4.((0מESl{ Fo5؆ᮋgi&W uQ KLp VS(m jyFFuR4>++Kbdmc@;Fi7o^NNҥKwZ;f0-Tw ^NNʕ+(MT(zҸAdUҿIDAT|e>.O^mizN>~euu7gx1uyzA6ux(D SոMmg;hc GmtLqqJϱ%1wƆL7*3b &{K[Z} \t_jBb\T|To<]A[VDݑLXut仈n,؟M qɡŞ+ Dsj ÔsG4{É[3Jϛ7 V{EK]}xAWXNtΓ.bB#.+‘%;Fw]ս歮mm3zSеɵ.֤6 #C\ͪMN3/+ S򜕕E/MSNGE1NIb[]AQ w:]Q T7Nl137i~ua"-Gm:w\zEyff&l͕@źRZCݥҹhmktwbu)10^QE{B<<~ɒ%Oo K)ŷ>W^IIeCٳg%銅Z 2ږn:^\\:ɰDϧ0x77Rvh-ڀGL&^Pj`~r[4!Pq!QzB.kD'D?:WbrTu ifeggbv~L}qqBWĘx"[hYͨu/cL`.\]+sg^|Q9P|6.>VQ2#x&&lwEU_^^.{;!IfERRnsPY%}%Fp*6x++{qy \ɎG /LLo"7[d{*u־>Is6Jw(e~Q?677ר]Xv^߲Į0= kyQQ$ۊ!4Bu#8vyWV1ǬT!m:TS+@"ڢS~ŋE],@f:LQiH[|F (rſOV EEu{-]VZVw=`_.}yc&bf!۶mE]D.{Ot۵' m$~VD1l^VVŊ.Og2Rk̗mw~@?og'IEgee-X`ɒ%-.v!0srf2I GQs={{6nܘT<5N=]Vvll,si1ng쟐𭂼x|{N=F{-~r+?3F@io=LzǗPݞ={ҥd'o޼ynQBXP|폣 ̹Q`̭Kg|&;vhz߹sg~k>t?Ž༛ h/{zOoתm B2om),|x6:zyN"-e ?cqk瞋&|ɤ]i٤5z31>st4Z5)dWg-K]97zkgE6lߣ(zG䄸o/FOkiccc?s c5x+d R=[0|ו绛Spʆ ңxǓ_-DNj5-ݘԞpU!s6QLkiq駟سgOuGFF4nmO ;Ӄy'擩@UIklM$7nܘ+K5vgj%gҒf֨k6_w\#gV#333Q&1|[Nn]KMW6T~TH:JGX+bJ7X<C6~yKf'cSeyR_|9V_|QsVmgJu'gZ[UgI:3U"2Kݢ G2ߓԏ*#Z/iLnw*6n(C\6-.F<ׯ |dd$=ifK~ $B ?a^^pܽ#Ev۰z/O=xtVqM;v4&a̺"]Ϫ vڷ*E4?tPrm߾=.o!ȀTt|-M| kT, 7o۶m˴3M6mjZ0Б5IҽV 5^7=Ǎm۶eNh\"Wfd(UNk|/{IUtc;֭[:0-d*)rq_~%j{$$aۦ{ϙjתg>j޴n]Ȯ]0VZ>9J< ::@{O758ZKV@..e˖Z%ZJ^_~ukhhqݺug+qo:iTN]F[l1ZY#W6zK6|?kTv2ONVPjݻw۶mugA&]edX'V3Ѕ`/_?ďGFFEq9V舋Z` Hw4.gF/nLTC̙O`-Ex?35;mmKw*#=Ķ108 ڶr-itt4]CB{DѳJ3mt駇TFBpTz֭[}مAPr_;{[]znXؘ+Gѣg?gt].ӎ/mv|V{W'<%.{ҩ(sˆ_tj~wݻ͛I|p5^曟|IE;wܽ{wG ԟu7nHwTm⃳ uŽbIaxIo]rgSi9fsZ\SE7 {^Ɂnݺu_6&<ZK_/m _}()ׯG[ߪ[msDCbnnhIv;-.GbSSS_u/O}5+^d< ZmK1::ڴ^&e2DQaÆhARwU}w%x@{/7xկZ5 2 & z'2G$z^D*VN.ݽ{o2F ~hh7O7 o #໖Oտ9jr(ڴiS?^y*ZC7;;{ z[ivv6Y;;Oq'Ѯ Om\_LgP |J@ZE>j+j\\<:tA={8o9kǠ|]0]pxVÁ{˗(:}*|6*[Ehm z{ОHMk(h1)P6_!U x=y[)Š|er.\yU{fkǠ|9T0 o;-#֠A"̜Av޽2CZoFomzB8 o`bA</GqQ`!<T )ͰB x R%B x^=Z(( PA|OZ;:Tw~ҕ?OO;_BCn߼MO7/G^?<-/OoH ѕha>@Co}5ו\zʽw^yw}x|715]y;y:U^|R2&w틾&S/9`P x]k.@t}7~`` xWWN`ܧ'&{/}X^|ˋ[z#+pUCz_O %GNd71hswON-ć8K;_} K.z *H *H@ x $<TпD) 7IENDB`merTools/man/hsb.Rd0000644000176200001440000000340113460722176013702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merTools-package.r \docType{data} \name{hsb} \alias{hsb} \title{A subset of data from the 1982 High School and Beyond survey used as examples for HLM software} \format{A data frame with 7,185 observations on the following 8 variables. 
\describe{
  \item{\code{schid}}{a numeric vector, 160 unique values}
  \item{\code{mathach}}{a numeric vector for the performance on a standardized math assessment}
  \item{\code{female}}{a numeric vector coded 0 for male and 1 for female}
  \item{\code{ses}}{a numeric measure of student socio-economic status}
  \item{\code{minority}}{a numeric vector coded 0 for white and 1 for non-white students}
  \item{\code{schtype}}{a numeric vector coded 0 for public and 1 for private schools}
  \item{\code{meanses}}{a numeric, the average SES for each school in the data set}
  \item{\code{size}}{a numeric for the number of students in the school}
}}
\source{
Data made available by UCLA Institute for Digital Research and Education (IDRE)
online: \url{https://stats.idre.ucla.edu/other/hlm/hlm-mlm/introduction-to-multilevel-modeling-using-hlm}
}
\usage{
hsb
}
\description{
A key dataset used for examples in the HLM software manual. Included here for
use in replicating HLM analyses in R.
}
\details{
The data file used for this presentation is a subsample from the 1982 High
School and Beyond Survey and is used extensively in Hierarchical Linear Models
by Raudenbush and Bryk. It consists of 7,185 students nested in 160 schools.
}
\examples{
data(hsb)
head(hsb)
}
\references{
Stephen W. Raudenbush and Anthony S. Bryk (2002). Hierarchical Linear Models:
Applications and Data Analysis Methods (2nd ed.). SAGE.
}
\keyword{datasets}
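The nesting described above (students within schools) is what makes hsb useful
for multilevel examples. The lines below are a minimal editor's sketch -- an
illustration added here, not part of the package's Rd sources -- of a
random-intercept model in the spirit of Raudenbush and Bryk, using only the
column names documented above:

# Editor's illustrative sketch (hypothetical usage, not shipped with the package)
library(lme4)
library(merTools)
data(hsb)
# math achievement as a function of student SES, with a random intercept
# for each of the 160 schools (schid)
fm_hsb <- lmer(mathach ~ ses + (1 | schid), data = hsb)
summary(fm_hsb)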
merTools/man/REsim.Rd0000644000176200001440000000260113460722176014146 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merExtract.R
\name{REsim}
\alias{REsim}
\title{Simulate random effects from merMod}
\usage{
REsim(merMod, n.sims = 200, oddsRatio = FALSE, seed = NULL)
}
\arguments{
\item{merMod}{a merMod object from the lme4 package}

\item{n.sims}{number of simulations to use}

\item{oddsRatio}{logical, should parameters be converted to odds ratios?}

\item{seed}{numeric, optional argument to set seed for simulations}
}
\value{
a data frame with the following columns
\describe{
  \item{\code{groupFctr}}{Name of the grouping factor}
  \item{\code{groupID}}{Level of the grouping factor}
  \item{\code{term}}{Name of random term (intercept/coefficient)}
  \item{\code{mean}}{Mean of the simulations}
  \item{\code{median}}{Median of the simulations}
  \item{\code{sd}}{Standard deviation of the simulations, \code{NA} if \code{oddsRatio=TRUE}}
}
}
\description{
\code{REsim} simulates random effects from merMod object posterior distributions
}
\details{
Uses the Gelman sim technique to build empirical Bayes estimates.
Uses the sim function in the arm package
}
\examples{
require(lme4)
m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
re2 <- REsim(m2, 25)
head(re2)
}
merTools/man/RHSForm.Rd0000644000176200001440000000040313460722176014405 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{RHSForm}
\alias{RHSForm}
\title{Parse merMod formulas}
\usage{
RHSForm(form, as.form = FALSE)
}
\description{
Parse merMod formulas
}
\keyword{internal}
merTools/man/shinyMer.Rd0000644000176200001440000000144613460722176014733 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinyMer.R
\name{shinyMer}
\alias{shinyMer}
\title{Launch a shiny app to explore your merMod interactively}
\usage{
shinyMer(merMod, simData = NULL, pos = 1)
}
\arguments{
\item{merMod}{An object of class "merMod".}

\item{simData}{A data.frame to make predictions from (optional). If NULL,
then the user can only make predictions using the data in the frame slot
of the merMod object.}

\item{pos}{The position of the environment to export function arguments to.
Defaults to 1, the global environment, to allow shiny to run.}
}
\value{
A shiny app
}
\description{
\code{shinyMer} launches a shiny app that allows you to interactively
explore an estimated merMod using functions from \code{merTools}.
}
merTools/man/stripAttributes.Rd0000644000176200001440000000073213460722176016342 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merData.R
\name{stripAttributes}
\alias{stripAttributes}
\title{Remove attributes from a data.frame}
\usage{
stripAttributes(data)
}
\arguments{
\item{data}{a data.frame}
}
\value{
a data frame with variable names cleaned to remove all attributes except
for names, row.names, and class
}
\description{
Strips attributes off of a data frame that come with a merMod model.frame
}
merTools/man/averageObs.Rd0000644000176200001440000000237613460722176015214 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merData.R
\name{averageObs}
\alias{averageObs}
\title{Find the average observation for a merMod object}
\usage{
averageObs(merMod, varList = NULL, origData = NULL, ...)
}
\arguments{
\item{merMod}{a merMod object}

\item{varList}{optional, a named list of conditions to subset the data on}

\item{origData}{(default=NULL) a data frame containing the original,
untransformed data used to call the model. This MUST be specified if the
original variables used in formula function calls are NOT present as
'main effects'.}

\item{...}{not used currently}
}
\value{
a data frame with a single row for the average observation, but with full
factor levels. See details for more.
}
\description{
Extract a data frame of a single row that represents the average
observation in a merMod object. This function also allows the user
to pass a series of conditioning arguments to calculate the average
observation conditional on other characteristics.
}
\details{
Each character and factor variable in the data.frame is assigned to the modal
category and each numeric variable is collapsed to the mean. Currently if
mode is a tie, returns a "." Uses the collapseFrame function.
}
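Because averageObs.Rd ships no example section, the lines below are a brief
editor's sketch (an illustration under the behavior documented above, not part
of the package's Rd sources) of how the collapsed "average observation" is
typically pulled from a fitted model:

# Editor's illustrative sketch (hypothetical usage, not shipped with the package)
library(lme4)
library(merTools)
fm <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
# one-row frame: numeric columns collapsed to their means, factors to a level
avg <- averageObs(fm)
avg
# varList (documented above) can condition the average on a subset,
# e.g. a single Subject level -- list format assumed from the docs above
averageObs(fm, varList = list("Subject" = "308"))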
} merTools/man/expectedRank.Rd0000644000176200001440000001116013460722176015544 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merExpectedRank.R \name{expectedRank} \alias{expectedRank} \title{Calculate the expected rank of random coefficients that account for uncertainty.} \usage{ expectedRank(merMod, groupFctr = NULL, term = NULL) } \arguments{ \item{merMod}{An object of class merMod} \item{groupFctr}{An optional character vector specifying the name(s) the grouping factor(s) over which the random coefficient of interest varies. This is the variable to the right of the pipe, \code{|}, in the [g]lmer formula. This parameter is optional. If none is specified all terms will be returned.} \item{term}{An optional character vector specifying the name(s) of the random coefficient of interest. This is the variable to the left of the pipe, \code{|}, in the [g]lmer formula. Partial matching is attempted on the intercept term so the following character strings will all return rankings based on the intercept (\emph{provided that they do not match the name of another random coefficient for that factor}): \code{c("(Intercept)", "Int", "intercep", ...)}.} } \value{ A data.frame with the following five columns: \describe{ \item{groupFctr}{a character representing name of the grouping factor} \item{groupLevel}{a character representing the level of the grouping factor} \item{term}{a character representing the formula term for the group} \item{estimate}{effect estimate from \code{lme4::ranef(, condVar=TRUE)}).} \item{std.error}{the posterior variance of the estimate random effect (from \code{lme4::ranef(, condVar=TRUE)}); named "\code{term}"_var.} \item{ER}{The expected rank.} \item{pctER}{The percentile expected rank.} } } \description{ \code{expectedRank} calculates the expected rank and the percentile expected rank of any random term in a merMod object. A simple ranking of the estimated random effects (as produced by \code{\link[lme4]{ranef}}) is not satisfactory because it ignores any amount of uncertainty. } \details{ Inspired by Lingsma et al. (2010, see also Laird and Louis 1989), expectedRank sums the probability that each level of the grouping factor is greater than every other level of the grouping factor, similar to a two-sample t-test. The formula for the expected rank is: \deqn{ExpectedRank_i = 1 + \sum \phi((\theta_i - \theta_k) / \sqrt(var(\theta_i)+var(\theta_k))} where \eqn{\phi} is the standard normal distribution function, \eqn{\theta} is the estimated random effect and \eqn{var(\theta)} is the posterior variance of the estimated random effect. We add one to the sum so that the minimum rank is one instead of zero so that in the case where there is no overlap between the variances of the random effects (or if the variances are zero), the expected rank equals the actual rank. The ranks are ordered such that the winners have ranks that are greater than the losers. The formula for the percentile expected rank is: \deqn{100 * (ExpectedRank_i - 0.5) / N_grps} where \eqn{N_grps} is the number of grouping factor levels. The percentile expected rank can be interpreted as the fraction of levels that score at or below the given level. NOTE: \code{expectedRank} will only work under conditions that \code{lme4::ranef} will work. One current example of when this is \emph{not} the case is for models when there are multiple terms specified per factor (e.g. uncorrelated random coefficients for the same term, e.g. 
\code{lmer(Reaction ~ Days + (1 | Subject) + (0 + Days | Subject), data = sleepstudy)}) } \examples{ #For a one-level random intercept model m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) (m1.er <- expectedRank(m1)) #For a one-level random intercept model with multiple random terms m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #ranked by the random slope on Days (m2.er1 <- expectedRank(m2, term="Days")) #ranked by the random intercept (m2.er2 <- expectedRank(m2, term="int")) \donttest{ #For a two-level model with random intercepts m3 <- lmer(y ~ service * dept + (1|s) + (1|d), InstEval) #Ranked by the random intercept on 's' (m3.er1 <- expectedRank(m3, groupFctr="s", term="Intercept")) } } \references{ Laird NM and Louis TA. Empirical Bayes Ranking Methods. \emph{Journal of Education Statistics}. 1989;14(1)29-46. Available at \url{http://www.jstor.org/stable/1164724}. Lingsma HF, Steyerberg EW, Eijkemans MJC, et al. Comparing and ranking hospitals based on outcome: results from The Netherlands Stroke Survey. \emph{QJM: An International Journal of Medicine}. 2010;103(2):99-108. doi:10.1093/qjmed/hcp169 } merTools/man/formulaBuild.Rd0000644000176200001440000000072313460722176015557 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{formulaBuild} \alias{formulaBuild} \title{Clean formula} \usage{ formulaBuild(model) } \arguments{ \item{model}{a merMod object from lme4} } \value{ a formula object } \description{ a function to modify the formula for a merMod object to create a model matrix with all predictor terms in both the group level and fixed effect level } \keyword{internal} merTools/man/modelFixedEff.Rd0000644000176200001440000000167513460722176015642 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{modelFixedEff} \alias{modelFixedEff} \title{Extract averaged fixed effect parameters across a list of merMod objects} \usage{ modelFixedEff(modList, ...) } \arguments{ \item{modList}{an object of class merModList} \item{...}{additional arguments to pass to \code{\link{tidy}}} } \value{ a data.frame of the averaged fixed effect parameters } \description{ Extract averaged fixed effect parameters across a list of merMod objects } \details{ The Rubin correction for combining estimates and standard errors from Rubin (1987) is applied to adjust for the within and between imputation variances. 
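As a sketch of the standard Rubin (1987) combining rules assumed here, with \eqn{m}
component models, per-model estimates \eqn{Q_i}, and squared standard errors
\eqn{U_i}, the pooled estimate and its total variance take the form
\deqn{\bar{Q} = \frac{1}{m}\sum_{i=1}^{m} Q_i, \qquad T = \bar{U} + \left(1 + \frac{1}{m}\right)B}
where \eqn{\bar{U}} is the mean of the \eqn{U_i} (the within-imputation variance) and
\eqn{B} is the variance of the \eqn{Q_i} across models (the between-imputation
variance); the pooled standard error is then \eqn{\sqrt{T}}.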
} \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) modelFixedEff(mod) } merTools/man/levelfun.Rd0000644000176200001440000000041613460722176014751 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{levelfun} \alias{levelfun} \title{Parse merMod levels} \usage{ levelfun(x, nl.n, allow.new.levels = FALSE) } \description{ Parse merMod levels } \keyword{internal} merTools/man/thetaExtract.Rd0000644000176200001440000000120213460722176015563 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/subBoot.R \name{thetaExtract} \alias{thetaExtract} \title{Extract theta parameters from a merMod model} \usage{ thetaExtract(merMod) } \arguments{ \item{merMod}{a valid merMod object} } \value{ a vector of the covariance, theta, parameters from a \code{\link{merMod}} } \description{ A convenience function that returns the theta parameters for a \code{\link{merMod}} object. } \examples{ (fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)) thetaExtract(fm1) #(a numeric vector of the covariance parameters) } \seealso{ merMod } merTools/man/print.merModList.Rd0000644000176200001440000000120613460722176016341 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{print.merModList} \alias{print.merModList} \title{Summarize a merMod list} \usage{ \method{print}{merModList}(x, ...) } \arguments{ \item{x}{a modList of class merModList} \item{...}{additional arguments} } \value{ a summary object of model information } \description{ Summarize a merMod list } \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) summary(mod) } merTools/man/RMSE.merMod.Rd0000644000176200001440000000123013460722176015114 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merExtract.R \name{RMSE.merMod} \alias{RMSE.merMod} \title{Estimate the Root Mean Squared Error (RMSE) for a lmerMod} \usage{ RMSE.merMod(merMod, scale = FALSE) } \arguments{ \item{merMod}{a lmerMod object from the lme4 package} \item{scale}{logical, should the result be returned on the scale of response variable standard deviations?} } \value{ a numeric which represents the RMSE } \description{ Extract the Root Mean Squared Error for a lmerMod object } \examples{ require(lme4) m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) RMSE.merMod(m2) } merTools/man/plotREsim.Rd0000644000176200001440000000374413460722176015056 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merPlots.R \name{plotREsim} \alias{plotREsim} \title{Plot the results of a simulation of the random effects} \usage{ plotREsim(data, level = 0.95, stat = "median", sd = TRUE, sigmaScale = NULL, oddsRatio = FALSE, labs = FALSE, facet = TRUE) } \arguments{ \item{data}{a data.frame generated by \code{\link{REsim}} with simulations of the random effects of a \code{\link{merMod}}} \item{level}{the width of the confidence interval} \item{stat}{a character value indicating the variable name in data of the midpoint of the estimated interval, e.g. 
"mean" or "median"} \item{sd}{a logical indicating whether or not to plot error bars around the estimates (default is TRUE). Calculates the width of the error bars based on \code{level} and the variable named "sd" in \code{data}} \item{sigmaScale}{a numeric value to divide the estimate and the standard deviation by in the case of doing an effect size calculation} \item{oddsRatio}{logical, should the parameters be converted to odds ratios before plotting} \item{labs}{logical, include the labels of the groups on the x-axis} \item{facet}{Accepts either logical (\code{TRUE}) or \code{list} to specify which random effects to plot. If \code{TRUE}, facets by both \code{groupFctr} and \code{term}. If \code{list} selects the panel specified by the named elements of the list} } \value{ a ggplot2 plot of the coefficient effects } \description{ Plot the simulated random effects on a ggplot2 chart. Points that are distinguishable from zero (i.e. the confidence band based on \code{level} does not cross the red line) are highlighted. Currently, the plots are ordered according to the grouping factor. } \examples{ fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) (p1 <- plotREsim(REsim(fm1))) #Plot just the random effects for the Days slope (p2 <- plotREsim(REsim(fm1), facet= list(groupFctr= "Subject", term= "Days"))) } merTools/man/modelInfo.Rd0000644000176200001440000000133013460722176015041 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{modelInfo} \alias{modelInfo} \title{Extract model information from a merMod} \usage{ modelInfo(object) } \arguments{ \item{object}{a merMod object} } \value{ Simple summary information about the object, number of observations, number of grouping terms, AIC, and residual standard deviation } \description{ Extract model information from a merMod } \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) modelInfo(mod[[1]]) lapply(mod, modelInfo) } merTools/man/sum.mm.Rd0000644000176200001440000000074713460722176014354 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{sum.mm} \alias{sum.mm} \title{Title} \usage{ \method{sum}{mm}(object, correlation = (p <= getOption("lme4.summary.cor.max")), use.hessian = NULL, ...) 
} \arguments{ \item{object}{a merMod object} \item{correlation}{optional p value} \item{use.hessian}{logical} \item{...}{additional arguments to pass through} } \value{ a summary of the object } \description{ Title } merTools/man/subBoot.Rd0000644000176200001440000000234113466135366014552 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/subBoot.R \name{subBoot} \alias{subBoot} \title{Bootstrap a subset of an lme4 model} \usage{ subBoot(merMod, n = NULL, FUN, R = 100, seed = NULL, warn = FALSE) } \arguments{ \item{merMod}{a valid merMod object} \item{n}{the number of rows to sample from the original data in the merMod object, by default will resample the entire model frame} \item{FUN}{the function to apply to each bootstrapped model} \item{R}{the number of bootstrap replicates, default is 100} \item{seed}{numeric, optional argument to set seed for simulations} \item{warn}{logical, if TRUE, warnings from lmer will be issued, otherwise they will be suppressed default is FALSE} } \value{ a data.frame of parameters extracted from each of the R replications. The original values are appended to the top of the matrix. } \description{ Bootstrap a subset of an lme4 model } \details{ This function allows users to estimate parameters of a large merMod object using bootstraps on a subset of the data. } \examples{ \donttest{ (fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)) resultMatrix <- subBoot(fm1, n = 160, FUN = thetaExtract, R = 20) } } merTools/man/modelRandEffStats.Rd0000644000176200001440000000124113460722176016473 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{modelRandEffStats} \alias{modelRandEffStats} \title{Extract data.frame of random effect statistics from merMod List} \usage{ modelRandEffStats(modList) } \arguments{ \item{modList}{a list of multilevel models} } \value{ a data.frame } \description{ Extract data.frame of random effect statistics from merMod List } \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) modelRandEffStats(mod) } merTools/man/randomObs.Rd0000644000176200001440000000144713460722176015062 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{randomObs} \alias{randomObs} \title{Select a random observation from model data} \usage{ randomObs(merMod, varList, seed = NULL) } \arguments{ \item{merMod}{an object of class merMod} \item{varList}{optional, a named list of conditions to subset the data on} \item{seed}{numeric, optional argument to set seed for simulations} } \value{ a data frame with a single row for a random observation, but with full factor levels. See details for more. 
} \description{ Select a random observation from the model frame of a merMod } \details{ Each factor variable in the data frame has all factor levels from the full model.frame stored so that the new data is compatible with predict.merMod } merTools/man/subsetList.Rd0000644000176200001440000000070513460722176015273 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{subsetList} \alias{subsetList} \title{Subset a data.frame using a list of conditions} \usage{ subsetList(data, list) } \arguments{ \item{data}{a data.frame} \item{list}{a named list of splitting conditions} } \value{ a data frame with values that match the conditions in the list } \description{ Split a data.frame by elements in a list } merTools/man/draw.Rd0000644000176200001440000000265613460722176014076 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{draw} \alias{draw} \alias{draw.merMod} \title{Draw a single observation out of an object matching some criteria} \usage{ draw(object, type = c("random", "average"), varList = NULL, seed = NULL, ...) \method{draw}{merMod}(object, type = c("random", "average"), varList = NULL, seed = NULL, ...) } \arguments{ \item{object}{the object to draw from} \item{type}{what kind of draw to make. Options include random or average} \item{varList}{a list specifying filters to subset the data by when making the draw} \item{seed}{numeric, optional argument to set seed for simulations, ignored if type="average"} \item{...}{additional arguments required by certain methods} } \value{ a data.frame with a single row representing the desired observation } \description{ Draw is used to select a single observation out of an R object. Additional parameters allow the user to control how that observation is chosen in order to manipulate that observation later. This is a generic function with methods for a number of objects. } \details{ In cases of tie, ".", may be substituted for factors. } \examples{ fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) # Random case draw(fm1, type = "random") # Average draw(fm1, type = "average") # Subset draw(fm1, type = "average", varList = list("Subject" = "308")) } merTools/man/merModList.Rd0000644000176200001440000000350013460722176015205 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{lmerModList} \alias{lmerModList} \alias{blmerModList} \alias{glmerModList} \alias{bglmerModList} \title{Apply a multilevel model to a list of data frames} \usage{ lmerModList(formula, data, parallel = FALSE, ...) blmerModList(formula, data, parallel = FALSE, ...) glmerModList(formula, data, parallel = FALSE, ...) bglmerModList(formula, data, parallel = FALSE, ...) } \arguments{ \item{formula}{a formula to pass through compatible with merMod} \item{data}{a list object with each element being a data.frame} \item{parallel}{logical, should the models be run in parallel? Default FALSE. If so, the `future_lapply` function from the `future.apply` package is used. 
See details.} \item{...}{additional arguments to pass to the estimating function} } \value{ a list of fitted merMod objects of class merModList a merModList a merModList a merModList } \description{ Apply a multilevel model to a list of data frames Apply a Bayesian multilevel model to a list of data frames Apply a generalized linear multilevel model to a list of data frames Apply a Bayesian generalized linear multilevel model to a list of data frames } \details{ Parallel computing is provided by the `futures` package, and its extension the `future.apply` package to provide the `future_lapply` function for easy parallel computations on lists. To use this package, simply register a parallel backend using the `plan()` function from `futures` - an example is to use `plan(multisession)` } \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) summary(mod) } merTools/man/VarCorr.merModList.Rd0000644000176200001440000000222113460722176016561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{VarCorr.merModList} \alias{VarCorr.merModList} \title{Extract the variances and correlations for random effects from a merMod list} \usage{ \method{VarCorr}{merModList}(x, sigma = 1, rdig = 3L) } \arguments{ \item{x}{for \code{VarCorr}: a fitted model object, usually an object inheriting from class \code{\linkS4class{merMod}}. For \code{as.data.frame}, a \code{VarCorr.merMod} object returned from \code{VarCorr}.} \item{sigma}{an optional numeric value used as a multiplier for the standard deviations.} \item{rdig}{the number of digits to round to, integer} } \value{ a list with two elements "stddev" and "correlation" for the standard deviations and correlations averaged across models in the list } \description{ Extract the variances and correlations for random effects from a merMod list } \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) VarCorr(mod) } merTools/man/collapseFrame.Rd0000644000176200001440000000114013460722176015701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{collapseFrame} \alias{collapseFrame} \title{Collapse a dataframe to a single average row} \usage{ collapseFrame(data) } \arguments{ \item{data}{a data.frame} } \value{ a data frame with a single row } \description{ Take an entire dataframe and summarize it in one row by using the mean and mode. } \details{ Each character and factor variable in the data.frame is assigned to the modal category and each numeric variable is collapsed to the mean. Currently if mode is a tie, returns a "." 
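A rough sketch of the intended collapse, not the internal implementation, using the
built-in \code{iris} data:
\preformatted{
data(iris)
# numeric columns collapse to their mean
num_means <- colMeans(iris[, sapply(iris, is.numeric)])
# character/factor columns collapse to their modal level; note that
# which.max() silently breaks ties, whereas collapseFrame documents
# returning "." (iris$Species is an exact three-way tie)
modal_species <- names(which.max(table(iris$Species)))
num_means
modal_species
}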
} merTools/man/predictInterval.Rd0000644000176200001440000001414413460722176016273 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merPredict.R \name{predictInterval} \alias{predictInterval} \title{Predict from merMod objects with a prediction interval} \usage{ predictInterval(merMod, newdata, which = c("full", "fixed", "random", "all"), level = 0.8, n.sims = 1000, stat = c("median", "mean"), type = c("linear.prediction", "probability"), include.resid.var = TRUE, returnSims = FALSE, seed = NULL, .parallel = FALSE, .paropts = NULL, fix.intercept.variance = FALSE, ignore.fixed.terms = NULL) } \arguments{ \item{merMod}{a merMod object from lme4} \item{newdata}{a data.frame of new data to predict} \item{which}{a character specifying what to return, by default it returns the full interval, but you can also select to return only the fixed variation or the random component variation. If full is selected the resulting data.frame will be \code{nrow(newdata) * number of model levels} long} \item{level}{the width of the prediction interval} \item{n.sims}{number of simulation samples to construct} \item{stat}{take the median or mean of simulated intervals} \item{type}{type of prediction to develop} \item{include.resid.var}{logical, include or exclude the residual variance for linear models} \item{returnSims}{logical, should all n.sims simulations be returned?} \item{seed}{numeric, optional argument to set seed for simulations} \item{.parallel, }{logical should parallel computation be used, default is FALSE} \item{.paropts, }{-NOT USED: Caused issue #54- a list of additional options passed into the foreach function when parallel computation is enabled. This is important if (for example) your code relies on external data or packages: use the .export and .packages arguments to supply them so that all cluster nodes have the correct environment set up for computing.} \item{fix.intercept.variance}{logical; should the variance of the intercept term be adjusted downwards to roughly correct for its covariance with the random effects, as if all the random effects are intercept effects?} \item{ignore.fixed.terms}{a numeric or string vector of indexes or names of fixed effects which should be considered as fully known (zero variance). This can result in under-conservative intervals, but for models with random effects nested inside fixed effects, holding the fixed effects constant intervals may give intervals with closer to nominal coverage than the over-conservative intervals without this option, which ignore negative correlation between the outer (fixed) and inner (random) coefficients.} } \value{ a data.frame with three columns: \describe{ \item{\code{fit}}{The center of the distribution of predicted values as defined by the \code{stat} parameter.} \item{\code{lwr}}{The lower prediction interval bound corresponding to the quantile cut defined in \code{level}.} \item{\code{upr}}{The upper prediction interval bound corresponding to the quantile cut defined in \code{level}.} } If returnSims = TRUE, then the individual simulations are attached to this data.frame in the attribute \code{sim.results} and are stored as a matrix. } \description{ This function provides a way to capture model uncertainty in predictions from multi-level models fit with \code{lme4}. 
By drawing a sampling distribution for the random and the fixed effects and then estimating the fitted value across that distribution, it is possible to generate a prediction interval for fitted values that includes all variation in the model except for variation in the covariance parameters, theta. This is a much faster alternative than bootstrapping for models fit to medium to large datasets. } \details{ To generate a prediction interval, the function first computes a simulated distribution of all of the parameters in the model. For the random, or grouping, effects, this is done by sampling from a multivariate normal distribution which is defined by the BLUP estimate provided by \code{ranef} and the associated variance-covariance matrix for each observed level of each grouping terms. For each grouping term, an array is build that has as many rows as there are levels of the grouping factor, as many columns as there are predictors at that level (e.g. an intercept and slope), and is stacked as high as there are number of simulations. These arrays are then multiplied by the new data provided to the function to produce a matrix of yhat values. The result is a matrix of the simulated values of the linear predictor for each observation for each simulation. Each grouping term has such a matrix for each observation. These values can be added to get the estimate of the fitted value for the random effect terms, and this can then be added to a matrix of simulated values for the fixed effect level to come up with \code{n.sims} number of possible yhat values for each observation. The distribution of simulated values is cut according to the interval requested by the function. The median or mean value as well as the upper and lower bounds are then returned. These can be presented either on the linear predictor scale or on the response scale using the link function in the \code{merMod}. } \note{ \code{merTools} includes the functions \code{subBoot} and \code{thetaExtract} to allow the user to estimate the variability in \code{theta} from a larger model by bootstrapping the model fit on a subset, to allow faster estimation. } \examples{ m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) regFit <- predict(m1, newdata = sleepstudy[11, ]) # a single value is returned intFit <- predictInterval(m1, newdata = sleepstudy[11, ]) # bounded values # Can do glmer d1 <- cbpp d1$y <- d1$incidence / d1$size gm2 <- glmer(y ~ period + (1 | herd), family = binomial, data = d1, nAGQ = 9, weights = d1$size) regFit <- predict(gm2, newdata = d1[1:10, ]) # get probabilities regFit <- predict(gm2, newdata = d1[1:10, ], type = "response") intFit <- predictInterval(gm2, newdata = d1[1:10, ], type = "probability") intFit <- predictInterval(gm2, newdata = d1[1:10, ], type = "linear.prediction") } merTools/man/wiggle.Rd0000644000176200001440000000264113460722176014411 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{wiggle} \alias{wiggle} \title{Assign an observation to different values} \usage{ wiggle(data, varlist, valueslist) } \arguments{ \item{data}{a data frame with one or more observations to be reassigned} \item{varlist}{a character vector specifying the name(s) of the variable to adjust} \item{valueslist}{a list of vectors with the values to assign to var} } \value{ a \code{data.frame} with each row assigned to the one of the new variable combinations. 
All variable combinations are returned, eg wiggling two variables with 3 and 4 variables respectively will return a new dataset with \code{3 * 4 = 12} observations. } \description{ Creates a new data.frame with copies of the original observation, each assigned to a different user-specified value of a variable. Allows the user to look at the effect on predicted values of changing either a single variable or multiple variables. } \details{ If the variable specified is a factor, then wiggle will return it as a character. } \examples{ data(iris) wiggle(iris[3,], varlist = "Sepal.Width", valueslist = list(c(1, 2, 3, 5))) wiggle(iris[3:5,], "Sepal.Width", valueslist = list(c(1, 2, 3, 5))) wiggle(iris[3,], c("Sepal.Width", "Petal.Length"), list(c(1,2,3,5), c(3,5,6))) wiggle(iris[3:5,], c("Sepal.Width", "Petal.Length"), list(c(1,2,3,5), c(3,5,6))) } merTools/man/plot_sim_error_chks.Rd0000644000176200001440000000270313460722176017201 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{plot_sim_error_chks} \alias{plot_sim_error_chks} \title{Extract all warning msgs from a merMod object} \usage{ plot_sim_error_chks(type = c("FE", "RE"), level = 0.95, stat = c("mean", "median"), sd = TRUE, sigmaScale = NULL, oddsRatio = FALSE, labs = FALSE, facet = TRUE) } \arguments{ \item{type}{check a fixed or random effect} \item{level}{the width of the confidence interval} \item{stat}{a character value indicating the variable name in data of the midpoint of the estimated interval, e.g. "mean" or "median"} \item{sd}{a logical indicating whether or not to plot error bars around the estimates (default is TRUE). Calculates the width of the error bars based on \code{level} and the variable named "sd" in \code{data}} \item{sigmaScale}{a numeric value to divide the estimate and the standard deviation by in the case of doing an effect size calculation} \item{oddsRatio}{logical, should the parameters be converted to odds ratios before plotting} \item{labs}{logical, include the labels of the groups on the x-axis} \item{facet}{Accepts either logical (\code{TRUE}) or \code{list} to specify which random effects to plot. If \code{TRUE}, facets by both \code{groupFctr} and \code{term}. If \code{list} selects the panel specified by the named elements of the list} } \description{ Extract all warning msgs from a merMod object } merTools/man/REsdExtract.Rd0000644000176200001440000000110613460722176015316 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{REsdExtract} \alias{REsdExtract} \title{Extract the standard deviation of the random effects from a merMod object} \usage{ REsdExtract(model) } \arguments{ \item{model}{an object that inherits from class merMod} } \value{ a numeric vector for standard deviations of the random effects } \description{ Extract the standard deviation of the random effects from a merMod object } \examples{ fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) REsdExtract(fm1) } merTools/man/REimpact.Rd0000644000176200001440000001110413460722176014631 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merSubstEff.R \name{REimpact} \alias{REimpact} \title{Calculate the weighted mean of fitted values for various levels of random effect terms.} \usage{ REimpact(merMod, newdata, groupFctr = NULL, term = NULL, breaks = 3, ...) 
} \arguments{ \item{merMod}{An object of class merMod} \item{newdata}{a data frame of observations to calculate group-level differences for} \item{groupFctr}{The name of the grouping factor over which the random coefficient of interest varies. This is the variable to the right of the pipe, \code{|}, in the [g]lmer formula. This parameter is optional, if not specified, it will perform the calculation for the first effect listed by \code{ranef}.} \item{term}{The name of the random coefficient of interest. This is the variable to the left of the pipe, \code{|}, in the [g]lmer formula. Partial matching is attempted on the intercept term so the following character strings will all return rankings based on the intercept (\emph{provided that they do not match the name of another random coefficient for that factor}): \code{c("(Intercept)", "Int", "intercep", ...)}.} \item{breaks}{an integer representing the number of bins to divide the group effects into, the default is 3; alternatively it can specify breaks from 0-100 for how to cut the expected rank distribution} \item{...}{additional arguments to pass to \code{\link{predictInterval}}} } \value{ A data.frame with all unique combinations of the number of cases, rows in the newdata element, and number of bins: \describe{ \item{case}{The row number of the observation from newdata.} \item{bin}{The ranking bin for the expected rank, the higher the bin number, the greater the expected rank of the groups in that bin.} \item{AvgFitWght}{The weighted mean of the fitted values for case i in bin k} \item{AvgFitWghtSE}{The standard deviation of the mean of the fitted values for case i in bin k.} \item{nobs}{The number of group effects contained in that bin.} } } \description{ \code{REimpact} calculates the average predicted value for each row of a new data frame across the distribution of \code{\link{expectedRank}} for a merMod object. This allows the user to make meaningful comparisons about the influence of random effect terms on the scale of the response variable, for user-defined inputs, and accounting for the variability in grouping terms. } \details{ The function predicts the response at every level in the random effect term specified by the user. Then, the expected rank of each group level is binned to the number of bins specified by the user. Finally, a weighted mean of the fitted value for all observations in each bin of the expected ranks is calculated using the inverse of the variance as the weight -- so that less precise estimates are downweighted in the calculation of the mean for the bin. Finally, a standard error for the bin mean is calculated. This function uses the formula for variance of a weighted mean recommended by Cochran (1977). 
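In notation, the bin average is a familiar inverse-variance weighted mean; a sketch of
that part of the calculation (the Cochran (1977) standard error formula is more
involved and not reproduced here) is
\deqn{\bar{y}_k = \frac{\sum_i w_i \hat{y}_i}{\sum_i w_i}, \qquad w_i = \frac{1}{var(\hat{y}_i)}}
where the sum runs over the group levels whose expected rank falls in bin \eqn{k}.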
} \examples{ #For a one-level random intercept model m1 <- lmer(Reaction ~ Days + (1 | Subject), sleepstudy) m1.er <- REimpact(m1, newdata = sleepstudy[1, ], breaks = 2) #For a one-level random intercept model with multiple random terms m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #ranked by the random slope on Days m2.er1 <- REimpact(m2, newdata = sleepstudy[1, ], groupFctr = "Subject", term="Days") #ranked by the random intercept m2.er2 <- REimpact(m2, newdata = sleepstudy[1, ], groupFctr = "Subject", term="int") \donttest{ # You can also pass additional arguments to predictInterval through REimpact g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) zed <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", n.sims = 50, include.resid.var = TRUE) zed2 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "s", n.sims = 50, include.resid.var = TRUE) zed3 <- REimpact(g1, newdata = InstEval[9:12, ], groupFctr = "d", breaks = 5, n.sims = 50, include.resid.var = TRUE) } } \references{ Gatz, DF and Smith, L. The Standard Error of a Weighted Mean Concentration. I. Bootstrapping vs other methods. \emph{Atmospheric Environment}. 1995;11(2)1185-1193. Available at \url{http://www.sciencedirect.com/science/article/pii/135223109400210C} Cochran, WG. 1977. Sampling Techniques (3rd Edition). Wiley, New York. } \seealso{ \code{\link{expectedRank}}, \code{\link{predictInterval}} } merTools/man/FEsim.Rd0000644000176200001440000000244213460722176014135 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merExtract.R \name{FEsim} \alias{FEsim} \title{Simulate fixed effects from merMod \code{FEsim} simulates fixed effects from merMod object posterior distributions} \usage{ FEsim(merMod, n.sims = 200, oddsRatio = FALSE, seed = NULL) } \arguments{ \item{merMod}{a merMod object from the lme4 package} \item{n.sims}{number of simulations to use} \item{oddsRatio}{logical, should parameters be converted to odds ratios?} \item{seed}{numeric, optional argument to set seed for simulations} } \value{ a data frame with the following columns \describe{ \item{\code{term}}{Name of fixed term (intercept/coefficient)} \item{\code{mean}}{Mean of the simulations} \item{\code{median}}{Median of the simulations} \item{\code{sd}}{Standard deviation of the simulations, \code{NA} if \code{oddsRatio=TRUE}} } } \description{ Simulate fixed effects from merMod \code{FEsim} simulates fixed effects from merMod object posterior distributions } \details{ Use the Gelman sim technique to build fixed effect estimates and confidence intervals. 
Uses the sim function in the arm package } \examples{ require(lme4) m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) fe2 <- FEsim(m2, 25) head(fe2) } merTools/man/reTermCount.Rd0000644000176200001440000000050313460722176015375 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{reTermCount} \alias{reTermCount} \title{Count the number of random effect terms} \source{ From lme4 package } \usage{ reTermCount(model) } \description{ Count the number of random effect terms } \keyword{internal} merTools/man/findFormFuns.Rd0000644000176200001440000000206313460722176015531 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{findFormFuns} \alias{findFormFuns} \title{\code{findFormFuns} used by \link[merTools]{averageObs} to calculate proper averages} \usage{ findFormFuns(merMod, origData = NULL) } \arguments{ \item{merMod}{the merMod object from which to draw the average observation} \item{origData}{(default=NULL) a data frame containing the original, untransformed data used to call the model. This MUST be specified if the original variables used in formula function calls are NOT present as 'main effects'.} } \value{ a data frame with a single row for the average observation, but with full factor levels. See details for more. } \description{ The purpose is to properly derive data for the average observation in the data by being 'aware' of formulas that contain interactions and/or function calls. For example, in the old behavior, if the formula contained a square term specified as \code{I(x^2)}, we were returning the mean of x{^2} not the square of mean(x). } merTools/man/merTools.Rd0000644000176200001440000000257713460722176014747 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merTools-package.r \docType{package} \name{merTools} \alias{merTools} \alias{merTools-package} \title{merTools: Provides methods for extracting and exploring results from merMod objects in the lme4 package.} \description{ The merTools package contains convenience tools for extracting useful information from and exploring the implications of merMod objects created by the lme4 package. These convenience functions are especially useful for merMod objects that take a long time to estimate due to their complexity or because they are estimated on very large samples. 
} \details{ See the vignettes for usage examples } \section{merMod extraction/utility functions}{ \itemize{ \item \code{\link{fastdisp}} \item \code{\link{superFactor}} \item \code{\link{REextract}} \item \code{\link{REsim}} \item \code{\link{FEsim}} \item \code{\link{RMSE.merMod}} \item \code{\link{thetaExtract}} \item \code{\link{REquantile}} } } \section{merMod exploration functions}{ \itemize{ \item \code{\link{plotREsim}} \item \code{\link{plotFEsim}} \item \code{\link{draw}} \item \code{\link{wiggle}} \item \code{\link{subBoot}} \item \code{\link{predictInterval}} \item \code{\link{expectedRank}} \item \code{\link{REimpact}} \item \code{\link{shinyMer}} } } merTools/man/plotFEsim.Rd0000644000176200001440000000261713460722176015040 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merPlots.R \name{plotFEsim} \alias{plotFEsim} \title{Plot the results of a simulation of the fixed effects} \usage{ plotFEsim(data, level = 0.95, stat = "median", sd = TRUE, intercept = FALSE, sigmaScale = NULL, oddsRatio = FALSE) } \arguments{ \item{data}{a data.frame generated by \code{\link{FEsim}} with simulations of the fixed effects of a \code{\link{merMod}}} \item{level}{the width of the confidence interval} \item{stat}{a character value indicating the variable name in data of the midpoint of the estimated interval, e.g. "mean" or "median"} \item{sd}{logical, indicating whether or not to plot error bars around the estimates (default is TRUE). Calculates the width of the error bars based on \code{level} and the variable named "sd" in \code{data}} \item{intercept}{logical, should the intercept be included, default is FALSE} \item{sigmaScale}{a numeric value to divide the estimate and the standard deviation by in the case of doing an effect size calculation} \item{oddsRatio}{logical, should the parameters be converted to odds ratios before plotting} } \value{ a ggplot2 plot of the coefficient effects } \description{ Plot the simulated fixed effects on a ggplot2 chart } \examples{ fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) (p1 <- plotFEsim(FEsim(fm1))) } merTools/man/sanitizeNames.Rd0000644000176200001440000000065213460722176015745 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{sanitizeNames} \alias{sanitizeNames} \title{Clean up variable names in data frames} \usage{ sanitizeNames(data) } \arguments{ \item{data}{a data.frame} } \value{ a data frame with variable names cleaned to remove factor() construction } \description{ Strips out transformations from variable names in data frames } merTools/man/setup_parallel.Rd0000644000176200001440000000042113460722176016141 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parallel.R \name{setup_parallel} \alias{setup_parallel} \title{Set up parallel environment} \usage{ setup_parallel() } \value{ Nothing } \description{ Set up parallel environment } merTools/man/fixef.merModList.Rd0000644000176200001440000000233413460722176016311 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{fixef.merModList} \alias{fixef.merModList} \title{Extract fixed-effects estimates for a merModList} \usage{ \method{fixef}{merModList}(object, add.dropped = FALSE, ...) 
} \arguments{ \item{object}{any fitted model object from which fixed effects estimates can be extracted.} \item{add.dropped}{for models with rank-deficient design matrix, reconstitute the full-length parameter vector by adding \code{NA} values in appropriate locations?} \item{...}{optional additional arguments. Currently none are used in any methods.} } \value{ a named, numeric vector of fixed-effects estimates. } \description{ Extract fixed-effects estimates for a merModList } \details{ Extract the estimates of the fixed-effects parameters from a list of fitted \code{merMod} models. Takes the mean of the individual \code{fixef} objects for each of the component models in the \code{merModList}. } \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) fixef(mod) } merTools/man/REmargins.Rd0000644000176200001440000001216013466135366015024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/REmargins.R \name{REmargins} \alias{REmargins} \title{Calculate the predicted value for each observation across the distribution of the random effect terms.} \usage{ REmargins(merMod, newdata = NULL, groupFctr = NULL, term = NULL, breaks = 4, .parallel = FALSE, ...) } \arguments{ \item{merMod}{An object of class merMod} \item{newdata}{a data frame of observations to calculate group-level differences for} \item{groupFctr}{The name of the grouping factor over which the random coefficient of interest varies. This is the variable to the right of the pipe, \code{|}, in the [g]lmer formula. This parameter is optional, if not specified, it will perform the calculation for the first effect listed by \code{ranef}. If the length is > 1 then the combined effect of all listed groups will calculated and marginalized over co-occurences of those groups if desired.} \item{term}{The name of the random coefficient of interest. This is the variable to the left of the pipe, \code{|}, in the [g]lmer formula. Partial matching is attempted on the intercept term so the following character strings will all return rankings based on the intercept (\emph{provided that they do not match the name of another random coefficient for that factor}): \code{c("(Intercept)", "Int", "intercep", ...)}.} \item{breaks}{an integer representing the number of bins to divide the group effects into, the default is 3.} \item{.parallel, }{logical should parallel computation be used, default is TRUE} \item{...}{additional arguments to pass to \code{\link{predictInterval}}} } \value{ A data.frame with all unique combinations of the number of cases, rows in the newdata element: \describe{ \item{...}{The columns of the original data taken from \code{newdata}} \item{case}{The row number of the observation from newdata. 
Each row in newdata will be repeated for all unique levels of the grouping_var, term, and breaks.} \item{grouping_var}{The grouping variable the random effect is being marginalized over.} \item{term}{The term for the grouping variable the random effect is being marginalized over.} \item{breaks}{The ntile of the effect size for \code{grouping_var} and \code{term}} \item{original_group_level}{The original grouping value for this \code{case}} \item{fit_combined}{The predicted value from \code{predictInterval} for this case simulated at the Nth ntile of the expected rank distribution of \code{grouping_var} and \code{term}} \item{upr_combined}{The upper bound of the predicted value.} \item{lwr_combined}{The lower bound of the predicted value.} \item{fit_XX}{For each grouping term in newdata the predicted value is decomposed into its fit components via predictInterval and these are all returned here} \item{upr_XX}{The upper bound for the effect of each grouping term} \item{lwr_XX}{The lower bound for the effect of each grouping term} \item{fit_fixed}{The predicted fit with all the grouping terms set to 0 (average)} \item{upr_fixed}{The upper bound fit with all the grouping terms set to 0 (average)} \item{lwr_fixed}{The lower bound fit with all the grouping terms set to 0 (average)} } } \description{ \code{REmargins} calculates the average predicted value for each row of a new data frame across the distribution of \code{\link{expectedRank}} for a merMod object. This allows the user to make meaningful comparisons about the influence of random effect terms on the scale of the response variable, for user-defined inputs, and accounting for the variability in grouping terms. } \details{ The function simulates the The function predicts the response at every level in the random effect term specified by the user. Then, the expected rank of each group level is binned to the number of bins specified by the user. Finally, a weighted mean of the fitted value for all observations in each bin of the expected ranks is calculated using the inverse of the variance as the weight -- so that less precise estimates are downweighted in the calculation of the mean for the bin. Finally, a standard error for the bin mean is calculated. } \examples{ fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) mfx <- REmargins(merMod = fm1, newdata = sleepstudy[1:10,]) \donttest{ # You can also pass additional arguments to predictInterval through REimpact g1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) margin_df <- REmargins(g1, newdata = InstEval[20:25, ], groupFctr = c("s"), breaks = 4) margin_df <- REmargins(g1, newdata = InstEval[20:25, ], groupFctr = c("d"), breaks = 3) } } \references{ Gatz, DF and Smith, L. The Standard Error of a Weighted Mean Concentration. I. Bootstrapping vs other methods. \emph{Atmospheric Environment}. 1995;11(2)1185-1193. Available at \url{http://www.sciencedirect.com/science/article/pii/135223109400210C} Cochran, WG. 1977. Sampling Techniques (3rd Edition). Wiley, New York. } \seealso{ \code{\link{expectedRank}}, \code{\link{predictInterval}} } merTools/man/fastdisp.Rd0000644000176200001440000000202413460722176014743 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merFastDisplay.R \name{fastdisp} \alias{fastdisp} \alias{fastdisp.merMod} \alias{fastdisp.merModList} \title{fastdisp: faster display of model summaries} \usage{ fastdisp(x, ...) \method{fastdisp}{merMod}(x, ...) \method{fastdisp}{merModList}(x, ...) 
} \arguments{ \item{x}{a model object} \item{...}{additional arguments to pass to \code{arm::\link[arm]{display}} including number of digits} } \value{ A printed summary of a x object } \description{ Display model fit summary of x or x like objects, fast } \details{ Faster than the implementation in the arm package because it avoids refitting The time saving is only noticeable for large, time-consuming (g)lmer fits. } \examples{ \donttest{ #Compare the time for displaying this modest model require(arm) m1 <- lmer(y ~ lectage + studage + (1|d) + (1|s), data=InstEval) system.time(display(m1)) system.time(fastdisp(m1)) } } \seealso{ \code{\link[arm]{display}} } merTools/man/print.summary.merModList.Rd0000644000176200001440000000070613460722176020041 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{print.summary.merModList} \alias{print.summary.merModList} \title{Print the summary of a merMod list} \usage{ \method{print}{summary.merModList}(x, ...) } \arguments{ \item{x}{a summary of amerModList object} \item{...}{additional arguments} } \value{ summary content printed to console } \description{ Print the summary of a merMod list } merTools/man/fetch.merMod.msgs.Rd0000644000176200001440000000052013460722176016410 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{fetch.merMod.msgs} \alias{fetch.merMod.msgs} \title{Extract all warning msgs from a merMod object} \usage{ fetch.merMod.msgs(x) } \arguments{ \item{x}{a merMod object} } \description{ Extract all warning msgs from a merMod object } merTools/man/REextract.Rd0000644000176200001440000000155513460722176015037 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merExtract.R \name{REextract} \alias{REextract} \title{Extracts random effects} \usage{ REextract(merMod) } \arguments{ \item{merMod}{a merMod object from the lme4 package} } \value{ a data frame with the following columns \describe{ \item{groupFctr}{The name of the grouping factor associated with the random effects} \item{groupID}{The level of the grouping factor associated with the random effects} \item{'term'}{One column per random effect, the name is derived from the merMod} \item{'term'_se}{One column per random effect, the name is derived from the merMod} } } \description{ Extracts random effect terms from an lme4 model } \examples{ m2 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) rfx <- REextract(m2) #Note the column names head(rfx) } merTools/man/REcorrExtract.Rd0000644000176200001440000000111213460722176015652 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{REcorrExtract} \alias{REcorrExtract} \title{Extract the correlations between the slopes and the intercepts from a model} \usage{ REcorrExtract(model) } \arguments{ \item{model}{an object that inherits from class merMod} } \value{ a numeric vector of the correlations among the effects } \description{ Extract the correlations between the slopes and the intercepts from a model } \examples{ fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) REcorrExtract(fm1) } merTools/man/reTermNames.Rd0000644000176200001440000000075613460722176015362 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{reTermNames} \alias{reTermNames} \title{Get names of random effect terms in a model object} \usage{ reTermNames(model) } \arguments{ 
\item{model}{a merMod object with random effect terms} } \value{ a data.frame with rows for each term with columns naming the grouping term and the effect type } \description{ Get names of random effect terms in a model object } \keyword{internal} merTools/man/superFactor.Rd0000644000176200001440000000147113460722176015430 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{superFactor} \alias{superFactor} \title{Create a factor with unobserved levels} \usage{ superFactor(x, fullLev) } \arguments{ \item{x}{a vector to be converted to a factor} \item{fullLev}{a vector of factor levels to be assigned to x} } \value{ a factor variable with all observed levels of x and all levels of x in fullLev } \description{ Create a factor variable and include unobserved levels for compatibility with model prediction functions } \examples{ regularFactor <- c("A", "B", "C") regularFactor <- factor(regularFactor) levels(regularFactor) # Now make it super newLevs <- c("D", "E", "F") regularFactor <- superFactor(regularFactor, fullLev = newLevs) levels(regularFactor) # now super } merTools/man/summary.merModList.Rd0000644000176200001440000000124713460722176016707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merList.R \name{summary.merModList} \alias{summary.merModList} \title{Print the results of a merMod list} \usage{ \method{summary}{merModList}(object, ...) } \arguments{ \item{object}{a modList of class merModList} \item{...}{additional arguments} } \value{ summary content printed to console } \description{ Print the results of a merMod list } \examples{ sim_list <- replicate(n = 10, expr = sleepstudy[sample(row.names(sleepstudy), 180),], simplify=FALSE) fml <- "Reaction ~ Days + (Days | Subject)" mod <- lmerModList(fml, data = sim_list) print(mod) } merTools/man/famlink.Rd0000644000176200001440000000056613460722176014560 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{famlink} \alias{famlink} \title{Find link function family} \usage{ famlink(object, resp = object@resp) } \arguments{ \item{object}{a merMod object} \item{resp}{the response vector} } \value{ the link function and family } \description{ Find link function family } merTools/man/ICC.Rd0000644000176200001440000000127313460722176013531 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{ICC} \alias{ICC} \title{Calculate the intraclass correlation using mixed effect models} \usage{ ICC(outcome, group, data, subset = NULL) } \arguments{ \item{outcome}{a character representing the variable of the outcome} \item{group}{a character representing the name of the grouping term} \item{data}{a data.frame} \item{subset}{an optional subset} } \value{ a numeric for the intraclass correlation } \description{ Calculate the intraclass correlation using mixed effect models } \examples{ data(sleepstudy) ICC(outcome = "Reaction", group = "Subject", data = sleepstudy) } merTools/man/shuffle.Rd0000644000176200001440000000056113460722176014566 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merData.R \name{shuffle} \alias{shuffle} \title{Randomly reorder a dataframe} \usage{ shuffle(data) } \arguments{ \item{data}{a data frame} } \value{ a data frame of the same dimensions with the rows reordered randomly } \description{ Randomly reorder a dataframe by row }