Amelia/0000755000176200001440000000000014336230455011447 5ustar liggesusersAmelia/NAMESPACE0000644000176200001440000000237714335240021012664 0ustar liggesusersexport(amelia, write.amelia, AmeliaView, ## amelia.prep, ameliabind, compare.density, overimpute, disperse, tscsPlot, missmap, moPrep, transform.amelia, mi.meld, amelia.molist, amelia.default, with.amelia, mi.combine) import(foreign) import(utils) import(Rcpp) S3method(print, amelia) S3method(plot, amelia) S3method(summary, amelia) S3method(summary, mi) S3method(amelia, amelia) S3method(amelia, default) S3method(amelia, molist) S3method(moPrep, molist) S3method(moPrep, default) S3method(transform, amelia) S3method(with, amelia) useDynLib(Amelia) importFrom("grDevices", "chull", "colorRampPalette", "dev.copy2pdf", "dev.new", "devAskNewPage", "heat.colors", "rainbow", "rgb") importFrom("graphics", "abline", "arrows", "axis", "hist", "image", "legend", "lines", "matplot", "par", "plot", "plot.new", "plot.window", "points", "rect", "segments", "text", "xinch", "xyinch", "layout") importFrom("methods", "hasArg", "is") importFrom("stats", "coef", "complete.cases", "cov", "density", "lm", "na.omit", "prcomp", "qnorm", "quantile", "rbinom", "reshape", "rnorm", "runif", "sd", "var") Amelia/.Rinstignore0000644000176200001440000000002614335240021013736 0ustar liggesusersMakefile doc/.*[.]png$Amelia/README.md0000644000176200001440000000313114335240021012711 0ustar liggesusers# Amelia II [![R build status](https://github.com/IQSS/Amelia/workflows/R-CMD-check/badge.svg)](https://github.com/IQSS/Amelia/actions) [![CRAN version](http://www.r-pkg.org/badges/version/Amelia)](https://cran.r-project.org/package=Amelia) ## Overview [Amelia II][] is an R package for the multiple imputation of multivariate incomplete data. It uses an algorithm that combines bootstrapping and the EM algorithm to take draws from the posterior of the missing data. The Amelia package includes normalizing transformations, cell-level priors, and methods for handling time-series cross-sectional data. ## How to install To install the latest version of Amelia, which requires [R][] version 2.14.0 or higher, simply use the standard R installation tools: ```r install.packages("Amelia") ``` If you would to use the current development release of Amelia (which may be unstable), run the following: ```r require(devtools) devtools::install_github("IQSS/Amelia") ``` ## Getting started with Amelia The main function in the Amelia package is `amelia()` which will perform multiple imputation on a data frame. It allows for easy setting of time-series and unit variables via the `ts` and `cs` arguments. 
```r library(Amelia) data(africa) a.out <- amelia(africa, m = 5, ts = "year", cs = "country") ``` ## AmeliaView GUI Once installed, you can access most of the Amelia functionality through an interactive GUI by running the following command: ```r Amelia::AmeliaView() ``` [Amelia II]: https://gking.harvard.edu/amelia [R]: https://cran.r-project.org Amelia/data/0000755000176200001440000000000014335240021012345 5ustar liggesusersAmelia/data/africa.RData0000644000176200001440000000406714335240021014516 0ustar liggesusers [binary RData payload omitted] Amelia/data/freetrade.RData0000644000176200001440000000651314335240021015230 0ustar liggesusers [binary RData payload omitted] Amelia/man/0000755000176200001440000000000014335240021012207 5ustar liggesusersAmelia/man/plot.amelia.Rd0000644000176200001440000000155614335240021014712 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.amelia.R \name{plot.amelia} \alias{plot.amelia} \title{Summary plots for Amelia objects} \usage{ \method{plot}{amelia}(x, which.vars, compare = TRUE, overimpute = FALSE, ask = TRUE, ...) } \arguments{ \item{x}{an object of class "amelia"; typically output from the function \code{amelia}.} \item{which.vars}{a vector indicating the variables to plot. The default is to plot all of the numeric variables that were actually imputed.} \item{compare}{plot the density comparisons for each variable (True or False)} \item{overimpute}{plot the overimputation for each variable (True or False)} \item{ask}{prompt user before changing pages of a plot (True or False)} \item{...}{further graphical arguments.} } \description{ Plots diagnostic plots for the output from the \code{amelia} function. } Amelia/man/amelia.Rd0000644000176200001440000003376114335240053013745 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emb.r \name{amelia} \alias{amelia} \alias{amelia.amelia} \alias{amelia.molist} \alias{amelia.default} \title{AMELIA: Multiple Imputation of Incomplete Multivariate Data} \usage{ amelia(x, ...)
\method{amelia}{amelia}(x, m = 5, p2s = 1, frontend = FALSE, ...) \method{amelia}{molist}(x, ...) \method{amelia}{default}( x, m = 5, p2s = 1, frontend = FALSE, idvars = NULL, ts = NULL, cs = NULL, polytime = NULL, splinetime = NULL, intercs = FALSE, lags = NULL, leads = NULL, startvals = 0, tolerance = 1e-04, logs = NULL, sqrts = NULL, lgstc = NULL, noms = NULL, ords = NULL, incheck = TRUE, collect = FALSE, arglist = NULL, empri = NULL, priors = NULL, autopri = 0.05, emburn = c(0, 0), bounds = NULL, max.resample = 100, overimp = NULL, boot.type = "ordinary", parallel = c("no", "multicore", "snow"), ncpus = getOption("amelia.ncpus", 1L), cl = NULL, ... ) } \arguments{ \item{x}{either a matrix, data.frame, an object of class "amelia", or an object of class "molist". The first two will call the default S3 method. The third is a convenient way to perform more imputations with the same parameters. The fourth will impute based on the settings from \code{moPrep} and any additional arguments.} \item{...}{further arguments to be passed.} \item{m}{the number of imputed datasets to create.} \item{p2s}{an integer value taking either 0 for no screen output, 1 for normal screen printing of iteration numbers, and 2 for detailed screen output. See "Details" for specifics on output when p2s=2.} \item{frontend}{a logical value used internally for the GUI.} \item{idvars}{a vector of column numbers or column names that indicates identification variables. These will be dropped from the analysis but copied into the imputed datasets.} \item{ts}{column number or variable name indicating the variable identifying time in time series data.} \item{cs}{column number or variable name indicating the cross section variable.} \item{polytime}{integer between 0 and 3 indicating what power of polynomial should be included in the imputation model to account for the effects of time. A setting of 0 would indicate constant levels, 1 would indicate linear time effects, 2 would indicate squared effects, and 3 would indicate cubic time effects.} \item{splinetime}{integer value of 0 or greater to control cubic smoothing splines of time. Values between 0 and 3 create a simple polynomial of time (identical to the polytime argument). Values \code{k} greater than 3 create a spline with an additional \code{k-3} knotpoints.} \item{intercs}{a logical variable indicating if the time effects of \code{polytime} should vary across the cross-section.} \item{lags}{a vector of numbers or names indicating columns in the data that should have their lags included in the imputation model.} \item{leads}{a vector of numbers or names indicating columns in the data that should have their leads (future values) included in the imputation model.} \item{startvals}{starting values, 0 for the parameter matrix from listwise deletion, 1 for an identity matrix.} \item{tolerance}{the convergence threshold for the EM algorithm.} \item{logs}{a vector of column numbers or column names that refer to variables that require log-linear transformation.} \item{sqrts}{a vector of numbers or names indicating columns in the data that should be transformed by a square root function. Data in this column cannot be less than zero.} \item{lgstc}{a vector of numbers or names indicating columns in the data that should be transformed by a logistic function for proportional data.
Data in this column must be between 0 and 1.} \item{noms}{a vector of numbers or names indicating columns in the data that are nominal variables.} \item{ords}{a vector of numbers or names indicating columns in the data that should be treated as ordinal variables.} \item{incheck}{a logical indicating whether or not the inputs to the function should be checked before running \code{amelia}. This should only be set to \code{FALSE} if you are extremely confident that your settings are non-problematic and you are trying to save computational time.} \item{collect}{a logical value indicating whether or not the garbage collection frequency should be increased during the imputation model. Only set this to \code{TRUE} if you are experiencing memory issues as it can significantly slow down the imputation process.} \item{arglist}{an object of class "ameliaArgs" from a previous run of Amelia. Including this object will use the arguments from that run.} \item{empri}{number indicating level of the empirical (or ridge) prior. This prior shrinks the covariances of the data, but keeps the means and variances the same for problems of high missingness, small N's or large correlations among the variables. Should be kept small, perhaps 0.5 to 1 percent of the rows of the data; a reasonable upper bound is around 10 percent of the rows of the data.} \item{priors}{a four or five column matrix containing the priors for either individual missing observations or variable-wide missing values. See "Details" for more information.} \item{autopri}{allows the EM chain to increase the empirical prior if the path strays into a nonpositive definite covariance matrix, up to a maximum empirical prior of the value of this argument times \code{n}, the number of observations. Must be between 0 and 1, and at zero this turns off this feature.} \item{emburn}{a numeric vector of length 2, where \code{emburn[1]} is the minimum EM chain length and \code{emburn[2]} is the maximum EM chain length. These are ignored if they are less than 1.} \item{bounds}{a three column matrix to hold logical bounds on the imputations. Each row of the matrix should be of the form \code{c(column.number, lower.bound, upper.bound)}. See Details below.} \item{max.resample}{an integer that specifies how many times Amelia should redraw the imputed values when trying to meet the logical constraints of \code{bounds}. After this value, imputed values are set to the bounds.} \item{overimp}{a two-column matrix describing which cells are to be overimputed. Each row of the matrix should be a \code{c(row,column)} pair. Each of these cells will be treated as missing and replaced with draws from the imputation model.} \item{boot.type}{choice of bootstrap, currently restricted to either \code{"ordinary"} for the usual non-parametric bootstrap and \code{"none"} for no bootstrap.} \item{parallel}{the type of parallel operation to be used (if any). If missing, the default is taken from the option \code{"amelia.parallel"} (and if that is not set, \code{"no"}).} \item{ncpus}{integer: the number of processes to be used in parallel operation: typically one would choose the number of available CPUs.} \item{cl}{an optional \pkg{parallel} or \pkg{snow} cluster for use if \code{parallel = "snow"}. If not supplied, a cluster on the local machine is created for the duration of the \code{amelia} call.} } \value{ An instance of S3 class "amelia" with the following objects: \item{imputations}{a list of length \code{m} with an imputed dataset in each entry.
The class (matrix or data.frame) of these entries will match \code{x}.} \item{m}{an integer indicating the number of imputations run.} \item{missMatrix}{a matrix identical in size to the original dataset with 1 indicating a missing observation and a 0 indicating an observed observation.} \item{theta}{An array with dimensions \eqn{(p+1)} by \eqn{(p+1)} by \eqn{m} (where \eqn{p} is the number of variables in the imputations model) holding the converged parameters for each of the \code{m} EM chains.} \item{mu}{A \eqn{p} by \eqn{m} matrix of the posterior modes for the complete-data means in each of the EM chains.} \item{covMatrices}{An array with dimensions \eqn{(p)} by \eqn{(p)} by \eqn{m} where the first two dimensions hold the posterior modes of the covariance matrix of the complete data for each of the EM chains.} \item{code}{an integer indicating the exit code of the Amelia run.} \item{message}{an exit message for the Amelia run} \item{iterHist}{a list of iteration histories for each EM chain. See documentation for details.} \item{arguments}{an instance of the class "ameliaArgs" which holds the arguments used in the Amelia run.} \item{overvalues}{a vector of values removed for overimputation. Used to reformulate the original data from the imputations. } Note that the \code{theta}, \code{mu} and \code{covMatrices} objects refer to the data as seen by the EM algorithm and are thus centered, scaled, stacked, transformed and rearranged. See the manual for details and how to access this information. } \description{ Runs the bootstrap EM algorithm on incomplete data and creates imputed datasets. } \details{ Multiple imputation is a method for analyzing incomplete multivariate data. This function will take an incomplete dataset in either data frame or matrix form and return \code{m} imputed datasets with no missing values. The algorithm first creates a bootstrapped version of the original data, estimates the sufficient statistics (with priors if specified) by EM on this bootstrapped sample, and then imputes the missing values of the original data using the estimated sufficient statistics. It repeats this process \code{m} times to produce the \code{m} complete datasets where the observed values are the same and the unobserved values are drawn from their posterior distributions. The function will start a "fresh" run of the algorithm if \code{x} is either an incomplete matrix or data.frame. In this method, all of the options will be user-defined or set to their default. If \code{x} is the output of a previous Amelia run (that is, an object of class "amelia"), then Amelia will run with the options used in that previous run. This is a convenient way to run more imputations of the same model. You can provide Amelia with informational priors about the missing observations in your data. To specify priors, pass a four or five column matrix to the \code{priors} argument with each row specifying a different prior as such: \code{ one.prior <- c(row, column, mean, standard deviation)} or, \code{ one.prior <- c(row, column, minimum, maximum, confidence)}. The first and second columns of the priors matrix should be the row and column number of the prior being set. The other columns should contain either the mean and standard deviation of the prior, or a minimum, maximum and confidence level for the prior. You must specify your priors all as distributions or all as confidence ranges. Note that ranges are converted to distributions, so setting a confidence of 1 will generate an error.
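As a minimal sketch of how such a matrix can be built in the distribution form (the rows, columns, and prior values below are purely illustrative and not drawn from any real dataset):

\preformatted{
## cells (12, 3) and (18, 3) get priors with means 5 and 8
## and standard deviations 1 and 2, respectively
pr <- matrix(c(12, 3, 5, 1,
               18, 3, 8, 2),
             nrow = 2, ncol = 4, byrow = TRUE)
a.out.pr <- amelia(x, priors = pr)
}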
Setting a prior for the missing values of an entire variable is done in the same manner as above, but by inputting a \code{0} for the row instead of the row number. If priors are set for both the entire variable and an individual observation, the individual prior takes precedence. In addition to priors, Amelia allows for logical bounds on variables. The \code{bounds} argument should be a matrix with 3 columns, with each row referring to a logical bound on a variable. The first column should be the column number of the variable to be bounded, the second column should be the lower bound for that variable, and the third column should be the upper bound for that variable. As Amelia enacts these bounds by resampling, particularly poor bounds will end up resampling forever. Amelia will stop resampling after \code{max.resample} attempts and simply set the imputation to the relevant bound. If each imputation is taking a long time to converge, you can increase the empirical prior, \code{empri}. This value has the effect of smoothing out the likelihood surface so that the EM algorithm can more easily find the maximum. It should be kept as low as possible and only used if needed. Amelia assumes the data is distributed multivariate normal. There are a number of variables that can break this assumption. Usually, though, a transformation can make any variable roughly continuous and unbounded. We have included a number of commonly needed transformations for data. Note that the data will not be transformed in the output datasets and the transformation is simply useful for climbing the likelihood. Amelia can run its imputations in parallel using the methods of the \pkg{parallel} package. The \code{parallel} argument names the parallel backend that Amelia should use. Users on Windows systems must use the \code{"snow"} option and users on Unix-like systems should use \code{"multicore"}. The \code{multicore} backend sets itself up automatically, but the \code{snow} backend requires more setup. You can pass a predefined cluster from the \code{parallel::makePSOCKcluster} function to the \code{cl} argument. Without this cluster, Amelia will attempt to create a reasonable default cluster and stop it once computation is complete. When using the parallel backend, users can set the number of CPUs to use with the \code{ncpus} argument. The defaults for these two arguments can be set with the options \code{"amelia.parallel"} and \code{"amelia.ncpus"}. Please refer to the Amelia manual for more information on the function or the options. } \section{Methods (by class)}{ \itemize{ \item \code{amelia(amelia)}: Run additional imputations for Amelia output \item \code{amelia(molist)}: Perform multiple overimputation from moPrep \item \code{amelia(default)}: Run core Amelia algorithm }} \examples{ data(africa) a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") summary(a.out) plot(a.out) } \references{ Honaker, J., King, G., Blackwell, M. (2011). Amelia II: A Program for Missing Data. \emph{Journal of Statistical Software}, \bold{45(7)}, 1--47. \doi{10.18637/jss.v045.i07} } \seealso{ For imputation diagnostics, \code{\link{missmap}}, \code{\link{compare.density}}, \code{\link{overimpute}} and \code{\link{disperse}}. For time series plots, \code{\link{tscsPlot}}.
Also: \code{\link{plot.amelia}}, \code{\link{write.amelia}}, and \code{\link{ameliabind}} } \author{ James Honaker Gary King Matt Blackwell } \keyword{models} Amelia/man/write.amelia.Rd0000644000176200001440000000504414335240021015062 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/write.amelia.R \name{write.amelia} \alias{write.amelia} \title{Write Amelia imputations to file} \usage{ write.amelia( obj, separate = TRUE, file.stem, extension = NULL, format = "csv", impvar = "imp", orig.data = TRUE, ... ) } \arguments{ \item{obj}{an object of class "amelia"; typically output from the function \code{amelia}} \item{separate}{logical variable. If \code{TRUE} (default), the imputed datasets will be written to separate files, whose names come from the \code{file.stem} and \code{extension} arguments. If \code{FALSE}, the imputations are stacked and written as a single file.} \item{file.stem}{the leading part of the filename used to save the output. The imputation number and \code{extension} will be added to complete the filename. This can include a directory path.} \item{extension}{the extension of the filename. This is simply what follows \code{file.stem} and the imputation number.} \item{format}{one of the following output formats: \code{csv}, \code{dta} or \code{table}. See details.} \item{impvar}{the name of the imputation number variable written to the stacked dataset when \code{separate} is \code{FALSE}.} \item{orig.data}{logical variable indicating whether the original, unimputed dataset should be included in the stacked dataset when \code{separate} is \code{FALSE}.} \item{\dots}{further arguments for the \code{write} functions.} } \description{ Writes the imputed datasets to file from a run of \code{amelia} } \details{ \code{write.amelia} writes the imputed datasets to a file or a set of files using one of the following functions: \code{write.csv}, \code{write.dta}, or \code{write.table}. You can pass arguments to these functions from \code{write.amelia}. When \code{separate} is \code{TRUE}, each imputed dataset is written to its own file. If you were to set \code{file.stem} to \code{"outdata"} and the \code{extension} to \code{".csv"}, then the resulting filenames of the written files will be \preformatted{ outdata1.csv outdata2.csv outdata3.csv ... } and so on. When \code{separate} is \code{FALSE}, the function adds a variable called \code{impvar} to each dataset which indicates the imputed dataset to which the row belongs. Then, each of the datasets are stacked together to create one dataset. If \code{orig.data} is \code{TRUE}, then the original, unimputed dataset is included at the top of the stack, with its imputation number set to 0. } \seealso{ \code{\link{write.csv}}, \code{\link{write.table}}, \code{\link{write.dta}} } Amelia/man/compare.density.Rd0000644000176200001440000000421514335240021015604 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diag.r \name{compare.density} \alias{compare.density} \title{Compare observed versus imputed densities} \usage{ compare.density( output, var, col = c("indianred", "dodgerblue"), scaled = FALSE, lwd = 1, main, xlab, ylab, legend = TRUE, frontend = FALSE, ...
) } \arguments{ \item{output}{output from the function \code{amelia}.} \item{var}{column number or variable name of the variable to plot.} \item{col}{a vector of length 2 containing the color to plot the (1) imputed density and (2) the observed density.} \item{scaled}{a logical indicating if the two densities should be scaled to reflect the difference in number of units in each.} \item{lwd}{the line width of the density plots.} \item{main}{main title of the plot. The default is to title the plot using the variable name.} \item{xlab}{the label for the x-axis. The default is the name of the variable.} \item{ylab}{the label for the y-axis. The default is "Relative Density."} \item{legend}{a logical value indicating if a legend should be plotted.} \item{frontend}{a logical value used internally for the Amelia GUI.} \item{...}{further graphical parameters for the plot.} } \description{ Plots smoothed density plots of observed and imputed values from output from the \code{amelia} function. } \details{ This function first plots a density plot of the observed units for the variable \code{var} in \code{col[2]}. The the function plots a density plot of the mean or modal imputations for the missing units in \code{col[1]}. If a variable is marked "ordinal" or "nominal" with the \code{ords} or \code{noms} options in \code{amelia}, then the modal imputation will be used. If \code{legend} is \code{TRUE}, then a legend is plotted as well. } \examples{ data(africa) } \references{ Abayomi, K. and Gelman, A. and Levy, M. 2005 "Diagnostics for Multivariate Imputations," \emph{Applied Statistics}. 57,3: 273--291. } \seealso{ For more information on how densities are computed, \code{\link{density}}; Other imputation diagnostics are \code{\link{overimpute}}, \code{\link{disperse}}, and \code{\link{tscsPlot}}. } Amelia/man/mi.meld.Rd0000644000176200001440000000357114335240021014031 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diag.r \name{mi.meld} \alias{mi.meld} \title{Combine Multiple Results From Multiply Imputed Datasets} \usage{ mi.meld(q, se, byrow = TRUE) } \arguments{ \item{q}{A matrix or data frame of (k) quantities of interest (eg. coefficients, parameters, means) from (m) multiply imputed datasets. Default is to assume the matrix is m-by-k (see \code{byrow}), thus each row represents a set of results from one dataset, and each column represents the different values of a particular quantity of interest across the imputed datasets.} \item{se}{A matrix or data frame of standard errors that correspond to each of the elements of the quantities of interest in \code{q}. Should be the same dimensions as \code{q}.} \item{byrow}{logical. If \code{TRUE}, \code{q} and \code{se} are treated as though each row represents the set of results from one dataset (thus m-by-k). If \code{FALSE}, each column represents results from one dataset (thus k-by-m).} } \value{ \item{q.mi}{Average value of each quantity of interest across the m models} \item{se.mi}{Standard errors of each quantity of interest} } \description{ Combine sets of estimates (and their standard errors) generated from different multiply imputed datasets into one set of results. } \details{ Uses Rubin's rules for combining a set of results from multiply imputed datasets to reflect the average result, with standard errors that both average uncertainty across models and account for disagreement in the estimated values across the models. } \references{ Rubin, D. (1987). 
\emph{Multiple Imputation for Nonresponse in Surveys}. New York: Wiley. King, G., Honaker, J., Joseph, A. and Scheve, K. (2001). Analyzing Incomplete Political Science Data: An Alternative Algorithm for Multiple Imputation. \emph{American Political Science Review}, \bold{95(1)}, 49--69. (p53) } Amelia/man/transform.amelia.Rd0000644000176200001440000000251314335240021015741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transform.amelia.R \name{transform.amelia} \alias{transform.amelia} \title{Transform imputed datasets from Amelia objects} \usage{ \method{transform}{amelia}(`_data`, ...) } \arguments{ \item{_data}{an object of class "amelia"; typically output from the function \code{amelia}.} \item{...}{further arguments of the form \code{tag = value}.} } \value{ An object of class \code{amelia} with its \code{imputations} and \code{missMatrix} values updated according to the transformations. In addition, each of the calls to \code{transform.amelia} are stored in } \description{ Updates the imputed datasets from an \code{amelia} output with the specified transformations. } \details{ The \code{\dots} arguments to \code{transform.amelia} are expressions of the form \code{tag = value}, where \code{tag} is the variable that is being updated or created and \code{value} is an expression that is a function of the variables in the imputed datasets. For instance, if you wanted to create an interaction of two imputed variables, you could have one argument be \code{intervar = var1 * var2}. This would either update the current variable \code{intervar} in the imputed data or append a new variable called \code{intervar} to the imputed datasets. } \seealso{ \code{\link{transform}} } Amelia/man/amelia.package.Rd0000755000176200001440000000253414335240021015327 0ustar liggesusers\name{amelia-package} \alias{amelia-package} \docType{package} \title{Amelia II: A Program for Missing Data} \description{Uses a bootstrap+EM algorithm to impute missing values from a dataset and produces multiple output datasets for analysis.} \details{ \tabular{ll}{ Package: \tab amelia\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2006-03-03\cr License: \tab See Manual\cr } You can use the package in one of two ways: either by invoking the \code{ameliagui()} command and running the program from a graphical interface or by loading in your data and then running the \code{amelia} function on the data. If you use the GUI in Windows, make sure that you run R under the single document interface (SDI) as it will try to grab focus from the GUI if you don't. } \author{ James Honaker, Matthew Blackwell, Gary King } \references{James Honaker, Gary King, Matthew Blackwell (2011). Amelia II: A Program for Missing Data. \emph{Journal of Statistical Software}, \bold{45(7)}, 1--47. URL http://www.jstatsoft.org/v45/i07/. King, Gary; James Honaker, Anne Joseph, and Kenneth Scheve. \"Analyzing Incomplete Political Science Data: An Alternative Algorithm for Multiple Imputation\", \emph{American Political Science Review}, Vol. 95, No. 1 (March, 2001): Pp. 49-69.
} \keyword{package} Amelia/man/moPrep.Rd0000644000176200001440000000733614335240053013756 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mo.R \name{moPrep} \alias{moPrep} \alias{moPrep.molist} \alias{moPrep.default} \title{Prepare Multiple Overimputation Settings} \usage{ moPrep( x, formula, subset, error.proportion, gold.standard = !missing(subset), error.sd ) \method{moPrep}{molist}(x, formula, subset, error.proportion, gold.standard = FALSE, error.sd) \method{moPrep}{default}( x, formula, subset, error.proportion, gold.standard = !missing(subset), error.sd ) } \arguments{ \item{x}{either a matrix, data.frame, or an object of class "molist" from a previous \code{moPrep} call. The first two derive the priors from the data given, and the third will derive the priors from the first \code{moPrep} call and add them to the already defined priors.} \item{formula}{a formula describing the nature of the measurement error for the variable. See "Details."} \item{subset}{an optional vector specifying a subset of observations which possess measurement error.} \item{error.proportion}{an optional vector specifying the fraction of the observed variance that is due to measurement error.} \item{gold.standard}{a logical value indicating if values with no measurement error should be used to estimate the measurement error variance.} \item{error.sd}{an optional vector specifying the standard error of the measurement error.} } \value{ An instance of the S3 class "molist" with the following objects: \itemize{ \item priors a four-column matrix of the multiple overimputation priors associated with the data. Each row of the matrix is \code{c(row, column, prior.mean, prior.sd)} \item overimp a two-column matrix of cells to be overimputed. Each row of the matrix is of the form \code{c(row, column)}, which indicates the row and column of the cell to be overimputed. \item data the object name of the matrix or data.frame to which priors refer. } Note that \code{priors} and \code{overimp} might contain results from multiple calls to \code{moPrep}, not just the most recent. } \description{ A function to generate priors for multiple overimputation of a variable measured with error. } \details{ This function generates priors for multiple overimputation of data measured with error. With the \code{formula} argument, you can specify which variable has the error, what the mean of the latent data is, and if there are any other proxy measures of the mismeasured variable. The general syntax for the formula is: \code{errvar ~ mean | proxy}, where \code{errvar} is the mismeasured variable, \code{mean} is a formula for the mean of the latent variable (usually just \code{errvar} itself), and \code{proxy} is another mismeasurement of the same latent variable. The proxies are used to estimate the variance of the measurement error. \code{subset} and \code{gold.standard} refer to the rows of the data which are and are not measured with error. Gold-standard rows are used to estimate the variance of the measurement error. \code{error.proportion} is used to estimate the variance of the measurement error by estimating the variance of the mismeasurement and taking the proportion assumed to be due to error. \code{error.sd} sets the standard error of the measurement error directly.
} \section{Methods (by class)}{ \itemize{ \item \code{moPrep(molist)}: Alter existing moPrep output \item \code{moPrep(default)}: Default call to moPrep }} \examples{ data(africa) m.out <- moPrep(africa, trade ~ trade, error.proportion = 0.1) a.out <- amelia(m.out, ts = "year", cs = "country") plot(a.out) m.out <- moPrep(africa, trade ~ trade, error.sd = 1) a.out <- amelia(m.out, ts = "year", cs = "country") } \seealso{ \code{\link{amelia}} } Amelia/man/combine.output.Rd0000644000176200001440000000161414335240021015453 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prep.r \name{combine.output} \alias{combine.output} \title{Combine Multiple Amelia Output Lists} \usage{ combine.output(...) } \arguments{ \item{...}{a list of Amelia output lists from runs of Amelia with the same arguments except the number of imputations.} } \description{ This function combines output lists from multiple runs of Amelia, where each run used the same arguments. The result is one list, formatted as if Amelia had been run once. } \details{ This function is useful for combining the output from Amelia runs that occurred at different times or in different sessions of R. It assumes that the arguments given to the runs of Amelia are the same except for \code{m}, the number of imputations, and it uses the arguments from the first output list as the arguments for the combined output list. } \keyword{utilities} Amelia/man/freetrade.Rd0000644000176200001440000000165314335240021014444 0ustar liggesusers\name{freetrade} \docType{data} \alias{freetrade} \title{Trade Policy and Democracy in 9 Asian States} \description{Economic and political data on nine developing countries in Asia from 1980 to 1999. This dataset includes 9 variables including year, country, average tariff rates, Polity IV score, total population, gross domestic product per capita, gross international reserves, a dummy variable for if the country had signed an IMF agreement in that year, a measure of financial openness, and a measure of US hegemony. These data were used in Milner and Kubota (2005).} \usage{freetrade} \format{A data frame with 10 variables and 171 observations.} \source{World Bank, World Trade Organization, Polity IV and others.} \references{Helen Milner and Keiko Kubota (2005), ``Why the move to free trade? Democracy and trade policy in the developing countries.'' \emph{International Organization}, Vol 59, Issue 1.} \keyword{datasets} Amelia/man/mi.combine.Rd0000644000176200001440000000377014335240053014532 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine.R \name{mi.combine} \alias{mi.combine} \title{Combine results from analyses on imputed data sets} \usage{ mi.combine(x, conf.int = FALSE, conf.level = 0.95) } \arguments{ \item{x}{List of output from statistical models estimated on different imputed data sets, as outputted by \code{with(a.out, expr)} where \code{a.out} is the output of a call to \code{amelia}.} \item{conf.int}{Logical indicating if confidence intervals should be computed for each quantity of interest (default is \code{FALSE}).} \item{conf.level}{The confidence level to use for the confidence interval if \code{conf.level = TRUE}. 
Defaults to 0.95, which corresponds to a 95 percent confidence interval.} } \value{ Returns a \code{tibble} that contains: \describe{ \item{term}{Name of the coefficient or parameter.} \item{estimate}{Estimate of the parameter, averaging across imputations.} \item{std.error}{Standard error of the estimate, accounting for imputation uncertainty.} \item{statistic}{Value of the t-statistic for the estimated parameter.} \item{p.value}{p-value associated with the test of a null hypothesis that the true coefficient is zero. Uses the t-distribution with imputation-adjusted degrees of freedom.} \item{df}{Imputation-adjusted degrees of freedom for each parameter.} \item{r}{Relative increase in variance due to nonresponse.} \item{miss.info}{Estimated fraction of missing information.} \item{conf.low}{Lower bound of the estimated confidence interval. Only present if \code{conf.int = TRUE}.} \item{conf.high}{Upper bound of the estimated confidence interval. Only present if \code{conf.int = TRUE}.} } } \description{ Combine results from statistical models run on multiply imputed data sets using the so-called Rubin rules. } \examples{ data(africa) a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") imp.mods <- with(a.out, lm(gdp_pc ~ infl + trade)) mi.combine(imp.mods, conf.int = TRUE) } \author{ Matt Blackwell } Amelia/man/AmeliaView.Rd0000644000176200001440000000061314335240021014521 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ameliagui.r \name{AmeliaView} \alias{AmeliaView} \title{Interactive GUI for Amelia} \usage{ AmeliaView() } \description{ Brings up the AmeliaView graphical interface, which allows users to load datasets, manage options and run Amelia from a traditional windowed environment. } \details{ Requires the tcltk package. } Amelia/man/summary.amelia.Rd0000644000176200001440000000102414335240021015421 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.amelia.R \name{summary.amelia} \alias{summary.amelia} \title{Summary of an Amelia object} \usage{ \method{summary}{amelia}(object, ...) } \arguments{ \item{object}{an object of class \code{amelia}. Typically, an output from the function \code{amelia}.} \item{...}{further arguments.} } \description{ Returns summary information from the Amelia run along with missingness information. } \seealso{ \code{\link{amelia}}, \code{\link{plot.amelia}} } Amelia/man/disperse.Rd0000644000176200001440000000464114335240021014321 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diag.r \name{disperse} \alias{disperse} \title{Overdispersed starting values diagnostic for multiple imputation} \usage{ disperse( output, m = 5, dims = 1, p2s = 0, frontend = FALSE, ..., xlim = NULL, ylim = NULL ) } \arguments{ \item{output}{output from the function \code{amelia}.} \item{m}{the number of EM chains to run from overdispersed starting values.} \item{dims}{the number of principal components of the parameters to display and assess convergence on (up to 2).} \item{p2s}{an integer that controls printing to screen.
0 (default) indicates no printing, 1 indicates normal screen output and 2 indicates diagnostic output.} \item{frontend}{a logical value used internally for the Amelia GUI.} \item{...}{further graphical parameters for the plot.} \item{xlim}{limits of the plot in the horizontal dimension.} \item{ylim}{limits of the plot in the vertical dimension.} } \description{ A visual diagnostic of EM convergence from multiple overdispersed starting values for an output from \code{amelia}. } \details{ This function tracks the convergence of \code{m} EM chains which start from various overdispersed starting values. This plot should give some indication of the sensitivity of the EM algorithm to the choice of starting values in the imputation model in \code{output}. If all of the lines converge to the same point, then we can be confident that starting values are not affecting the EM algorithm. As the parameter space of the imputation model is of high dimension, this plot tracks how the first (and second if \code{dims} is 2) principal component(s) change over the iterations of the EM algorithm. Thus, the plot is a lower dimensional summary of the convergence and is subject to all the drawbacks inherent in said summaries. For \code{dims==1}, the function plots a horizontal line at the position where the first EM chain converges. Thus, we are checking that the other chains converge close to that horizontal line. For \code{dims==2}, the function draws a convex hull around the point of convergence for the first EM chain. The hull is scaled to be within the tolerance of the EM algorithm. Thus, we should check that the other chains end up in this hull. } \seealso{ Other imputation diagnostics are \code{\link{compare.density}}, \code{\link{overimpute}}, and \code{\link{tscsPlot}} } Amelia/man/ameliagui.Rd0000644000176200001440000000061014335240021014430 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ameliagui.r \name{ameliagui} \alias{ameliagui} \alias{main.close} \title{Interactive GUI for Amelia} \usage{ AmeliaView() } \description{ Brings up the AmeliaView graphical interface, which allows users to load datasets, manage options and run Amelia from a traditional windowed environment. } \keyword{utilities} Amelia/man/ameliabind.Rd0000644000176200001440000000231214335240021014561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emb.r \name{ameliabind} \alias{ameliabind} \title{Combine multiple runs of Amelia} \usage{ ameliabind(...) } \arguments{ \item{...}{one or more objects of class \code{amelia} with the same arguments and created from the same data.} } \value{ An object of class \code{amelia}. } \description{ Combines multiple runs of \code{amelia} with the same arguments and data into one \code{amelia} object. } \details{ \code{ameliabind} will combine multiple runs of \code{amelia} into one object so that you can utilize diagnostics and modelling on all the imputations together. This function is useful for combining multiple runs of \code{amelia} run on parallel machines. Note that \code{ameliabind} only checks that the arguments and the missingness matrix are identical. Thus, it could be fooled by two datasets that are identical up to a transformation of one variable.
} \examples{ data(africa) a1.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") a2.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") all.out <- ameliabind(a1.out, a2.out) summary(all.out) plot(all.out) } \seealso{ \code{\link{amelia}} } Amelia/man/missmap.Rd0000644000176200001440000000616014335240021014152 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/missmap.R \name{missmap} \alias{missmap} \title{Missingness Map} \usage{ missmap( obj, vars, legend = TRUE, col, main, y.cex = 0.8, x.cex = 0.8, y.labels, y.at, csvar = NULL, tsvar = NULL, rank.order = TRUE, margins = c(5, 5), gap.xaxis = 1, x.las = 2, ... ) } \arguments{ \item{obj}{an object of class "amelia"; typically output from the function \code{amelia}, a matrix or a dataframe.} \item{vars}{a vector of column numbers or column names of the data to include in the plot. The default is to plot all variables.} \item{legend}{should a legend be drawn? (True or False)} \item{col}{a vector of length two where the first element specifies the color for missing cells and the second element specifies the color for observed cells.} \item{main}{main title of the plot. Defaults to "Missingness Map".} \item{y.cex}{expansion for the variable names on the x-axis.} \item{x.cex}{expansion for the unit names on the y-axis.} \item{y.labels}{a vector of row labels to print on the y-axis} \item{y.at}{a vector of the same length as \code{y.labels} with row numbers associated with the labels.} \item{csvar}{column number or name of the variable corresponding to the unit indicator. Only used when the \code{obj} is not of class \code{amelia}.} \item{tsvar}{column number or name of the variable corresponding to the time indicator. Only used when the \code{obj} is not of class \code{amelia}.} \item{rank.order}{a logical value. If \code{TRUE}, the default, then the order of the variables along the x-axis is sorted by the percent missing (from highest to lowest). If \code{FALSE}, it is simply the order of the variables in the data.} \item{margins}{a vector of length two that specifies the bottom and left margins of the plot. Useful for when variable names or row names are long.} \item{gap.xaxis}{value to pass to the \code{gap.axis} argument of the \code{axis} function that plots the x-axis. See \code{\link{axis}} for more details. Ignored on R versions less than 4.0.0.} \item{x.las}{value of the \code{las} argument to pass to the \code{\link{axis}} function creating the x-axis.} \item{...}{further graphical arguments.} } \description{ Plots a missingness map showing where missingness occurs in the dataset passed to \code{amelia}. } \details{ \code{missmap} draws a map of the missingness in a dataset using the \code{image} function. The columns are reordered to put the most missing variable farthest to the left. The rows are reordered to a unit-period order if the \code{ts} and \code{cs} arguments were passed to \code{amelia}. If not, the rows are not reordered. The \code{y.labels} and \code{y.at} commands can be used to associate labels with rows in the data to identify them in the plot. The y-axis is internally inverted so that the first row of the data is associated with the top-most row of the missingness map. The values of \code{y.at} should refer to the rows of the data, not to any point on the plotting region.
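A minimal sketch of typical usage, reusing the \code{africa} example that appears elsewhere in this documentation:

\preformatted{
data(africa)
a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
## default map: variables ordered by percent missing, rows in unit-period order
missmap(a.out)
}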
} \seealso{ \code{\link{compare.density}}, \code{\link{overimpute}}, \code{\link{tscsPlot}}, \code{\link{image}}, \code{\link{heatmap}} } Amelia/man/overimpute.Rd0000644000176200001440000000607114335240021014701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diag.r \name{overimpute} \alias{overimpute} \title{Overimputation diagnostic plot} \usage{ overimpute( output, var, draws = 20, subset, legend = TRUE, xlab, ylab, main, frontend = FALSE, ... ) } \arguments{ \item{output}{output from the function \code{amelia}.} \item{var}{column number or variable name of the variable to overimpute.} \item{draws}{the number of draws per imputed dataset to generate overimputations. Total number of simulations will be \code{m * draws} where \code{m} is the number of imputations.} \item{subset}{an optional vector specifying a subset of observations to be used in the overimputation.} \item{legend}{a logical value indicating if a legend should be plotted.} \item{xlab}{the label for the x-axis. The default is "Observed Values."} \item{ylab}{the label for the y-axis. The default is "Imputed Values."} \item{main}{main title of the plot. The default is to smartly title the plot using the variable name.} \item{frontend}{a logical value used internally for the Amelia GUI.} \item{...}{further graphical parameters for the plot.} } \value{ A list that contains (1) the row in the original data (\code{row}), (2) the observed value of that observation (\code{orig}), (3) the mean of the overimputations (\code{mean.overimputed}), (4) the lower bound of the 95\% confidence interval of the overimputations (\code{lower.overimputed}), (5) the upper bound of the 95\% confidence interval of the overimputations (\code{upper.overimputed}), (6) the fraction of the variables that were missing for that observation in the original data (\code{prcntmiss}), and (7) a matrix of the raw overimputations, with observations in rows and the different draws in columns (\code{overimps}). } \description{ Treats each observed value as missing and imputes from the imputation model from \code{amelia} output. } \details{ This function temporarily treats each observed value in \code{var} as missing and imputes that value based on the imputation model of \code{output}. The dots are the mean imputation and the vertical lines are the 90\% confidence intervals for imputations of each observed value. The diagonal line is the \eqn{y=x} line. If all of the imputations were perfect, then our points would all fall on the line. A good imputation model would have about 90\% of the confidence intervals containing the truth; that is, about 90\% of the vertical lines should cross the diagonal. The color of the vertical lines displays the fraction of missing observations in the pattern of missingness for that observation. The legend codes this information. Obviously, the imputations will be much tighter if there are more observed covariates to use to impute that observation. The \code{subset} argument evaluates in the environment of the data. That is, it can but is not required to refer to variables in the data frame as if it were attached. } \seealso{ Other imputation diagnostics are \code{\link{compare.density}}, \code{\link{disperse}}, and \code{\link{tscsPlot}}.
} Amelia/man/with.amelia.Rd0000644000176200001440000000166714335240053014717 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/with.R \name{with.amelia} \alias{with.amelia} \title{Execute commands within each imputed data set} \usage{ \method{with}{amelia}(data, expr, ...) } \arguments{ \item{data}{imputation output from the \code{amelia} function.} \item{expr}{expression to evaluate in each imputed data set in \code{data}.} \item{...}{arguments to be passed to (future) methods.} } \value{ a list the same length as \code{data$imputations} that contains the output of the expression as evaluated in each imputed data set of \code{data}. } \description{ Evaluate an R expression in the environments constructed from the imputed data sets of a call to the \code{amelia} function. } \examples{ data(africa) a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") imp.mods <- with(a.out, lm(gdp_pc ~ infl + trade)) mi.combine(imp.mods, conf.int = TRUE) } \author{ Matt Blackwell } Amelia/man/africa.Rd0000644000176200001440000000116114335240021013722 0ustar liggesusers\name{africa} \docType{data} \alias{africa} \title{Economic and Political Indicators in 6 African States} \description{Data on a few economic and political variables in six African States from 1972-1991. The variables are year, country name, Gross Domestic Product per capita, inflation, trade as a percentage of GDP, a measure of civil liberties and total population. The data is from the Africa Research Program. A few cells are missing.} \usage{africa} \format{A data frame with 7 variables and 120 observations.} \source{Africa Research Program \url{https://scholar.harvard.edu/rbates/data}} \keyword{datasets} Amelia/man/tscsPlot.Rd0000644000176200001440000000523514335240021014316 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diag.r \name{tscsPlot} \alias{tscsPlot} \title{Plot observed and imputed time-series for a single cross-section} \usage{ tscsPlot( output, var, cs, draws = 100, conf = 0.9, misscol = "red", obscol = "black", xlab, ylab, main, pch, ylim, xlim, frontend = FALSE, plotall = FALSE, nr, nc, pdfstub, ... ) } \arguments{ \item{output}{output from the function \code{amelia}.} \item{var}{the column number or variable name of the variable to plot.} \item{cs}{the name (or level) of the cross-sectional unit to plot. May be a vector of names, which will panel a window of plots} \item{draws}{the number of imputations on which to base the confidence intervals.} \item{conf}{the confidence level of the confidence intervals to plot for the imputed values.} \item{misscol}{the color of the imputed values and their confidence intervals.} \item{obscol}{the color of the points for observed units.} \item{xlab}{x axis label} \item{ylab}{y axis label} \item{main}{overall plot title} \item{pch}{point shapes for the plot.} \item{ylim}{y limits (y1, y2) of the plot.} \item{xlim}{x limits (x1, x2) of the plot.} \item{frontend}{a logical value for use with the \code{AmeliaView} GUI.} \item{plotall}{a logical value that provides a shortcut for plotting all unique values of the level. A shortcut for the \code{cs} argument, a TRUE value overwrites any \code{cs} argument.} \item{nr}{the number of rows of plots to use when plotting multiple cross-sectional units. The default value will try to minimize this value to create a roughly square representation, up to a value of four.
If all plots do not fit on the window, a new window will be started.} \item{nc}{the number of columns of plots to use. See \code{nr}} \item{pdfstub}{a stub string used to write pdf copies of each window created by the plot. The default is not to write pdf output, but any string value will turn on pdf output to the local working directory. If the stub is \code{mystub}, then plots will be saved as \code{mystub1.pdf}, \code{mystub2.pdf}, etc.} \item{...}{further graphical parameters for the plot.} } \description{ Plots a time series for a given variable in a given cross-section and provides confidence intervals for the imputed values. } \details{ The \code{cs} argument should be a value from the variable set to the \code{cs} argument in the \code{amelia} function for this output. This function will not work if the \code{ts} and \code{cs} arguments were not set in the \code{amelia} function. If an observation has been overimputed, \code{tscsPlot} will plot both an observed and an imputed value. } Amelia/DESCRIPTION0000644000176200001440000000420614336230455013157 0ustar liggesusersPackage: Amelia Version: 1.8.1 Date: 2022-11-18 Title: A Program for Missing Data Authors@R: c( person("James", "Honaker", email = "james@hona.kr", role = c("aut")), person("Gary", "King", email = "king@harvard.edu", role = c("aut")), person("Matthew", "Blackwell", email = "mblackwell@gov.harvard.edu", role = c("aut", "cre"), comment = c(ORCID = "0000-0002-3689-9527")) ) Depends: R (>= 3.0.2), Rcpp (>= 0.11) Imports: foreign, utils, grDevices, graphics, methods, stats, rlang LinkingTo: Rcpp (>= 0.11), RcppArmadillo Description: A tool that "multiply imputes" missing data in a single cross-section (such as a survey), from a time series (like variables collected for each year in a country), or from a time-series-cross-sectional data set (such as collected by years for each of several countries). Amelia II implements our bootstrapping-based algorithm that gives essentially the same answers as the standard IP or EMis approaches, is usually considerably faster than existing approaches and can handle many more variables. Unlike Amelia I and other statistically rigorous imputation software, it virtually never crashes (but please let us know if you find to the contrary!). The program also generalizes existing approaches by allowing for trends in time series across observations within a cross-sectional unit, as well as priors that allow experts to incorporate beliefs they have about the values of missing cells in their data. Amelia II also includes useful diagnostics of the fit of multiple imputation models. The program works from the R command line or via a graphical user interface that does not require users to know R. 
License: GPL (>= 2) URL: https://gking.harvard.edu/amelia Suggests: tcltk, broom, rmarkdown, knitr VignetteBuilder: knitr RoxygenNote: 7.2.1 Encoding: UTF-8 LazyData: true NeedsCompilation: yes Packaged: 2022-11-18 19:28:14 UTC; mblackwell Author: James Honaker [aut], Gary King [aut], Matthew Blackwell [aut, cre] (<https://orcid.org/0000-0002-3689-9527>) Maintainer: Matthew Blackwell <mblackwell@gov.harvard.edu> Repository: CRAN Date/Publication: 2022-11-19 19:40:29 UTC Amelia/build/0000755000176200001440000000000014335756316012556 5ustar liggesusersAmelia/build/vignette.rds0000644000176200001440000000047514335756316015113 0ustar liggesusers [binary RDS payload omitted] Amelia/build/partial.rdb0000644000176200001440000000007514335756301014677 0ustar liggesusers [binary RDB payload omitted] Amelia/tests/0000755000176200001440000000000014335240021012576 5ustar liggesusersAmelia/tests/overimp.R0000644000176200001440000000035614335240021014406 0ustar liggesuserslibrary(Amelia) data(africa) af2 <- na.omit(africa) oi <- matrix(c(1:10,rep(4, 10)), nrow = 10) a.out <- amelia(af2, cs = 2, overimp = oi) if (a.out$imputations[[1]][1,4] == a.out$imputations[[2]][1,4]) { stop("overimp is broken") } Amelia/tests/moPrep-test.R0000644000176200001440000000026514335240021015143 0ustar liggesuserslibrary(Amelia) x <- rnorm(100) s <- x + rnorm(100, 0, 0.1) vv <- rep(0.1^2/var(s), 100) df <- data.frame(x, s) mop <- moPrep(df,s ~ s,error.proportion = vv) a.out <- amelia(mop) Amelia/tests/bounds.R0000644000176200001440000000062314335240021014214 0ustar liggesuserslibrary(Amelia) data(freetrade) bds <- matrix(c(3, 30, 32), nrow = 1, ncol = 3) set.seed(12345) a.out.bds <- amelia(freetrade, ts = "year", cs = "country", bounds = bds, max.resample = 10, p2s = 0) out <- range(a.out.bds$imputations$imp1[is.na(freetrade[,3]),3]) if (out[1] < 30) { stop("lower bounds not working") } if (out[2] > 32) { stop("upper bounds not working") } Amelia/src/0000755000176200001440000000000014335756316012246 5ustar liggesusersAmelia/src/em.cpp0000644000176200001440000003776414335240021013350 0ustar liggesusers #include "em.h" #include <RcppArmadillo.h> // header name lost in extraction; RcppArmadillo assumed using namespace Rcpp ; SEXP emcore(SEXP xs, SEXP AMr1s, SEXP os, SEXP ms, SEXP ivec, SEXP thetas, SEXP tols, SEXP emburns, SEXP p2ss, SEXP empris, SEXP autos, SEXP alls, SEXP prs){ //, SEXP p2ss, SEXP prs, SEXP empris, SEXP fends, SEXP alls, SEXP autos, SEXP emburns// NumericMatrix xr(xs); NumericMatrix thetar(thetas); NumericVector tol(tols); NumericMatrix AMr1r(AMr1s); NumericMatrix orr(os); NumericMatrix mr(ms); NumericVector ir(ivec); NumericVector emburn(emburns); NumericVector p2sr(p2ss); NumericVector emprir(empris); // NumericVector frontend(fends); NumericVector allthetas(alls); NumericVector autopri(autos); int p2s = p2sr(0), empri = emprir(0); int n = xr.nrow(), k = xr.ncol(); int const AMn = n; int npatt = orr.nrow(); int cvalue = 1; arma::mat x(xr.begin(), n, k, false); arma::mat thetaold(thetar.begin(), k + 1, k + 1, false); arma::mat AMr1(AMr1r.begin(), n, k, false); arma::mat obsmat(orr.begin(), npatt, k, false); arma::mat mismat(mr.begin(), npatt, k, false); arma::vec ii(ir.begin(), n, false); //Rcpp::Rcout << "Set up arma things. " << std::endl; // Bring out your priors.
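  // The optional cell-level priors arrive from R as a matrix whose rows refer
  // to individual cells of the data. In the E-step below, rows of the
  // bootstrapped data that carry priors combine them with the conditional
  // distribution implied by the current parameters: wvar = inv(diagLambda +
  // solveSigma) is the posterior covariance for that row's missing cells and
  // muMiss is the corresponding precision-weighted posterior mean, replacing
  // the plain conditional-mean imputation used for rows without priors.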
NumericMatrix prr; int npr, knr; arma::mat priors; if (!Rf_isNull(prs)) { prr = NumericMatrix(prs); npr = prr.nrow(); knr = prr.ncol(); priors = arma::mat(prr.begin(), npr, knr, false); } int count = 0; int is, isp; //int nparam = arma::accu(arma::find(arma::trimatu(thetaold))); arma::uvec upperpos = arma::find(arma::trimatu(arma::ones(k+1,k+1))); arma::mat xplay = arma::zeros(AMn,k); arma::mat hmcv(k,k); arma::mat imputations(2,k); arma::vec music(k); arma::mat thetanew(k+1, k+1); arma::mat theta(k+1, k+1); arma::vec sweeppos(k+1); arma::uvec mispos; arma::uvec thetaleft; arma::vec etest; arma::mat iterHist(1,3); arma::mat thetaHolder(upperpos.n_elem,1); thetaHolder.col(0) = thetaold.elem(upperpos); iterHist.zeros(); sweeppos.zeros(); hmcv.zeros(); music.zeros(); int st, ss, singFlag, monoFlag; if (arma::accu(mismat.row(0)) == 0) { st = 1; } else { st = 0; } // if (empri > 0) { arma::mat hold = empri * arma::eye(k,k); arma::mat simple(k,k); //} if (p2s > 0) Rcpp::Rcout << std::endl; //Rcpp::Rcout << "Starting loop. " << std::endl; while ( ( (cvalue > 0) || (count < emburn(0)) ) && ( (count < emburn(1)) || (emburn(1) < 1))) { count++; hmcv.zeros(k,k); music.zeros(k); xplay.zeros(AMn,k); if (p2s > 0) { if (count < 10) { Rcpp::Rcout << " " << count; } else { Rcpp::Rcout << " " << count; } if (count % 20 == 0) { Rcpp::Rcout << std::endl; } } if (st == 1) { xplay.rows(0,ii(1)-2) = x.rows(0,ii(1)-2); } if (Rf_isNull(prs)) { for (ss = st; ss < obsmat.n_rows; ss++) { is = ii(ss)-1; isp = ii(ss+1)-2; theta = thetaold; sweeppos.zeros(); sweeppos(arma::span(1,k)) = arma::trans(obsmat.row(ss)); sweep(theta, sweeppos); imputations.zeros(); imputations.set_size(isp - is, k); imputations = x.rows(is, isp) * theta(arma::span(1,k), arma::span(1,k)); imputations.each_row() += theta(0, arma::span(1,k)); imputations = AMr1.rows(is, isp) % imputations; xplay.rows(is, isp) = x.rows(is, isp) + imputations; mispos = arma::find(mismat.row(ss)); hmcv(mispos, mispos) += (1+ isp - is) * theta(mispos+1, mispos+1); } } else { for (ss = st; ss < obsmat.n_rows; ss++) { is = ii(ss)-1; isp = ii(ss+1)-2; theta = thetaold; sweeppos.zeros(); sweeppos(arma::span(1,k)) = arma::trans(obsmat.row(ss)); sweep(theta, sweeppos); imputations.zeros(); imputations.set_size(isp - is, k); imputations = x.rows(is, isp) * theta(arma::span(1,k), arma::span(1,k)); imputations.each_row() += theta(0, arma::span(1,k)); imputations = AMr1.rows(is, isp) % imputations; mispos = arma::find(mismat.row(ss)); arma::mat solveSigma = arma::inv(theta(mispos + 1, mispos + 1)); arma::mat diagLambda = arma::zeros(mispos.n_elem, mispos.n_elem); for (int p = 0; p <= isp-is; p++) { arma::uvec prRow = arma::find(priors.col(0) == p + is + 1); if (prRow.n_elem > 0) { arma::uvec pu(1); pu(0) = p; arma::mat thisPrior = priors.rows(prRow); arma::uvec theseCols = arma::conv_to::from(thisPrior.col(1)-1); arma::vec prHolder = arma::zeros(k); prHolder.elem(theseCols) = thisPrior.col(3); diagLambda.diag() = prHolder.elem(mispos); arma::mat wvar = arma::inv(diagLambda + solveSigma); prHolder.elem(theseCols) = thisPrior.col(2); arma::mat firstInner = solveSigma * arma::trans(imputations(pu, mispos)); arma::mat secondInner = prHolder.elem(mispos); arma::mat muMiss = wvar * (secondInner + firstInner); imputations(pu, mispos) = arma::trans(muMiss); hmcv(mispos, mispos) += wvar; } else { hmcv(mispos, mispos) += theta(mispos + 1, mispos + 1); } } xplay.rows(is, isp) = x.rows(is, isp) + imputations; } } hmcv += xplay.t() * xplay; music += arma::trans(arma::sum(xplay)); if (empri 
> 0) { simple = (music * arma::trans(music))/AMn; hmcv = (( (double)AMn/(AMn+empri+k+2)) * (hmcv - simple + hold)) + simple; } thetanew(0,0) = AMn; thetanew(0, arma::span(1,k)) = arma::trans(music); thetanew(arma::span(1,k), 0) = music; thetanew(arma::span(1,k), arma::span(1,k)) = hmcv; thetanew = thetanew/AMn; thetanew = symmatu(thetanew); sweeppos.zeros(); sweeppos(0) = 1; sweep(thetanew, sweeppos); theta = arma::abs(thetanew - thetaold); thetaleft = arma::find(arma::trimatu(theta) > tol(0)); cvalue = thetaleft.n_elem; thetaold = thetanew; if (cvalue > iterHist(count-1,0) && count > 20) { monoFlag = 1; if (autopri(0) > 0) { if (arma::accu(iterHist(arma::span(count - 20, count - 1), 2)) > 3) { if (empri < (autopri(0) * (double)n)) { empri = empri + 0.01 * (double)n; } } } } else { monoFlag = 0; } etest = arma::eig_sym(thetaold(arma::span(1,k), arma::span(1,k))); if (arma::accu(etest <= 0)) { singFlag = 1; } else { singFlag = 0; } if (p2s > 1) { Rcpp::Rcout << "(" << cvalue << ")"; if (monoFlag == 1) { Rcpp::Rcout << "*"; } if (singFlag == 1) { Rcpp::Rcout << "!"; } } iterHist.resize(iterHist.n_rows+1, iterHist.n_cols); iterHist(count, 0) = cvalue; iterHist(count, 1) = monoFlag; iterHist(count, 2) = singFlag; if (allthetas(0) == 1) { thetaHolder.resize(thetaHolder.n_rows, thetaHolder.n_cols + 1); thetaHolder.col(count) = thetaold.elem(upperpos); } } iterHist.shed_row(0); if (p2s > 0) Rcpp::Rcout << std::endl; List z; if (allthetas(0) == 1) { thetaHolder.shed_row(0); z = List::create(Rcpp::Named("thetanew") = thetaHolder, Rcpp::Named("iter.hist") = iterHist); } else { z = List::create(Rcpp::Named("theta") = thetaold, Rcpp::Named("iter.hist") = iterHist); } return z ; } // void sweep(arma::mat& g, arma::vec m) { // int p = g.n_rows, h, j, i; // arma::uvec k = arma::find(m); // if (k.n_elem == p) { // g = -arma::inv(g); // } else { // for (h = 0; h < k.n_rows; h++) { // for (j = 0; j < p; j++) { // for (i = 0; i <= j; i++) { // if (i == k(h)) { // if (j == k(h)) { // g(i,j) = -1/g(i,j); // //Rcpp::Rcout << k(h) << ": " << "(i,j): (" <(k+1,k+1)))); arma::mat xplay = arma::zeros(AMn,k); arma::mat imputations(2,k); arma::mat theta(k+1, k+1); arma::mat junk(2,k); arma::mat Ci(k, k); arma::vec sweeppos(k+1); arma::uvec mispos; sweeppos.zeros(); int st, ss; if (arma::accu(mismat.row(0)) == 0) { st = 1; } else { st = 0; } if (st == 1) { xplay.rows(0,ii(1)-2) = x.rows(0,ii(1)-2); } if (Rf_isNull(prs)) { for (ss = st; ss < obsmat.n_rows; ss++) { is = ii(ss)-1; isp = ii(ss+1)-2; theta = thetaold; sweeppos.zeros(); sweeppos(arma::span(1,k)) = arma::trans(obsmat.row(ss)); sweep(theta, sweeppos); mispos = arma::find(mismat.row(ss)); Ci.zeros(k, k); Ci(mispos, mispos) = chol(theta(mispos+1, mispos + 1)); junk = Rcpp::rnorm((isp - is + 1)* k, 0, 1); junk.reshape(isp - is +1, k); junk = junk * Ci; imputations.zeros(); imputations.set_size(isp - is, k); imputations = x.rows(is, isp) * theta(arma::span(1,k), arma::span(1,k)); imputations.each_row() += theta(0, arma::span(1,k)); imputations = AMr1.rows(is, isp) % imputations; if (Rf_isNull(bdss)) { xplay.rows(is, isp) = x.rows(is, isp) + imputations + junk; } else { xplay.rows(is, isp) = resampler(x.rows(is, isp), Ci, imputations, mispos, bounds, maxsamples); } } } else { for (ss = st; ss < obsmat.n_rows; ss++) { is = ii(ss)-1; isp = ii(ss+1)-2; theta = thetaold; sweeppos.zeros(); sweeppos(arma::span(1,k)) = arma::trans(obsmat.row(ss)); sweep(theta, sweeppos); junk.zeros(isp - is + 1, k); junk = Rcpp::rnorm((isp - is + 1)* k, 0, 1); junk.reshape(isp - is +1, 
k); imputations.zeros(); imputations.set_size(isp - is, k); imputations = x.rows(is, isp) * theta(arma::span(1,k), arma::span(1,k)); imputations.each_row() += theta(0, arma::span(1,k)); imputations = AMr1.rows(is, isp) % imputations; mispos = arma::find(mismat.row(ss)); arma::mat solveSigma = arma::inv(theta(mispos + 1, mispos + 1)); arma::mat diagLambda = arma::zeros(mispos.n_elem, mispos.n_elem); for (int p = 0; p <= isp-is; p++) { arma::uvec prRow = arma::find(priors.col(0) == p + is + 1); Ci.zeros(k,k); if (prRow.n_elem > 0) { arma::uvec pu(1); pu(0) = p; arma::mat thisPrior = priors.rows(prRow); arma::uvec theseCols = arma::conv_to::from(thisPrior.col(1)-1); arma::vec prHolder = arma::zeros(k); prHolder.elem(theseCols) = thisPrior.col(3); diagLambda.diag() = prHolder.elem(mispos); arma::mat wvar = arma::inv(diagLambda + solveSigma); prHolder.elem(theseCols) = thisPrior.col(2); arma::mat muMiss = wvar * (prHolder.elem(mispos) + solveSigma * arma::trans(imputations(pu, mispos))); imputations(pu, mispos) = arma::trans(muMiss); Ci(mispos, mispos) = chol(wvar); } else { Ci(mispos, mispos) = chol(theta(mispos + 1, mispos + 1)); } junk.row(p) = junk.row(p) * Ci; if (Rf_isNull(bdss)) { xplay.row(is + p) = x.row(is + p) + imputations.row(p) + junk.row(p); } else { xplay.row(is + p) = resampler(x.row(is + p), Ci, imputations.row(p), mispos, bounds, maxsamples); } } } } return wrap(xplay); } arma::mat resampler(arma::mat x, arma::mat ci, arma::mat imps, arma::uvec mss, arma::mat bounds, int maxsample) { int nss = x.n_rows, k = x.n_cols; arma::mat ub(nss, k); arma::mat lb(nss, k); arma::umat utest; arma::umat ltest; ub.fill(arma::datum::inf); lb.fill(-arma::datum::inf); arma::mat xp = arma::zeros(nss, k); arma::mat junk = Rcpp::rnorm(nss * k, 0, 1); junk.reshape(nss, k); junk = junk * ci; int nb = 0, bdvar; for (int j = 0; j < bounds.n_rows; j++) { bdvar = (int) bounds(j,0) - 1; if (arma::accu(mss == bdvar)) { nb++; lb.col(bdvar) = arma::ones(nss) * bounds(j,1); ub.col(bdvar) = arma::ones(nss) * bounds(j,2); } } if (nb == 0) { return x + imps + junk; } //Rcpp::Rcout << ub << std::endl; int samp = 0; arma::colvec done = arma::zeros(nss); arma::colvec left = arma::ones(nss); arma::uvec finished; while ((arma::accu(left) > 0) & (samp < maxsample)) { samp++; utest = (imps + junk) > ub; ltest = (imps + junk) < lb; done += left % (arma::sum(utest + ltest, 1) == 0); finished = arma::find(left % (arma::sum(utest + ltest, 1) == 0)); left -= left % (arma::sum(utest + ltest, 1) == 0); ub.rows(finished).fill(arma::datum::inf); lb.rows(finished).fill(-arma::datum::inf); xp.rows(finished) = x.rows(finished) + imps.rows(finished) + junk.rows(finished); junk = Rcpp::rnorm(nss * k, 0, 1); junk.reshape(nss, k); junk = junk * ci; } if (arma::accu(left) > 0) { xp.rows(arma::find(left)) = x.rows(arma::find(left)) + imps.rows(arma::find(left)) + junk.rows(arma::find(left)); utest = (imps + junk) > ub; ltest = (imps + junk) < lb; arma::uvec ufails = arma::find(utest); arma::uvec lfails = arma::find(ltest); xp.elem(ufails) = ub.elem(ufails); xp.elem(lfails) = lb.elem(lfails); } return xp; } Amelia/src/init.c0000644000176200001440000000113414335240021013331 0ustar liggesusers#include #include #include // for NULL #include /* .Call calls */ extern SEXP ameliaImpute(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); extern SEXP emcore(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); static const R_CallMethodDef CallEntries[] = { {"ameliaImpute", (DL_FUNC) &ameliaImpute, 9}, {"emcore", 
(DL_FUNC) &emcore, 13}, {NULL, NULL, 0} }; void R_init_Amelia(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } Amelia/src/em.h0000644000176200001440000000201714335240021012775 0ustar liggesusers#ifndef _Amelia_EMCORE_H #define _Amelia_EMCORE_H #include /* * note : RcppExport is an alias to `extern "C"` de ned by Rcpp. * * It gives C calling convention to the rcpp hello world function so that * it can be called from .Call in R. Otherwise, the C++ compiler mangles the * name of the function and .Call can't nd it. * * It is only useful to use RcppExport when the function is intended to be called * by .Call. See the thread http://thread.gmane.org/gmane.comp.lang.r.rcpp/649/focus=672 * on Rcpp-devel for a misuse of RcppExport */ RcppExport SEXP emcore(SEXP xs, SEXP AMr1s, SEXP os, SEXP ms, SEXP is, SEXP thetas, SEXP tols, SEXP emburn, SEXP p2ss, SEXP empris, SEXP autos, SEXP alls, SEXP prs) ; void sweep(arma::mat& g, arma::vec m); RcppExport SEXP ameliaImpute(SEXP xs, SEXP AMr1s, SEXP os, SEXP ms, SEXP ivec, SEXP thetas, SEXP prs, SEXP bdss, SEXP maxres); arma::mat resampler(arma::mat x, arma::mat ci, arma::mat imps, arma::uvec mss, arma::mat bounds, int maxsample); #endif Amelia/src/Makevars0000644000176200001440000000015614335240021013721 0ustar liggesusersCXX_STD = CXX11 PKG_LIBS = `$(R_HOME)/bin/Rscript -e "Rcpp:::LdFlags()"` $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) Amelia/src/Makevars.win0000644000176200001440000000032514335240021014513 0ustar liggesusers## Use the R HOME indirection to support installations of multiple R version CXX_STD = CXX11 PKG_LIBS = $(shell "${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" -e "Rcpp:::LdFlags()") $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) Amelia/vignettes/0000755000176200001440000000000014335756316013467 5ustar liggesusersAmelia/vignettes/diagnostics.Rmd0000644000176200001440000003351314335240021016424 0ustar liggesusers--- title: "Multiple Imputation Diagnostics" date: "`r Sys.Date()`" link-citations: yes bibliography: amelia.bib output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Multiple Imputation Diagnostics} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, include = FALSE} knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ``` Amelia currently provides a number of diagnostic tools to inspect the imputations that are created. To illustrate these, we use the `freetrade` data from the package: ```{r amelia, results = "hide"} library(Amelia) data(freetrade) a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country", p2s = 0) ``` \subsubsection{Comparing Densities} One check on the plausibility of the imputation model is check the distribution of imputed values to the distribution of observed values. Obviously we cannot expect, *a priori*, that these distribution will be identical as the missing values may differ systematically from the observed value--this is fundamental reason to impute to begin with! Imputations with strange distributions or those that are far from the observed data may indicate that imputation model needs at least some investigation and possibly some improvement. The `plot.amelia()` method works on output from `amelia()` and, by default, shows for each variable a plot of the relative frequencies of the observed data with an overlay of the relative frequency of the imputed values. 
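For reference, the quantity plotted for the imputations -- the cell-wise mean of each missing value across the $m$ imputed datasets -- can be computed by hand. The following is a minimal sketch for the `tariff` variable (not evaluated here, and assuming the `a.out` object created above); the built-in method below does this, and the overlaying of densities, for every imputed variable:

```{r mean-imp-sketch, eval = FALSE}
## mean of each missing tariff cell across the m = 5 imputations
imps <- sapply(a.out$imputations, function(d) d$tariff)
tariff.mean.imp <- rowMeans(imps)
plot(density(tariff.mean.imp[is.na(freetrade$tariff)]),
     main = "Mean imputations of tariff")
```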
```{r plot_amelia}
plot(a.out, which.vars = 3:6)
```

where the argument `which.vars` indicates which of the variables to plot (in this case, we are taking the 3rd through the 6th variables).

The imputed curve (in red) plots the density of the *mean* imputation over the $m$ datasets. That is, for each cell that is missing in the variable, the diagnostic will find the mean of that cell across each of the $m$ datasets and use that value for the density plot. The black distributions are those of the observed data. When variables are completely observed, their densities are plotted in blue. These graphs will allow you to inspect how the density of imputations compares to the density of observed data. Some discussion of these graphs can be found in @AbaGelLev08. Minimally, these graphs can be used to check that the mean imputation falls within known bounds, when such bounds exist in certain variables or settings.

We can also use the function `compare.density()` directly to make these plots for an individual variable:

```{r compare_density}
compare.density(a.out, var = "signed")
```

## Overimpute {#sec_overimpute}

*Overimputing* is a technique we have developed to judge the fit of the imputation model. Because of the nature of the missing data mechanism, it is impossible to tell whether the mean prediction of the imputation model is close to the unobserved value that is trying to be recovered. By definition this missing data does not exist to create this comparison, and if it existed we would no longer need the imputations or care about their accuracy. However, a natural question the applied researcher will often ask is: how accurate are these imputed values?

Overimputing involves sequentially treating each of the *observed* values as if they had actually been missing. For each observed value in turn we then generate several hundred imputed values of that observed value, *as if it had been missing*. While $m=5$ imputations are sufficient for most analysis models, this large number of imputations allows us to construct a confidence interval of what the imputed value would have been, had any of the observed data been missing. We can then graphically inspect whether our observed data tends to fall within the region where it would have been imputed had it been missing. For example, we can run the overimputation diagnostic on our data by running

```{r}
overimpute(a.out, var = "tariff")
```

Our overimputation diagnostic runs this procedure through all of the observed values for a user-selected variable. We can graph the estimates of each observation against the true values of the observation. On this graph, a $y=x$ line indicates the line of perfect agreement; that is, if the imputation model were a perfect predictor of the true value, all the imputations would fall on this line. For each observation, `overimpute()` also plots 90% confidence intervals that allow the user to visually inspect the behavior of the imputation model. By checking how many of the confidence intervals cover the $y=x$ line, we can tell how often the imputation model can confidently predict the true value of the observation.

Occasionally, the overimputation can display unintuitive results. For example, different observations may have different numbers of observed covariates. If covariates that are useful to the prediction are themselves missing, then the confidence interval for this observation will be much larger (a single-cell version of this exercise is sketched below).
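To make the mechanics concrete, the same idea can be applied by hand to a single cell using the `overimp` argument to `amelia()`, which treats chosen observed cells as if they were missing. This is only a minimal sketch, not evaluated here: the choice of cell and the use of `m = 20` draws are arbitrary, and `overimpute()` does this far more systematically for every observed cell. Column 3 of `freetrade` is the `tariff` variable.

```{r overimp-by-hand, eval = FALSE}
## treat the first observed tariff cell as missing and re-impute it
row.oi <- which(!is.na(freetrade$tariff))[1]
truth  <- freetrade$tariff[row.oi]
a.oi <- amelia(freetrade, m = 20, ts = "year", cs = "country",
               overimp = matrix(c(row.oi, 3), nrow = 1), p2s = 0)
draws <- sapply(a.oi$imputations, function(d) d$tariff[row.oi])
quantile(draws, c(0.05, 0.95))  # compare this interval to `truth`
```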
In the extreme, there may be observations where the observed value we are trying to overimpute is *the only* observed value in that observation, and thus there is nothing left to impute that observation with when we pretend that it is missing, other than the mean and variance of that variable. In these cases, we should correctly expect the confidence interval to be very large. An example of this graph is shown here:

```{r overimp_bad, echo = FALSE, results = "hide"}
dd <- Amelia:::rmvnorm(50, mu = c(0.5, 0.5),
                       vcv = matrix(c(0.25^2, .06, .06, 0.25^2), 2, 2))
ddmiss <- sample(1:50, replace = FALSE, size = 10)
is.na(dd) <- ddmiss
aa.out <- amelia(dd, m = 5)
overimpute(aa.out, var = 2, main = "Observed versus Imputed Values")
```

In this simulated bivariate dataset, one variable is overimputed and the results displayed. The second variable is either observed, in which case the confidence intervals are very small and the imputations (yellow) are very accurate, or the second variable is missing, in which case this variable is being imputed simply from the mean and variance parameters, and the imputations (red) have a very large and encompassing spread. The circles represent the mean of all the imputations for that value. As the amount of missing information in a particular pattern of missingness increases, we expect the width of the confidence interval to increase. The color of the confidence interval reflects the percent of covariates observed in that pattern of missingness, as reflected in the legend at the bottom.

## Overdispersed Starting Values {#sec_overdisperse}

If the data given to `amelia()` has a poorly behaved likelihood, the EM algorithm can have problems finding a global maximum of the likelihood surface and starting values can begin to affect the imputations. Because the EM algorithm is deterministic, the point in the parameter space where you start it can impact where it ends, though this is irrelevant when the likelihood has only one mode. However, if the starting values of an EM chain are close to a local maximum, the algorithm may find this maximum, unaware that there is a global maximum farther away. To make sure that our imputations do not depend on our starting values, a good test is to run the EM algorithm from multiple, dispersed starting values and check their convergence. In a well-behaved likelihood, we will see all of these chains converging to the same value, and reasonably conclude that this is the likely global maximum. On the other hand, we might see our EM chain converging to multiple locations. The algorithm may also wander around portions of the parameter space that are not fully identified, such as a ridge of equal likelihood, as would happen, for example, if the same variable were accidentally included in the imputation model twice.

Amelia includes a diagnostic to run the EM chain from multiple starting values that are overdispersed from the estimated maximum. The overdispersion diagnostic will display a graph of the paths of each chain. Since these chains move through spaces that are in an extremely high number of dimensions and cannot be graphically displayed, the diagnostic reduces the dimensionality of the EM paths by showing the paths relative to the largest principal components of the final mode(s) that are reached. Users can choose between graphing the movement over the two largest principal components, or more simply the largest dimension with time (iteration number) on the $x$-axis. The number of EM chains can also be adjusted.
Once the diagnostic draws the graph, the user can visually inspect the results to check that all chains converge to the same point. For our original model, this is a simple call to `disperse()`:

```{r displd}
disperse(a.out, dims = 1, m = 5)
disperse(a.out, dims = 2, m = 5)
```

where `m` designates the number of places to start EM chains from and `dims` is the number of dimensions of the principal components to show.

In one dimension, the diagnostic plots movement of the chain on the $y$-axis and time, in the form of the iteration number, on the $x$-axis. The first plot shows a well-behaved likelihood, as the starting values all converge to the same point. The black horizontal line is the point where `amelia()` converges when it uses the default method for choosing the starting values. The diagnostic takes the end point of this chain as the possible maximum and disperses the starting values away from it to see if the chain will ever finish at another mode.

## Time-series Plots {#sec_tscsplots}

As discussed above, information about time trends and fixed effects can help produce better imputations. One way to check the plausibility of our imputation model is to see how it predicts missing values in a time series. If the imputations for the Malaysian tariff rate were drastically higher in 1990 than the observed years of 1989 or 1991, we might worry that there is a problem in our imputation model. Checking these time series is easy to do with `tscsPlot()`. Simply choose the variable (with the `var` argument) and the cross-section (with the `cs` argument) to plot the observed time series along with distributions of the imputed values for each missing time period. For instance, we can get the plot of the `tariff` variable for Malaysia with the following commands:

```{r tsplot1}
a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 0)
tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

Here, the black points are observed tariff rates for Malaysia from 1980 to 2000. The red points are the mean imputation for each of the missing values, along with their 95% confidence bands. We draw these bands by imputing each of the missing values 100 times to get the imputation distribution for that observation. In this plot, we can see that the imputed 1990 tariff rate is quite in line with the values around it. Notice also that values toward the beginning and end of the time series have slightly higher imputation variance. This occurs because the fit of the polynomials of time in the imputation model has higher variance at the beginning and end of the time series. This is intuitive because these points have fewer neighbors from which to draw predictive power.

A word of caution is in order. As with comparing the histograms of imputed and observed values, there could be reasons that the missing values are systematically different than the observed time series. For instance, if there had been a major financial crisis in Malaysia in 1990 which caused the government to close off trade, then we would expect that the missing tariff rates should be quite different than the observed time series. If we have this information in our imputation model, we might expect to see out-of-line imputations in these time-series plots. If, on the other hand, we did not have this information, we might see "good" time-series plots that fail to point out this violation of the MAR assumption.
Our imputation model would produce poor estimates of the missing values since it would be unaware that both the missingness and the true unobserved tariff rate depend on another variable. Hence, `tscsPlot()` is useful for finding obvious problems in the imputation model and comparing the efficiency of various imputation models, but it cannot speak to the untestable assumption of MAR.

## Missingness maps {#sec_missmaps}

One useful tool for exploring the missingness in a dataset is a *missingness map*. This is a map that visualizes the dataset as a grid and colors the grid by missingness status. The columns of the grid are the variables and the rows are the observations, as in any spreadsheet program. This tool allows for a quick summary of the patterns of missingness in the data. We can simply call the `missmap()` function on our output from `amelia()`:

```{r mmap1}
missmap(a.out)
```

The `missmap()` function arranges the columns so that the variables are in decreasing order of missingness from left to right. If the `cs` argument was set in the `amelia` function, the labels for the rows will indicate where each of the cross-sections begins.

In this missingness map, it is clear that the tariff rate is the variable most missing in the data and it tends to be missing in blocks of a few observations. Gross international reserves (`intresmi`) and financial openness (`fiveop`), on the other hand, are missing mostly at the end of each cross-section. This suggests *missingness by merging*, when variables with different temporal coverages are merged to make one dataset. Sometimes this kind of missingness is an artifact of the date at which the data was merged and researchers can resolve it by finding updated versions of the relevant variables. The missingness map is an important tool for understanding the patterns of missingness in the data and can often indicate potential ways to improve the imputation model or data collection process.

## References
Amelia/vignettes/ameliaview.Rmd0000644000176200001440000003355214335240021016243 0ustar liggesusers---
title: "AmeliaView GUI Guide"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{AmeliaView GUI Guide}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

Below is a guide to the AmeliaView menus with references back to the user's guide. The same principles from the user's guide apply to AmeliaView. The only difference is how you interact with the program. Whether you use the GUI or the command line versions, the same underlying code is being called, and so you can read the command line-oriented discussion above even if you intend to use the GUI.

## Loading AmeliaView

The easiest way to load AmeliaView is to open an R session and type the following two commands:

```{r load_av, eval = FALSE}
library(Amelia)
AmeliaView()
```

This will bring up the AmeliaView window on any platform.

![AmeliaView welcome screen](assets/splash.png)

## Loading data into AmeliaView

AmeliaView loads with a welcome screen that has buttons which can load data in many of the common formats. Each of these will bring up a window for choosing your dataset. Note that these buttons are only a subset of the possible ways to load data in AmeliaView.
Under the File menu (shown below), you will find more options, including the datasets included in the package (`africa` and `freetrade`). You will also find import commands for Comma-Separated Values (.CSV), Tab-Delimited Text (.TXT), Stata v.5-10 (.DTA), SPSS (.DAT), and SAS Transport (.XPORT). Note that when using a CSV file, Amelia assumes that your file has a header (that is, a row at the top of the data indicating the variable names).

![AmeliaView File and import menu.](assets/import.png)

You can also load data from an RData file. If the RData file contains more than one `data.frame`, a pop-up window will ask you to find the dataset you would like to load. In the file menu, you can also change the underlying working directory. This is where AmeliaView will look for data by default and where it will save imputed datasets.

## Variable Dashboard

![Main variable dashboard in AmeliaView](assets/main.png)

Once a dataset is loaded, AmeliaView will show the variable dashboard. In this mode, you will see a table of variables, with the current options for each of them shown, along with a few summary statistics. You can reorder this table by any of these columns by clicking on the column headings. This might be helpful to, say, order the variables by mean or amount of missingness.

![Variable options via right-click menu on the variable dashboard](assets/context-menu.png)

You can set options for individual variables through the right-click context menu or through the "Variables" menu. For instance, clicking "Set as Time-Series Variable" will set the currently selected variable in the dashboard as the time-series variable. Certain options are disabled until other options are enabled. For instance, you cannot add a lagged variable to the imputation until you have set the time-series variable. Note that any `factor` in the data is marked as an ID variable by default, since a `factor` cannot be included in the imputation without being set as an ID variable, a nominal variable, or the cross-section variable. If there is a `factor` that fails to meet one of these conditions, a red flag will appear next to the variable name.

1. **Set as Time-Series Variable** - Sets the currently selected variable as the time-series variable. Disabled when more than one variable is selected. Once this is set, you can add lags and leads and add splines of time. The time-series variable will have a clock icon next to it.
2. **Set as Cross-Section Variable** - Sets the currently selected variable as the cross-section variable. Disabled when more than one variable is selected. Once this is set, you can interact the splines of time with the cross-section. The cross-section variable will have a person icon next to it.
3. **Unset as Time-Series Variable** - Removes the time-series status of the variable. This will remove any lags, leads, or splines of time.
4. **Unset as Cross-Section Variable** - Removes the cross-section status of the variable. This will remove any intersection of the splines of time and the cross-section.
5. **Add Lag/Lead** - Adds versions of the selected variables either lagged back ("lag") or forward ("lead").
6. **Remove Lag/Lead** - Removes any lags or leads on the selected variables.
7. **Plot Histogram of Selected** - Plots a histogram of the selected variables. This command will attempt to put all of the histograms on one page, but if more than nine histograms are requested, they will appear on multiple pages.
8. **Add Transformation...** - Adds a transformation setting for the selected variables.
Note that each variable can only have one transformation and the time-series and cross-section variables cannot be transformed.
9. **Remove Transformation** - Removes any transformation for the selected variables.
10. **Add or Edit Bounds** - Opens a dialog box to set logical bounds for the selected variable.

## Amelia Options

![Options menu](assets/options.png)

The "Variable" menu and the variable dashboard are the place to set variable-level options, but global options are set in the "Options" menu. For more information on these options, see `vignette("using-amelia")`.

1. **Splines of Time with...** - This option, if activated, will have Amelia use flexible trends of time with the specified number of knots in the imputation. The higher the number of knots, the greater the variation in the trend structure, yet it will take more degrees of freedom to estimate.
2. **Interact with Cross-Section?** - Include an interaction of the cross-section with the time trends. This interaction is a way of allowing the trend of time to vary across cases as well. Using a 0-level spline of time and interacting with the cross section is the equivalent of using fixed effects.
3. **Add Observational Priors...** - Brings up a dialog window to set prior beliefs about ranges for individual missing observations.
4. **Numerical Options** - Brings up a dialog window to set the tolerance of the EM algorithm, the seed of the random number generator, the ridge prior for numerical stability, and the maximum number of redraws for the logical bounds.
5. **Draw Missingness Map** - Draws a missingness map.
6. **Output File Options** - Brings up a dialog to set the stub (prefix) of the imputed data files and the number of imputations. If you set the prefix to `mydata`, your output files will be `mydata1.csv, mydata2.csv...` etc.
7. **Output File Type** - Sets the format of the imputed data. If you do not want to save any output data sets (if you wanted, for instance, to simply look at diagnostics), set this option to "(no save)." Currently, you can save the output data as: Comma Separated Values (.CSV), Tab Delimited Text (.TXT), Stata (.DTA), R save object (.RData), or to hold it in R memory. This last option will only work if you have called AmeliaView from an R session and want to return to the R command line to work with the output. Its name in the R workspace will be the file prefix. The stacked version of the Stata output will work with their built-in `mi` tools.

### Numerical options

![Numerical options menu](assets/numopts.png)

1. **Seed** - Sets the seed for the random number generator used by Amelia. Useful if you need to have the same output twice.
1. **Tolerance** - Adjusts the level of tolerance that Amelia uses to check convergence of the EM algorithm. In very large datasets, if your imputation chains run a long time without converging, increasing the tolerance will allow a lower threshold to judge convergence and end chains after fewer iterations.
1. **Empirical Prior** - A prior that adds observations to your data in order to shrink the covariances. A useful place to start is around 0.5% of the total number of observations in the dataset.
1. **Maximum Resample for Bounds** - Amelia fits logical bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound.

### Add Distributional Prior

![Detail for Add Distributional Prior dialog](assets/distpri.png)
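At the command line, the same information is supplied to `amelia()` through the `priors` argument: a four-column matrix of (row, column, mean, standard deviation) for a distributional prior, or a five-column matrix of (row, column, minimum, maximum, confidence) for a range prior. The following is a minimal sketch, not evaluated here, assuming the `freetrade` data that ships with the package; the prior mean of 20 and standard deviation of 3 are made-up illustration values, and column 3 of `freetrade` is the `tariff` variable.

```{r dist-prior-sketch, eval = FALSE}
## a distributional prior on Malaysia's missing 1990 tariff rate
r  <- which(freetrade$country == "Malaysia" & freetrade$year == 1990)
pr <- matrix(c(r, 3, 20, 3), nrow = 1, ncol = 4)  # row, column, mean, sd
a.out.pr <- amelia(freetrade, ts = "year", cs = "country", priors = pr, p2s = 0)
```

The dialog fields below collect the same pieces of information: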
1. **Current Priors** - A table of current priors in distributional form, with the variable and case name. You can remove priors by selecting them and using the right-click context menu.
1. **Case** - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation, and the value of the time-series variable of the observation.
1. **Variable** - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
1. **Mean** - The mean value of the prior. The textbox will not accept letters or out-of-place punctuation.
1. **Standard Deviation** - The standard deviation of the prior. The textbox will only accept positive non-zero values.

### Add Range Prior

![Detail for Add Range Prior dialog](assets/rangepri.png)

1. **Case** - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation, and the value of the time-series variable of the observation.
1. **Variable** - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
1. **Minimum** - The minimum value of the prior. The textbox will not accept letters or out-of-place punctuation.
1. **Maximum** - The maximum value of the prior. The textbox will not accept letters or out-of-place punctuation.
1. **Confidence** - The confidence level of the prior. This should be between 0 and 1, non-inclusive. This value represents how certain your priors are. This value cannot be 1, even if you are absolutely certain of a given range. This is used to convert the range into an appropriate distributional prior.

## Imputing and checking diagnostics

![Output log showing Amelia output for a successful imputation.](assets/output-log.png)

Once you have set all the relevant options, you can impute your data by clicking the "Impute!" button in the toolbar. In the bottom right corner of the window, you will see a progress bar that indicates the progress of the imputations. For large datasets this could take some time. Once the imputations are complete, you should see a "Successful Imputation!" message appear where the progress bar was. You can click on this message to open the folder containing the imputed datasets.

If there was an error during the imputation, the output log will pop up and give you the error message along with some information about how to fix the problem. Once you have fixed the problem, simply click "Impute!" again. Even if there was no error, you may want to view the output log to see how Amelia ran. To do so, simply click the "Show Output Log" button. The log also shows the call to the `amelia()` function in R. You can use this code snippet to run the same imputation from the R command line. You will have to replace the `x` argument in the `amelia()` call with the name of your dataset in the R session.

### Diagnostics Dialog

![Detail for the Diagnostics dialog](assets/diag.png)

Upon the successful completion of an imputation, the diagnostics menu will become available.
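Each item in this dialog corresponds to a diagnostic function that can also be called directly on the output of `amelia()`. A minimal sketch, not evaluated here, assuming an output object named `a.out` with a variable called `tariff`:

```{r diag-sketch, eval = FALSE}
compare.density(a.out, var = "tariff")  # Compare Plots
overimpute(a.out, var = "tariff")       # Overimpute
disperse(a.out, dims = 1, m = 5)        # Overdisperse
```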
Here you can use all of the diagnostics available at the command-line. 1. **Compare Plots** - This will display the relative densities of the observed (red) and imputed (black) data. The density of the imputed values are the average imputations across all of the imputed datasets. 1. **Overimpute** - This will run Ameliaon the full data with one cell of the chosen variable artificially set to missing and then check the result of that imputation against the truth. The resulting plot will plot average imputations against true values along with 90% confidence intervals. These are plotted over a $y=x$ line for visual inspection of the imputation model. 1. **Number of overdispersions** - When running the overdispersion diagnostic, you need to run the imputation algorithm from several overdispersed starting points in order to get a clear idea of how the chain are converging. Enter the number of imputations here. 1. **Number of dimensions** - The overdispersion diagnostic must reduce the dimensionality of the paths of the imputation algorithm to either one or two dimensions due to graphical restraints. 1. **Overdisperse** - Run overdispersion diagnostic to visually inspect the convergence of the Amelia algorithm from multiple start values that are drawn randomly. ## Sessions It is often useful to save a session of AmeliaView to save time if you have impute the same data again. Using the **Save Session** button will do just that, saving all of the current settings (including the original and any imputed data) to an RData file. You can then reload your session, on the same computer or any other, simply by clicking the **Load Session** button and finding the relevant RData file. All of the settings will be restored, including any completed imputations. Thus, if you save the session after imputing, you can always load up those imputations and view their diagnostics using the sessions feature of AmeliaView. Amelia/vignettes/assets/0000755000176200001440000000000014335240021014746 5ustar liggesusersAmelia/vignettes/assets/numopts.png0000644000176200001440000003265114335240021017170 0ustar liggesusersPNG  IHDR 뽘 pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 
0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3- cHRMz%u0`:o_F*IDATx엽K@bpNAPIp;: +8ԥ.R7W)1]DbK~P [w}^.@2G^Pp ʻ ސ~"uNPw#Q|%0Z8@ 2qQLecsD@ ID]Oy+ Ѷ(RJYV,:BPT/"jz=B5,>-`Ma`XLf)c$7X-p, n:iVY&Mo}9̶4`WX-MLaj DhLlSܝ5]`+#~jJh4z+'uoC}hy=uGT>ׇXK0 6@Gq`&W]:."ۤ:pHs`94XQ{' ɗGgDQ4)R^L?f}`F+`~0z\Fs2v.Vx~ !D:2mr~bc^c?|DּﳽȒc0=,|OXjY/`Y 1p?S;6?v[@[J_V~Y3cUۿ>#ie.&a-J6ߖ9e^Xo]`|>mu8- 5dP(tiB)ohg?ki %&V60C6 C&C k:V,(lA2dX"s8Jź6BJas(KKǤY฻<=O{~ygq:3KȟJyxi/6P8yPv´sXp2|΅z{~u㝍<64f$ ȒĉBnp@.vCm갍 [!O_ulo$3I3ᶏ>=Ecߘ^;}?_ G>D~= J0;tS&<m^o!#-!ĩsIZe|O_ $aFft,IX[>#pDZ٬i7qfi,*ؤfx9;KlSwh6ޅ( ՓZ^` hV(>J Nʭk]]tZ `˴9`KDڱS()w) J~/nRWt߻?G;%{ 2 i/: *`g0_WQ6WDךk"ufE {>>#꿄GqeZEǴjG)sLʙ=hE2HeF.'3 Z<(# SO_dA3{s粛h0O /fi%|֞5(d t^bSרğNSxi{M-NsJ/rzzvJ&k2ľΌẀ̬T.g[fL&I$~9Ykm)z3I$b1tt.F,F3/whIŶ>#g=c~~xʧ7do<_Ie TU9n[w(NٹxjZ@ ' )#5[X y<:Q B=u=<薡Xe;U|59wD1x AC&(2]hQʢ)ΩDPwS^nVۻ夆`L }MEUUԨǍ6Zl=Us {urwﳿ |ox穉o˧ %Ed=gQ>g<\Ń tU; !`S(2]HB|qYAQ6Ͱ$!'Qy bB/k -Hy3,,9%5`bPC6aͬ *\Z_NxJhQUiHXM t*k/\iVPY755I\tkx -2]D0 ACjDóǚK!G i ۲E3Vjϐȟcu j(`GkǩO),n?4ףѐ6mqEo<_ꔩ ڞ7?gScѦE aK0ԅ9EܨaDYL!LӤaf eо'O!3}]f 4M. K7ssؔ,"4RKq]ve力"jJ4[3$Lcf\vZ퓧~YW*>Z7\A4,b Q>" !`a…TTTLge_l}55-Lëؼdm:OVM{Zγp.aXB0޽i }rSHKv.awywɤ:yĵ ٫' YGwUdQaYw}S:&dY"}imsoO9`͌ldOtO==sk H_l t@+++1tKϘm0&bĎ!T>x]ނPLǿ3liSIcjFT rUUzq=sƠWܝA k'eit 􏙝]8$}ywyf32v?~Αenwikko~N_ 惺(Uw7慵dx$ X<)־مyXitaq#ԩE[GD4w<8dž*c k۫4>l9+-\{bi65>O!W'# >q,jVP/3ߦ`<,aQс 8ٶ5;.HBjs$uvvBaW N Q'L :}q,8ɏo^n;+,9"t+˱X28P犚ׯBkc\4]V==sQpE,wT`z8"^aF;w;XZu8mU6bhooG{{;v.Y ksmΉJ|qJ[[SnayZ% vgf`:^Xc!Kpwn"!7]p$zio,Z;j68%Ý; pwn"u .qVfcֻr9>ˆh=e@GGiLrzctat]?ӍmXӍmZy6dfCJz3785r8qݸpa`Ym+P.òSMilC&,}cMGMc 7Ѹ&CIoͻ:h|qZG0?D?pX<]q.HÞ_PSEX߾񗸑`js8VFX,3^I ʹ`?G LIyAj"LBx"DHA HDlΕCYGSmxixF42Ak{. %@1=ߡ!i"|iIDa!3t]jyDwxpM[]`1gu#Fs WrnNVsx颖@4\1b-r5jԠޮ`t|LU"bi?xw0 aLw%ݕ a$ A0A "z<~¹!ԏhcD% JF('_r}vxf:=ɯ3wO{燽wz <kwnXܿYԡ~h!l*F7Tz }KP"P$I$IZqusӚ蕥as IB[,~ޮZVO_dl~@$aF&ΨSHibu(.޲l*ݿcgC>D%ĐpY|F8qÀZ.SO1A H a$ A0|F8 %gwsƲ}>6A]L>gYY`)@jtKn}l+P%L~2xSr%8<@`Xh{)ͯ;>L1b *zX 2C`X?* KZZtD;j)0cpwTq"9F6]M_-Hy.c`X?-{| .Z PyTF=Fw6}RaP{fg`) Kyd9)*!QD)+¡DP(,Q0A H a$ AD 3¡DP(,=s /=݉Wu@$0 uQ6A>x99H-)K(Uxoqߐ_Q9&,}; <^,!2| GU}[a|µ9&z_o*=@G@kúk7Yv_L g9xؙy LlEIk>,Z ?(,+r[ Ȫd=7ϡ$ukG7[a atn>Y,B,YVِgσ([ uYqlu|PesLq-lrc5Ҵ/m;|[l|hߤJl6VT7ulj)|8z}z}8z֪|߶mrssԆɓ'ӧ1cE$nW#Ƅ 0vA}78kƔ1?#"JA HD6+g8<lt3FE1 U\R{L.QVDDb${:-kYb:Ɗi"kvĸ)bXVm%?gqV}޷=/SN;qDŋ/= n!(19d}+lzs+~qZ\>ӥg>*'w7c?APUI B$_ߞ*@# m nӦn\ІmZ(kFݟٻCQ-Wa ^c?xIq/z,^ywdl gkzC+ePwDFFUu\xbU#ҎsB]al˘6};Z 2vBս@w쮷 c<̻s8Sjo WouǸ~ XdW' {˧xbalx .TyǨۻʇ?|--NtG瘛[vzWXzWhool*9|8[[nڢhiAX;;~sG|'x(w}7:tB,?)^LT.wG?{Q <Cx;=_{X9uVޅrW½\jbi;V1Tz| _y" >*,@.Q.Uy.W&3XrdA{F9eպ xGwde-g8vH^v`V Y(mh $܌ <˗/s43eY>خF<%~=swΩkɟ6CH~w (&nըCtDtN` jn&"u4c8Mx_r ׶bfWL9~$_&q1֭_+_ʐ*y"-#%~kq9<;"U{<_Dx n1sʥ^~!Bt7b$Y& Xs|(ym=F7ˎaXkJ^[!/ԩ%FtoWp$I>@E^{§>܇$wlGXsOnL,[eUGߋ t:jz{xA::a#FTo! 6b6vƁB"@0#tO{&'Bql\ BB GB6=#+pF *0 H`L @0Vf4V۽ f V/!G0 Fʝm(DaVLMVMzB"]0 (Рf|@Z9f[K-QQ]htaX]!.6{`9i@!G@ZY-DYDKf5+Ѧ`AHX"H$U{KBH܆q #"r3zms?%!!p-,cJOB\S["K4b*OF !A`B( !A! BrJOB\lQzC;fƌE $T Lȱ\┄Y0lf%՞f1'svA(@U@u-PUAH{FEΚ%APVcB=2X" u%$IԐ]D_@ef#?R"9' UxE6Vm\JG6&V/~d:@ ;YqIN8%uCEP7A [4J4hGHNICRlvA|Xb_L8L" )OjnfiģdM@` Q]r݆ ",=D fbNZrzMQ1دmu'# F,;X$oXކ+8B vu-Wk[q$B"``BI !\EH ƅbD]?tPCLĎ^$ !A`B( !A! Bbғ ܆ظ0rC5ov^# B&A `tMuN@;B  LGZ2&&5W^ cV|܏Mkql6mmnL\WT X]9JumH3zG`80{|tXaZlK]9f?BT6ShARQ,*Edn\cPgjHY6^-GEoH+UojKRPWh)~MVsa^b `.W$k5ˌ,+}RjP9=C4ٻ B0c0,R" $$~8uVD;6pB P0! 
BB P0!A`BBWzMbbd10f( !bxxfRD~$#ȜzdrXM%w Sg*pyRmf9~' V@ҽRٱ!] T<ҐrMM/cj} bPj1⤡VI m0Թ+?A5$ԕH$ RC  |@+GMdZ{T&Uz<姢*Pʽ6}M VOP3`QŬktc6W/"k5F64GõRWJG_~ ;YqIZ>@2-H.w/x?Vԋ>ijKX9`L[i("F0gl=3yW}~&k)uHѠ:Յ ђ)O_n4u 31'S:?vhG49M (֌c)&{õZ;ʠ2X_~IHyRsO$$cH0 (u 31'9z=wjd_ Qn i ˞M4;PX-Xյ(65JH]Q(7Mr/%/q4 hc}=FK-rnmZ Q]\T__Y!0?V9IvN`NbSzYLI8lI#bȄEdzdr`qCqcB $n !A`B( !A! Bb3LKpAy=z403# (--Eqq1z=Z[[ QQZZ+Wag Fގxܹsghoo0N $IBgggofǛQ)kfS)OGo+&ױh,XEYVƶYvE6MEujS0(o'-@ExԌY(~6tq|7M.C|0nL۵:X^D٧VaXi 744A"Bf+]hx.["zFMINgn /`V JpH-?8 z{NO!i8{lNgȦ$/ %4q $m2S8uUL 95>Umi#knl+L~ N:V?Vh$aիQZZgj?$&&bʕXz5f~f00:::3# 70f?~P`ܸqqbbb03$a͛q~]_~~C?+S !ׂOI!A`B(m!/Wfu xIENDB`Amelia/vignettes/assets/splash.png0000644000176200001440000052403014335240021016752 0ustar liggesusersPNG  IHDR.,c ͂ pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3- cHRMz%u0`:o_FCIDATxytTUuԽ5f!L * yJ-*22~bheaW,qD2yq8t\v/WR鯤]z'"t/ f]#];jjkUE֗-`OX0~PvHQGbTg}CP~8w؄$9UpЧ[K1mud݃&;{2w**絷ѱ]/ʫvJRl 0 J*ϭt_L^A^W{E%suhRx;,Fwng ~~,kAu\X\.'ѮxH?EEu\U`bℱIN`7ݤqP ܜ8~ Ȳ<={;Ɇu٬0ø,VOra %Ed9y2Pw='9G˳Ր,}k9>X˰aux:Ǐ1$s주=X5m; ױ-g= ̞sjcvsk8Q|D&{wR ô" 4IƽFTQ>%tz(WCѵ[{nrS^bŪeT0{ƫ|VV.(|'ɤwYI`Ӧ)/셳kk~-^y={ilf3OlJl;BJU$x}XO1 &//x%K?CU?7eK0L\vZ 䑇禛=ɖ~Β6'UU?)ݽnW=_!=R՚Y^ڽfR&=4W[O2HMASSѧwWjD&LK>F *7_EˤV'ģEġ+nl$)Hn٤SܱEQ! "_dmB 7`ɒ4}%LMc_ UۻG”S|%2&BSr9-ŜT/5TT6NQ_S!;{=[n*`6QW gxTnߍ̧y :v?kprO'b=F=o+V#|8ݰ%bĠC1p`_j<4i<,/)),(d ȲHmm9ÆKddE|2Y2|K@>bsx{읭vy # ~#ǎ`DGGSy%vIt:>33 9;sG7d"ߑڂs;>Lmm-NoVR^^wG7Ho*PE'2O$'D1aX*+|b;KO҉t"##[+*Y:]#"'N.+$"Cm餩[ȇ`& W!'1a v \[1%:. 1䤡ϙbf}]),qíEҳkv;KהmG6^xpݘ(l*+)ӈ؉@HFej(*-1PG㨂c*B.[ĉYSU6_gY_LJB6nYnnᲱ恕里ǔCn=(]Ǐ.yqN0?KOlG+?eŷ僢شs\f &Q>V˥r1Ul'gA5.n~MV\S+P`L6)`ƨ1I'UzIF_^^W]uՀ1RuK0'N&#Å`d`4Aj!j*DdEJGT(ATD +È/ FBS6KV7'{N&bad`\;lÁ]OGjX$l#55$lJB^|@^R @u¡!`㈒@[rܙ ZXu ͊CpfvDنfsp8z|zƽ/ξ+,Y@0GUTE_ݪhSѬbEADQbA>*S ˘eX,Y}ˢhrgR^;7yq-ܴXGRs+OaȲYm1B$&rϲGYTdYd˜ܵ|!{ T6VzD Λ9z(sc!H}(Y^.WW*9W_.Watv=b(!`w9 IuM cǍbh,^SxpHp^~}6 N~`O1K_,f<6g/Ef0.btxs{d lzcGd|Y*C&(#e݄]U x<<;ieWeьP- ueaL<_9SXu MF?[ ^'şshei3.?Lk+Y_EB(֓*f^33D,q2E[̐ oyOヴpnHʪdV&EEy- oW3z>{/ MM..0'HǓHCb>s4Y#r0a<'O"98\:ұ.tJjZ*`lX$hr\)!v~\{W?2vqÙ<ńc]^ |>~0u"XzKmMw3^2͵ /߱uԡ4V{6nȦMo8IwU+ϟUE=^| <<^FӉ탍A9Bn8v }'O42\16B ?YIv8߈Uj)/-&b4ɢyۋٟh k=C+e\,6>N[b!ƎCGW`=o4iin*)q=wip#EE{9\xbo>KWC<39>cM>lN E$&eޏXjCwRr i>*1㒙<\vfĔάK0!8TV"=V(ڷ @q Iԩ$3'y M7]kCCT/4M8F  ##fJctF"8B0 !'񵷷9d (i7pqcDb佈I\4weޕHlffd A46skhim%--0`.aCdfdYFS 2FNkk[|2GXMULp6BHm;dֆZq1(}+5G*jEgE{3+8nARC5'`+]אeG8@%dH({vFc! &@8ƑA:bve84 И; }=&*+!@1߂yI{;! P}h(H*R?`V[ՖZUkS$9ȵ7;cK5~y<֛`|rssq5̞:/*ގ-yks mex{؀D"E[_[7?̵o4 -"˂pc=~4) ʒ*Vy}Vܝ7\B;i! c6Kn fvu`![|𛰙CxheL>xâ[uLrf(`ܷ.G+Fc^4~x xe,x. 
Tw=5L~_X@?$ܳE/i_/5 A|˰`f٬D,0D5~iq Y`HpDE$`xHpD3i G$UV,X8-Xp|;aD0ǟ@jH$*>ۍQEQQQ^Tn?v[Dv`r葘Cyt$r+S!=AbTcp8zeFu ji7bbnX_oD#@E}^<5݃HMO%:NE=Z&vcw *n<2vlC`g&zeǗ0 3l:՝f :;͕{58eekddBKC;si0pCrߥ>[3Ask%O<(&F$a 4vN9 IH91%tvnǘcxvr lV$?.gk CTL(ef%?KVj**vpu7 uDC;TB]] >Whllo>6m~ [,Lu<ܻ{d~g5^> ڇ[ƨa?lTCs/;nFOg>ʣ>˛N7-EꧡbaBz;w6F] =PN' 4 ?,U%r4:|n]o9? m+XG|f~3Nҙz|˹QT:*vl/' lfݸ~u feLdESSTK4WǻK_Cu or'z>B`Y?ENʵ:!HIECHƥ|YT?)[ǒŒ鳙_8Y_Ļo-ggԳ'seWpӏnGQ"YlٴմOK%#55,ma*+;y7z 2::4Tw|I|z&JUU -sh8ГEI)kATh0#-r;JhKX> N#G .@vm ňw'vB/_<L:T)aXb#eYGtĘݩ^{;\v9j Mf9oO=g$4bH߬Kmm-y/PSGĞ} j 6o-aˎ3jpCOĶ-{䳝\uk|dyk np+x?`0X B;?cusxptM;)ޱ_BC]gE,sz2򨭯ᩧW!xO li32vb5T |FƎ>MT2/&8g-ޥ<_2 Yٰ~)!M#%1/bk$"-b1lĄTOI."F `uÆİq1fcKym0MINH$F(C)Q<\"6\WP3NsyeI9$*Vt`W ƍzzLL .8[\.DQA0d&'_c5Ä r] YU{:#hD\wÅ 6ogs˭?fȀu`ێY+CYY9w)~J,_y7!5Տ6Qc_p;v0t@v`3˸`o o7e豖=fdK}MǴP˖/ax ~~k urʈko&7'eb!QDum@?Ğ/3fiAC3$4 ]Ќf.믹 I@/UŽ](k>?z6;8i|#|1SL4Lob99e"69T׈(ujH$*?,P/ݻ߿?K[ŭsҊnCQdbJJE~|CtcGH}{P@"Qb1'ˑeZFQb v>]"O,zHskYlܼK_e?f_k{_oaxlL<m ik笳da,]1% a0QΡbE0)I;9T˙߼-$!nn3w/' -IYiѓNebfLL N5ht8>y?>w3 ŭzY-N|MDN0ɕs`U;y}2k\lJCcKue9eR]_t3`3n|~db #I^JFOka䢪*ig2fę2=Ba( so^ Ȧ]b2F6 IR.1 Nl FԠ#aTWא˛F}U)T6F'`0HkgP '{VK?dnf?˩].dEFCueè _=?%iiivC4SoGUU\.B]c}6#bvnݵC.b47aщiWʠ~U\Y1~`ˌ( F\&.t$X*B߼Ad9+ ~6 |^ʈC1$m헸tUuσaz;aFYgΈ1rr &HqbSbĸ/'G%5# z;` vM:zIti:X M0 AUQK.A n3$b隆( pP-ii@gggrg{lh;H~inn6`GQtC$iLSPUvmAV#V/Yow{~!Ӈr s7גMc[ڪ`ꥤyW3v"^m8U?mA.b!( !ҰţDb17P(L/X,FGG~/B\$RVV& bOrInud4F\&A̮F]]Ƀ.MT+ O6TWNL#*i9]mQqDCq9wc3hx\az膈-QsСZ^/9}iDbܿvt*QǃMtPYAFlk`隆˼Jhi0̀>ycf(hɉI HPzc- nf\254p84hif$s,XceR`oE ,{b 7w*V׽ +2W@ +bXTE, b )bQsWеoWaE{~`a,XL2 ,X`|.b>rUt/_@Ec>_LH9s(gr-X~ ,̂RpNூnOi9wDZR9QaI0 ۮV7[.VU;[. ]Æ͵"aշ>Oo:8u$c! WVQi:At<'!o}/3'+{.ʯ+@kk-ߡZﭧlwlZOkc̫9'SqSdff[Xpi2 M[VYͭ0-0VǓi+r g1kq+VvmZ\F~~~ί _HJ[,.Mp|zY,.Ӓ[=,V5C;|e)'w)̟YO4peȲLaa +{0ND`$` 0,X`" a,XcE ,{g_Eun !, kEk֥AQֶ)-mPlFUU@ !! $!]g1Ʌ( y/y̙̜{>]J&I*886뮿,Ł$KS Ywri}벝N8ޠ0'^x~{}\Ra 寿E2 IqV#r)=q=Z𡾼y)P웗ѵ}T/̣=3-=;`N?{&9fY `Vk>ُǣ >-/ϥꙧZz mHj`T|]aXt7`ѳSp%tX*!b T`Vq;Vp8t&owMQcT9|OwA5"~˯JH:Jﭯv|k|l3Ou*] zj%3Z}ǔZcZ,^688 Y RQ| qEZfA9\]77wý7ANfC b9Lg?岋8{L/o!#n_WTk7voU"e$zn ǽ - $x*U~6=e&w >K,_u} &xRwY0YN.r;pp,)\N=8pp88.-W\.+|3Y\b88,[8f Ep9f'l[8F!Y8pY$,|#$G8p-4;v!.:h"ODsss% $WOwVEZV&b #AV42r\}(jOC%FI@2 ^dYBT|*v%S犒0[Ih_6!ײ,,Jsly@k @E"F ibf,ˊ9?opm'. iomC%-+[*2^Gnn:[ţ (7a'+T MSFڑDU`jaeiX IF#]+ik"EI, Y1 t̄IL׆i^ gO!).TM%;ˇ,r6QmFedՇW*(#Te9Edi62k֮ћh&xo4kD""*@4f±=|>U\GpȲiF>4E 'APIxR2MA$D"A"@܇Kĭ,62I1FՔVz=x"~UCR|({d ]P\($E݈HRAQ,AFQM DqSH3M~;4#*d' 5K9dh = TҊIKGU@rȢ .RJ #2&6Q`&Gi3ߦ͹?vTha&6 fY W[h@}^@Gt(u&h/î0ŇGvz1Qe UUI o**Nmkk&jcW_1NG:_%WsrmDCED&Yu}>ZB!TͅFEEui9(#%]("J3FM$fR4AVܘxbI|5d|4"OΑfk;'Z .C]I[ü~MK%RN).@[ T9)ITc<I֘EQua\vdw\$ved|GAW JrGtxXx ݯl+E-y1QUyP$J5HȰL6翺M2\i"+J}خӁCA֐nF @(Lff"#Fd<EKCE4METđ>K |EvGL& z ݫmؑuw:+Vn1f8~xᕈ^=H'h0iⲢ 6DElYk+tGr1#Qb `( EQ4DRƯK%)l.n=1l lM[;4[Lvzh#cOC$a dddufL8Es)ټ@6 z""Ey0z  7l)!:|{a6ib"$$~-&O`ɄXBH$$9fXG>Jіvf+>EAFkM_}e_G+.M$"QDO8F4i8Y8fD$~L:oZ6m*4~2ҽٽ{7?BRNjl凔+^GPZ# 3pįi6tL$e$Acs5JwDߓSXXH,8Di3(vCfR&ͧzݭ?$Ynv5 ӥ%UQIW_;KA0e4>ͧWQrzPugBܤkΙtK5Sв}(N.Ȫ2B0wk 8MJ6gXłaC3ixD/>q;zhrա2IU"Ѕ g Ǐe?fbikmﲤU"N\x0&q=NKK yyyzdgxA0L0 ؗpT'4 P}h'Ne!TU%v͡kAJvd1qEQH˰ M;XZwBVnbKW,d[1kVmbՔW;d[G!xd?b[F%\H1i0ޛsi)]ǰQ)޲N&JQ=$I+bP"qY1L4t]O`ԨZ& )n>6@~NIѥ[>vizVCSK}#qƕcJ zwAQ2ۯxKWwQ^kI4շqݭcVL15J܋EpܠOŽ~Oilڏ7T @FMkKp?;ʛ73l$눢HVv6*K\l/>)L= $b "(%jG6o)EV/N ,AN0d,A,rWlۺݻji0PucHw~9dEҥ#(jw>* 8P~|^1^⡢t KƯk&HG@<،"f*G^z@СxMaa\E߁cOPm:C:^I`ʾ@^y-{IM,[17XAL?D `ﳯt'!>]5 Bkڌ(Xq@8LR8dt)&hHQH*2` !TO!yݿqn>\%@ ?ܼt;-%-BJFJغu3svoAH~N,z縈aӧ=u"ϽݔSZm[?tv-! c3 hhha(+Ʈ(sΝp}H6LA\q|إhiv$,s7{j =??<3f\{筅Kaݤ}R2xxrdegp{-+u~=LQ:;K+}4_v1S3T$QFG>ƍ܁n4Ps W^J~P[QSSD'`!] 
cGe4ENnwZ*Y7 9*;:(>P#mXE~n>^n'n7r:l0 ~绩_fgp{Y ۷#""cOAUU%j0v$H8ꐥ@+QDQfݧvoEK w~#fO}C#y9tnd˞x=szubַУW.i,^a N=+g<ɲeK~ˋ/Jv{ޥf,`8\":V~%e*;;vrL~|,G KREnf":&w4U E5v*J6s?aczmh dt.~suOFΡJd-pI^Wvaƭ3a0|a/9eՄ<b۶̶ͻ?v zgߧ[QiѧflKPMV 3 uxt]d-v]46Wҽp_Dv؈G+`mEݺZȇテsH^SOQPTH$bݳ߯3YdYd`nl%5uld_I楹KӈIeK7ēЃjn"^xn MEtj:n !˴ fآ|hJݔ4Cuuuu"}Ģ2[rf5&2tp `7O^|J& HK]$)As!rx8q<ğ,.- 6B\ǽĢQ1999de3~˖d|d#7\# ^_sz&OO$EpXIf(yOZ7m}+GCÛa \47%1"gt+I,#Dzi2~O(_3g[3 ՂMYBe.&7_AO٣/W^};7N=։;w7dĕٰjK#"I"}>nTToULR4O+ *?XeW\C2,y%5ͤU<>sϝK[W_,H8fX;͒)OMS|VWQ^z9)Lr /;c>8 ⭻h 2_Z#Ԣp_.Yq  /۶~㶔)5yH>zKu4{I{jRVTDĪ5[f2ɸ1=l]3(ٺ;vh&ٮ32?'@$h{1[qMS,>t|3Sk< xDa$Y"b: 5TTAn޲L?.3n~/D/K|AfHC}uqZ~CW(&Syx{y%,]%.rQ}`VmADIi0Q-;l_?]D~IJZgTӀV"ۣ `b`Gag~oہϓu+ +)Kia7ҿ.á'~W9l е A5SKlV^Zhv5MYQVcPQ/#;Keڴ T1z< &#,6AZͬÇxB&[$ex1 fӦl&xv H$eDўKSϴ{mqWO[f c&ln -= ׍"~NAB~'Y7g/L+ҥG2bpΞzY:21( $pIj%T"ɓ~v0|(b&̤ .3N;-BdZ"&$!==ePXԋ=e8缱94&N8P4hD" Eni)r]3+2).#4J*˖"n`ۦ/ 6SQ%0__ 'j .\vȲ_AYT֒姾XTgFmM_[ա8?ǟnk޻47v۶{fݍi էܥ3W]s%xw̼|>?ՅE 15Uѫ{7;(ݻP(" z煬Z Gw@y>K0\fD"q‰8)hi3'kDl/ޖ2w=,Q J݈DwϘ~,:&Oqa8ZXm;diktm1/c:rY4i d#3yEL]mVdF~{N2T=++D"Q|>_IO3IOWvY;Q#_w/W/nNi2uEG] pO/v:,9v)jQA&a2)jǀB-4& R'M%\g2h8sdxt@$aH g@0BNMM //X?s&Rw7k tDX4JKS- bF<ΕĦ-۸3+gk1;L;J.Ik7)X/py2OǨ1C݋_|wΤY_`~Mx5[\y%gWѥS ??7ŋAѣ櫐s.gr2t\DҌmv&j6s˯!/&DvV6 x>`9O;Ν$^{m'A4w^TUч3.I?#;aӆ< hKytWˌyuCE.9{gE9X[ Xi;iY^<iyx] y\8Ji 7x}>Fj+>Xæ[ x,XwUUeje%ye\w f~46)HS[Vٵ}3999shXd+X"N9m}+o#ncC:sSKfTT# V~Ƅ 8W@\5xٗ]-;@aΘ?FQ76-`/;3N3iDz_co1\hY#7_q-9yril0GHpL2g^ƶ-ɴ PM:trXo.O׮}9Dz>&M>Mq7r'd7NaoYI%h)jLЫg| P\=VOAe2pPOzW> V'+!4gRJGr) $!5'2dkBb0zh5pcqTՒ)@>eMe4 6`K5F"772|>?{px!y)(kkTUEU;OMi)\$J޻&k1؄adee4^Պ`&LDu4oPL|:Ɯן%h~SK.g_Km^ϙ;fXw~QЯGj? ̘IGSQYM^Uߩkoމ9*CREeP=zPWQE~W7['m[n^~T&JLw!.Dݟ3zZpݴĚFcx<,Hn-;FT3PL#a$,aCR2QD[6{ VşF<}գ/SE{:;vgϮx5bEFN6%*t,xq$Iuv+

[Binary data omitted: this portion of the source tarball contains compressed binary content and embedded PNG image data, not readable text. Only the member file paths visible in the tar headers of this span are preserved below.]

Amelia/vignettes/assets/import.png   [binary PNG image data omitted]
Amelia/vignettes/assets/main.png     [binary PNG image data omitted]
Amelia/vignettes/assets/distpri.png  [binary PNG image data omitted]
:^z=>g&p}q\.kjbO<{ƍ޽{Q]]7I3`Qe6'#9+y^_Sz#e]H~R5sS}-*^@]>?U?>KS1; =R&LlJُ)0`{%{Tvg7b38s Μy0.W{{‘|'!FNBώEcXSS9 :Prbeȕ\_wy/is7s!vj];a{şra GX"LŒ%SJ7+Dt*ǔ'p]ѻ6χ)c~MS!o_6=;2sV(iu==aW p}a֬Y>}:f̘ \'y/^RtطOA @ot)rp1}ϧ* 9gYAs*`N2e0CZɾ{Զ#.RB&cMrS!sr9Tr~ O]lvNU-"v~.SX;ǟ󰠵e:pknC\\V\cuXvbEV>;e;W .nvzmq.i߹Z^l'-&:ƳM?CXi-XP˵?[|y&i7^0-(ՊMOc?+?9˫B.6-7 ˟݄= hBiظNE]nV~6bx-x4,KLse BDamO$س< _<.'F}fu'>粠;Qڎw'IY[ԤZDDD 22}RA|s)=Σ0u1+JոVKt k}Х(φ|ԈeU/Ha'srҭ-N5"lGe}|ԥax_ﳗ[{fF!*Č@C({VXpd£4i 028|0$nFiYVdDzA6&7v44&Ch⥅ _b⸙\G0ǵ^`jo= }"D{Me:|҆ 7~҆'Tz+n/5ALLA-mTzg>Q>$: pD?n8}/3S~I`lH"6^:{Jg{Lļ@&/z G=\'(Y ;mm[=}'g>Xg‹ uY`fRԮ`-r+y)Eo!Z_֦=2`[3֋3QtldIE/b۸=wܯ_E8d?|v<2.G|'KHH ?m݆di<'NW_ ko_ u{*l$6>F6? ~ֶDl~fC_B_ Pt`?q/L * V.śoɓ'#66ֱM[[l6@ogZ܄:|= }Q^B_,IK̏_nSVzެVgۻ%?x~ï~p~m;s bbbm*>  Ŋ--hokLjo{_N}{MMM`Էԓ! M,\ B|V }B}XǡC}D4Dk^}xَ0QxySx =ϡ$;!w "!_.wpu(D@"/q]JT(\^/>L(MWjcBI!7<&{حGQ\?t҈Gu7\6h8T~݆:j~lkЧkAoVk@Cnn?9JViڍwT{'<e;m?2_օ.1c\wKF~l-! %rQ+ "}:\xrCly$_ŽO=t(2`UBr䬑FJNvds g2/9B0 p}uj.^;>YKJڇ18Ƕx9_W:?^n(W+pSoy]uJ7uwg]R[}E 0) ]B[l+^HlTbW pfɟۍ:yrS Os n)X-`#_Lacn<%0Uaۨ=sei&ʔ0i)&/G2d"esP>˳g&yx|eD9 }Q.:2Z>j'ίAr>j?okP")_c[À\'g=xEx -#t- :htF!54H=[?UHrMe\ݢFϰkr\{]z .>7ϗntccP;yե/D!55g.T"{w=yzŤe(ǎ*g\_e1o7CdB-0;ѹNuHzg6 Q Dum{(Z{X~9HywW/Ż}=sT>NPtW.SW&Pjwۺ/٩b>.O5rul\.QlHPg}jrw]<3oAc[C7ӻԇj4cIE8X;1w#Tc1udU+/ CA Yٻ)N?X.([4oQsʞ$v\P/& `;0wo">-D5zRs`9QM.`9ع9R|}'wvߖ.}#(N @DAŗ33Qw_y>CǒD=f}zOK/y,w}Qa|]]-qcQTD>On n0v/r ʼn |s7% h']DGa[8.( ]"WSw/ػ[I yZ=bQY+b֭{!"VVc]sS,αw;::ގ#cG",yXƌQxrWchbDi]"K`*QġsGpoLFU9}K_rfP)&""JdK#3(`="9vI)Kzi9|WgK>#rRUAst{D|sG•#G!仳M8܂!$fA/,#W$v,S ~ɳEe0u_rHXI6= pW\cb2L*: y1`dݿQߊ` b9mf g0vYmcv f/ۻ-A2Q28|dm=#"""b# ]ߝmbxΈHۻ.}dSCD(cOC1QH{(d TcՑpt5-NAle NgyEuz:h4:5DDD }2Zhv)Lu+4P4$':6@u^aIHhȐןŨA"""b裾g#X-%`*5h$މ*@}G9{_B~d@op ~zPmK^y][8QEDDDtGDDD4BVtTgf!Ͱ(H"YF` zw9(doArcl7 .fAD飐sMDD }DDDDЧ`VQ`#t- A6Fcި1d䒘 #t,Tc<2: 4yr)9teZe j0@orL~iQ?-zAfk~}7%uh hp tmV#r6 Ir[&>Dyہ,{OTC{K'7w`8b1}uW=" Zx\I yZ=ςQY+b֭{!"VV+l6VMMtZ`X`Xсv1>Ywe!..5͛:H -Q#"""(}wE&"pD YW ~00 G "#"""b#"""">""""b#"""">""""b#"""">""""b#"""">"""">""""b#"""">""""b#"""">""""b#"""">""""b#""""#FDDDDLP{ ZƊQ㵇kAkjjbE"">"""">""""b#"""">"""""X˫NB{{;0x`\s5Xx1ADG j*h4 QQQ0(,,իY"">ىnZ[[N(LEXޝݙذpc%X ;T+-kغ/],剷w=}D@ʜGN`y1Z6lZcy eF5 | 0f466"22 xQ8+l 6n0]FWƖ-[p9~ @8(ܹs>|V @RaР*  f&‹/B3BGn-["tjX,T*jDz#c!RT?QjЇw>"""">"""" |OQX,())AGG@RaȐ!ȑ#1}t 4(EDD@AEǨQcz|Ѓ(ĂJJ%9s`<ž e#""""##A}lSADDDԋa+wGDRʥhooE`Ra=_=Ug^mQ[oF(,,Q;w>vm^{N߫VV[ZZzS }DD{1|㵷wyh416QDX,V 2&fΜ3_PNC[+..Əc̚5gs?:-VجVy0u" }QjEd:ĦMpW{J {0?~""_È\6 o6cщtvvD!jj`ȀOHH@RRRSSGDpg+ .\6"o"M;dht@׉_. .VQ8]Ӻ+wxf㵊"K@DDDGDDDD }DDDDGDDDD }DDDDGDDDD }DDDDGDDDD }DDDDIK@DD :u <\s /^:XTXXUVAѠm@ ** fXz5Buf#"" Pgg'bbb2`kڊtvv!Vg>""nl,rA""nE6jbbRoW-hˏS,)$bshcbv7:3uS3xn$' g~bG &l6"9Ӏn:9 }h}M̼:snWOAظr$FFy#V acLIo 3ۭ4*gȕ0ۻDDD`X`6{ ͭf l|1pU|,N :gn?cq\ɏ1᱔ObT {F=աα3̟V {F!9=W܂ulFOF̞>""nEVw R%)k1/֤ X8 &$Y[FJV4v۬>{K [uֽk9{=bѪc_ǣoA愗yk۱0GҴv}]: ({񷛈HvZرc Xz5֮]::?!E:\Wh]""Ee׀u:3(225jY1cƠsՙwa˖-8w\~9q111Xl.CQ]"""($D?WuBDDDD![;-gO@GDDD4DO DDDDa 稛b#IENDB`Amelia/vignettes/assets/diag.png0000644000176200001440000006335414335240021016373 0ustar liggesusersPNG  IHDR 9 pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! 
eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3- cHRMz%u0`:o_F\IDATxOHq?SR8"H :uV !aH֛m(0݊( .?1NA؄,K*vwYt~}ObB>Et:Fx jCzeYk0y7m iƭ<gx;Q1A5KpQ4\(i.\55s3V97 zz:::6m&PY3I䬘^SSSptuh4Z6 2???z;ʝS/ x{ +z _ۋHBVmL@yYİQI45gUC*\2BQmޔ "tw0}Za!kghIGգU{a5?|,\޷*jG8.-PEx)X\h-Yd2IW1C@3HdU5e^kU/%+qg 4NjD0#A+jCSm қBV#)OwHV$'d$G,dL=cjr0QdeƗ*~;|U x>Oz]%*PVxW ͵m$z0vq1{ւmᷓs tnBUZW~4'1pRhӇ Mx #Ӈ`  %q3V zX͐BI-g[qmabQcr]]~R(:J"GmuX%*IA !N%C 5,U:J`*P+fJE`e2-ZdZjܟE; c60d zÂЃ qM{jj nX KQ ,Er5GJ4R[N|8Sl5a5HcI[; %! nxat槧N1aac:P5W_L~7I1ȰPq(񓘈S1SE}8:+xʵ'| JN"C]>v^ H%8%zR Ub3RЉ8:/yG%=Ƅ"ri%CuRbv񆝍"ɠsfY0ƐfO+قmֲ;!\Ў4n>MJ8;1fG,3zV1fiݙTU5~ ߏe^oP.c.w?]] E$C PAal.1$ a٢aS+CVmki뺑UdSٗf$=V5*Ҥl@wy}\d )bySUW_k߿Ee8c.]ž<5ľ<5Dmyjax+'syiySuE$JWz$WOG8> pxx廿Bg۾Cz>cr_cb}vu5M{sX/GJwϤS[^{37 {AoOS~_?7Ȁ͋?EowY#{ό{x \ݸ\g2~, -DLADLA,N1Yf@ XoL1/fƔX'5 Q۟<&@'b!ѵa8ϳohnCCpg3SM1]?r5b<3$4v]d|׿h 'w|i'`:~(/xHKPK(Vn /eXV sDNILsE3 >3Ĵ).i_iߢ5ەD"D8ɰupYVQQuL|HdH7ꎤ6_WYۑs{ȹ#]}krIf\ѧO. 1 z>^WRqzŠssYnܸ1gbX,6зYx[@0`V4PJk(M@i-s l\73q .]9H |l%a_O{b33 N2WYb? ;Y䯇h''iosq$Q&j/nJ~ؘgK&tv6վ@ #>bX-&e$>qktq>cʔXbł$ ]A,FlƄǮ=#kmV1S1wQgHRKZhLeZ cE XӴЙ\!a4҉w;NUH2bA)bǼ_bgʤ>\wi =T^wuwf[gxk{.%;2,|oY"+8WtQ }(]{&nP_Xnnt؜Ѵ(1jӾԑ\rs`;|jU`)ػ%lZ3bѾ%}N3v5hd?:FY㏵?r5 H#>#I$N HA$"MGc~#Pk VX~(G_KF8#rIPpy FۢG7r-ShI"y!Nт(^fFCgY ,!2zF WhyʎR;&&}eI&т{x֩s'ίhvub"vKX,Pj@B#ʒ1~CKx7]m!ܱ۾𝧼 !D:I,?gc\<-Q*?Z0}VAi<:P]0V O#߶ߢa"rq6a;B[><@(^Shqg(PYW?Oy,a=Sϔ **?V }?N?8HR2A])AA8 bBYH" s $Ly)\7s>07sZPZelsuҰUET ʧ򴰃vf gkbjGYh\CP]>ږh-(2fD8S];X4$>fr6o\Ɯ@MӢ"{xgk93D93 7HXK HA8 qDRWJ|v͚5i|'UwNR$7𹠛L$Ť*Z*`* (`RPP`,KB7I bI(߆ zp˹s脤J ⓸pEq !B`/{=CBCϿcm\=EQbn<twwCQxgtbFww7`tZ(﷡|{ E^OGAQ<ĭ7Ij@`6@+ivՊgfyp %⤮wN 9-\$ uAc-A8è0zܓArJbY D-b}BW 8^xD'v 1RTm9Xmd܀8--E$Nl]LWr#9㉊ $aD9 q0#AfA4-9S =xc++qU}7ayYj;s3w\>@C!QK" o,Dմ+Hk g & E: Pށu1Q2kJ"."wW]$+SZϷwsgn]nm]> ڑۆV ^{b51̏ M`%+`tcp{^R>arIebXJN". k q 'AKXT_ܠs7`k+F'N7j&qf >|6'ERx`c3=ʞ{xw "0D&2hxn*Nඦ*xnFW43ða 9E ic .nω7!@^.!†2V1o/̂OGͣ+:AqI k q te֭LPP,7 @-2d=2*Ae9vMk*eY, B`:::8af͚#GͻON9r:JRylقիWg(..7o… S^K$]w9IWI8 "I<AA8 IP~KG ҈o>: pa(P& 0l=@-,=E. 
g)k"*.Ҡli+0BPwBL;;@&H&y>s~Ͻw ^~AxĄfӧ#A4}V1(BRX͙[Xgk0!BI :f8` +cqeeey&.uSvXO_|[d44F}>c3kw]~xDL0_vLak?d,Cyjߝmu]~iV𸭵tb2Bz?ia?F^zヘe!L@ff|n}|,yeKX ޙ2nd0ojvagdIs!x]+20ed*N ndfPAHリc= 3@7<[WSdNΏfƲSXv}*N&]ϴosgsh̷%gN20.ݿd`233s3Vf?sm媯pmc 3DFFoY׌zEʜ+.L*b$&|b%Y'XWovv!=4eCr4Ex(a}e˧ 2X7m,3t cfҗ;uCnօ_s | %m7x%;_х?a B/#d21m9K8ptވSD 0A)SD 0A)S ;a:ui]V\Nv]gkZlNW :يkb5Eik IY؇}aRC\};u(F_x?s(۟}+ɢQ4Y *meГd%k սJP,Q) /@U6tq+\e!J&XS]Ml{jXG ُEʍAG#.]Ur{ijFaL倯 FRQyH6gfR2dHܒve&0Uql6(5b,@ l\&pSZ: w ,47v o燿<̳\zͥguXܸi\\LEL[b0asnvmֽb$C6y*S5^c {S,) [&SR 4e6u 6Thwl5Mk75ckXىil9M Դv FG"r=puy Y<14M㱉0O&NJiVѣ-A4wX1x~NfWaK(]cF.)-y1D2(;;SNdeEYuk?Q폙oe=L )jlkkKQRkQk},Ѥ0'(H.\HYlͻuk[cƌ!;;;)Tֿҕ9a0@œ$``W^Ik&1!?a S a֟<#gL@Ie $j)KA1A)SfR'nduvu#C=*u5ᾪ}RGI'/\#Xs؍tJӫ: Vw>u{6.պG,%hŹ~VjdSͰg.OTT)!ЭT5RSDҫPJ/BE|M8R_~yDT쾬ptHl&`uPRO?^ 9Uè>uCL='l|9$JOT$|3NZŦWWt26&pԡm$zXrW Kvf_F71 * =ȟ;2buMPKѸ}Q (axSK:~z4tG 0Aa S WE2J1 ,>F%„S4΃r~FB5ܘWT(wir ##.ZFi"[X1;9:L&j]%q1TdNRQZlLѓv94l SN~[9[ n. u:݌B?f.bYkUޭҕ`Kh''MP(}"t}H/A8x˾dcȿ~c  "LAD A)"LAD Z:s\8e|y.]+s۷xfV|WE H?K.W{РA;{V.`: SH `@ '/=cJ LiˮpE(p[: ?& OXi1C =(6yjV( 5M6}GڭӴֆi0ƝӶ|=nClBUk{@5 A&B"qFwƝ;Hu8?Hcc#wf)nq`̺46 A _jnYhR LyD.#W=+s^L:O,z#b\M(ymmm'|ڬO9Ç6\hÕns /ph? B@G(_o4[xcH! 70X~grDh "(yfk 0јޯ]+$nV 8~8Wgvq"$ODzv -o!ǽ2i0(E:u;u&c챧(ʔpY4z̹KTRM mS/ Acjjr,|(ۅ^*,dՊ:g{X׾p(y}dQVV'O5u߶meee}$%O>ȋiBBV9eq*W07 m۶x^G%0З'vA腈0A)SD >hO B <܌er!I)+)SD@)j{.ﷳN~._dЧZCVY_Hھjdy,* J]GB0wsCxh\+toEn @+=m@+:i2ٹMȻy0}Lsku/Gbunz I}.} Ǎdaƌka%,ÀV  r! s-#,qm]]fNA˱,rrVEmQx!rg)䙤>5&%Ft]ql/*.ַ`r)/1P\ ržjʎ6vU>uѣ>,>PFl3,b럲`Vp<޶tmRMvF,>GE7;1E? 0A)S n$sfDUHUl4eA ԧ;u:6[uq;Q쮄۲w#=ٲ <[y XzG`^=j#dN}6%[y>tTj/95Eq٪Cx`pt:vUng'aT&l]޸vrF7҉7[TR-bG=j>\z 6[u=u;pa wDZG[n|ǣQը3[4`l&lhf~\DoTَ:[uI(VA8=:jEC2228p ---r70 ]AUUTUntEwFkG3{|?ǟNG,L/v><]4unp;q P&F1n;&zA(AL$¨񓟿?~HSR6lynUffzQĪH.=rG:Q^'AԄ(U+l1zm=```6֛+9 ~"QkՊvE:X5E:||3W}٢v,WۗMcΞ5%~j G~S~r7!$Ai5={[McrRz1F0iJff&Ξ-'B 0A)S 0Aa S W?qmC1|^SM60˹g4hPꈽtREB&AyLWBݼ:NDȬ(Jô@Uk%>"C EY։abClbGKʮAƨ"jVzڙ5n¬\"k#{N= -  OP1T~;4wbJ(@x+v !#sBNnI#qF1{p@&ZÌn2b'M"~_OwYϲv~?,>\_n߿/WRy8}t~{ikka lqyl݌b#]5̱EfovmE3;GjVe' ѷ :,lh˨0AQ0`@v4 -75?<~Q&i;f4Mk4Ѵ M3~x4-Ț'qobpYy폣ix̾C>O>SvOGa>( .خLM$䝧:v8 M6Hsʧ.IK =Z$ZV+k?XȌ@1 A ŚXVk_ ۼ`Z@O m>Vg$MV+VUN?hJh4 -RVVŋ{`˯?/$o9nܸkJh~gZgY`ʦa WdԨQf km6z!iJSV>ۦ]z!"LAa  "%9k$9a677czeAHd;vKSVS 0Aa S 0Aa pU@)BٻC_QP!Q?4bR$7q1J 5 N5e1VJ!Fjumҕe_4S  xH`f02gbΏg>ckdYdQTT].!""[2oJ?duBDD4EIKKw+d{ً/[MfΈv7 [دx6]˸s_F A4?<2dYhdYw.g~0`C|,/fd{}5;z ymkC}-?sH 7 41ݗ1 Kp:O|@8PP]'0*=F=>/(DZ(=tE'S7 eiwBu'C i/iJ=?;<цOјd{ %e _vcSVL-{>裐\9wn C{7`6ޅ3fizԑf?;2+w.x_S L> 1ӕ sXFS[!7>m2y=pş =}b8m2h9} ᥗLC So(_x&S%#+x +^Fr2 '"C1q񈛛 YJMkax'|m?܁e1=k6?YOpg9gnǯ1q>eY_6i-v<DDtC[ 7~7Qק%"p](wZ1{!Gi[ LxH?KDD4AOtwdkgB9y~nSEIDD$""bh14DDD M"""&14DDD7bh`P섰QcqNnޟqI㭻]CWtByёۈX^b'!^o?14C O'1<k-F;T׹u6p@msu١Ԩ8gG W"bhutB(TCGflQ69tHκ{L/1"}hӺH~ĐQFZ [zi1-GAhȌmkGģ]Mr1,yf.(H,/ 1[l)t@p9TXaYNb I#9W?D.[.ìCQ1bP3Fxfl;my#Z_ aO.T_ †8T@XՃ#uHyKPZ _GDQckk6 NeW׍5Oǎ8juB(V2˿>#U'A3ʲ`PVXRq(@QTDеqQ# Zʺ1gueXZ)/yXDwݪ$H],單IaN;uU;! aDDc#7sATS#˲lyϧ9rh{?rBDD)&CIDD$""bh14n1~H?JW' .Hłc^qq())AJJ ;;K/ Rb9Ezk"Vߛ͎.D܎>3<>.ghѨɲ Î7GkB_ӴfGJFk8'vn(;vw>2&2^+ﶏݹwC/R#M. 
Ne8$#/DgE27ѸE@x{G/lh!ٕc>tqޥ0 G/1>{ߔ"QlMk5P_cC:̳CN0 #p#uYw24Qj ǎeД x(4=Qi4!N [ƓBWWWdۿoO4/>1%{"mowuF@{[Fl)'M)߿-r/qWpյZ @gGafjIxըND9Z50jSSoPҭAS8{.x;lEh Л Y HkEkvaV<]/ ;˿߉Y2(1+ [,exG?o݅a;8ܷʻzv;'a >3O霃-ÃveHH3tlP_&n 7H ;q6F>NQ}}`)VH0C=ڀ5#MYO`m1۶)ĉ\l Pb/hɯs"z=OK:<'PlRҀ<xEV>'Wz|ov1a7߅K^#.{1%O6s {q̓X'Xzק_}]<+lTZ[JT=Zf24'@wW08B]1/ ۷oG[[$;_a‰kEff&mۆ?yr\khϧɏLYq&n;nVpc"6ݳ?1YQ)nþ}PTTvt.۷v[`Ol_ ޞ%"Fo ۉ"$""bh14DDDY>rMn+h\zcq Mj㉈s'x{IDD$""BӺ x"6g7QnDKg@ݝ<6`[6 YTb\(X?!qs*ppEf¾]Q iY7rv nKġn^ڱh -8[i- u1/VԎu =ԠyMH3Q:r !yGFA#N~ =f4+Ci68®Fb}hll hkH.T]WBcȶ{Ci$hȌm \[?G;I($5(8b9yjPn؝!ZXF.נޘ¼] wL<\(m< с-sjN lCE[GVU־pv-O3R۵5j\k ܅$=uR!QԏWs'(3KhN`9XDwݪ$HŒ:0l0e\CwCPP:QNNyWg U:¤UGCuZvD8K2RW9:QT>#^xfv dơ@NT+ :בSMN OnOh`h14DDD M"""& ]0/tIq!X0?Aмzk"Vߛ͎.&ޞ%""B(@E4Zh4hȡF5Q~ǰ," p{g+$,Mu==ڌf>c8Ҽ<&`>0 "V#5v(foFk|Gf!hG+ei- 1.?~FNj.!"ft֠[.T:7",WMLz8"A a tʾuN<0OȐe +P\y1:p1rWn\SZ5Qw}!g4>0_xϮ,K\Ԅ" Qn 7H ;qQ XR Am`9FktGQ,mK@VW 4E܆(~g._M~\]OxƩ&@w#(m071AthLY1cAnN bhޜbc!@wM=KDD$""bh14DDD M"""&14DDDQo"544… fΜ磴A ͉3q30/9AW.tEejTTT@|f͂$IFee%;IytEA$&&dgC<;(F J@ЪFA1m(4_o (ʯm=Yu{*nG]Z߶܊UB9\Iw urAQы#x eBjkŏ2z% ks`s ݐ Imyūử8'bco+~ԺwcQ[nᓔ&Ht6aM!$A:]呔51t چR. ל$IbGqjY\2VV9zӄwn(: X~w?݅KiqpG/ƶWwWG8nX}Kgp9Sڒl:vP{Wis oҰC,˖/⎌cZ'\.reGPm򑓪*TWW222PYY*v[[;c{=%(WbG@ .~V"~DKll,:;;`|Dll,;ilV8o]S$f *f{+W!P$&&ۀID~=;(B儈IDD$""bhݔ [[;{ƍ9Q+?j6$$DD4~z{{^hzDDtkDDD M"""&M-;`vzW`aRߍ`99; lV|o~!"B 8Ԙ ֻ2SQ0;57;a o_<&(u?Nu{Kj# UN5wEDŽn3LjO'r']aYOBF4I0wژ=a<Oρ hW?ʣ*?;!ZAt?D#,z*,낺dX0haZyX ZU]N)O]i7.ÜG܉8jLQ]I@q7rve;GHD4+ 7sAKE'NjLR+Sa*`1qK'(x:I ?m,r`X6m2<܍5zl@2,0jjqQ8TlHWn@Ѓ#->Au2XWvD4%$$Ȳ,[޾;2fD%44a }M'Y|,Q8 uXq05Ʊȇ#M"""&CIDD$""ݳDcg3f`^N.tanbV9C9NOo4oHZa(99`sPe=џ:F:\h=h4hQg"MNo` !P H$P$ $q CGq}%T]rw!.0=YڑI2 "a,*Բ$I ~UtX,ԫX YqM5!IL1:Yc޿՛ Y ]#k3N2dY,om1I/MvHMM,?缃#F;|PP=]r/IaiD\4˻qb{Xm)h10#<^y(wLGe`bd Tǩ(ʬ0 9mAwciwN.#'tS`DDD܀Li] I4͙1/ǏoE o14DDD M"""&CIDDD M"""&QTƙwE[[.^;w.233ӧ8Ҥ! Zd9DTAɵkO& ˦fl6d 嶅Ff6-Xl6457vÈ53"8i6y'VCl @u!G&4jlEρli8GjPPRf#4;U+WoP;J6=з?ЀqXr%;4C&5&uCݪ qVԀGˑ(̦EDϒt(q|eulq4O`=ljҳNn-I(8a՛ Y ԟ._Ƽyp31;h11 vCrr2.wvyJ4 5!#C z߈Ӱ\-tYӰD<[R>I0]Oʄ:J5El@=,2^ چRTvJF98[ o<ǾYly{!~6L㍓'&4,˖/⎌cZ'\0$M&}}}8n@s?>࣏>t:FϩEBBlm&2JKK M"UWW O?e0k,HjTVVCDtDbb"a#11 &wE[[.^;w.233ӧOx9HN.cEy(/߿.`ͭnfRWRx}hZr(I~S l/|9 Ѵ 0ohB&.KϢB KH#gQM H$P$Z!nݠ qVԀטBsM5!ILz#9H՛ Y E\|C__槦`nhS 5)IҴ1sL$''rgz\._K=paCYzr' %aࡍ8,IT:~Io ޱ -6$b.Z[o nGMk+Z[[q6 ɷo魗-ߣO=޹K"?DaU;w*14iP_؄ ڜzƝh*A@\48 (Grv)VXrYȅ?$Id_,AY:juSr8[ o<%F#c̙F5y`O۳n[Y^,ކWܯbxeO> {%밻x[qHsN@6%?83g(KJu$#oV*w}38> (PWPrJpܰ*A#˲ly"Ș1;|b/pbV231|G}{tb(">>~ʜSUUq)Q[E~ ǯ֌ TVV(!!vhorOݳYu|RRRMey ~tn d4o M".Ă L o~ &ttvv"66O&*++g\rD~Cx tSgDDD M"""&M9驰hڑ^hoABB"ީ^=""Q5M"""&CIDD$""bh14IDD$""*Pf7]ig}IENDB`Amelia/vignettes/assets/context-menu.png0000644000176200001440000007576014335240021020121 0ustar liggesusersPNG  IHDRlsj pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 
0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3- cHRMz%u0`:o_FqIDATxXMhG}ژލBi"4MI' ӴGԐ Zl"L)-ANi1HnD)Bh6"gimD=$B%kmI ޛoI)U8?>ϻ66 Gʅ=wwX3W%0'=y8r> dIQXɕZ((W).㇙tpOc t>> $><1+BYfuRmoJYF.HR\j:P"e. EԶ2,j^+Z"Wl>_ͬ1bXeނ{y7|A?WX,?U~zlPt6;z|hk`vE`'O523rmd< Ѹg&a&\{h}9S(!J&̲ 6 Oώ*&p)dUQ>w\G0< rNkk .4 AprgHf@Atq"FD;>WU4*u)忤 dۙ "a/Ӭ|%W8BEqDQpn""OB'Ic%UY 'Q}[W\IE{-//;ǔߊ\XOuԚAhVJ>J SJYJ !"A;56U5+:谛lƧհœD;l0DF8$^vx,J$9vۓ}}1=Y2@79l>PNu\>t흪׾4#4tno;feˈѦ^P~ibgImn xxCfs{wuhxarn?@O#tZGۺW2cccZlqqqn+C?.s&cnHiqvUHWx@ 'WHWxh*8ީn2 ܻHqF/+U,򽛥RvŢXHZ'\\q7l^qDTgyOnvϟᣥ(砲*=|Ő`l\t($Iu]X0/(,Hm@ `hz@AeY$Y6з1}C:uh֒Tq1z'x'NUrR#8Iu8S!dfOy iQ*j[Kjiaiu+TE"Ӡ\h<ۧcMz-3}se<>f3S,],Y:kKxA@Tn4#R@DJ 톳'ySCЋD73 T TMQSvJFxB-|goF"\.ٖPl"\@RѽG͊z=sTv<@KQ3+`&K}Yљ/d^P.j6 /tTJ(ˆ, Nk뭡 {?]_hu\l6T_!';ę:dInn<[sPƞ:ChqVRha *۴ F^|TIv1Krkݵv_w}PݲVbԘH*ǘ۝H*3I%%3'JtEercjLl(tX & yd4&ʌm6^GqS'7*㨿M9N-*&.QE}nU6x<1}Yy͵pgNt+rȧo:c k6G~\=0Gy806eͤ*qt/:mr ◾`"}!hR\0j/TxxYIVfi\H\[8E]A }3XlڻDˑ6 xm ^/-2ݚizxX uE"kk~KȂ6קW`ʒ jT&үm-}:fI0E".\K3e*DsyÌaɂHVjm4uQl0ICemw$C֥CX놖kO.j!yԤ'qG,5}5nHQ+299 m,k.lyFeuR'O>$|4IR?+ Mo_detQ& mO_f9eY]Ooed9ؕ19xȲeK{ yC+ eyڹ^,,+F7̙J)YPOeY^eYo>H_ws9V]2UEg0ʥrBRebbGM?1i|yG?v5 M.dX,tc/"yHlNfaڤ2kҽW?z >2\ƭ{W<^`ƀ,c5wG `۟{n,񩝿&խ;ǡS0d&7eCf-?s6l>Tlnf63dxMSj`S~?q=yP~.+o ϬK=/S#d5.i[x*(k܋fB%E T/#dz~8n|dm!PT"=]UI9I嗕4y lM(asQsp G}t8pY6H/>U2fqhƒfX͍ZҎez7eֿZ3fL"X1{{?z+iApfs'B8OEBHGo͍\uMMŀ(BDQƏ>(yYBBw3"LNNL^OncD tP\jM}%Tv' bE^9"k'lw-w$-{Ds `KxԩSv횿Q_~ 2p||h~ωx<U V??,/OU>i)?36}?'p~*ITes6noݷ}Em[Et#kSO4FBo.Lu5n9x0[maM НR$)///āF6[--'?2oH央mTlgWѾ4:ֵ-VK/1RW{7#xUɻ kf6' ,,VQC^~`pI4|4'VҔYZSf+@?éEUl&Lo=o8?ߵ[!RVjbχ"kѣ=cKGaV`\G/b*zuT\;ߦHe{x4;P 3%ffHS6K ZˬI`*Y1m!I٬Y$l"j$MMJr$0"YKLJRfN#jBI(Qf=.Q|ϡn|KL E/}p(6H!HIki) @/,=)^%(+d Po9TW2ݬnjn?{b7,OkϞP5kf0 O9@8>5`Od0w ]w0 3@QɔͰڒL\Dҁ,dmv %j??66VerBùpiю}/I]{n`xZu_,;o;`D BGXXB4ZuA,8 5u^P5i/pp,?E. a7zP_Y{LT/@տ^em{^ZUVXEOwp 9fT!OmBOzGYLjHX!kB]FG]xH<]Kus\crC Ȗ ɠtfHa}Ҳb H{-!UUXȘ; @X&7Pv!]A&3M)𙦋ޑ#[FG gPjB1;˟%(MQQ$13w)P%Xd2fbsRRݖ٩J\r)_^l1}ee^_(-)1B:iQ٢VdV8F@US#٬L"a/"O5+Ef?d.ytdY_"Z=(̱" C֍kA2LvjW#|v֭ .ۙ:d}xE  jjsڵ8.y+:pƟc(Z H;2 n_}Ÿu(b@@ONe.3 [h*K7p\z"zՠ ^oOgs ,+@@Ԯ;um)WR 9sfCo VF+V`2 ˩30bP cAWyNc ]L;NB?vow$gO=Zb*hPLE(g}:&E&$WJգצScUYSk+kMe'dɘpuܲVRBp$jߊۥ'_ #pT&-9FUMpd{%|rt*Yfܹ3:F = IJ?<ڸH" 9 xo{t)&E wfW*{}r.(cϫ=/V !-Ѷ~PۣT,)KuZBTf0~GaB4C¯T((,B볱(bRPL0Y|Ƕha md/Ls9ko7N``ڵThEwwnN=HY|H KDR=V=#ZGIY1o'O߉.[l6N/ J:6QjAѲ,DztR/.BdY&UvZL=r{ű=ptz+K:W\$v&X^ygK x`nJ.д<̲Dm> YH>.0'\(K{cy%N%j0hjG z&\PO<*Ά8[$BzVq5$9~^)TUۯzjm3|?*CD{a>l},<2NGRJ`{1k^̿VD2s\Zqս$ tr`5GPJz+KxU!`\,̤)O?4xtzu5@\ qw5 '\$[+$aQ^fj|%SS[L,3^?/][n]~=~lt}ltttŊsΝMuU wݱ|"zk\|V^x1[gML6&j!BVo[G!gqgg?C@\dY,+u w5?a`޿mM>46&e}@ݾ? )t`l_{wmN&h;Q`[R\%^/U~D U*ߔcMJ#_ܹs,X/,s̺vڌӦ?kTROkeF^1B3Nl4؎"9+^ V*֌ɬWGD⣣L*ҧ;Y-1>?:xySA?@?(Z kꁙg<Sy|G>,wM_;]3~<ckZ9g]y7Ɏ؆Y0X,JdWG~Nalku_C_m~ǫFᝈDvşjc0wM=gbVGUxv0>)sdU_42jGoَ4qo, r@, B H$BBH@grj-r|Fj S설\E ,Kq_>8W,]I&yI$ = ]fODULS`IevmġPPrEYpi ;0!A~!۞R { ! 8Bt'64&%E뛽> JUZ~݈#pVeM^B5Rc^ ǔI9d`IbR#(_=j+2^.F?#(IpX5'}W2p"8d"NT] !Dw0̬y$gqL{ZCF4: 5A@eYj0~u2^qFTȲ,8=NR~sR=lEQWh5t (ETS g~!5t6 $ԲiIz$qBCbpx0)(\`b޼yMX͛7͔M *gk$J_h8ܖB{59)5cߒ%&e1ǑɄ1& N8DݚڎH~jw)f u`XX"b\gjטv #MI`mwqjC=)7Kal;:ٷJחqǪT%T^x 奜ms_m%L~j\d`26vzz$B\@lX5 |; p&L(SjΐB9Y[ާfW5~b9B$!8uH i091.0ʲo'gN[9Y"+HHX" ڛZL]g#٩SWx}Poz(v:YӀ,{ %݄Nx'zs y _;Dph: fUH2ZXUs!Ue1VSܻ4N'?x(Z|LOz]=MP;-(2nYrF1 !.I0/́<3]◟֮"[6 NO'3^|D9_@aaaaa! 
~FA"#A{"DZi߶2(j5ˮMo4D ?FIIp>`itߜ9g~w sv@` 3|!||h[\|9l&f^h,~3˼Dˇ=18 ܍/*VCQcTDd`0\l%~:|& &nϬ`vFkUW8ڵkh 2'J}-áoC+#oPG׼ 1{ܕe;Vۿ|e;m֮KռЫ2' Ƴ'`սWw7<U\EuLLS(SL_Q:74٭lv+ r+ʆuɔ Fofj@NSԝ;_SwS[ٙ~(;ӯw+JFdPofrCh1[FѪ5;_Sc>2үVckO!XYF]/kQ1pܾdn ,}[=ޏUʞۭ%zѷPsjVfџٿ) `fX qяxgyZ=2ϝ] ?4#{WMյvș ]fεulǒϔmyS+ْaPe374@Qdf?|rX=̝E] o(+F v0ffkf PwQoaXm潝aUQ_&2ffFra|6z{V_|a Z{ ϟgdz m?馛nJaT-rٯtI53/HI9LΟdޟvFL?]9եD"ɌlK^)i&\hQDv.-i'=ch yyV}(|SiK_[]M dȈqEKv﮵<H)Dݾ_{AŅm`P݅js)6u|WjU4Y<*)z r[i& `%V\ Ff:oK<:8{T8 lzX3MV>UQMQt3#&5nfV)DiϞygUcuwnvMW9Y{6o@ٙj(V@(?h0h:ꚼN+ t6mߖl{蟺.ia~f*sPV~:, Lg(PNw߶߶uUWOQkP{ 3{DXn0XfP",6`O/a 8.e>\~}]|QQcOq1y5'J^cClj:E/G7cP%D.p^Sٳn=/*W4gO]D1?iփP=\gx 'bT߲e M<:{ygJPW4/ۘSHϟI3-K5.FfUHyRK 5;\5)RGQYaEZŋA`([lӷ7ߟ$ HNݑ3yRVw"`qPW+aQA6)5M9r8hԧL+oWyف !Զ$,@xHBsEH/ʐil>1H(_j ֺ~p:h02^, љPQJ #KȢ,:%PjesY1f\CvpP²bZb갨rZEI НPr@YaoXVLgS gAC&BuX\}"H+z W\Ni_ӻ9'8-$VO&3&Nov7iO~q]@f5(vCQ򔵳myc#v//jSjt//jSz NډI<` /ߞ$hqg{3[Jc=n,TdC< CQ9x`(}ң-cRک1%iA{ڔs6En<ƼS7UIM:&`k33ޜ1x"%zD[,J㦗& M6 f5%~ŋY|ZZڤi8l@I~:.HaQ'PᤎlB+adq:l$zBzJȓ^PL˩eͦj:Ԕ@;`NQ 'uX$QS=`6ͥ th5apЯc2О5 tᴫJ]{USn;'όINqKlp,r|{w"|{ӯ`ݮhdS"ϐ1 #!yݺNyn%l?N_0g9=ӰwKz?HOUߴR/6x<2M[YMVoǥ?󇢚f{|ld>j|iV^mBʍEp 7|3{0m,Y>. ޡxG]sO5 eOzoZٖ~+޵r?dj h2Ҁ{mI/|-;gfuRg;˼n3=|~JЀh؝R\g-[ifY糉霱e~ڶw2;7jiUUUoZ_ 9'?)ZhJox1`ۛoLY34:6}^`uQF{/+PXvj_xuF8oe@Mf`wFx<0t -vnݢ~Х`^'?SK鴿z8⾢0T3v&^} #>Hte~3M45x1-[KW:~i\e۶2qUjk|}+y<[dgO6^_.޽~qV j7G~g8!ݾ%VZ' ?~n 3o)~թF~Ϳ~UF@H^uG*,Y/k|%#1mK1(3c)==Ҍ)_1śx=,)x<&I5fҨ)/{<馬!MӇ/{FR3MiCO)$}ɔN&M dyiL yӌC+Y&S*F=+@YW2WẃP&tKU]&6O³X ਕ8<8[:-f:5Ww=8 ٌ~><e6DOEEłca<1Lg$y>K >FE[DzyAG.l_A<ʣJ_〺=8. `q :hqpKK]ñI~s-[ _xJ{[0CHkM&κͥmZ>g2YP:~d`L 3p[m\Y! " Ǵ=εo7ͥV[7MghG!&^ /{쮲#d$٦6a;[av룔RJɵ&g}YO!=εǤdx3&X`ppl~Gϧ!?m5tz̢}u>1I3uz4|#cV5fsX:c0&7~BgHbM )m%Q !ȓ98I !0,,:] ųm:Mʵm.8k] 3i {  VNk`k;!v.fmAp?PԦݍE+zP8rv[gFw(JqOW:PڎC.R-gn[ $&"O!{W|hϢjXަy ֞>%m #ƛ F Z$dO1c޷gR30,u|%‹^! R'- $́; 6DY X/Albn/}zex"38;d'{ \B(w8'%𢬦$TVN&A_pƌ1gc`0Lgusn,?:Vk o2 3Yl bi!OHX`Bd-!R$`Lhogqp e⠑<>Es)&d7 ˴ kFBH{^B$z:!< i1"p|a> M5y&PWt?$CQOH Y` kƘ6RZwXoef N (+Dg"8J)$D-7];Pb&8@a'>ZT}7xewo Uu|^yaA<v<, _@N<P$X4qn[k0d[lId ܿw2{-O\|*tƘAx'_m?<Ƹ쳕sy'b.}.nd†M~ "D%mT)OTl*gl6 KwlgktY#;.+R8`qB䮭hлzOC}Uu.}o :VWֱa.ܿ@&_ &"y7.EGItl.]3!&^Ia3B =8]!͓t-b"X!,;$;I~\Q"ԍ^Q1V!ݠp%!w'F2DPeiX.8qD: _cQ ;x4oǛȶ5] 4WpgND3'A[\nv+y T́ xuUpp˨\jcm߾իWO>$bH#Ɨ_~uZʽX?w]`rYa;Qꘇ7<<|5H8 %%%%33h 3t! $NM3l!N`s'߶'Պ̨6Rx۔Ҽ/LglD |%%™7*!iQ/&<-kl`:c0LgD Eu]Q[;v,tAkpvzW27l.g]M]*>RU'uv5\Uh^%ܢkqAQC0^r3cG}@6BW}Vb"c3.UM&mee6š=-&|:qX`}]{΄kKRRR&G ㊢Wt(c0SIu70^7g5|E|>uٜp/>lM+V7u6::jg?SWºz|~9~o{D3,##;V-[l~Sխ_>vY"p 7&j`4GGGt2nm#CCC.QeL~>;ld/}c0&oK?`Nxyu{REQNͩ>`UƈEU3HED*r*z$&Sߛ1p)9ʽG h zV%gOz.Dvlf?3{ a>gk{,ѠN 73: (g gꢭL.tjoayh!MpzX&pzX~Ӑ+mp-*y=5؀~.p@^3bxsVEG|޾ŗ?j1[zw Krdf'=fK/^:bdhmzyT$D^b1[zS"KG|[cś&ḇ-fK"N.gO֋ҫҙʊ7Q:oJrw(y' 2vP[O~=7\@ >h'l;776c')P 韇?rF>&f;o~S\#uޜFJZOg7H$I 7f~KJ$S hd̦fSyčbIԕ?>C9};+66&`Zk"pip՞|m6uIܲn$u JL}Ҵ}G'7Ɛg\m6uM槎ak N3mu|'?$X{iVȱzFӕ?wMcIt"t `iBs軮_S,r*WU؈ ϝ1zz.b[ogoˇt':KW+6E`巟jup~7ZoWHj& ֛D/o!w~̯%!&Ö́ H1֙@ Z-Y u򙾟fIYx[G-؜U/}|f՚ׯ˸9, {<1 $/wܺ2sSs@_2kQz:>UD(? 
2jN=ȭ$I(~>W~p:"PZIo7 ثr6l+tOJ~!=zHoDN=uO+RKN/MW(K%M󬁢4g ٝOֿ0l^/X2sҔF'mS緕|_OGC A/ӗM`~G@Yd 8弝g  'g"ސ݂ `٤I |pU 8,Yzߨ~|/ 29۽nCQw=8řt{{ݜW\KM_pN㿭l:Il:񂵿_8'Y(l=# {#qChQYY.sQ!׶yGn!7wHh{sm H׵Ży/j^7Ż}v'OJ7)+ŵmgUsVF%I$Y?SLρKTZ$XTJ h!6=Բ뾗^a?rL+oREf^(YV&/gr2]ΪL%X*ZVyEup>@#j;Wm`Y?zS>}6.L/oY_~ѐԿ!WTf!r~NS-gNQoteg*q 8Gw 72ln A @ Adٞxyח8FqƠ ؓ< \X.צ#xlVO`*x RD+a!2to4h&ە/`n|ʻd91 PZgb9 ^pxWQVX7`;62̽ `e,X0A$`3tBD!aPD&(gt]:6 >̿r֜3l+ҫIF*eLK[ԲPD]eP9~)}|ĦX}$$y}wKF D.QF*B\U MIb\c.́TBv*йRFLڎќ$+nˢC4,¹e:+{5yC JÇ,`(F!2v YrPk!`H2X%*=xm{%F6ȒOSF&pJ/ yjNRvVBVQV҃^UV'n\Dcl,hO∖#(g~:H^kTFe&CNDl"Џх2Qc#RR'3FWeY (+.I*ȘٵYPct7b o駟"""l6Qv\6iXVF=wfׂm橣5eω@(A&Gqm* C~%EMNUeCIBE=-5O'5׵Ab,AkSLҏtL|2#(J 5Bs9i$lȜv'Z ziަ(é'n~~GVCEZXNP+8,,0Yj.)Iަ 5EЉ߱& S< @h8_Xi=&%j0Z[16*k~4'9%,Yz?.~dPst:,΅ nαkjE}}رcǏ?a„ !CtL֚,lܛ=*>Kz9{w+$R"9Z[ZOyKNq$)cDtǴ_o+Eom+2aCy75=!kU4w"}On2scT!*eҷjN76koeS KHP4½)]o>*nsӘn iwl0sCHܸ B͡և_\m5wPgzz,@Z!$$:eӎQUVxr4@W揣"u/y'ZFk[~0!&1Q,i>*<@l$NMJIpp;{T2Lv@RVxk+I=.`ɥM0/ѱV)We ,xmw_;5nPZ.AGD0ǡ7AS½ 09Y,gJ-kO"9Kye;x x%UBFnWuey}}:l"rV&-*+oebҵEe pO2 =t'b`-&&ܰ.(犉e2u9I;kO"9KwRv]S^ "dy}3ڬ$IdctW 8s@T2s=;"`;}yI%b;\*ٛ@U;`:A?| o zmz,70U`ܫAlG-Q}ׁ΃Sh,j j.,c^q_W W t"p֙w&9'浴Oo֓ygDͥn+_fF2jTwܶi̗YgU-h,Ֆe>`PmDX\w` 3+``9v?;cCy+탢vO >?U;9_3:K懆BH9;i <"2"@^=7&kѓk燚;:WZf녚C[<n{v?:z-(d^$xWBӊ#]Z*}t_,*J]s(4m{uIĹv #j:D<[ilxM4[{?hpy)SLK^oՖ "ٝPq{`s$0ir4ACl(;[<:::ߝ\l(6Aw ȈpWZg (gE#'*@h9cwJ*iI|KK|  W9s ]m\Æb1PY!#YEfF^UR .=4(d 5 ~3ݱB%G_. Ȩ$I$gהtegsU QfRaSSaX,SGr umyBVM Pk֮~ (g!6$cue8cBS>hA78Z?YV[qEKq[\L]О$b` ZŔEe&Wgbiy) H$٤!}6DIYm-jrl6C*=̩]D܄ A  Tp- (gzT*K۪\W Z{ن =y3*] %=s^Bާ (gks>g RLg ͧ[g Y@*2kls7@h4ղX]kYRa:KIF6P{&y}7l"}x(I`sz[ݧ'b@*Y@&6;g tlF..o1-KA#C5Q#.=]Q^_J뵥҃/*JsG4\8HF Gdj#sh@i -E_:MA9󙁖*pL;D3.4#O{ZB& ZgC3Jz   fc&v^O%92 lS)j AF\HcJT8lW6kJ*f.b=搟WFP| UcK6lP` H[gO6HfIbj$#3tz fNYAvSeJZE)q77r6n d[@A,U %ī7jK<ڤZܤ&j%.gI/iȯ\Fo fLgE O:/Q"Y-A9F>\VIb+I9ff^(o,FlC2FK V)pdžn/ 0APD!]3ć$PN5rTy~/OH ݶ+77rx^%͂jzSlY uHLfK2#+azX1z$*,vzA  k|eQ/'޾}֬YrpF2l!ʙ{R?F@|ˣ p /^;nj&\D/dړ/Ot\9p1МY?ح0 tӜ39)Oꫯ_C$Zg~"~3 sLS |ܕ`-oO*i|] | VVާmn>>S,[,$Zg>3Is~6ٓ{bh{ \ f0Va 4rHmIcbX-qMNg9鏽Ƭl/UR3P|d's9L5`9L4#|v]$Op`D0W&(,}$-x L͂JP Lt-q'eKK̭i׵3d7_ڒ$L{ qK/a9T+[a| [x'8 pYhNO^vLaGp8^#-xms56V >MIBH.nQdT,΢JdX,đ$⪀5QCD#g oQu ҥald lA9CA9v&m ,A:vG>>E9cw!ە[|'ىmn,7UrGَ.Y\xp_Mn\`',p\wjc6]-}QhP_ǝyvZ 5!S@%-Wgƕv[a޴k onnX'Ɲg䲴9[p;˓{rC(gWT6f* 0 D_"˥2[) 5O\i{QGe8#&/3| OnjfMkU{ڞKww8v02Hk:4v-Zg p-;Q h /C9@PJvv66s|hs/*iRmk=r tcBD #;fh's=3 B\[VZͤ?^۔]cN3 L_J !S'qtB8 hddT u,ʙ`R2A:ZYwvݘ]e h~cOPbəi4LtRb+N6fV(Xϯ\:WNRSۛl휹p BE0\rG1>Ud&ࡿ gZ4e<'S\ѿ` lx'bF *۲v{ b \ ޤ#P!o c;@odG܀QA/J9_j%8>頜!HpYaswW|P$h(rĸnҿ99<<#z5k?;;;]vҥk׮ݾ}j 'Ũ&H.M1R"(}1,o1 JTD >?8`XgK$>@hnnOOQaaǏ7n!>3ll*G$O!!5`Liefy13 |l˔q{'ldxZ2"L 9smεm> ,(C9CHI]h?nk0%Цӳ}v'};`A@dj#hb1mreFXrjsD20K\ AL*A @5@A #3APAPAPduv˟==0o> A$Jc g=,gA n,s]}j_¯w@8F9C&9>UWV:*Svm,hO∖^%M*lb. AC9C&:6~ee[]'/]_D6v8A F;@p- #7k6_ټl6{\oooXE7Z,_~l6 h34>I}YJJX, D}}}}}}.ҍ=ZPάVj ?,O IjEECF,/κ_~7nL&ƍwwP3&*ђ3f5ׇ} 52aһ',,|̘1ǏB,^D䬷,,,l̙{&Swww/Xz{{K[ǏQDc1}坜m޼jj1cƸ6~b6bXK)2DB;su$q Hș3 uT$M4kw1zQ(g E?,IENDB`Amelia/vignettes/assets/output-log.png0000644000176200001440000007676314335240021017616 0ustar liggesusersPNG  IHDR} pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! 
eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3- cHRMz%u0`:o_FsIDATxO+DQ߹sBHR0BRlLI+%IFRD30㱘Svn}z:tI|BIw,hmZ;HV'ĈW2JWJMgkjNMn~Vnəm״tRri R|GCGytPTZi&L=O93BߪШc|^A9)_sZNV*fWߞussh 쳆0{`?ۍ' #"{,. L 0 0 00LL 0 000L 0 0ہw ܏[S=D\.3>33J%I']=ha~z5P(Bqө栺dɩQ,!7:H rةGMEJ - &fp5<H#I[|~iIT;eV6/cΑ?1ܩNC^߻/!}_ǃmfΊ9wJ_` .v |,e\v c>Rz @NCQ(| %3RH vS%ro~ۢ^f)*ՆJ3Vp'g#^]FY}ֱε`Acu XsZz8'&JwZH^55~U#300-(̭<!IYe pobnuG%7{X[]أnO2ܯ5R!vpكqg&XZL36mQa3wi}Oȹ-G`sD*IGe桋qbŝ0:Xs fbYrT!D25*!1Inr#5Nl:6k6J!*%mXV ?qw#{ukk9ȟ1\'tx7`YO;1A?u{MbhD{aRsT}.%s'\\\\\j.\\\{})y>K{z|z.}.}.A=>\\RsaחIn$ᖸh\CRzՇqk9N-H;t ='Zt Yaaa}}}읱oVǿk( qJ Y2]uC "UHʄ uF P`ҭ:J丄kp%Ufpsl|ǹHI;3UT \.>fg|t۫kK҇#HۯtւN?1B98ĂϺ~O}3sG ߺ92stͳw%I`eaī<ϛ~T !xqlĒ$x.]:!]Rާ4^+# 䂿VM5N;H@_b(co/Hcd,A͎9oێ`x;w'LS6uJ|tt;-_q 67776<|>;{&q+J%nC/^T=T^]GPTcvB-?4?תpSs)^$H:NǕ7͕I+jkyҌ/JZ[[W"d[VWޙW:?^C_h4J7o^kXSyVGm)PuKڍݔU!}:^9`tR'u;D\(Ꮫ;fingj.;n< }qC g7;|h.h(Pg+ }=4z/T;wB=S;l~~0j+5%EykQ0r QֻT''=YYR|(: S\UjRmYkS5uE>%cj K"x! ˲vk؆G5jr[muǿ.{$H.;]/yÁ Q %|Dg/BD8SBB_Yr"iÞ=v`{;0;;;7{{o!ɩ a!nLAH4BKB)ܖ܋]/pVȴònSH X1χ]n4Q˺ڱb}G˻᏾Ƣy`O5ڱ˻ڱWxbrt'G?n7fd@-Hk4;מrbڤc f;n7QiD'93~? o{+?|< WWdDԍg[OZ=o.|N UmضKߟh]3& $|3EOq6Hy$蟣y!K8;$<3gV'n&,h*|3Eb8+$~2W&sxY!Gft|31{lN dT*@^Ќ:U6B3^2^[S@\aJWO:.r5"{4XX_wV /oRJO/^g/;rtվhGvr6DtZ՗5n]_M1?#16wvV>tn~Pi{pisvc==/Hȱ19>>.ǥi;R>wGX2o$>$"3Q~XOoU78MCk'-\u&2v"d1\z  z[V࡬YcUrGljP5Wo?NƻޱkYC;۽[ѽfsσy*K/VkGQpgJ<[wOюN箱DBNLܕ.'i)_w'zd2[M}#PZz- |3uE +W- Y-;UܷVkyzy^>U;H y{"\MbUGU[;u 5Vƿc z\k&m []+8Z14Qz*> %zyi+y:zHvƧ:"8q\{o;*_+AQp=~$LKiP;nS{RoݗCxܻrټ莻?D%TA\68)xJz#{ROT['"͇88hJ)~#U8}G(IBBB ׷ '{@w gx8oq\pqFK(\B(\B \ h{wr1OtUsړʡCE%8u[-,/#o_}LM<)i|\!.!.!.p-.L;Of#%  PIǭo>\Y 4m2@|\!.!.p pK)%7#w-qIa>.%%%.!a.q?n(`>.e>.!>|\!.!.p pqp%K q =.!.!.p pq x|\"K8#%%.!o4Bv`"9qIa>.%%%.!a.q?n(`>.e>.!>|\!.!.p p MsT!2uO\mV[-LnmO }9 C˕[^Of6 UU!9Bǥh'ǫ\i^ /S:z]C+.wxփYQPnE+8ܳ%tϜ &n^ffpw5iIseN :-- 梭m.RT⌐PF *;wD:pIB VZVz\DIYژ Q\Uٝ-KE D&>܉)Ӓ sDyĘ@ `L 11%&@[j);C*ZvwtLJ;3{;sLlg̝slܸq>~KKFo> ,$e_==_z◍AwP{n['IzfO~=ϺWT醇iҏ|2rv7-''Aְىq>)~7GekPC]w7Hd5#ICCӞIP=ٵr=/#SKC_^<~zޥܾu𑵺偧b4hhH'u{uJ<~,X.G~tHDdekNAIҖ9?u#q㫱ߒN[#., >Ǿ1峂?uVt{\YXI/2pUCv> a&Ut5wJۣշTF=ve$#g pg4~sʱH=U&#'m=3 h&QXR坟rUhc+J!W]ѫN7K]U6hrrR_t1F3g 1z^@ӺRPI ެ1R)\I*dL۝[Dn;ǫީ`*Y<?B .T=2^y94MǗZCl{~U 2Ș\dv<1ףJ4?[Fx߫ub{c+s o5yg{Yh׮]&gvvv1kg%Y[I}˶ȶhnv;r[O/_9zW+|9:ϥ<.Cn~0|iq>aܖk_ZBb{s]=EBQjIbbſ?_i{ ;99i'''ӧMMMٱz}5ZAEwtgftxDsTJ…YML̇3:s} _J_mVw ҠY rOBVJsR빧jU Yr‘J*dMJ:ќXt.{1*'|gc>.j\{g w78~6`#S}RF\.ssVsss8 ^3vqx=:CK:~-n9<؜J{^xr{-vrS-SBߡ|~oR֞/ۘ`װe!OOOs٩3ԩq{п+cccxtRtvz.^xQuOLj>༆Wi͚TDs+K$cjwt,[6֜;_:\7Nrvꀻ9{-o\u˱^H8kMكǦV,`Znk+ʹ!/'w{ηYWr 󫝏3uv9rڗvr{T=I҉3t9w_tl5kM7tsvH*Z'm9^QGsss_gk{:^[^n6z߿g kg s뚓nw$S>D5{Y{鶑S:®y%Z!>:n?/R発/f}L>ChU[}M5b?(MkN-*[GO rowN{ṱt?,pu|NMD{!DsHnCgsFIssF쇚ͦW ?_Zk_:fV,M_֎Mo*gFjS( ǭZ}|ߓܧF~jt/k߶uR)u5l2k\Ȥi|T qꏓ֧=LZߘ[3>q}cz']_`%YkǚP+rϲbd4w*{^]U߃=ZOZ_m<^{V|S/)쑶X=R @.EN@. }t{@^|htvu@] AeA~| og}cη]_\ZK?~7WZߦ~P }t @^맛@^+4X"P ^t ]@]&n+Oӥn }:]苜. 
]A&R?uMA۶Oͼ,+t`~O/@.˂.tOƜ/uOƍӥ., t/r.t]蛠KtS@7^맫uۂM,+t`~O/@.˂.tO t O>A.EN@. ]tKlPYtJ 8*ύ۴m>3[(WRl]j)g+T{ lÜS;^z;=~| W+zEVulѷ^l%r޳{/F/_Ճnku$Y::||zq{ƵMл9|urrqo\~6#u=U,'5y~m\=^M~ E39PN7%ٱc'fuH6l$Es` AlUP2\b444T/oZzu=ɴ)ZI tZ5Β^e>t3'mű#yqGsn\^&A/ĶI,Ɏv'tҒ8uJ{# G) <ѦDWG^} >$gC>ޏ?ٸ̕"B2333op O}ʻ9-DD{J>2g6$9DD{~^% \"%їH҈:ƶ@_BQoy9K~;md2x_g_;? wcmL#KX <9_5|}}K6ՙsCWgq<>s?GD]Qp鷵:?I2uؼqWQ} &'o߆_ol}d'=/N>{ŅI|ٯ!7Q pg/sxg_x6q6{a#9|>3ORD ͛qa{@ᣏ 6660O7&.77qƶ +H-+wVH$$"  ?w_";$SOwC_2! >{xa\m]g_:/ߟ紐L/ԙWT Ǫ3tb=Q57O{-Ԑx?^D廯 QaEQll+q+hNDD [R X,ƍO~?^#L" 8"]77|ދX~),_}_嫜8"]]+k?Ń~>D" ||n|_wrjt'oW_/O> dB@$jx ~v|H$L&=AcD"?G; D HD8=M{;ވnpVϟߺKUQ{.o9)DD NB6ty>|؟ed؝>""[w˨KDԱ۶53̺|4cMTF䑯tp~C 3V_w8==}|g)Efi ׽쯛mmm_"I;{_F+VW!򨛆I#Z|TonJh{tz+;ϨS; }c}U 41whƹ  au]uگ4 "J "ml>eF*NoTDWRJI ?0,PCMCVGt: p)sz*-Ǟ]W]q`'>L&?cy|/I)$d ȕ%Y+d,e1MNJYnߟ~ߗs:RRŚ۷Nem?=<0{?ƹ!s@{4;Z>|mG5K)%<ocxC\cHs <޾>ԮA[o|˭i!]hS%"jsKZ?~(?҈(o۸q! o6n^ǵk?O?EX4"|2&VVb}}++W@F{ܴ;/P7:hOq7~rRnݻ&)rcyRJloockkX_O2&b/8[BK yցIdKtgWW+U<ռn?_jH^*NYh/W/0a@}ya{rB7r0g﾿1zbl"!?*v$ e@ ydr8}yyŚTq!&ݼR%3w̫Vwܜ7_@wի3ZGXɠ뉱+rt;kG^dS{rsI yy\ʓg;M5yyj5|R6H=;\<]'.^<ݝQ# ||*yDo<Z<ݟy! G yrrzawr>R<p{{/1"=8'"䍳BX5N^AcJO< x?S)-Wh؄GIt]O|G=]]t{-y><_;{Ϩd.n[ܶ)9/DD{g+7q~/qn.MF̛J :H1+-*1 yj^<<@t=]*)}A72O2>r\%":`A׭'(X0qVB )BƪwkշU*pەJ`'ET_;:}rI|}Ut 1Dzt`tr=^"^ kODt`.111..111.ѝtuZm+-K3 #o/Vcn٣;x m󣮏ӾYnZf; Gs}>֟uJ-MV!pJG54T)]FF}H[:sxtZUgQ3VUGoUGo`Ktn/Mf]'X1K-j HΩ. YEzFc٥mjx^˪DsXjÃuKrkrrR`}2Rϕe,92Uj[L_R}esm5~u}Wm1W/9=_466&Uye>o`ccWpmbqiv}l.f/=]""b%"b%"b%""]""]""b%"b%"b%""]""tjL#;kzDD{[7BQ7҈BXAӰ;Yvba,(I~B) w}\SŅ{WpVS<<|+"vѭKDEAW€1*h&2qBd1Xa`0}i80UQ7*y5HfQ@D׎Ne}2 6Uo{H e)t`tr'䊷i@Dtn/xVe01..11...11nW֫uJ2#oǣY٦n٣;x m󣮏ӾYnZf; 'b"Q뫶Z߰Y_b gu ~qVƀ&2*eѲKi{9{<|g cOq*}ۣ770ވUG>j};]_b1zV尼UM@ZEwNva*ғ5b.n;TXV K jT׷ח0 JeZm+Xrv{dXն5~)^[|jD>j}}c_NsQZ߰Zf[t'kkkre媼x\\|I93/gf'8!O{b쑹RL_jչ2,߉b7+u6VWW4~;>yz.111..111.nsd]m׵DDy[7BQ7yBXAӰ;ŝ=.j"ڍ`I{B) w}\SŅ{WpVS\<|+"vҫJDUAW€%J&2qBd1Xa`0}i80UQ7*yUn_uJ_;:8;}(T ]ӁUQ+ޒ(q^`%"b%""]""]""b%"b%""]""]""]""b%"b%"]h՞VZfy3~0#@luӀyǛOmu}X_u{6OGo` O4 ^iOQB@SǪ~ %BD}Ī+\J!C0oSt R lOKW!G~JLD˷FSAtttAAtttAN "_*c,.ɨs zF܁=*76?8hm6Zx"&Rmj{%u-ZB'hiÝxE. ##+9sxt+LZUgQ3VUGoUGo`뫶Z_uB4`u"U9,oĻz~VѝS1]d0KլĽrU+RC/4=IN`}MNN0TQF%g@Xm)ZR굵ϷqƯO׷=4=ߨ ҝellLɕrq%y̼yNx<퉱GK1=IªWf|'""i||###X_&֭X]]< ]^ "{DDĠKDĠKDĠKDD DD DDĠKDĠKDĠKDD DD ֨iw]{uJn"_ȣnZ󄰂awb^@V1XTQ,=^7DD]}{;>.JB+8s).]> ȕK-wѭJDUAW€1'h&2qBd1Xa`0}+4 ^iOQB@SǪ~vվd'"ꔾvtb}v.s!Pz@)K}N?WU [":0DD DDĠKDĠKDD DD DDĠKDĠKDĠKDD DD DD۠+zV ▭uJ2#oƯ[?nZe+76?8hm6ZxXZЕVNŸ ~qVƀ4fqV==ilx)NY|5cZ_u{X_}t^̺ND*cַ~Ь;0dafbY{e,PK5Ap}%9599)REVt2^*b-t/k_J>߲ƹ>Q競Z_/^kkkre媼x\\|I93/gf'8!O{b쑹RL_jչ2,߉b7+u6VWW4~;>yz.111..111.ѝt #oo_զVmauӭ7댃@JZNPtQ~䁺|"iBtƭ-=[0LT)T6ie! sgsn㌻n'"p)8wy]VGt ]:wRٺ RŚ[3oI6i]qJY _\1,PU=iO{<eS;>?YjSO9Pϩmֿ:ΜXxtYO7ifa}zQ UɀKDKDD DD0MTVGq?6LN캩=_"kf.=57N7HDNdj@*,ֻWlN<ܨ\NjM q+W[wp[ /ܷu<ܨ\8Q< &5j'ͯUp幆?Vvtyj{rh\<۔;fz7m챓;je0ti;imDO7OS{lz..mnnacc[X_b%"j=_EAl3=܃A$d0]-_{EQ1uo/֖/(Hn90ٗeMo½ދ6@DKB!o}+L&CJh$H$|A{a647on'.sl$5\iDDĠKDAr,;yz,C;&d֠؝"=uB~; nXPY\3{tc>,5v',LiW} tJ"`y#}tKy[=~ܹk4##+?_y-bsN148^o׉;w^%NM:qIMOF˵F݅iFbo}E O_?ط}-]׽a^6 d@/tcݵ+?zZWnsW:$Sq8zZ.Gؼ(j~Tk%Ezy?Q]~tӿ'=v}DӍ߷w_E_Y}m+,~kU9'q 1Դ[=A\Ve[HƶjwJ.'Z[G?˿ Cݽ+/7ɒl=6BI?^ӮߤxOߤֿ5?kJ~ui'߶3ߑԵ]_cVzok[OOå>؇OW$=#m}q`6,TN~[#rK~ml;~3iտ>4>#zti=)M?vl죤~D ?ꔦLF?{c}8^Z6[zh^<aI/>G }џ$}'?eQvkFɯB_+'[s~I}ӿ}R|_ߧg] %ߗ45=^Cw?OjZFW~V[nyg2~8IoWz>˿!J[(IGzW#_~HBtJoÙ+5?pK'WaV7_yUQ=z8.ioJM2Zk_yć?oʯ}>T)][Y듟_l#HDm|^1 V>p\w=LՁ}[{7փnט)W~^3o|ko ǧAO{<ɓ'095f\QΟ|vs.㵶7ӹuaA>#?ZRZO=ԮO4”+uB AǓp"Zb690H IF[+2~p䉳6b QG^":IRJئVvex͟$͜VRRz>윤&;̯ m4cԹwS9|:]qZ,fLfeLk{hdW]mߧkmMRw;t}~l!ϟcÞp>'?Iݽ{wxdWn}c=f[YuKCےV l~6o٤Mf~гOy|x,6ǯq8~dꘗ? 
pۮ6;ϩxӿ>Oaw=ΟQm:tfqoD6&9txqvkoݺeg>c3r}իW>9{-{mۧz~Ouew|y;GJ%)h^(fMNdbGwӿĬҊvE[3m^u 2gA#W7.0~.!7rWOR WG?﷯ZF8knBgev~g ~OcEi{so~3uǓ8ҥh93#gA0+w&z3 ~hɭ+k mg|aϟ=~'N{^]|o};Ssss.w}=>`zfYct%Yuwe#)yt)'";mFVs./9܏^֟#!*/6=fz=n8]{G黯j5 4׾5}K_3<ӵ_}_D"7eut {{OXkUT5ٚՋ/jjjWA>M=|Nl172z#rS'w)1soב= q|~n[:#>\Wnalkj*zY݊vvW^K/_xQ/^~U e =^0'Q:{h5|nn:BȁuG8:k<:;uJ5:C\3?㦼uVG_whl:m91A:<L`Mo<*HRX~S]^U 2>.:uli,4۬H3;W_1x?5:>F| RC qƭ.ϴ_Kg[M%f~ TXDFA1㟗ά7ڦ}Xes{_>hFy? ߩhfk5ݼOIdH53lkeWt[R[xwWbt e^w=rb8w{8NĮ"8tSL\/u`q\Ϗr5-..ZVZ۷U[nRΎNzPkWus~SzU]m|_=w8AOvݟ|Aï_]r.je$iʘ۽fl6Ju(bL2?J^88}=}IͦvCϧ6Twcw}\ƯƦw_x;76?ӹucInu.0oO\ =oG9>^lΝ;}饗/ho~~ڧ~_ܲ]dxb~ Ok~>U{?qSO>T,/>o)/6圡u]9g勬Unf[Wy9U~7JaƪC9qPgtkAu{>^d~rZfh:'~Yхab`au8S[ ANUp;:יLރ]=_N8'˲Zk]eʋa_M urA^'Fu[qʋSj;uC*3K%$28#r~R)/ܛe* 8eYHǹN0ayOG><t `p:n":#a̧uNRqxӁu2s<pN03Il>o}7}k^Lb[|AuCs·;zw۫?:]<<un6 n?,ut{eVf!Vf:C0rO\LQ&*G:R':5R~3?1(hKՙZېʵHm xŬ63ȨOG=daS[ۓ~ WtY6\^2 }f:)-wʡQ=_qXas<1?OtrzrfujYՁozfK+my#' x#8#vQg0)/hkY_s+ضOG+'ȲZkK_{;I:#-u}]m5."u0FF[^͏9pܜ$kM$za'r{Fl:qV09Ttn/yG8e)Iz^ӾprT9[gE9.3pvxhEayu67' ҿdm7,noxvq6׭tL9ތrj7QpI]~Y6C̠:#iXgPXphauz{k1P@5:s>ffYgCz"-k8ӻu>Ǭ7~!uz{Ḳw (fL{V!3cWnRwDX+χ#:FʲTN֫.."ptt.@A] t.pqsc7kx?=tS9+k٤٢T#8r1|maW$12Ƒ繡쨙J#c+5qlߍlÙǠ~&rln|tC)2;oͰ5lŬZPZټtf}7K*,hk6W@g7*f_"Zei[ʥB9d蟅vL ]JW&$[_^Zݷ+8 ziEqyNln]WpAYqJAǏ6Wͭ7Ih?@+7nI&Zāx#] I00rYkkkZ\\VjMjUZMjMݾ݊**vvvT.uԃ*\sۑeԉ2P l@ @A ]t.@! kd&Yϩ?}<վq /׏rL{|qODIq4~"w{wW㳇;}l/t.: .iIqgD&h_\skU6J&뛘vӫog6' [֏:8>?m9\w]wk;t|'~{D4|\ +Aq{=sQ*,,pLA:Qߍei vw"Shk%\Op6:ښUm~Yct}DFHzT*~*W*+D%Zތu|@٤47h&GM(-m>㑘.obc ׿X_@Zkܸ%kZZZbr5-..ZjjZjnVnEJEJU;;;*:uA]չFȲTNRuA] t.@A ] Ft\ 9^@9=>sA7֪M*-Z+KI%O12#F`s#sCAQ3˕s;9nfCO@awj|~G;߁ǰ~GwPqQI_ݡR9+OO풖s]"/.\HrY%'1@|攊;H;Mdṵ*Lk%M|X緳}]Awmm].as;a폻~?:c߁ǰ~/Ѹqa6P.5 3̲ D(zգqYrΉLxzbv'sZ8q_Qimiם6e~]m:2Q`m/c0ʅxJ bb9_7cD&P6)&F9-b:P ˚[Ɉe@Zkܸ%kZZZbr5-..ZjjZjnVnEJEJU;;;*:uA]չFȲTNRuA] t.@A ] Ft\ 9^@]:g? TZb6d(kl.%<98\cd+w͕/sqyn((;jfkfvwپ #<ه3A8L"wݞGqVŅQ! 
[ö!QP*嬕KgxS뾔 ɂfs gm\d4T}Xl2|VT3 <#<2eW&$[_^Z# ?dn2n9Fھ?.FqyNln]WpAYqJAǏ6Wͭ7I36qZRa4aZkܸ%kZZZ:9t)-`ִ(k՚ժj՚}[U*U*U\.ԩUvU>71#>R9e2Aʋ t.@ ] tCt}#+MSoR(yN}xr_12wʟH݉#7DJ/ٹil"t~;|Eq[5٫rb~ nבkcuUT.tU2WKωJ$2Iin6LPZZ}~;#1+](>F9-SaYs+~Y^qK2 J7絴( vrYkkkZ\\VjMjUZMjMݾ݊**vvvT.uԃ*\sۑe'.A ] .@A8FAw鸹p5rA@1[CtS9+k٤٢T#8f\î|Icd#sCAQ3Ot|yiuW} 323*\<+8 ziEqyNln]WpAYq"#UEseMy鼂?fʍ[nVy=#q H؂!&\.kmmM֪ZZVZ۷U[QRQRΎNzPkWus~c;#:q\*-hA ] .@A8dAw1rD:9'77!#qw^i1tߝ>s;9nfCO@awj|~G;߁ǰ~GwP~n*geu6XY[]Ғ*Ljpy"Af+f[S* xR*7}Zl2L"71WcmNrwuq}~rvqǏ~?w~//W>.ʥ&V!{TXYVxt" < Ue >7_8'2ŎV ٹil"t~;|Eq[5Mb~ >nבkcuUT.tţU2Wω$2Iin6LPZZ}~;#1+](>F9-b:P ˚[Ɉ/앱+7nI&Z%Į\.kmmM֪ZZVZ۷U[QRQRΎNzPkWus~c;#T]DA ] t@A (17?vFW<9usrVZI%EYkesv@s̀滑9F8<75dsdQ377CӾCG47 #<ه3A8h;^PMj)~#dvޚak1$2 Y%y|#ojݗ2U6Yl.pR"MFCu>݇&ʷo 'JYXi ߥq}e ra\Nd;任b~0٠̮[9͇:ꦚw_m'gI U*ٳ}?B]\U4Z\P&xN*ti{s>fʍ[nVy=#q H؂!&\.kmmM֪ZZVZ۷U[QRQRΎNzPkWus~c;#:q\*-hA ] .@A8dAw1rD:9'77!#qw^i1tߝ>s;9nfCO@awj|~G;߁ޥrV6Xg3%-) 'oFbVI P:꿵9Ҏ'rGxn&Z$b}>nxz5l$wak]Gg=-g `w{wXa폻~$/tƕ r|%(nbg.Je.7H'2 ⻱^,m\Vs3NdmBɜ&b@WuZ[*-/p 6~(6I1]W [OBWY%y1^DKۛO"(f䨹 s<RcԜra=u~A[5 Xk$`tz^KKKbW.EYkUTVUT۪ܭRRjggGrYN=µ:9]وYI.x" t.@ ] t} kd$I%ϑ1;S{ylx|Gf}P?tS9|:]q}%2l1d2+dZpPthnsR(XKcd(]na6 dU1 $2٤٢+Z8rŬiџ'g}^dZ='_s\=ܮ㕢c q{ߘF< hXG!Sm9=-3d<9L`ZYvPV6RksJ%2 Y%y|#njfN+a 3rL0ucd #}T.4>AF P&1|A\zyJ&!r&aəYVA;Ӝ_g>?@gd 4>C0^Э pHdվ|o.ԃʸJ%)hbVɉ IJUZ RzU;Bׯ~\Rkq%Tcոb ө|A#۸\lgoX^qK2 J7絴({GKT rYkkkZ\\VjMjUZMjMݾ݊**vvvT.uԃ*\sۑe { pQ^ ]piɔvNv c/Rls>38Р[6P&[ٕ:n3H7 ;Hv?B:]mAul;x/Vv>t{uǭ;l|mo;r$ʥF;ackok AzЛZQ.%)L:1AqٝʹKE꼎Ҿ洲2dضxmG}HP[v aZ7:wTζ$ԇюoLyMiQ"1;W/Ru ]=-TAu:CL{ΪNm$hۡ}/m(!upc;q$Ƿf&mg|l NSߐ :q)H1 ʋ5?~XTdmMjZfk#]ooֳ>|PZM1FSSSOVSV/^}]^hzzZJESSSnm{k_#k7Ν; 8 GQTyvvv$ISSSz׽N}kwS?AGˉ',k>^MozStpԔ꟪mw~~^Z+c*1tpw}p^F -҉#^C$kap<tp,xQاIENDB`Amelia/vignettes/assets/options.png0000644000176200001440000006153214335240021017156 0ustar liggesusersPNG  IHDR|-P|Y pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3- cHRMz%u0`:o_FXIDATxWAkA4=XKO"; S/BB%ES B٪` H)hJ =j.H6&j56I7%1wX,߾}͸|F3k#{q0صEHׁjMڽ}&f.ifo$/cc `/%fDHJYV(j-!}8^Xmu$P˵bf^oXp}÷!eZ[9wˮZ^UDDT/%b*M(B!ɔb(Q$:W1YZ뵋&ͺ6r?r[,;gm;[mG o76ҮږdZR+Lz<'ؐ {Cƀ cj;mM!4]*TX!~s#H,CЃxi쩬gr}ysO@vh}sxЉBKeS!Y" UA b=gJj!$b' t|w21PDݲͅ[Vvz 0qx!|a$#Sj/]@_i2 't}Ĩ.pZ5MxKYnOMӊrݎ*Uv*5Wj ɺ. 
QݵKWPp-ه"n-qȢUfk8d'4mcAONZUXH@V+sf\"__&"D/]%Ϥ6@4ƓLWa+5U[ZKh@lʖŲb-J"sT/* UZQP,RU]7NݝeO\,`=.>RJquc(\*qg9,":2L1!{Ybk9s Ţ(677{$|?}mI<' 'ҥ7L(^e 潚R>@GU]TT]Qյuvn>~hΪ/P"mzi/U)Z$b*V(&3G(ȘT=թ)0 ٲx9T<YNu3ls6'+cӆ K~] p%nA^4lG31Ȁ0$$ݰ꩹3 t_4 !'1 @:)(N`L|K(>6:7@q"ju&JWE')WYQl2$|۞o%B$R8 Zy9v/_b;,`blGo%x3F4 \_h[UuV6:TAMut}ő[d"IQFip"-IUҖqbтB)9wռ8+hѵs$77Mc{S{ܓ|}n%\QB/9p>8`9̞jmi}||'qmK~;':~to өlgi-bss&X8ۿ#JHZ*i:]Df$mcXoVqBu~G8viB7=uqh`*S%3XMNaY֚y1vm %AQBɓcG$SF=C!h ,*SHjaŷ 5QzeY.`"ΰ:_ۑC{_V`SC VDd B2Lm S)Rer8aWcPKl.;__=+c65:NPѓ]y.<@jPJ+++"6N2(HT >UMARfAښcQzBTAOw W4Y+X`ZAeY\VSR5(I ! 8TVCQ!g&yhvAQ ^:ЫiB&yЫ)$08*bECdd[@ɠZ/movغP)[5L@_5F2>Nv&c!t}{M ԉo/\ ɩ0!|Nc,=a*?s^m_Z{M2]w~G JaL-K6}F%O[l-;ݪ280D\!R5Iema`#JTޔfMM岱ar+?~î=p8Qx٭^ٙ?fH'0E R*KC^i'Rx,'_K!qN%aqNPi=Py˼v  MnXbq'xe/…wre6M5i 񍡙sSUd2W!>648gvr~)?czc8ϩ&ͪ*IKN,|BtZ3i % W2yhUdQEhvemn!*`":G2`@׸n;۵/vy~>8}<<8* SC+߼u@6qBlr:[[>Yy%ɺόE~ױ[Vwmy秗?dh;1yE-}͋G>=r_O\pۉ'zqfɫv\lvOy?~GE\\]೿p | `B`y'S+ C 7gw8{yD٢iZS}ٯ]vTU-[vbĭ[YhGz%=;Kq'''w?s677p;ukOR11(&8OX, 6RiJ5+vr"/"0[fª*+t(ziz.  *1OHXYd)J;EFKWz9YԼF57y &,.}TV%KxEћgĐ"S_g0{,KRY D$RDbe׽_NE( +H"  g/|ّ_>;{\7LO `.8 TVFY0Is wGۧBF$"Ԑb呑ϗb2YPrR&ۚNA-`ekеc#DnY3CĮ]>/84M˙7pt,)GCw)+Cd<>>>00w`{Y5SLf|*+eH`/s~ Kk퍰>^(]`vrózm|Vu^}[f%\?tf_\N~y磏<1o Yn钒M6waxƆ_v ߶4n݊Ʒa Jp&Iytd2/ɾR2\/%y_8B@293ҷHbn$F̭vXVTt!Ў_MMM۷ox1 0 cJ%6P./U 8E)$nEc%Ŕhc,C6}F !8 &y&NuJSMlpCWLlP߬8gJ)ArEmJW)A+ctlիrpxg kG1وZ TU51^1tE ipZ")&<T%ʀw 4 c$1OzK< /)b?e/iőW{WN\tw?J7+p )Ol`}|st[Ox3ҋHٖOb^<*b/ZN%|ήM^' hSݶrPdʢT%?{+V+ߛ˺VV)|ȑ#3gxeo;av>vwwۮs^*ZKU\-UtE )?`@L'KkJbM;Xesl BATOxQxDdP)Bj4H֭[ZQA`!6.GrU[S\b?c/<_;Vjʹ9TDXBI=F+ ܄J{KԹmzﬓ;'wu}f-j&*IҒYe[by`4lV( US,Pհ_Ȉׄϛ]h-QJ5h,p˜6ǡ+Ȩ:(X\DJK5kVyCsUbuKIʲL 3>\b0 +r@r=P* t gZPD(U K2 r-scX {DOx 8nfLׯ__ܼsNi?8@cg셟Cn8\23 0:9)6.$8!BZ`hF,UgZ,pY@MGx{{K].}&}ݻ}}~}ß}C,sf`gΛ/}(⁊!ךZN/ߜ.>hx?[6sfhqhnU߯I%YzEۮɥ2aM4]흃m>ctͺ2}/mu,޶_6ꀒe>OO~ꉉ,an;T}M'_=9Qy"EGu߳V9Up Kam\_L_P z;46 |aS'Mѥk>yݎoK AǦz?qGǏH\5)Z@Vbs3DfJ,9reV{I ?H*hTsZ'Pis P5(oJEEsG KyQ7htA B}}}SSS6l~q,}e'E_;~U/=٫OtL=_Ӵ̲5[Dii_2]v*Ȗ[phHi.IUiskBض{d =M/,'].E0ݿ_L~JDTXA%$N?-]^Ò]]"#H*MahJ56>WBT$&"(P’J)@=^e,[#*tUX5MdT=ޝ:l]Ww: z'NnV!2lF ECJ 2>D6H4,,Bl5sG6kfYDC8HXv! G"g .ϣBtBN(%e򞱞oӹAaU%cϵi!YVF'iO);ӯm$es'Yg]i%[ wүRp]1s'PW#LO_ hDPڧ2`dUR^o D8Qe&A[%I!*w&: dv bÐRA!/W$?V*Ipܲ@H tkJ 6rѣG'-zܜ&fP-(PZc.i%5Rz+xXIA+@iZZZW0 tInVО'{Y|NdAU9g$Ye&8XD'[iźm@6Z yW[m5GI+mZVz/A7ZZo,7Zɉ˵v;jEL:x;r*ZU,ZKrF1ɩh9VtGNEKYEKŴ|}E˓UowwtG֝K2%7{2"jՆ7D'vU.Zxʔ) :w  0qhhh;v酔Rb$}tүH揢h pyoz[[4mmK0HK\/ OKM(B`2!L y׍7nC%Q+POkuP/A&l^zr&`Z5CD-(Et^cL~_a2 tӥ^Q9GteKt;?e;[gHJqaUBXEEt^@7g'xoOn@ i״ Ν;ϟi?B?˗0bĈ1c$3k5MK VDDu{/w{³c≸ʼAeZspik]2K@D-%n7:8$GC;<>xv`n9s@x$#]K\Q]]iR5onk'y,ɅsVխXN9bs;;V[Uvj,Lm@HYщMg ƚ(?1%NY*)asOi{C BT~dճ`&XTwoУe8-uG*x(̣XaLoreº51PdQmHYܻzT&?iAG?t< VTLwhh< !߇/{و3Ϙ1c1qt8Uh(S6@!f֖z8uT?]s*Æ)XOtǮ)|#9XiUJWq<Va~X0\ OafѮ܆.xJPS!!o1 #:8Tm~!kng6i5‰@$c|M1 c0EfmaQrY[[|=cݙCGsbb-A{dar9^RbMfN߁*>PaG&{∁==Tlaxy?9&3ĀA4Y'RҨtivf>4\/ YL~w59u$ǟH՟Tw$ 9I3_[lJ!&'Қ3krN}ų5MEԠ|&HXA̚Bp5M/' !j9=yt=sdfMN5پsuOq&3DcnF`~]0pՕ٬t)&*Dd93B Ta ̼ ?EN .j̋|SP=)Z g"bMf&ߙuͮGcw66d  Alr7.g?~9U;Mi֍ǿWu+96ؽ,*b@ԡk}.'|\|=IamflᴃV=&*&q{}c_s|{=۵, n6jsyݮrDXUսnZ$= n̮^Ş$fqڇ㐙a7UQc#| ]W13n.þŸx//;333233n-=~nYa6f6ߞ_5"~l[S.8ڕ> |56o !oM$984HXq+??Ҥ I!e3D6=TBc"˖eK@q6[dI!n{}ިIԕށ#mRXu -dU RY,n4B?4y @d jRe52UZPgy+2b#+BI$h- ShH6%cB=}Uh(J:qc7 xE(Q~襕>@]'(|RtCCgN:Ev@N2A%Ԅ֍RեpUYP#X]SJ"°U=g,Ι PDیQOZ5٘u ~LG"IDP{BW1^xabەo%"v Z\T)R9 /EŋΖը1_rKAtK5WIH5ʜ!0EaBF+2j{Q]WSoSaDGS\ KP! 
dD+RJU4BNspW;tX{IE u9Kg檲^mN,Fw)$nJ hXZ~H+UGML!CԮbŔ2jA+  P8L$V"=DS'L}f,4O;tQ2:iu2B{/_0"AF: vR,e#hy{YYfO MsHᦑ "+6ER !N#YWn5gA1}}}S}Kxrw"Q%)$B]:` *7|IK Gc2 TЂ/>@Sn[u!vMOdMBDRYX*MUyٮ&=HG*)$C)U_X7w*: mp@Snt!|* nG@n!H*k7xU|Jp%?;|I:o縕+Nuw]RouȑA>a32׮];wdu>EGeΰ9O"4&|D:Ls>)y2nIo sA'7ܐ=K MDdEɡ -#;+ܞdff't\z32%3`H E~RnXBDez@Nzݪ?)ct) D ?e'rz{{O}n{{{yY5 ͳ:⪡vWtP -+^R sGO@ΐz^{qmWNNmZ8P>hKucrkM_qMwT IAy[/;'r_ep"8C./!vu4G}*tD{}^YpkNNΩ'*k{7y)Lq渣ZjF3?0\XxvXs.Zh<"ac #q66%@d$w+?q?X%i).(㐎B ,JSFջ۱ҿ{][k˹vA|/s=یbq]*S$mT .iyOÎ\0qVr++wi "bgʧ]uq 4GpPQ3Y")Zou++d<.1XɕZ~e~P7E9O&5^*qOXy[s+wXI(sbּ`+mS2}ڿ5ߺy-&b G~Q[:3i5`9Ow3c\u+GF>#3%ubXEVꜙNo҉m mckq2ٛ23ӴBg:˰eatBb jϘQª4jLE 2Q5DiZa|Jo7aSz: L1GAMT@hÎ0+SE;=IC{ܲ 42?D Ňx?#N=sx +`Oj[/:(*^i2ʗj{qD_UUU/>b- 2漾 U{3FisUV I=eA) 755ug:;v{N`8tlZJSF7qAݓI:b9 itd93]J:L?j;|监ZS7|ҹx|hh覛n"#)6ahx\+lL(*w!:sEJۋZWn\EUR7UUU-EF:iEꢴҮlLj/x3 T51n?!kicZ!a8qquƳo;yR6uC}}l6~c9_qdL&3$hMrxp4E"t&33eЌJCQp ZV6a10JHV7Su!>Emm]b3#iTh-)a')7m$q*mvDvZKujk/{Z!aT.Wل0$` uV}|ZC$+I-H;i2M ģ9/p#)p:=tJJ]梮*SXlnJʒ(Eu;H!HNTB'!ĉΔAVۧS4⩳xҮfJ)g;UUUGKהMX MMM{u2Sq`Y)#xq؄&l fFЌ.$BrL0ҙBQJ`YM3IA|:F8\]WR͓-) fv\G𖖎RE!lb0ш )$^_$5r:Ũu!N5p)#F:qrȑe,)Cضm.MX댇@8#(1")5 .Ao2I3P$+a$!{m'Y J:fמ|qu!D>yrdH L ϟ>ott4=yxgU+j璥ZKzOw_<\֯cF&Iʕp333oƹsHg70~ gq;&tp9N[C+Qsx 3rKPו[Kkk]]/HXtUqiX. x) bb% *nm\K$L$M7=3q*.uGMN0fs/ *tu[hŲ.MRRRھ4AryuWzǟnEFpbw˗o{wDȑ7]yJMMdgҋo;z$l;*=A)"%SBNijWۭQ IҾO{O{ާPާ]&Mym}ņ\Otnlf<`]͜qߔ\s|z29rǶ~է>T9pWS!R"9 IJ ߱1R޸%#=㓵57u,##[N]ñaJM2OdtB_ON'-O0VK#~T#Ys6TOy;&,|*ojPTGj3-9|zD4-DM A!Kx^5s, fnroѴsǗw7Cxe+ ȚEy,/z/3.d9Hx=0*uv337o5pߨ=ndϙ ךxjOE۾MD HQuy [=gC)aKAG#@G~sP)G˨l"n pHԖWDvy3l 6lXZZzv;`ףW\{R,xvX ߠ׽¥z(,R@vɒ7K)iiG(ӄNBGw4|Xmssߒ;S v*%%e}ޙUwnrUKf3")> )_\2eosBgT^!HBiK.1ۤKFRGPOhV8?_x-/zz6=%~W|qVӹuϞnPeO_W\ylj.xBGѷ*5K@; /}nr[u*GdA A %bjcXΘDl1Xiٳ`#Vv+-j%)9>'B'.FhNZM~Wp:}0mb@do:9o qImdnԙ˚4)'N2aouICqEueAj!0VZ^QD#JV"!f[$:d:#7}[U}}m$u`E51J}G&ZXeY[A+CYXtU,kf>_^褦tTA8iskk  :dʐY,PN򠢠)^1Vso돤|gV"A0q5MNf{C3k IlvFv&K/;%\6`VCgs庶If٠iA~y4-o e.3# u95bȷa;$@$U^զWy!C+ *Z^+2~8#ɯ@:EaF8۲Q"G7_>i:;'ts;$]n_w*xc$r\>;vOĿT0SC6e$ b?d/pkH^=}0Lx<x<!"Iy= qWoLwx"IGIg!P|L袋E . /3f5e  AP )ȝ2,q 2`j|j+|S;;G2F)aCAFbiq.=~2@e[[=Os닗@Vf[يws礥nyEYXGnu7텎Hvq{@z?pd9$WzǏ?~Kկ/~{lů߾|g½ӡ,lV;Xkn?ul##bAT9vӕV/L8}5}8vXPRcc#AHzaۏ92iҤ(z H\m#\{7n1bȑG;vl+A$:~[h,Xism_ APnGv:vp:wLl") 5"H[ˈ>S,z3 B'C:Ά%@l}#bmXdY4~.Yj)R͗ b#ꡔӿmeµ@TEN;΅eļ>GA4.SWbk1lAm܇S(UTe+ ~87Cf74tŅp7s[ۣʫVi2kj5 NYp{XVz 8s809%)e>6ڄ4Xa rIEK!"t>K =.-p堗.(峬_)Kwj{kym+H ~IUP뭝 EF*$G[ź:Z}kYSc$4hZ/j^HrjPDI h*m:UcV|ܮ@dsҶW!y%rZYp\˥-?r^: ۷ Jw`Qd!Y"YVluİIFh~ɓ'O<?Gc LB D}A=͠ q%I O 2CPd%AЉGPR! ] 't W*4 Bh$(BbR KA#JxH-)$J.h@ЈKE.傟F6꬙ ;Al>WGpR IRi~9Ur]p)ú)K\UlPz"& % v}Eɴ13.O3Y6Pq`<\oN;m~5gM)~AcY0r LR&(ee3--8?Ktl}I +t 2  }:7^$h:8Hm) NiA5QF ><55 /re6pgqRDM wa/~C)9R RÛ4 4qļ'1"B!=d.1^]+KOo:~A"wb(z+jXˀ󔅥&hf>}ZVpQbi3fԊڝOM93[5a\S*;MBĔ,Rh)59)9)ƨ9BU!_:I@Օ&RbY ¡$-PEkyoP2,9 A5y{Ǔ)߅a!y80`tda5auw"rp?\?+s C`Lؠ2!w  h;x9jWЗ^i]^]uNgNhY%p'Baޘ:*h r7y$" J R&vRZo&=@d%N7U%V JzN$8g;!laR/i)_2BPl3Yϗ7Oet`?~E<6͆JCL$DV~_$6^1d{_Vk"ɓp-:D`D .a~,v3^H"=EqJ:N 1:ǏJ** ^ܹs'O8uTWWw*FMΜ9sSNeffvvvNZZGvQ HSSSj +,eCFc2&u~UER 6 ' Id"&̎BPG Fr l-R^e֚p`QXq $w8 ( :$XSf2Xm !Uכ?E r|R&ъ daMj0LnoB=4^^~AW$77=5Aⴼz|W^j: AGqp!?4J6rh蘢(XXimCq$AJsF1bĈ#G=zر#QAo3h Q=itDؔ? 
HU}m7/4XLrIkvja`TisҚ$Ҩ.Ùźŷ74T*2KoBR3jB CbL:daNVvQr{5Vj(͕Z'Va:TRV25FGE+A&eW@8," tw[G.ĞkWٮSDdA̕e1QA$A$HFA# KJ5>ط?fă1QAdnԙ|.#94_y{=gEIP\U}D79qqy ~’RKFleB h՘QAR"4h-ŲlkNi%s*24WVs> Օua2ǎËI 80?rdAd駟4)5i,v..UO(5A%vMR8 q2$Q0J| gYay1(q̚V%"ٿ}Ӥ6kv#A䃣p;V XӤo 9FSA:P4:Xe]xqK#;+'r:"8~J㖆>+H"4:,C>-+=K,+E W`RApy _^ݦ/3bdAVGMA\^!?NNS(Ax<ȐG N,l֚lJPg)N 0VZ6E^?z)+ 4:آFӤֶW8d"`R$ipn?rȤ;Y{ke\Rt2G'̿F\^!&\< 67}BA1mʯΠE2 ]bd#G.I"c#2 l:{A(A#]VeY~7cddh\UW ^ gZXimCq@hdYj VaPNZ_(i`cYD|pzD&U0tPNZXQtO0F25x/\m lCN+ (WNG$Zo՘}*2 qAA.') / c͚XrA%R,l#*|FGEbj`I߼Gz;ЃLŽ-I0\i a9oÇ+=E?\ny Gtj:ڡZH\^!!m'~ ɄnIH&N~Q:tdĉ?É'Ν;Wr0|cfee3&##CRA vn RSSSSSSRR  (q0IENDB`Amelia/vignettes/assets/rangepri.png0000644000176200001440000010167614335240021017276 0ustar liggesusersPNG  IHDR}l? pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3- cHRMz%u0`:o_FxIDATx_h[U?'mJ{՗ >( X Z>X=HRBX F SkZЇ8H "R" 7U69>$m$M¹sN߿ss"cRu,D"RʉoEPWaj6&24a.Mr2W.Nru6-y˟rf(Td/N1{oN>$R8}˔LGg/Ե;E{,JR.u]RJu"u]<^GNºp/dRJW]^~?/Ř<0~YAUOy'|$ѕ(,EAuFE}盽iMMMyFi: v|]BH`B~H&5Mc0i!Cv24 -4ReL0BϣۭBe]X'EBtkί09ur[vv h埅F\B=osiyu*\yH wڪpVL=#3I$]bkc~v5_z.<q`l6_I)fjxHqh[T%0fζ(y LŽǷe]*--d>Eavk@hgw+F Y1ob3+,ZY1"" ~?D"]Z>b~X[[3{&Y{|{pT efAbBVڈlV2z V)V9.I,333M)=jkkq>lmmU^)gDWJ"L -uUC`,3 Xl:o'677y'uǼUSH9͆ x< |pׁ֬GuV.1 ! _qhU?\21T#˨Y*tTl*ac9m0J p !B RTfY躲e]+X#쒘Kp\o{>' SYߠ*0䇖/K2$i;y9xC/O9sNZΟN&ů'qxG..M+H?>.Z`Ӛj8n_{!1m +m"t:^$z L[ߝ.X4N}k!| ;1"Wr$xK2.96㛯:{lsBX:muv Z쿘ڧ` GQ RMգﳮh211(v@YHsHHl5kS^ ށݼ^^9sU'%wob bSwWrV\f? SӳQ2ꝥHo|O?㩳!efCw ,9;NGƩ=p5^OzvE5!_Keh7ntdyXSwYF{[ֺT8C,?a H|Y̎/ e L6*peC_{ "P,²Mtka&qت6(Y!|)23M,5e<*I!5 (l6m䵤֟zRF,3o e>ВrNy}.\Ϭ&;@sP@G&6RAGt-XecX\}Ց,hoQZ`)B ,>` K6(,Z`ӸGVedkRJ6^ғ>!q[a`2͉FecV]D$v[4&p8$i3gy'''P8쉍u=i"5{ uuZgyWuϕ^c(&"TIdbx={eeaٮ.~cAY.Z =ײK沈a4/_hSW?'JPꜵ2e/L)( ݌ a u[3:vM 6ŊPѮ??:[jٻ{ohZ4 {sι瞜9ߟBڐxP^Yͯ #1) hYBh~ F5 v͝s:W?~M^47]^"P3WCyT sEBnq 5"И!Ncݞܿ v}1MhjSeI^?(ønJDih!D)H(F~z^BؗtG? 
&焦iگoIOOOV~9+V x (/bk>رH$no5ڮ?`, ehѢ<]L}ӚYym)066F4/ 39E@w0vԆu `eKeLl]Qg M2]BeaPu:Xf4-e!BH!<M Ot qx3}e K@W܉)O(ɑ,;d sB4Eu=sٴSw;`222Jߜkfr:"t ~}<N3J"&Z@S(B|{XWJ2/9>"YyQnJ>oC .N; ]K,g<ʄ3ɘq ;&QDMx&ү+XIM9odMm0BP\SivQj{Bx|@CFCJk587-V&n04Ͱb/C iUP#N\A-ʳ)\@_„`wX EAQjr=.LՑ-I x0䵄V˷$ (_Jf?}9%%6U-gOh BʵJ9Jy`wZ =%L CM'/xFVI@C6(8E⻃(yF+NBZa.20>5Аuޯ)in݌I3ugoؽk$`:wjf {pLp\-r<1$ ذ[ިB(Y [PEpb ?rd=0 tDRB?HfH$N,Yҥ9 (!bK%" 8IZuzzzhkkC4[͝sXEEn\i!| p\teyUh$6Kڊ!g%5M—T} e$Rt#*:8 -`bB+U"sH_?Y>d௯z۶qGf PPP 8ӑʋk-44`' |67lë-yKf )-`>F ;n-}wtGQ=Ra9`/**Jo,!10Gr"U}pTHqgPRCP`i hKH*TGH@3Ne*t148'XLȔÀF,J$4?dw6!y;{ιs{ϫx=ٲP&[xs FBɰlUj7_?/N Sf%+>{,F`ɜI=rsc?Y:g%+/ϲts̻+V?K@KaJVy][G9+y~Y*͛wV=ދO6cYoxKhXnM|JHOz}vヌ s;\\\C\n`s\.>EZF\C4@*>n>nannlԊ| \֑|9t_Y{?hSU5-k=PFӿة31'tikzVf]'}ar0k=*zoUE瞢((jV; k ݞ!韵^s>jNW'~h˥Ne˖p\ Mcd펾.Ca񐙙F-X9Kxo~T⯉u\\WʊxіBn E5VֈbxQې/flXky7ԩ&k l0UQ-2{qs t]z]Ke K=4F(XzYSVx_ZE|`/Ӗ /a"QLѝ3[>cIaV=s"?A~M,6c4TEOWV&:Y3]}Nmvn!⺺^lDW OhY϶B] nĒM}B VUnN48L_'Q"๎h,SBR#. Dq(E %WKdNf6˾}fXdSX8t 46[,ws⺺ثm,l^U0% )k~}(`Jۡ0K[b?G=jp+B«-;eb!)6,( )_mj#O] |D9Oj O7dSWፄx~=iÝh~Wh;O *pQ,+Z(-dVnF-/JW|9">qQ|QGO r{-`Gb6(-^PRˋtW ⋑\ҲO{3mۃ|A|WKa-E"J[rCT-WΔ/Ȥ_]A 2& IW`D|FLb5QK-n+OA-Z*~>_|-mf+w+Uץǿh~ۨrh sJUb }D|C˰+">A݄!~>b&ϻK0~NJU|}"* \,Z&Ȱ{l@!m۬­R%H'#I0_"P BH3>pJ4 #WJ|_===bfz^{}” =_DrR4j|ʭKÏ|̼yhnv1bvD_ op{FPϧL9 cY223~֮>yc3oq!"%?mmm}'$  D|02صk֭}FZ^~µkE|ÕVe^8k5tIጪ[IvG,Hz#0vp8M"ᦛn x3~{t~OzD|ÌNg7NnD(ǽ'yt-[oQYYɪUYqISl_VUʼNΞs(bpID'$ BAH g*uL$'unD PlhbKXlvZi݌Ç'f P 1֟ƳbFi ĦX 2 yS1q:O'PR}01{x&}aBicW\hTUg0/9f_u+MݨMZk Q:nzJk0G:h-;|vG_:iAnkMbhb诨#W޹?OZ6񳟔gL_:uCzk_'^Mi_֪gv ۗ;嗙GFnCkQV{(9))~f #ZDD".gFH m~شIeZnV}_ wL*^^bC;8 A/J)\>6W.?u%DJYR(jQrǟ~l4Dz~ x!™|^?&."%PnMݭۊQY [8plKKh]@Rzy$yjyP(DQ:B D _PPs% zBY?Dȑ)۾ld떍,"Co-n- %M2rH\ڢZ~3+%ۡys$W5[Fk *5Ψ@xeA늖t+Ą*{`źsOJxYkO5ײlf=lijvl1?k:0:f_AͲ^ۢ='ȲMgkak:!hڴ#-~11/oOJgĉ7CޟR,"֯Wo#<G͘˔v+Nvh•NRݲۗ/ΘT__ψ#2ƾo:ٷo_'Ndm<>3z^_&;z򀦧nq[yҖ-/Ӻm_wקo-BccH$ʧsLN?mxj:k`?ע3Rt8n 𰡙mnұvsdw@sT#߼9Q-~ @)'Z%bњNoIr6{\JQIZcc= zRNy uXw%JQ h|t^Wn(ͩص.+@%ņaNY5W5lXbYSJ @k̺[e7ꪫYmM*vؖV]PrC|6vBGXLyͨ߅Sx@D#XgF׭e~,ƥh=Axj;؂D)OxGWoڕd7y˦@=A@kj<ԤLwrP}W]Y _2>V2 0G6.a`:\sׁzzwϝ|>!X=mB֊ v'D1Q)u%x yxnFufNH@Q~{J)D"TI5AJnp*v,F)e f +TV ,1=5_WۗH -OǝRC;TqWۻx 3zavRJifԴصNi;5!8 eax~}[fV޹G?jͤ'</xliދX^{~ENy9Bzc:S_o0l~A|JܰH5NEy@t<5<͖@{BM<*1AS]y~6Τw)@%%@3凊 nPo |Eqtz0 %=D0n\/KWHϴ5No>!h%rR%64kR8`Sm<{]O۔'D5L^b5n])Jyնc(}]ɧZîuYB9ʬwoW㶰j}TbKԈnZf+6Dlq*#(W,cص.sc(+*&(v^͙^],lW8^c%\J*g3wY=MI=$94~۾'( EAH 't?}^cǘyPx~}G9a$EnA.}T t>9? QT<3{`D^%&sDyF̼y466tr@?袋0 cG} ʁzhH$B4Dcc 457eK,)-z3ge ao9r;\<A^As&|I?|rss;v,YYDA86-JZҞxe'uC қE7~޺u+xֺSgw!צ(֯l}A)Evv6ٜ>}t(_+gظq# .$;;;766Ddch}b˖-̝;JOH Gi2'N|ʩSd'cJ3gNgŘ-;+f>aJkĺk. GA~ǟ?h$EJ  "~ "~ "~ "~ C !++O?sOY!Minn>'[F@}}=y%ψ;P>f}=ub麭"~DQp̞#G)mɪ#{Gٳ7mØ[aSN#G]͑#2{l[pi|WÔBΜ 4Eki逸n&77C]/"p'9槞rs|λ]8if3췵Zk|o~hFii66iA3oO{R:^9~w~:IDX`K+Ӥ)A%,rO> 1'|gA-*jUڟ$5j2ԍ ~:aX6A:sgN4=0Vo:w4Mu.>DovqSys 7M%su㳙{c*غU\%.P6~mL]V:Tx3'0)(*aOMM<|;)aqӬ>Ov~&m_mbiz g=Od"P|uSn˻U U}17ٷ6AZsY7%i繻OCmi*Zh= iU~8'!U~msTX8,J c|Yư0\OZ`U!\bĕgܰ(l/f"o_C7Q~Q@.ˏQl"nX8tkSXσl.H1|&d~xˏI(ƵcsgN8w[hh(d5Tw۞(?10x.qbY˷#{e>#>?esyf0j1^qr7yVͿXPP3 c֟ߧORk nN|K8]~k~ty3{s̷w Bpuh((X|! Xp\Oƺ49eު4&?ﭒ)L' a҅o? Qg߾( D֥ݱ:8??!.k !B Ce1A9DAAAAQSW6RK#3:p UU!SI I֓|6lΜԟyő`Px5٘-L;jg{5黱)|?N yU^zָZ’+h-$Pmk)}{F~mӇ裉ŅgMqa3AluU3V[辦g(kf"}ߒ `.'w ccHird`c8@%]E֓t4@~ncߐ4{rcQEX9P 5IWfmmAבQu  MڪHW/p Ro cC$+>Ia  MףG>sY^&GHF(a ucuw:4jbQt2!,_8jzkqEŋ-'^!H.Ͽ[<' -k/x̛%/BimY|Ʋ!'Ah?<qqqW=? 
lSXv ]oXvqqZqI?B(]N䴇>UF?lv;*ovEiKQzx<_7XEWBEiz<Ǣ^K,>ۍpz^Dlsn ILLyo`6}$&&`6^lcA\Bѿ$ϒ(xf:s\|[jwr^(K zOT s<q5^nqJk`QCe+yyh;[<b.\g#-K8C~}ys[n4'v5ٿ|KӸ]MhF0ˇƯӯF+!׉#la}/|IB4MCKd{͗ 4F̙Czz:vFj>Ǣa,iO^/ .M` c},ޘ5%_~x,a^RF}fy˚ 3C:<<Sk&67"L(]մr LH+724o<RKb}ifژo#~yNKj~cZY1!}z4n_G{ME%?c"Y-&ef +hZB/ɍ ^WwIeՒ>js~d`}z(-g۟隟Ni>q}^gwǔBQ}/GP.k/S_})w/$ߕ>LAeUUu!5.TUu@ʖdl &,sWزL1EREABĬM.Z+PF`ňٖ 84Ͳ &ekY ^+|8Ö eAvڒyP(2QWy^*Zqf v+׌îiV+9%Ne)y6M]BEu,>͓1u.zPSSE| @ dRoٳL}sAM]Y0,>M;|&ӿsiM2=[(K>GsX2=5"l?7ceiG^ Ecq_ȹL ,~sʏDQ*۷O;1d{Śp?>Faʵb<,)7CbMk+y؝AeH|uY,ơq +|ZCPU}omy|ePuQ!'*l$,VBiNn]A +FkA&L[&$=YF(9]23t?g+ڵXvPC韅6dEOg jg33FegPe-e]/vg@@{sE< 9}:`sR ͺ{?Ǿ"|$Ͼ]Mat "|-Zc'](}i4 t,{W+6xmQoa  Ey'7^e?-0 _wi wnf `Xdף񾋓@ ۏIdiK(tyDC;IY:hpݻ熿ŋu{94nH{W>b* [E+ɜcb-+ ŚUR-Z8#2;bbSXee̴+ie4N ]M/u+,ֽ54hkpƑe`X*ѣc҄n[7ױF4쩙: )g kN|-8Ȧ^SSiZ By u6iNpQ:PԎkcb\[duFxnmuaWW:L( mynдbkah/!~SPy+td+  B7CE/kr1WOfUrMb;?PAA@- |@nl1PbCUUlzsMUqǴ2: "|aTثaJkj`-f 7: "|%9ѪJ31G[@O %tEmE PW>ZAjs#X0kCOAOAOAO!Í?O*bު:1rtA/w e{,>d+ 9:̵"UM,j~ "|]9ˣlr$ u=VL>+` 8TߺJ%z=oe, Uo63^Mu)l6=d`c @C#A/)]ˢs}Tۋ@%6c<]Z`h!; ".1kbOO;dR"|HzH3B!C]ADADADA6p#cO _Ln:Νeq\S_TzzS/Vx衇۷/]M0;vA*II7 ݻ7Eŋ-C s sPW.Ii\={X| tY|M+ EQP=zpУG>A/={bz D>A%ȅ#v"|BT x||.ҔBg1闽jk_Wc4MkU^>|kȝwyak]]ݕޞ>}^iӦzw~ P`XbHO:|7Ek1{쀎j7Hn׫^~8'"|Bt|/Vb!-ۥKؾ};?]D`/h ^"vgի'^o߾r-9C|/"䟢"PXザ>!?ajnHH}ҤI;i_iƾ}gKP^$0x!\:T~mo߾g]1ݰQʰQ* $ ^X|-H.ǹs֥_ AOAOAOAOAOAOAOAOAO!RHAB֮9sHLL2Y& "vw* D!'OO>\pʐc],'QJgZY BxCUU2ָx<x<dar.TUEXӪ2'QJSɿksP2m>p^S-s%~2NjE2 tnwx 3sǶmn`//pE=nuǒlŰ_>O|{|CΤOuBE! 0`@aݺuADADADA6螿꒴ M,>Ad+?$-La ҂T(w6c-j;DD*H{S:wL*  y :2Jh?'?N~4mI_=#眼Os'vMK,i]x_i, Qt5{/\uӯ ĪEQM$GV(J xAX """(pލYRDDDD] ^>R\[s[1'/¨Ġ];C +x<{S}`[&z߽GLlwo}~uV$o17;-ǥ_ݳw?߭GDDD[[aoi[s,o碑{qF[nVDt/,hۻL5  YT|o},l{,X|TR;j}WڊBf"q⅏})=g*L#cGx`r?#""F'IihkTito`*4"G[Sk3Wf @(>gl/y>B~8{ײPU~}?a-VRy95 wW~νYo?谫npV>{ z3X\e$~y'@s{1:??OELErϊpÖoGϯǧT"""U}t ΩhY|"my۶_XYߦYY Pu"/ zp6?Z]ml;if wO~e3e%R{2`)X:+R~OMpQ-07>jU0g%5;X99~~)X~:p|kJDDD1`p }3<s=&m8I vp{| ւw^xǚ4]);  _.,ual| :sלsBQ <ԹҧQtXrJ?cc{}Oś3u[>uoi75;g_LE1>͙oO*Ecg[os*(11C1CEe;_]a`<l&賔 AuXx=>v uCl06aa㱻0<4Y^H\&f }MyCa;E*F#/53yGc+ޡBk5St>הuJIO-Mm6/qO{+0.QD ꍀv5DWf]´-^+Hv0(- d0*/^0|WZ4ihXWJm_WPP좧Gף `Fô &s&ޑ0ת(еfL#v\#kL>[">Т ]18y$ψ;{a\4@V=&+6yvkX{Jh/ }ʹ$O״ }@7(pӎfK΋C,ҾLJ]vO=Aaqc xlHCE== #aZ0?D}QŅvU : $Rh5Hk< ]ay:hvDE"C"tsz-"Q(鯗. u6ׅڵkX&%"^ *KB0l/cX(DDDDGPI}0TLKԧ:՝(|b9 ы6br.a!:ev 0tP"J 緡p2\|1110`T*Q!|Ar .tOs!"""a~mlld剈G"?(~W|S8S(*s\!,D5n{Q(;Lbñ˸|Yg|#Wp#'Iif9Fr_gSG'<ӷ)<.#YؑȪxz-YJ7,uTgZ.P0H_>/\~e.Aen!R峲0?Ӷgcr_Wz}phni̩?03$ [Qzlu>޻T=:(陿]Y{ao֥ v-=Qyz3̳}Z,,]:YSqIUmlCD|E=g'z *g5JfԿpU򋟹y{dLF9ֻu^|jے*sމ>(͛{):gO~Ajz Xoe0|Z+Gʽ / 2tG̀nسՎNޕro;K>=vnoVǚe||n:2z݇|;osĨQ+^Su1=ISпTS8zTTA B(΀e'gPZwZ ,HϚWo_rH峍I|n 0]2 GK?H>Rd͚`8,9v8ٝ\oK=OW/bEhُ (BcS DqϭO_ł];+^-6 h'y}k-!bҪش:P'<7OUaebvx~Qāi~#H>[ԂƦH{Qϲ_1A:uO45TVƺ50dN }޷waMF>HM@ѧ(c gcyMc>géuQzRSSP1w оMc;9SXjڑo:V;cF6♟̖;4jk`oԛc |BطgoVICߋpr+7T9ϽTZNځG ’2Y1O *SM6?tN"2z= }ɏ -73\71llɰl n@LNEҴ }p?ɏ|4WRZ9=͟~|nOo:4ocg{` ~ əX4`_4rְ):AE+~9:= [٫;X^6Ξ= 8. L;v= UCZn`O#uN< _vrQ~<O{s^;iBY:< YeZҿTT|*RVuwv%k?+ Y5vƒr,54?6B޷>}4ƫ} e6O@[2qn Ν:7~6 W-ᅬ3f`Ĉu\o777D~V59S;S/.@[NRs<ѲWw"w.]ݎrwRoɯ=y=oFG3_$_)yal;``&;Dϗ:TIKKcۼ 0Qtp_D11,Q')庰KW {'E+O!ש@%~u*47]6hYPaznNFh,}7}2=bX6NoG}F{)?>C7/kPuRSOD¨3ޠ  \yzi-:ɞ2 >`Ӌ0LwWu!mGLi~Q{`{^ X?;Pd 5Dsހ75-BEhmz3yx*s\S5nRw=+98/F%*;9kgxi$s_J PAUnnڹ H/-yi_Zp /+O!9G&`aOoNFI6)}z7j~ B4~2}j ;Μ>>[Bx|Du0,:x`fÅp0H(X{Qֱ2YP~/swim)E#=?;Val{|zLcn%`ه,;F\ <|CH3Q&&wYHKtC9r!(۟y=3){z&e~丆t%X @?'myA1m RaJi~_<(oOD3F< ee3zhfku  {x^>[Nߋb l%yz;#'_z У(eȾ1y)V^&NGB!Oֿ9-萂#Gϖ}@:=rDL9*I9oP=\6s6 w 3_GS`)M̅! 5ƀY\I=*v ?UGe뤦)9+ }G+u"P>g[i~_<ֈSu[0QBL<Z#tNj0is50O  ɞuLZ k[4 OBȽ.'!VPdӑcXF5W9H.\! 
Amelia/vignettes/amelia.bib0000644000176200001440000001134214335240021015356 0ustar liggesusers

 @STRING{ jasa = "Journal of the American Statistical Association"}

@article{Efron94,
  author={Bradley Efron},
  title={Missing data, imputation, and the bootstrap},
  journal={Journal of the American Statistical Association},
  volume={89},
  year={1994},
  pages={463-475},
  month={June},
  number={426}
}

@article{Lahlrl03,
  author={P. Lahiri},
  title={On the impact of bootstrapping in survey sampling and small area estimation},
  journal={Statistical Science},
  volume= 18,
  year= 2003,
  pages={199-210},
  number= 2
}

@book{Rubin87,
  author={Donald B. Rubin},
  title={Multiple imputation for nonresponse in surveys},
  publisher={John Wiley \& Sons},
  year={1987},
  address={New York}
}

@article{Rubin94,
  author={Donald B. Rubin},
  title={Missing data, imputation, and the bootstrap: Comment},
  journal={Journal of the American Statistical Association},
  volume={89},
  year={1994},
  pages={475-478},
  month={June},
  number={426}
}

@article{RubSch86,
  author={Donald Rubin and Nathaniel Schenker},
  title={Multiple imputation for interval estimation for simple random samples with ignorable nonresponse},
  journal= jasa,
  volume= 81,
  year= 1986,
  pages={366-374},
  number= 394
}

@book{Schafer97,
  author={Joseph L. Schafer},
  title={Analysis of incomplete multivariate data},
  publisher={Chapman \& Hall},
  year={1997},
  address={London}
}

@article{ShaSit96,
  author={Jun Shao and Randy R. Sitter},
  title={Bootstrap for imputed survey data},
  journal={Journal of the American Statistical Association},
  volume={91},
  year={1996},
  pages={1278-1288},
  month={September},
  number={435}
}

@article{MilKub05,
  title = {{Why the move to free trade? Democracy and trade policy in the developing countries}},
Democracy and trade policy in the developing countries}}, author = {Helen Milner and Keiko Kubota}, journal = {International Organization}, volume = {59}, number = {1}, pages = {107--143}, year = {2005} } @book{King89, author = {Gary King}, title = {Unifying political methodology: The likelihood theory of statistical inference}, publisher = {Michigan University Press}, year = 1989, address = {Ann Arbor} } @article{HonKin10, author = {James Honaker and Gary King}, title = {What to do about missing values in time series cross-section data}, journal = {American Journal of Political Science}, year = {2010}, volume = {54}, number = {2}, month = {April}, pages = {561--581}, note = {{http://gking.harvard.edu/files/abs/pr-abs.shtml}} } @article{KinHonJos01, author = {Gary King and James Honaker and Anne Joseph and Kenneth Scheve}, title = {Analyzing incomplete political science data: An alternative algorithm for multiple imputation}, journal = {American Political Science Review}, volume = 95, year = 2001, pages = {49--69}, month = {March}, number = 1 , note = {{http://gking.harvard.edu/files/abs/evil-abs.shtml}} } @article{KinTomWit00, author = {Gary King and Michael Tomz and Jason Wittenberg}, title = {Making the most of statistical analyses: Improving interpretation and presentation}, journal = {American Journal of Political Science}, volume = 44, year = 2000, pages = {341--355}, month = {April}, number = 2, note = {{http://gking.harvard.edu/files/abs/making-abs.shtml}} } @article{SchOls98, author={Joseph L. Schafer and Maren K. Olsen}, title={Multiple imputation for multivariate missing-data problems: A data analyst's perspective}, journal={Multivariate Behavioral Research}, volume={33}, year={1998}, pages={545-571}, number={4} } @article{DemLaiRub77, author={Arthur P. Dempster and N.M. Laird and D.B. Rubin}, title={Maximum likelihood estimation from incomplete data via the em algorithm}, journal={Journal of the Royal Statistical Society B}, volume={39}, year={1977}, pages={1-38} } @article{AbaGelLev08, author = {Kobi Abayomi and Andrew Gelman and Marc Levy}, title = {Diagnostics for multivariate imputations}, journal = {Applied Statistics}, volume = {57}, number = {3}, pages = {273--291}, year = {2008} } @misc{HonJosKin98, author = {James Honaker and Anne Joseph and Gary King and Kenneth Scheve and Naunihal Singh.}, title = {\pkg{AMELIA}: A program for missing data}, year = {1998-2002}, note = {http://gking.harvard.edu/amelia} } @Manual{R11, title = {\proglang{R}: A language and environment for statistical computing}, author = {{\proglang{R} Development Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = 2011, note = {{ISBN} 3-900051-07-0}, url = {http://www.R-project.org} }Amelia/vignettes/using-amelia.Rmd0000644000176200001440000012477614335240021016504 0ustar liggesusers--- title: "Using Amelia" date: "`r Sys.Date()`" link-citations: yes bibliography: amelia.bib output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Using Amelia} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, include = FALSE} knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ``` ## Data We now demonstrate how to use Amelia using data from @MilKub05 which studies the effect of democracy on trade policy. 
For the purposes of this user's guide, we will use a subset restricted to nine developing countries in Asia from 1980 to 1999[^freetrade]. This dataset includes 10 variables:

| Variable   | Description                                          |
|:-----------|:-----------------------------------------------------|
| `year`     | year                                                 |
| `country`  | country                                              |
| `tariff`   | average tariff rates                                 |
| `polity`   | Polity IV Score[^polity]                             |
| `pop`      | total population                                     |
| `gdp.pc`   | gross domestic product per capita                    |
| `intresmi` | gross international reserves                         |
| `signed`   | dummy variable if signed an IMF agreement that year  |
| `fivop`    | measure of financial openness                        |
| `usheg`    | measure of US hegemony[^hegemony]                    |

These variables correspond to the variables used in the analysis model of @MilKub05 in table 2.

[^freetrade]: We have artificially added some missingness to these data for presentational purposes. You can access the original data at [https://scholar.princeton.edu/hvmilner/data](https://scholar.princeton.edu/hvmilner/data).

[^polity]: The Polity score is a number between -10 and 10 indicating how democratic a country is. A fully autocratic country would be a -10 while a fully democratic country would be a 10.

[^hegemony]: This measure of US hegemony is the US imports and exports as a percent of the world total imports and exports.

We first load the Amelia package and the data:

```{r load_data, results = "hide"}
library(Amelia)
data(freetrade)
```

We can check the summary statistics of the data to see that there is missingness on many of the variables:

```{r summarize_data}
summary(freetrade)
```

In the presence of missing data, most statistical packages use *listwise deletion*, which removes any row that contains a missing value from the analysis. Using the base model of @MilKub05 Table 2, we run a simple linear model in R, which uses listwise deletion:

```{r mk_lm}
summary(lm(tariff ~ polity + pop + gdp.pc + year + country, data = freetrade))
```

Note that 60 of the 171 original observations are deleted due to missingness. These observations, however, are partially observed, and contain valuable information about the relationships between those variables which are present in the partially completed observations. Multiple imputation will help us retrieve that information and make better, more efficient, inferences.

## Multiple Imputation

When performing multiple imputation, the first step is to identify the variables to include in the imputation model. It is crucial to include at least as much information as will be used in the analysis model. That is, any variable that will be in the analysis model should also be in the imputation model. This includes any transformations or interactions of variables that will appear in the analysis model.

In fact, it is often useful to add more information to the imputation model than will be present when the analysis is run. Since imputation is predictive, any variables that would increase predictive power should be included in the model, even if including them in the analysis model would produce bias in estimating a causal effect (such as for post-treatment variables) or collinearity would preclude determining which variable had a relationship with the dependent variable (such as including multiple alternate measures of GDP). In our case, we include all the variables in `freetrade` in the imputation model, even though our analysis model focuses on `polity`, `pop` and `gdp.pc`. We're not incorporating time or spatial data yet, but we do below.
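As a quick, optional check before imputing, the `missmap()` function can visualize where values are missing; it accepts the original data frame directly (a minimal sketch of this diagnostic step, with a chunk label of our choosing):

```{r missmap_freetrade}
missmap(freetrade)
```

The map makes it easy to see which variables and observations account for most of the missingness.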
To create multiple imputations in Amelia, we can simply run ```{r amelia} a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country") a.out ``` Note that our example dataset is deliberately small both in variables and in cross-sectional elements. Typical datasets may often have hundreds or possibly a couple thousand steps to the EM algorithm. Long chains should remind the analyst to consider whether transformations of the variables would more closely fit the multivariate normal assumptions of the model (correct but omitted transformations will shorten the number of steps and improve the fit of the imputations), but do not necessarily denote problems with the imputation model. The output gives some information about how the algorithm ran. Each of the imputed datasets is now in the list `a.out$imputations`. Thus, we could plot a histogram of the `tariff` variable from the 3rd imputation, ```{r} hist(a.out$imputations[[3]]$tariff, col = "grey", border = "white") ``` ### Saving imputed datasets If you need to save your imputed datasets, one direct method is to save the output list from `amelia`, ```{r save, eval = FALSE} save(a.out, file = "imputations.RData") ``` As in the previous example, the ith imputed datasets can be retrieved from this list as `a.out$imputations[[i]]`. In addition, you can save each of the imputed datasets to its own file using the `write.amelia()` command, ```{r write_amelia, eval = FALSE} write.amelia(obj = a.out, file.stem = "outdata") ``` This will create one comma-separated value file for each imputed dataset in the following manner: outdata1.csv outdata2.csv outdata3.csv outdata4.csv outdata5.csv The `write.amelia` function can also save files in tab-delimited and Stata (`.dta`) file formats. For instance, to save Stata files, simply change the `format` argument to `"dta"`, ```{r write_dta, eval = FALSE} write.amelia(obj = a.out, file.stem = "outdata", format = "dta") ``` Additionally, `write.amelia()` can create a "stacked" version of the imputed dataset which stacks each imputed dataset on top of one another. This can be done by setting the \code{separate} argument to `FALSE`. The resulting matrix is of size $(N \cdot m) \times p$ if the original dataset is excluded (`orig.data = FALSE`) and of size $(N \cdot (m+1))\times p$ if it is included (`orig.data = TRUE`). The stacked dataset will include a variable (set with `impvar`) that indicates to which imputed dataset the observation belongs. ## Combining multiple calls to `amelia()` The EMB algorithm is what computer scientists call *embarrassingly parallel*, meaning that it is simple to separate each imputation into parallel processes. With Amelia it is simple to run subsets of the imputations on different machines and then combine them after the imputation for use in analysis model. This allows for a huge increase in the speed of the algorithm. Output lists from different Amelia runs can be combined together into a new list. For instance, suppose that we wanted to add another ten imputed datasets to our earlier call to `amelia()`. First, run the function to get these additional imputations, ```{r more_amelia} a.out.more <- amelia(freetrade, m = 10, ts = "year", cs = "country", p2s = 0) a.out.more ``` then combine this output with our original output using the `ameliabind()` function, ```{r ameliabind} a.out.more <- ameliabind(a.out, a.out.more) a.out.more ``` This function binds the two outputs into the same output so that you can pass the combined imputations easily to analysis models and diagnostics. 
Note that `a.out.more` now has a total of 15 imputations.

A simple way to execute a parallel processing scheme with Amelia would be to run `amelia()` with `m` set to 1 on $m$ different machines or processors, save each output using the `save()` function, load them all into the same R session using the `load()` command and then combine them using `ameliabind()`. In order to do this, however, make sure to give each of the outputs a different name so that they do not overwrite each other when loading into the same R session. Also, some parallel environments will dump all generated files into a common directory, where they may overwrite each other. If it is convenient in a parallel environment to run a large number of `amelia()` calls from a single piece of code, one useful way to avoid overwriting is to create the `file.stem` with a random suffix. For example:

```{r rand_stem, eval = FALSE}
b <- round(runif(1, min = 1111, max = 9999))
random.name <- paste("am", b, sep = "")
amelia <- write.amelia(obj = a.out, file.stem = random.name)
```

### Screen output

Screen output can be adjusted with the "print to screen" argument, `p2s`. At a value of 0, no screen printing will occur. This may be useful in large jobs or simulations where a very large number of imputation models may be required. The default value of 1 lists each bootstrap and displays the number of iterations required to reach convergence in that bootstrapped dataset. The value of 2 gives more thorough screen output, including, at each iteration, the number of parameters that have significantly changed since the last iteration. This may be useful when the EM chain length is very long, as it can provide an intuition for how many parameters still need to converge in the EM chain, and a sense of the time remaining. However, it is worth noting that the last several parameters can often take a significant fraction of the total number of iterations to converge.

Setting `p2s` to 2 will also generate information on how the EM algorithm is behaving, such as a `!` when the current estimated complete data covariance matrix is not invertible and a `*` when the likelihood has not monotonically increased in that step. Having many of these two symbols in the screen output is an indication of a problematic imputation model. Problems of non-invertible matrices often mean that the current guess for the covariance matrix is singular. This is a sign that there may be two highly correlated variables in the model. One way to resolve this is to use a ridge prior (see \@ref(sec_prior)).

An example of the output when `p2s` is 2 would be

```{r p2s}
a.out.p2s <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 2)
```

## Parallel Imputation {#sec:parallel}

Each imputation in the above EMB algorithm is completely independent of any other imputation, a property called *embarrassingly parallel*. This type of approach can take advantage of the multiple-core infrastructure of modern CPUs. Each core in a multi-core processor can execute independent operations in parallel. Amelia can utilize this parallel processing internally via the `parallel` and the `ncpus` arguments. The `parallel` argument sets the parallel processing backend, either with `"multicore"` or `"snow"` (or `"no"` for no parallel processing). The `"multicore"` backend is not available on Windows systems, but tends to be quicker at parallel processing. On a Windows system, the `"snow"` backend provides parallel processing through a cluster of worker processes across the CPUs.
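For example, to run the earlier imputation model while spreading the work across two worker processes, a call along the following lines would suffice (a hypothetical sketch: the `"snow"` backend and `ncpus = 2` are illustrative choices that assume at least two available cores):

```{r parallel_snow, eval = FALSE}
a.out.par <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                    parallel = "snow", ncpus = 2)
```

On a Unix-alike machine, the same call with `parallel = "multicore"` would fork worker processes instead.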
You can set the default for this argument using the `"amelia.parallel"` option. This allows you to run Amelia in parallel as the default for an entire R session without setting arguments in the `amelia()` call.

For each of the parallel backends, Amelia requires a number of CPUs to use in parallel. This can be set using the `ncpus` argument. It can be higher than the number of physical cores in the system if hyperthreading or other technologies are available. You can use the `parallel::detectCores()` function to determine how many cores are available on your machine. The default for this argument can be set using the `"amelia.ncpus"` option.

On Unix-alike systems (such as macOS and Linux distributions), the `"multicore"` backend automatically sets up and stops the parallel workers by forking the process. On Windows, the `"snow"` backend requires more attention. Amelia will attempt to create a parallel cluster of worker processes (since Windows systems cannot fork a process) and will stop this cluster after the imputations are complete. Alternatively, Amelia also has a `cl` argument, which accepts a predefined cluster made with `parallel::makePSOCKcluster()`. For more information about parallel processing in R, see the documentation for the `parallel` package that ships along with R or the CRAN Task View on [Parallel Computing with R](https://cran.r-project.org/view=HighPerformanceComputing).

## Improving Imputations via Transformations {#sec:trans}

Social science data commonly include variables that fail to fit a multivariate normal distribution. Indeed, numerous models have been introduced specifically to deal with the problems they present. As it turns out, much evidence in the literature [discussed in @KinHonJos01] indicates that the multivariate normal model used in Amelia usually works well for the imputation stage even when discrete or non-normal variables are included and when the analysis stage involves limited dependent variable models. Nevertheless, Amelia includes some limited capacity to deal directly with ordinal and nominal variables and to modify variables that require other transformations. In general, nominal and log-transformed variables should be declared to Amelia, whereas ordinal (including dichotomous) variables often need not be, as described below. (For harder cases, see @Schafer97 for specialized MCMC-based imputation models for discrete variables.)

Although these transformations are taken internally on these variables to better fit the data to the multivariate normal assumptions of the imputation model, all the imputations that are created will be returned in the original untransformed form of the data. If the user has already performed transformations on their data (such as by taking a log or square root prior to feeding the data to `amelia()`) these do not need to be declared, as that would result in the transformation occurring *doubly* in the imputation model. The fully imputed data sets that are returned will always be in the form of the original data that is passed to the `amelia()` routine.

### Ordinal {#sec:ord}

In much statistical research, researchers treat independent ordinal (including dichotomous) variables as if they were really continuous. If the analysis model to be employed is of this type, then nothing extra is required of the imputation model. Users are advised to allow Amelia to impute non-integer values for any missing data, and to use these non-integer values in their analysis.
Sometimes this makes sense, and sometimes this defies intuition. One particular imputation of 2.35 for a missing value on a seven point scale carries the intuition that the respondent is between a 2 and a 3 and most probably would have responded 2 had the data been observed. This is easier to accept than an imputation of 0.79 for a dichotomous variable where a zero represents a male and a one represents a female respondent. However, in both cases the non-integer imputations carry more information about the underlying distribution than would be carried if we were to force the imputations to be integers. Thus whenever the analysis model permits, missing ordinal observations should be allowed to take on continuously valued imputations. In the `freetrade` data, one such ordinal variable is `polity` which ranges from -10 (full autocracy) to 10 (full democracy). If we tabulate this variable from one of the imputed datasets, ```{r polity_tab} table(a.out$imputations[[3]]$polity) ``` we can see that there is one imputation between -4 and -3 and one imputation between 6 and 7. Again, the interpretation of these values is rather straightforward even if they are not strictly in the coding of the original Polity data. Often, however, analysis models require some variables to be strictly ordinal, as for example, when the dependent variable will be modeled in a logistical or Poisson regression. Imputations for variables set as ordinal are created by taking the continuously valued imputation and using an appropriately scaled version of this as the probability of success in a binomial distribution. The draw from this binomial distribution is then translated back into one of the ordinal categories. For our data we can simply add `polity` to the `ords` argument: ```{r polity_ord} a.out1 <- amelia(freetrade, m = 5, ts = "year", cs = "country", ords = "polity", p2s = 0) table(a.out1$imputations[[3]]$polity) ``` Now, we can see that all of the imputations fall into one of the original polity categories. ### Nominal {#sec:nom} Nominal variables[^binary] must be treated quite differently than ordinal variables. Any multinomial variables in the data set (such as religion coded 1 for Catholic, 2 for Jewish, and 3 for Protestant) must be specified to Amelia. In our \code{freetrade} dataset, we have `signed` which is 1 if a country signed an IMF agreement in that year and 0 if it did not. Of course, our first imputation did not limit the imputations to these two categories ```{r binary_tab} table(a.out1$imputations[[3]]$signed) ``` In order to fix this for a $p$-category multinomial variable, Amelia will determine $p$ (as long as your data contain at least one value in each category), and substitute $ p-1$ binary variables to specify each possible category. These new $p-1$ variables will be treated as the other variables in the multivariate normal imputation method chosen, and receive continuous imputations. These continuously valued imputations will then be appropriately scaled into probabilities for each of the $p$ possible categories, and one of these categories will be drawn, where upon the original $p$-category multinomial variable will be reconstructed and returned to the user. Thus all imputations will be appropriately multinomial. [^binary]: Dichotomous (two category) variables are a special case of nominal variables. For these variables, the nominal and ordinal methods of transformation in Amelia agree. 
For our data we can simply add `signed` to the `noms` argument: ```{r noms} a.out2 <- amelia(freetrade, m = 5, ts = "year", cs = "country", noms = "signed", p2s = 0) table(a.out2$imputations[[3]]$signed) ``` Note that Amelia can only fit imputations into categories that exist in the original data. Thus, if there was a third category of signed, say 2, that corresponded to a different kind of IMF agreement, but it never occurred in the original data, Amelia could not match imputations to it. Since Amelia properly treats a $p$-category multinomial variable as $p-1$ variables, one should understand the number of parameters that are quickly accumulating if many multinomial variables are being used. If the square of the number of real and constructed variables is large relative to the number of observations, it is useful to use a ridge prior as in section \@ref(sec_prior). ### Natural log {#sec:log} If one of your variables is heavily skewed or has outliers that may alter the imputation in an unwanted way, you can use a natural logarithm transformation of that variable in order to normalize its distribution. This transformed distribution helps Amelia to avoid imputing values that depend too heavily on outlying data points. Log transformations are common in expenditure and economic variables where we have strong beliefs that the marginal relationship between two variables decreases as we move across the range. For instance, we can show the `tariff` variable clearly has positive (or, right) skew while its natural log transformation has a roughly normal distribution. ```{r tarrif_hist} hist(freetrade$tariff, col="grey", border="white") hist(log(freetrade$tariff), col="grey", border="white") ``` ### Square root {#sec:sqrt} Event count data is often heavily skewed and has nonlinear relationships with other variables. One common transformation to tailor the linear model to count data is to take the square roots of the counts. This is a transformation that can be set as an option in Amelia. ### Logistic {#sec:lgstc} Proportional data is sharply bounded between 0 and 1. A logistic transformation is one possible option in Amelia to make the distribution symmetric and relatively unbounded. ## Identification Variables {#sec:idvars} Datasets often contain identification variables, such as country names, respondent numbers, or other identification numbers, codes or abbreviations. Sometimes these are text and sometimes these are numeric. Often it is not appropriate to include these variables in the imputation model, but it is useful to have them remain in the imputed datasets (However, there are models that would include the ID variables in the imputation model, such as fixed effects model for data with repeated observations of the same countries). Identification variables which are not to be included in the imputation model can be identified with the argument `idvars`. These variables will not be used in the imputation model, but will be kept in the imputed datasets. If the `year` and `country` contained no information except labels, we could omit them from the imputation: ```{r idvars} amelia(freetrade, idvars = c("year", "country")) ``` Note that Amelia will return with an error if your dataset contains a factor or character variable that is not marked as a nominal or identification variable. 
Thus, if we were to omit the factor `country` from the `cs` or `idvars` arguments, we would receive an error:

```{r idvars_error}
a.out2 <- amelia(freetrade, idvars = c("year"))
```

In order to conserve memory, it is wise to remove unnecessary variables from a data set before loading it into Amelia. The only variables you should include in your data when running Amelia are variables you will use in the analysis stage and those variables that will help in the imputation model. While it may be tempting to simply mark unneeded variables as IDs, it only serves to waste memory and slow down the imputation procedure.

## Time Series, or Time Series Cross Sectional Data {#sec:tscs}

Many variables that are recorded over time within a cross-sectional unit are observed to vary smoothly over time. In such cases, knowing the observed values of observations close in time to any missing value may enormously aid the imputation of that value. However, the exact pattern may vary over time within any cross-section. There may be periods of growth, stability, or decline, in each of which the observed values would be used in a different fashion to impute missing values. Also, these patterns may vary enormously across different cross-sections, or may exist in some and not others.

Amelia can build a general model of patterns within variables across time by creating a sequence of polynomials of the time index. If, for example, tariffs vary smoothly over time, then we make the modeling assumption that there exists some polynomial that describes the tariff rate in cross-sectional unit $i$ at time $t$ as:

\[
\textrm{tariff}_{ti} = \beta_0 + \beta_1 t + \beta_2 t^2 + \beta_3 t^3 + \ldots
\]

Thus, if we include enough higher-order terms of time, then the pattern between observed values of the tariff rate can be estimated. Amelia will create polynomials of time up to the user-defined $k$-th order ($k \leq 3$).

We can implement this with the `ts` and `polytime` arguments. If we thought that a second-order polynomial would help predict the missing values, we could run

```{r polytime, results = "hide"}
a.out2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 2)
```

With this input, Amelia will add covariates to the model that correspond to time and its polynomials. These covariates will help better predict the missing values. If cross-sectional units are specified, these polynomials can be interacted with the cross-section unit to allow the patterns over time to vary between cross-sectional units. Unless you strongly believe all units have the same patterns over time in all variables (including the same constant term), this is a reasonable setting. When $k$ is set to 0, this interaction simply results in a model of *fixed effects* where every unit has a uniquely estimated constant term. Amelia does not smooth the observed data, and only uses this functional form, or one you choose, with all the other variables in the analysis and the uncertainty of the prediction, to impute the missing values.

In order to impute with trends specific to each cross-sectional unit, we can set `intercs` to `TRUE`:

```{r intercs, results = "hide"}
a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 2)
```

Note that attempting to use `polytime` without the `ts` argument, or `intercs` without the `cs` argument, will result in an error.
Using the `tscsPlot()` function (discussed below), we can see that we have a much better prediction about the missing values when incorporating time than when we omit it:

```{r tcomp1}
tscsPlot(a.out, cs = "Malaysia", main = "Malaysia (no time settings)",
         var = "tariff", ylim = c(-10, 60))
tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

### Lags and leads {#sec:lags}

An alternative way of handling time-series information is to include lags and leads of certain variables into the imputation model. *Lags* are variables that take the value of another variable in the previous time period while *leads* take the value of another variable in the next time period. Many analysis models use lagged variables to deal with issues of endogeneity, thus using leads may seem strange. It is important to remember, however, that imputation models are predictive, not causal. Thus, since both past and future values of a variable are likely correlated with the present value, both lags and leads should improve the model.

If we wanted to include lags and leads of tariffs, for instance, we would simply pass this to the `lags` and `leads` arguments:

```{r lags_leads}
a.out2 <- amelia(freetrade, ts = "year", cs = "country", lags = "tariff",
                 leads = "tariff")
```

## Including Prior Information

Amelia has a number of methods of setting priors within the imputation model. Two of these are commonly used and discussed below: ridge priors and observation-level priors.

### Ridge priors for high missingness, small samples, or large correlations {#sec_prior}

When the data to be analyzed contain a high degree of missingness or very strong correlations among the variables, or when the number of observations is only slightly greater than the number of parameters $p(p+3)/2$ (where $p$ is the number of variables), results from your analysis model will be more dependent on the choice of imputation model. This suggests more testing in these cases of alternative specifications under Amelia. This can happen when the polynomials of time interacted with the cross section are included in the imputation model. For example, in our data, if we used a polynomial of degree 2 with unit-specific trends and there are 9 countries, it would add $3 \times 9 - 1 = 26$ more variables to the imputation model (dropping one of the fixed effects for identification). When these are added, the EM algorithm can become unstable. You can detect this by inspecting the screen output under `p2s = 2` or by observing that the number of iterations per imputation varies widely.

In these circumstances, we recommend adding a ridge prior, which will help with numerical stability by shrinking the covariances among the variables toward zero without changing the means or variances. This can be done by including the `empri` argument. Including this prior as a positive number is roughly equivalent to adding `empri` artificial observations to the data set with the same means and variances as the existing data but with zero covariances. Thus, increasing the `empri` setting results in more shrinkage of the covariances, thus putting more a priori structure on the estimation problem: like many Bayesian methods, it reduces variance in return for an increase in bias that one hopes does not overwhelm the advantages in efficiency. In general, we suggest keeping the value on this prior relatively small and increasing it only when necessary.
A recommendation of 0.5 to 1 percent of the number of observations, $n$, is a reasonable starting value, and often useful in large datasets to add some numerical stability. For example, in a dataset of two thousand observations, this would translate to a prior value of 10 or 20, respectively. A prior of up to 5 percent is moderate in most applications and 10 percent is a reasonable upper bound.

For our data, it is easy to code up a 1 percent ridge prior:

```{r empri}
a.out.time2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                      intercs = TRUE, p2s = 0, empri = .01 * nrow(freetrade))
a.out.time2
```

### Observation-level priors {#sec:obspri}

Researchers often have additional prior information about missing data values based on previous research, academic consensus, or personal experience. Amelia can incorporate this information to produce vastly improved imputations. The Amelia algorithm allows users to include informative Bayesian priors about individual missing data cells instead of the more general model parameters, many of which have little direct meaning.

The incorporation of priors follows basic Bayesian analysis where the imputation turns out to be a weighted average of the model-based imputation and the prior mean, where the weights are functions of the relative strength of the data and prior: when the model predicts very well, the imputation will down-weight the prior, and vice versa [@HonKin10].

The priors about individual observations should describe the analyst's belief about the distribution of the missing data cell. This can either take the form of a mean and a standard deviation or a confidence interval. For instance, we might know that 1986 tariff rates in Thailand were around 40%, but we have some uncertainty as to the exact value. Our prior belief about the distribution of the missing data cell, then, centers on 40 with a standard deviation that reflects the amount of uncertainty we have about our prior belief.

To input priors, you must build a priors matrix with either four or five columns. Each row of the matrix represents a prior on either one observation or one variable. In any row, the entry in the first column is the row of the observation and the entry in the second column is the column of the observation. In the four-column priors matrix, the third and fourth columns are the mean and standard deviation of the prior distribution of the missing value.

For instance, suppose that we had some expert prior information about tariff rates in Thailand. We know from the data that Thailand is missing tariff rates in many years,

```{r thailand}
freetrade[freetrade$country == "Thailand", c("year", "country", "tariff")]
```

Suppose that we had expert information that tariff rates were roughly 40% in Thailand between 1986 and 1988 with about a 6% margin of error. This corresponds to a standard deviation of about 3. In order to include this information, we must form the priors matrix:

```{r build_prior}
pr <- matrix(
  c(158, 159, 160, 3, 3, 3, 40, 40, 40, 3, 3, 3),
  nrow = 3, ncol = 4
)
pr
```

The first column of this matrix corresponds to the row numbers of Thailand in these three years, the second column refers to the column number of `tariff` in the data and the last two columns refer to the actual prior. Once we have this matrix, we can pass it to `amelia()`,

```{r amelia_prior}
a.out.pr <- amelia(freetrade, ts = "year", cs = "country", priors = pr)
```

In the five-column matrix, the last three columns describe a confidence range of the data.
The columns are a lower bound, an upper bound, and a confidence level between 0 and 1, exclusive. Whichever format you choose, it must be consistent across the entire matrix. We could get roughly the same prior as above by utilizing this method. Our margin of error implies that we would want imputations between 34 and 46, so our matrix would be ```{r build_prior2} pr.2 <- matrix( c(158, 159, 160, 3, 3, 3, 34, 34, 34, 46, 46, 46, 0.95, 0.95, 0.95), nrow = 3, ncol = 5 ) pr.2 ``` These priors indicate that we are 95% confident that these missing values are in the range 34 to 46. If a prior has the value 0 in the first column, this prior will be applied to all missing values in this variable, except for explicitly set priors. Thus, we could set a prior for the entire `tariff` variable of 20, but still keep the above specific priors with the following code: ```{r build_prior3} pr.3 <- matrix( c(158, 159, 160, 0, 3, 3 , 3, 3, 40, 40, 40, 20, 3, 3, 3, 5), nrow = 4, ncol = 4) pr.3 ``` ### Logical bounds In some cases, variables in the social sciences have known logical bounds. Proportions must be between 0 and 1 and duration data must be greater than 0, for instance. Many of these logical bounds can be handled by using the correct transformation for that type of variable (see \@ref(sec:trans) for more details on the transformations handled by Amelia). In the occasional case that imputations must satisfy certain logical bounds not handled by these transformations, Amelia can take draws from a truncated normal distribution in order to achieve imputations that satisfy the bounds. Note, however, that this procedure imposes extremely strong restrictions on the imputations and can lead to lower variances than the imputation model implies. The mean value across all the imputed values of a missing cell is the best guess from the imputation model of that missing value. The variance of the distribution across imputed datasets correctly reflects the uncertainty in that imputation. It is often the mean imputed value that should conform to the any known bounds, even if individual imputations are drawn beyond those bounds. The mean imputed value can be checked with the diagnostics presented in the next section. In general, building a more predictive imputation model will lead to better imputations than imposing bounds. Amelia implements these bounds by rejection sampling. When drawing the imputations from their posterior, we repeatedly resample until we have a draw that satisfies all of the logical constraints. You can set an upper limit on the number of times to resample with the `max.resample` arguments. Thus, if after `max.resample` draws, the imputations are still outside the bounds, Amelia will set the imputation at the edge of the bounds. Thus, if the bounds were 0 and 100 and all of the draws were negative, Amelia would simply impute 0. As an extreme example, suppose that we know, for certain that tariff rates had to fall between 30 and 40. This, obviously, is not true, but we can generate imputations from this model. In order to specify these bounds, we need to generate a matrix of bounds to pass to the `bounds` argument. This matrix will have 3 columns: the first is the column for the bounded variable, the second is the lower bound and the third is the upper bound. 
Thus, to implement our bound on tariff rates (the 3rd column of the dataset), we would create the matrix, ```{r build_bounds} bds <- matrix(c(3, 30, 40), nrow = 1, ncol = 3) bds ``` which we can pass to the `bounds` argument to `amelia()`: ```{r amelia_bounds} a.out.bds <- amelia(freetrade, ts = "year", cs = "country", bounds = bds, max.resample = 1000) ``` The difference in results between the bounded and unbounded model are not obvious from the output, but inspection of the imputed tariff rates for Malaysia shows that there has been a drastic restriction of the imputations to the desired range: ```{r bounds_plot} tscsPlot(a.out, cs = "Malaysia", main = "No logical bounds", var = "tariff", ylim = c(-10, 60)) tscsPlot(a.out.bds, cs = "Malaysia", main = "Bounded between 30 and 40", var = "tariff", ylim = c(-10, 60)) ``` Again, analysts should be extremely cautious when using these bounds as they can seriously affect the inferences from the imputation model, as shown in this example. Even when logical bounds exist, we recommend simply imputing variables normally, as the violation of the logical bounds represents part of the true uncertainty of imputation. ## Post-imputations Transformations {#sec_postimptrans} In many cases, it is useful to create transformations of the imputed variables for use in further analysis. For instance, one may want to create an interaction between two variables or perform a log-transformation on the imputed data. To do this, Amelia includes a `transform()` function for `amelia()` output that adds or overwrites variables in each of the imputed datasets. For instance, if we wanted to create a log-transformation of the `gdp.pc` variable, we could use the following command: ```{r amelia_transform} a.out <- transform(a.out, lgdp = log(gdp.pc)) head(a.out$imputations[[1]][,c("country", "year","gdp.pc", "lgdp")]) ``` To create an interaction between two variables, we could simply use: ```{r interaction} a.out <- transform(a.out, pol_gdp = polity * gdp.pc) ``` Each transformation is recorded and the `summary()` command prints out each transformation that has been performed: ```{r sum_trans} summary(a.out) ``` Note the updated output is almost exactly the same as the fresh `amelia()` output. You can pass the transformed output back to `amelia()` and it will add imputations and update these imputations with the transformations you have performed. ## Analysis Models {#sec_analysis} Imputation is most often a data processing step as opposed to a final model in of itself. To this end, it is easy to pass output from `amelia()` to other functions. The easiest and most integrated way to run an analysis model is to use the `with()` and `mi.combine()` functions. For example, in @MilKub05, the dependent variable was tariff rates. We can replicate table 5.1 from their analysis with the original data simply by running ```{r lm_lwd} orig.model <- lm(tariff ~ polity + pop + gdp.pc + year + country, data = freetrade) orig.model ``` Running the same model with imputed data is almost identical. We can run the `lm` within each imputed data set by using the `with()` function: ```{r lm_imp} imp.models <- with( a.out, lm(tariff ~ polity + pop + gdp.pc + year + country) ) imp.models[1:2] ``` The result here is simply a list of output of `lm()` applied to each imputed data set. 
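If you prefer to combine a particular quantity of interest by hand, the `mi.meld()` function applies the same combination rules to a matrix of estimates and a matrix of standard errors, with one row per imputation. Below is a minimal sketch of this approach (it assumes, as above, that each element of `imp.models` is an `lm` fit):

```{r mi_meld}
## one row per imputation: coefficients and their standard errors
b.out <- t(sapply(imp.models, coef))
se.out <- t(sapply(imp.models, function(x) coef(summary(x))[, "Std. Error"]))

## combine the estimates across imputations with the Rubin rules
mi.meld(q = b.out, se = se.out)
```

More commonly, though, you will want to combine the full model output directly.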
We can combine the imputed estimates using the rules described in @KinHonJos01 and @Schafer97 with the `mi.combine()` function: ```{r mi_combine} out <- mi.combine(imp.models, conf.int = TRUE) out ``` The combination of the results depends on the [broom](https://broom.tidymodels.org) package and results can be combined if a `tidy()` method exists for the estimation function passed to `with()`. Other packages such as [Zelig](https://zeligproject.org) can also combine imputed data sets across a number of statistical models. Furthermore, users can easily export their imputations using the `write.amelia()` function as described in \@ref(sec_saving) and use statistical packages other than R for the analysis model. In addition to the resources available in R, users can draw on Stata to implement their analysis models. As of version 11, Stata has built-in handling of multiply imputed datasets. In order to utilize this functionality, simply export the "stacked" imputations using the `write.amelia()` function: ```{r write_dta_stacked, eval = FALSE} write.amelia(a.out, separate = FALSE, file.stem = "outdata", format = "dta") ``` Once this stacked dataset is open in Stata, you must tell Stata that it is an imputed dataset using the \code{mi import flong} command: ```{stata eval = FALSE} mi import flong, m(imp) id(year country) imp(tariff-usheg) ``` The command takes a few options: `m` designates the imputation variable (set with `impvar` in `write.amelia()`), `id` sets the identifying varibles, and `imp` sets the variables that were imputed (or included in the imputation). The `tariff-usheg` indicates that Stata should treat the range of variables between `tariff` and `usheg` as imputed. Once we have set the dataset as imputed, we can use the built-in `mi` commands to analyze the data: ```{stata eval = FALSE} mi estimate: reg tariff polity pop gdp_pc ``` ``` Multiple-imputation estimates Imputations = 5 Linear regression Number of obs = 171 Average RVI = 1.4114 Complete DF = 167 DF adjustment: Small sample DF: min = 10.36 avg = 18.81 max = 37.62 Model F test: Equal FMI F( 2, 10.4) = 15.50 Within VCE type: OLS Prob > F = 0.0008 ------------------------------------------------------------------------------ tariff | Coef. Std. Err. t P>|t| [95% Conf. Interval] -------------+---------------------------------------------------------------- polity | -.2058115 .3911049 -0.53 0.610 -1.072968 .6613452 pop | 3.21e-08 8.72e-09 3.68 0.004 1.27e-08 5.14e-08 gdp_pc | -.0027561 .000644 -4.28 0.000 -.0040602 -.0014519 _cons | 32.70461 2.660091 12.29 0.000 27.08917 38.32005 ------------------------------------------------------------------------------ ``` ## The `amelia` class {#sec_out} The output from the `amelia()` function is an instance of the S3 class `amelia`. Instances of the `amelia` class contain much more than simply the imputed datasets. The `mu` object of the class contains the posterior draws of the means of the complete data. The `covMatrices` contains the posterior draws of the covariance matrices of the complete data. Note that these correspond to the variables as they are sent to the EM algorithm. Namely, they refer to the variables after being transformed, centered and scaled. The `iterHist` object is a list of `m` 3-column matrices. Each row of the matrices corresponds to an iteration of the EM algorithm. The first column indicates how many parameters had yet to converge at that iteration. 
The second column indicates if the EM algorithm made a step that decreased the number of converged parameters. The third column indicates whether the covariance matrix at this iteration was singular. Clearly, the last two columns are meant to indicate when the EM algorithm enters a problematic part of the parameter space. ## References Amelia/vignettes/intro-mi.Rmd0000644000176200001440000002262114335240021015651 0ustar liggesusers--- title: "Introduction to Multiple Imputation" date: "`r Sys.Date()`" link-citations: yes bibliography: amelia.bib output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Introduction to Multiple Imputation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r loadpkg, echo = FALSE, include = FALSE} knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") ``` ## Introduction {#sec:intro} Missing data is a ubiquitous problem in social science data. Respondents do not answer every question, countries do not collect statistics every year, archives are incomplete, subjects drop out of panels. Most statistical analysis methods, however, assume the absence of missing data, and are only able to include observations for which every variable is measured. Amelia allows users to impute ("fill in" or rectangularize) incomplete data sets so that analyses which require complete observations can appropriately use all the information present in a dataset with missingness, and avoid the biases, inefficiencies, and incorrect uncertainty estimates that can result from dropping all partially observed observations from the analysis. Amelia performs *multiple imputation*, a general-purpose approach to data with missing values. Multiple imputation has been shown to reduce bias and increase efficiency compared to listwise deletion. Furthermore, ad-hoc methods of imputation, such as mean imputation, can lead to serious biases in variances and covariances. Unfortunately, creating multiple imputations can be a burdensome process due to the technical nature of algorithms involved. \Amelia\ provides users with a simple way to create and implement an imputation model, generate imputed datasets, and check its fit using diagnostics. The Amelia program goes several significant steps beyond the capabilities of the first version of Amelia [@HonJosKin98]. For one, the bootstrap-based EMB algorithm included in Amelia can impute many more variables, with many more observations, in much less time. The great simplicity and power of the EMB algorithm made it possible to write Amelia so that it virtually never crashes --- which to our knowledge makes it unique among all existing multiple imputation software --- and is much faster than the alternatives too. Amelia also has features to make valid and much more accurate imputations for cross-sectional, time-series, and time-series-cross-section data, and allows the incorporation of observation and data-matrix-cell level prior information. In addition to all of this, Amelia provides many diagnostic functions that help users check the validity of their imputation model. This software implements the ideas developed in @HonKin10. ## What Amelia Does {#sec:what} Multiple imputation involves imputing $m$ values for each missing cell in your data matrix and creating $m$ "completed" data sets. Across these completed data sets, the observed values are the same, but the missing values are filled in with a distribution of imputations that reflect the uncertainty about the missing data. 
After imputation with Amelia's EMB algorithm, you can apply whatever statistical method you would have used if there had been no missing values to each of the $m$ data sets, and use a simple procedure, described below, to combine the results[^combine]. Under normal circumstances, you only need to impute once and can then analyze the $m$ imputed data sets as many times and for as many purposes as you wish. The advantage of Amelia is that it combines the comparative speed and ease-of-use of our algorithm with the power of multiple imputation, to let you focus on your substantive research questions rather than spending time developing complex application-specific models for nonresponse in each new data set. Unless the rate of missingness is very high, $m = 5$ (the program default) is probably adequate. [^combine]: You can combine the results automatically by doing your data analyses within [Zelig for R](https://zeligproject.org), or within [Clarify for Stata](https://gking.harvard.edu/clarify). ### Assumptions The imputation model in Amelia assumes that the complete data (that is, both observed and unobserved) are multivariate normal. If we denote the $(n \times k)$ dataset as $D$ (with observed part $D^{obs}$ and unobserved part $D^{mis}$), then this assumption is \begin{equation} D \sim \mathcal{N}_k(\mu, \Sigma), \end{equation} which states that $D$ has a multivariate normal distribution with mean vector $\mu$ and covariance matrix $\Sigma$. The multivariate normal distribution is often a crude approximation to the true distribution of the data, yet there is evidence that this model works as well as other, more complicated models even in the face of categorical or mixed data [see @Schafer97, @SchOls98]. Furthermore, transformations of many types of variables can often make this normality assumption more plausible (see \@ref(sec:trans) for more information on how to implement this in Amelia). The essential problem of imputation is that we only observe $D^{obs}$, not the entirety of $D$. In order to gain traction, we need to make the usual assumption in multiple imputation that the data are *missing at random* (MAR). This assumption means that the pattern of missingness only depends on the observed data $D^{obs}$, not the unobserved data $D^{mis}$. Let $M$ to be the missingness matrix, with cells $m_{ij} = 1$ if $d_{ij} \in D^{mis}$ and $m_{ij} = 0$ otherwise. Put simply, $M$ is a matrix that indicates whether or not a cell is missing in the data. With this, we can define the MAR assumption as \[ p(M|D) = p(M|D^{obs}). \] Note that MAR includes the case when missing values are created randomly by, say, coin flips, but it also includes many more sophisticated missingness models. When missingness is not dependent on the data at all, we say that the data are *missing completely at random* (MCAR). Amelia requires both the multivariate normality and the MAR assumption (or the simpler special case of MCAR). Note that the MAR assumption can be made more plausible by including additional variables in the dataset $D$ in the imputation dataset than just those eventually envisioned to be used in the analysis model. ### Algorithm In multiple imputation, we are concerned with the complete-data parameters, $\theta = (\mu, \Sigma)$. When writing down a model of the data, it is clear that our observed data is actually $D^{obs}$ and $M$, the missingness matrix. Thus, the likelihood of our observed data is $p(D^{obs}, M|\theta)$. 
Using the MAR assumption\footnote{There is an additional assumption hidden here that $M$ does not depend on the complete-data parameters.}, we can break this up, \begin{align} p(D^{obs},M|\theta) = p(M|D^{obs})p(D^{obs}|\theta). \end{align} As we only care about inference on the complete data parameters, we can write the likelihood as \begin{align} L(\theta|D^{obs}) &\propto p(D^{obs}|\theta), \end{align} which we can rewrite using the law of iterated expectations as \begin{align} p(D^{obs}|\theta) &= \int p(D|\theta) dD^{mis}. \end{align} With this likelihood and a flat prior on $\theta$, we can see that the posterior is \begin{equation} p(\theta | D^{obs}) \propto p(D^{obs}|\theta) = \int p(D|\theta) dD^{mis}. \end{equation} The main computational difficulty in the analysis of incomplete data is taking draws from this posterior. The EM algorithm [@DemLaiRub77] is a simple computational approach to finding the mode of the posterior. Our EMB algorithm combines the classic EM algorithm with a bootstrap approach to take draws from this posterior. For each draw, we bootstrap the data to simulate estimation uncertainty and then run the EM algorithm to find the mode of the posterior for the bootstrapped data, which gives us fundamental uncertainty too [see @HonKin10 for details of the EMB algorithm]. Once we have draws of the posterior of the complete-data parameters, we make imputations by drawing values of $D^{mis}$ from its distribution conditional on $D^{obs}$ and the draws of $\theta$, which is a linear regression with parameters that can be calculated directly from $\theta$. ### Analysis In order to combine the results across $m$ data sets, first decide on the quantity of interest to compute, such as a univariate mean, regression coefficient, predicted probability, or first difference. Then, the easiest way is to draw $1/m$ simulations of $q$ from each of the $m$ data sets, combine them into one set of $m$ simulations, and then to use the standard simulation-based methods of interpretation common for single data sets @KinTomWit00. Alternatively, you can combine directly and use as the multiple imputation estimate of this parameter, $\bar{q}$, the average of the $m$ separate estimates, $q_j$ $(j=1,\dots,m)$: \begin{equation} \bar{q}=\frac{1}{m}\sum^{m}_{j=1}q_j. \end{equation} The variance of the point estimate is the average of the estimated variances from *within* each completed data set, plus the sample variance in the point estimates *across* the data sets (multiplied by a factor that corrects for the bias because $m<\infty$). Let $SE(q_j)^2$ denote the estimated variance (squared standard error) of $q_j$ from the data set $j$, and $S^{2}_{q}=\Sigma^{m}_{j=1}(q_j-\bar{q})^2/(m-1)$ be the sample variance across the $m$ point estimates. The standard error of the multiple imputation point estimate is the square root of \begin{equation} SE(q)^2=\frac{1}{m}\sum^{m}_{j=1}SE(q_j)^2+S^2_q(1+1/m). \end{equation} ## References Amelia/NEWS0000644000176200001440000001367414335240021012146 0ustar liggesusers// // Amelia II - User visible changes // // // == 1.7.6 (24 Nov 2019) == * Added "x.las" and "gap.xaxis" arguments to missmap() to allow for more control over the axis. * Added xlim/ylim arguments to disperse() * Moved documentation to roxygen * Fixed bug in compare.density() when only 1 missing value. 
* Minor bug fixes == 1.7.5 (07 May 2018) == * Fixed bug with factor names under perfect collinearity * Added "draws" argument to overimp to control number of overimputation draws * Fix issue with tibbles and tscsPlot() * Fix issues with missmap() * Fixed issue with iterHist indicators being reversed == 1.7.4 (21 Nov 2015) == * Fixed issue with log axes in overimpute * Allow for vector in 'main' argument in tscsPlot() * Moved a collinearity check from error to warning. * Handle subsets better in moPrep() * tscsPlot() won't throw an error when cs is unspecified and plotall=TRUE * Fixed other small bugs and issues == 1.7.3 (14 Nov 2014) == * Fixed bug with overimp not being respected * Added an argument boot.type='none' to amelia() to allow it to run on the original, non-bootstrapped data * Fixed bug in plot.amelia() with matrix inputs * Fixed bug with lower bounds not being respected * Made compatible with most recent versions of Rcpp and RcppArmadillo == 1.7.2 (08 Jun 2013) == * Bug fixes to priors (especially important for multiple overimputation). * Fixed issue with names of imputations for integration with Zelig. == 1.7.1 (24 Mar 2013) == * Speed improvement (thanks to Paul Johnson). * Amelia now requires R>=2.13.5 * missmap() now displays correctly when data is completely observed. * An error is now called when users try to use overimpute() on a variable marked as nominal. * Fixed a bug when all imputations resulted in uninvertible covariance matrices. * Fixed a bug where incorrectly setting the emburn argument could cause a segfault. * Various package cleanups for CRAN compatibility. == 1.7 (10 Feb 2013) == * Ported core EM algorithm to C++. Speed should increase. * Plots in AmeliaView should now use Quartz on Mac OS X instead of X11. * Amelia now requires R >=2.14.0. * Amelia now can run its imputations in parallel using infrastructure from R's parallel package. Note that R < 2.15.3 will crash if parallel is used while tcltk is loaded (or has been loaded and then unloaded). This will be fixed in R 2.15.3 (the patched version of 2.15.2) and we will require R>=2.15.3 when that version is released. * Fixed bug with priors not working correctly. * Fixed bug with character variables set to nominal. == 1.6 (22 Feb 2012) == * Added a transform function to create transformed variables in the imputed datasets. * Added a mi.meld() function that can combine quantities of interest using the Rubin rules. * Added a subset arugment to overimpute. * write.amelia() can now create a stacked/long imputed datatset (also updated to AmeliaView) * Fixed a bug in moPrep (Thanks to Jeff Arnold for the patch) * missmap() has an arugment to not re-order the variables. == 1.5-4 == * Fixed a bug with error messages. == 1.5-3 == * Fixed a bug with completely missing rows in the tscsPlot(). == 1.5-2 (26 Apr 2011) == * Fixed a bug in the handling of priors. == 1.5-1 (23 Nov 2010) == * Fixed a bug in the new GUI where it didn't respect the "intercs" option. == 1.5-0 (23 Nov 2010) == * Major changes to the AmeliaView GUI. == 1.2-18 (4 Nov 2010) == * Fixed a bug when all variables are set to nominal or ordinal. == 1.2-17 (10 May 2010) == * Fixed a bug with the 'ask' argument when using "plot" on an 'amelia' object. == 1.2-16 (20 Mar 2010) == * Fixed a bug when priors specified. * When priors are used, Amelia now tries to use starting values with the prior-filled data. == 1.2-15 (20 Feb 2010) == * Fixed a bug when only 1 variable is not an ID variable or a nominal/ordinal variable. 
* Fixed a bug with the naming of columns in the imputation process. == 1.2-14 (16 Nov 2009) == * Fixed a bug that "ords" variables would return multiple copies of the same level. == 1.2-13 (09 Aug 2009) == * Fixed a small bug in the error checking routines that handled nominal variables. == 1.2-12 (11 Jul 2009) == * Fixed a bug in AmeliaView that caused it to crash. == 1.2-11 (10 Jul 2009) == * Minor bugfixes in removing test code from AmeliaView() and handling of the priors. == 1.2-10 (07 Jul 2009) == * Fixed a bug in the error checking routine that occurred when users put all of their variables into one of (idvars, noms, ords, ts, cs). == 1.2-9 (02 Jul 2009) == * Fixed typos in the manual with regard to ridge priors and clarified the advice about them. === 1.2-8 (01 Jul 2009) == * Major update to the Amelia manual (now compiled as a vignette using Sweave). * Changed a typo that stated values were the "percent missing" when they should have been "fraction missing." This is fixed. === 1.2-7 (29 Jul 2009) == * In the amelia output, mu and covMatrices now have relevant dimension names to be able to tell which column which. * Fixed a bug in the handling of priors that may have affected answers, but not significantly. * The missmap() function can now accept any matrix or data.frame, not just Amelia output. This allows for drawing a missingness map before running amelia(). == 1.2-0 (09 Apr 2009) == * Amelia output is now an instance of the S3 class 'amelia'. * Imputations are now stored in a list of length 'm' (the number of imputations) in output$imputations, which is of the class 'mi', making it simple to pass to Zelig. * Amelia output contains a matrix of means (one column for each imputation) and an array of covariance matrices. These are the posterior modes found by the EM algorithm in each imputation. Amelia/R/0000755000176200001440000000000014335240021011635 5ustar liggesusersAmelia/R/with.R0000644000176200001440000000212514335240021012733 0ustar liggesusers##' Evaluate an R expression in the environments constructed from the ##' imputed data sets of a call to \code{amelia} function. ##' ##' ##' @title Execute commands within each imputed data set ##' @param data imputation output from the \code{amelia} funtion. ##' @param expr expression to evaluate in each imputed data set in ##' \code{data}. ##' @param ... arguments to be passed to (future) methods. ##' @return a list the same length as \code{data$imputations} that ##' contains the output of the expression as evaluated in each imputed ##' data set of \code{data}. ##' @author Matt Blackwell ##' ##' @examples ##' data(africa) ##' a.out <- amelia(x = africa, cs = "country", ts = "year", logs = ##' "gdp_pc") ##' ##' imp.mods <- with(a.out, lm(gdp_pc ~ infl + trade)) ##' ##' mi.combine(imp.mods, conf.int = TRUE) ##' ##' @export with.amelia <- function(data, expr, ...) { expr <- rlang::enquo(expr) out <- vector("list", length(data$imputations)) for (j in seq_along(data$imputations)) { out[[j]] <- rlang::eval_tidy(expr, data$imputations[[j]]) } class(out) <- "amest" out } Amelia/R/summary.mi.R0000644000176200001440000000032714335240021014063 0ustar liggesuserssummary.mi <- function(object, ...) 
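## Report the dimensions of the imputation list, then summarize the m
## imputed data sets stacked on top of one another. A hedged usage
## sketch (assuming `a.out` is amelia() output, whose $imputations
## component carries the "mi" class, so summary() dispatches here):
##   summary(a.out$imputations)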
{
  m <- length(object)
  nv <- length(object[[1]])
  nr <- nrow(object[[1]])
  cat("[", m, "imputations,", nv, "variables,", nr, "rows]\n\n")
  summary(do.call(rbind, object))
}
Amelia/R/combine.R0000644000176200001440000000655014335240021013402 0ustar liggesusersest.matrix <- function(x, name) {
  vals <- lapply(x, function(z) z[[name]])
  out <- do.call(cbind, vals)
  out
}

##' Combine results from statistical models run on multiply imputed
##' data sets using the so-called Rubin rules.
##'
##' @title Combine results from analyses on imputed data sets
##' @param x List of output from statistical models estimated on
##' different imputed data sets, as outputted by \code{with(a.out,
##' expr)} where \code{a.out} is the output of a call to \code{amelia}.
##' @param conf.int Logical indicating if confidence intervals should
##' be computed for each quantity of interest (default is \code{FALSE}).
##' @param conf.level The confidence level to use for the confidence
##' interval if \code{conf.int = TRUE}. Defaults to 0.95, which
##' corresponds to a 95 percent confidence interval.
##' @return Returns a \code{tibble} that contains:
##' \describe{
##' \item{term}{Name of the coefficient or parameter.}
##' \item{estimate}{Estimate of the parameter, averaging across imputations.}
##' \item{std.error}{Standard error of the estimate, accounting for
##' imputation uncertainty.}
##' \item{statistic}{Value of the t-statistic for the estimated
##' parameter.}
##' \item{p.value}{p-value associated with the test of a null
##' hypothesis that the true coefficient is zero. Uses the
##' t-distribution with imputation-adjusted degrees of freedom.}
##' \item{df}{Imputation-adjusted degrees of freedom for each
##' parameter.}
##' \item{r}{Relative increase in variance due to nonresponse.}
##' \item{miss.info}{Estimated fraction of missing information.}
##' \item{conf.low}{Lower bound of the estimated confidence interval.
##' Only present if \code{conf.int = TRUE}.} ##' } ##' @author Matt Blackwell ##' ##' @examples ##' data(africa) ##' a.out <- amelia(x = africa, cs = "country", ts = "year", logs = ##' "gdp_pc") ##' ##' imp.mods <- with(a.out, lm(gdp_pc ~ infl + trade)) ##' ##' mi.combine(imp.mods, conf.int = TRUE) ##' ##' @export mi.combine <- function(x, conf.int = FALSE, conf.level = 0.95) { if (requireNamespace("broom", quietly = TRUE)) { tidiers <- grep("^tidy\\.", ls(getNamespace("broom")), value = TRUE) tidiers <- gsub("tidy\\.", "", tidiers) } else { rlang::abort("{broom} package required for mi.combine") } if (!(class(x[[1L]]) %in% tidiers)) { rlang::abort("analysis model does not have tidy() method.") } mi_tidy <- lapply(x, function(x) broom::tidy(x)) m <- length(mi_tidy) out <- mi_tidy[[1L]] ests <- est.matrix(mi_tidy, "estimate") ses <- est.matrix(mi_tidy, "std.error") wi.var <- rowMeans(ses ^ 2) out$estimate <- rowMeans(ests) diffs <- sweep(ests, 1, rowMeans(ests)) bw.var <- rowSums(diffs ^ 2) / (m - 1) out$std.error <- sqrt(wi.var + bw.var * (1 + 1 / m)) r <- ((1 + 1 / m) * bw.var) / wi.var df <- (m - 1) * (1 + 1 / r) ^ 2 miss.info <- (r + 2 / (df + 3)) / (r + 1) out$statistic <- out$estimate / out$std.error out$p.value <- 2 * stats::pt(out$statistic, df = df, lower.tail = FALSE) out$df <- df out$r <- r out$miss.info <- miss.info if (conf.int) { t.c <- stats::qt(1 - (1 - conf.level) / 2, df = df, lower.tail = FALSE) out$conf.low <- out$estimate - t.c * out$std.error out$conf.high <- out$estimate + t.c * out$std.error } out } Amelia/R/diag.r0000644000176200001440000012512114335240021012726 0ustar liggesusers## diag.r ## amelia diagnostic functins ## ## 05/05/06 mb - added amelia.arg compatibility ## 07/05/06 jh - compare: changed how variable names are found, changed titles/labels, set x-axis values in matplot, colours for no imputations ## overimpute: added new m-name in output, ## 09/05/06 mb - overimpute: added frontend check for overimpute. ## 15/05/06 jh - overimpute: stacking of original data, and various graphics adjustments ## 01/06/06 mb - added "gethull" and "disperse" for overdispersion diagnostic ## 19/07/06 mb - moved handling of arglists to prep. ## 01/12/06 mb - can't compare non-numerics, only use the relevant columns when ## building compare ## 13/12/06 mb - changed for new priors. ## 26/03/07 jh - overimpute: excluded polynomials of time from missingness count, reordered ploting of ci's (smallest last), allow variable name as var argument ## 28/03/07 jh - disperse: changed tolerance and empri handling. ## 03/04/07 jh - disperse: changed 1d plot settings, number of colors, minor edits to "patt" construction. ## 10/04/07 jh - created sigalert function to view disperse principal components. ## 22/07/08 mb - good coding update: T->TRUE/F->FALSE ## 10/02/09 mb - compare: added lwd, col, main, lab, etc for user ## control, added scale so that users can control scaling, ## uses amelia class ## overimpute: uses amelia class, added lwd, col, main, lab, etc for user ## disperse: now uses amelia class ## 02/21/12 jh - added mi.meld to combine multiply imputed quantities of interest and se's. ## 10/30/12 jh - tscsPlot: expanded to allow to cycle through sets of cross sectional units efficiently. #' Compare observed versus imputed densities #' #' Plots smoothed density plots of observed and imputed values from output #' from the \code{amelia} function. #' #' @param output output from the function \code{amelia}. #' @param var column number or variable name of the variable to plot. 
#' @param col a vector of length 2 containing the color to plot the (1)
#' imputed density and (2) the observed density.
#' @param scaled a logical indicating if the two densities should be
#' scaled to reflect the difference in number of units in each.
#' @param lwd the line width of the density plots.
#' @param main main title of the plot. The default is to title the plot
#' using the variable name.
#' @param xlab the label for the x-axis. The default is the name of the
#' variable.
#' @param ylab the label for the y-axis. The default is "Relative Density."
#' @param legend a logical value indicating if a legend should be
#' plotted.
#' @param frontend a logical value used internally for the Amelia GUI.
#' @param ... further graphical parameters for the plot.
#'
#' @details This function first plots a density plot of the observed units for the
#' variable \code{var} in \code{col[2]}. Then the function plots a density plot of the mean
#' or modal imputations for the missing units in \code{col[1]}. If a
#' variable is marked "ordinal" or "nominal" with the \code{ords} or
#' \code{noms} options in \code{amelia}, then the modal imputation will
#' be used. If \code{legend} is \code{TRUE}, then a legend is plotted as well.
#'
#' @references
#' Abayomi, K., Gelman, A. and Levy, M. 2005. "Diagnostics for
#' Multivariate Imputations." \emph{Applied Statistics} 57(3): 273--291.
#'
#' @examples
#' data(africa)
#' a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
#' compare.density(a.out, var = "gdp_pc")
#'
#' @seealso For more information on how densities are computed,
#' \code{\link{density}}; other imputation diagnostics are
#' \code{\link{overimpute}}, \code{\link{disperse}}, and
#' \code{\link{tscsPlot}}.
#'
compare.density <- function(output, var, col = c("indianred", "dodgerblue"),
                            scaled = FALSE, lwd = 1, main, xlab, ylab,
                            legend = TRUE, frontend = FALSE, ...) {
  if (!("amelia" %in% class(output)))
    stop("The 'output' is not Amelia output.")

  ##data <- getOriginalData(output)
  data <- remove.imputations(output)

  ## Checks on whether the variable makes sense to plot.
  if (inherits(var, "character"))
    if (!(var %in% names(data)))
      stop("The variable name (var) doesn't correspond to a column in the data.")
    else var <- match(var, names(data))

  if (any(var > ncol(data), var < 0, (var %% 1) != 0))
    stop("The 'var' option points to a non-existent column.")

  if (var %in% output$arguments$idvar)
    stop("the variable selected was marked as an idvar")

  ## We need to clean the data to make sure that
  ## we're not going to run into NAs
  mcount <- sum(!is.na(output$imputations))
  imputed <- (1:output$m)[!is.na(output$imputations)]

  ## create an empty matrix to hold the imputed values from each imputation
  varimp <- matrix(NA, nrow(data), mcount)
  for (i in 1:mcount) {
    if (is.data.frame(data)) {
      varimp[,i] <- output$imputations[[imputed[i]]][[var]]
    } else {
      varimp[,i] <- output$imputations[[imputed[i]]][,var]
    }
  }
  if (var %in% c(output$arguments$noms, output$arguments$ords)) {
    leg.text <- "Modal Imputations"
    varimp <- apply(varimp, 1, function(x) as.numeric(names(which.max(table(x)))))
  } else {
    leg.text <- "Mean Imputations"
    varimp <- rowMeans(varimp)
  }

  if (frontend) {
    dev.new()
  }
  if (is.data.frame(data)) {
    vars <- data[[var]]
  } else {
    vars <- data[,var]
  }
  if (scaled)
    ratio <- sum(is.na(vars))/sum(!is.na(vars))
  else ratio <- 1

  varnames <- dimnames(data)[[2]]  # This works for both data.frames AND matrices.
  vname <- varnames[var]           # This works for both data.frames AND matrices.
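  ## If the variable has at least two missing cells, overlay the (optionally
  ## scaled) density of the mean/modal imputations on the density of the
  ## observed values; with one or zero missing cells, plot the density of the
  ## (essentially observed) variable and mark a single imputed value with a
  ## vertical line.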
if (sum(is.na(vars)) > 1) { oiDetect <- (sum(output$missMatrix[,var]) + sum(!is.na(vars))) > length(vars) if (missing(main)) { if (oiDetect) { main <- paste("Observed and Overimputed values of", vname) } else { main <- paste("Observed and Imputed values of", vname) } } if (missing(xlab)) { xlab <- paste(vname," -- Fraction Missing:", round(mean(is.na(vars)), digits = 3)) } if (missing(ylab)) { ylab <- "Relative Density" } xmiss <- density(varimp[output$missMatrix[, var]], na.rm = TRUE) xobs <- density(vars[!is.na(vars)], na.rm = TRUE) compplot <- matplot(x = cbind(xmiss$x, xobs$x), y = cbind(ratio * xmiss$y, xobs$y), xlab = xlab, ylab = ylab, type = "l", lwd = lwd, lty = 1, main = main, col = col, ...) if (legend) { legend("topright", legend = c(leg.text, "Observed Values"), col = col, lty = c(1,1), bg = 'gray90', lwd = lwd) } } else { if (missing(main)) { main <- paste("Observed values of",vname) } if (missing(xlab)) { xlab <- vname } if (missing(ylab)) { ylab <- "Relative Density" } compplot <- plot(density(varimp, na.rm = TRUE), col = col[2], main = main,...) if (sum(is.na(vars)) == 1) { abline(v = varimp[output$missMatrix[, var]], col = col[1]) } if (legend) { legend("topright", legend = c("Mean Imputations","Observed Values"), col = col, lty = c(1,1), bg = 'gray90') } } invisible() } #' Overimputation diagnostic plot #' #' Treats each observed value as missing and imputes from the imputation #' model from \code{amelia} output. #' #' @param output output from the function \code{amelia}. #' @param var column number or variable name of the variable to #' overimpute. #' @param draws the number of draws per imputed dataset to generate #' overimputations. Total number of simulations will \code{m * #' draws} where \code{m} is the number of imputations. #' @param subset an optional vector specifying a subset of observations #' to be used in the overimputation. #' @param legend a logical value indicating if a legend should be #' plotted. #' @param xlab the label for the x-axis. The default is "Observed Values." #' @param ylab the label for the y-axis. The default is "Imputed Values." #' @param main main title of the plot. The default is to smartly title the plot #' using the variable name. #' @param frontend a logical value used internally for the Amelia GUI. #' @param ... further graphical parameters for the plot. #' #' @details #' This function temporarily treats each observed value in #' \code{var} as missing and imputes that value based on the imputation #' model of \code{output}. The dots are the mean imputation and the #' vertical lines are the 90\% percent confidence intervals for #' imputations of each observed value. The diagonal line is the \eqn{y=x} #' line. If all of the imputations were perfect, then our points would #' all fall on the line. A good imputation model would have about 90\% of #' the confidence intervals containing the truth; that is, about 90\% of #' the vertical lines should cross the diagonal. #' #' The color of the vertical lines displays the fraction of missing #' observations in the pattern of missingness for that #' observation. The legend codes this information. Obviously, the #' imputations will be much tighter if there are more observed covariates #' to use to impute that observation. #' #' The \code{subset} argument evaluates in the environment of the #' data. That is, it can but is not required to refer to variables in the #' data frame as if it were attached. 
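#'
#' @examples
#' \dontrun{
#' ## A minimal sketch (not run by default): overimpute a logged variable
#' ## from the example africa data shipped with Amelia.
#' data(africa)
#' a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc")
#' overimpute(a.out, var = "gdp_pc")
#' }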
#' #' @return A list that contains (1) the row in the original data #' (\code{row}), (2) the observed value of that observation #' (\code{orig}), (2) the mean of the overimputations #' (\code{mean.overimputed}), (3) the lower bound of the 95\% #' confidence interval of the overimputations #' (\code{lower.overimputed}), (4) the upper bound of the 95\% #' confidence interval of the overimputations #' (\code{upper.overimputed}), (5) the fraction of the variables #' that were missing for that observation in the original data #' (\code{prcntmiss}), and (6) a matrix of the raw overimputations, #' with observations in rows and the different draws in columns (\code{overimps}). #' #' @seealso Other imputation diagnostics are #' \code{\link{compare.density}}, \code{\link{disperse}}, and #' \code{\link{tscsPlot}}. overimpute <- function(output, var, draws = 20, subset, legend = TRUE, xlab, ylab, main, frontend = FALSE, ...) { if (!("amelia" %in% class(output))) stop("The 'output' is not Amelia output.") data <- getOriginalData(output) ## via the subset.data.frame function if (missing(subset)) { r <- TRUE } else { e <- substitute(subset) r <- eval(e, data, parent.frame()) if (!is.logical(r)) { stop("'subset' must evaluate to logical") } r <- r & !is.na(r) if (sum(r) == 0) { stop("no observations in the subset") } } data <- data[r,] origAMr1 <- is.na(data) ## Allow character names as arguments for "var" with data.frames if(is.character(var)){ if(!is.data.frame(data)){ stop("var must be identified by column number as dataset is not a data frame.") } else { nomnames <- colnames(output$imputations[[1]])[output$arguments$noms] if (var %in% nomnames) { stop("Cannot overimpute variables set to be nominal") } varpos <- match(var, colnames(data)) if(is.na(varpos)){ stop("The name provided for var argument does not exist in the dataset provided.") } else { var <- varpos } } } ## The argument list for an amelia output is now ## at "output$arguments" prepped <- amelia.prep(x = data, arglist = output$arguments, incheck = FALSE) stacked.var <- match(var, prepped$subset.index[prepped$p.order]) subset.var <- match(var, prepped$subset.index) if (!is.null(prepped$blanks)) fully.missing <- origAMr1[-prepped$blanks, var][prepped$n.order] else fully.missing <- origAMr1[, var][prepped$n.order] if (is.na(stacked.var)) { if (frontend) tcltk::tkmessageBox(message="The variable you selected doesn't exist in the Amelia output becuase it wasn't imputed.",icon="error",type="ok") stop("var doesn't exist in the amelia output. It either didn't get imputed or is out of the range of columns.") } means <- c() lowers <- c() uppers <- c() pcnts <- c() color <- c() AMr1 <- is.na(prepped$x) ## if (sum(!AMr1[,stacked.var]) == 0){ ## if (frontend) { ## tkmessageBox(parent = getAmelia("gui"), ## message="The variable needs to have at least one fully observed cell.",icon="error",type="ok") ## } ## stop("function needs at least one fully observed cell in 'var'.") ## } AMr1[,stacked.var] <- TRUE AMp <- ncol(prepped$x) imphold <- matrix(NA, nrow = nrow(prepped$x), ncol = output$m * draws) for (i in 1:nrow(prepped$x)) { if (fully.missing[i]) { next() } x <- prepped$x[i,,drop=FALSE] x[1, stacked.var] <- NA o <- !is.na(x) miss <- !o x[is.na(x)] <- 0 oo <- 1 * o mm <- 1 * miss #o<-!AMr1[i,] #o[stacked.var]<-FALSE pcntmiss <- (sum(miss))/(length(miss)-sum(prepped$index==0)) # Does not include time polynomials (index==0) in the denominator ## These are always fully observed by construction, but auxiliary. 
## Leaves constructed lags and ## leads, and nominal variables ## in count, however. conf <- c() for (k in 1:output$m) { ## The theta matrix is now stored in an array with ## dimensions c(vars+1,vars+1,m), so this grabs ## the kth theta matrix. thetareal <- output$theta[,,k] xx <- matrix(x, draws, AMp, byrow = TRUE) rr <- matrix(AMr1[i,], draws, AMp, byrow = TRUE) xc <- .Call("ameliaImpute", xx, rr, oo, mm, c(1, nrow(xx) + 1), thetareal, NULL, NULL, NULL, PACKAGE = "Amelia") conf <- c(conf, xc[, stacked.var]) } scaled.conf <- (conf * prepped$scaled.sd[subset.var]) + prepped$scaled.mu[subset.var] varlog <- match(var, prepped$logs) if (!is.na(varlog)) { scaled.conf <- untransform(as.matrix(scaled.conf), logs = 1, xmin = prepped$xmin[varlog], sqrts = NULL, lgstc = NULL) } if (!is.na(match(var,prepped$sqrts))) { scaled.conf <- untransform(as.matrix(scaled.conf), logs = NULL, xmin = NULL, sqrts = 1, lgstc = NULL) } if (!is.na(match(var,prepped$lgstc))) { scaled.conf <- untransform(as.matrix(scaled.conf), logs = NULL, xmin = NULL, sqrts = NULL, lgstc = 1) } ##colors are based on rainbow roygbiv l->r is higher missingness \ blue <- rgb(0,0,1, alpha = 0.75) green <- rgb(0,.75,0, alpha = 0.75) orange <- rgb(1, 0.65,0, alpha = 0.75) tomato <- rgb(1, 0.39, 0.28, alpha = 0.75) red <- rgb(0.75, 0, 0, alpha = 0.75) spectrum <- c(blue, green, orange, tomato, red) if (pcntmiss < .20) color <- c(color, spectrum[1]) else if (pcntmiss >= .20 && pcntmiss < .40) color <- c(color, spectrum[2]) else if (pcntmiss >= .40 && pcntmiss < .60) color <- c(color, spectrum[3]) else if (pcntmiss >= .60 && pcntmiss < .80) color <- c(color, spectrum[4]) else if (pcntmiss >= .80) color <- c(color, spectrum[5]) imphold[i,] <- scaled.conf means <- c(means, mean(scaled.conf)) lowers <- c(lowers, sort(scaled.conf)[round(output$m * draws * 0.05)]) uppers <- c(uppers, sort(scaled.conf)[round(output$m * draws * 0.95)]) pcnts <- c(pcnts, pcntmiss) } #AMr1<-is.na(prepped$x[,stacked.var]) #partial.n.order<-prepped$n.order[!origAMr1] if (is.data.frame(data)) { xplot <- data[[var]] } else { xplot <- data[,var] } if (is.null(prepped$blanks)) { xplot <- xplot[prepped$n.order][!fully.missing] } else { xplot <- xplot[-prepped$blanks][prepped$n.order][!fully.missing] } addedroom <- (max(uppers) - min(lowers)) * 0.1 if (!hasArg(log)) { this.ylim <- range(c(lowers - addedroom, uppers)) legpos <- "bottomright" } else { this.ylim <- range(c(lowers[lowers > 0], uppers + addedroom)) legpos <- "topright" } if (missing(xlab)) { xlab <- "Observed Values" } if (missing(ylab)) { ylab <- "Imputed Values" } if (missing(main)) { main <- paste("Observed versus Imputed Values of",colnames(data)[var]) } if (frontend) { dev.new() } ci.order <- order(uppers - lowers, decreasing = TRUE) # Allows smallest CI's to be printed last, and thus not buried in the plot. overplot <- plot(xplot[ci.order], means[ci.order], xlab = xlab, ylab = ylab, ylim = this.ylim, type = 'p', main = main, col = color[ci.order], pch = 19,...) 
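  ## Add the 90% overimputation interval for each observed cell as a vertical
  ## segment; the color encodes the fraction of missingness in that row's
  ## covariates, as assigned above.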
segments(xplot[ci.order], lowers[ci.order], xplot[ci.order], uppers[ci.order], col = color[ci.order]) if (legend) { legend(legpos, legend = c(" 0-.2",".2-.4",".4-.6",".6-.8",".8-1"), col = spectrum, lty = c(1,1), horiz = TRUE, bty = "n") } abline(0,1) out <- list(row = prepped$n.order[!fully.missing], orig = xplot, mean.overimputed = means, lower.overimputed = lowers, upper.overimputed = uppers, prcntmiss = pcnts, overimps = imphold[!is.na(imphold[,1]),]) invisible(out) } gethull <- function(st,tol,rots) { stvec <- st for (i in 1:length(st)) { addedvec <- rep(0,length(st)) addedvec[i] <- tol * 100 newvec <- cbind(st + addedvec, st - addedvec) stvec <- cbind(stvec, newvec) } reduced.hull <- t(rots) %*% stvec return(reduced.hull) } #' Overdispersed starting values diagnostic for multiple imputation #' #' A visual diagnostic of EM convergence from multiple overdispersed #' starting values for an output from \code{amelia}. #' #' @param output output from the function \code{amelia}. #' @param m the number of EM chains to run from overdispersed starting values. #' @param dims the number of principle components of the parameters to #' display and assess convergence on (up to 2). #' @param p2s an integer that controls printing to screen. 0 (default) #' indicates no printing, 1 indicates normal screen output and 2 #' indicates diagnostic output. #' @param frontend a logical value used internally for the Amelia GUI. #' @param xlim limits of the plot in the horizontal dimension. #' @param ylim limits of the plot in vertical dimension. #' @param ... further graphical parameters for the plot. #' #' @details This function tracks the convergence of \code{m} EM chains which start #' from various overdispersed starting values. This plot should give some #' indication of the sensitivity of the EM algorithm to the choice of #' starting values in the imputation model in \code{output}. If all of #' the lines converge to the same point, then we can be confident that #' starting values are not affecting the EM algorithm. #' #' As the parameter space of the imputation model is of a #' high-dimension, this plot tracks how the first (and second if #' \code{dims} is 2) principle component(s) change over the iterations of #' the EM algorithm. Thus, the plot is a lower dimensional summary of the #' convergence and is subject to all the drawbacks inherent in said #' summaries. #' #' For \code{dims==1}, the function plots a horizontal line at the #' position where the first EM chain converges. Thus, we are checking #' that the other chains converge close to that horizontal line. For #' \code{dims==2}, the function draws a convex hull around the point of #' convergence for the first EM chain. The hull is scaled to be within #' the tolerance of the EM algorithm. Thus, we should check that the #' other chains end up in this hull. #' #' @seealso Other imputation diagnostics are #' \code{\link{compare.density}}, \code{\link{disperse}}, and #' \code{\link{tscsPlot}} disperse <- function(output, m = 5, dims = 1, p2s = 0, frontend = FALSE, ..., xlim = NULL, ylim = NULL) { if (!("amelia" %in% class(output))) stop("The 'output' is not Amelia output.") ## The original data is the imputed data with the ## imputations marked to NA. 
These two lines do that data <- getOriginalData(output) if (frontend) { requireNamespace("tcltk") putAmelia("output.log", c(getAmelia("output.log"), "==== Overdispersion Output ====\n")) } # prep the data and arguments prepped<-amelia.prep(x=data, arglist=output$arguments) if (p2s) cat("-- Imputation", "1", "--") if (frontend) { putAmelia("output.log", c(getAmelia("output.log"), paste("-- Imputation","1","--\n"))) } flush.console() # run EM, but return it with the theta at each iteration thetanew <- emarch(prepped$x, p2s = p2s, thetaold = NULL, tolerance = prepped$tolerance, startvals = 0, priors = prepped$priors, empri = prepped$empri, frontend = frontend, allthetas = TRUE, collect = FALSE) #change 4 # thetanew is a matrix whose columns are vectorized upper triangles of theta # matrices for each iteration. thus, there are k(k+1)/2 rows. impdata <- thetanew$thetanew # we'll put the theta of the last iteration into a new starting theta startsmat <- matrix(0, ncol(prepped$x) + 1, ncol(prepped$x) + 1) startsmat[upper.tri(startsmat, TRUE)] <- c(-1, impdata[, ncol(impdata)]) startsmat <- t(startsmat) startsmat[upper.tri(startsmat, TRUE)] <- c(-1, impdata[, ncol(impdata)]) iters <- nrow(thetanew$iter.hist) + 1 for (i in 2:m) { if (p2s) cat("-- Imputation", i, "--\n") if (frontend) { putAmelia("output.log", c(getAmelia("output.log"), paste("-- Imputation",i,"--\n"))) } # get a noisy sample of data from the that starting value (which is the # Amelia answer) and use that to estimate a new starting theta (mus/vcov) newstarts <- rmvnorm(round(2.5 * ncol(prepped$x)), startsmat[1,2:ncol(startsmat)], startsmat[2:nrow(startsmat),2:nrow(startsmat)]) startcov <- var(newstarts) startmus <- colMeans(newstarts) newstartsmat <- matrix(-1, ncol(prepped$x) + 1, ncol(prepped$x) + 1) newstartsmat[2:nrow(startsmat),2:nrow(startsmat)] <- startcov newstartsmat[1,2:nrow(startsmat)] <- startmus newstartsmat[2:nrow(startsmat),1] <- startmus # grab the iteration history of the thetas thetanew <- emarch(prepped$x, p2s = p2s, thetaold = newstartsmat, tolerance = prepped$tolerance, startvals = 0, priors = prepped$priors, empri = prepped$empri, frontend = frontend, allthetas = TRUE, collect = FALSE) # change 5 impdata <- cbind(impdata, thetanew$thetanew) iters <- c(iters, nrow(thetanew$iter.hist) + 1) } if (dims == 1) comps <- c(1) else comps <- c(1,2) # reduce the dimenionality from k(k+1)/2 to 1 or 2 via principle components rotations <- prcomp(t(impdata))$rotation[, comps] reduced.imps <- t(rotations) %*% impdata cols <- rainbow(m) # plot the imputations if (frontend) { dev.new() } if (dims == 1) { addedroom <- (max(reduced.imps) - min(reduced.imps)) * 0.1 x <- seq(iters[1]) if (is.null(xlim)) xlim <- c(0, max(iters)) if (is.null(ylim)) ylim <- range(c(reduced.imps - addedroom, reduced.imps)) y <- reduced.imps[1, 1:iters[1]] patt <- seq(1, length(x) - 1) plot(x, y, col = 1, main = "Overdispersed Start Values", xlab = "Number of Iterations", ylab = "Largest Principle Component", xlim = xlim, ylim = ylim, type = "n") segments(x[patt], y[patt], x[patt + 1], y[patt + 1], col = cols[1]) for (i in 2:length(iters)) { x <- seq(iters[i]) y <- reduced.imps[1, (sum(iters[1:(i-1)])+1):(sum(iters[1:i]))] patt <- seq(1, length(x)-1) segments(x[patt], y[patt], x[patt+1], y[patt+1], col=cols[i]) #points(x,y,col=i) } abline(h = reduced.imps[iters[1]], lwd = 2) legend("bottomright", legend = c("Convergence of original starting values"), lwd = 2, bty = "n") } else { xrange <- c((min(reduced.imps[1,])), (max(reduced.imps[1,]))) yrange <- 
c((min(reduced.imps[2,])), (max(reduced.imps[2,]))) if (is.null(xlim)) xlim <- xrange if (is.null(ylim)) ylim <- yrange plot(reduced.imps[1,1:iters[1]], reduced.imps[2,1:iters[1]], type = "n", main = "Overdispersed Starting Values", xlab = "First Principle Component", ylab = "Second Principle Component", col=cols[1], xlim = xlim, ylim = ylim) for (i in 2:length(iters)) { x <- reduced.imps[1, (sum(iters[1:(i-1)])+1):(sum(iters[1:i]))] y <- reduced.imps[2, (sum(iters[1:(i-1)])+1):(sum(iters[1:i]))] patt <- c() xdiffs <- diff(x) ydiffs <- diff(y) veclength <- sqrt(xdiffs^2+ydiffs^2) for (j in 1:length(xdiffs)) if (veclength[j] > xinch(1/500)) patt <- c(patt,j) if (!is.null(patt)) arrows(x[patt], y[patt], x[patt + 1], y[patt + 1], length = .1, col = cols[i]) patt <- seq(1, length(x) - 1) segments(x[patt], y[patt], x[patt+1], y[patt+1], col = cols[i]) } x <- reduced.imps[1,1:iters[1]] y <- reduced.imps[2,1:iters[1]] xdiffs <- diff(x) ydiffs <- diff(y) veclength <- sqrt(xdiffs^2+ydiffs^2) inchlength <- sqrt(sum(xyinch(1/500)^2)) patt <- c() for (j in 1:length(xdiffs)) if (veclength[j] > inchlength) patt <- c(patt,j) #if (!is.null(patt)) # arrows(x[patt],y[patt],x[patt+1],y[patt+1],length=.15,col=1,lwd=5) patt <- seq(1, length(x) -1) segments(x[patt], y[patt], x[patt + 1], y[patt + 1], col = cols[1], lwd = 1) dists <- gethull(st = impdata[ ,iters[1]], tol = prepped$tolerance, rots = rotations) convexhull <- chull(t(dists)) convexhull <- c(convexhull, convexhull[1]) lines(t(dists)[convexhull,], col = "orange", pch = 19, lwd = 2) abline(h = 0, lty = 2) abline(v = 0, lty = 2) } #if (frontend) # tkdestroy(getAmelia("tcl.window")) out <- list(impdat = impdata, p.order = prepped$p.order, index = prepped$index, iters = iters, rotations = rotations, dims = dims) invisible(out) } sigalert <- function(data, disperse.list, output, notorious = 5){ k <- length(disperse.list$p.order) + 1 # Construct Variable Names for all variables constructed in Imputation Model. # This uses the "index" which details all the variables included in the imputation model. # The index is in the unstacked variable order. # Possibly, if this is useful elsewhere, this might be moved to "prep.r". varnm <- NULL lag.count <- 0 lead.count <- 0 poly.count <- 0 unknown.count <- 0 for (i in 1:(k-1)) { if (identical(disperse.list$index[i], -0.5)) { lag.count <- lag.count + 1 varnm <- c(varnm, paste("lag", lag.count)) } else if (identical(disperse.list$index[i], 0.5)) { lead.count <- lead.count + 1 varnm <- c(varnm, paste("lead", lead.count)) } else if (identical(disperse.list$index[i],0)) { poly.count <- poly.count + 1 varnm <- c(varnm, paste("polytime", poly.count)) } else if(disperse.list$index[i] >= 1) { varnm <- c(varnm, names(data[disperse.list$index[i]])) # Check what this does with matricies? } else { unknown.count <- unknown.count + 1 varnm <- c(varnm, paste("unknown", unknown.count)) } } # WARNING: Currently assumes rotations is a vector. If dim=2, rotations is a matrix. # if(!identical(disperse.list$dims,1)){ # disperse.list$rotations<-disperse.list$rotations[1,] # } # This is a flag vector that identifies the largest values in the first principal component. largest.rotations <- disperse.list$rotations * 0 largest.rotations[order(abs(disperse.list$rotations),decreasing = TRUE)[1:notorious]] <- 1 # This is a matrix of the size of theta, which has a 1 in the positions of the largest # contributions to the first principal component. 
# (largest corresponding elements of disperse.list$rotations) map <- matrix(0, k, k) map[upper.tri(map, TRUE)] <- c(0, largest.rotations) map <- t(map) map[upper.tri(map, TRUE)] <- c(0, largest.rotations) map[c(1, disperse.list$p.order + 1), c(1, disperse.list$p.order + 1)] <- map # Rearrange to unstacked variable positions print(abs(map)) gtz<-function(a) return(sum(a) > 0) row.keep <- apply(map, 1, gtz) col.keep <- apply(map, 2, gtz) # This is the submatrix of rotations, reshaped as a theta matrix, with the largest elements. prcomp.matrix <- matrix(0,k,k) prcomp.matrix[upper.tri(prcomp.matrix, TRUE)] <- c(0, disperse.list$rotations) prcomp.matrix <- t(prcomp.matrix) prcomp.matrix[upper.tri(prcomp.matrix, TRUE)] <- c(0, disperse.list$rotations) prcomp.matrix[c(1,disperse.list$p.order+1),c(1,disperse.list$p.order+1)] <- prcomp.matrix # Rearrange to unstacked variable positions # This is the submatrix that we want to represent portal <- prcomp.matrix[row.keep,col.keep] portalsize <- ncol(portal) portal.row.names <- varnm[row.keep] # In symmetric matricies, these are the same. portal.col.names <- varnm[col.keep] # In symmetric matricies, these are the same. # This is a matrix that gives the relative rank of every element. col.map <- matrix(0, portalsize, portalsize) col.portal <- rank(abs(portal[upper.tri(portal, TRUE)])) col.map[upper.tri(col.map, TRUE)] <- col.portal col.map <- t(col.map) col.map[upper.tri(col.map, TRUE)] <- col.portal # This creates a continuous color palette of the correct size. n.unique <- sum(upper.tri(matrix(1, portalsize, portalsize), TRUE)) Lab.palette <- colorRampPalette(c("white", "yellow", "red"), space = "Lab") my.palette <- Lab.palette(n.unique) # Plot the submatrix to be represented. plot.new() plot.window(xlim = c(-2, portalsize + 1), ylim = c(1, portalsize + 3)) for(i in 1:portalsize){ text(x = 1, y = portalsize - i + 1 + 0.5, pos = 2, labels = portal.row.names[i]) # Row variable names for(j in 1:portalsize){ rect(xleft = j, ybottom = portalsize - i + 1, xright = j + 1, ytop = portalsize - i + 2, density = NULL, angle = 45, col = my.palette[col.map[i, j]], border = NULL, lty = par("lty"), lwd = par("lwd")) text(x = j + 0.5, y = portalsize - i + 1 + 0.5, labels = as.character(round(portal[i,j]*100)/100) ) # SHOULD FIND BETTER SIG FIGS HACK } } for(j in 1:portalsize){ text(x = j + 0.6, y = portalsize + 1.1, pos = 2, labels = portal.col.names[j], srt = 270) # Column variable names. } return(NULL) } #' Plot observed and imputed time-series for a single cross-section #' #' Plots a time series for a given variable in a given cross-section and #' provides confidence intervals for the imputed values. #' #' @param output output from the function \code{amelia}. #' @param var the column number or variable name of the variable to plot. #' @param cs the name (or level) of the cross-sectional unit to plot. #' Maybe a vector of names which will panel a window of plots #' @param draws the number of imputations on which to base the confidence #' intervals. #' @param conf the confidence level of the confidence intervals to plot #' for the imputated values. #' @param misscol the color of the imputed values and their confidence #' intervals. #' @param obscol the color of the points for observed units. #' @param xlab x axis label #' @param ylab y axis label #' @param main overall plot title #' @param pch point shapes for the plot. #' @param ylim y limits (y1, y2) of the plot. #' @param xlim x limits (x1, x2) of the plot. 
#' @param frontend a logical value for use with the \code{AmeliaView} GUI. #' @param plotall a logical value that provides a shortcut for ploting all unique values of the level. #' A shortcut for the \code{cs} argument, a TRUE value overwrites any #' \code{cs} argument. #' @param nr the number of rows of plots to use when ploting multiple cross-sectional #' units. The default value will try to minimize this value to create a roughly #' square representation, up to a value of four. If all plots do not fit on the #' window, a new window will be started. #' @param nc the number of columns of plots to use. See \code{nr} #' @param pdfstub a stub string used to write pdf copies of each window created by the #' plot. The default is not to write pdf output, but any string value will turn #' on pdf output to the local working directory. If the stub is \code{mystub}, #' then plots will be saved as \code{mystub1.pdf}, \code{mystub2.pdf}, etc. #' @param ... further graphical parameters for the plot. #' #' @details #' The \code{cs} argument should be a value from the variable set to the #' \code{cs} argument in the \code{amelia} function for this output. This #' function will not work if the \code{ts} and \code{cs} arguments were #' not set in the \code{amelia} function. If an observation has been #' overimputed, \code{tscsPlot} will plot both an observed and an imputed #' value. tscsPlot <- function(output, var, cs, draws = 100, conf = .90, misscol = "red", obscol = "black", xlab, ylab, main, pch, ylim, xlim, frontend = FALSE, plotall=FALSE, nr, nc, pdfstub, ...) { if (missing(var)) stop("I don't know which variable (var) to plot") if (missing(cs) && !plotall) stop("case name (cs) is not specified") if (is.null(output$arguments$ts) || is.null(output$arguments$cs)) stop("both 'ts' and 'cs' need to be set in the amelia output") if (!("amelia" %in% class(output))) stop("the 'output' is not Amelia output") data <- getOriginalData(output) # Allow character names as arguments for "var" with data.frames if (is.character(var)) { if (!is.data.frame(data)) { stop("'var' must be identified by column number as dataset is not a data frame") } else { varpos <- match(var, colnames(data)) if (is.na(varpos)) { stop("the name provided for 'var' argument does not exist in the dataset provided") } else { var <- varpos } } } csvarname <- output$arguments$cs tsvarname <- output$arguments$ts if (is.data.frame(data)) { csvar <- data[[csvarname]] tsvar <- data[[tsvarname]] } else { csvar <- data[,output$arguments$cs] tsvar <- data[,output$arguments$ts] } if (is.factor(csvar)) { units <- levels(csvar) } else { units <- unique(csvar) } if (plotall) { cs <- units } else { if (!(all(cs %in% units))) stop("some cross-section unit requested for the plot is not in the data") } # Picks a number of rows and columns if not user defined. 
Maxs out at 4-by-4, unless user defined if (missing(nr)) { nr <- min(4, ceiling(sqrt(length(cs)))) } if (missing(nc)) { nc <- min(4, ceiling(length(cs)/nr)) } if (length(cs)>1) { oldmfcol <- par()$mfcol par(mfcol = c(nr, nc)) } prepped <- amelia.prep(x = data, arglist = output$arguments) if (!is.null(prepped$blanks)) { data <- data[-prepped$blanks,] unit.rows <- which(csvar %in% cs) miss <- output$missMatrix[-prepped$blanks,][unit.rows, var] == 1 } else { unit.rows <- which(csvar %in% cs) miss <- output$missMatrix[unit.rows, var] == 1 } time <- tsvar[unit.rows] # These are the time values for rows appearing in some future plot imps.cs <- csvar[unit.rows] # These are the cs units for rows appearing in some future plot cross.sec <- prepped$x[!is.na(match(prepped$n.order, unit.rows)),] stacked.var <- match(var, prepped$subset.index[prepped$p.order]) subset.var <- match(var, prepped$subset.index) imps <- array(NA, dim = c(nrow(cross.sec), draws)) drawsperimp <- draws/output$m if (sum(miss) > 0) { for (i in 1:draws) { currtheta <- output$theta[,,ceiling(i/drawsperimp)] imps[,i] <- amelia.impute(x = cross.sec, thetareal = currtheta, bounds = prepped$bounds, priors = prepped$priors, max.resample = output$arguments$max.resample)[,stacked.var] } imps <- imps*prepped$scaled.sd[subset.var] + prepped$scaled.mu[subset.var] if (var %in% output$arguments$logs) { imps <- exp(imps) + prepped$xmin[which(var == output$arguments$logs)] } if (var %in% output$arguments$sqrt) { imps <- imps^2 } if (var %in% output$arguments$lgstc) { imps <- exp(imps)/(1 + exp(imps)) } outoforder <- match(prepped$n.order, unit.rows)[!is.na(match(prepped$n.order, unit.rows))] imps <- imps[order(outoforder),] } if (missing(pch)) pch <- 19 if (missing(xlab)) xlab <- "time" if (missing(ylab)) ylab <- names(data)[var] if (frontend) { dev.new() } if (!missing(main)) { main <- rep(main, length.out = length(cs)) } count <- 0 for(i in 1:length(cs)){ current.rows <- which(csvar == cs[i]) current.time <- tsvar[current.rows] flag <- imps.cs == cs[i] current.miss <- miss[flag] if (sum(current.miss) > 0) { current.imps <- imps[flag,] current.means <- rowMeans(current.imps) current.uppers <- apply(current.imps, 1, quantile, probs = (conf + (1 - conf)/2)) # THIS IS LIKELY SLOW current.lowers <- apply(current.imps, 1, quantile, probs = (1-conf)/2) # THIS IS LIKELY SLOW } else { current.means <- data[[var]][current.rows] current.uppers <- current.lowers <- current.means } cols <- ifelse(current.miss, misscol, obscol) current.main <- ifelse(missing(main), as.character(cs[i]), main[i]) # Allow title to be rolling if not defined if (missing(xlim)) { # Allow axes to vary by unit, if not defined current.xlim <- range(current.time) } else { current.xlim <- xlim } if (missing(ylim)) { current.ylim <- range(current.uppers,current.lowers,current.means) } else { current.ylim <- ylim } plot(x = current.time, y = current.means, col = cols, pch = pch, ylim = current.ylim, xlim = current.xlim, ylab = ylab, xlab = xlab, main = current.main, ...) segments(x0 = current.time, x1 = current.time, y0 = current.lowers, y1 = current.uppers, col = cols, ...) 
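    ## If this cross-section contains overimputed cells (values that are
    ## observed but were also imputed), plot the original observed points on
    ## top in the observed color so both are visible.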
oiDetect <- (sum(output$missMatrix[current.rows,var]) + sum(!is.na(data[current.rows, var]))) > length(current.rows) if (oiDetect) { points(x = current.time, y = data[current.rows, var], pch = pch, col = obscol) } # print page if window full if ((!missing(pdfstub)) & (i %% (nr*nc) ==0)) { count <- count + 1 dev.copy2pdf(file = paste(pdfstub, count, ".pdf", sep="")) } } if (!missing(pdfstub)) { if ((i %% (nr*nc)) != 0) { # print last page if not complete count <- count + 1 dev.copy2pdf(file = paste(pdfstub, count, ".pdf", sep="")) } par(mfcol = oldmfcol) # return to previous windowing } # although always now fills by col even if previously by row invisible(imps) } #' Combine Multiple Results From Multiply Imputed Datasets #' #' Combine sets of estimates (and their standard errors) generated from #' different multiply imputed datasets into one set of results. #' #' @param q A matrix or data frame of (k) quantities of interest (eg. #' coefficients, parameters, means) from (m) multiply imputed datasets. #' Default is to assume the matrix is m-by-k (see \code{byrow}), thus each #' row represents a set of results from one dataset, and each column #' represents the different values of a particular quantity of interest #' across the imputed datasets. #' @param se A matrix or data frame of standard errors that correspond to each of the #' elements of the quantities of interest in \code{q}. Should be the same #' dimensions as \code{q}. #' @param byrow logical. If \code{TRUE}, \code{q} and \code{se} are treated as #' though each row represents the set of results from one dataset #' (thus m-by-k). If \code{FALSE}, each column represents results from one #' dataset (thus k-by-m). #' #' @details Uses Rubin's rules for combining a set of results from multiply imputed #' datasets to reflect the average result, with standard errors that both average #' uncertainty across models and account for disagreement in the estimated values #' across the models. #' #' @return #' \item{q.mi}{Average value of each quantity of interest across the m models} #' \item{se.mi}{Standard errors of each quantity of interest} #' #' @references #' Rubin, D. (1987). \emph{Multiple Imputation for Nonresponse in Surveys}. #' New York: Wiley. #' #' Honaker, J., King, G., Honaker, J. Joseph, A. Scheve K. (2001). Analyzing #' Incomplete Political Science Data: An Alternative Algorithm for Multiple #' Imputation \emph{American Political Science Review}, \bold{95(1)}, 49--69. (p53) #' mi.meld<-function(q, se, byrow = TRUE) { if (!byrow) { q <- t(q) se <- t(se) } if (is.data.frame(q)) { q <- as.matrix(q) } if (is.data.frame(se)) { se <- as.matrix(se) } am.m <- nrow(q) ones <- matrix(1, nrow = 1, ncol = am.m) imp.q <- (ones %*% q)/am.m # Slightly faster than "apply(b,2,mean)" ave.se2 <- (ones %*% (se^2))/am.m # Similarly, faster than "apply(se^2,2,mean)" diff <- q - matrix(1, nrow = am.m, ncol = 1) %*% imp.q sq2 <- (ones %*% (diff^2))/(am.m - 1) imp.se <- sqrt(ave.se2 + sq2 * (1 + 1/am.m)) return(list(q.mi = imp.q, se.mi = imp.se)) } Amelia/R/amcheck.r0000644000176200001440000011465014335240021013422 0ustar liggesusers## amcheck.r ## Function for checking for errors in coding ## of the data or input vectors ## ## 21/10/05 - now converts variables names to column numbers, stops if variable doesn't exist; returns codes and messages, doesn't stop execution ## 04/05/06 mb - moved parameter vs. obs check to prep, checks outname ## 10/07/06 mb - fixed handling of variance checks with no fully observed rows. 
## 17/07/06 mb - stops if variable only has one observed value. ## 02/08/06 mb - fixed handling of character variables. ## 25/09/06 mb - fixed handling of errors in output writing. ## 13/12/06 mb - removed dropping of extra priors, added new priors ## 15/12/06 mb - fixed problem of nrow(priors)==5 ## 22/07/08 mb - good coding update: T->TRUE/F->FALSE ## 27/03/10 jh - added checks for splinetime amcheck <- function(x,m=5,p2s=1,frontend=FALSE,idvars=NULL,logs=NULL, ts=NULL,cs=NULL,means=NULL,sds=NULL, mins=NULL,maxs=NULL,conf=NULL,empri=NULL, tolerance=0.0001,polytime=NULL,splinetime=NULL,startvals=0,lags=NULL, leads=NULL,intercs=FALSE,archive=TRUE,sqrts=NULL, lgstc=NULL,noms=NULL,incheck=TRUE,ords=NULL,collect=FALSE, arglist=NULL, priors=NULL,bounds=NULL, max.resample=1000, overimp = NULL, emburn=NULL, boot.type=NULL) { #Checks for errors in list variables listcheck<-function(vars,optname) { if (identical(vars,NULL)) return(0) if (mode(vars) == "character") { if (any(is.na(match(vars,colnames(x))))) { mess<-paste("The following variables are refered to in the", optname,"argument, but don't are not columns in the data:", vars[is.na(match(vars,colnames(x)))]) return(list(1,mess)) } return(0) } if (any(vars>AMp,vars<0,vars%%1!=0)) { mess<-paste(optname," is out of the range of \n", "possible column numbers or is not an integer.") return(list(2,mess)) } return(0) } #Checks for errors in logical variables logiccheck<-function(opt,optname) { if (!identical(opt,NULL)) { if (length(opt) > 1) { mess<-paste("The",optname,"setting is longer than one logical.") return(list(1,mess)) } if (mode(opt) != "logical") { mess<-paste("The",optname,"setting is not a logical (TRUE/FALSE) value.") return(list(2,mess)) } } else { mess<-paste("The",optname,"setting cannot be NULL. Please change to TRUE/FALSE.") return(list(3,mess)) } return(0) } #Checks for errors in priors variables priorcheck<-function(opt,optname) { if (!identical(opt,NULL)) { if (!is.matrix(opt)) { mess<-paste("The", optname,"matrix is not a matrix.\n") return(list(1,mess)) } if (is.character(opt)) { mess<-paste("The", optname,"matrix is a character matrix.\n", "Please change it to a numeric matrix.") return(list(2,mess)) } if (any(dim(opt)!=dim(x))) { mess<-paste("The", optname,"matrices must have the same dimensions\n", "as the data.") return(list(3,mess)) } } return(0) } error.code <- 1 #Error Code: 3 #Arguments point to variables that do not exist. 
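  ## Each argument is evaluated inside try(get(...)); if that evaluation fails
  ## because the user supplied an object that does not exist, return error
  ## code 3 with a message naming the offending argument.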
if (inherits(try(get("x"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the data argument doesn't exist."))) if (inherits(try(get("m"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'm' argument doesn't exist."))) if (inherits(try(get("idvars"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'idvars' argument doesn't exist."))) if (inherits(try(get("means"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'means' argument doesn't exist."))) if (inherits(try(get("sds"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'sds' argument doesn't exist."))) if (inherits(try(get("mins"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'mins' argument doesn't exist."))) if (inherits(try(get("maxs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'maxs' argument doesn't exist."))) if (inherits(try(get("conf"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'conf' argument doesn't exist."))) if (inherits(try(get("empri"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'empri' argument doesn't exist."))) if (inherits(try(get("ts"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'ts' argument doesn't exist."))) if (inherits(try(get("cs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'cs' argument doesn't exist."))) if (inherits(try(get("tolerance"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'tolerance' argument doesn't exist."))) if (inherits(try(get("polytime"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'polytime' argument doesn't exist."))) if (inherits(try(get("splinetime"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'splinetime' argument doesn't exist."))) if (inherits(try(get("lags"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'lags' argument doesn't exist."))) if (inherits(try(get("leads"),silent=TRUE),"try-error") ) return(list(code=3,mess=paste("The setting for the 'leads' argument doesn't exist."))) if (inherits(try(get("logs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'logs' argument doesn't exist."))) if (inherits(try(get("sqrts"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'sqrts' argument doesn't exist."))) if (inherits(try(get("lgstc"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'lgstc' argument doesn't exist."))) if (inherits(try(get("p2s"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'p2s' argument doesn't exist."))) if (inherits(try(get("frontend"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'frontend' argument doesn't exist."))) if (inherits(try(get("intercs"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'intercs' argument doesn't exist."))) if (inherits(try(get("noms"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'noms' argument doesn't exist."))) if (inherits(try(get("startvals"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'startvals' argument doesn't exist."))) if (inherits(try(get("ords"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting 
for the 'ords' argument doesn't exist."))) if (inherits(try(get("collect"),silent=TRUE),"try-error")) return(list(code=3,mess=paste("The setting for the 'collect' argument doesn't exist."))) AMn<-nrow(x) AMp<-ncol(x) subbedout<-c(idvars,cs,ts) if (is.null(idvars)) idcheck <- c(1:AMp) else idcheck <- -idvars ## Error Code: 4 ## Completely missing columns if (any(colSums(!is.na(x[,idcheck])) <= 1)) { all.miss <- colnames(x[,idcheck])[colSums(!is.na(x[,idcheck])) <= 1] if (is.null(all.miss)) { all.miss <- which(colSums(!is.na(x[,idcheck])) <= 1) } all.miss <- paste(all.miss, collapse = ", ") error.code<-4 error.mess<-paste("The data has a column that is completely missing or only has one,observation. Remove these columns:", all.miss) return(list(code=error.code,mess=error.mess)) } #Error codes: 5-6 #Errors in one of the list variables idout<-listcheck(idvars,"One of the 'idvars'") if (!identical(idout,0)) return(list(code=(idout[[1]]+4),mess=idout[[2]])) lagout<-listcheck(lags,"One of the 'lags'") if (!identical(lagout,0)) return(list(code=(lagout[[1]]+4),mess=lagout[[2]])) leadout<-listcheck(leads,"One of the 'leads'") if (!identical(leadout,0)) return(list(code=(leadout[[1]]+4),mess=leadout[[2]])) logout<-listcheck(logs,"One of the 'logs'") if (!identical(logout,0)) return(list(code=(logout[[1]]+4),mess=logout[[2]])) sqout<-listcheck(sqrts,"One of the 'sqrts'") if (!identical(sqout,0)) return(list(code=(sqout[[1]]+4),mess=sqout[[2]])) lgout<-listcheck(lgstc,"One of the 'lgstc'") if (!identical(lgout,0)) return(list(code=(lgout[[1]]+4),mess=lgout[[2]])) tsout<-listcheck(ts,"The 'ts' variable") if (!identical(tsout,0)) return(list(code=(tsout[[1]]+4),mess=tsout[[2]])) csout<-listcheck(cs,"The 'cs' variable") if (!identical(csout,0)) return(list(code=(csout[[1]]+4),mess=csout[[2]])) nomout<-listcheck(noms,"One of the 'noms'") if (!identical(nomout,0)) return(list(code=(nomout[[1]]+4),mess=nomout[[2]])) ordout<-listcheck(ords,"One of the 'ords'") if (!identical(ordout,0)) # THIS FORMERLY READ "NOMOUT" return(list(code=(ordout[[1]]+4),mess=ordout[[2]])) # priors errors if (!identical(priors,NULL)) { # Error code: 7 # priors isn't a matrix if (!is.matrix(priors)) { error.code <- 7 error.mess <- "The priors argument is not a matrix." return(list(code=error.code, mess=error.mess)) } # Error code: 8 # priors is not numeric if (!is.numeric(priors)) { error.code <- 7 error.mess <- paste("The priors matrix is non-numeric. It should\n", "only have numeric values.") return(list(code=error.code, mess=error.mess)) } # Error code: 47 # priors matrix has the wrong dimensions if (ncol(priors) != 4 & ncol(priors) != 5) { error.code <- 47 error.mess <- paste("The priors matrix has the wrong numberof columns.\n", "It should have either 4 or 5 columns.",) return(list(code=error.code, mess=error.mess)) } if (nrow(priors) > nrow(x)*ncol(x)) { error.code <- 47 error.mess <- "There are more priors than there are observations." return(list(code=error.code, mess=error.mess)) } # Error code: 48 # NAs in priors matrix if (any(is.na(priors))) { error.code <- 48 error.mess <- "There are missing values in the priors matrix." return(list(code=error.code, mess=error.mess)) } # Error code: 49 # multiple priors set if (any(duplicated(priors[,1:2]))) { error.code <- 49 error.mess <- "Multiple priors set on one observation or variable." 
return(list(code=error.code,mess=error.mess)) } prior.cols <- priors[,2] %in% c(1:ncol(x)) prior.rows <- priors[,1] %in% c(0:nrow(x)) ## Error code: 9 ## priors set for cells that aren't in the data if (sum(c(!prior.cols,!prior.rows)) != 0) { error.code <- 9 error.mess <- "There are priors set on cells that don't exist." return(list(code=error.code,mess=error.mess)) } ## Error code: 59 ## no priors on nominal variables if (any(priors[,2] %in% noms)) { error.code <- 59 error.mess <- "Cannot set priors on nominal variables. " return(list(code = error.code, mess = error.mess)) } ## Error code: 60 ## no priors on nominal variables if (any(priors[,2] %in% idvars)) { error.code <- 60 error.mess <- "Cannot set priors on ID variables. " return(list(code = error.code, mess = error.mess)) } ## Error code: 12 ## confidences have to be in 0-1 if (ncol(priors) == 5) { if (any(priors[,5] <= 0) || any(priors[,5] >= 1)) { error.code<-12 error.mess<-paste("The priors confidences matrix has values that are less \n", "than or equal to 0 or greater than or equal to 1.") return(list(code=error.code,mess=error.mess)) } } } #Error code: 10 #Square roots with negative values if (!is.null(sqrts)) { if (sum(colSums(x[,sqrts, drop = FALSE] < 0, na.rm = T))) { neg.vals <- colnames(x[,sqrts, drop = FALSE])[colSums(x[,sqrts, drop = FALSE] < 0, na.rm = T) > 1] if (is.null(neg.vals)) neg.vals <- sqrts[colSums(x[,sqrts, drop = FALSE] < 0, na.rm = T) > 1] neg.vals <- paste(neg.vals, collapse = ", ") error.code<-10 error.mess<-paste("The square root transformation cannot be used on variables with negative values. See column(s):", neg.vals) return(list(code=error.code,mess=error.mess)) } } #warning message #logs with negative values if (!is.null(logs)) { triggered<-FALSE for(localindex in 1:length(logs)){ if(!triggered){ if (any(na.omit(x[,logs[localindex]]) < 0)) { warning(paste("The log transformation is being used on \n", "variables with negative values. The values \n", "will be shifted up by 1 plus the minimum value \n", "of that variable.")) triggered<-TRUE } } } } #Error code: 11 #0-1 Bounds on logistic transformations if (!identical(lgstc,NULL)) { lgstc.check <- colSums(x[,lgstc,drop=FALSE] <= 0 | x[,lgstc,drop=FALSE] >= 1, na.rm = TRUE) if (sum(lgstc.check)) { neg.vals <- colnames(x[,lgstc,drop=FALSE])[lgstc.check > 0] if (is.null(neg.vals)) neg.vals <- lgstc[lgstc.check > 0] neg.vals <- paste(neg.vals, collapse = ", ") error.code<-11 error.mess<-paste("The logistic transformation can only be used on values between 0 and 1. 
See column(s):", neg.vals) return(list(code=error.code,mess=error.mess)) } } #Error code: 12 #Confidence Intervals for priors bounded to 0-1 # if (!identical(conf,NULL)) { # if (any(conf <= 0,conf>=1,na.rm=T)) { # error.code<-12 # error.mess<-paste("The priors confidences matrix has values that are less \n", # "than or equal to 0 or greater than or equal to 1.") # return(list(code=error.code,mess=error.mess)) # } # } #Error code: 13 #Can't set all variables to 'idvar' if (!identical(idvars,NULL)) { if ((AMp-1) <= length(idvars)) { error.code<-13 error.mess<-paste("You cannot set all variables (or all but one) as ID variables.") return(list(code=error.code,mess=error.mess)) } } ## Error code: 14 ## ts canonot equal cs if (!identical(ts,NULL) && !identical(cs,NULL)) { if (ts==cs) { error.code<-14 error.mess<-paste("Time series and cross-sectional variables cannot be the same.") return(list(code=error.code,mess=error.mess)) } } #Error code: 15 #TS is more than one integer if (!identical(ts,NULL)) { if (length(ts) > 1) { error.code<-15 error.mess<-paste("The time series variable option is longer than one integer.") return(list(code=error.code,mess=error.mess)) } } #Error code: 16 #CS is more than one integer if (!identical(cs,NULL)) { if (length(cs) > 1) { error.code<-16 error.mess<-paste("The cross section variable option is longer than one integer.") return(list(code=error.code,mess=error.mess)) } } ## if (!identical(casepri,NULL)) { ## #Error code: 17 ## #Case prior must be in a matrix ## if (!is.matrix(casepri)) { ## error.code<-17 ## error.mess<-paste("The case priors should be in a martix form.") ## return(list(code=error.code,mess=error.mess)) ## } ## #Error code: 18 ## #CS must be specified with case priors ## if (identical(cs,NULL)) { ## error.code<-18 ## error.mess<-paste("The cross-sectional variable must be set in order to use case priors.") ## return(list(code=error.code,mess=error.mess)) ## } ## #Error code: 19 ## #Case priors have the wrong dimensions ## if (sum(dim(casepri) == c(length(unique(data[,cs])),length(unique(data[,cs])))) != 2) { ## error.code<-19 ## error.mess<-paste("The case priors have the wrong dimensions. 
It should \n", ## "have rows and columns equal to the number of cases.") ## return(list(code=error.code,mess=error.mess)) ## } ## #Error code: 20 ## #Case prior values are out of bounds ## if (all(casepri != 0,casepri!=1,casepri!=2,casepri!=3)) { ## error.code<-20 ## error.mess<-paste("The case priors can only have values 0, 1, 2, or 3.") ## return(list(code=error.code,mess=error.mess)) ## } ## } #check polynomials if (!identical(polytime,NULL)) { #Error code: 21 #Polynomials of time are longer than one integer if (length(polytime) > 1) { error.code<-21 error.mess<-paste("The polynomials of time setting is greater than one integer.") return(list(code=error.code,mess=error.mess)) } if (!is.numeric(polytime)) { error.code<-22 error.mess<-paste("The setting for polytime is not a number.") return(list(code=error.code,mess=error.mess)) } if ((polytime %% 1) != 0) { error.code<-23 error.mess<-paste("The number of polynomial terms to include for time (polytime) must be an integer.") return(list(code=error.code,mess=error.mess)) } if (any(polytime > 3,polytime < 0)) { error.code<-24 error.mess<-paste("The number of polynomial terms to include must be between 1 and 3.") return(list(code=error.code,mess=error.mess)) } if (identical(ts,NULL)) { error.code<-25 error.mess<-paste("You have set polynomials of time without setting the time series variable.") return(list(code=error.code,mess=error.mess)) } if (all(!intercs,identical(polytime,0))) { warning(paste("You've set the polynomials of time to zero with no interaction with \n", "the cross-sectional variable. This has no effect on the imputation.")) } } if (!identical(splinetime,NULL)) { #Error code: 54 #Spline of time are longer than one integer if (length(polytime) > 1) { error.code<-54 error.mess<-paste("The spline of time setting is greater than one integer.") return(list(code=error.code,mess=error.mess)) } if (!is.numeric(splinetime)) { error.code<-55 error.mess<-paste("The setting for splinetime is not a number.") return(list(code=error.code,mess=error.mess)) } if ((splinetime %% 1) != 0) { error.code<-56 error.mess<-paste("The number of spline degrees of freedom to include for time (splinetime) must be an integer.") return(list(code=error.code,mess=error.mess)) } if (any(splinetime > 6,splinetime < 0)) { error.code<-57 error.mess<-paste("The number of spline degrees of freedom to include must be between 0 and 6.") return(list(code=error.code,mess=error.mess)) } if (identical(ts,NULL)) { error.code<-58 error.mess<-paste("You have set splines of time without setting the time series variable.") return(list(code=error.code,mess=error.mess)) } if (all(!intercs,identical(polytime,0))) { warning(paste("You've set the spline of time to zero with no interaction with \n", "the cross-sectional variable. 
This has no effect on the imputation.")) } } #checks for intercs if (identical(intercs,TRUE)) { if (identical(cs,NULL)) { error.code<-27 error.mess<-paste("You have indicated an interaction with the cross section \n", "without setting the cross section variable.") return(list(code=error.code,mess=error.mess)) } if (length(unique(x[,cs])) > (1/3)*(AMn)) { error.code<-28 error.mess<-paste("There are too many cross-sections in the data to use an \n", "interaction between polynomial of time and the cross-section.") return(list(code=error.code,mess=error.mess)) } if (sum(is.na(x[,cs])) > 0) { error.code <- 60 error.mess <- paste("There are missing values in the 'cs' variable.") return(list(code=error.code,mess=error.mess)) } } #Error codes: 29-31 #logical variable errors interout<-logiccheck(intercs,"cross section interaction") if (!identical(interout,0)) return(list(code=(28+interout[[1]]),mess=interout[[2]])) #p2sout<-logiccheck(p2s,"print to screen") #if (!identical(p2sout,0)) # return(list(code=(p2sout[[1]]+28),mess=p2sout[[2]])) frout<-logiccheck(frontend,"frontend") if (!identical(frout,0)) return(list(code=(frout[[1]]+28),mess=frout[[2]])) collout<-logiccheck(collect,"archive") if (!identical(collout,0)) return(list(code=(collout[[1]]+28),mess=collout[[2]])) #Error code: 32 #Transformations must be mutually exclusive if (length(unique(c(logs,sqrts,lgstc,noms,ords,idvars))) != length(c(logs,sqrts,lgstc,noms,ords,idvars))) { error.code<-32 error.mess<-paste("Transfomations must be mutually exclusive, so one \n", "variable can only be assigned one transformation. You have the \n", "same variable designated for two transformations.") return(list(code=error.code,mess=error.mess)) } #Error code: 33 #ts/cs variables can't be transformed if (any(unique(c(logs,sqrts,lgstc,noms,ords,idvars)) == ts,unique(c(logs,sqrts,lgstc,noms,ords,idvars)) == cs)) { error.code<-33 error.mess<-paste("The time series and cross sectional variables cannot be transformed.") return(list(code=error.code,mess=error.mess)) } #Error code: 35 #tolerance must be greater than zero if (tolerance <= 0) { error.code<-35 error.mess<-paste("The tolerance option must be greater than zero.") return(list(code=error.code,mess=error.mess)) } #check nominals if (!identical(noms,NULL)) { for (i in noms) { #Error code: 36 #too many levels on noms if (length(unique(na.omit(x[,i]))) > (1/3)*(AMn)) { bad.var <- colnames(x)[i] if (is.null(bad.var)) bad.var <- i error.code<-36 error.mess<-paste("The number of categories in the nominal variable \'",bad.var,"\' is greater than one-third of the observations.", sep = "") return(list(code=error.code,mess=error.mess)) } if (length(unique(na.omit(x[,i]))) > 10) warning("\n\nThe number of categories in one of the variables marked nominal has greater than 10 categories. Check nominal specification.\n\n") if (all(i==cs,intercs==TRUE)) { noms<-noms[noms!=i] warning("The cross sectional variable was set as a nominal variable. 
Its nominal status has been dropped.") } } } if (is.null(c(noms,ords,idvars,cs))) fact <- c(1:AMp) else fact <- -c(noms,ords,idvars,cs) if (is.null(c(cs,idvars))) idcheck <- c(1:AMp) else idcheck <- -c(cs,idvars) ##Error code: 37 ##factors out of the noms,ids,ords,cs if (is.data.frame(x)) { if (length(x[,fact])) { if (sum(sapply(x[,fact],is.factor))) { bad.var <- colnames(x[,fact])[sapply(x[,fact],is.factor)] if (is.null(bad.var)) bad.var <- setdiff(which(sapply(x,is.factor)), -fact) bad.var <- paste(bad.var, collapse = ", ") error.code<-37 error.mess<-paste("The following variable(s) are 'factors': ", bad.var, "You may have wanted to set this as a ID variable to remove it", "from the imputation model or as an ordinal or nominal", "variable to be imputed. Please set it as either and", "try again.", sep = "\n") return(list(code=error.code,mess=error.mess)) } if (sum(sapply(x[,fact],is.ordered))) { bad.var <- colnames(x[,fact])[sapply(x[,fact],is.ordered)] if (is.null(bad.var)) bad.var <- setdiff(which(sapply(x,is.ordered)), -fact) bad.var <- paste(bad.var, collapse = ", ") error.code<-37 error.mess<-paste("The following variable(s) are 'factors': ", bad.var, "You may have wanted to set this as a ID variable to remove it", "from the imputation model or as an ordinal or nominal", "variable to be imputed. Please set it as either and", "try again.", sep = "\n") return(list(code=error.code,mess=error.mess)) } if (sum(sapply(x[,fact],is.character))) { bad.var <- colnames(x[,fact])[sapply(x[,fact],is.character)] if (is.null(bad.var)) bad.var <- setdiff(which(sapply(x,is.character)), -fact) bad.var <- paste(bad.var, collapse = ", ") error.code<-38 error.mess<-paste("The following variable(s) are characters: ", paste("\t",bad.var), "You may have wanted to set this as a ID variable to remove it", "from the imputation model or as an ordinal or nominal", "variable to be imputed. Please set it as either and", "try again.", sep = "\n") return(list(code=error.code,mess=error.mess)) } } } else { if (!is.numeric(x)) { error.code <- 38 error.mess <- paste("The \'x\' matrix is not numeric.") return(list(code=error.code,mess=error.mess)) } } #Error code: 39 #No missing observation if (!any(is.na(x[,idcheck,drop=FALSE])) & is.null(overimp)) { error.code<-39 error.mess<-paste("Your data has no missing values. Make sure the code for \n", "missing data is set to the code for R, which is NA.") return(list(code=error.code,mess=error.mess)) } #Error code: 40 #lags require ts if (!is.null(lags)) { if (is.null(ts)) { error.code<-40 error.mess<-paste("You need to specify the time variable in order to create lags.") return(list(code=error.code,mess=error.mess)) } } #Error code: 41 #leads require ts if (!is.null(leads)) { if (is.null(ts)) { error.code<-41 error.mess<-paste("You need to specify the time variable in order to create leads.") return(list(code=error.code,mess=error.mess)) } } #Error code: 42 #Only 1 column of data if (AMp==1) { error.code<-42 error.mess<-paste("There is only 1 column of data. Cannot impute.") return(list(code=error.code,mess=error.mess)) } ## catch problems when the only other variable is an unused ## cross-section. if (!isTRUE(intercs) & ncol(x[,idcheck, drop = FALSE]) == 1) { error.code<-42 error.mess<-paste("There is only 1 column of data. 
Cannot impute.") return(list(code=error.code,mess=error.mess)) } ts.nulls <- is.null(polytime) & is.null(splinetime) ts.zeros <- (polytime == 0) & (splinetime == 0) if (!isTRUE(polytime > 0) & !isTRUE(splinetime > 0)) { if (!isTRUE(intercs) & !is.null(ts)) { if (ncol(x[,-c(ts,cs,idvars), drop = FALSE]) == 1) { error.code<-61 error.mess<-paste("There is only 1 column of data after removing the ts, cs and idvars. Cannot impute without adding polytime.") return(list(code=error.code,mess=error.mess)) } } } #Error code: 43 #Variable that doesn't vary ## note that this will allow the rare case that a user only has ## variation in a variable when all of the other variables are missing ## in addition to having no variation in the listwise deleted ## dataset. Our starting value function should be robust to this. num.nonmissing <- function(obj) length(unique(na.omit(obj))) if (is.data.frame(x)) { non.vary <- sapply(x[,idcheck, drop = FALSE], num.nonmissing) } else { non.vary <- apply(x[,idcheck, drop = FALSE], 2, num.nonmissing) } if (sum(non.vary == 1)) { non.names <- colnames(x[,idcheck])[non.vary == 1] if (is.null(non.names)) { hold <- rep(-1, ncol(x)) hold[-idcheck] <- non.vary non.names <- which(hold == 0) } non.names <- paste(non.names, collapse = ", ") error.code<-43 error.mess<-paste("You have a variable in your dataset that does not vary. Please remove this variable. Variables that do not vary: ", non.names) return(list(code=error.code,mess=error.mess)) } ## } else { ## if (nrow(na.omit(x)) > 1) { ## if (any(diag(var(x[,idcheck],na.rm=TRUE))==0)) { ## error.code<-43 ## error.mess<-paste("You have a variable in your dataset that does not vary. Please remove this variable.") ## return(list(code=error.code,mess=error.mess)) ## } ## } else { ## for (i in 1:ncol(x[,idcheck])) { ## if (var(x[,i],na.rm=TRUE) == 0) { ## error.code<-43 ## error.mess<-paste("You have a variable in your dataset that does not vary. Please remove this variable.") ## return(list(code=error.code,mess=error.mess)) ## } ## } ## } ## } #checks for ordinals if (!is.null(ords)) { for (i in ords) { #Error code: 44 # Ordinal variable with non-integers (factors work by design, and they're # harder to check if (!is.factor(x[,i])) { if (any(unique(na.omit(x[,i])) %% 1 != 0 )) { non.ints <- colnames(x)[i] if (is.null(non.ints)) non.ints <- i error.code<-44 error.mess<-paste("You have designated the variable \'",non.ints, "\' as ordinal when it has non-integer values.", sep = "") return(list(code=error.code,mess=error.mess)) } } } } ## #checks for outname ## if (write.out==TRUE) { ## if (!is.character(outname)) { ## outname<-"outdata" ## warning("The output filename (outname) was not a character. It has been set it ## its default 'outdata' in the working directory.") ## } ## #Error code: 45 ## #output file errors ## outtest<-try(write.csv("test",file=paste(outname,"1.csv",sep="")),silent=TRUE) ## if (inherits(outtest,"try-error")) { ## error.code<-45 ## error.mess<-paste("R cannot write to the outname you have specified. 
Please ## check","that the directory exists and that you have permission to write.",sep="\n") ## return(list(code=error.code,mess=error.mess)) ## } ## tmpdir<- strsplit(paste(outname,"1.csv",sep=""),.Platform$file.sep) ## am.dir <- tmpdir[[1]][1] ## if (length(tmpdir[[1]]) > 1) ## for (i in 2:(length(tmpdir[[1]]))) ## am.dir <- file.path(am.dir, tmpdir[[1]][i]) ## file.remove(am.dir) ## } # if (xor(!identical(means,NULL),!identical(sds,NULL))) { # means<-NULL # sds<-NULL # warning("Both the means and the SDs have to be set in order to use observational priors. The priors have been removed from the analysis.") # } # if (sum(!identical(mins,NULL),!identical(maxs,NULL),!identical(conf,NULL)) != 3 && # sum(!identical(mins,NULL),!identical(maxs,NULL),!identical(conf,NULL)) != 0) { # mins<-NULL # maxs<-NULL # conf<-NULL # warning("Not all of the range parameters were set for the observational priors. They have been removed.") # } #checks of m if (!is.numeric(m)) { m<-5 warning("The number of imputations ('m') was a non-numeric. The value was changed to the default.") } if ((m %% 1) != 0) { m<-5 warning("The number of imputation ('m') was not an integer. The value was changed to the default (5).") } if (m<=0) { m<-5 warning("The number of imputations ('m') must be greater than 0. The value was changed to the default (5).") } # checks for bounds if (!is.null(bounds)) { b.size <- is.matrix(bounds) && ncol(bounds)==3 && nrow(bounds) > 0 b.cols <- sum(bounds[,1] %in% c(1:AMp)) == nrow(bounds) maxint <- max.resample > 0 && (max.resample %% 1)==0 # Error 50: # wrong sized bounds matrix if (!b.size) { error.code<-50 error.mess<-paste("The bounds argument is a three-column matrix.") return(list(code=error.code,mess=error.mess)) } # Error 51: # nonexistant columns in bounds. if (!b.cols) { error.code<-51 error.mess<-paste("One of the bounds is on a non-existant column.") return(list(code=error.code,mess=error.mess)) } # Error 52: # max.resample needs to be positive integer. if (!maxint) { error.code<-52 error.mess<-paste("The max.resample argument needs to be a positive integer.") return(list(code=error.code,mess=error.mess)) } } if (!is.null(overimp)) { o.num <- is.numeric(overimp) o.size <- (is.matrix(overimp) & ncol(overimp) == 2) | length(overimp) == 2 o.cols <- all(unique(overimp[,2]) %in% 1:ncol(x)) o.rows <- all(unique(overimp[,1]) %in% 1:nrow(x)) ## Error 53: ## overimp not numeric if (!o.num | !o.size) { error.code <- 53 error.mess <- "The overimp matrix needs to be a two-column numeric matrix." return(list(code=error.code,mess=error.mess)) } ## Error 54: ## overimp out of range if (!o.rows | !o.cols) { error.code <- 54 error.mess <- "A row/column pair in overimp is outside the range of the data." return(list(code=error.code,mess=error.mess)) } } if (is.data.frame(x)) { is.posix <- function(x) inherits(x, c("POSIXt", "POSIXct", "POSIXlt")) posix.check <- sapply(x, is.posix) if (any(is.na(x[, posix.check]))) { stop("NA in POSIXt variable: remove or convert to numeric") } } if (!is.null(emburn)) { if (length(emburn) != 2) { stop("emburn must be length 2") } } if (!is.null(boot.type)) { if (!(boot.type %in% c("ordinary", "none"))) { stop("boot.type must be either 'ordinary' or 'none'") } } if (is.data.frame(x)) { if (sum(sapply(x, length) == 0)) { bad.var <- colnames(x)[sapply(x,length) == 0] if (is.null(bad.var)) bad.var <- which(sapply(x,length) == 0) bad.var <- paste(bad.var, collapse = ", ") error.code <- 53 error.mess<-paste("The variable(s)",bad.var,"have length 0 in the data frame. 
Try removing these variables or reimporting the data.") return(list(code=error.code,mess=error.mess)) } } if (nrow(na.omit(x[,idcheck,drop=FALSE])) > ncol(x[,idcheck,drop=FALSE])) { if (is.data.frame(x)) { lmcheck <- lm(I(rnorm(AMn))~ ., data = x[,idcheck, drop = FALSE]) } else { lmcheck <- lm(I(rnorm(AMn))~ ., data = as.data.frame(x[,idcheck, drop = FALSE])) } if (any(is.na(coef(lmcheck)))) { bad.var <- names(coef(lmcheck))[which(is.na(coef(lmcheck)))] if (length(bad.var) == 1) { warning(paste("The variable", bad.var, "is perfectly collinear with another variable in the data.\n")) } else { bad.var <- paste(bad.var, collapse = ", ") warning(paste("The variables (or variable with levels)", bad.var, "are perfectly collinear with another variable in the data.\n")) } } } return(list(m=m,priors=priors)) } Amelia/R/zzz.R0000644000176200001440000000121214335240021012611 0ustar liggesusers.onAttach <- function(...) { mylib <- dirname(system.file(package = "Amelia")) ver <- packageVersion("Amelia") builddate <- packageDescription("Amelia")$Date curryear <- format(Sys.time(), "%Y") mess <- c("## ", "## Amelia II: Multiple Imputation", paste("## (Version ",ver,", built: ", builddate,")", sep=""), paste("## Copyright (C) 2005-",curryear, " James Honaker, Gary King and Matthew Blackwell",sep=""), paste("## Refer to http://gking.harvard.edu/amelia/", "for more information"), "## ") mess <- paste(mess, collapse = "\n") packageStartupMessage(mess) } Amelia/R/emb.r0000644000176200001440000012001414335240021012561 0ustar liggesusers## Code for bootstrapped Amelia ported to R ## 17/09/05 jh - Added "subset" routine for idvars and completely missing observations ## 22/09/05 jh - Changed stack function to optionally fix column positions, changed bootx to reject some bootstraps, changed emarch to work when no data missing ## 23/09/05 mb - Added "amcheck" function to change data and check for errors, "impdata" now in format given to amelia. ## 24/09/05 jh - Modified "amcheck," added polynomials of time, added ability to impute "logicals" from data frames ## 25/09/05 jh - Finalized plumbing for observational priors ## 26/09/05 mb - Added "frontend" argument and screen box to amelia and emarch functions ## 27/09/05 jh - Added observational and empirical priors ## 28/09/05 mb - Fixed "frontend" to update the GUI after each print. ## 30/09/05 mb - "amcheck" expanded, priors passed as individual matrices ## 07/10/05 mb - Added passing of lags and multiple levels of polynomials; expanded "amcheck" to cover these ## 08/10/05 jh - Enabled variable degree of polynomials of time, enabled interaction with cross-section ## 14/10/05 mb - Put "amcheck" into its own file ## 21/10/05 mb - Changed "stack" to "amstack" (and "unstack"); added log transformations in "amtransform"; adding "archive" option that saves a list of the settings ## 21/10/05 mb - Added a soft-crash that will print and output the error number and message. ## 24/10/05 mb - Added "sqrt" option for square root transformations, "lgstc" for logistic transformations ## 27/10/05 mb - Enabled lags and leads ## 9//11/05 mb - Enabled nominals; added "incheck" to allow skipping amcheck; moved dataframe->matrix conversion to framemat function. ## 15/12/05 mb - new (fixed) impute function; ## 21/02/06 mb - added positive definite check to "startvals", now defaults to identity if not pd. 
## 22/02/06 mb - penrose inverse function added in sweep; soft-crashes on a non invertible covariance matrix at the end of EM ## 23/02/06 mb - empri increases if EM hits a non-monotonic section; added 'startvals' option; added iteration history to archive; ## 21/03/06 mb - added "ords" option and added ordinal support in "unsubset"; fixed a bug in nominals that wouldn't fill in imputations; ## 22/03/06 mb - character/factors can be specified and returned correctly as ordinals; ## 08/04/06 jh - revised functions to handle large datasets, merged with parallel emb.r version ## 10/04/06 mb - added "nametonumber" function that converts column names to numbers in all of the list options ## 28/04/06 mb - extracted transformation functions to prep.r ## 29/04/06 jh - changed screen output for "p2s", ivector and icap in "indxs", revised "diff" convergence monitor to upper triangular ## 01/05/06 mb - removed "rbind" calls in emfred, impute. ## 01/06/06 mb - added "allthetas" option to emarch for overdispersion diagnostic ## 15/06/06 jh - merged with priors version changing all EM and impute procs, modified how lists are generated in indxs("icap") and amelia("impdata"). ## 27/06/06 mb - added arglist argument to load in output from amelia or the gui. ## 13/07/06 mb - moved gc() calls out of emfred into emarch ## 02/08/06 mb - removed data.matrix() call when calling unsubset (moved to prep), fixed impfill for char. ## 29/08/06 jh - changed tolerance defaults ## 20/09/06 mb - new option (temp?) keep.data that will trash datasets from memory ## 01/10/06 mb - added additional info to p2s=2. ## 27/11/06 mb - new priors format ## 15/01/07 jh/mb - final version changes, degrees of freedom messages,autoprior option, modified comments, rearranged core arguments ## 10/05/07 mb - changed 'impute' to 'amelia.impute' ## 04/07/07 jh - added "emburn" option to modify convergence criteria ## 04/06/08 mb - changed the writing to GUI ('if (frontend)' calls) to remove globals ## 17/07/08 mb - fixed frontend error bug (dumping output to screen ## 22/07/08 mb - good coding update: T->TRUE/F->FALSE ## 27/03/10 jh - small changes to arguments of functions to deal with "splinetime" option in same fashion as "polytime" ## Draw from a multivariate normal distribution ## n: number of draws ## mu: vector of means ## vcv: variance-covariance matrix rmvnorm <- function(n,mu,vcv){ return(matrix(rnorm(n*length(mu)),n,length(mu)) %*% (chol(vcv)) + (matrix(1,n,1) %*% mu ) ) } ## Returns the data matrix without the rows with missing values ## (Same return as na.omit, without any attributes) ## x: data matrix ## can't send it a vector right now packr<-function(x) { r<-is.na(x) sumr<-rowSums(r) x2<-x[sumr==0, , drop=FALSE] return(x2) } ## Create dataset bootstrapped from original dataset ## Rejects Bootstraps where an entire variable becomes missing ## x: data (matrix) ## priors: matrix of priors about means for observations bootx<-function(x,priors=NULL, boot.type="np"){ flag <- TRUE AMn <- nrow(x) if (!is.null(boot.type)) { if (boot.type == "none") { return(list(x=x,priors=priors)) } } while (flag){ order<-trunc(runif(nrow(x), min=1, max=nrow(x)+1)) xboot<-x[order,] if (!identical(priors,NULL)){ sigPriors <- matrix(NA,nrow(x),ncol(x)) muPriors <- matrix(NA,nrow(x),ncol(x)) muPriors[priors[,1:2]] <- priors[,3] sigPriors[priors[,1:2]] <- priors[,4] muPriors <- muPriors[order,] sigPriors <- sigPriors[order,] prior.ind <- which(!is.na(muPriors), arr.ind = TRUE) priors <- cbind(prior.ind, muPriors[prior.ind], sigPriors[prior.ind]) # 
priors[,1]<-match(priors[,1],order) #priors <- priors[!is.na(priors[,1]),,drop=FALSE] } flag<-any(colSums(is.na(xboot))==AMn & !((1:ncol(xboot)) %in% priors[,2])) } return(list(x=xboot,priors=priors)) } ## Put imputations into the original data format ## Converts integer values back to factors or characters impfill <- function(x.orig, x.imp, noms, ords, priors, overimp) { if (!is.null(priors)) { is.na(x.orig)[priors[,c(1,2)]] <- TRUE } if (!is.null(overimp)) { is.na(x.orig)[overimp] <- TRUE } AMr1.orig <- is.na(x.orig) orig.fact <- sapply(x.orig, is.factor) orig.char <- sapply(x.orig, is.character) x.imp <- as.data.frame(x.imp[, 1:ncol(x.orig)]) for (i in 1:ncol(x.orig)) { if (is.logical(x.orig[[i]]) & sum(!is.na(x.orig[[i]])) > 0) { x.imp[,i] <- as.logical(x.imp[,i]>0.5) } ## imputations will be numeric and tibbles if (is.integer(x.orig[[i]]) & sum(is.na(x.orig[[i]])) > 0) { x.orig[, i] <- as.numeric(x.orig[[i]]) } } possibleFactors <- unique(c(noms, ords)) if (!is.null(possibleFactors)) { if (ncol(x.orig) > length(possibleFactors)) { num.cells <- which(is.na(x.orig) & !(col(x.orig) %in% possibleFactors), arr.ind = TRUE) for (j in seq_len(nrow(num.cells))) { j_row <- num.cells[j, 1] j_col <- num.cells[j, 2] x.orig[j_row, j_col] <- x.imp[j_row, j_col] } } for (i in possibleFactors) { if (orig.fact[i]) x.orig[is.na(x.orig[, i]), i] <- levels(x.orig[[i]])[x.imp[is.na(x.orig[[i]]), i]] else if (orig.char[i]) x.orig[, i] <- levels(factor(x.orig[[i]]))[x.imp[, i]] else x.orig[is.na(x.orig[[i]]), i] <- x.imp[is.na(x.orig[[i]]), i] } } else { x.orig[AMr1.orig] <- x.imp[AMr1.orig] } new.char <- sapply(x.orig, is.character) char.match <- orig.char != new.char if (sum(char.match) != 0) for (i in seq_along(char.match)) if (char.match[i]) x.orig[, i] <- as.numeric(x.orig[[i]]) return(x.orig) } ## Create Starting Values for EM Chain startval<-function(x,startvals=0,priors=NULL){ AMp<-ncol(x) if (!is.null(priors)) { ## fill in prior means x[(priors[,2]-1)*nrow(x)+priors[,1]] <- priors[,3] } if (ncol(as.matrix(startvals)) == AMp+1 && nrow(as.matrix(startvals)) == AMp+1) #checks for correct size of start value matrix if (startvals[1,1]==-1) #checks for the -1 necessary for sweep return(startvals) thetast<-matrix(0,nrow=AMp+1,ncol=AMp+1) # Create matrix of zeros thetast[row(thetast)==col(thetast)] <- 1 # Create Identity matrix thetast[1,1]<-(-1) if (startvals==0){ # Defaults to Identity if too few rows fully observed cmpr<-packr(x) if (nrow(cmpr)>AMp){ means<-colMeans(cmpr) if (all(eigen(cov(cmpr))$values > 10*.Machine$double.eps)) { #Checks for positive definiteness (positive eigenvalues) thetast[2:(AMp+1),2:(AMp+1)]<-cov(cmpr) #.Machine$double.eps instead of 0 to account for rounding. thetast[2:(AMp+1),1]<-means thetast[1,2:(AMp+1)]<-means } } } return(thetast) } ## Create certain indicies. Only needs to be called once, not every pattern. ## o,m,icap come from omiindxs ## ivector is i from indexm indxs<-function(x){ AMn<-nrow(x) AMr1<-is.na(x) # True if missing. 
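  # AMr2 holds the unique missingness patterns (the distinct rows of AMr1);
  # o and m below flag the observed and missing columns within each pattern.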
AMr2<-unique(AMr1) o<- !AMr2 # (or o<-AMr2==1) Maybe == is not robust to small fluctuations m<- AMr2 # so put in check procedure (m<-) ## The following can be replaced by fortran .dll, although this has only moderate time savings ## ivector<-1 for(i in 2:AMn){ ischange<- !identical(AMr1[i,],AMr1[i-1,]) if(ischange){ ivector<-c(ivector,i) } } ivector<-c(ivector,AMn+1) ##################################################### ## ivector<-.Fortran("indxs",1*AMr1,as.integer(AMn),as.integer(ncol(x)),as.integer(nrow(AMr2)+1),ivector=integer(nrow(AMr2)+1))$ivector icap<-vector(mode="list",nrow(AMr2)) # This is a useful index, although no longer currently used for (i in 2:length(ivector)){ icap[[i]]<-seq(ivector[i-1],ivector[i]-1) } return(list(AMr1=AMr1,AMr2=AMr2,o=o,m=m,icap=icap,ivector=ivector)) } ## EM chain architecture calls emarch<-function(x,p2s=TRUE,thetaold=NULL,startvals=0,tolerance=0.0001,priors=NULL,empri=NULL,frontend=FALSE,collect=FALSE,allthetas=FALSE,autopri=0.05,emburn=c(0,0)){ if (p2s == 2) { cat("setting up EM chain indicies\n") flush.console() } iter.hist<-matrix(0,nrow=1,ncol=3) if (sum(complete.cases(x)) < nrow(x)){ # Check for fully observed data if (identical(thetaold,NULL)) thetaold<-startval(x,startvals=startvals,priors=priors) indx<-indxs(x) # This needs x.NA if (!identical(priors,NULL)){ priors[,4]<-1/priors[,4] # change sd to 1/var priors[,3]<-priors[,3]*priors[,4] # get the precision-weighted # mus priors <- priors[order(priors[,1],priors[,2]),,drop = FALSE] } x[is.na(x)]<-0 # Change x.NA to x.0s AM1stln<-sum(indx$m[1,])==0 & nrow(indx$m) > 1 count<-0 diff<- 1+tolerance AMr1 <- 1 * indx$AMr1 oo <- 1 * indx$o mm <- 1 * indx$m if (is.null(empri)) { empri <- 0 } theta <- .Call("emcore", x, AMr1, oo, mm, indx$ivector, thetaold, tolerance, emburn, p2s, empri,autopri, allthetas, priors, PACKAGE="Amelia") } else { if (p2s) cat("\n","No missing data in bootstrapped sample: EM chain unnecessary") pp1<-ncol(x)+1 # p (the number of variables) plus one means<-colMeans(x) thetanew<-matrix(0,pp1,pp1) thetanew[1,1]<-(-1) thetanew[2:pp1,1]<-means thetanew[1,2:pp1]<-means thetanew[2:pp1,2:pp1]<-cov(x) # Need to consider Priors in these cases, iter.hist<-NA # Although not # currently necessary. return(list(thetanew=thetanew,iter.hist=iter.hist)) } return(list(thetanew=theta$theta,iter.hist=theta$iter.hist)) } ## Draw imputations for missing values from a given theta matrix amelia.impute<-function(x,thetareal,priors=NULL,bounds=NULL,max.resample=NULL){ indx<-indxs(x) # This needs x.NA if (!identical(priors,NULL)){ priors[,4]<-1/priors[,4] priors[,3]<-priors[,3]*priors[,4] priors <- priors[order(priors[,1],priors[,2]),,drop = FALSE] } x[is.na(x)]<-0 # Change x.NA to x.0s AM1stln<-sum(indx$m[1,])==0 & nrow(indx$m) > 1 # Create sundry simple indicators i<-indx$ivector iii<-indx$icap AMp<-ncol(x) AMn<-nrow(x) AMr1 <- 1 * indx$AMr1 oo <- 1 * indx$o mm <- 1 * indx$m if (is.null(bounds)) max.resample <- NULL imp <- .Call("ameliaImpute", x, AMr1, oo, mm, indx$ivector, thetareal, priors, bounds, max.resample, PACKAGE="Amelia") return(imp) } #' Combine multiple runs of Amelia #' #' Combines multiple runs of \code{amelia} with the same #' arguments and data into one \code{amelia} object. #' #' @param ... one or more objects of class \code{amelia} with the same #' arguments and created from the same data. #' #' @details \code{ameliabind} will combine multiple runs of \code{amelia} into one #' object so that you can utilize diagnostics and modelling on all the #' imputations together. 
This function is useful for combining multiple #' runs of \code{amelia} run on parallel machines. #' #' Note that \code{ameliabind} only checks that they arguments and the #' missingness matrix are identical. Thus, it could be fooled by two #' datasets that are identical up to a transformation of one variable. #' #' @return An object of class \code{amelia}. #' #' @seealso \code{\link{amelia}} #' #' @examples #' data(africa) #' a1.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") #' a2.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") #' all.out <- ameliabind(a1.out, a2.out) #' summary(all.out) #' plot(all.out) #' ameliabind <- function(...) { args <- list(...) if (any(!sapply(args, is, "amelia"))) stop("All arguments must be amelia output.") if (length(args) > 1) { ## test that data is the same. we'll just compare the missMatrices. ## this will allow datasets with the same size and missingness ## matrix to be combined unintentionally, but this seems unlikely. datacheck <- lapply(args, function(x) isTRUE(identical(x$missMatrix,args[[1]]$missMatrix))) if (any(!unlist(datacheck))) stop("Non-compatible datasets.") ## test that all the arguments are the same check <- lapply(args, function(x) isTRUE(identical(x$arguments, args[[1]]$arguments))) if (any(!unlist(check))) stop("Non-compatible amelia arguments") check <- lapply(args, function(x) isTRUE(identical(x$transform.calls, args[[1]]$transform.calls))) if (any(!unlist(check))) stop("Non-compatible transformations on imputed datasets") imps <- unlist(lapply(args, function(x) return(x$m))) newm <- sum(imps) impindex <- c(0,cumsum(imps)) k <- nrow(args[[1]]$mu) out <- list(imputations = list(), m = integer(0), missMatrix = matrix(NA,0,0), overvalues = args[[1]]$overvalues, theta = array(NA, dim = c(k+1,k+1,newm) ), mu = matrix(NA, nrow = k, ncol = newm), covMatrices = array(NA, dim = c(k,k,newm)), code = integer(0), message = character(0), iterHist = list(), arguments = list(), orig.vars = args[[1]]$orig.vars) out$m <- newm out$missMatrix <- args[[1]]$missMatrix out$arguments <- args[[1]]$arguments out$transform.calls <- args[[1]]$transform.calls out$transform.vars <- args[[1]]$transform.vars ## since code==1 is good and code==2 means we have an NA, ## then our new output should inherit a 2 if there are any out$code <- max(unlist(lapply(args,function(x) return(x$code)))) if (out$code > 2) stop("Amelia output contains error.") if (out$code==2) out$message <- "One or more of the imputations resulted in a covariance matrix that was not invertible." 
else out$message <- "Normal EM convergence" for (i in 1:length(args)) { currimps <- (impindex[i]+1):impindex[i+1] out$mu[,currimps] <- args[[i]]$mu out$theta[,,currimps] <- args[[i]]$theta out$covMatrices[,,currimps] <- args[[i]]$covMatrices out$imputations <- c(out$imputations, args[[i]]$imputations) out$iterHist <- c(out$iterHist, args[[i]]$iterHist) } names(out$imputations) <- paste("imp",1:length(out$imputations),sep="") #or: names(out$imputations) <- paste("imp",1:impindex[i+1],sep="") class(out) <- "amelia" class(out$imputations) <- c("mi","list") } else { out <- args[[1]] if (out$code > 2) stop("Amelia output contains error.") } return(out) } getOriginalData <- function(obj) { data <- obj$imputations[[1]] is.na(data) <- obj$missMatrix data <- data[, obj$orig.vars] oi <- obj$arguments$overimp if (!is.null(oi)) { for (i in 1:nrow(oi)) { data[oi[i,1], oi[i,2]] <- obj$overvalues[i] } } return(data) } remove.imputations <- function(obj) { data <- obj$imputations[[1]] is.na(data) <- obj$missMatrix oi <- obj$arguments$overimp if (!is.null(oi)) { for (i in 1:nrow(oi)) { data[oi[i,1], oi[i,2]] <- obj$overvalues[i] } } return(data) } ## amelia - multiple imputation. core function ## #' AMELIA: Multiple Imputation of Incomplete Multivariate Data #' #' Runs the bootstrap EM algorithm on incomplete data and creates #' imputed datasets. #' #' @author James Honaker #' @author Gary King #' @author Matt Blackwell #' #' #' @param x either a matrix, data.frame, a object of class #' "amelia", or an object of class "molist". The first two will call the #' default S3 method. The third a convenient way to perform more imputations #' with the same parameters. The fourth will impute based on the settings from #' \code{moPrep} and any additional arguments. #' @param m the number of imputed datasets to create. #' @param p2s an integer value taking either 0 for no screen output, #' 1 for normal screen printing of iteration numbers, and 2 for detailed #' screen output. See "Details" for specifics on output when p2s=2. #' @param frontend a logical value used internally for the GUI. #' @param idvars a vector of column numbers or column names that indicates #' identification variables. These will be dropped from the analysis but #' copied into the imputed datasets. #' @param ts column number or variable name indicating the variable identifying time #' in time series data. #' @param cs column number or variable name indicating the cross section variable. #' @param polytime integer between 0 and 3 indicating what #' power of polynomial should be included in the imputation model #' to account for the effects of time. A setting of 0 would #' indicate constant levels, 1 would indicate linear time #' effects, 2 would indicate squared effects, and 3 would #' indicate cubic time effects. #' @param splinetime interger value of 0 or greater to control cubic #' smoothing splines of time. Values between 0 and 3 create a simple #' polynomial of time (identical to the polytime argument). Values \code{k} greater #' than 3 create a spline with an additional \code{k-3} #' knotpoints. #' @param intercs a logical variable indicating if the #' time effects of \code{polytime} should vary across the #' cross-section. #' @param lags a vector of numbers or names indicating columns in the data #' that should have their lags included in the imputation model. #' @param leads a vector of numbers or names indicating columns in the data #' that should have their leads (future values) included in the imputation #' model. 
#' @param startvals starting values, 0 for the parameter matrix from #' listwise deletion, 1 for an identity matrix. #' @param tolerance the convergence threshold for the EM algorithm. #' @param logs a vector of column numbers or column names that refer #' to variables that require log-linear transformation. #' @param sqrts a vector of numbers or names indicating columns in the data #' that should be transformed by a sqaure root function. Data in this #' column cannot be less than zero. #' @param lgstc a vector of numbers or names indicating columns in the data #' that should be transformed by a logistic function for proportional data. #' Data in this column must be between 0 and 1. #' @param noms a vector of numbers or names indicating columns in the data #' that are nominal variables. #' @param ords a vector of numbers or names indicating columns in the #' data that should be treated as ordinal variables. #' @param incheck a logical indicating whether or not the inputs to the #' function should be checked before running \code{amelia}. This should #' only be set to \code{FALSE} if you are extremely confident that your #' settings are non-problematic and you are trying to save computational #' time. #' @param collect a logical value indicating whether or #' not the garbage collection frequency should be increased during the #' imputation model. Only set this to \code{TRUE} if you are experiencing memory #' issues as it can significantly slow down the imputation #' process #' @param arglist an object of class "ameliaArgs" from a previous run of #' Amelia. Including this object will use the arguments from that run. #' @param empri number indicating level of the empirical (or ridge) prior. #' This prior shrinks the covariances of the data, but keeps the means #' and variances the same for problems of high missingness, small N's or #' large correlations among the variables. Should be kept small, #' perhaps 0.5 to 1 percent of the rows of the data; a #' reasonable upper bound is around 10 percent of the rows of the #' data. #' @param priors a four or five column matrix containing the priors for #' either individual missing observations or variable-wide missing #' values. See "Details" for more information. #' @param autopri allows the EM chain to increase the empirical prior if #' the path strays into an nonpositive definite covariance matrix, up #' to a maximum empirical prior of the value of this argument times #' \code{n}, the number of observations. Must be between 0 and 1, and at #' zero this turns off this feature. #' @param emburn a numeric vector of length 2, where \code{emburn[1]} is #' a the minimum EM chain length and \code{emburn[2]} is the #' maximum EM chain length. These are ignored if they are less than 1. #' @param bounds a three column matrix to hold logical bounds on the #' imputations. Each row of the matrix should be of the form #' \code{c(column.number, lower.bound,upper.bound)} See Details below. #' @param max.resample an integer that specifies how many times Amelia #' should redraw the imputed values when trying to meet the logical #' constraints of \code{bounds}. After this value, imputed values are #' set to the bounds. #' @param overimp a two-column matrix describing which cells are to be #' overimputed. Each row of the matrix should be a \code{c(row,column)} pair. #' Each of these cells will be treated as missing and #' replaced with draws from the imputation model. 
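#'   For instance, \code{overimp = rbind(c(4, 2), c(17, 2))} would flag the
#'   values in rows 4 and 17 of the second column for overimputation.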
#' @param boot.type choice of bootstrap, currently restricted to either #' \code{"ordinary"} for the usual non-parametric bootstrap and #' \code{"none"} for no bootstrap. #' @param parallel the type of parallel operation to be used (if any). If #' missing, the default is taken from the option #' \code{"amelia.parallel"} (and if that is not set, \code{"no"}). #' @param ncpus integer: the number of processes to be used in parallel #' operation: typically one would choose the number of available CPUs. #' @param cl an optional \pkg{parallel} or \pkg{snow} cluster for use if #' \code{parallel = "snow"}. If not supplied, a cluster on the local #' machine is created for the duration of the \code{amelia} call. #' @param ... further arguments to be passed. #' #' @details #' Multiple imputation is a method for analyzing incomplete #' multivariate data. This function will take an incomplete dataset in #' either data frame or matrix form and return \code{m} imputed datatsets #' with no missing values. The algorithm first creates a bootstrapped #' version of the original data, estimates the sufficient statistics #' (with priors if specified) by EM on this bootstrapped sample, and #' then imputes the missing values of the original data using the #' estimated sufficient statistics. It repeats this process \code{m} #' times to produce the \code{m} complete datasets where the #' observed values are the same and the unobserved values are drawn #' from their posterior distributions. #' #' The function will start a "fresh" run of the algorithm if \code{x} is #' either a incomplete matrix or data.frame. In this method, all of the #' options will be user-defined or set to their default. If \code{x} #' is the output of a previous Amelia run (that is, an object of #' class "amelia"), then Amelia will run with the options used in #' that previous run. This is a convenient way to run more #' imputations of the same model. #' #' You can provide Amelia with informational priors about the missing #' observations in your data. To specify priors, pass a four or five #' column matrix to the \code{priors} argument with each row specifying a #' different priors as such: #' #' \code{ one.prior <- c(row, column, mean,standard deviation)} #' #' or, #' #' \code{ one.prior <- c(row, column, minimum, maximum, confidence)}. #' #' So, in the first and second column of the priors matrix should be the #' row and column number of the prior being set. In the other columns #' should either be the mean and standard deviation of the prior, or a #' minimum, maximum and confidence level for the prior. You must specify #' your priors all as distributions or all as confidence ranges. Note #' that ranges are converted to distributions, so setting a confidence of #' 1 will generate an error. #' #' Setting a priors for the missing values of an entire variable is done #' in the same manner as above, but inputing a \code{0} for the row #' instead of the row number. If priors are set for both the entire #' variable and an individual observation, the individual prior takes #' precedence. #' #' In addition to priors, Amelia allows for logical bounds on #' variables. The \code{bounds} argument should be a matrix with 3 #' columns, with each row referring to a logical bound on a variable. The #' first column should be the column number of the variable to be #' bounded, the second column should be the lower bounds for that #' variable, and the third column should be the upper bound for that #' variable. 
As Amelia enacts these bounds by resampling, particularly #' poor bounds will end up resampling forever. Amelia will stop #' resampling after \code{max.resample} attempts and simply set the #' imputation to the relevant bound. #' #' If each imputation is taking a long time to converge, you can increase #' the empirical prior, \code{empri}. This value has the effect of smoothing #' out the likelihood surface so that the EM algorithm can more easily find #' the maximum. It should be kept as low as possible and only used if needed. #' #' Amelia assumes the data is distributed multivariate normal. There are a #' number of variables that can break this assumption. Usually, though, a #' transformation can make any variable roughly continuous and unbounded. #' We have included a number of commonly needed transformations for data. #' Note that the data will not be transformed in the output datasets and the #' transformation is simply useful for climbing the likelihood. #' #' Amelia can run its imputations in parallel using the methods of the #' \pkg{parallel} package. The \code{parallel} argument names the #' parallel backend that Amelia should use. Users on Windows systems must #' use the \code{"snow"} option and users on Unix-like systems should use #' \code{"multicore"}. The \code{multicore} backend sets itself up #' automatically, but the \code{snow} backend requires more setup. You #' can pass a predefined cluster from the #' \code{parallel::makePSOCKcluster} function to the \code{cl} #' argument. Without this cluster, Amelia will attempt to create a #' reasonable default cluster and stop it once computation is #' complete. When using the parallel backend, users can set the number of #' CPUs to use with the \code{ncpus} argument. The defaults for these two #' arguments can be set with the options \code{"amelia.parallel"} and #' \code{"amelia.ncpus"}. #' #' Please refer to the Amelia manual for more information on the function #' or the options. #' #' @return An instance of S3 class "amelia" with the following objects: #' \item{imputations}{a list of length \code{m} with an imputed dataset in #' each entry. The class (matrix or data.frame) of these entries will #' match \code{x}.} #' \item{m}{an integer indicating the number of imputations run.} #' \item{missMatrix}{a matrix identical in size to the original dataset #' with 1 indicating a missing observation and a 0 indicating an observed #' observation.} #' \item{theta}{An array with dimensions \eqn{(p+1)} by \eqn{(p+1)} by \eqn{m} (where #' \eqn{p} is the number of variables in the imputations model) holding #' the converged parameters for each of the \code{m} EM chains.} #' \item{mu}{A \eqn{p} by \eqn{m} matrix of of the posterior modes for the #' complete-data means in each of the EM chains.} #' \item{covMatrices}{An array with dimensions \eqn{(p)} by \eqn{(p)} by #' \eqn{m} where the first two dimensions hold the posterior modes of the #' covariance matrix of the complete data for each of the EM chains.} #' \item{code}{a integer indicating the exit code of the Amelia run.} #' \item{message}{an exit message for the Amelia run} #' \item{iterHist}{a list of iteration histories for each EM chain. See #' documentation for details.} #' \item{arguments}{a instance of the class "ameliaArgs" which holds the #' arguments used in the Amelia run.} #' \item{overvalues}{a vector of values removed for overimputation. Used to #' reformulate the original data from the imputations. 
} #' #' Note that the \code{theta}, \code{mu} and \code{covMatrcies} objects #' refers to the data as seen by the EM algorithm and is thusly centered, #' scaled, stacked, tranformed and rearranged. See the manual for details #' and how to access this information. #' #' @references #' Honaker, J., King, G., Blackwell, M. (2011). #' Amelia II: A Program for Missing Data. #' \emph{Journal of Statistical Software}, \bold{45(7)}, 1--47. #' \doi{10.18637/jss.v045.i07} #' #' @seealso For imputation diagnostics, \code{\link{missmap}}, #' \code{\link{compare.density}}, #' \code{\link{overimpute}} and \code{\link{disperse}}. For time series #' plots, \code{\link{tscsPlot}}. Also: \code{\link{plot.amelia}}, #' \code{\link{write.amelia}}, and \code{\link{ameliabind}} #' #' @examples #' data(africa) #' a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") #' summary(a.out) #' plot(a.out) #' #' @keywords models amelia <- function(x, ...) { UseMethod("amelia", x) } #' @describeIn amelia Run additional imputations for Amelia output amelia.amelia <- function(x, m = 5, p2s = 1, frontend = FALSE, ...) { ## The original data is the imputed data with the ## imputations marked to NA. These two lines do that data <- x$imputations[[1]] ## Only the variables in the missMatrix should be passed. This is ## because the others are data <- getOriginalData(x) out <- amelia.default(x = data, m = m, arglist=x$arguments, p2s=p2s, frontend = frontend, incheck=FALSE) num.tcalls <- length(x$transform.calls) if (num.tcalls > 0) { for (i in 1:num.tcalls) { tcall <- x$transform.calls[[i]] tcall[[2]] <- as.name("out") out <- eval(tcall) } out$transform.calls <- x$transform.calls } ret <- ameliabind(x, out) return(ret) } #' @describeIn amelia Perform multiple overimputation from moPrep amelia.molist <- function(x, ...) { m <- match.call(expand.dots=TRUE) m$x <- x$data m$priors <- x$priors m$overimp <- x$overimp m[[1]] <- quote(Amelia::amelia.default) ret <- eval(m, parent.frame()) return(ret) } #' @describeIn amelia Run core Amelia algorithm amelia.default <- function(x, m = 5, p2s = 1, frontend = FALSE, idvars = NULL, ts = NULL, cs = NULL, polytime = NULL, splinetime = NULL, intercs = FALSE, lags = NULL, leads = NULL, startvals = 0, tolerance = 0.0001, logs = NULL, sqrts = NULL, lgstc = NULL, noms = NULL, ords = NULL, incheck = TRUE, collect = FALSE, arglist = NULL, empri = NULL, priors = NULL, autopri = 0.05, emburn = c(0,0), bounds = NULL, max.resample = 100, overimp = NULL, boot.type = "ordinary", parallel = c("no", "multicore", "snow"), ncpus = getOption("amelia.ncpus", 1L), cl = NULL, ...) 
{ ## parellel infrastructure modeled off of 'boot' package if (missing(parallel)) parallel <- getOption("amelia.parallel", "no") parallel <- match.arg(parallel) have_mc <- have_snow <- FALSE if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") have_mc <- .Platform$OS.type != "windows" else if (parallel == "snow") have_snow <- TRUE if (!have_mc && !have_snow) ncpus <- 1L if (p2s == 2) { cat("\nUsing '", parallel, "' parallel backend with", ncpus, "cores.") } } if (p2s == 2) { cat("\namelia starting\n") flush.console() } am.call <- match.call(expand.dots = TRUE) archv <- am.call prepped<-amelia.prep(x = x, m = m, idvars = idvars, empri = empri, ts = ts, cs = cs, tolerance = tolerance, polytime = polytime, splinetime = splinetime, lags = lags, leads = leads, logs = logs, sqrts = sqrts, lgstc = lgstc, p2s = p2s, frontend = frontend, intercs = intercs, noms = noms, startvals = startvals, ords = ords, incheck = incheck, collect = collect, arglist = arglist, priors = priors, autopri = autopri, bounds = bounds, max.resample = max.resample, overimp = overimp, emburn = emburn, boot.type = boot.type) if (prepped$code != 1) { cat("Amelia Error Code: ", prepped$code, "\n", prepped$message, "\n") return(invisible(list(code = prepped$code, message = prepped$message))) } do.amelia <- function(X, ...) { if (p2s == 2) { cat("running bootstrap\n") } k <- ncol(prepped$x) if (!is.null(colnames(x))) { ovars <- colnames(x) } else { ovars <- 1:k } code <- 1 impdata <- list(imputations = list(), m = 1, missMatrix = prepped$missMatrix, overvalues = prepped$overvalues, theta = array(NA, dim = c(k+1,k+1,1) ), mu = matrix(NA, nrow = k, ncol = 1), covMatrices = array(NA, dim = c(k,k,1)), code = integer(0), message = character(0), iterHist = list(), arguments = list(), orig.vars = ovars) class(impdata) <- "amelia" class(impdata$imputations) <- c("mi","list") x.boot<-bootx(prepped$x,prepped$priors, boot.type) # Don't reorder columns thetanew will not align with d.stacked$x x.stacked<-amstack(x.boot$x,colorder=FALSE,x.boot$priors) if (p2s) cat("-- Imputation", X, "--\n") thetanew <- emarch(x.stacked$x, p2s = p2s, thetaold = NULL, tolerance = tolerance, startvals = startvals, priors = x.stacked$priors, empri = empri, frontend = frontend, collect = collect, autopri = prepped$autopri, emburn = emburn) ##thetanew <- .Call("emarch", PACKAGE = "Amelia") ## update the amelia ouptut impdata$iterHist[[1]] <- thetanew$iter.hist impdata$theta[,,1] <- thetanew$thetanew impdata$mu[,1] <- thetanew$thetanew[-1,1] impdata$covMatrices[,,1] <- thetanew$thetanew[-1,-1] dimnames(impdata$covMatrices)[[1]] <- prepped$theta.names dimnames(impdata$covMatrices)[[2]] <- prepped$theta.names dimnames(impdata$mu)[[1]] <- prepped$theta.names evs <- eigen(thetanew$thetanew[-1, -1, drop = FALSE], only.values=TRUE, symmetric=TRUE) if (any(evs$values < .Machine$double.eps)) { impdata$imputations[[1]] <- NA impdata$code <- 2 impdata$arguments <- prepped$archv class(impdata$arguments) <- c("ameliaArgs", "list") cat("\n\nThe resulting variance matrix was not invertible.", " Please check your data for highly collinear variables.\n\n") return(impdata) } ximp <- amelia.impute(prepped$x, thetanew$thetanew, priors = prepped$priors, bounds = prepped$bounds, max.resample) ximp <- amunstack(ximp, n.order = prepped$n.order, p.order = prepped$p.order) ximp <- unscale(ximp, mu = prepped$scaled.mu, sd = prepped$scaled.sd) ximp <- unsubset(x.orig = prepped$trans.x, x.imp = ximp, blanks = prepped$blanks, idvars = prepped$idvars, ts = prepped$ts, cs = 
prepped$cs, polytime = polytime, splinetime = splinetime, intercs = intercs, noms = prepped$noms, index = prepped$index, ords = prepped$ords) ximp <- untransform(ximp, logs = prepped$logs, xmin = prepped$xmin, sqrts = prepped$sqrts, lgstc = prepped$lgstc) if (p2s==2) { cat("\n saving and cleaning\n") } ## here we deal with the imputed matrix. ## first, we put the data into the output list and name it impdata$imputations[[1]] <- impfill(x.orig = x, x.imp = ximp, noms = prepped$noms, ords = prepped$ords, priors = priors, overimp = overimp) if (p2s) cat("\n") if (frontend) { requireNamespace("tcltk") tcltk::tcl(getAmelia("runAmeliaProgress"), "step",(100/m -1)) } impdata$code <- code impdata$arguments <- prepped$archv names(impdata$imputations) <- paste("imp", X, sep = "") class(impdata$arguments) <- c("ameliaArgs", "list") return(impdata) } ## parallel infrastructure from the 'boot' package impdata <- if (ncpus > 1L && (have_mc || have_snow)) { if (have_mc) { parallel::mclapply(seq_len(m), do.amelia, mc.cores = ncpus) } else if (have_snow) { list(...) # evaluate any promises if (is.null(cl)) { cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) if(RNGkind()[1L] == "L'Ecuyer-CMRG") parallel::clusterSetRNGStream(cl) res <- parallel::parLapply(cl, seq_len(m), do.amelia) parallel::stopCluster(cl) res } else parallel::parLapply(cl, seq_len(m), do.amelia) } } else lapply(seq_len(m), do.amelia) if (all(sapply(impdata, is, class="amelia"))) { if (!all(sapply(impdata, function(x) is.na(x$imputations)))) { impdata <- do.call(ameliabind, impdata) if (impdata$code == 2) { impdata$message <- paste("One or more of the imputations resulted in a", "covariance matrix that was not invertible.") } else { impdata$message <- paste("Normal EM convergence.") } } else { impdata <- do.call(ameliabind, impdata) impdata$code <- 2 impdata$message <- paste("All of the imputations resulted in a covariance", "matrix that is not invertible.") } } return(impdata) } Amelia/R/write.amelia.R0000644000176200001440000001022414335240021014340 0ustar liggesusers#' Write Amelia imputations to file #' #' Writes the imptuted datasets to file from a run of \code{amelia} #' #' @param obj an object of class "amelia"; typically output from the #' function \code{amelia} #' @param separate logical variable. If \code{TRUE} (default), the #' imputed datasets will be written to separate files, whose names come #' from the \code{file.stem} and \code{extension} arguments. If \code{FALSE}, #' the imputations are stacked and written as a single file. #' @param file.stem the leading part of the filename to save to #' output The imputation number and \code{extension} will be added to #' complete the filename. This can include a directory path. #' @param extension the extension of the filename. This is simply what #' follows \code{file.stem} and the imputation number. #' @param format one of the following output formats: \code{csv}, #' \code{dta} or \code{table}. See details. #' @param impvar the name of imputation number variable written to the #' stacked dataset when \code{separate} is \code{FALSE}. #' @param orig.data logical variable indicating whether the original, #' unimputed dataset should be included in the stacked dataset when #' \code{separate} is \code{FALSE}. #' @param \dots further arguments for the \code{write} functions. #' #' @details #' \code{write.amelia} writes the imputed datasets to a file or a set of files #' using one of the following functions: \code{write.csv}, #' \code{write.dta}, or \code{write.table}. 
You can pass arguments to #' these functions from \code{write.amelia}. #' #' When \code{separate} is \code{TRUE}, each imputed dataset is written #' to its own file. If you were to set \code{file.stem} to #' \code{"outdata"} and the \code{extension} to \code{".csv"} , then the #' resulting filename of the written files will be #' \preformatted{ #' outdata1.csv #' outdata2.csv #' outdata3.csv #' ... #' } #' and so on. #' #' When \code{separate} is \code{FALSE}, the function adds a variable #' called \code{impvar} to each dataset which indicates the imputed #' dataset to which the row belongs. Then, each of the datasets are #' stacked together to create one dataset. If \code{orig.data} is \code{TRUE}, #' then the original, unimputed dataset is included at the top of the #' stack, with its imputation number set to 0. #' #' @seealso \code{\link{write.csv}}, \code{\link{write.table}}, \code{\link{write.dta}} write.amelia <- function(obj, separate = TRUE, file.stem, extension = NULL, format = "csv", impvar = "imp", orig.data = TRUE, ...) { if(!(format %in% c("csv","table","dta"))) { stop("The writing format is not supported") } ## smart defaults for the extensions if (missing(extension)) { if (format == "dta") extension <- ".dta" if (format == "csv") extension <- ".csv" } m <- length(obj$imputations) Call <- match.call(expand.dots = TRUE) Call[[1]] <- as.name(paste("write",format, sep=".")) ## these arugments should not be passed to write.format Call$obj <- NULL Call$file.stem <- NULL Call$extension <- NULL Call$format <- NULL Call$separate <- NULL Call$orig.data <- NULL Call$impvar <- NULL if (separate) { for (i in 1:m) { if (format == "dta") Call$dataframe <- obj$imputations[[i]] else Call$x <- obj$imputations[[i]] Call$file <- paste(file.stem, i, extension,sep="") eval.parent(Call) } } else { if (orig.data) { odata <- obj$imputations[[1]] is.na(odata) <- obj$missMatrix odata[, impvar] <- 0 } obj$imputations[[1]][, impvar] <- 1 if (orig.data) { obj$imputations[[1]] <- rbind(odata, obj$imputations[[1]]) } if (format == "dta") { Call$dataframe <- obj$imputations[[1]] } else { Call$x <- obj$imputations[[1]] } for (i in 2:m) { obj$imputations[[i]][, impvar] <- i if (format == "dta") { Call$dataframe <- rbind(Call$dataframe, obj$imputations[[i]]) } else { Call$x <- rbind(Call$x, obj$imputations[[i]]) } } Call$file <- paste(file.stem, extension, sep = "") eval.parent(Call) } invisible() } Amelia/R/mo.R0000644000176200001440000001602514335240021012377 0ustar liggesusers#' Prepare Multiple Overimputation Settings #' #' A function to generate priors for multiple overimputation of #' a variable measured with error. #' #' @param x either a matrix, data.frame, or a object of class "molist" #' from a previous \code{moPrep} call. The first two derive the priors #' from the data given, and the third will derive the priors from the #' first \code{moPrep} call and add them to the already defined #' priors. #' @param formula a formula describing the nature of the measurement #' error for the variable. See "Details." #' @param subset an optional vector specifying a subset of observations #' which possess measurement error. #' @param error.proportion an optional vector specifying the fraction of #' the observed variance that is due to measurement error. #' @param gold.standard a logical value indicating if values with no #' measurement error should be used to estimate the measurement error #' variance. #' @param error.sd an optional vector specifying the standard error of #' the measurement error. 
#' #' @return An instance of the S3 class "molist" with the following #' objects: #' \itemize{ #' \item priors a four-column matrix of the multiple overimputation priors #' associated with the data. Each row of the matrix is #' \code{c(row,column, prior.mean, prior.sd)} #' \item overimp a two-column matrix of cells to be overimputed. Each #' row of the matrix is of the form \code{c(row, column)}, which #' indicates the row and column of the cell to be overimputed. #' \item data the object name of the matrix or data.frame to which #' priors refer. #' } #' #' Note that \code{priors} and \code{overimp} might contain results from #' multiple calls to \code{moPrep}, not just the most recent. #' #' @details #' This function generates priors for multiple overimputation of data #' measured with error. With the \code{formula} argument, you can specify #' which variable has the error, what the mean of the latent data is, and #' if there are any other proxy measures of the mismeasured variable. The #' general syntax for the formula is: \code{errvar ~ mean | proxy}, #' where \code{errvar} is the mismeasured variable, \code{mean} is a #' formula for the mean of the latent variable (usually just #' \code{errvar} itself), and \code{proxy} is another mismeasurement of #' the same latent variable. The proxies are used to estimate the #' variance of the measurement error. #' #' \code{subset} and \code{gold.standard} refer to the rows of the #' data which are and are not measured with error. Gold-standard rows are #' used to estimate the variance of the #' measurement error. \code{error.proportion} is used to estimate the #' variance of the measurement error by estimating the variance of the #' mismeasurement and taking the proportion assumed to be due to #' error. \code{error.sd} sets the standard error of the measurement #' error directly. 
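#'
#' As a rough worked sketch of the arithmetic implied by these options (the
#' numbers are invented purely for illustration): if the observed variance of
#' the mismeasured variable is 10 and \code{error.proportion = 0.1}, the
#' measurement-error variance is taken to be 10 * 0.1 = 1, so the prior
#' standard deviation attached to each overimputed cell is sqrt(1) = 1.
#' Supplying \code{error.sd = 1} directly would produce the same prior.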
#' #'@seealso #' \code{\link{amelia}} #' #' @examples #' data(africa) #' m.out <- moPrep(africa, trade ~ trade, error.proportion = 0.1) #' a.out <- amelia(m.out, ts = "year", cs = "country") #' plot(a.out) #' m.out <- moPrep(africa, trade ~ trade, error.sd = 1) #' a.out <- amelia(m.out, ts = "year", cs = "country") #' moPrep <- function(x, formula, subset, error.proportion, gold.standard = !missing(subset), error.sd) { UseMethod("moPrep",x) } #' @describeIn moPrep Alter existing moPrep output moPrep.molist <- function(x, formula, subset, error.proportion, gold.standard = FALSE, error.sd) { m <- match.call() m$x <- x$data m[[1]] <- as.name("moPrep") res <- eval(m, sys.frame(sys.parent())) x$priors <- rbind(x$priors, res$priors) x$overimp <- rbind(x$overimp, res$overimp) return(x) } #' @describeIn moPrep Default call to moPrep moPrep.default <- function(x, formula, subset, error.proportion, gold.standard=!missing(subset), error.sd) { if (!missing(error.proportion) && !(length(error.proportion) %in% c(1,nrow(x)))) { stop("The error.proportion arugment must be of length 1 or the number of rows of the data.") } if (!missing(error.sd) && !(length(error.sd) %in% c(1,nrow(x)))) { stop("The error.sd arugment must be of length 1 or the number of rows of the data.") } if (!missing(error.proportion) & !missing(error.sd)) { stop("error.proportion and error.sd cannot be set at the same time.") } ## parse the formula target.name <- formula[[2]] pars <- formula[[3]] vnames <- all.vars(formula, unique = FALSE) if ("|" %in% all.names(formula)) { proxyname <- vnames[length(vnames)] meanpos <- length(vnames)-1 } else { meanpos <- length(vnames) } if (!exists("proxyname") && missing(error.proportion) && !gold.standard && missing(error.sd)) { stop("Need to specify a proxy, an error proportion, an error variance, or gold-standard data.") } proxysplit <- strsplit(deparse(formula), "\\|")[[1]] form <- formula(paste(proxysplit, collapse = "+")) m <- match.call() m[[1]] <- as.name("model.frame") m$formula <- form m$error.proportion <- NULL m$error.sd <- NULL m$gold.standard <- NULL m$data <- m$x m$x <- NULL mf <- eval(m, sys.frame(sys.parent())) if (nrow(mf) == 0L) stop("0 cases to overimpute, check subset argument") if (!missing(error.proportion)) { if (length(error.proportion) == nrow(x)) { if (!missing(subset)) { error.proportion <- error.proportion[eval(substitute(subset,x))] } gs <- mf[error.proportion == 0, , drop = FALSE] mf <- mf[error.proportion != 0, , drop = FALSE] } } else if (!missing(error.sd)) { if (length(error.sd) == nrow(x)) { if (!missing(subset)) { error.sd <- error.sd[eval(substitute(subset,x))] } gs <- mf[error.sd == 0, , drop = FALSE] mf <- mf[error.sd != 0, , drop = FALSE] } } else { gs <- mf[0,] } if (ncol(mf) < meanpos) meanpos <- ncol(mf) prior.mean <- mf[,meanpos] var.mm <- var(mf[,1], na.rm=TRUE) if (!missing(error.proportion)) { prior.var <- var.mm*error.proportion } if (!missing(error.sd)) { prior.var <- error.sd^2 } if (exists("proxyname")) { prior.var <- var.mm - cov(mf[,1],mf[,proxyname], use="complete.obs") } if (gold.standard && !is.null(m$subset)) { m$subset <- NULL mf.full <- eval(m, sys.frame(sys.parent())) gs2 <- mf.full[which(!(rownames(mf.full) %in% rownames(mf))), , drop = FALSE] gs <- rbind(gs, gs2) var.gs <- var(gs[,1],na.rm=TRUE) prior.var <- var.mm - var.gs } col <- match(names(mf)[1], names(x)) rows <- as.integer(rownames(mf)) out <- list() out$priors <- cbind(rows,col,prior.mean, prior.var) out$overimp <- cbind(rows, col) if (sum(out$priors[,4] <= 0) > 0) { out$priors 
<- out$priors[out$priors[,4] > 0,] warning("Some observations estimated with negative measurement error variance. Set to gold standard.") } out$priors[,4] <- sqrt(out$priors[,4]) out$data <- substitute(x) class(out) <- c("molist","list") return(out) } Amelia/R/plot.amelia.R0000644000176200001440000000677614335240021014205 0ustar liggesusers #' Summary plots for Amelia objects #' #' Plots diagnostic plots for the output from the #' \code{amelia} function. #' #' @param x an object of class "amelia"; typically output from the #' function \code{amelia}. #' @param which.vars a vector indicating the variables to plot. The #' default is to plot all of the numeric variables that were actually #' imputed. #' @param compare plot the density comparisons for each variable (True or False) #' @param overimpute plot the overimputation for each variable (True or False) #' @param ask prompt user before changing pages of a plot (True or False) #' @param ... further graphical arguments. #' plot.amelia <- function(x, which.vars, compare = TRUE, overimpute = FALSE, ask = TRUE, ...) { imputedVars <- colSums(x$missMatrix) > 0 ## if it's a matrix, it's already numeric if (is.data.frame(x$imputations[[1]])) { numericVars <- sapply(x$imputations[[1]],"is.numeric") } else { numericVars <- rep(TRUE, length(imputedVars)) } ## Choose the correct variables to plot. Only numerics. ## And, if they didn't pick, only show the imputed variables. if (missing(which.vars)) { which.vars <- which(imputedVars & numericVars) } else { ## trim user-choosen variables that are not numeric which.vars <- which.vars[numericVars[which.vars]] } mfrow <- set.mfrow(nvars = length(which.vars), overimpute) on.exit(par(NULL)) layout <- par(mfrow = mfrow) for (i in seq(along=which.vars)) { if (compare) compare.density(output=x, var=which.vars[i], legend=FALSE,...) if (overimpute) overimpute(output=x, var=which.vars[i]) if (i==1) devAskNewPage(ask) } devAskNewPage(ask=FALSE) invisible() } ## ## set.mfrow() - gets the proper number of frames for plotting the ## output of the "amelia" class. ## ## INPUTS: nvars - number of variables being plotted ## overimpute - are we plotting overimputes? ## ## OUTPUT: mfrow - vector of length 2 with the (rows,cols) of the ## plotting window ## ## NOTICE: idea taken from the "coda" package ## ## set.mfrow <- function(nvars = 1, overimpute = FALSE) { if (overimpute) { ## If we are overimputing as well, we need ## two plots per variable mfrow <- switch(min(nvars, 13), c(2,1), ## 2 plot : 1x2 c(2,2), ## 4 plots: 2x2 c(3,2), ## 6 plots: 3x2 c(4,2), ## 8 plots: 4x2 c(3,2), ## 10 plots: 3x2 c(3,2), ## 12 plots: 3x2 c(4,2), ## 14 plots: 4x2 c(4,2), ## 16 plots: 4x2 c(4,2), ## 18 plots: 4x2 c(3,2), ## 20 plots: 3x2 c(3,2), ## 22 plots: 3x2 c(3,2), ## 24 plots: 3x2 c(4,2)) ## 26 plots: 4x2 } else { mfrow <- switch(min(nvars, 13), c(1,1), ## 1 plot : 1x1 c(2,1), ## 2 plots: 2x1 c(2,2), ## 3 plots: 2x2 c(2,2), ## 4 plots: 2x2 c(3,2), ## 5 plots: 3x2 c(3,2), ## 6 plots: 3x2 c(3,3), ## 7 plots: 3x3 c(3,3), ## 8 plots: 3x3 c(3,3), ## 9 plots: 3x3 c(3,2), ## 10 plots: 3x2 c(3,2), ## 11 plots: 3x2 c(3,2), ## 12 plots: 3x2 c(3,3)) ## 13 plots: 3x3 } return(mfrow) } Amelia/R/ameliagui.r0000644000176200001440000040571414335240021013770 0ustar liggesusers#' Interactive GUI for Amelia #' #' @name ameliagui #' #' @description #' Brings up the AmeliaView graphical interface, which allows users #' to load datasets, manage options and run Amelia from a traditional #' windowed environment. 
#' #' @usage AmeliaView() #' @keywords utilities main.close<-function() { qvalue<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="Are you sure you want to exit Amelia?", icon="question", type="okcancel", default="cancel") if (tcltk::tclvalue(qvalue)=="ok") { tcltk::tkdestroy(getAmelia("gui")) } } setWorkingDir <- function() { newwd <- tcltk::tkchooseDirectory(parent = getAmelia("gui"), initialdir = getwd(), title = "Set output directory...", mustexist = TRUE) if (tcltk::tclvalue(newwd) != "") setwd(tcltk::tclvalue(newwd)) return(NULL) } loadStata <- function() { filetype <- c("{{Stata files} {.dta}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.dta(getAmelia("am.filename"),convert.factors=FALSE))) putAmelia("am.filetype", "Stata") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadSPSS <- function() { filetype <- c("{{SPSS} {.sav}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.spss(getAmelia("am.filename"),use.value.labels=FALSE,to.data.frame=TRUE))) putAmelia("am.filetype", "SPSS") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadSAS <- function() { filetype <- c("{{SAS Transport} {.xpt}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.xport(getAmelia("am.filename")))) putAmelia("am.filetype", "SAS") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. 
Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadTAB <- function() { filetype <- c("{{Tab-delimited files} {.txt .tab .dat}} {{All files} *}") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.table(getAmelia("am.filename"),header=TRUE))) putAmelia("am.filetype", "TAB") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadCSV <- function() { filetype <- c("{{Comma-delimited files} {.csv}} {{All files} *} ") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(read.csv(getAmelia("am.filename"),header=TRUE))) putAmelia("am.filetype", "CSV") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } activateGUI() } loadRData <- function() { onOK <- function() { putAmelia("amelia.data", eval(as.name(tcltk::tclvalue(tcltk::tkget(objectChooser))))) tcltk::tkdestroy(chooseObjectWindow) tcltk::tkfocus(getAmelia("gui")) tcltk::tkgrab.release(chooseObjectWindow) activateGUI() return() } onCancel <- function() { rm(list=getAmelia("amelia.data")) tcltk::tkdestroy(chooseObjectWindow) tcltk::tkfocus(getAmelia("gui")) tcltk::tkgrab.release(chooseObjectWindow) return() } filetype <- c("{{R Data files} {.RData .Rdata .Rda .rda}} {{All files} *} ") putAmelia("am.filename", tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes=filetype))) if (getAmelia("am.filename") == "") return(NULL) if (!is.null(getAmelia("amelia.data"))) { sure <- tcltk::tkmessageBox(parent = getAmelia("gui"), message = "If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?", icon = "question", type = "yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } putAmelia("amelia.data",try(load(getAmelia("am.filename")))) putAmelia("am.filetype", "RData") if (inherits(getAmelia("amelia.data"), "try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Failure in loading the data. 
Try again.",icon="error",type="ok") putAmelia("amelia.data",NULL) return(NULL) } if (length(getAmelia("amelia.data")) == 1) { putAmelia("amelia.data", eval(as.name(getAmelia("amelia.data")))) } else { datasets <- sapply(getAmelia("amelia.data"), function(x) is.data.frame(eval(as.name(x)))) datasets <- getAmelia("amelia.data")[datasets] chooseObjectWindow <- tcltk::tktoplevel(parent=getAmelia("gui")) tcltk::tkwm.title(chooseObjectWindow, "Find Data Set") chooseFrame <- tcltk::ttkframe(chooseObjectWindow) objectChooser <- tcltk::ttkcombobox(chooseFrame, width = 20) tcltk::tkconfigure(objectChooser, values = datasets) tcltk::tkset(objectChooser, datasets[1]) objectOK <- tcltk::ttkbutton(chooseFrame, text = "OK", width = 10, command = onOK) objectCancel <- tcltk::ttkbutton(chooseFrame, text = "Cancel", width = 10, command = onCancel) tcltk::tkgrid(tcltk::ttklabel(chooseFrame, text = "Please select your dataset from the following objects:"), row = 0, column = 0, columnspan = 2, padx = 10, pady = 10) tcltk::tkgrid(objectChooser, row = 1, column = 0, columnspan = 2, padx = 10, pady = 10) tcltk::tkgrid(objectOK, row = 2, column = 0, padx = 10, pady = 10) tcltk::tkgrid(objectCancel, row = 2, column = 1, padx = 10, pady = 10) tcltk::tkgrid(chooseFrame, padx = 10, pady = 10) tcltk::tkgrab(chooseObjectWindow) tcltk::tkfocus(chooseObjectWindow) tcltk::tkwm.protocol(chooseObjectWindow, "WM_DELETE_WINDOW", onCancel) centerModalDialog(chooseObjectWindow, resize=FALSE) } return() } loadDemo <- function(name) { if (!is.null(getAmelia("amelia.data"))) { sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you load another dataset, your current settings will be erased. Are you sure you want to load the new data?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) } data(list=name, package="Amelia", envir = ameliaEnv) putAmelia("amelia.data", eval(as.name(name))) putAmelia("am.filetype", "demo") putAmelia("am.filename", name) activateGUI() } drawMissMap <- function() { dev.new() missmap(getAmelia("amelia.data"), csvar = getAmelia("csvar"), tsvar = getAmelia("tsvar")) } activateGUI <- function(session = FALSE) { temp.list <- strsplit(getAmelia("am.filename"),"/")[[1]] if (getAmelia("am.filetype") != "demo") { temp.list <- strsplit(getAmelia("am.filename"),"/")[[1]] putAmelia("am.directory", paste(temp.list[-length(temp.list)],"",sep="/",collapse="")) setwd(getAmelia("am.directory")) } else { putAmelia("am.directory", getwd()) } filename <- temp.list[length(temp.list)] dotList <- strsplit(filename, "\\.")[[1]] if (length(dotList) > 1) dotList <- dotList[-length(dotList)] filestub <- paste(paste(dotList, collapse = "."), "-imp", sep="") putAmelia("varnames" , names(getAmelia("amelia.data"))) tcltk::tkgrid.remove(getAmelia("error.label")) tcltk::tkgrid.remove(getAmelia("allgood.label")) tcltk::tkgrid(getAmelia("noimps.label"), row = 2, column = 7, sticky ="e", padx = 10) ## Get rid of welcome frame if (as.logical(tcltk::tkwinfo("ismapped", getAmelia("gui.welcome")))) { tcltk::tkgrid.remove(getAmelia("gui.welcome")) tcltk::tkgrid(getAmelia("gui.skel"), row = 0, column = 0, sticky ="news") tcltk::tkgrid(getAmelia("statusbar"), sticky = "sew") } ## initialize values ## turn on various forms and buttons tcltk::tkconfigure(getAmelia("output.run"), state = "normal") #tcltk::tkconfigure(getAmelia("output.entry"), textvariable=getAmelia("outname")) #tcltk::tkconfigure(getAmelia("output.num"), textvariable=getAmelia("outnum")) tcltk::tkentryconfigure(getAmelia("main.menu.file"),"Edit 
Data...", state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"),"Draw Missingness Map", state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.file"),"Save Session...", state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"),"Output File Type...", state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"),"Output File Options...", state = "normal") tcltk::tkconfigure(getAmelia("missmapButton"), state = "normal") tcltk::tkconfigure(getAmelia("editDataButton"), state = "normal") tcltk::tkconfigure(getAmelia("plotHistButton"), state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "disabled") fillMainTree() ## Mark factors as ID by default. classes <- sapply(getAmelia("amelia.data"), class) factorVars <- which(classes == "factor" | classes == "character") if (!session) { opt.holder <- vector("numeric",ncol(getAmelia("amelia.data"))) names(opt.holder) <- getAmelia("varnames") putAmelia("noms", opt.holder) putAmelia("ords", opt.holder) putAmelia("logs", opt.holder) putAmelia("sqrt", opt.holder) putAmelia("lgstc", opt.holder) putAmelia("idvar", opt.holder) putAmelia("lags", opt.holder) putAmelia("leads", opt.holder) boundsholder <- matrix(NA, nrow = ncol(getAmelia("amelia.data")), ncol = 3) boundsholder[,1] <- 1:ncol(getAmelia("amelia.data")) rownames(boundsholder) <- getAmelia("varnames") putAmelia("num.poly",tcltk::tclVar("0")) putAmelia("intercs",tcltk::tclVar("0")) putAmelia("priorsmat", NULL) putAmelia("boundsmat", boundsholder) putAmelia("max.resample", tcltk::tclVar("1000")) putAmelia("outname", tcltk::tclVar(filestub)) putAmelia("outnum", tcltk::tclVar("5")) putAmelia("empri", tcltk::tclVar("0")) putAmelia("tsvar", NULL) putAmelia("csvar", NULL) id.holder <- opt.holder id.holder[factorVars] <- 1 putAmelia("idvar", id.holder) for (i in factorVars) { tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "ID") } } else { for (i in factorVars) { if (all(getAmelia("idvar")[i]==0, getAmelia("csvar")!=getAmelia("varnames")[i],getAmelia("noms")[i]==0)) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("varnames")[i], image = getAmelia("redFlagIcon")) } } } tcltk::tkentryconfigure(getAmelia("main.menu.options"), "Add Observations Priors...", state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"), "Numerical Options", state="normal") ## add the filename and rows/cols to statusbar tcltk::tkconfigure(getAmelia("statusbar.lab1b"), text = getAmelia("am.filename"), foreground = "blue") tcltk::tkconfigure(getAmelia("statusbar.n"), text = paste(nrow(getAmelia("amelia.data"))), foreground = "blue") tcltk::tkconfigure(getAmelia("statusbar.k"), text = paste(ncol(getAmelia("amelia.data"))), foreground = "blue") } save.session <- function() { if (is.null(getAmelia("amelia.data"))) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="You must load a dataset before you can save a session.", icon="error", type="ok") return(NULL) } file.select <- tcltk::tclvalue(tcltk::tkgetSaveFile(parent=getAmelia("gui"), filetypes="{{RData files} {.RData}} {{All files} *}")) putAmelia("session.flag", TRUE) sessionList <- c("am.directory","amelia.data", "am.filename", "am.filetype", "boundsmat", "csvar", "idvar", "lags", "leads", "lgstc", "logs", "noms", "num.poly", "ords", "outname.value", "outnum.value", "output.log", "outtype.value", "priorsmat", "runState", "seed.value", "session.flag", "splinestime.value", "sqrt", "tol.value", "tsvar", "empri.value", "intercs.value", "max.resample.value", "ameliaObject") 
putAmelia("empri.value", tcltk::tclvalue(getAmelia("empri"))) putAmelia("intercs.value", tcltk::tclvalue(getAmelia("intercs"))) putAmelia("max.resample.value", tcltk::tclvalue(getAmelia("max.resample"))) putAmelia("outname.value", tcltk::tclvalue(getAmelia("outname"))) putAmelia("outnum.value", tcltk::tclvalue(getAmelia("outnum"))) putAmelia("outtype.value", tcltk::tclvalue(getAmelia("outtype"))) putAmelia("seed.value", tcltk::tclvalue(getAmelia("seed"))) putAmelia("tol.value", tcltk::tclvalue(getAmelia("tol"))) putAmelia("splinestime.value", tcltk::tclvalue(getAmelia("splinestime"))) save(list = sessionList, envir=ameliaEnv, file = file.select) return(NULL) } load.session <- function() { ## diaglog to get RData file file.select <- tcltk::tclvalue(tcltk::tkgetOpenFile(parent=getAmelia("gui"), filetypes= "{{RData files} {.RData}} {{All files} *}")) if (nchar(file.select) <= 0) return(NULL) ## try loading the RData file and stop if it doesn't work tryloadsess <- try(load(file=file.select, envir=ameliaEnv), silent=TRUE) if (inherits(tryloadsess,"try-error")) { tcltk::tkmessageBox(parent=getAmelia("gui"),message="Error loading session. This is not a valid session file.",icon="error",type="ok") return(NULL) } ## make sure that the RData file loaded the right list if (!("session.flag" %in% ls(ameliaEnv)) | !getAmelia("session.flag")) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Not an Amelia session file. Try again.",icon="error",type="ok") return(NULL) } activateGUI(session = TRUE) nn <- ncol(getAmelia("amelia.data")) if (!is.null(getAmelia("tsvar"))) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("tsvar"), image = getAmelia("clockIcon")) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="normal") for (i in 1:nn) { if (getAmelia("lags")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "lag", "X") if (getAmelia("leads")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "lead", "X") } } if (!is.null(getAmelia("csvar"))) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("csvar"), image = getAmelia("userIcon")) tcltk::tkentryconfigure(getAmelia("main.menu.options"), 1, state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"), 1, variable = getAmelia("intercs")) } for (i in 1:nn) { if (getAmelia("idvar")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "ID") if (getAmelia("ords")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Ordinal") if (getAmelia("noms")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Nominal") if (getAmelia("logs")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Log") if (getAmelia("sqrt")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Square Root") if (getAmelia("lgstc")[i] == 1) tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "transform", "Logistic") } for (i in 1:nn) { bdMin <- getAmelia("boundsmat")[i,2] bdMax <- getAmelia("boundsmat")[i,3] if (!is.na(bdMin)) { treeBounds <- paste("[",bdMin,", ", bdMax,"]", sep = "") } else { treeBounds <- "" } tcltk::tkset(getAmelia("main.tree"), getAmelia("varnames")[i], "bounds", treeBounds) } tcltk::tcl("set", getAmelia("seed"), getAmelia("seed.value")) tcltk::tcl("set", getAmelia("tol"), getAmelia("tol.value")) tcltk::tcl("set", getAmelia("empri"), getAmelia("empri.value")) tcltk::tcl("set", getAmelia("outname"), getAmelia("outname.value")) 
tcltk::tcl("set", getAmelia("outnum"), getAmelia("outnum.value")) tcltk::tcl("set", getAmelia("outtype"), getAmelia("outtype.value")) tcltk::tcl("set", getAmelia("intercs"), getAmelia("intercs.value")) tcltk::tcl("set", getAmelia("splinestime"), getAmelia("splinestime.value")) tcltk::tcl("set", getAmelia("max.resample"), getAmelia("max.resample.value")) tcltk::tkgrid.remove(getAmelia("noimps.label")) tcltk::tkgrid.remove(getAmelia("error.label")) tcltk::tkgrid.remove(getAmelia("allgood.label")) tcltk::tkgrid(getAmelia(paste(getAmelia("runState"),"label", sep = ".")), row = 2, column = 7, sticky ="e", padx = 10) if (getAmelia("runState") != "noimps") { tcltk::tkentryconfigure(getAmelia("main.menu.output"), "Output Log", state="normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") } if (getAmelia("runState") == "allgood") { tcltk::tkentryconfigure(getAmelia("main.menu.output"), 0, state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.output"), 2, state = "normal") resave <- tcltk::tkmessageBox(parent = getAmelia("gui"), message = "Re-save imputed data sets to the working directory?", icon = "question", default = "yes", type = "yesno") if (tcltk::tclvalue(resave) == "yes") { amelia.save(getAmelia("ameliaObject"), tcltk::tclvalue(getAmelia("outname")), as.numeric(tcltk::tclvalue(getAmelia("outnum")))) } } return(NULL) } run.amelia <- function() { save.type <- as.numeric(tcltk::tclvalue(getAmelia("outtype"))) if (file.access(getwd(), mode = 2) == -1 & !(save.type %in% c(0,6))) { tcltk::tkmessageBox(parent = getAmelia("gui"), message = "The current working directory is not writable. Please select a different working directory or chose to not save the imputed data sets.", type ="ok") return(NULL) } ## Let's not allow people to overwrite their data. temp.list <- strsplit(getAmelia("am.filename"),"/")[[1]] filename <- temp.list[length(temp.list)] outfiles <- paste(tcltk::tclvalue(getAmelia("outname")), 1:as.numeric(tcltk::tclvalue(getAmelia("outnum"))), sep ="") save.type <- as.numeric(tcltk::tclvalue(getAmelia("outtype"))) exten <- switch(save.type, "csv","txt","dta","dta","RData") outfiles <- paste(outfiles, exten, sep = ".") outfiles <- paste(paste(temp.list[-length(temp.list)], collapse = "/"), outfiles, sep = "/") if (getAmelia("am.filename") %in% outfiles) { tcltk::tkmessageBox(parent = getAmelia("gui"), message = "Current settings would overwrite the original data. 
Please change the output file name.", icon = "error", type ="ok") return(NULL) } ts <- getAmelia("tsvar") cs <- getAmelia("csvar") nn <- ncol(getAmelia("amelia.data")) am.intercs <- as.logical(as.numeric(tcltk::tclvalue(getAmelia("intercs")))) sptime <- as.numeric(tcltk::tclvalue(getAmelia("splinestime"))) if (sptime == 0) if (am.intercs == FALSE) sptime <- NULL if (is.null(ts)) sptime <- NULL if (is.null(cs)) am.intercs <- FALSE id <- getAmelia("varnames")[getAmelia("idvar")==1] ord <- getAmelia("varnames")[getAmelia("ords")==1] nom <- getAmelia("varnames")[getAmelia("noms")==1] logs <- getAmelia("varnames")[getAmelia("logs")==1] sqrts <- getAmelia("varnames")[getAmelia("sqrt")==1] lgstc <- getAmelia("varnames")[getAmelia("lgstc")==1] amlags<- getAmelia("varnames")[getAmelia("lags")==1] amfut <- getAmelia("varnames")[getAmelia("leads")==1] if (length(id) == 0) id <- NULL if (length(ord) == 0) ord <- NULL if (length(nom) == 0) nom <- NULL if (length(logs) == 0) logs <- NULL if (length(sqrts)== 0) sqrts<- NULL if (length(lgstc)== 0) lgstc<- NULL if (length(amlags)==0) amlags <- NULL if (length(amfut)== 0) amfut<- NULL pmat <- getAmelia("priorsmat") colnames(pmat) <- NULL rownames(pmat) <- NULL bdmat <- getAmelia("boundsmat") colnames(bdmat) <- NULL rownames(bdmat) <- NULL bdmat <- bdmat[!is.na(bdmat[,2]) & !is.na(bdmat[,3]),,drop=FALSE] if (nrow(bdmat) == 0) bdmat <- NULL tol <- as.numeric(tcltk::tclvalue(getAmelia("tol"))) max.re <- as.numeric(tcltk::tclvalue(getAmelia("max.resample"))) num.imp <- as.numeric(tcltk::tclvalue(getAmelia("outnum"))) emp <- as.numeric(tcltk::tclvalue(getAmelia("empri"))) if (!is.na(as.numeric(tcltk::tclvalue(getAmelia("seed"))))) set.seed(as.numeric(tcltk::tclvalue(getAmelia("seed")))) tcltk::tkgrid.remove(getAmelia("noimps.label")) tcltk::tkgrid.remove(getAmelia("error.label")) tcltk::tkgrid.remove(getAmelia("allgood.label")) tcltk::tkgrid(getAmelia("runAmeliaProgress"), row = 2, column = 7, sticky ="e", padx = 10) amcall <- substitute(amelia(x = getAmelia("amelia.data"), m = num.imp, idvars = id, ts = ts, cs= cs, priors = pmat, lags = amlags, empri = emp, intercs = am.intercs, leads = amfut, splinetime = sptime, logs = logs, sqrts = sqrts, lgstc = lgstc, ords = ord, noms = nom, bounds = bdmat, max.resample = max.re, tolerance= tol)) putAmelia("output.log", c(getAmelia("output.log"), sub(" ","\n ",deparse(amcall, control=NULL, width.cutoff=60)),"\n\n")) putAmelia("wdForLastImputation", getwd()) ## run amelia! or at least try, and put the output in a list ## the name of the list will be the output name set by user output.connection <- textConnection(".Output", open="w", local = TRUE) sink(output.connection, type="output") putAmelia("ameliaObject", try(amelia.default(x = getAmelia("amelia.data"), m = as.numeric(tcltk::tclvalue(getAmelia("outnum"))), p2s = 1, idvars = id, ts = ts, cs = cs, priors = pmat, lags = amlags, empri = as.numeric(tcltk::tclvalue(getAmelia("empri"))), intercs = am.intercs, leads = amfut, splinetime = sptime, frontend = TRUE, logs = logs, sqrts = sqrts, lgstc = lgstc, ords = ord, noms = nom, bounds = bdmat, max.resample = as.numeric(tcltk::tclvalue(getAmelia("max.resample"))), tolerance= as.numeric(tcltk::tclvalue(getAmelia("tol")))), silent=TRUE)) sink(type = "output") putAmelia("output.log", c(getAmelia("output.log"), paste(textConnectionValue(output.connection), "\n"))) tcltk::tkgrid.remove(getAmelia("runAmeliaProgress")) tcltk::tkconfigure(getAmelia("runAmeliaProgress"), value = 0) ## check for errors in the process. 
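  ## (The checks below cover three outcomes: a try-error from amelia.default()
  ## is appended to the output log and surfaced via the log window with the run
  ## state set to "error"; an Amelia error code other than 1 or 2 is handled the
  ## same way; otherwise the imputations are saved with amelia.save() and the
  ## run state is set to "allgood".)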
if (inherits(getAmelia("ameliaObject"),"try-error")) { putAmelia("output.log", c(getAmelia("output.log"),"\nThere was an unexpected error in the execution of Amelia. \nDouble check all inputs for errors and take note of the error message:\n\n")) putAmelia("output.log", c(getAmelia("output.log"),paste(getAmelia("ameliaObject")))) #tcltk::tkconfigure(getAmelia("pass.fail.label"), foreground = "red") #tmp <- getAmelia("pass.fail") #tcltk::tclvalue(tmp) <- "Error! See log." show.output.log() tcltk::tkentryconfigure(getAmelia("main.menu.output"), 1, state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") tcltk::tkgrid(getAmelia("error.label"), row = 2, column = 7, sticky ="e", padx = 10) putAmelia("runState", "error") return(NULL) } if (all(getAmelia("ameliaObject")$code!=c(1,2))) { putAmelia("output.log", c(getAmelia("output.log"),"\n")) putAmelia("output.log", c(getAmelia("output.log"),paste("Amelia Error Code:", getAmelia("ameliaObject")[[1]],"\n", getAmelia("ameliaObject")[[2]]))) #tcltk::tkconfigure(getAmelia("pass.fail.label"), foreground = "red") #tmp <- getAmelia("pass.fail") #tcltk::tclvalue(tmp) <- "Error! See log." show.output.log() tcltk::tkentryconfigure(getAmelia("main.menu.output"), 1, state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") tcltk::tkgrid(getAmelia("error.label"), row = 2, column = 7, sticky ="e", padx = 10) putAmelia("runState", "error") } else { putAmelia("output.log", c(getAmelia("output.log"),"Amelia has run successfully.\n")) tcltk::tkentryconfigure(getAmelia("main.menu.output"), 0, state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.output"), 1, state = "normal") tcltk::tkentryconfigure(getAmelia("main.menu.output"), 2, state = "normal") tcltk::tkconfigure(getAmelia("showLogButton"), state = "normal") amelia.save(getAmelia("ameliaObject"), tcltk::tclvalue(getAmelia("outname")), as.numeric(tcltk::tclvalue(getAmelia("outnum")))) tcltk::tkgrid(getAmelia("allgood.label"), row = 2, column = 7, sticky ="e", padx = 10) putAmelia("runState", "allgood") } } amelia.save <- function(out,outname,m) { save.type <- as.numeric(tcltk::tclvalue(getAmelia("outtype"))) if (save.type == 1) { write.amelia(out, file.stem = outname, format = "csv", row.names = FALSE) } if (save.type == 2) { write.amelia(out, file.stem = outname, extension = "txt", format = "table", row.names = FALSE) } if (save.type == 3) { write.amelia(out, file.stem = outname, format = "dta", version = 6) } if (save.type == 4) { write.amelia(out, file.stem = outname, format = "dta", version = 7) } if (save.type == 5) { write.amelia(out, file.stem = outname, format = "dta", version = 8) } if (save.type == 6) { write.amelia(out, file.stem = outname, format = "dta", version = 10) } if (save.type == 7) { write.amelia(out, file.stem = outname, format = "dta", separate = FALSE, version = 10) } if (save.type == 8) { save(list = "ameliaObject", envir = ameliaEnv, file = paste(outname, ".RData", sep = "")) } } set.out<-function(...) 
{ putAmelia("output.select",as.numeric(tcltk::tkget(getAmelia("output.drop.box")))) } setTS <- function() { tsvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] if (length(tsvartemp) > 1) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Only one variable can be set as the times-series variable.",icon="error",type="ok") return(NULL) } if (!is.null(getAmelia("csvar"))) { if (getAmelia("csvar") == tsvartemp) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="A variable cannot be both the time-series and cross-section index.",icon="error",type="ok") return(NULL) } } if (!(sapply(getAmelia("amelia.data"), class)[tsvartemp] %in% c("numeric","integer"))) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="The time-series index must be numeric.",icon="error",type="ok") return(NULL) } children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"children","")), " ")[[1]] for(i in setdiff(children, getAmelia("csvar"))) tcltk::tcl(getAmelia("main.tree"), "item", i , image="") tcltk::tcl(getAmelia("main.tree"), "item", tsvartemp, image = getAmelia("clockIcon")) putAmelia("tsvar", tsvartemp) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="normal") dropTrans() } unsetTS <- function() { tsvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you unset the time-series variable, you will lose any time-series settings such as lags, leads, or polynomials of time. Unset the time-series variable?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) tcltk::tcl(getAmelia("main.tree"), "item", tsvartemp, image = "") putAmelia("tsvar", NULL) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="disabled") putAmelia("lags",vector("numeric",ncol(getAmelia("amelia.data")))) putAmelia("leads",vector("numeric",ncol(getAmelia("amelia.data")))) children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"children","")), " ")[[1]] for(i in children) { tcltk::tkset(getAmelia("main.tree"), i, "lag", "") tcltk::tkset(getAmelia("main.tree"), i, "lead", "") } } unsetCS <- function() { csvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] sure<-tcltk::tkmessageBox(parent=getAmelia("gui"), message="If you unset the cross-section variable, you will lose any cross-section settings. 
Unset the cross-section variable?",icon="question",type="yesno") if (tcltk::tclvalue(sure) == "no") return(NULL) tcltk::tcl(getAmelia("main.tree"), "item", csvartemp, image = "") putAmelia("csvar", NULL) tcltk::tkentryconfigure(getAmelia("main.menu.options"),0, state="normal") if (is.factor(getAmelia("amelia.data")[,csvartemp]) | is.character(getAmelia("amelia.data")[,csvartemp])) { tcltk::tcl(getAmelia("main.tree"), "item", csvartemp, image = getAmelia("redFlagIcon")) } } setCS <- function() { csvartemp <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"selection")), " ")[[1]] if (length(csvartemp) > 1) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="Only one variable can be set as the cross-section variable.",icon="error",type="ok") return(NULL) } if (!is.null(getAmelia("tsvar"))) { if (getAmelia("tsvar") == csvartemp) { tcltk::tkmessageBox(parent=getAmelia("gui"), message="A variable cannot be both the time-series and cross-section index.",icon="error",type="ok") return(NULL) } } if (!is.null(getAmelia("csvar"))) { if (is.factor(getAmelia("amelia.data")[,getAmelia("csvar")]) | is.character(getAmelia("amelia.data")[,getAmelia("csvar")])) { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("csvar"), image = getAmelia("redFlagIcon")) } else { tcltk::tcl(getAmelia("main.tree"), "item", getAmelia("csvar"), image = "") } } dropTrans() tcltk::tcl(getAmelia("main.tree"), "item", csvartemp, image = getAmelia("userIcon")) putAmelia("csvar", csvartemp) tcltk::tkentryconfigure(getAmelia("main.menu.options"),1,state="normal") tcltk::tkentryconfigure(getAmelia("main.menu.options"), 1, variable = getAmelia("intercs")) } fillMainTree <- function() { children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"children","")), " ")[[1]] tcltk::tkdelete(getAmelia("main.tree"), children) for (i in names(getAmelia("amelia.data"))) { if (is.factor(getAmelia("amelia.data")[,i]) | is.character(getAmelia("amelia.data")[,i])) { vals <- c("","","","","(factor)","...","...","...") vals <- c(vals,paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep="/")) } else { vals <- c(min(getAmelia("amelia.data")[,i],na.rm=T), max(getAmelia("amelia.data")[,i],na.rm=T), mean(getAmelia("amelia.data")[,i],na.rm=T), sd(getAmelia("amelia.data")[,i],na.rm=T)) vals <- signif(vals, digits = 4) vals <- c("","","","", vals, paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep="/")) } tcltk::tkinsert(getAmelia("main.tree"),"","end", id = i,tag="normal",text = i, values = vals) } bandTree() return() } #' Interactive GUI for Amelia #' #' Brings up the AmeliaView graphical interface, which allows users to load datasets, #' manage options and run Amelia from a traditional windowed environment. #' #' @details #' Requires the tcltk package. 
#' AmeliaView<-function() { ##Preamble requireNamespace("tcltk") || stop("The package 'tcltk' is required") if (.Platform$OS.type != "windows") { tcltk::tcl("ttk::style", "theme", "use", "clam") tcltk::tkfont.configure("TkHeadingFont", weight="normal") tcltk::tkfont.configure("TkCaptionFont", weight="normal") } ## If the current working directory is not writable, move to a ## sensible default locations: the HOME dir if (file.access(getwd(), mode = 2) == -1) { if (file.access(Sys.getenv("HOME"), mode = 0) == 0 & file.access(Sys.getenv("HOME"), mode = 2) == 0) { setwd(Sys.getenv("HOME")) } } tcltk::tclServiceMode(on=FALSE) putAmelia("outname", tcltk::tclVar("outdata")) putAmelia("outnum", tcltk::tclVar("5")) putAmelia("empri", tcltk::tclVar("0")) putAmelia("tol", tcltk::tclVar("0.0001")) putAmelia("amelia.data",NULL) putAmelia("am.filename",NULL) putAmelia("varnames", NULL) putAmelia("tsvar", NULL) putAmelia("csvar", NULL) putAmelia("varmin", NULL) putAmelia("varmax", NULL) putAmelia("runState", "noimps") putAmelia("session.flag", FALSE) putAmelia("intercs",tcltk::tclVar("0")) putAmelia("splinestime",tcltk::tclVar("0")) putAmelia("outtype", tcltk::tclVar("1")) putAmelia("max.resample", tcltk::tclVar("1000")) putAmelia("inname", tcltk::tclVar("")) putAmelia("seed", tcltk::tclVar("")) putAmelia("output.log", NULL) putAmelia("boundMin", tcltk::tclVar("")) putAmelia("boundMax", tcltk::tclVar("")) putAmelia("wdForLastImputation", getwd()) output.types <- c("(no save)", "CSV", "Tab Delimited", "Stata 6", "Stata 7", "Stata 8/9", "Stata 10+", "Stata 10+ (stacked)", "RData") ampath <- find.package(package = "Amelia")[1] ameliaFile <- file.path(ampath, "gui/gallery19.gif") goFile <- file.path(ampath, "gui/action_go.gif") tableFile <- file.path(ampath, "gui/table.gif") rFile <- file.path(ampath, "gui/page-R.gif") dtaFile <- file.path(ampath, "gui/page_dta.gif") spssFile <- file.path(ampath, "gui/page_spss.gif") clockFile <- file.path(ampath, "gui/icon_clock.gif") userFile <- file.path(ampath, "gui/icon_user.gif") upFile <- file.path(ampath, "gui/arrow_up.gif") downFile <- file.path(ampath, "gui/arrow_down.gif") worldFile <- file.path(ampath, "gui/icon_world.gif") pageTextFile <- file.path(ampath, "gui/page_text.gif") pageEditFile <- file.path(ampath, "gui/page_edit.gif") histFile <- file.path(ampath, "gui/histogram.gif") saveFile <- file.path(ampath, "gui/action_save.gif") pageUpFile <- file.path(ampath, "gui/page_up.gif") redStopFile <- file.path(ampath, "gui/action_stop.gif") redFlagFile <- file.path(ampath, "gui/flag_red.gif") greenCheckFile <- file.path(ampath, "gui/icon_accept.gif") putAmelia("ameliaPic", tcltk::tkimage.create("photo", file=ameliaFile)) putAmelia("action.go.icon", tcltk::tkimage.create("photo", file = goFile)) putAmelia("tablePic", tcltk::tkimage.create("photo", file = tableFile)) putAmelia("rPic", tcltk::tkimage.create("photo", file = rFile)) putAmelia("dtaPic", tcltk::tkimage.create("photo", file = dtaFile)) putAmelia("spssPic", tcltk::tkimage.create("photo", file = spssFile)) putAmelia("clockIcon", tcltk::tkimage.create("photo", file = clockFile)) putAmelia("userIcon", tcltk::tkimage.create("photo", file = userFile)) putAmelia("worldIcon", tcltk::tkimage.create("photo", file = worldFile)) putAmelia("upArrowIcon", tcltk::tkimage.create("photo", file = upFile)) putAmelia("downArrowIcon", tcltk::tkimage.create("photo", file = downFile)) putAmelia("histIcon", tcltk::tkimage.create("photo", file = histFile)) putAmelia("saveIcon", tcltk::tkimage.create("photo", file = saveFile)) 
putAmelia("pageUpIcon", tcltk::tkimage.create("photo", file = pageUpFile)) putAmelia("redFlagIcon", tcltk::tkimage.create("photo", file = redFlagFile)) putAmelia("redStopIcon", tcltk::tkimage.create("photo", file = redStopFile)) putAmelia("greenCheckIcon", tcltk::tkimage.create("photo", file = greenCheckFile)) putAmelia("pageTextIcon", tcltk::tkimage.create("photo", file = pageTextFile)) putAmelia("pageEditIcon", tcltk::tkimage.create("photo", file = pageEditFile)) putAmelia("gui", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("gui"), "AmeliaView") tcltk::tkwm.protocol(getAmelia("gui"),"WM_DELETE_WINDOW", function() main.close()) tcltk::tkwm.geometry(getAmelia("gui"), "800x500") ##Menu putAmelia("main.menu", tcltk::tkmenu(getAmelia("gui"))) putAmelia("main.menu.file", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.demo", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.import", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.options", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.splines", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.output", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.help", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.variables", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0, postcommand = variableOptionsPost)) putAmelia("main.menu.trans", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) putAmelia("main.menu.outfile", tcltk::tkmenu(getAmelia("main.menu"), tearoff=0)) tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Load R Data File...",command=function()loadRData(), underline = 5) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import comma-separated value data...", command=loadCSV, underline = 7) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import tab-delimited data...", command=loadTAB, underline = 7) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import Stata dta file...", command=loadStata, underline = 13) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import SPSS data...", command=loadSPSS, underline = 7) tcltk::tkadd(getAmelia("main.menu.import"),"command",label="Import SAS Transport data...", command=loadSAS, underline = 8) tcltk::tkadd(getAmelia("main.menu.file"),"cascade",menu=getAmelia("main.menu.import"),label="Import Data", underline = 0) tcltk::tkadd(getAmelia("main.menu.demo"),"command",label="africa", command=function() loadDemo(name="africa"), underline = 0) tcltk::tkadd(getAmelia("main.menu.demo"),"command",label="freetrade", command=function() loadDemo(name="freetrade"), underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"cascade",menu=getAmelia("main.menu.demo"),label="Load Package Data", underline = 5) tcltk::tkadd(getAmelia("main.menu.file"),"command",command = setWorkingDir,label="Set Working Directory...", underline = 4) tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Edit Data...", command=function(){putAmelia("amelia.data", edit(getAmelia("amelia.data")));updateTreeStats()},state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"separator") tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Load Session...",command=function()load.session(), underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Save Session...",command=function()save.session(), state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.file"),"separator") 
tcltk::tkadd(getAmelia("main.menu.file"),"command",label="Quit Amelia",command=function()main.close(), underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Set as Time-Series Variable", command = setTS, state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Set as Cross-Section Variable", command = setCS, state = "disabled", underline = 7) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Unset as Time-Series Variable", command = unsetTS, state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Unset as Cross-Section Variable", command = unsetCS, state = "disabled", underline = 23) tcltk::tkadd(getAmelia("main.menu.variables"),"separator") tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Add Lag", command = function() addLag(), state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Add Lead", command = function() addLead(), state = "disabled", underline = 4) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Remove Lag", command = function() dropLag(), state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Remove Lead", command = function() dropLead(), state = "disabled", underline = 1) tcltk::tkadd(getAmelia("main.menu.variables"),"separator") tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Plot Histogram(s) of Selected", command = plotHist, state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Log", command = function(x) setTrans("logs"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Square Root", command = function(x) setTrans("sqrt"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Logistic", command = function(x) setTrans("lgstc"), underline = 1) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Nominal", command = function(x) setTrans("noms"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "Ordinal", command = function(x) setTrans("ords"), underline = 0) tcltk::tkadd(getAmelia("main.menu.trans"), "command", label = "ID Variable", command = function(x) setTrans("idvar"), underline = 0) tcltk::tkadd(getAmelia("main.menu.variables"), "cascade", label = "Add Transformation...", menu = getAmelia("main.menu.trans"), state = "disabled", underline = 4) tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Remove Transformations", command = dropTrans, state = "disabled", underline = 2) tcltk::tkadd(getAmelia("main.menu.variables"),"separator") tcltk::tkadd(getAmelia("main.menu.variables"), "command", label = "Add or Edit Bounds", command = addBounds, state = "disabled", underline = 12) for (i in 0:10) tcltk::tkadd(getAmelia("main.menu.splines"), "radiobutton", variable = getAmelia("splinestime"), label = paste(i,"knots"), value = i, underline = 0) tcltk::tkadd(getAmelia("main.menu.options"), "cascade", label = "Splines of Time with...", menu = getAmelia("main.menu.splines"), state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.options"), "checkbutton", label = "Interact Spline With Cross-Section?", variable = getAmelia("intercs"), onvalue=1,offvalue=0, state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.options"),"separator") tcltk::tkadd(getAmelia("main.menu.options"),"command", label = "Add Observations Priors...", command 
= gui.pri.setup, state="disabled", underline = 17) tcltk::tkadd(getAmelia("main.menu.options"), "separator") tcltk::tkadd(getAmelia("main.menu.options"), "command", label = "Numerical Options", command = buildNumericalOptions, state = "disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.options"), "command", label = "Draw Missingness Map", command = drawMissMap, state="disabled", underline = 5) tcltk::tkadd(getAmelia("main.menu.options"), "command", label = "Output File Options...", command = buildOutputOptions, state = "disabled", underline = 0) for (i in 1:length(output.types)) { tcltk::tkadd(getAmelia("main.menu.outfile"), "radiobutton", variable = getAmelia("outtype"), label = output.types[i], value = i-1) } tcltk::tkadd(getAmelia("main.menu.options"), "cascade", label = "Output File Type...", menu = getAmelia("main.menu.outfile"), state = "disabled", underline = 7) tcltk::tkadd(getAmelia("main.menu.output"),"command", label = "Imputation Diagnostics...", command = gui.diag.setup, state="disabled", underline = 11) tcltk::tkadd(getAmelia("main.menu.output"),"command", label = "Output Log", command = show.output.log, state="disabled", underline = 0) tcltk::tkadd(getAmelia("main.menu.output"),"command", label = "Open Folder Containing Imputated Data", command = showImputedFiles, state="disabled", underline = 12) tcltk::tkadd(getAmelia("main.menu.help"),"command",label="Amelia Website",command= function()browseURL("http://gking.harvard.edu/amelia/"), underline = 7) tcltk::tkadd(getAmelia("main.menu.help"),"command",label="Documentation",command= function() browseURL("http://gking.harvard.edu/amelia/docs/"), underline = 0) tcltk::tkadd(getAmelia("main.menu.help"),"command",label="About...",command= function()buildAboutDialog(), underline = 0) tcltk::tkadd(getAmelia("main.menu"),"cascade",label="File", menu = getAmelia("main.menu.file"), underline = 0) tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Variables", menu = getAmelia("main.menu.variables"), underline = 0) tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Options", menu = getAmelia("main.menu.options"), underline = 0) tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Output", menu = getAmelia("main.menu.output"), underline = 1) tcltk::tkadd(getAmelia("main.menu"),"cascade",label="Help", menu = getAmelia("main.menu.help"), underline = 0) tcltk::tkconfigure(getAmelia("gui"), menu = getAmelia("main.menu")) ## Welcome Screen putAmelia("gui.welcome", tcltk::ttkframe(getAmelia("gui"))) ameliaPicLabel <- tcltk::ttklabel(getAmelia("gui.welcome"), relief = "groove", image = getAmelia("ameliaPic")) loadRButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Load R Data", image = getAmelia("rPic"), compound = "top", command = loadRData) loadCSVButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Import CSV", image = getAmelia("tablePic"), compound = "top", command = loadCSV) loadStataButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Import STATA", image = getAmelia("dtaPic"), compound = "top", command = loadStata) loadSPSSButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Import SPSS", image = getAmelia("spssPic"), compound = "top", command = loadSPSS) loadDemoButton <- tcltk::ttkbutton(getAmelia("gui.welcome"), text = "Load Demo", image = getAmelia("tablePic"), compound = "top", command = function () loadDemo(name = "africa")) tcltk::tkgrid(ameliaPicLabel, row = 0, column = 0, columnspan = 6, padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(getAmelia("gui.welcome"), text=paste("Welcome 
to AmeliaView ",packageDescription("Amelia", fields="Version"), "!", sep="")), row = 1, column = 0, columnspan = 6, padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(getAmelia("gui.welcome"), text="Please load a dataset:"), row = 2, column = 0, columnspan = 6, padx = 10, pady = 10) tcltk::tkgrid(loadRButton, row = 3, column = 0, padx = 10, pady = 10) tcltk::tkgrid(loadCSVButton, row = 3, column = 1, padx = 10, pady = 10) tcltk::tkgrid(loadStataButton, row = 3, column = 2, padx = 10, pady = 10) tcltk::tkgrid(loadSPSSButton, row = 3, column = 3, padx = 10, pady = 10) tcltk::tkgrid(loadDemoButton, row = 3, column = 4, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("gui.welcome"), row = 0, column = 0) ##Frame putAmelia("gui.skel", tcltk::ttkpanedwindow(getAmelia("gui"), orient = "vertical")) ############### ### Toolbar ### ############### toolbar <- tcltk::ttkframe(getAmelia("gui.skel")) putAmelia("loadSessionButton", tcltk::ttkbutton(toolbar, text = "Load Session", command = load.session, image = getAmelia("pageUpIcon"), compound = "top", style="Toolbutton")) putAmelia("saveSessionButton", tcltk::ttkbutton(toolbar, text = "Save Session", command = save.session, image = getAmelia("saveIcon"), compound = "top", style="Toolbutton")) putAmelia("plotHistButton", tcltk::ttkbutton(toolbar, text = "Plot Histogram", state = "disabled", command = plotHist, image = getAmelia("histIcon"), compound = "top", style="Toolbutton")) putAmelia("editDataButton", tcltk::ttkbutton(toolbar, text = "Edit Data", state = "disabled", command = function(){putAmelia("amelia.data", edit(getAmelia("amelia.data")));updateTreeStats()}, image = getAmelia("pageEditIcon"), compound = "top", style="Toolbutton")) putAmelia("missmapButton", tcltk::ttkbutton(toolbar, text = "Missingness Map", state = "disabled", command = drawMissMap, image = getAmelia("worldIcon"), compound = "top", style="Toolbutton")) putAmelia("output.run", tcltk::ttkbutton(toolbar,text="Impute!", state = "disabled", command = run.amelia, image = getAmelia("action.go.icon"), compound = "top", style="Toolbutton")) putAmelia("showLogButton", tcltk::ttkbutton(toolbar, text = "Output Log", state = "disabled", command = show.output.log, image = getAmelia("pageTextIcon"), compound = "top", style="Toolbutton")) tcltk::tkgrid(getAmelia("loadSessionButton"), row =0, column = 0, sticky = "ew") tcltk::tkgrid(getAmelia("saveSessionButton"), row =0, column = 1, sticky = "ew") tcltk::tkgrid(tcltk::ttkseparator(toolbar, orient = "vertical"), row = 0, column = 2, padx=5, pady=5, sticky="ns") tcltk::tkgrid(getAmelia("plotHistButton"), row = 0, column = 3, sticky = "ew") tcltk::tkgrid(getAmelia("editDataButton"), row = 0, column = 4, sticky = "ew") tcltk::tkgrid(getAmelia("missmapButton"), row = 0, column = 5, sticky="ew") tcltk::tkgrid(tcltk::ttkseparator(toolbar, orient = "vertical"), row = 0, column = 6, padx=5, pady=5, sticky="ns") tcltk::tkgrid(getAmelia("output.run"), row = 0 , column = 7, sticky = "ew") tcltk::tkgrid(getAmelia("showLogButton"), row = 0, column = 8, sticky = "ew") ########################## ### Variable Dashboard ### ########################## dashboard <- tcltk::ttkframe(getAmelia("gui.skel")) yscr <- tcltk::ttkscrollbar(dashboard, orient = "vertical", command=function(...)tcltk::tkyview(getAmelia("main.tree"),...)) xscr <- tcltk::ttkscrollbar(dashboard, orient = "horizontal", command=function(...)tcltk::tkxview(getAmelia("main.tree"),...)) sorts <- rep(FALSE, times = 10) names(sorts) <- c("#0","transform","lag", "lead","bounds", "min", "max", "mean", 
"sd", "miss") putAmelia("sortDirs", sorts) putAmelia("main.tree", tcltk::ttktreeview(dashboard, columns = "transform lag lead bounds min max mean sd miss", yscrollcommand=function(...)tcltk::tkset(yscr,...), xscrollcommand=function(...)tcltk::tkset(xscr,...), selectmode = "extended")) #putAmelia("sum.right.click",tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) ) #tcltk::tkadd(getAmelia("sum.right.click"), "command", label = "Plot Histogram of Selected", command = function() sum.plot()) #tcltk::tkbind(getAmelia("main.tree"), "", RightClick) #putAmelia("sum.right.dis",tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) ) #tcltk::tkadd(getAmelia("sum.right.dis"), "command", label = "Plot Histogram of Selected", state = "disabled") tcltk::tcl(getAmelia("main.tree"), "column", "#0", width = 70, minwidth = 80) tcltk::tcl(getAmelia("main.tree"), "column", 0, width = 78, minwidth = 78, anchor = "center") tcltk::tcl(getAmelia("main.tree"), "column", 1, width = 20, minwidth = 20, anchor = "center") tcltk::tcl(getAmelia("main.tree"), "column", 2, width = 20, minwidth = 20, anchor = "center") tcltk::tcl(getAmelia("main.tree"), "column", 3, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 4, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 5, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 6, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 7, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "column", 8, width = 50, minwidth = 50, anchor = "e") tcltk::tcl(getAmelia("main.tree"), "heading", "#0", text = "Variable", command = function() sortTreeBy("#0")) tcltk::tcl(getAmelia("main.tree"), "heading", 0, text = "Transformation", command = function() sortTreeBy("transform")) tcltk::tcl(getAmelia("main.tree"), "heading", 1, text = "Lag", command = function() sortTreeBy("lag")) tcltk::tcl(getAmelia("main.tree"), "heading", 2, text = "Lead", command = function() sortTreeBy("lead")) tcltk::tcl(getAmelia("main.tree"), "heading", 3, text = "Bounds", command = function() sortTreeBy("lower")) tcltk::tcl(getAmelia("main.tree"), "heading", 4, text = "Min", command = function() sortTreeBy("min")) tcltk::tcl(getAmelia("main.tree"), "heading", 5, text = "Max", command = function() sortTreeBy("max")) tcltk::tcl(getAmelia("main.tree"), "heading", 6, text = "Mean", command = function() sortTreeBy("mean")) tcltk::tcl(getAmelia("main.tree"), "heading", 7, text = "SD", command = function() sortTreeBy("sd")) tcltk::tcl(getAmelia("main.tree"), "heading", 8, text = "Missing", command = function() sortTreeBy("miss")) tcltk::tkbind(getAmelia("main.tree"), "", mainTreeRightClick) ## Windows 7 doesn't handle treeview selection correctly selectbg <- tcltk::tcl("ttk::style","configure",".","-selectbackground") selectfg <- tcltk::tcl("ttk::style","configure",".","-selectforeground") tcltk::tktag.configure(getAmelia("main.tree"),"normal", background="white") tcltk::tktag.configure(getAmelia("main.tree"),"selected", background=selectbg, foreground=selectfg) tcltk::tkbind(getAmelia("main.tree"),"<>",function() refreshSelection(getAmelia("main.tree"))) putAmelia("legendFrame", tcltk::ttkframe(dashboard)) tcltk::tkgrid(tcltk::ttklabel(getAmelia("legendFrame"), text="= Time-Series Variable", image = getAmelia("clockIcon"), compound = "left"), row = 0, column = 0, sticky="w", padx = 5) tcltk::tkgrid(tcltk::ttklabel(getAmelia("legendFrame"), text="= 
Cross-Section Variable", image = getAmelia("userIcon"), compound = "left"), row = 0, column = 1, sticky="w", padx = 5) tcltk::tkgrid(tcltk::ttklabel(getAmelia("legendFrame"), text="= Unhandled Factor Variable", image = getAmelia("redFlagIcon"), compound = "left"), row = 0, column = 2, sticky="w", padx = 5) tcltk::tkgrid(getAmelia("main.tree"), row=0,column=0, sticky="news") tcltk::tkgrid(yscr, row = 0, column = 1, sticky = "ns") tcltk::tkgrid(xscr, row = 1, column = 0, sticky = "ew") tcltk::tkgrid(getAmelia("legendFrame"), row = 2, column = 0, sticky = "ew") tcltk::tkgrid.rowconfigure(dashboard, 0, weight = 1) tcltk::tkgrid.columnconfigure(dashboard, 0, weight = 1) ##Output Frame ##output options, run button, diag ##output options ##grid the whole thing tcltk::tkadd(getAmelia("gui.skel"), toolbar) tcltk::tkadd(getAmelia("gui.skel"), dashboard) tcltk::tkgrid(toolbar, row = 0, column = 1, padx = 2, pady=2, sticky = "ew") tcltk::tkgrid(dashboard,row = 1, column = 1, sticky = "news", padx = 10, pady = 5) tcltk::tkgrid.rowconfigure(getAmelia("gui.skel"), 1, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("gui.skel"), 1, weight = 1) #tcltk::tkgrid(gui.skel,sticky="news") tcltk::tkgrid.rowconfigure(getAmelia("gui"), 0, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("gui"), 0, weight = 1) ##statusbar at the bottom. putAmelia("statusbar", tcltk::ttkframe(getAmelia("gui"), relief = "groove", borderwidth = 3)) statusbar.lab1a <- tcltk::ttklabel(getAmelia("statusbar"), text = "Data Loaded:", anchor = "w", padding = c(2,0)) putAmelia("statusbar.lab1b", tcltk::ttklabel(getAmelia("statusbar"), text = "Unspecified", relief = "sunken", anchor = "w", foreground = "red",padding = c(2,0), width = 35)) statusbar.nlab <- tcltk::ttklabel(getAmelia("statusbar"), text = "Obs:", anchor="e", padding = c(2,0)) putAmelia("statusbar.n", tcltk::ttklabel(getAmelia("statusbar"), text = "----", relief = "sunken", anchor = "w", foreground = "red",padding = c(2,0,0,0), width = 6)) statusbar.klab <- tcltk::ttklabel(getAmelia("statusbar"), text = "Vars:", anchor="e", padding = c(2,0)) putAmelia("statusbar.k", tcltk::ttklabel(getAmelia("statusbar"), text = "----", relief = "sunken", anchor = "w", foreground = "red", padding = c(2,0,0,0), width = 6)) putAmelia("runAmeliaProgress", tcltk::ttkprogressbar(getAmelia("statusbar"), value = 0, length = 200, mode = "determinate")) putAmelia("error.label", tcltk::ttkbutton(getAmelia("statusbar"), text = "Error! 
See Output Log.", image = getAmelia("redStopIcon"), compound = "left", style = "Toolbutton", command = show.output.log))
putAmelia("allgood.label", tcltk::ttkbutton(getAmelia("statusbar"), text = "Successful Imputation.", image = getAmelia("greenCheckIcon"), compound = "left", style = "Toolbutton", command = showImputedFiles))
putAmelia("noimps.label", tcltk::ttklabel(getAmelia("statusbar"), text = "No imputations run.", justify = "right"))
tcltk::tkgrid(statusbar.lab1a,row = 2, column = 1, sticky="w")
tcltk::tkgrid(getAmelia("statusbar.lab1b"),row = 2, column = 2, sticky="w")
tcltk::tkgrid(statusbar.nlab,row = 2, column = 3, sticky="w")
tcltk::tkgrid(getAmelia("statusbar.n"),row = 2, column = 4, sticky="w")
tcltk::tkgrid(statusbar.klab,row = 2, column = 5, sticky="w")
tcltk::tkgrid(getAmelia("statusbar.k"), row = 2, column = 6, sticky = "w")
tcltk::tkgrid(getAmelia("noimps.label"), row = 2, column = 7, sticky ="e", padx = 10)
tcltk::tkgrid.rowconfigure(getAmelia("statusbar"), 2, weight = 1)
#tcltk::tkgrid(statusbar, sticky = "sew")
bindTooltip(widget = "output.run", tip = "Run Amelia on your input dataset with the current settings.")
# bindTooltip(widget = "output.diag", tip = "Post-imputation checks for problems in the imputation.")
bindTooltip(widget = "runAmeliaProgress", tip = "Amelia is currently running and this shows its progress. On large datasets, Amelia may take quite some time.")
# bindTooltip(widget = "output.drop.label", tip = "Set the file format for saving the imputed datasets, if you want to save them.")
# bindTooltip(widget = "output.drop.box", tip = "Set the file format for saving the imputed datasets, if you want to save them.")
bindTooltip(widget = "showLogButton", tip = "Show the output log for the Amelia run. From here, you can save the output. Look here if something went wrong.")
bindTooltip(widget = "missmapButton", tip = "Show a map of the missingness in the data.")
bindTooltip(widget = "editDataButton", tip = "Edit individual cells of the data set.")
bindTooltip(widget = "plotHistButton", tip = "Plot histogram(s) of the selected variable(s).")
bindTooltip(widget = "loadSessionButton", tip = "Load a previously saved Amelia session. This will remove any current settings.")
bindTooltip(widget = "saveSessionButton", tip = "Save the current Amelia session. This will save the data, settings, and any imputed data in the Amelia session.")
bindTooltip(widget = "legendFrame", tip = "A legend for the icons used in the variable dashboard.")
bindTooltip(widget = "noimps.label", tip = "No imputations have been run yet. To run Amelia, hit the 'Impute!' button in the toolbar.")
bindTooltip(widget = "allgood.label", tip = "Amelia has run successfully! You can now run imputation diagnostics from the 'Output' menu above. If you chose to save the imputations to file, they should be saved in the working directory. Click here to open the containing folder.")
bindTooltip(widget = "error.label", tip = "There was an error the last time you ran Amelia. 
Click here to open the output log to identify the problem and to see how to fix it.") ## these commands force R to wait for tcltk if (.Platform$OS.type == "windows") tcltk::tkwm.iconbitmap(getAmelia("gui"),file.path(find.package(package = "Amelia")[1], "gui/amelia.ico")) tcltk::tkraise(getAmelia("gui")) tcltk::tkwm.deiconify(getAmelia("gui")) tcltk::tkfocus(getAmelia("gui")) tcltk::tclServiceMode(on = TRUE) tcltk::tkwait.window(getAmelia("gui")) } buildNumericalOptions <- function() { onCancel <- function(){ tcltk::tcl("set", getAmelia("seed"), getAmelia("temp.seed")) tcltk::tcl("set", getAmelia("tol"), getAmelia("temp.tol")) tcltk::tkwm.withdraw(getAmelia("numericalWindow")) tcltk::tkgrab.release(getAmelia("numericalWindow")) tcltk::tkfocus(getAmelia("gui")) } putAmelia("temp.seed", tcltk::tclvalue(getAmelia("seed"))) putAmelia("temp.tol", tcltk::tclvalue(getAmelia("tol"))) if (exists("numericalWindow", envir = ameliaEnv)) { tcltk::tkwm.deiconify(getAmelia("numericalWindow")) tcltk::tkraise(getAmelia("numericalWindow")) return() } putAmelia("numericalWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("numericalWindow"), "Numerical Options") numericalBox <- tcltk::ttkframe(getAmelia("numericalWindow")) putAmelia("output.seedlab", tcltk::ttklabel(numericalBox, text="Seed:")) putAmelia("output.seed", tcltk::ttkentry(numericalBox, width="7", textvariable=getAmelia("seed"))) putAmelia("output.tollab", tcltk::ttklabel(numericalBox, text="Tolerance:")) putAmelia("output.tol", tcltk::ttkentry(numericalBox, width="7", textvariable=getAmelia("tol"))) putAmelia("empri.ent", tcltk::ttkentry(numericalBox, width=7,textvariable = getAmelia("empri"))) putAmelia("empri.label", tcltk::ttklabel(numericalBox,text="Ridge prior:")) putAmelia("maxre.ent", tcltk::ttkentry(numericalBox, width=7,textvariable = getAmelia("max.resample"))) putAmelia("maxre.label", tcltk::ttklabel(numericalBox,text="Maximum Resample for Bounds:")) buttonBox <- tcltk::ttkframe(numericalBox) okButton <- tcltk::ttkbutton(buttonBox, text = "OK", width = 10, command = function() {tcltk::tkwm.withdraw(getAmelia("numericalWindow"));tcltk::tkgrab.release(getAmelia("numericalWindow"));tcltk::tkfocus(getAmelia("gui"))}) cancelButton <- tcltk::ttkbutton(buttonBox, width = 10, text = "Cancel", command = onCancel) tcltk::tkgrid(getAmelia("output.seedlab"), row = 1, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("output.seed"), row = 1, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("output.tollab"), row = 2, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("output.tol"), row = 2, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("empri.label"), row = 3, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("empri.ent"), row = 3, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("maxre.label"), row = 4, column = 1, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("maxre.ent"), row = 4, column = 2, sticky = "w", padx = 10, pady = 10) tcltk::tkgrid(okButton, row = 0, column = 0, padx = 10, pady = 10) tcltk::tkgrid(cancelButton, row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(buttonBox, row = 5, column = 1, sticky = "e", columnspan = 2) tcltk::tkgrid(numericalBox, sticky = "news") tcltk::tkwm.protocol(getAmelia("numericalWindow"), "WM_DELETE_WINDOW", onCancel) centerModalDialog(getAmelia("numericalWindow"), resize=FALSE) bindTooltip(widget = "empri.ent", "Ridge prior that shrinks the 
covariances, which stabilizes estimation. Five percent of the number of observations is a useful default.")
bindTooltip(widget = "empri.label", "Ridge prior that shrinks the covariances, which stabilizes estimation. Five percent of the number of observations is a useful default.")
bindTooltip(widget = "output.seed", tip = "Set seed for random number generator. Useful if you need to replicate the exact same imputations.")
bindTooltip(widget = "output.seedlab", tip = "Set seed for random number generator. Useful if you need to replicate the exact same imputations.")
bindTooltip(widget = "output.tol", tip = "Set the tolerance for the Amelia run. This is the value used to determine when Amelia has converged. Higher values mean Amelia will converge more quickly, but this may lead to a poor approximation of the parameters.")
bindTooltip(widget = "output.tollab", tip = "Set the tolerance for the Amelia run. This is the value used to determine when Amelia has converged. Higher values mean Amelia will converge more quickly, but this may lead to a poor approximation of the parameters.")
bindTooltip(widget = "maxre.ent", tip = "Amelia fits bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound.")
bindTooltip(widget = "maxre.label", tip = "Amelia fits bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound.")
}

buildOutputOptions <- function() {
  onCancel <- function(){
    tcltk::tcl("set", getAmelia("outname"), getAmelia("temp.name"))
    tcltk::tcl("set", getAmelia("outnum"), getAmelia("temp.num"))
    tcltk::tkwm.withdraw(getAmelia("outputWindow"))
    tcltk::tkgrab.release(getAmelia("outputWindow"))
    tcltk::tkfocus(getAmelia("gui"))
  }
  putAmelia("temp.name", tcltk::tclvalue(getAmelia("outname")))
  putAmelia("temp.num", tcltk::tclvalue(getAmelia("outnum")))
  if (exists("outputWindow", envir = ameliaEnv)) {
    tcltk::tkwm.deiconify(getAmelia("outputWindow"))
    tcltk::tkraise(getAmelia("outputWindow"))
    return()
  }
  putAmelia("outputWindow", tcltk::tktoplevel())
  tcltk::tkwm.title(getAmelia("outputWindow"), "Output Options")
  outputBox <- tcltk::ttkframe(getAmelia("outputWindow"))
  putAmelia("output.label", tcltk::ttklabel(outputBox, text="Name the Imputed Dataset:"))
  putAmelia("output.entry", tcltk::ttkentry(outputBox, width="15", textvariable = getAmelia("outname")))
  putAmelia("output.numlab", tcltk::ttklabel(outputBox, text = "Number of Imputed Datasets:"))
  putAmelia("output.num", tcltk::ttkentry(outputBox, width = "7", textvariable = getAmelia("outnum")))
  buttonBox <- tcltk::ttkframe(outputBox)
  okButton <- tcltk::ttkbutton(buttonBox, text = "OK", width = 10, command = function() {tcltk::tkwm.withdraw(getAmelia("outputWindow"));tcltk::tkgrab.release(getAmelia("outputWindow"));tcltk::tkfocus(getAmelia("gui"))})
  cancelButton <- tcltk::ttkbutton(buttonBox, width = 10, text = "Cancel", command = onCancel)
  tcltk::tkgrid(getAmelia("output.label"), row = 1, column = 1, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(getAmelia("output.entry"), row = 1, column = 2, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(getAmelia("output.numlab"), row = 2, column = 1, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(getAmelia("output.num"), row = 2, column = 2, sticky = "w", padx = 10, pady = 10)
  tcltk::tkgrid(okButton, row = 0, column = 0, padx = 10, pady = 10)
tcltk::tkgrid(cancelButton, row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(buttonBox, row = 3, column = 1, sticky = "e", columnspan = 2) tcltk::tkgrid(outputBox, sticky = "news") tcltk::tkwm.protocol(getAmelia("outputWindow"), "WM_DELETE_WINDOW", onCancel) centerModalDialog(getAmelia("outputWindow"), resize=FALSE) bindTooltip(widget = "output.entry", tip = "The prefix for the saved imputed datasets. For most saving options they will be in the following format: \n\nmyprefix1.out\nmyprefix2.out\n...\n\nAnd so on, where \"out\" is the file extension.") bindTooltip(widget = "output.label", tip = "The prefix for the saved imputed datasets. For most saving options they will be in the following format: \n\nmyprefix1.out\nmyprefix2.out\n...\n\nAnd so on, where \"out\" is the file extension.") bindTooltip(widget = "output.num", tip = "Set the number of imputed datasets.\n\nIn many cases, around 5 is sufficient, but if the fraction of missingness is high, you may need more. Use the Summarize Data and Missingness Map above to get a sense for the amount of missingness in your data.") bindTooltip(widget = "output.numlab", tip = "Set the number of imputed datasets.\n\nIn many cases, around 5 is sufficient, but if the fraction of missingness is high, you may need more. Use the Summarize Data and Missingness Map above to get a sense for the amount of missingness in your data.") } buildAboutDialog <- function() { if (exists("aboutWindow", envir = ameliaEnv)) { tcltk::tkwm.deiconify(getAmelia("aboutWindow")) tcltk::tkraise(getAmelia("aboutWindow")) return() } putAmelia("aboutWindow", tcltk::tktoplevel(parent=getAmelia("gui"))) tcltk::tkwm.title(getAmelia("aboutWindow"), "About AmeliaView") aboutBox <- tcltk::ttkframe(getAmelia("aboutWindow"), height = 150, width = 200) #ameliaPic <- tcltk::tkimage.create("photo",file=ameliaFile) picLabel <- tcltk::ttklabel(aboutBox, image=getAmelia("ameliaPic"), relief="groove", borderwidth=2) tcltk::tkgrid(tcltk::ttkframe(aboutBox,width=100), row=0,column=1) tcltk::tkgrid(tcltk::ttkframe(aboutBox,height=150,width=0), row=0,column=0,rowspan=3) tcltk::tkgrid(picLabel, row = 1, column=1, pady = 20, padx = 20) tcltk::tkgrid(tcltk::ttklabel(aboutBox, text=paste("AmeliaView",packageDescription("Amelia", fields="Version")), justify="center"), row = 2, column = 1) tcltk::tkgrid(tcltk::ttklabel(aboutBox, text="James Honaker, Gary King, Matthew Blackwell", justify="center"), row = 3, column = 1, padx=20) tcltk::tkgrid(tcltk::ttklabel(aboutBox, text="\uA9 2006-2010", justify="center"), row = 4, column = 1, padx=20) buttonBox <- tcltk::ttkframe(aboutBox) closeButton <- tcltk::ttkbutton(buttonBox, text = "Close", command = function() {tcltk::tkwm.withdraw(getAmelia("aboutWindow"));tcltk::tkgrab.release(getAmelia("aboutWindow"));tcltk::tkfocus(getAmelia("gui"))}, width = 10) websiteButton <- tcltk::ttkbutton(buttonBox, text = "Website", command = function() browseURL("http://gking.harvard.edu/amelia/")) tcltk::tkgrid(websiteButton, row=0, column = 0, sticky="w", padx=10, pady=10) tcltk::tkgrid(closeButton, row=0, column = 0, sticky="e", padx=10, pady=10) tcltk::tkgrid.columnconfigure(buttonBox, 0, weight=1) tcltk::tkgrid(buttonBox, row=5, column = 1, sticky="ew") tcltk::tkgrid(aboutBox, sticky = "nsew") tcltk::tkwm.protocol(getAmelia("aboutWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("aboutWindow"));tcltk::tkgrab.release(getAmelia("aboutWindow"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("aboutWindow"), resize=FALSE) } 
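## Example (illustrative sketch, not called by the GUI): the Numerical
## Options and Output Options dialogs above, and the Observational Priors
## dialog below, only collect settings that are ultimately handed to the
## command-line functions when the user hits "Impute!". Assuming a
## hypothetical data frame `df` (all names and values here are made up),
## the equivalent call looks roughly like this:
##
##   ## cell-level priors: one row per prior, giving the observation's row
##   ## number (0 for a whole variable), the variable's column number, and a
##   ## mean and standard deviation -- the same four columns that onOK() in
##   ## gui.pri.setup() below stores in priorsmat
##   pr <- matrix(c(12, 3, 5.5, 0.25), nrow = 1)
##   a.out <- amelia(df, m = 5, priors = pr,
##                   empri = 0.05 * nrow(df),  # ridge prior
##                   tolerance = 1e-4,         # EM convergence tolerance
##                   max.resample = 100)       # resampling limit for bounds
##   ## save the imputations in the chosen output format
##   write.amelia(a.out, file.stem = "outdata", format = "csv")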
gui.pri.setup <- function() {
  cancelPriors <- function() {
    putAmelia("priorsmat", getAmelia("temp.priorsmat"))
  }
  onOK <- function() {
    nm <- c("dist","range")[getAmeliaInd("addpri.note")+1]
    varBox <- paste("add",nm,"var",sep=".")
    caseBox <- paste("add",nm,"case",sep=".")
    caseSelection <- as.numeric(tcltk::tcl(getAmelia(caseBox),"current"))
    varSelection <- as.numeric(tcltk::tcl(getAmelia(varBox),"current")) + 1
    thiscase <- tcltk::tclvalue(tcltk::tkget(getAmelia(caseBox)))
    thisvar <- tcltk::tclvalue(tcltk::tkget(getAmelia(varBox)))
    if (caseSelection==0) {
      rowSelection <- 0
      colSelection <- which(anyMissing)[varSelection]
    } else {
      rowSelection <- missingCases[caseSelection]
      colSelection <- which(is.na(getAmelia("amelia.data")[rowSelection,]))[varSelection]
    }
    # fork for range vs. dist
    if (nm == "range") {
      if (tcltk::tclvalue(getAmelia("priorMin"))=="") {
        tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Error", message="Please enter a minimum value.", type="ok",icon="error")
        return()
      }
      if (tcltk::tclvalue(getAmelia("priorMax"))=="") {
        tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Error", message="Please enter a maximum value.", type="ok",icon="error")
        return()
      }
      if (tcltk::tclvalue(getAmelia("priorConf"))=="") {
        tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Error", message="Please enter a confidence value.", type="ok",icon="error")
        return()
      }
      if (isTRUE(as.numeric(tcltk::tclvalue(getAmelia("priorConf"))) <= 0 | as.numeric(tcltk::tclvalue(getAmelia("priorConf"))) >= 1)) {
        tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Error", message="Confidence levels must be between 0 and 1.", type="ok",icon="error")
        return()
      }
      prMax <- as.numeric(tcltk::tclvalue(getAmelia("priorMax")))
      prMin <- as.numeric(tcltk::tclvalue(getAmelia("priorMin")))
      prCon <- as.numeric(tcltk::tclvalue(getAmelia("priorConf")))
      if (prMax <= prMin) {
        tcltk::tkmessageBox(title="Error", message="The max is less than the min.", type="ok",icon="error")
        return()
      }
      prMean <- prMin + ((prMax-prMin)/2)
      prSD <- (prMax-prMin)/(2*qnorm(1-(1-prCon)/2))
      #if dist prior
    } else {
      if (tcltk::tclvalue(getAmelia("priorMean"))=="") {
        tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Error", message="Please enter a mean value.", type="ok",icon="error")
        return()
      }
      if (tcltk::tclvalue(getAmelia("priorSD"))=="") {
        tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Error", message="Please enter a standard deviation.", type="ok",icon="error")
        return()
      }
      if (isTRUE(as.numeric(tcltk::tclvalue(getAmelia("priorSD"))) == 0)) {
        tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Error", message="Standard deviations must be greater than 0.", type="ok",icon="error")
        return()
      }
      prMean <- as.numeric(tcltk::tclvalue(getAmelia("priorMean")))
      prSD <- as.numeric(tcltk::tclvalue(getAmelia("priorSD")))
    }
    newPrior <- c(rowSelection, colSelection,prMean,prSD)
    if (!is.null(getAmelia("priorsmat"))) {
      matchPrior <- apply(getAmelia("priorsmat"), 1, function(x) all(x[1]==rowSelection, x[2]==colSelection))
    } else {
      matchPrior <- FALSE
    }
    if (any(matchPrior)) {
      mess <- "There is a prior associated with this case. Overwrite?"
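## Illustrative aside (comments only, not executed): the range-to-normal
## conversion earlier in onOK() treats the stated [min, max] as a symmetric
## confidence interval of a normal distribution. With a hypothetical range
## prior of min = 0, max = 10 and confidence = 0.95:
##
##   prMean <- 0 + ((10 - 0) / 2)                         # 5
##   prSD   <- (10 - 0) / (2 * qnorm(1 - (1 - 0.95)/2))   # about 2.55
##
## since qnorm(0.975) is roughly 1.96.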
over <- tcltk::tkmessageBox(parent=getAmelia("priorsWindow"), title="Overwrite Prior",message=mess, icon="question",type="yesno",default="no") if (tcltk::tclvalue(over)=="no") { return() } else { putAmelia("priorsmat",getAmelia("priorsmat")[-which(matchPrior),]) tcltk::tkdelete(getAmelia("priors.tree"), paste(rowSelection,colSelection,sep="-")) } } putAmelia("priorsmat",rbind(getAmelia("priorsmat"),newPrior)) ## need to change the treeview #updateTree() tcltk::tkinsert(getAmelia("priors.tree"),"","end", id = paste(rowSelection,colSelection,sep="-"), values = c(thisvar,prMean,prSD), text = thiscase,tag="normal") resetEntries() return() } validateNumeric <- function(x) { if (isTRUE(grep("(^-?[0-9]*\\.?[0-9]*$)",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } validateSD <- function(x) { if (isTRUE(grep("^[0-9]*\\.?[0-9]*$",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } validateConf <- function(x) { if (isTRUE(grep("^0*\\.[0-9]*$",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } setMissingVars <- function() { currentSelection <- as.numeric(tcltk::tcl(getAmelia("add.dist.case"), "current")) currentCase <- missingCases[currentSelection] if (currentSelection==0) missVars <- anyMissing else missVars <- is.na(getAmelia("amelia.data")[currentCase,]) missVarNames <- colnames(getAmelia("amelia.data"))[missVars] tcltk::tkconfigure(getAmelia("add.dist.var"),values = missVarNames) tcltk::tcl(getAmelia("add.dist.var"), "current", 0) } setMissingRangeVars <- function() { currentSelection <- as.numeric(tcltk::tcl(getAmelia("add.range.case"), "current")) currentCase <- missingCases[currentSelection] if (currentSelection==0) missVars <- anyMissing else missVars <- is.na(getAmelia("amelia.data")[currentCase,]) missVarNames <- colnames(getAmelia("amelia.data"))[missVars] tcltk::tkconfigure(getAmelia("add.range.var"),values = missVarNames) tcltk::tcl(getAmelia("add.range.var"), "current", 0) } resetEntries <- function() { tcltk::tcl("set", getAmelia("priorMin"),"") tcltk::tcl("set", getAmelia("priorMax"),"") tcltk::tcl("set", getAmelia("priorMean"),"") tcltk::tcl("set", getAmelia("priorSD"),"") tcltk::tcl("set", getAmelia("priorConf"),"") return() } updateTree <- function() { allrows <- paste(tcltk::tcl(getAmelia("priors.tree"),"children","")) tcltk::tkdelete(getAmelia("priors.tree"), allrows) if (is.null(getAmelia("priorsmat"))) { return() } varnames <- names(getAmelia("amelia.data")) cases <- paste(rownames(getAmelia("amelia.data")), ") ", getAmelia("amelia.data")[,getAmelia("csvar")]," ", getAmelia("amelia.data")[,getAmelia("tsvar")], sep="") cases <- c("(whole variable)", cases) for (i in 1:nrow(getAmelia("priorsmat"))) { thiscase <- cases[getAmelia("priorsmat")[i,1]+1] thisvar <- varnames[getAmelia("priorsmat")[i,2]] tcltk::tkinsert(getAmelia("priors.tree"),"","end", id = paste(getAmelia("priorsmat")[i,1],getAmelia("priorsmat")[i,2],sep="-"), values = c(thisvar,getAmelia("priorsmat")[i,c(3,4)]), text = thiscase,tag="normal") } return() } dropPriors <- function() { sel.pri <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("priors.tree"), "selection")), " ")[[1]] pri.mat.rows <- c() for (i in 1:length(sel.pri)) { pri.mat.rows <- c(pri.mat.rows, tcltk::tclvalue(tcltk::tkindex(getAmelia("priors.tree"),sel.pri[i]))) } pri.mat.rows <- as.numeric(pri.mat.rows) + 1 putAmelia("priorsmat", getAmelia("priorsmat")[-pri.mat.rows,, drop = FALSE]) tcltk::tkdelete(getAmelia("priors.tree"),paste(tcltk::tcl(getAmelia("priors.tree"), 
"selection"))) if (nrow(getAmelia("priorsmat")) == 0) putAmelia("priorsmat", NULL) return(NULL) } RightClick <- function(x, y) { # x and y are the mouse coordinates rootx <- as.integer(tcltk::tkwinfo("rootx", getAmelia("priors.tree"))) # tcltk::tkwinfo() return several infos rooty <- as.integer(tcltk::tkwinfo("rooty", getAmelia("priors.tree"))) xTxt <- as.integer(x) + rootx yTxt <- as.integer(y) + rooty # Create a Tcl command in a character string and run it tcltk::.Tcl(paste("tk_popup", tcltk::.Tcl.args(getAmelia("pri.right.click"), xTxt, yTxt))) } putAmelia("temp.priorsmat", getAmelia("priorsmat")) if (exists("priorsWindow", envir=ameliaEnv)) { updateTree() resetEntries() tcltk::tkwm.deiconify(getAmelia("priorsWindow")) tcltk::tkraise(getAmelia("priorsWindow")) tcltk::tkgrab(getAmelia("priorsWindow")) return() } putAmelia("priorsWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("priorsWindow"),"Observational Priors") priorsBox <- tcltk::ttkframe(getAmelia("priorsWindow")) prior.frame <- tcltk::ttkpanedwindow(priorsBox, orient = "horizontal") prior.disp <- tcltk::ttklabelframe(prior.frame, text = "Observational priors ", height = 200, width = 200) prior.add <- tcltk::ttklabelframe(prior.frame, text = "Add priors", height = 200, width = 200) putAmelia("prior.add.but", tcltk::ttkbutton(prior.add, text = "Add", command = function() onOK())) yscr <- tcltk::ttkscrollbar(prior.disp, orient = "vertical", command=function(...)tcltk::tkyview(getAmelia("priors.tree"),...)) xscr <- tcltk::ttkscrollbar(prior.disp, orient = "horizontal", command=function(...)tcltk::tkxview(getAmelia("priors.tree"),...)) putAmelia("priors.tree", tcltk::ttktreeview(prior.disp, columns = "Variable Mean SD", yscrollcommand=function(...)tcltk::tkset(yscr,...), xscrollcommand=function(...)tcltk::tkset(xscr,...))) putAmelia("pri.right.click",tcltk::tkmenu(getAmelia("priors.tree"), tearoff = FALSE) ) tcltk::tkadd(getAmelia("pri.right.click"), "command", label = "Remove selected priors", command = function() dropPriors()) tcltk::tkbind(getAmelia("priors.tree"), "", RightClick) tcltk::tcl(getAmelia("priors.tree"), "column", "#0", width = 120) tcltk::tcl(getAmelia("priors.tree"), "column", 0, width = 80, anchor = "center") tcltk::tcl(getAmelia("priors.tree"), "column", 1, width = 40, anchor = "center") tcltk::tcl(getAmelia("priors.tree"), "column", 2, width = 40, anchor = "center") tcltk::tcl(getAmelia("priors.tree"), "heading", "#0", text = "Case") tcltk::tcl(getAmelia("priors.tree"), "heading", 0, text = "Variable") tcltk::tcl(getAmelia("priors.tree"), "heading", 1, text = "Mean") tcltk::tcl(getAmelia("priors.tree"), "heading", 2, text = "SD") ## Windows 7 doesn't handle treeview selection correctly if (.Platform$OS.type == "windows") { tcltk::tktag.configure(getAmelia("priors.tree"),"normal", background="white") tcltk::tktag.configure(getAmelia("priors.tree"),"selected", background="SystemHighlight") tcltk::tkbind(getAmelia("priors.tree"),"<>",function() refreshSelection(getAmelia("priors.tree"))) } putAmelia("addpri.note", tcltk::ttknotebook(prior.add)) add.dist.frame <- tcltk::ttkframe(getAmelia("addpri.note")) add.range.frame <- tcltk::ttkframe(getAmelia("addpri.note")) missingCases <- which(!complete.cases(getAmelia("amelia.data"))) anyMissing <- apply(getAmelia("amelia.data"), 2, function(x) any(is.na(x))) cases1 <- paste(rownames(getAmelia("amelia.data"))[missingCases], ") ", getAmelia("amelia.data")[missingCases, getAmelia("csvar")]," ", getAmelia("amelia.data")[missingCases, getAmelia("tsvar")], sep="") cases 
<- c("(whole variable)",cases1) if (!is.null(getAmelia("priorsmat"))) updateTree() vars <- getAmelia("varnames")[anyMissing] ## Distribution prior note putAmelia("add.dist.case",tcltk::ttkcombobox(add.dist.frame, values=cases, state="readonly", width=15)) putAmelia("add.dist.var",tcltk::ttkcombobox(add.dist.frame, values=vars, state="readonly", width=15)) tcltk::tkbind(getAmelia("add.dist.case"), "<>", function(...) setMissingVars()) tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Case:"), column=1, row=1, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Variable:"), column=1, row=2, sticky = "e") tcltk::tcl(getAmelia("add.dist.case"), "current", 0) tcltk::tcl(getAmelia("add.dist.var"), "current", 0) tcltk::tkconfigure(getAmelia("add.dist.var"), postcommand=function(...) setMissingVars()) tcltk::tkgrid(getAmelia("add.dist.case"), column=2, row=1, pady=3) tcltk::tkgrid(getAmelia("add.dist.var"), column=2, row=2, pady=3) putAmelia("priorMean", tcltk::tclVar()) putAmelia("priorSD", tcltk::tclVar()) tcltk::tkgrid(tcltk::ttkframe(add.dist.frame, width = 150, height = 0), column = 1, row = 0) putAmelia("meanBox", tcltk::ttkentry(add.dist.frame, textvar=getAmelia("priorMean"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("sdBox", tcltk::ttkentry(add.dist.frame, textvar=getAmelia("priorSD"), validate="key", validatecommand = function(P) validateSD(P))) tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Mean:"), column=1, row=3, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.dist.frame, text="Standard Deviation:"), column=1, row=4, sticky = "e") tcltk::tkgrid(getAmelia("meanBox"), column=2, row=3, pady=5, padx=5) tcltk::tkgrid(getAmelia("sdBox"), column=2, row=4, pady=5, padx=5) ## Range prior note putAmelia("add.range.case",tcltk::ttkcombobox(add.range.frame, values=cases, state="readonly", width=15)) putAmelia("add.range.var",tcltk::ttkcombobox(add.range.frame, values=vars, state="readonly", width=15)) tcltk::tkbind(getAmelia("add.range.case"), "<>", function(...) setMissingRangeVars()) tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Case:"), column=1, row=1, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Variable:"), column=1, row=2, sticky = "e") tcltk::tcl(getAmelia("add.range.case"), "current", 0) tcltk::tcl(getAmelia("add.range.var"), "current", 0) tcltk::tkconfigure(getAmelia("add.range.var"), postcommand=function(...) 
setMissingRangeVars()) tcltk::tkgrid(getAmelia("add.range.case"), column=2, row=1, pady=3) tcltk::tkgrid(getAmelia("add.range.var"), column=2, row=2, pady=3) tcltk::tkgrid(tcltk::ttkframe(add.range.frame, width = 150, height = 0), column = 1, row = 0) putAmelia("priorMax", tcltk::tclVar()) putAmelia("priorMin", tcltk::tclVar()) putAmelia("priorConf", tcltk::tclVar()) putAmelia("minBox", tcltk::ttkentry(add.range.frame, textvar=getAmelia("priorMin"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("maxBox", tcltk::ttkentry(add.range.frame, textvar=getAmelia("priorMax"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("confBox", tcltk::ttkentry(add.range.frame, textvar=getAmelia("priorConf"), validate="key", validatecommand = function(P) validateNumeric(P))) tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Minimum:"), column=1, row=3, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Maximum:"), column=1, row=4, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(add.range.frame, text="Confidence:"), column=1, row=5, sticky = "e") #tcltk::tkgrid(tkframe(add.range.frame, width = 20, height = 0), column = 1, row = 6) tcltk::tkgrid(getAmelia("minBox"), column=2, row=3, pady=5, padx=5) tcltk::tkgrid(getAmelia("maxBox"), column=2, row=4, pady=5, padx=5) tcltk::tkgrid(getAmelia("confBox"), column=2, row=5, pady=5, padx=5) tcltk::tkadd(getAmelia("addpri.note"), add.dist.frame, text = "Add Distribution Prior") tcltk::tkadd(getAmelia("addpri.note"), add.range.frame, text = "Add Range Prior") tcltk::tkgrid(getAmelia("addpri.note"), row = 1, sticky = "nsew") tcltk::tkgrid(getAmelia("prior.add.but"), sticky = "se", padx = 10, pady = 10) but.frame <- tcltk::ttkframe(priorsBox) putAmelia("pri.ok", tcltk::ttkbutton(but.frame, text = "OK", command = function(){tcltk::tkwm.withdraw(getAmelia("priorsWindow"));tcltk::tkgrab.release(getAmelia("priorsWindow"));tcltk::tkfocus(getAmelia("gui"))}, width = 10)) putAmelia("pri.can", tcltk::ttkbutton(but.frame, text = "Cancel", width = 10, command = function() {cancelPriors();tcltk::tkwm.withdraw(getAmelia("priorsWindow"));tcltk::tkgrab.release(getAmelia("priorsWindow"));tcltk::tkfocus(getAmelia("gui"))})) tcltk::tkgrid(getAmelia("priors.tree"), row = 1, column = 1, sticky = "nsew") tcltk::tkgrid(yscr, row = 1, column = 2, sticky = "nsew") tcltk::tkgrid(xscr, row = 2, column = 1, sticky = "nsew") tcltk::tkgrid.rowconfigure(prior.disp, 1, weight = 1) tcltk::tkgrid.columnconfigure(prior.disp, 1, weight = 1) tcltk::tkadd(prior.frame, prior.add) tcltk::tkadd(prior.frame, prior.disp) tcltk::tkgrid(prior.frame, row = 1, column = 0, columnspan = 2, padx = 10, pady = 10, sticky = "news") tcltk::tkgrid(getAmelia("pri.ok"), row = 0, column = 1, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("pri.can"), row = 0, column = 2, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(but.frame, row = 2, column = 1, sticky = "ne") tcltk::tkgrid.rowconfigure(priorsBox, 1, weight = 1) tcltk::tkgrid.columnconfigure(priorsBox, 0, weight = 1) tcltk::tkgrid.columnconfigure(priorsBox, 1, weight = 1) tcltk::tkgrid(priorsBox, row = 0, column = 0, sticky = "news") tcltk::tkgrid.rowconfigure(getAmelia("priorsWindow"), 0, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("priorsWindow"), 0, weight = 1) tcltk::tkwm.protocol(getAmelia("priorsWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("priorsWindow"));tcltk::tkgrab.release(getAmelia("priorsWindow"));tcltk::tkfocus(getAmelia("gui"))}) 
centerModalDialog(getAmelia("priorsWindow"), resize = TRUE) bindTooltip(widget = "priors.tree", "Currently set observation-level priors for the data. You can remove these using the right-click menu.") bindTooltip(widget = "pri.ok", tip = "Save changes and close window.") bindTooltip(widget = "pri.can", tip = "Cancel any changes and close window.") bindTooltip(widget = "prior.add.but", tip = "Add the above prior for the selected observation and variable to the list of priors for this data set.") bindTooltip(widget = "meanBox", tip = "The mean of a normal prior distribution on the value of the selected missing cell.") bindTooltip(widget = "sdBox", tip = "The standard deviation of a normal prior distribution on the value of the selected missing cell.") bindTooltip(widget = "add.dist.case", tip = "Select the case name or row number of the case for the cell-level prior.") bindTooltip(widget = "add.dist.var", tip = "Select the variable name for the cell-level prior.") bindTooltip(widget = "confBox", tip = "A confidence level between 0 and 1 for the confidence bound on the distribution of the selected missing cell. These confidence bounds are converted into a normal distribution prior on the value.") bindTooltip(widget = "minBox", tip = "A lower confidence bound on the distribution of the selected missing cell. These confidence bounds are converted into a normal distribution prior on the value.") bindTooltip(widget = "maxBox", tip = "An upper confidence bound on the distribution of the selected missing cell. These confidence bounds are converted into a normal distribution prior on the value.") bindTooltip(widget = "add.range.case", tip = "Select the case name or row number of the case for the cell-level prior.") bindTooltip(widget = "add.range.var", tip = "Select the variable name for the cell-level prior.") } gui.diag.setup <- function() { if (exists("diagWindow", envir = ameliaEnv)) { tcltk::tkwm.deiconify(getAmelia("diagWindow")) tcltk::tkraise(getAmelia("diagWindow")) tcltk::tkfocus(getAmelia("diagWindow")) return() } putAmelia("diagWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("diagWindow"), "Diagnostics") diagBox <- tcltk::ttkframe(getAmelia("diagWindow")) gui.top<-tcltk::ttkpanedwindow(diagBox, orient = "vertical") var.diags <- tcltk::ttklabelframe(gui.top, text = "Individual Variable Plots", width = 100, height = 100) tscs.diags <- tcltk::ttklabelframe(gui.top, text = "Time-Series Cross-Sectional Plots", width = 100, height = 100) disp.diags <- tcltk::ttklabelframe(gui.top, text = "Overdispersion Plots", width = 100, height = 100) tcltk::tcl("set","indvar","") ## get variable names that are actually numeric variables <- getAmelia("varnames") variables <- variables[sapply(getAmelia("amelia.data"), is.numeric)] putAmelia("var.diags.combo", tcltk::ttkcombobox(var.diags,textvariable="indvar", values = variables, state = "readonly")) indvar.lab <- tcltk::ttklabel(var.diags, text = "Variable:") var.button.frame <- tcltk::ttkframe(var.diags) putAmelia("diag.but.compare",tcltk::ttkbutton(var.button.frame, text="Compare", command = function() compare.density(getAmelia("ameliaObject"), var=tcltk::tclvalue("indvar"),frontend=TRUE))) putAmelia("diag.overimp",tcltk::ttkbutton(var.button.frame,text="Overimpute",state="normal", command = function() overimpute(getAmelia("ameliaObject"), var=tcltk::tclvalue("indvar"),frontend=TRUE))) tcltk::tcl(getAmelia("var.diags.combo"), "current", 0) tcltk::tkgrid(indvar.lab, row = 0, column = 0, padx = 5) tcltk::tkgrid(getAmelia("var.diags.combo"), row = 
0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("diag.but.compare"), row = 0, column = 0, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("diag.overimp"), row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(var.button.frame, row =0, column = 2) tcltk::tkgrid(tcltk::ttkframe(var.diags, width = 50, height = 0), row = 1) ## tscs plots csvar <- getAmelia("ameliaObject")$arguments$cs tsvar <- getAmelia("ameliaObject")$arguments$ts ## can't do tscsplots for the ts or cs variable tscsvariables <- variables[variables != getAmelia("varnames")[csvar] & variables != getAmelia("varnames")[tsvar]] if (is.null(tsvar) | is.null(csvar)) { st <- "disabled" but.st <- st } else { st <- "readonly" but.st <- "normal" } if (!is.null(csvar)) { cases <- unique(getAmelia("amelia.data")[,csvar]) if (is.factor(getAmelia("amelia.data")[,csvar])) { cases <- levels(getAmelia("amelia.data")[,csvar])[cases] } } else { cases <- 1:nrow(getAmelia("amelia.data")) } tcltk::tcl("set", "casename","") tcltk::tcl("set", "tscsvarname", "") putAmelia("tscs.case.combo", tcltk::ttkcombobox(tscs.diags,textvariable="casename", values = cases, state = st)) putAmelia("tscs.var.combo", tcltk::ttkcombobox(tscs.diags,textvariable="tscsvarname", values = tscsvariables, state = st)) putAmelia("tscs.plot.but", tcltk::ttkbutton(tscs.diags, text = "TSCS Plot", state = but.st, command = function() tscsPlot(getAmelia("ameliaObject"), cs = tcltk::tclvalue("casename"), var = tcltk::tclvalue("tscsvarname"), frontend = TRUE))) if (st == "readonly") { tcltk::tcl(getAmelia("tscs.case.combo"), "current", 0) tcltk::tcl(getAmelia("tscs.var.combo"), "current", 0) } tcltk::tkgrid(tcltk::ttklabel(tscs.diags, text = "Case:"), row = 0, column = 0, sticky = "e", padx = 5) tcltk::tkgrid(getAmelia("tscs.case.combo"), row = 0, column = 1, padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(tscs.diags, text = "Variable:"), row = 1, column = 0, sticky = "e", padx = 5) tcltk::tkgrid(getAmelia("tscs.var.combo"), row = 1, column = 1, padx = 10, pady = 10) tcltk::tkgrid(getAmelia("tscs.plot.but"), row = 1, column = 2, padx = 10, pady = 10, sticky = "se") tcltk::tkgrid(tcltk::ttkframe(tscs.diags, width = 50, height = 0), row = 2) dimvalue<-tcltk::tclVar("1") putAmelia("onedim", tcltk::ttkradiobutton(disp.diags, variable=dimvalue, value="1")) putAmelia("twodims", tcltk::ttkradiobutton(disp.diags, variable=dimvalue, value="2")) disp.imps.tcl<-tcltk::tclVar("5") putAmelia("disp.imps", tcltk::ttkentry(disp.diags,width="5",textvariable=disp.imps.tcl)) putAmelia("disp.but", tcltk::ttkbutton(disp.diags,text="Overdisperse",state="normal", command = function() disperse(m=as.numeric(tcltk::tclvalue(disp.imps.tcl)), dims=as.numeric(tcltk::tclvalue(dimvalue)),frontend=TRUE,output=getAmelia("ameliaObject")))) tcltk::tkgrid(tcltk::ttklabel(disp.diags,text="Number of dispersions:"),row=2,column=1, sticky="e") tcltk::tkgrid(tcltk::ttkframe(disp.diags, width = 50, height = 0), row = 5) tcltk::tkgrid(getAmelia("disp.imps"),column=2,row=2,sticky="nw", padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(disp.diags,text="One Dimension:"),row=3,column=1, sticky = "e") tcltk::tkgrid(tcltk::ttklabel(disp.diags,text="Two Dimensions:"),row=4,column=1, sticky = "e") tcltk::tkgrid(getAmelia("onedim"),row=3,column=2,padx=10,pady=5) tcltk::tkgrid(getAmelia("twodims"),row=4,column=2,padx=10) tcltk::tkgrid(getAmelia("disp.but"),row=4,column=3,padx=15, pady=10,sticky="news") tcltk::tkadd(gui.top, var.diags) tcltk::tkadd(gui.top, tscs.diags) tcltk::tkadd(gui.top, disp.diags) 
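## Illustrative aside (comments only, not executed): the buttons built above
## call the same exported diagnostic functions that are available from the
## command line. Assuming an amelia output object `a.out` and hypothetical
## variable and cross-section values "gdp" and "Ghana", the equivalents are
## roughly:
##
##   compare.density(a.out, var = "gdp")         # observed vs. imputed densities
##   overimpute(a.out, var = "gdp")              # overimputation diagnostic
##   disperse(output = a.out, dims = 1, m = 5)   # EM convergence from
##                                               # overdispersed starting values
##   tscsPlot(a.out, cs = "Ghana", var = "gdp")  # one unit's time series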
tcltk::tkgrid(gui.top, row = 0, padx = 20, pady = 20) tcltk::tkgrid(diagBox, sticky = "news", row = 0, column = 0) tcltk::tkgrid.rowconfigure(getAmelia("diagWindow"), 0, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("diagWindow"), 0, weight = 1) tcltk::tkwm.protocol(getAmelia("diagWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("diagWindow"));tcltk::tkgrab.release(getAmelia("diagWindow"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("diagWindow"), resize = FALSE) bindTooltip(widget = "var.diags.combo", tip = "Variable for either the density comparison plot or the overimputation plot.") bindTooltip(widget = "tscs.var.combo", tip = "Variable to use for the time-series cross-sectional plot.") bindTooltip(widget = "tscs.case.combo", tip = "Case to use for the time-series cross-sectional plot.") bindTooltip(widget = "diag.but.compare", tip = "Compare densities of the imputed values vs. observed values.") bindTooltip(widget = "diag.overimp", tip = "Overimpute and graph confidence intervals. ") bindTooltip(widget = "disp.but", tip = "Plot the convergence of the EM algorithm from overdispersed starting values.") bindTooltip(widget = "tscs.plot.but", tip = "Plot a time-series within one cross-section with imputation distributions in red.") bindTooltip(widget = "disp.imps", tip = "Number of different overdispersed starting values to use.") bindTooltip(widget = "onedim", tip = "Number of dimensions to visualize convergence.") bindTooltip(widget = "twodims", tip = "Number of dimensions to visualize convergence.") } ## the following functions have been imported from Rcmdr putAmelia <- function(x, value) { assign(x, value, envir = ameliaEnv) } getAmelia <- function(x, mode="any") get(x, envir = ameliaEnv, mode = mode, inherits = FALSE) getAmeliaInd <- function(x) { as.numeric(tcltk::tkindex(getAmelia(x), "current")) } ameliaTclSet <- function(name, value){ name <- ls(unclass(getAmelia(name))$env) tcltk::tcl("set", name, value) } save.log <- function() { file.select <- tcltk::tclvalue(tcltk::tkgetSaveFile(parent=getAmelia("gui"), filetypes="{{Text files} {*.txt}} {{All files} *}")) cat(getAmelia("output.log"), file = file.select) } show.output.log <- function() { RightClick <- function(x, y) { # x and y are the mouse coordinates rootx <- as.integer(tcltk::tkwinfo("rootx", getAmelia("log.viewer"))) # tcltk::tkwinfo() return several infos rooty <- as.integer(tcltk::tkwinfo("rooty", getAmelia("log.viewer"))) xTxt <- as.integer(x) + rootx yTxt <- as.integer(y) + rooty # Create a Tcl command in a character string and run it tcltk::.Tcl(paste("tk_popup", tcltk::.Tcl.args(getAmelia("log.right.click"), xTxt, yTxt))) } if (exists("log.top", envir = ameliaEnv)) { tcltk::tkconfigure(getAmelia("log.viewer"), state = "normal") tcltk::tkdelete(getAmelia("log.viewer"), "0.0", "end") tcltk::tkinsert(getAmelia("log.viewer"), "end", paste(getAmelia("output.log"), collapse = "")) tcltk::tkconfigure(getAmelia("log.viewer"), state = "disabled") tcltk::tkwm.deiconify(getAmelia("log.top")) tcltk::tkraise(getAmelia("log.top")) tcltk::tkfocus(getAmelia("log.top")) return() } putAmelia("log.top", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("log.top"), "Output Log") scr <- tcltk::ttkscrollbar(getAmelia("log.top"), command=function(...)tcltk::tkyview(getAmelia("log.viewer"),...)) putAmelia("log.viewer", tcltk::tktext(getAmelia("log.top"), width = 80, height = 25, yscrollcommand=function(...)tcltk::tkset(scr,...))) tcltk::tkinsert(getAmelia("log.viewer"), "end", 
paste(getAmelia("output.log"), collapse = "")) tcltk::tkconfigure(getAmelia("log.viewer"), state = "disabled") main.menu <- tcltk::tkmenu(getAmelia("log.top")) main.menu.file <- tcltk::tkmenu(main.menu, tearoff=0) tcltk::tkadd(main.menu.file,"command",label="Save log file",command=function() save.log()) tcltk::tkadd(main.menu.file,"command",label="Close",command=function(){tcltk::tkwm.withdraw(getAmelia("log.top"));tcltk::tkgrab.release(getAmelia("log.top"));tcltk::tkfocus(getAmelia("gui"))}) tcltk::tkadd(main.menu,"cascade",label="File",menu=main.menu.file) tcltk::tkconfigure(getAmelia("log.top"),menu=main.menu) putAmelia("log.right.click",tcltk::tkmenu(getAmelia("log.viewer"), tearoff = FALSE) ) tcltk::tkadd(getAmelia("log.right.click"), "command", label = "Copy ", command = function() tcltk::tkevent.generate(getAmelia("log.viewer"),"<>")) tcltk::tkbind(getAmelia("log.viewer"), "", RightClick) #tcltk::tkgrid(main.menu, row = 0, sticky = "ew") tcltk::tkgrid(getAmelia("log.viewer"), row = 0, column = 0, sticky = "news") tcltk::tkgrid(scr, row =0, column = 1, sticky = "ns") #tcltk::tkgrid.columnconfigure(log.top, 1, weight = 1) tcltk::tkgrid.columnconfigure(getAmelia("log.top"), 0, weight = 1) tcltk::tkgrid.rowconfigure(getAmelia("log.top"), 0, weight = 1) tcltk::tkwm.protocol(getAmelia("log.top"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("log.top"));tcltk::tkgrab.release(getAmelia("log.top"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("log.top"), resize=TRUE) } after <- function(ms, func) { tcltk::.Tcl(paste("after", ms, tcltk::.Tcl.callback(func))) } cancel.after <- function(id) { invisible(tcltk::.Tcl(paste("after","cancel", id))) } bindTooltip <- function(widget, tip) { after.name <- paste(widget, "after", sep = ".") tip.name <- paste(widget, "tip", sep = ".") # tcltk::tkbind(getAmelia(widget), "", showTooltip(widget, tip)) tcltk::tkbind(getAmelia(widget), "", function() putAmelia(after.name, after(400, showTooltip(widget, tip)))) tcltk::tkbind(getAmelia(widget), "", function() {killTooltip(widget) cancel.after(getAmelia(after.name))}) tcltk::tkbind(getAmelia(widget), "", function() cancel.after(getAmelia(after.name))) tcltk::tkbind(getAmelia(widget), "", function() cancel.after(getAmelia(after.name))) } showTooltip <- function(widget, text) { function() { if (getAmelia(widget)$ID != tcltk::tclvalue(tcltk::tkwinfo("containing", tcltk::tkwinfo("pointerx","."), tcltk::tkwinfo("pointery",".")))) { return() } tip.name <- paste(widget, "tip", sep = ".") tiplabel.name <- paste(widget, "tiplabel",sep=".") if (exists(tip.name, envir = ameliaEnv)) { if (as.logical(tcltk::tkwinfo("exists",getAmelia(tip.name)))) { if (as.logical(tcltk::tkwinfo("ismapped",getAmelia(tip.name)))) { return() } } } scrh <- tcltk::tclvalue(tcltk::tkwinfo("screenheight", getAmelia(widget))) scrw <- tcltk::tclvalue(tcltk::tkwinfo("screenwidth", getAmelia(widget))) tcltk::tclServiceMode(on=FALSE) if (!exists(tip.name, envir = ameliaEnv)) { if (.Platform$OS.type == "windows") { borderColor <- "SystemWindowFrame" bgColor <- "SystemWindow" fgColor <- "SystemWindowText" } else { borderColor <- "black" bgColor <- "lightyellow" fgColor <- "black" } putAmelia(tip.name, tcltk::tktoplevel(getAmelia(widget), bd = 1, bg = borderColor, relief = "raised")) tcltk::tkwm.geometry(getAmelia(tip.name), paste("+",scrh,"+",scrw,sep="")) tcltk::tcl("wm","overrideredirect", getAmelia(tip.name), 1) putAmelia(tiplabel.name, tcltk::ttklabel(getAmelia(tip.name), background = bgColor, foreground = fgColor, 
text = text, justify = "left", wraplength=300)) tcltk::tkpack(getAmelia(tiplabel.name)) tcltk::tkbind(getAmelia(tip.name), "", function() tcltk::tkwm.withdraw(getAmelia(tip.name))) tcltk::tkbind(getAmelia(tip.name), "", function() tcltk::tkwm.withdraw(getAmelia(tip.name))) tcltk::tkbind(getAmelia(tip.name), "", function() tcltk::tkwm.withdraw(getAmelia(tip.name))) } width <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("reqwidth", getAmelia(tiplabel.name)))) height <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("reqheight",getAmelia(tiplabel.name)))) posX <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("pointerx","."))) posY <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("pointery","."))) + 25 screen <- as.numeric(tcltk::tclvalue(tcltk::tkwinfo("screenwidth","."))) # a.) Ad-hockery: Set positionX so the entire tooltip widget will be displayed. if ((posX + width) > screen) { posX <- posX - ((posX + width) - screen) - 3 } tcltk::tclServiceMode(on = TRUE) tcltk::tkwm.geometry(getAmelia(tip.name), paste("+",posX,"+",posY,sep = "")) tcltk::tkwm.deiconify(getAmelia(tip.name)) tcltk::tkraise(getAmelia(tip.name)) } } killTooltip <- function(widget) { tip.name <- paste(widget,"tip", sep = ".") if (exists(tip.name, envir = ameliaEnv)) { tcltk::tkwm.withdraw(getAmelia(tip.name)) } } refreshSelection <- function(tree) { all <- strsplit(tcltk::tclvalue(tcltk::tcl(tree,"children","")), " ")[[1]] sel <- strsplit(tcltk::tclvalue(tcltk::tcl(tree, "selection")), " ")[[1]] bandTree() for (i in sel) { tcltk::tcl(tree, "item", i, tags = "selected") } return(NULL) } variableOptionStatus <- function(sel) { states <- rep("normal", 15) classes <- sapply(getAmelia("amelia.data"), class)[sel] if (length(sel) ==0) { states <- rep("disabled", 15) return(states) } if (length(sel) > 1) states[c(1:4,15)] <- "disabled" if (!is.null(getAmelia("tsvar"))) if (getAmelia("tsvar") %in% sel) states[c(1:2,5:9,12:13,15)] <- "disabled" else states[3] <- "disabled" if (!is.null(getAmelia("csvar"))) if (getAmelia("csvar") %in% sel) states[c(1:2,5:9,12:13,15)] <- "disabled" else states[4] <- "disabled" if (is.null(getAmelia("tsvar"))) states[c(3,6:9)] <- "disabled" if (is.null(getAmelia("csvar"))) states[4] <- "disabled" if ("factor" %in% classes | "character" %in% classes) states[c(11,15)] <- "disabled" if (is.null(getAmelia("amelia.data"))) states <- rep("disabled", 15) return(states) } variableOptionsPost <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] states <- variableOptionStatus(sel) for (i in 0:14) { if (tcltk::tclvalue(tcltk::tktype(getAmelia("main.menu.variables"), i)) != "separator") tcltk::tkentryconfigure(getAmelia("main.menu.variables"),i, state = states[i+1]) } return(NULL) } mainTreeRightClick <- function(x, y) { # x and y are the mouse coordinates rootx <- as.integer(tcltk::tkwinfo("rootx", getAmelia("main.tree"))) # tcltk::tkwinfo() return several infos rooty <- as.integer(tcltk::tkwinfo("rooty", getAmelia("main.tree"))) xTxt <- as.integer(x) + rootx yTxt <- as.integer(y) + rooty # Create a Tcl command in a character string and run it sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] states <- variableOptionStatus(sel) main.tree.right.click <- tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) main.tree.trans <- tcltk::tkmenu(getAmelia("main.tree"), tearoff = FALSE) tcltk::tkadd(main.tree.right.click, "command", label = "Set as Time-Series Variable", command = setTS, state = states[1]) tcltk::tkadd(main.tree.right.click, 
"command", label = "Set as Cross-Section Variable", command = setCS, state = states[2]) tcltk::tkadd(main.tree.right.click, "command", label = "Unset as Time-Series Variable", command = unsetTS, state = states[3]) tcltk::tkadd(main.tree.right.click, "command", label = "Unset as Cross-Section Variable", command = unsetCS, state = states[4]) tcltk::tkadd(main.tree.right.click,"separator") tcltk::tkadd(main.tree.right.click, "command", label = "Add Lag", command = function() addLag(), state = states[6]) tcltk::tkadd(main.tree.right.click, "command", label = "Add Lead", command = function() addLead(), state = states[7]) tcltk::tkadd(main.tree.right.click, "command", label = "Remove Lag", command = function() dropLag(), state = states[8]) tcltk::tkadd(main.tree.right.click, "command", label = "Remove Lead", command = function() dropLead(), state = states[9]) tcltk::tkadd(main.tree.right.click,"separator") tcltk::tkadd(main.tree.right.click, "command", label = "Plot Histogram(s) of Selected", command = plotHist, state = states[10]) if (.Platform$OS.type == "windows") { tcltk::tkadd(main.tree.trans, "command", label = "Log", command = function(x) setTrans("logs")) tcltk::tkadd(main.tree.trans, "command", label = "Square Root", command = function(x) setTrans("sqrt")) tcltk::tkadd(main.tree.trans, "command", label = "Logistic", command = function(x) setTrans("lgstc")) tcltk::tkadd(main.tree.trans, "command", label = "Nominal", command = function(x) setTrans("noms")) tcltk::tkadd(main.tree.trans, "command", label = "Ordinal", command = function(x) setTrans("ords")) tcltk::tkadd(main.tree.trans, "command", label = "ID Variable", command = function(x) setTrans("idvar")) tcltk::tkadd(main.tree.right.click, "cascade", label = "Add Transformation...", menu = main.tree.trans, state = states[12]) } else { tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Log", command = function(x) setTrans("logs"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Square Root", command = function(x) setTrans("sqrt"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Logistic", command = function(x) setTrans("lgstc"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Nominal", command = function(x) setTrans("noms"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as Ordinal", command = function(x) setTrans("ords"), state = states[12]) tcltk::tkadd(main.tree.right.click, "command", label = "Mark as ID Variable", command = function(x) setTrans("idvar"), state = states[12]) } tcltk::tkadd(main.tree.right.click, "command", label = "Remove Transformations", command = dropTrans, state = states[13]) tcltk::tkadd(main.tree.right.click,"separator") tcltk::tkadd(main.tree.right.click, "command", label = "Add or Edit Bounds", command = addBounds, state = states[15]) tcltk::tkpopup(main.tree.right.click, xTxt, yTxt) } addLag <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("lags") tmp[sel] <- 1 putAmelia("lags", tmp) for (i in sel) tcltk::tkset(getAmelia("main.tree"), i, "lag", "X") return() } addLead <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("leads") tmp[sel] <- 1 putAmelia("leads", tmp) for (i in sel) tcltk::tkset(getAmelia("main.tree"), i, "lead", "X") return() } dropLag <- function() { sel <- 
strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("lags") tmp[sel] <- 0 putAmelia("lags", tmp) for (i in sel) tcltk::tkset(getAmelia("main.tree"), i, "lag", "") return() } dropLead <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia("leads") tmp[sel] <- 0 putAmelia("leads", tmp) for (i in sel) tcltk::tkset(getAmelia("main.tree"), i, "lead", "") return() } setTrans <- function(trans) { all.trans <- c(logs = "Log",sqrt = "Square Root", lgstc = "Logistic", noms = "Nominal", ords = "Ordinal", idvar = "ID") sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] tmp <- getAmelia(trans) tmp[sel] <- 1 putAmelia(trans, tmp) for (j in sel) { tcltk::tkset(getAmelia("main.tree"), j,"transform", all.trans[trans]) tcltk::tcl(getAmelia("main.tree"), "item", j, image = "") } return() } dropTrans <- function() { all.trans <- c("logs","sqrt","lgstc","noms","ords","idvar") sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] for (j in sel) tcltk::tkset(getAmelia("main.tree"), j,"transform", "") if (is.factor(getAmelia("amelia.data")[,j]) | is.character(getAmelia("amelia.data")[,j])) { tcltk::tcl(getAmelia("main.tree"), "item", j, image = getAmelia("redFlagIcon")) } for (i in all.trans) { tmp <- getAmelia(i) tmp[sel] <- 0 putAmelia(i, tmp) } } addBounds <- function() { onOK <- function(sel) { bdMax <- as.numeric(tcltk::tclvalue(getAmelia("boundMax"))) bdMin <- as.numeric(tcltk::tclvalue(getAmelia("boundMin"))) if (is.na(bdMax) & !is.na(bdMin)) { tcltk::tkmessageBox(parent=getAmelia("addBoundsWindow"), title="Error", message="Please enter a minimum and a maximum value or neither to clear the bounds.", type="ok",icon="error") return() } if (!is.na(bdMax) & is.na(bdMin)) { tcltk::tkmessageBox(parent=getAmelia("addBoundsWindow"), title="Error", message="Please enter a minimum and a maximum value or neither to clear the bounds.", type="ok",icon="error") return() } if (!is.na(bdMax) & !is.na(bdMin)) { if (bdMax <= bdMin) { tcltk::tkmessageBox(parent=getAmelia("addBoundsWindow"), title="Error", message="The maximum is less than the minimum.", type="ok",icon="error") return() } } tmpbmat <- getAmelia("boundsmat") tmpbmat[sel,2:3] <- c(bdMin, bdMax) putAmelia("boundsmat", tmpbmat) if (!is.na(bdMin)) { treeBounds <- paste("[",bdMin,", ", bdMax,"]", sep = "") } else { treeBounds <- "" } tcltk::tkset(getAmelia("main.tree"), sel, "bounds", treeBounds) tcltk::tkwm.withdraw(getAmelia("addBoundsWindow")) tcltk::tkgrab.release(getAmelia("addBoundsWindow")) tcltk::tkfocus(getAmelia("gui")) return() } validateNumeric <- function(x) { if (isTRUE(grep("(^-?[0-9]*\\.?[0-9]*$)",x)==1)) return(tcltk::tclVar("TRUE")) else return(tcltk::tclVar("FALSE")) } sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] if (sum(is.na(getAmelia("amelia.data")[,sel])) == 0) { tcltk::tkmessageBox(parent = getAmelia("gui"), message = "No missing data on the selected variable.", type = "ok") return() } currMin <- getAmelia("boundsmat")[sel,2] currMax <- getAmelia("boundsmat")[sel,3] putAmelia("boundMin", tcltk::tclVar(ifelse(is.na(currMin), "", currMin))) putAmelia("boundMax", tcltk::tclVar(ifelse(is.na(currMax), "", currMax))) if (exists("addBoundsWindow", envir = ameliaEnv)) { tcltk::tkconfigure(getAmelia("maxBox"), textvar = getAmelia("boundMax")) tcltk::tkconfigure(getAmelia("minBox"), textvar = 
getAmelia("boundMin")) tcltk::tkconfigure(getAmelia("bd.ok"), command = function() onOK(sel)) tcltk::tkwm.deiconify(getAmelia("addBoundsWindow")) tcltk::tkraise(getAmelia("addBoundsWindow")) return() } putAmelia("addBoundsWindow", tcltk::tktoplevel()) tcltk::tkwm.title(getAmelia("addBoundsWindow"), "Add or Edit Bounds") bounds.add <- tcltk::ttkframe(getAmelia("addBoundsWindow")) putAmelia("minBox", tcltk::ttkentry(bounds.add, textvar=getAmelia("boundMin"), validate="key", validatecommand = function(P) validateNumeric(P))) putAmelia("maxBox", tcltk::ttkentry(bounds.add, textvar=getAmelia("boundMax"), validate="key", validatecommand = function(P) validateNumeric(P))) tcltk::tkgrid(tcltk::ttklabel(bounds.add, text="Minimum:"), column=1, row=2, sticky = "e", padx = 10, pady = 10) tcltk::tkgrid(tcltk::ttklabel(bounds.add, text="Maximum:"), column=1, row=3, sticky = "e", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("minBox"), column=2, row=2, pady=5, padx=5) tcltk::tkgrid(getAmelia("maxBox"), column=2, row=3, pady=5, padx=5) but.frame <- tcltk::ttkframe(bounds.add) putAmelia("bd.ok", tcltk::ttkbutton(but.frame, text = "OK", command = function() onOK(sel))) putAmelia("bd.can", tcltk::ttkbutton(but.frame, text = "Cancel", width = 10, command = function() {tcltk::tkwm.withdraw(getAmelia("addBoundsWindow"));tcltk::tkgrab.release(getAmelia("addBoundsWindow"));tcltk::tkfocus(getAmelia("gui"))})) tcltk::tkgrid(getAmelia("bd.ok"), row = 0, column = 1, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(getAmelia("bd.can"), row = 0, column = 2, sticky = "ne", padx = 10, pady = 10) tcltk::tkgrid(but.frame, row = 4, column = 1, columnspan = 2, sticky = "ne") tcltk::tkgrid(bounds.add, sticky = "news") tcltk::tkwm.protocol(getAmelia("addBoundsWindow"), "WM_DELETE_WINDOW", function() {tcltk::tkwm.withdraw(getAmelia("addBoundsWindow"));tcltk::tkgrab.release(getAmelia("addBoundsWindow"));tcltk::tkfocus(getAmelia("gui"))}) centerModalDialog(getAmelia("addBoundsWindow"), resize=FALSE) } plotHist <- function() { sel <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "selection")), " ")[[1]] if (length(sel)==0) { tcltk::tkmessageBox(parent = getAmelia("gui"), type = "ok", message = "No variable selected.") return(NULL) } sel <- sel[which(sapply(getAmelia("amelia.data")[sel], is.numeric))] if (length(sel)==0) { tcltk::tkmessageBox(parent = getAmelia("gui"), type = "ok", message = "Cannot plot non-numeric variables.") return(NULL) } dev.new() mfrow <- set.mfrow(nvars = length(sel)) on.exit(par(NULL)) layout <- par(mfrow = mfrow) j <- 0 for (i in sel) { j <- j + 1 if (j > 9) { j <- 1 dev.new() layout <- par(mfrow = mfrow) } hist(getAmelia("amelia.data")[,i], main = paste("Histogram of",i), ylab = "Frequnecy", xlab ="", col="grey", border = "white") } invisible() } sortTreeBy <- function(col) { coldata <- c() children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "children","")), " ")[[1]] if (col == "#0") { coldata <- children } else { for (i in children) { coldata <- c(coldata, tcltk::tclvalue(tcltk::tkset(getAmelia("main.tree"), i, col))) } } dirs <- getAmelia("sortDirs") sortDir <- dirs[col] if (col %in% c("mean", "sd", "min", "max")) { coldata[coldata == "..."] <- "-Inf" coldata[coldata == "(factor)"] <- "-Inf" sortOrder <- order(as.numeric(coldata), decreasing = sortDir) } else if (col == "miss") { coldata <- matrix(unlist(strsplit(coldata,"/")), nrow=2)[1,] sortOrder <- order(as.numeric(coldata), decreasing = sortDir) } else { sortOrder <- order(coldata, decreasing = sortDir) } 
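  ## At this point 'sortOrder' holds the permutation of the treeview rows implied
  ## by the clicked column: for the numeric summary columns, the placeholders
  ## "..." and "(factor)" were recoded to -Inf above, so factor variables group
  ## together at one end of the sort.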
sorted <- children[sortOrder] for (i in 1:length(children)) { tcltk::tkmove(getAmelia("main.tree"), sorted[i],"", i-1) } drawArrow(col, sortDir) refreshSelection(getAmelia("main.tree")) dirs[col] <- !sortDir putAmelia("sortDirs", dirs) } drawArrow <- function(col, down) { treecols <- names(getAmelia("sortDirs")) for (i in treecols) { tcltk::tcl(getAmelia("main.tree"), "heading", i, image = "") } if (down) { tcltk::tcl(getAmelia("main.tree"), "heading", col, image = getAmelia("upArrowIcon")) } else { tcltk::tcl(getAmelia("main.tree"), "heading", col, image = getAmelia("downArrowIcon")) } return(NULL) } bandTree <- function() { children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"), "children","")), " ")[[1]] j <- 0 tcltk::tktag.configure(getAmelia("main.tree"),"white", background="white") tcltk::tktag.configure(getAmelia("main.tree"),"gray", background="gray92") for (i in children) { j <- j+1 if ((j %% 2) == 0) { tcltk::tcl(getAmelia("main.tree"), "item", i, tag = "white") } else { tcltk::tcl(getAmelia("main.tree"), "item", i, tag = "gray") } } } updateTreeStats <- function(){ children <- strsplit(tcltk::tclvalue(tcltk::tcl(getAmelia("main.tree"),"children","")), " ")[[1]] for (i in names(getAmelia("amelia.data"))) { if (is.factor(getAmelia("amelia.data")[,i]) | is.character(getAmelia("amelia.data")[,i])) { vals <- c("(factor)","...","...","...") vals <- c(vals,paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep="/")) } else { vals <- c(min(getAmelia("amelia.data")[,i],na.rm=T), max(getAmelia("amelia.data")[,i],na.rm=T), mean(getAmelia("amelia.data")[,i],na.rm=T), sd(getAmelia("amelia.data")[,i],na.rm=T)) vals <- signif(vals, digits = 4) vals <- c(vals, paste(sum(is.na(getAmelia("amelia.data")[,i])), nrow(getAmelia("amelia.data")), sep="/")) } tcltk::tkset(getAmelia("main.tree"), i, "min", vals[1]) tcltk::tkset(getAmelia("main.tree"), i, "max", vals[2]) tcltk::tkset(getAmelia("main.tree"), i, "mean", vals[3]) tcltk::tkset(getAmelia("main.tree"), i, "sd", vals[4]) tcltk::tkset(getAmelia("main.tree"), i, "miss", vals[5]) } } centerModalDialog <- function(window, resize=TRUE) { xpos <- as.numeric(tcltk::tkwinfo("rootx",getAmelia("gui"))) ypos <- as.numeric(tcltk::tkwinfo("rootx",getAmelia("gui"))) rwidth <- as.numeric(tcltk::tkwinfo("width",getAmelia("gui"))) rheight <- as.numeric(tcltk::tkwinfo("height", getAmelia("gui"))) width <- as.numeric(tcltk::tkwinfo("reqwidth",window)) height <- as.numeric(tcltk::tkwinfo("reqheight",window)) newxpos <- xpos + .5*rwidth - .5*width newypos <- ypos + .5*rheight - .5*height if (.Platform$OS.type == "windows") tcltk::tkwm.geometry(window, paste("+",round(newxpos),"+",round(newypos),sep="")) tcltk::tkfocus(window) tcltk::tkgrab.set(window) if (!resize) { tcltk::tkwm.resizable(window, 0,0) } tcltk::tkwm.transient(window, getAmelia("gui")) tcltk::tcl("update","idletasks") } showImputedFiles <- function() { if (Sys.info()['sysname'] %in% c("Windows", "Darwin")) system(paste("open", shQuote(getAmelia("wdForLastImputation")))) else system(paste("xdg-open", shQuote(getAmelia("wdForLastImputation")))) return(NULL) } ## Here is (finally) a decent solution to the tcl/tk issues with ## global variables. Here we create new environment, whose parent is ## the Amelia namespace. We then make sure that all of the GUI ## functions use that as their enclosure. This means that any of these ## functions can use values in the ameliaEnv. 
This eliminates the need ## for any "getAmelia" calls, but we still have to be careful since ## assigning values in these functions is local and doesn't ## automatically add the value to ameliaEnv. So, for assigning, ## 'putAmelia' probably still makes sense. We could use ## assign("foo", "bar", envir = parent.frame()) ## but putAmelia is probably more clear. getAmelia() is probably still ## a little more safe to use because it will throw an error if ## something is missing, whereas relying on lexical scoping will try ## to use something with the same name in the search path. ameliaEnv <- new.env() environment(main.close) <- ameliaEnv environment(setWorkingDir) <- ameliaEnv environment(loadStata) <- ameliaEnv environment(loadSPSS) <- ameliaEnv environment(loadSAS) <- ameliaEnv environment(loadTAB) <- ameliaEnv environment(loadCSV) <- ameliaEnv environment(loadRData) <- ameliaEnv environment(loadDemo) <- ameliaEnv environment(drawMissMap) <- ameliaEnv environment(activateGUI) <- ameliaEnv environment(save.session) <- ameliaEnv environment(load.session) <- ameliaEnv environment(run.amelia) <- ameliaEnv environment(amelia.save) <- ameliaEnv environment(set.out) <- ameliaEnv environment(setTS) <- ameliaEnv environment(unsetTS) <- ameliaEnv environment(setCS) <- ameliaEnv environment(unsetCS) <- ameliaEnv environment(fillMainTree) <- ameliaEnv environment(AmeliaView) <- ameliaEnv environment(buildNumericalOptions) <- ameliaEnv environment(buildOutputOptions) <- ameliaEnv environment(buildAboutDialog) <- ameliaEnv environment(gui.pri.setup) <- ameliaEnv environment(gui.diag.setup) <- ameliaEnv environment(save.log) <- ameliaEnv environment(show.output.log) <- ameliaEnv environment(bindTooltip) <- ameliaEnv environment(showTooltip) <- ameliaEnv environment(killTooltip) <- ameliaEnv environment(refreshSelection) <- ameliaEnv environment(variableOptionStatus) <- ameliaEnv environment(variableOptionsPost) <- ameliaEnv environment(mainTreeRightClick) <- ameliaEnv environment(addLag) <- ameliaEnv environment(addLead) <- ameliaEnv environment(dropLag) <- ameliaEnv environment(dropLead) <- ameliaEnv environment(setTrans) <- ameliaEnv environment(dropTrans) <- ameliaEnv environment(addBounds) <- ameliaEnv environment(plotHist) <- ameliaEnv environment(sortTreeBy) <- ameliaEnv environment(drawArrow) <- ameliaEnv environment(bandTree) <- ameliaEnv environment(updateTreeStats) <- ameliaEnv environment(centerModalDialog) <- ameliaEnv environment(showImputedFiles) <- ameliaEnv Amelia/R/print.amelia.R0000644000176200001440000000124114335240021014341 0ustar liggesusers## ## print.amelia() - print method for the "amelia" class ## ## INPUT: object - an object of class "amelia" which is output ## from the amelia() function ## ## OUTPUT: Prints some information about the imputations. ## ## mb 02/02/09 ## print.amelia <- function(x, ...) { m <- length(x$imputations) cat(paste("\nAmelia output with ",m," imputed datasets.\n", sep="")) cat(paste("Return code: ", x$code,"\n"), sep="") cat(paste("Message: ", x$message, "\n"), sep="") cat("\nChain Lengths:\n") cat("--------------\n") for (i in 1:m) { cat(paste("Imputation ",i,": ", nrow(x$iterHist[[i]]),"\n", sep="")) } cat("\n") invisible(x) } Amelia/R/summary.amelia.R0000644000176200001440000000303714335240021014707 0ustar liggesusers #' Summary of an Amelia object #' #' Returns summary information from the Amelia run along with #' missingles information. #' #' @param object an object of class \code{amelia}. Typically, an output #' from the function \code{amelia}. 
#' @param ... further arguments.
#'
#' @seealso \code{\link{amelia}}, \code{\link{plot.amelia}}
summary.amelia <- function(object, ...) {
  percent.missing <- colMeans(object$missMatrix)
  n.patterns <- nrow(unique(object$missMatrix))
  rows.imputed <- nrow(na.omit(object$imputations[[1]]))
  rows.lwd <- sum(rowSums(object$missMatrix)==0)
  print.amelia(object)
  cat("Rows after Listwise Deletion: ",rows.lwd,"\n")
  cat("Rows after Imputation: ", rows.imputed,"\n")
  cat("Patterns of missingness in the data: ", n.patterns, "\n\n")
  cat("Fraction Missing for original variables: \n")
  cat("-----------------------------------------\n\n")
  tb <- data.frame(cbind(percent.missing))
  rownames(tb) <- colnames(object$missMatrix)
  colnames(tb) <- "Fraction Missing"
  print(tb)
  cat("\n")
  if (!is.null(object$transform.calls)) {
    cat("Post-imputation transformed variables: \n")
    cat("-----------------------------------------\n\n")
    tnames <- unlist(lapply(object$transform.calls, function(x) names(x)[-c(1,2)]))
    texprs <- unlist(lapply(object$transform.calls, function(x) as.character(x[-c(1,2)])))
    tb2 <- data.frame(cbind(texprs))
    rownames(tb2) <- paste(tnames, "=")
    colnames(tb2) <- "Transformations"
    print(tb2)
  }
}
Amelia/R/missmap.R0000644000176200001440000001745614335240021013446 0ustar liggesusers
#' Missingness Map
#'
#' Plots a missingness map showing where missingness occurs in
#' the dataset passed to \code{amelia}.
#'
#' @param obj an object of class "amelia"; typically output from the
#' function \code{amelia}, a matrix or a dataframe.
#' @param vars a vector of column numbers or column names of the data
#' to include in the plot. The default is to plot all variables.
#' @param legend should a legend be drawn? (True or False)
#' @param col a vector of length two where the first element specifies
#' the color for missing cells and the second element specifies
#' the color for observed cells.
#' @param main main title of the plot. Defaults to "Missingness Map".
#' @param y.cex expansion for the unit names on the y-axis.
#' @param x.cex expansion for the variable names on the x-axis.
#' @param y.labels a vector of row labels to print on the y-axis.
#' @param y.at a vector of the same length as \code{y.labels} with row
#' numbers associated with the labels.
#' @param csvar column number or name of the variable corresponding to
#' the unit indicator. Only used when the \code{obj} is not of class
#' \code{amelia}.
#' @param tsvar column number or name of the variable corresponding to
#' the time indicator. Only used when the \code{obj} is not of class
#' \code{amelia}.
#' @param rank.order a logical value. If \code{TRUE}, the default, then
#' the order of the variables along the x-axis is sorted by the
#' percent missing (from highest to lowest). If \code{FALSE}, it is
#' simply the order of the variables in the data.
#' @param margins a vector of length two that specifies the bottom and
#' left margins of the plot. Useful for when variable names or
#' row names are long.
#' @param gap.xaxis value to pass to the \code{gap.axis} argument of
#' the \code{axis} function that plots the x-axis. See
#' \code{\link{axis}} for more details. Ignored on R versions less
#' than 4.0.0.
#' @param x.las value of the \code{las} argument to pass to the
#' \code{\link{axis}} function creating the x-axis.
#' @param ... further graphical arguments.
#'
#' @details \code{missmap} draws a map of the missingness in a dataset using the
#' \code{image} function. The columns are reordered to put the most
#' missing variable farthest to the left.
#' The rows are reordered to a unit-period order if the \code{ts} and
#' \code{cs} arguments were passed to \code{amelia}. If not, the rows are
#' not reordered.
#'
#' The \code{y.labels} and \code{y.at} commands can be used to associate
#' labels with rows in the data to identify them in the plot. The y-axis
#' is internally inverted so that the first row of the data is associated
#' with the top-most row of the missingness map. The values of
#' \code{y.at} should refer to the rows of the data, not to any point on
#' the plotting region.
#'
#' @seealso \code{\link{compare.density}}, \code{\link{overimpute}},
#' \code{\link{tscsPlot}}, \code{\link{image}}, \code{\link{heatmap}}
missmap <- function(obj, vars, legend = TRUE, col, main,
                    y.cex = 0.8, x.cex = 0.8, y.labels, y.at,
                    csvar = NULL, tsvar = NULL, rank.order = TRUE,
                    margins = c(5, 5), gap.xaxis = 1, x.las = 2, ...) {
  if (inherits(obj, "amelia")) {
    vnames <- colnames(obj$imputations[[1]])
    n <- nrow(obj$missMatrix)
    p <- ncol(obj$missMatrix)
    percent.missing <- colMeans(obj$missMatrix)
    pmiss.all <- mean(obj$missMatrix)
    r1 <- obj$missMatrix
  } else {
    vnames <- colnames(obj)
    n <- nrow(obj)
    p <- ncol(obj)
    percent.missing <- colMeans(is.na(obj))
    pmiss.all <- mean(is.na(obj))
    r1 <- 1 * is.na(obj)
  }
  if (missing(col)) col <- c("#eff3ff", "#2171b5")
  if (!missing(vars)) {
    if (is.character(vars)) {
      vars <- match(vars, vnames)
      if (any(is.na(vars))) {
        stop("vars not found in the data")
      }
    }
    if (any(!(vars %in% 1:p))) {
      stop("vars outside range of the data")
    }
    p <- length(vars)
    r1 <- r1[, vars]
    percent.missing <- percent.missing[vars]
    pmiss.all <- mean(r1)
  }
  if (!missing(y.labels) && (missing(y.at) && (length(y.labels) != n))) {
    stop("y.at must accompany y.labels if there is less than one for each row")
  }
  if (is.null(csvar)) csvar <- obj$arguments$cs
  if (is.null(tsvar)) tsvar <- obj$arguments$ts
  if (missing(y.labels)) {
    if (!is.null(csvar)) {
      if (inherits(obj, "amelia")) {
        cs <- obj$imputations[[1]][, csvar]
      } else {
        cs <- obj[, csvar]
      }
      y.labels <- cs
      if (is.factor(y.labels)) y.labels <- levels(y.labels)[unclass(y.labels)]
      cs.names <- y.labels
      if (!is.numeric(cs)) cs <- as.numeric(as.factor(cs))
      if (!is.null(tsvar)) {
        if (inherits(obj, "amelia")) {
          ts <- as.numeric(obj$imputations[[1]][, tsvar])
        } else {
          ts <- as.numeric(obj[, tsvar])
        }
        unit.period <- order(cs, ts)
      } else {
        unit.period <- 1:n
      }
      y.labels <- y.labels[unit.period]
      r1 <- r1[unit.period, ]
      brks <- c(TRUE,rep(FALSE, times = (n-1)))
      for (i in 2:n) {
        brks[i] <- (cs[unit.period][i] != cs[unit.period][i - 1])
      }
      y.at <- which(brks)
      y.labels <- y.labels[brks]
    } else {
      y.labels <- row.names(obj$imputations[[1]])
      y.at <- seq(1, n, by = 15)
      y.labels <- y.labels[y.at]
    }
  } else {
    if (missing(y.at)) y.at <- n:1
  }
  missrank <- rev(order(percent.missing))
  if (rank.order) {
    chess <- t(!r1[n:1, missrank])
    vnames <- vnames[missrank]
  } else {
    chess <- t(!r1[n:1, ])
  }
  y.at <- (n:1)[y.at]
  if (missing(main)) main <- "Missingness Map"
  par(mar = c(margins, 2, 1) + 0.1)
  ## here we fork for data/tscs type plots. users can't set this yet.
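  ## Note: 'type' is hard-coded to "data" just below, so only the cell-level
  ## missingness map is drawn; the "tscs" branch further down (which would plot
  ## the fraction missing by unit-period) is currently unreachable through the
  ## function's arguments.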
type <- "data" if (legend) { graphics::layout(matrix(c(1, 2), nrow = 1), widths = c(0.75, 0.25)) par(mar = c(margins, 2, 0) + 0.1, mgp = c(3, 0.25, 0)) } if (type == "data") { col.fix <- col if (sum(!chess) == 0) { col.fix <- col[2] } image(x = 1:(p), y = 1:n, z = chess, axes = FALSE, col = col.fix, xlab = "", ylab = "", main = main) if (getRversion() >= "4.0.0") { axis(1, lwd = 0, labels = vnames, las = x.las, at = 1:p, cex.axis = x.cex, gap.axis = gap.xaxis) } else { axis(1, lwd = 0, labels = vnames, las = x.las, at = 1:p, cex.axis = x.cex) } axis(2, lwd = 0, labels = y.labels, las = 1, at = y.at, cex.axis = y.cex) if (legend) { pm.lab <- paste("Missing (", round(100 * pmiss.all), "%)", sep = "") po.lab <- paste("Observed (", 100 - round(100 * pmiss.all), "%)", sep = "") par(mar = c(0, 0, 0, 0.3)) plot(0, 0, type = "n", axes = FALSE, ann = FALSE) legend("left", col = col, bty = "n", xjust = 0, border = "grey", legend = c(pm.lab, po.lab), fill = col, horiz = FALSE) } } else { tscsdata <- data.frame(cs.names, ts, rowMeans(r1)) tscsdata <- reshape(tscsdata, idvar = "cs.names", timevar = "ts", direction = "wide") rownames(tscsdata) <- tscsdata[, 1] colnames(tscsdata) <- unique(ts) tscsdata <- as.matrix(tscsdata[, -1]) cols <- rev(heat.colors(5)) image(z = t(tscsdata), axes = FALSE, col = cols, main = main, ylab = "", xlab = "") at.seq <- seq(from = 0, to = 1, length = ncol(tscsdata)) axis(1, labels = unique(ts), at = at.seq, tck = 0, lwd = 0, las = 2) axis(2, labels = rownames(tscsdata), at = at.seq, tck = 0, lwd = 0, las = 1, cex.axis = .8) if (legend) { leg.names <- c("0-0.2", "0.2-0.4", "0.4-0.6", "0.6-0.8", "0.8-1") legend(x = 0.95, y = 1.01, col = cols, bty = "n", xjust = 1, legend = leg.names, fill = cols, horiz = TRUE) } } invisible(NULL) } Amelia/R/prep.r0000644000176200001440000007146414335240021013002 0ustar liggesusers## ## prep.r ## ## Various routines for transforming the original data to the imputation model, ## and reverting back to the format of the original data ## 28/04/06 mb functions extracted from emb.r to prep.r ## 29/04/06 jh revised unsubset for memory issues ## 04/05/06 mb moved parameter vs. observation check to the end of prep. ## 18/05/06 mb 'ords' unsubset fixed to have range of original values ## 15/06/06 jh revised "generatepriors" ## 26/06/06 mb fixed archive to work with session loading. ## 27/06/06 mb amelia.prep accepts, checks, and processes 'arglist' ## 27/07/06 mb amsubset changes from dataframe to matrix after subsetting to ## avoid eating it on strings. ## 02/08/06 mb frame.to.matrix now converts chars to either factors (cs,noms) ## or junk (idvars). added to subset/unsubset (ignore last update). ## 02/08/06 mb fixed bug where polytime=0 would make for odd behaviour/crashing ## 11/09/06 mb fixed bug in unsubset: 'index' too long to subset w/o 'which' ## 18/10/06 mb incorporated confidence levels into generating priors ## 20/10/06 mb new format for priors ## 13/12/06 mb indiv. obs priors get priority in generatepriors ## 28/03/07 jh added empri to prepped$archv, modified construction of timevars ## 10/05/07 mb logs now adds 1 instead of "epsilon" to avoid strange imputations. ## fixed blanks problems when no priors specified. 
## 11/05/07 mb added "combine.output" to combine multiple amelia outputs ## 15/08/07 jh modified construction of timevars ## 14/09/07 mb added 'bounds' support ## 22/07/08 mb - good coding update: T->TRUE/F->FALSE ## 27/03/10 jh added spline basis functions, changed "polynomials" matrix to instance of "timebasis" nametonumber <- function(x, ts, cs, idvars, noms, ords, logs, sqrts, lgstc, lags, leads) { listconvert <- function(opt) { junk.seq <- 1:ncol(x) junk.names <- dimnames(x)[[2]] for (i in 1:length(opt)) { mat <- opt[i] == junk.names if (sum(mat) == 0) return(NA) opt[i] <- junk.seq[mat] } return(as.numeric(opt)) } code<-0 mess<-paste("One of the variable names in the options list does not match a variable name in the data.") if (inherits(ts, "character")) ts <- listconvert(ts) if (inherits(cs, "character")) cs <- listconvert(cs) if (inherits(idvars, "character")) idvars <- listconvert(idvars) if (inherits(noms, "character")) noms <- listconvert(noms) if (inherits(ords, "character")) ords <- listconvert(ords) if (inherits(logs, "character")) logs <- listconvert(logs) if (inherits(sqrts, "character")) sqrts <- listconvert(sqrts) if (inherits(lgstc, "character")) lgstc <- listconvert(lgstc) if (inherits(lags, "character")) lags <- listconvert(lags) if (inherits(leads, "character")) leads <- listconvert(leads) output <- list(code = code, ts = ts, cs = cs, idvars = idvars, noms = noms, ords = ords, logs = logs, sqrts = sqrts, lgstc = lgstc, lags = lags, leads = leads, mess = mess) if (any(is.na(output))) output$code <- 1 return(output) } ## convert.priors - convert 4/5-column priors to matrix of priors ## priors: 4/5 column priors matrix ## nrow: rows of the data matrix ## ncol: cols of the data matrix ## ## output: a list of either 2 (in the 4 col case) or 3 (in the 5 col ## case) of prior matrices. 
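## Illustrative sketch (added example, not part of the original source): the
## 4-column prior format used by these helpers is (row, column, mean, sd) and
## the 5-column format is (row, column, minimum, maximum, confidence), with a
## row value of 0 placing the prior on every missing cell of that column.
## Assuming a hypothetical data frame 'dat' with a missing value in row 3 of
## column 2, a normal(0.5, 0.1) prior on that single cell could be supplied as:
##
##   pr <- matrix(c(3, 2, 0.5, 0.1), nrow = 1)
##   a.out <- amelia(dat, m = 5, priors = pr)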
#convert.priors <- fucntion(priors, nrow, ncol) { # if (!is.matrix(priors)) # stop("argument 'priors' is not a matrix") # if (ncol(priors) != 4 || ncol(priors) != 5) # stop("priors matrix has the wrong number of columns") # if (ncol(priors) == 4) { # #generate output priors matrix, the size of the data # out.means <- matrix(NA, nrow = nrow, ncol = ncol) # out.sds <- matrix(NA, nrow = nrow, ncol = ncol) # # fill in the the matrices # for (i in 1:nrow(priors)) { # out.means[priors[i,1], priors[i,2]] <- priors[i,3] # out.sds[priors[i,1], priors[i,2]] <- priors[i,4] # } # return(list(means = out.means, sds = out.sds)) # } # if (ncol(priors) == 5) { # out.mins <- matrix(NA, nrow = nrow, ncol = ncol) # out.maxs <- matrix(NA, nrow = nrow, ncol = ncol) # out.conf <- matrix(NA, nrow = nrow, ncol = ncol) # for (i in 1:nrow(priors)) { # out.mins[priors[i,1], priors[i,2]] <- priors[i,3] # out.maxs[priors[i,1], priors[i,2]] <- priors[i,4] # out.conf[priors[i,1], priors[i,2]] <- priors[i,5] # } # return(list(mins = out.mins, maxs = out.maxs, conf = out.conf)) # } #} ## amtransform - Transform variables to assume normality ## x: data matrix ## logs: variable list of log-linear transformations ## sqrts: variable list of square root transformations ## lgstc: variable list of logistic transformations ## xmin: vector of column minimums amtransform<-function(x,logs,sqrts,lgstc) { logs<-unique(logs) sqrts<-unique(sqrts) lgstc<-unique(lgstc) xmin<-c() if (!is.null(logs)) { for (i in 1:length(logs)) { j<-logs[i] xmin<-c(xmin,min(c(0,min(x[,j],na.rm=TRUE)))) #we need mins to avoid creating NAs x[,j]<-log(x[,j]-xmin[i]+1) #by taking a log of a negative number } } if (!is.null(sqrts)) for (i in sqrts) x[,i]<-sqrt(x[,i]) if (!is.null(lgstc)) for (i in lgstc) x[,i]<-log(x[,i]/(1-x[,i])) return(list(x=x,xmin=xmin)) } ## untransform - Convert imputed variables to original scale ## x.imp: imputed data matrix ## logs: variable list of log-linear transformations ## xmins: vector of column minimums ## sqrts: variable list of square root transformations ## lgstc: variable list of logistic transformations untransform<-function(x.imp,logs,xmin,sqrts,lgstc) { logs<-unique(logs) sqrts<-unique(sqrts) lgstc<-unique(lgstc) if (!is.null(logs)) { for (i in 1:length(logs)) { j<-logs[[i]] x.imp[,j]<-exp(x.imp[,j])+xmin[[i]] } } if (!is.null(sqrts)) for (i in sqrts) x.imp[,i]<-(x.imp[,i])^2 if (!is.null(lgstc)) for (i in lgstc) x.imp[,i]<-exp(x.imp[,i])/(1 + exp(x.imp[,i])) return(x.imp) } frame.to.matrix<-function(x,idvars) { char.vars<-which(sapply(x,class)=="character") if (length(char.vars) > 0) for (i in char.vars) if (is.na(match(i,idvars))) x[,i]<-as.factor(x[,i]) #changes cs/noms char. vars to factors else x[,i]<-1 #junks id char vars. 
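  ## character ID variables were just overwritten with a constant and character
  ## cs/noms variables converted to factors, so data.matrix() below returns a
  ## fully numeric matrix (factors become their integer codes)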
return(data.matrix(x)) #return it as matrix } ## Remove rows and columns from dataset that do not belong amsubset<-function(x,idvars,p2s,ts,cs,priors=NULL, polytime=NULL,splinetime=NULL,intercs=FALSE,lags=NULL, leads=NULL,noms=NULL,bounds=NULL, overimp = NULL) { lags <- unique(lags) leads <- unique(leads) noms <- unique(noms) idvars <- unique(idvars) index <- c(1:ncol(x)) theta.names <- colnames(x) if (!is.null(idvars)) { index <- index[-idvars] theta.names <- theta.names[-idvars] } if (is.data.frame(x)) x <- frame.to.matrix(x,idvars) overvalues <- NULL ## Set overimp cells to missing if (!is.null(overimp)) { whole.vars <- overimp[overimp[,1] == 0, 2] whole.vars <- as.matrix(expand.grid(1:nrow(x), whole.vars)) overimp <- overimp[overimp[,1] != 0,] overimp <- rbind(overimp, whole.vars) if (!is.matrix(overimp)) overimp <- t(as.matrix(overimp)) overvalues <- x[overimp] is.na(x) <- overimp } AMmiss <- is.na(x) if (!is.null(lags)) { if (!identical(cs,NULL)) { tsarg<-list(x[,cs],x[,ts]) } else { tsarg<-list(x[,ts]) } tssort<-do.call("order",tsarg) x.sort<-x[tssort,] for (i in lags) { lagged<-c(NA,x.sort[1:(nrow(x)-1),i]) if (!identical(cs,NULL)) { for (i in 2:nrow(x.sort)) if (x.sort[i,cs]!=x.sort[i-1,cs]) is.na(lagged)<-i } x.sort<-cbind(x.sort,lagged) x<-cbind(x,1) index<-c(index,-.5) #-.5=lags theta.names <- c(theta.names, paste("lag",colnames(x)[i],sep=".")) } x[tssort,]<-x.sort } if (!is.null(leads)){ if (!identical(cs,NULL)) { tsarg<-list(x[,cs],x[,ts]) } else { tsarg<-list(x[,ts]) } tssort<-do.call("order",tsarg) x.sort<-x[tssort,] for (i in leads) { led<-x.sort[2:nrow(x),i] led<-c(led,NA) if (!identical(cs,NULL)) { for (i in 1:(nrow(x.sort)-1)) if (x.sort[i,cs]!=x.sort[i+1,cs]) is.na(led)<-i } x.sort<-cbind(x.sort,led) x<-cbind(x,1) index<-c(index,.5) #.5=leads theta.names <- c(theta.names, paste("lead",colnames(x)[i],sep=".")) } x[tssort,]<-x.sort } #puts timeseries and crosssection into the id variable to avoid singularity if (!is.null(ts)) { theta.names <- theta.names[index != ts] index<-index[index!=ts] idvars<-c(idvars,ts) } if (!is.null(cs)) { theta.names <- theta.names[index != cs] index<-index[index!=cs] idvars<-c(idvars,cs) } #nominals if (!is.null(noms)) { for (i in noms) { values<-unique(na.omit(x[,i])) newx<-matrix(0,nrow=nrow(x),ncol=length(values)-1) theta.names <- theta.names[index != i] index<-index[index!=i] for (j in 2:length(values)) { newx[,j-1]<-ifelse(x[,i] == values[j],1,0) index<-c(index,-i) theta.names <- c(theta.names, paste("noms",colnames(x)[i],j,sep=".")) } x<-cbind(x,newx) idvars<-c(idvars,i) } } ## REVISION TODAY BEGINS HERE #basis functions for time if (!identical(polytime,NULL) | !identical(splinetime,NULL) ){ if (!identical(splinetime,NULL)){ time<-x[,ts] knot<-rep(0,5) if(splinetime>3){ knot[1:(splinetime-1)]<-seq(from=min(time),to=max(time),length=(splinetime-1)) # The end points of this sequence are not being used } timebasis<-cbind(1,time,time^2,time^3,pmax(time-knot[2],0)^3,pmax(time-knot[3],0)^3,pmax(time-knot[4],0)^3) timebasis<-timebasis[,1:(splinetime+1),drop=FALSE] } if (!identical(polytime,NULL)){ time<-x[,ts] timebasis<-cbind(1,time,time^2,time^3) timebasis<-timebasis[,1:(polytime+1) ,drop=FALSE] } cstypes<-unique(x[,cs]) timevars<-matrix(0,nrow(x),1) if (intercs){ for (i in cstypes){ dummy<-as.numeric(x[,cs]==i) timevars<-cbind(timevars,dummy*timebasis) } timevars<-timevars[,c(-1,-2), drop = FALSE] } else { timevars<-cbind(timevars,timebasis) timevars<-timevars[,-c(1,2), drop = FALSE] # first column is a holding variable, second is to have 
fixed effects identified } ## ENDS TODAY x<-cbind(x,timevars) if (ncol(timevars)) { for (i in 1:ncol(as.matrix(timevars))) { index<-c(index,0) #0 - timevars theta.names <- c(theta.names, paste("time",i,sep=".")) } } } else { if (intercs) { cstypes <- unique(x[,cs]) timevars <- matrix(0, nrow(x), 1) for (i in cstypes) { dummy <- as.numeric(x[,cs] == i) timevars <- cbind(timevars, dummy) } timevars <- timevars[,-c(1,2)] x<-cbind(x,timevars) if (ncol(timevars)) { for (i in 1:ncol(as.matrix(timevars))) { index<-c(index,0) #0 - timevars theta.names <- c(theta.names, paste("time",i,sep=".")) } } } } if (!identical(idvars,NULL)) x<-x[,-idvars, drop = FALSE] if (p2s == 2) { cat("Variables used: ", theta.names,"\n") } AMr1 <- is.na(x) blanks <- which(rowSums(AMr1)==ncol(x)) if (length(blanks)) { x <- x[-blanks, ] if (!is.null(priors)) { priors <- priors[!(priors[,1] %in% blanks),] if (length(blanks) == 1) { row.adjust <- 1 * (priors[, 1, drop = FALSE] > blanks) } else { row.adjust <- colSums(sapply(priors[, 1, drop = FALSE],">",blanks)) } priors[,1] <- priors[,1,drop=FALSE] - row.adjust } if (p2s) cat("Warning: There are observations in the data that are completely missing.","\n", " These observations will remain unimputed in the final datasets.","\n") } else { blanks<-NULL } priors[,2] <- match(priors[,2], index) bounds[,1] <- match(bounds[,1], index) if (is.null(dim(x))) { x <- matrix(x, ncol = 1) } return(list(x=x,index=index,idvars=idvars,blanks=blanks,priors=priors,bounds=bounds,theta.names=theta.names,missMatrix=AMmiss,overvalues=overvalues)) } ## Replace rows and columns removed in "amsubset" ## Create integer values for nominals and ordinals ## ## x.orig: the original data-matrix. transformed, but not subsetted, ## scaled or centered, thus all variables are as they are in the ## user-submitted data. ## x.imp: the imputed data. has been unscaled, uncentered, but its ## it still has excess variables (polynomials of time, nominal ## categories, etc) and ordinal variables still have non-integer ## values. ## index: denotes what each column of x.imp is. ## a positive integer (i): ith column of x.orig. ## 0: basis function (polynomial/spline) of time ## .5: leads ## -.5: lags ## a negative integer (-i): a dummy used for the nominal var in ## the ith column of x.orig unsubset <- function(x.orig, x.imp, blanks, idvars, ts, cs, polytime, splinetime, intercs, noms, index, ords) { ## create if (is.data.frame(x.orig)) { oldidvars <- idvars[-match(c(cs, noms), idvars)] x.orig <- frame.to.matrix(x.orig, oldidvars) } AMr1.orig <- is.na(x.orig) ## since we're going to use the blanks in noms/ords ## we need these changed here. 
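  ## when there are no fully missing rows or ID variables, 'blanks' and 'idvars'
  ## are set to the complete vector of negative indices so that subsetting with
  ## '-blanks' or '-idvars' below keeps every row and column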
if (identical(blanks, NULL)) {blanks <- -(1:nrow(x.orig))} if (identical(idvars, NULL)) {idvars <- -(1:ncol(x.orig))} ## noms are idvars, so we'll fill them in manually ## (mb 2 Apr 09 -- fixed handling of "blanks") if (!is.null(noms)) { for (i in noms) { y <- runif(nrow(x.imp)) dums <- x.imp[, which(index == -i)] p <- dums * (dums > 0) * (dums < 1) + ((dums - 1) >= 0) psub <- rowSums(as.matrix(p)) psub <- (psub <= 1) + (psub) * (psub > 1) p <- p / psub pzero <- 1 - rowSums(as.matrix(p)) p <- cbind(pzero, p) pk <- ncol(p) utri.mat <- matrix(0, nrow = pk, ncol = pk) utri.mat <- utri.mat + upper.tri(utri.mat, diag = TRUE) cump <- p %*% utri.mat cump.shift <- cbind(matrix(0, nrow(cump), 1), cump[, 1:(ncol(cump) - 1)]) yy <- (y < cump) * (y > cump.shift) renom <- (yy %*% unique(na.omit(x.orig[, i]))) x.orig[-blanks, i] <- renom } } ## here we force the ords into integer values ## (mb 2 Apr 09 -- fixed handling of "blanks") if (!is.null(ords)) { ords <- unique(ords) # find where the ordinals are in the impords <- match(ords,index) x <- x.imp[, impords] * AMr1.orig[-blanks, ords] ############ revision ##################### minmaxords <- matrix(0, length(ords), 2) for(jj in 1:length(ords)) { tempords <- x.orig[AMr1.orig[, ords[jj]] == 0 , ords[jj]] minmaxords[jj,1] <- min(tempords) minmaxords[jj,2] <- max(tempords) } minord <- minmaxords[,1] maxord <- minmaxords[,2] ############ replaces ##################### # minord <- apply(ifelse(AMr1.orig[,ords]==1,NA,x.orig[,ords]),2,min,na.rm=T) # maxord <- apply(ifelse(AMr1.orig[,ords]==1,NA,x.orig[,ords]),2,max,na.rm=T) ordrange <- maxord - minord p <- t((t(x) - minord) / ordrange) * AMr1.orig[-blanks, ords] p <- p * (p > 0) * (p < 1) + ((p - 1) >= 0) newimp <- matrix(0, nrow(x.imp), length(ords)) for (k in 1:length(ords)) { reordnl <- rbinom(nrow(x.imp), ordrange[k], p[, k]) newimp[, k] <- reordnl + minord[k] * AMr1.orig[-blanks, ords[k]] } ############# revision ############################# ## replace the imputations with the ordinal values for(jj in 1:length(ords)){ x.imp[, impords[jj]] <- round(x.imp[, impords[jj]]) x.imp[AMr1.orig[-blanks, ords[jj]] == 1, impords[jj]] <- newimp[AMr1.orig[-blanks, ords[jj]] == 1, jj] } # MAYBE CAN REMOVE LOOP ############# replaces ############################# # x.orig[,ords] <- ifelse(AMr1.orig[,ords]==1,0,x.orig[,ords]) + newimp } ## now we'll fill the imputations back into the original. if (!identical(c(blanks, idvars), c(NULL, NULL))) { x.orig[-blanks, -idvars] <- x.imp[, 1:ncol(x.orig[, -idvars, drop = FALSE])] } else { x.orig <- x.imp[, 1:ncol(x.orig)] } return(x.orig) } ## Rescale Dataset scalecenter<-function(x,priors=NULL,bounds=NULL){ AMn<-nrow(x) ones<-matrix(1,AMn,1) meanx<-colMeans(x,na.rm=TRUE) stdvx<-apply(x,2,sd,na.rm=TRUE) no.obs <- colSums(!is.na(x)) == 0 if (!is.null(priors)) { meanx[no.obs] <- 0#unlist(tapply(priors[,3],priors[,2],mean))[order(unique(priors[,2]))] stdvx[no.obs] <- 1#unlist(tapply(priors[,3],priors[,2],sd))[order(unique(priors[,2]))] } x.ztrans<-(x-(ones %*% meanx))/(ones %*% stdvx) if (!is.null(priors)){ priors[,3]<-(priors[,3]-meanx[priors[,2]])/stdvx[priors[,2]] priors[,4]<- (priors[,4]/stdvx[priors[,2]])^2 #change to variances. 
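    ## prior means are mapped onto the standardized (z) scale and prior standard
    ## deviations are rescaled and squared, so downstream code works with prior
    ## variances on that scale; e.g., a prior of mean 10 and sd 2 on a variable
    ## with observed mean 8 and sd 4 becomes mean (10 - 8)/4 = 0.5 with
    ## variance (2/4)^2 = 0.25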
} if (!is.null(bounds)) { bounds[,2] <- (bounds[,2]-meanx[bounds[,1]])/stdvx[bounds[,1]] bounds[,3] <- (bounds[,3]-meanx[bounds[,1]])/stdvx[bounds[,1]] } return(list(x=x.ztrans,mu=meanx,sd=stdvx,priors=priors,bounds=bounds)) } unscale<-function(x,mu,sd){ AMn<-nrow(x) ones<-matrix(1,AMn,1) x.unscale<-(x * (ones %*% sd)) + (ones %*% mu) return(x.unscale) } ## Stack dataset and return vectors for sorting ## NOTE: THIS ORDERS TIES IN A SLIGHTLY DIFFERENT WAY THAN "stack.g" IN GAUSS AMELIA amstack<-function(x,colorder=TRUE,priors=NULL,bounds=NULL){ AMp<-ncol(x) AMr1<-is.na(x) if (colorder){ #Rearrange Columns p.order <- order(colSums(AMr1)) AMr1<-AMr1[,p.order, drop = FALSE] } else { p.order<-1:ncol(x) } n.order <- do.call("order", as.data.frame(AMr1[,AMp:1])) #Rearrange Rows AMr1<- AMr1[n.order,, drop = FALSE] # p.order has already been rearranged x<- x[n.order,p.order, drop = FALSE] # rearrange rows and columns of dataset if (!identical(priors,NULL)){ priors[,1]<-match(priors[,1],n.order) priors[,2]<-match(priors[,2],p.order) } if (!identical(bounds,NULL)) bounds[,1]<-match(bounds[,1],p.order) return(list(x=x,n.order=n.order,p.order=p.order,priors=priors,bounds=bounds)) } ## Rearrange dataset to original ordering of rows and columns amunstack<-function(x,n.order,p.order){ x.unstacked<-matrix(0,nrow=nrow(x),ncol=ncol(x)) x.unstacked[n.order,p.order]<-x return(x.unstacked) } # This function is in miserable shape. Need to clean up how lack of priors are dealt with. generatepriors<-function(AMr1,empri=NULL,priors=NULL){ if (!identical(priors,NULL)) { if (ncol(priors) == 5){ new.priors<-matrix(NA, nrow = nrow(priors), ncol = 4) new.priors[,1:2]<-priors[,1:2] new.priors[,3]<-priors[,3] + ((priors[,4] - priors[,3])/2) new.priors[,4]<-(priors[,4]-priors[,3])/(2*qnorm(1-(1-priors[,5])/2)) #NOTE: FIX THIS: Currently ignores CONF- ASSUMES CI95 } else { new.priors <-priors } zeros <- which(new.priors[,1]==0) if (length(zeros) > 0) { varPriors <- new.priors[zeros,2] missCells <- which(AMr1[,varPriors,drop=FALSE], arr.ind=TRUE) addedPriors <- matrix(NA, nrow=nrow(missCells), ncol=4) addedPriors[,1] <- missCells[,1] addedPriors[,2] <- varPriors[missCells[,2]] addedPriors[,-c(1,2)] <- new.priors[zeros[missCells[,2]],-c(1,2)] new.priors <- new.priors[-zeros,,drop=FALSE] # find any matches in the rows/cols and remove from addedPriors # since we've removed other dups, addedPriors will have the only # dups new.priors <- rbind(new.priors,addedPriors) new.priors <- new.priors[!duplicated(new.priors[,1:2]),] } return(new.priors) } } #' Combine Multiple Amelia Output Lists #' #' This function combines output lists from multiple runs of #' Amelia, where each run used the same arguments. The result is one #' list, formatted as if Amelia had been run once. #' #' @param ... a list of Amelia output lists from runs of Amelia with the #' same arguments except the number of imputations. #' #' @details This function is useful for combining the output from Amelia #' runs that occurred at different times or in different sessions of #' R. It assumes that the arguments given to the runs of Amelia are the #' same except for \code{m}, the number of imputations, and it uses the #' arguments from the first output list as the arguments for the combined #' output list. #' #' #' @keywords utilities combine.output <- function(...) 
{ cl <- match.call() cool <- unlist(lapply(cl, function(x) is.null(eval(x,parent.frame())$amelia.args))) if (max(cool[-1])==1) stop("One of the arguments is not an Amelia output list.") # we need the total number of imputations, so we'll # grab it from each argument (each ameliaoutput) # NOTE: the 'lapply' subset will be NULL for things in the call # that aren't amelia.output. 'unlist' then ignores those NULLs. ms <- unlist(lapply(cl,function(x) eval(x, parent.frame())$amelia.args$m)) m <- sum(ms) new.out <- vector("list", 2*m+1) names(new.out)[[2*m+1]] <- "amelia.args" new.out[[2*m+1]] <- eval(cl[[2]])$amelia.args new.out$amelia.args$m <- m count <- 1 for (i in 1:length(ms)) { for (j in 1:ms[i]) { new.out[[count]] <- eval(cl[[1+i]])[[j]] new.out[[m+count]] <- eval(cl[[1+i]])[[ms[i]+j]] new.out$amelia.args[[count+19]] <- eval(cl[[1+i]])$amelia.args[[j+19]] names(new.out)[count] <- paste("m", count, sep="") names(new.out)[m+count] <- paste("theta", count, sep="") names(new.out$amelia.args)[count+19] <- paste("iter.hist", count, sep="") count <- count + 1 } } return(new.out) } amelia.prep <- function(x,m=5,p2s=1,frontend=FALSE,idvars=NULL,logs=NULL, ts=NULL,cs=NULL,empri=NULL, tolerance=0.0001,polytime=NULL,splinetime=NULL,startvals=0,lags=NULL, leads=NULL,intercs=FALSE,sqrts=NULL, lgstc=NULL,noms=NULL,incheck=TRUE,ords=NULL,collect=FALSE, arglist=NULL, priors=NULL,var=NULL,autopri=0.05,bounds=NULL, max.resample=NULL, overimp = NULL, emburn=NULL, boot.type=NULL) { code <- 1 ## If there is an ameliaArgs passed, then we should use ## those. if (!identical(arglist,NULL)) { if (!("ameliaArgs" %in% class(arglist))) { error.code <- 46 error.mess <- paste("The argument list you provided is invalid.") return(list(code=error.code, message=error.mess)) } idvars <- arglist$idvars empri <- arglist$empri ts <- arglist$ts cs <- arglist$cs tolerance <- arglist$tolerance polytime <- arglist$polytime splinetime<- arglist$splinetime lags <- arglist$lags leads <- arglist$leads logs <- arglist$logs sqrts <- arglist$sqrts lgstc <- arglist$lgstc intercs <- arglist$intercs noms <- arglist$noms startvals <- arglist$startvals ords <- arglist$ords priors <- arglist$priors autopri <- arglist$autopri empri <- arglist$empri #change 1 bounds <- arglist$bounds overimp <- arglist$overimp emburn <- arglist$emburn boot.type <- arglist$boot.type max.resample <- arglist$max.resample } # If data frame is a tibble, code will break because of assumptions about # [, i, drop = TRUE]. 
Rather than change existing code, convert # `x` to a data.frame if (is.data.frame(x)) x <- as.data.frame(x) numopts<-nametonumber(x=x,ts=ts,cs=cs,idvars=idvars,noms=noms,ords=ords, logs=logs,sqrts=sqrts,lgstc=lgstc,lags=lags,leads=leads) if (numopts$code == 1) { return(list(code=44,message=numopts$mess)) } if (incheck) { checklist<-amcheck(x = x, m = m, idvars = numopts$idvars, priors = priors, empri = empri, ts = numopts$ts, cs = numopts$cs, tolerance = tolerance, polytime = polytime, splinetime = splinetime, lags = numopts$lags, leads = numopts$leads, logs = numopts$logs, sqrts = numopts$sqrts, lgstc =numopts$lgstc, p2s = p2s, frontend = frontend, intercs = intercs, noms = numopts$noms, startvals = startvals, ords = numopts$ords, collect = collect, bounds=bounds, max.resample=max.resample, overimp = overimp, emburn=emburn, boot.type=boot.type) #check.call <- match.call() #check.call[[1]] <- as.name("amcheck") #checklist <- eval(check.call, parent.frame()) if (!is.null(checklist$code)) { return(list(code=checklist$code,message=checklist$mess)) } m <- checklist$m priors <- checklist$priors } priors <- generatepriors(AMr1 = is.na(x),empri = empri, priors = priors) archv <- match.call(expand.dots=TRUE) archv[[1]] <- NULL archv <- list(idvars=numopts$idvars, logs=numopts$logs, ts=numopts$ts, cs=numopts$cs, empri=empri, tolerance=tolerance, polytime=polytime, splinetime=splinetime, lags=numopts$lags, leads=numopts$leads, intercs=intercs, sqrts=numopts$sqrts, lgstc=numopts$lgstc, noms=numopts$noms, ords=numopts$ords, priors=priors, autopri=autopri, bounds=bounds, max.resample=max.resample, startvals=startvals, overimp = overimp, emburn=emburn, boot.type=boot.type) #change 2 if (p2s==2) { cat("beginning prep functions\n") flush.console() } d.trans<-amtransform(x,logs=numopts$logs,sqrts=numopts$sqrts,lgstc=numopts$lgstc) d.subset<-amsubset(d.trans$x,idvars=numopts$idvars,p2s=p2s,ts=numopts$ts,cs=numopts$cs,polytime=polytime,splinetime=splinetime,intercs=intercs,noms=numopts$noms,priors=priors,bounds=bounds, lags=numopts$lags, leads=numopts$leads, overimp=overimp) d.scaled<-scalecenter(d.subset$x,priors=d.subset$priors,bounds=d.subset$bounds) d.stacked<-amstack(d.scaled$x,colorder=TRUE,priors=d.scaled$priors,bounds=d.scaled$bounds) if (incheck) { realAMp <- ncol(d.stacked$x) realAMn <- nrow(d.stacked$x) #Error code: 34-35 #Too few observations to estimate parameters if (!identical(empri,NULL)) { if (realAMp*2 > realAMn+empri) { error.code<-34 error.mess<-paste("The number of observations in too low to estimate the number of \n", "parameters. You can either remove some variables, reduce \n", "the order of the time polynomial, or increase the empirical prior.") return(list(code=error.code,message=error.mess)) } if (realAMp*4 > realAMn +empri) { warning("You have a small number of observations, relative to the number, of variables in the imputation model. Consider removing some variables, or reducing the order of time polynomials to reduce the number of parameters.") } } else { if (realAMp*2 > realAMn) { error.code<-34 error.mess<-paste("The number of observations is too low to estimate the number of \n", "parameters. You can either remove some variables, reduce \n", "the order of the time polynomial, or increase the empirical prior.") return(list(code=error.code,message=error.mess)) } if (realAMp*4 > realAMn) { warning("You have a small number of observations, relative to the number, of variables in the imputation model. 
Consider removing some variables, or reducing the order of time polynomials to reduce the number of parameters.") } } } return(list( x = d.stacked$x, code = code, priors = d.stacked$priors, n.order = d.stacked$n.order, p.order = d.stacked$p.order, scaled.mu = d.scaled$mu, scaled.sd = d.scaled$sd, trans.x = d.trans$x, blanks = d.subset$blanks, idvars = d.subset$idvars, ts = numopts$ts, cs = numopts$cs, noms = numopts$noms, index = d.subset$index, ords = numopts$ords, m = m, logs = numopts$logs, archv = archv, xmin = d.trans$xmin, sqrts = numopts$sqrts, lgstc = numopts$lgstc, # outname = outname, subset.index = d.subset$index, autopri = autopri, bounds = d.stacked$bounds, theta.names = d.subset$theta.names, missMatrix = d.subset$missMatrix, overvalues = d.subset$overvalues, empri = empri, #change 3a tolerance = tolerance)) #change 3b } Amelia/R/transform.amelia.R0000644000176200001440000000347414335240021015232 0ustar liggesusers #' Transform imputed datasets from Amelia objects #' #' Updates the imputed datasets from an \code{amelia} output #' with the specified transformations. #' #' @param _data an object of class "amelia"; typically output from the #' function \code{amelia}. #' @param ... further arguments of the form \code{tag = value}. #' #' @details #' The \code{\dots} arugments to \code{transform.amelia} are #' expressions of the form \code{tag = value}, where \code{tag} is the #' variable that is being updated or created and \code{value} is an #' expression that is a function of the variables in the imputed #' datasets. For instance, if you wanted to create an interaction of two #' imputed variables, you could have one argument be \code{intervar = #' var1 * var2}. This would either update the current variable #' \code{intervar} in the imputed data or append a new variable called #' \code{intervar} to the imputed datasets. #' #' @return #' An object of class \code{amelia} with its \code{imputations} and #' \code{missMatrix} values updated according to the transformations. In #' addition, each of the calls to \code{transform.amelia} are stored in #' #' @seealso \code{\link{transform}} transform.amelia <- function(`_data`, ...) 
{ tcall <- match.call(expand.dots = TRUE) if (is.null(`_data`$transform.calls)) { `_data`$transform.calls <- list(tcall) } else { `_data`$transform.calls <- c(`_data`$transform.calls, tcall) } tcall[[1]] <- as.name("transform") names(tcall)[2] <- "" m <- length(`_data`$imputation) orig.data <- remove.imputations(`_data`) tcall[[2]] <- orig.data new.miss.matrix <- as.matrix(is.na(eval.parent(tcall))) for (i in 1:m) { tcall[[2]] <- `_data`$imputations[[i]] `_data`$imputations[[i]] <- eval.parent(tcall) } `_data`$missMatrix <- new.miss.matrix return(`_data`) } Amelia/MD50000644000176200001440000001307414336230455011764 0ustar liggesusers80fa70b3e1b7a52688370b2a75afc515 *DESCRIPTION e57d22d34f32a55b109ad971cde0e566 *NAMESPACE de0c286af910e1c1051b337523357bdf *NEWS f5e54746b65a49c7b3508dec6037025e *R/amcheck.r d3baac892bde0c75332242e676426e63 *R/ameliagui.r 6b5c4d33ef7340d85b9ccacc2db0bbae *R/combine.R 4ee08b222ded7e9c1943cbe53daeec8e *R/diag.r 690958e871c2eed10b2f325c17fa1e3b *R/emb.r aaa91207b7b35b6282675101f29fbb57 *R/missmap.R c1d9c2c6c97c836eb5e6ace99c467be2 *R/mo.R 93bc0d9c8a961979d55be6194c36312f *R/plot.amelia.R 5198e335e812b53c650b90aa13e5a6a7 *R/prep.r d5f644e2e2728a64f5e217a2d69ac318 *R/print.amelia.R 8423cb6dc20ca96865f07657b71b8506 *R/summary.amelia.R cceafec2e1bfe781c787cbcfbfba2072 *R/summary.mi.R 94ca6c1cc5486ad9e2a1f5e9da042229 *R/transform.amelia.R 40724c6f4b3cfd836f81c3002fb15810 *R/with.R 8936a4a01c5cc6578bd92a56972fdd9f *R/write.amelia.R d48d95b9df424494d5a007c7b2db0c95 *R/zzz.R 4606c8182d8212c717a4509be73e4898 *README.md ea124a494ad7922985532ad3b308e4d5 *build/partial.rdb 96230398d3c866fa237ec1d5a3ec82e6 *build/vignette.rds 86a9ede313e30b97b574185b484ffeea *data/africa.RData 05fc570eaae49e085a4f7e47edd69528 *data/freetrade.RData 17c3311711286117566d87d9babd4984 *inst/CITATION 34c3b909fad852edcfc786db278fbfb6 *inst/doc/ameliaview.R eb6e3522db295745308d91497b419279 *inst/doc/ameliaview.Rmd 2029acdb08b962d3a8a19939a166d1f1 *inst/doc/ameliaview.html c72c5427e945afd47259dd12b037d5c4 *inst/doc/diagnostics.R 048cf04b72ce93ec709b1accb3b33063 *inst/doc/diagnostics.Rmd ae49f7daad0663dbd64223c7d1ac9682 *inst/doc/diagnostics.html e4f6ec85f55c9cbc51e27b4a765d8e68 *inst/doc/intro-mi.R bf56359289980c8896c9e2c09a8d6498 *inst/doc/intro-mi.Rmd 0ad59baa33c1944a1859634bc5febef6 *inst/doc/intro-mi.html ade4e767074c762f07bee63b747da311 *inst/doc/using-amelia.R 7521730818adae6c009973dc7b665ec7 *inst/doc/using-amelia.Rmd e42f43cbed98839312922521e7f404d7 *inst/doc/using-amelia.html 04f6681a198ff53c82fd817af394eeda *inst/gui/action_go.gif 77065468f34e1ca5cc157a1b2981c717 *inst/gui/action_save.gif ed7fc633e7eee3181358f1952e6431d2 *inst/gui/action_stop.gif d385c634a375c5f8c298cd9574814274 *inst/gui/amelia.ico 53276025c89eb6d7dff9625c46d196b4 *inst/gui/arrow_down.gif f816544708aa47cc40a5f91c2dac6b76 *inst/gui/arrow_left.gif 639e1bd5c6199c1bb2717584c02b23ef *inst/gui/arrow_up.gif fddd644d7e138851617e74129a81f0ba *inst/gui/flag_red.gif 907670afe1f918ca79a8db365b318078 *inst/gui/gallery19.gif 470d00d4916dcbe93dab7535c8e8d142 *inst/gui/gallery19.jpg 2af148c94b46dccc06e9712854cbcd45 *inst/gui/histogram.gif 757fe8b8a3f26f8f41d071177e9d7ddd *inst/gui/icon_accept.gif f71383c993e0db3d6346d357132b4d1c *inst/gui/icon_clock.gif ac1cd0acbd38d8c313a30377b20739f2 *inst/gui/icon_user.gif 04ff1a5f927b484b66490a86727f18f2 *inst/gui/icon_world.gif 842455e3c2d9400175989bd758f7dec6 *inst/gui/page-R.gif 6db924002cba272c16a9471f79481e35 *inst/gui/page.gif a27c9dcaf1017d207f14665d4471a963 *inst/gui/page_dta.gif 
4398dfe1046619f9d19475a2c38c69a6 *inst/gui/page_edit.gif a5c4257dd8d5e6de887bc4eab2448c9b *inst/gui/page_sas.gif 634f55072485ebdb671435811c2fed68 *inst/gui/page_spss.gif edc79234c547cf5bf3b55693053108e2 *inst/gui/page_text.gif 112732b1f75170ce72cf5dfd162481c8 *inst/gui/page_up.gif 9b97c9abda11ed64b980f4991685b1ff *inst/gui/table.gif 21f7d8554e24a693e7ba050c731a145f *inst/test/transform.R 37d30ad4633f2bd750f9681666083607 *man/AmeliaView.Rd 5c636f4f8b4e41f10666cf4b5b0e25f8 *man/africa.Rd c647c93f1bdcf3f967c50066497b1bf4 *man/amelia.Rd 74e6fdeed2b0cf6ba588c90c278568b4 *man/amelia.package.Rd 517f1a046fb8381e5ab0a09efa077b16 *man/ameliabind.Rd 26ded8512ff61a239ec633b0395b90ae *man/ameliagui.Rd 2daa2439c070d3ad834b4b1ec0603910 *man/combine.output.Rd f630487089b23c2089ba3193ab0adccb *man/compare.density.Rd 926d5d0d856e86712af0cdf7c2c41e41 *man/disperse.Rd 91ba5fa3642915466ca440d03511e61d *man/freetrade.Rd 618a748006de2e6f7837e423257d4c6a *man/mi.combine.Rd eab7586a6c1ca1ffa2267f3c5c87d083 *man/mi.meld.Rd e24b9595aa9f18328559c9c5dae26e1c *man/missmap.Rd fc309e2c554ffc33d52340551af6241a *man/moPrep.Rd 1363d91f1ac070f3b464ba12bf3a3ae8 *man/overimpute.Rd 087120fe131debe34f7700f8cfe3aef1 *man/plot.amelia.Rd b3f7b3973c452751be2c80cdb8c39166 *man/summary.amelia.Rd 2b927957d4fd099aa81c128bea9ede23 *man/transform.amelia.Rd d4f279647a692fbd491049bc91b37965 *man/tscsPlot.Rd 9eb89292cfc154b5d478070c92d89943 *man/with.amelia.Rd fd89d4d10f779ea8d3e404ff35510164 *man/write.amelia.Rd 371cf688624fab1e2a46f8e9c47ac8ce *src/Makevars d71689a962dab060f33692c43fa6b58a *src/Makevars.win 90e42272c14f24c47ccdae51eb889c89 *src/em.cpp c182effc109e2371c85015878e59f43f *src/em.h f42ee4edaf7a27d96aeb454f0887c8d2 *src/init.c 49a08d3c220b13d6bb55139d0615d080 *tests/bounds.R 5dfe660e79eb80bf7df9025a1809c7f9 *tests/moPrep-test.R d9aa3f50a48e1f323bcc88ffaad521ad *tests/overimp.R a16cb353877c2cf9c65b1b7019186716 *vignettes/amelia.bib eb6e3522db295745308d91497b419279 *vignettes/ameliaview.Rmd 6a015f587490c597c73e60a6abc58893 *vignettes/assets/context-menu.png a0893936798fff3451ac84696449742b *vignettes/assets/diag.png 03b5316759c7db0f6f2fbd8bb05cab33 *vignettes/assets/distpri.png 4a083b51939f02226d5b2d759bb17f1e *vignettes/assets/import.png c434b1666c1626126595266901cec454 *vignettes/assets/main.png 611b6871ea799cbe5896c533854c83e6 *vignettes/assets/numopts.png 98ba2f512146d27e3a5222d7ace6bf2c *vignettes/assets/options.png c4a528350c2ac0070dd137cef96a6bc1 *vignettes/assets/output-log.png 2da68f633c3407c925324b7a31286f28 *vignettes/assets/rangepri.png 5752c8903e98aba503265a10e089701c *vignettes/assets/splash.png 048cf04b72ce93ec709b1accb3b33063 *vignettes/diagnostics.Rmd bf56359289980c8896c9e2c09a8d6498 *vignettes/intro-mi.Rmd 7521730818adae6c009973dc7b665ec7 *vignettes/using-amelia.Rmd Amelia/inst/0000755000176200001440000000000014335756316012434 5ustar liggesusersAmelia/inst/gui/0000755000176200001440000000000014335240021013175 5ustar liggesusersAmelia/inst/gui/icon_accept.gif0000644000176200001440000000037114335240021016134 0ustar liggesusersGIF89avGKp.ݖi湋XSqD6Y~mR,WLw]zF~GOPL`RgB!,v'dYND0)b!EtԘR9QU*CƱ 3p=_f@X A`ig^H<y}}w# `$. .!;Amelia/inst/gui/flag_red.gif0000644000176200001440000000034714335240021015433 0ustar liggesusersGIF89aQQLLRKmmYY{{UUccmm]]T3E4`;##΋/55vEY6!,d'di-($TY uE8߄` RYcZETʥG9rĢ@2o没 y($p#?"3g$!;Amelia/inst/gui/page_spss.gif0000644000176200001440000000114714335240021015653 0ustar liggesusersGIF89aJ)1C >EUCK\IUkRa{?LBMEMWpQeVeWdZfZgeulk~jnpqmqruvϙמ̉쿯Ͱٚ˵οڕܤѮ!,ĀD<:511++((ICC2?=Ƃ+I @>>=IBB@=99G(H#! 
[Binary image and icon assets under Amelia/inst/gui/ (GIF, JPG, and ICO files; the full list appears in the MD5 index above) omitted.]
Amelia/inst/doc/intro-mi.R:
## ----loadpkg, echo = FALSE, include = FALSE-----------------------------------
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
Amelia/inst/doc/ameliaview.html:
AmeliaView GUI Guide

AmeliaView GUI Guide

2022-11-18

Below is a guide to the AmeliaView menus with references back to the user's guide. The same principles from the user's guide apply to AmeliaView. The only difference is how you interact with the program. Whether you use the GUI or the command-line version, the same underlying code is being called, and so you can read the command line-oriented discussion above even if you intend to use the GUI.

Loading AmeliaView

The easiest way to load AmeliaView is to open an R session and type the following two commands:

library(Amelia)
AmeliaView()

This will bring up the AmeliaView window on any platform.

AmeliaView welcome screen

Loading data into AmeliaView

AmeliaView loads with a welcome screen that has buttons which can load data in many of the common formats. Each of these will bring up a window for choosing your dataset. Note that these buttons are only a subset of the possible ways to load data in AmeliaView. Under the File menu (shown below), you will find more options, including the datasets included in the package (africa and freetrade). You will also find import commands for Comma-Separated Values (.CSV), Tab-Delimited Text (.TXT), Stata v.5-10 (.DTA), SPSS (.DAT), and SAS Transport (.XPORT). Note that when using a CSV file, Amelia assumes that your file has a header (that is, a row at the top of the data indicating the variable names).

AmeliaView File and import menu.

You can also load data from an RData file. If the RData file contains more than one data.frame, a pop-up window will ask you to find the dataset you would like to load. In the file menu, you can also change the underlying working directory. This is where AmeliaView will look for data by default and where it will save imputed datasets.
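
If you prefer to prepare data at the R prompt instead of through the GUI, the same kinds of files can be read with base R and the foreign package (which Amelia already imports). The file names below are placeholders, not files shipped with the package:

library(foreign)
mydata  <- read.csv("mydata.csv", header = TRUE)   # comma-separated values with a header row
mystata <- read.dta("mydata.dta")                  # Stata (versions 5-10) .dta file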

Variable Dashboard

Main variable dashboard in AmeliaView

Once a dataset is loaded, AmeliaView will show the variable dashboard. In this mode, you will see a table of variables, with the current options for each of them shown, along with a few summary statistics. You can reorder this table by any of these columns by clicking on the column headings. This might be helpful to, say, order the variables by mean or amount of missingness.

Variable options via right-click menu on the variable dashboard

You can set options for individual variables by the right-click context menu or through the “Variables” menu. For instance, clicking “Set as Time-Series Variable” will set the currently selected variable in the dashboard as the time-series variable. Certain options are disabled until other options are enabled. For instance, you cannot add a lagged variable to the imputation until you have set the time-series variable. Note that any factor in the data is marked as an ID variable by default, since a factor cannot be included in the imputation without being set as an ID variable, a nominal variable, or the cross-section variable. If there is a factor that fails to meet one of these conditions, a red flag will appear next to the variable name.

  1. Set as Time-Series Variable - Sets the currently selected variable to the time-series variable. Disabled when more than one variable is selected. Once this is set, you can add lags and leads and add splines of time. The time-series variable will have a clock icon next to it.
  2. Set as Cross-Section Variable - Sets the currently selected variable to the cross-section variable. Disabled when more than one variable is selected. Once this is set, you can interact the splines of time with the cross-section. The cross-section variable will have a person icon next to it.
  3. Unset as Time-Series Variable - Removes the time-series status of the variable. This will remove any lags, leads, or splines of time.
  4. Unset as Cross-Section Variable - Removes the cross-section status of the variable. This will remove any intersection of the splines of time and the cross-section.
  5. Add Lag/Lead - Adds versions of the selected variables either lagged back (“lag”) or forward (“lead”).
  6. Remove Lag/Lead - Removes any lags or leads on the selected variables.
  7. Plot Histogram of Selected - Plots a histogram of the selected variables. This command will attempt to put all of the histograms on one page, but if more than nine histograms are requested, they will appear on multiple pages.
  8. Add Transformation… - Adds a transformation setting for the selected variables. Note that each variable can only have one transformation and the time-series and cross-section variables cannot be transformed.
  9. Remove Transformation - Removes any transformation for the selected variables.
  10. Add or Edit Bounds - Opens a dialog box to set logical bounds for the selected variable.

Amelia Options

Options menu

The “Variable” menu and the variable dashboard are the place to set variable-level options, but global options are set in the “Options” menu. For more information on these options, see vignette("using-amelia").

  1. Splines of Time with… - This option, if activated, will have Amelia use flexible trends of time with the specified number of knots in the imputation. The higher the number of knots the greater the variation in the trend structure, yet it will take more degrees of freedom to estimate. (A command-line sketch of these options follows this list.)
  2. Interact with Cross-Section? - Include an interaction of the cross-section with the time trends. This interaction is a way of allowing the trend of time to vary across cases as well. Using a 0-level spline of time and interacting with the cross section is the equivalent of using fixed effects.
  3. Add Observational Priors… - Brings a dialog window to set prior beliefs about ranges for individual missing observations.
  4. Numerical Options - Brings a dialog window to set the tolerance of the EM algorithm, the seed of the random number generator, the ridge prior for numerical stability, and the maximum number of redraws for the logical bounds.
  5. Draw Missingness Map - Draws a missingness map.
  6. Output File Options - Brings up a dialog to set the prefix of the imputed data files and the number of imputations. If you set the prefix to mydata, your output files will be mydata1.csv, mydata2.csv... etc.
  7. Output File Type - Sets the format of imputed data. If you would like to not save any output data sets (if you wanted, for instance, to simply look at diagnostics), set this option to “(no save).” Currently, you can save the output data as: Comma Separated Values (.CSV), Tab Delimited Text (.TXT), Stata (.DTA), R save object (.RData), or to hold it in R memory. This last option will only work if you have called AmeliaView from an R session and want to return to the R command line to work with the output. Its name in the R workspace will be the file prefix. The stacked version of the Stata output will work with their built-in mi tools.
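
For reference, a rough command-line counterpart of some of these options, using the freetrade data that ships with the package: splinetime and intercs correspond to the splines-of-time and cross-section interaction settings, while write.amelia() plays the role of the output file options. The argument values are illustrative only, not recommendations.

library(Amelia)
data(freetrade)
## Splines of time with 2 knots, interacted with the cross-section
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                splinetime = 2, intercs = TRUE)
## Write the imputations with the prefix "mydata" (mydata1.csv, ..., mydata5.csv)
write.amelia(a.out, file.stem = "mydata", format = "csv")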

Numerical options

Numerical options menu

  1. Seed - Sets the seed for the random number generator used by Amelia. Useful if you need to have the same output twice.

  2. Tolerance - Adjust the level of tolerance that Amelia uses to check convergence of the EM algorithm. In very large datasets, if your imputation chains run a long time without converging, increasing the tolerance will allow a lower threshold to judge convergence and end chains after fewer iterations.

  3. Empirical Prior - A prior that adds observations to your data in order to shrink the covariances. A useful place to start is around 0.5% of the total number of observations in the dataset.

  4. Maximum Resample for Bounds - Amelia fits logical bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound. (A command-line sketch of these numerical options follows this list.)
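
With the Amelia package and the freetrade data loaded as above, a sketch of the corresponding amelia() arguments is below; the particular values are illustrative, and the seed is set with R's own set.seed() rather than an argument to amelia().

set.seed(12345)                                    # Seed
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                tolerance = 1e-04,                 # Tolerance of the EM algorithm
                empri = 0.005 * nrow(freetrade),   # Empirical (ridge) prior
                max.resample = 100)                # Maximum resample for bounds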

Add Distributional Prior

Detail for Add Distributional Prior dialog

  1. Current Priors - A table of current priors in distributional form, with the variable and case name. You can remove priors by selecting them and using the right-click context menu.
  2. Case - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation and the value of the time series variable of the observation.
  3. Variable - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
  4. Mean - The mean value of the prior. The textbox will not accept letters or out of place punctuation.
  5. Standard Deviation - The standard deviation of the prior. The textbox will only accept positive non-zero values. (A command-line sketch of setting such priors follows this list.)
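
At the command line, the same kind of distributional prior is passed to amelia() as a four-column matrix through the priors argument: the row of the missing cell, the column of the variable, the prior mean, and the prior standard deviation (a row of 0 sets a variable-wide prior). The cell indices and values below are illustrative only and assume the freetrade data from the earlier examples.

## One prior per row: observation row, variable column, mean, standard deviation
pr <- matrix(c(158, 3, 40, 3), nrow = 1, ncol = 4)
a.out.pr <- amelia(freetrade, m = 5, ts = "year", cs = "country", priors = pr)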

Add Range Prior

Detail for Add Range Prior dialog

  1. Case - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation and the value of the time series variable of the observation.
  2. Variable - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
  3. Minimum - The minimum value of the prior. The textbox will not accept letters or out of place punctuation.
  4. Maximum - The maximum value of the prior. The textbox will not accept letters or out of place punctuation.
  5. Confidence - The confidence level of the prior. This should be between 0 and 1, non-inclusive. This value represents how certain your priors are. This value cannot be 1, even if you are absolutely certain of a given range. This is used to convert the range into an appropriate distributional prior. (A command-line sketch follows this list.)
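
The range version corresponds to a five-column priors matrix: observation row, variable column, minimum, maximum, and confidence level. Again, the cell chosen and the values are purely illustrative.

pr.range <- matrix(c(158, 3, 34, 46, 0.95), nrow = 1, ncol = 5)
a.out.pr2 <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                    priors = pr.range)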

Imputing and checking diagnostics

Output log showing Amelia output for a successful imputation.

Once you have set all the relevant options, you can impute your data by clicking the “Impute!” button in the toolbar. In the bottom right corner of the window, you will see a progress bar that indicates the progress of the imputations. For large datasets this could take some time. Once the imputations are complete, you should see a “Successful Imputation!” message appear where the progress bar was. You can click on this message to open the folder containing the imputed datasets.

If there was an error during the imputation, the output log will pop up and give you the error message along with some information about how to fix the problem. Once you have fixed the problem, simply click “Impute!” again. Even if there was no error, you may want to view the output log to see how Amelia ran. To do so, simply click the “Show Output Log” button. The log also shows the call to the amelia() function in R. You can use this code snippet to run the same imputation from the R command line. You will have to replace the x argument in the amelia() call with the name of your dataset in the R session.
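
For example, if the log reports a call such as amelia(x = x, m = 5, ts = "year", cs = "country"), you can rerun it at the prompt by substituting your own data object for x (the bundled freetrade data is used here only as a stand-in):

a.out <- amelia(x = freetrade, m = 5, ts = "year", cs = "country")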

Diagnostics Dialog

Detail for the Diagnostics dialog

Upon the successful completion of an imputation, the diagnostics menu will become available. Here you can use all of the diagnostics available at the command-line.

  1. Compare Plots - This will display the relative densities of the observed (black) and imputed (red) data. The density of the imputed values is the average imputation across all of the imputed datasets.
  2. Overimpute - This will run Amelia on the full data with one cell of the chosen variable artificially set to missing and then check the result of that imputation against the truth. The resulting plot shows average imputations against true values along with 90% confidence intervals. These are plotted over a \(y=x\) line for visual inspection of the imputation model.
  3. Number of overdispersions - When running the overdispersion diagnostic, you need to run the imputation algorithm from several overdispersed starting points in order to get a clear idea of how the chains are converging. Enter the number of imputations here.
  4. Number of dimensions - The overdispersion diagnostic must reduce the dimensionality of the paths of the imputation algorithm to either one or two dimensions due to graphical constraints.
  5. Overdisperse - Runs the overdispersion diagnostic to visually inspect the convergence of the Amelia algorithm from multiple start values that are drawn randomly. (Command-line equivalents of these diagnostics are sketched after this list.)
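
These dialog entries map onto the following command-line calls, assuming a.out holds output from amelia(); the variable plotted and the number of chains and dimensions are illustrative.

compare.density(a.out, var = "tariff")   # Compare Plots
overimpute(a.out, var = "tariff")        # Overimpute
disperse(a.out, m = 5, dims = 1)         # Overdisperse: 5 chains, 1 dimension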

Sessions

It is often useful to save a session of AmeliaView to save time if you need to impute the same data again. Using the Save Session button will do just that, saving all of the current settings (including the original and any imputed data) to an RData file. You can then reload your session, on the same computer or any other, simply by clicking the Load Session button and finding the relevant RData file. All of the settings will be restored, including any completed imputations. Thus, if you save the session after imputing, you can always load up those imputations and view their diagnostics using the sessions feature of AmeliaView.

Amelia/inst/doc/ameliaview.R0000644000176200001440000000052114335756311015440 0ustar liggesusers## ----setup, echo = FALSE, include = FALSE------------------------------------- knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ## ----load_av, eval = FALSE---------------------------------------------------- # library(Amelia) # AmeliaView() Amelia/inst/doc/diagnostics.html0000644000176200001440000132702314335756315016405 0ustar liggesusers Multiple Imputation Diagnostics

Multiple Imputation Diagnostics

2022-11-18

Amelia currently provides a number of diagnostic tools to inspect the imputations that are created. To illustrate these, we use the freetrade data from the package:

library(Amelia)
## Loading required package: Rcpp
## ## 
## ## Amelia II: Multiple Imputation
## ## (Version 1.8.1, built: 2022-11-18)
## ## Copyright (C) 2005-2022 James Honaker, Gary King and Matthew Blackwell
## ## Refer to http://gking.harvard.edu/amelia/ for more information
## ##
data(freetrade)
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country", p2s = 0)

One check on the plausibility of the imputation model is to compare the distribution of imputed values to the distribution of observed values. Obviously we cannot expect, a priori, that these distributions will be identical, as the missing values may differ systematically from the observed values; this is the fundamental reason to impute to begin with! Imputations with strange distributions or those that are far from the observed data may indicate that the imputation model needs at least some investigation and possibly some improvement.

The plot.amelia() method works on output from amelia() and, by default, shows for each variable a plot of the relative frequencies of the observed data with an overlay of the relative frequency of the imputed values.

plot(a.out, which.vars = 3:6)

where the argument which.vars indicates which of the variables to plot (in this case, we are taking the 3rd through the 6th variables).

The imputed curve (in red) plots the density of the mean imputation over the \(m\) datasets. That is, for each cell that is missing in the variable, the diagnostic will find the mean of that cell across each of the \(m\) datasets and use that value for the density plot. The black distributions are those of the observed data. When variables are completely observed, their densities are plotted in blue. These graphs will allow you to inspect how the density of imputations compares to the density of observed data. Some discussion of these graphs can be found in Abayomi, Gelman, and Levy (2008). Minimally, these graphs can be used to check that the mean imputation falls within known bounds, when such bounds exist in certain variables or settings.

We can also use the function compare.density() directly to make these plots for an individual variable:

compare.density(a.out, var = "signed")

Overimpute

Overimputing is a technique we have developed to judge the fit of the imputation model. Because of the nature of the missing data mechanism, it is impossible to tell whether the mean prediction of the imputation model is close to the unobserved value that is trying to be recovered. By definition this missing data does not exist to create this comparison, and if it existed we would no longer need the imputations or care about their accuracy. However, a natural question the applied researcher will often ask is how accurate are these imputed values?

Overimputing involves sequentially treating each of the observed values as if they had actually been missing. For each observed value in turn we then generate several hundred imputed values of that observed value, as if it had been missing. While \(m=5\) imputations are sufficient for most analysis models, this large number of imputations allows us to construct a confidence interval of what the imputed value would have been, had any of the observed data been missing. We can then graphically inspect whether our observed data tends to fall within the region where it would have been imputed had it been missing.

For example, we can run the overimputation diagnostic on our data by running

overimpute(a.out, var = "tariff")

Our overimputation diagnostic runs this procedure through all of the observed values for a user selected variable. We can graph the estimates of each observation against the true values of the observation. On this graph, a \(y=x\) line indicates the line of perfect agreement; that is, if the imputation model was a perfect predictor of the true value, all the imputations would fall on this line. For each observation, overimpute() also plots 90% confidence intervals that allows the user to visually inspect the behavior of the imputation model. By checking how many of the confidence intervals cover the \(y=x\) line, we can tell how often the imputation model can confidently predict the true value of the observation.

Occasionally, the overimputation can display unintuitive results. For example, different observations may have different numbers of observed covariates. If covariates that are useful to the prediction are themselves missing, then the confidence interval for this observation will be much larger. In the extreme, there may be observations where the observed value we are trying to overimpute is the only observed value in that observation, and thus there is nothing left to impute that observation with when we pretend that it is missing, other than the mean and variance of that variable. In these cases, we should correctly expect the confidence interval to be very large.

An example of this graph is shown here:

In this simulated bivariate dataset, one variable is overimputed and the results displayed. The second variable is either observed, in which case the confidence intervals are very small and the imputations (yellow) are very accurate, or the second variable is missing in which case this variable is being imputed simply from the mean and variance parameters, and the imputations (red) have a very large and encompassing spread. The circles represent the mean of all the imputations for that value. As the amount of missing information in a particular pattern of missingness increases, we expect the width of the confidence interval to increase. The color of the confidence interval reflects the percent of covariates observed in that pattern of missingness, as reflected in the legend at the bottom.
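
The figure described above comes from a small simulation of this kind: draw a bivariate normal sample, set some values of the first variable to missing, impute, and then overimpute the fully observed second variable. A sketch is below, using MASS::mvrnorm in place of the package's internal sampler; the sample size, covariance, and seed are arbitrary.

library(MASS)
set.seed(12345)
Sigma <- matrix(c(0.25^2, 0.06, 0.06, 0.25^2), 2, 2)
dd <- mvrnorm(50, mu = c(0.5, 0.5), Sigma = Sigma)
dd[sample(1:50, 10), 1] <- NA      # make some values of the first variable missing
aa.out <- amelia(dd, m = 5)
overimpute(aa.out, var = 2)        # overimpute the second, fully observed variable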

Overdispersed Starting Values

If the data given to amelia() has a poorly behaved likelihood, the EM algorithm can have problems finding a global maximum of the likelihood surface and starting values can begin to affect imputations. Because the EM algorithm is deterministic, the point in the parameter space where you start it can impact where it ends, though this is irrelevant when the likelihood has only one mode. However, if the starting values of an EM chain are close to a local maximum, the algorithm may find this maximum, unaware that there is a global maximum farther away. To make sure that our imputations do not depend on our starting values, a good test is to run the EM algorithm from multiple, dispersed starting values and check their convergence. In a well behaved likelihood, we will see all of these chains converging to the same value, and reasonably conclude that this is the likely global maximum. On the other hand, we might see our EM chain converging to multiple locations. The algorithm may also wander around portions of the parameter space that are not fully identified, such as a ridge of equal likelihood, as would happen, for example, if the same variable were accidentally included in the imputation model twice.

Amelia includes a diagnostic to run the EM chain from multiple starting values that are overdispersed from the estimated maximum. The overdispersion diagnostic will display a graph of the paths of each chain. Since these chains move through spaces that are in an extremely high number of dimensions and cannot be graphically displayed, the diagnostic reduces the dimensionality of the EM paths by showing the paths relative to the largest principal components of the final mode(s) that are reached. Users can choose between graphing the movement over the two largest principal components, or more simply the largest dimension with time (iteration number) on the \(x\)-axis. The number of EM chains can also be adjusted. Once the diagnostic draws the graph, the user can visually inspect the results to check that all chains converge to the same point.

For our original model, this is a simple call to disperse():

disperse(a.out, dims = 1, m = 5)

disperse(a.out, dims = 2, m = 5)

where m designates the number of places to start EM chains from and dims is the number of dimensions of the principal components to show.

In one dimension, the diagnostic plots movement of the chain on the \(y\)-axis and time, in the form of the iteration number, on the \(x\)-axis. The first plot shows a well behaved likelihood, as the starting values all converge to the same point. The black horizontal line is the point where amelia() converges when it uses the default method for choosing the starting values. The diagnostic takes the end point of this chain as the possible maximum and disperses the starting values away from it to see if the chain will ever finish at another mode.

Time-series Plots

As discussed above, information about time trends and fixed effects can help produce better imputations. One way to check the plausibility of our imputation model is to see how it predicts missing values in a time series. If the imputations for the Malaysian tariff rate were drastically higher in 1990 than the observed years of 1989 or 1991, we might worry that there is a problem in our imputation model. Checking these time series is easy to do with tscsPlot(). Simply choose the variable (with the var argument) and the cross-section (with the cs argument) to plot the observed time-series along with distributions of the imputed values for each missing time period. For instance, we can get the plot of the tariff variable for Malaysia with the following commands:

a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 0)
tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))

Here, the black points are observed tariff rates for Malaysia from 1980 to 2000. The red points are the mean imputation for each of the missing values, along with their 95% confidence bands. We draw these bands by imputing each of the missing values 100 times to get the imputation distribution for that observation.

In the plot above, we can see that the imputed 1990 tariff rate is quite in line with the values around it. Notice also that values toward the beginning and end of the time series have slightly higher imputation variance. This occurs because the fit of the polynomials of time in the imputation model has higher variance at the beginning and end of the time series. This is intuitive because these points have fewer neighbors from which to draw predictive power.

A word of caution is in order. As with comparing the histograms of imputed and observed values, there could be reasons that the missing values are systematically different from the observed time series. For instance, if there had been a major financial crisis in Malaysia in 1990 which caused the government to close off trade, then we would expect that the missing tariff rates should be quite different from the observed time series. If we have this information in our imputation model, we might expect to see out-of-line imputations in these time-series plots. If, on the other hand, we did not have this information, we might see “good” time-series plots that fail to point out this violation of the MAR assumption. Our imputation model would produce poor estimates of the missing values since it would be unaware that both the missingness and the true unobserved tariff rate depend on another variable. Hence, tscsPlot() is useful for finding obvious problems in the imputation model and comparing the efficiency of various imputation models, but it cannot speak to the untestable assumption of MAR.

Missingness maps

One useful tool for exploring the missingness in a dataset is a missingness map. This is a map that visualizes the dataset as a grid and colors the grid by missingness status. The columns of the grid are the variables and the rows are the observations, as in any spreadsheet program. This tool allows for a quick summary of the patterns of missingness in the data.

If we simply call the missmap() function on our output from amelia(),

missmap(a.out)

The missmap() function arranges the columns so that the variables are in decreasing order of missingness from left to right. If the cs argument was set in the amelia function, the labels for the rows will indicate where each of the cross-sections begins.

In this missingness map, it is clear that the tariff rate is the variable most missing in the data and it tends to be missing in blocks of a few observations. Gross international reserves (intresmi) and financial openness (fiveop), on the other hand, are missing mostly at the end of each cross-section. This suggests missingness by merging, when variables with different temporal coverages are merged to make one dataset. Sometimes this kind of missingness is an artifact of the date at which the data was merged and researchers can resolve it by finding updated versions of the relevant variables.

The missingness map is an important tool for understanding the patterns of missingness in the data and can often indicate potential ways to improve the imputation model or data collection process.

References

Abayomi, Kobi, Andrew Gelman, and Marc Levy. 2008. “Diagnostics for Multivariate Imputations.” Applied Statistics 57 (3): 273–91.
Amelia/inst/doc/diagnostics.Rmd0000644000176200001440000003351314335240021016136 0ustar liggesusers--- title: "Multiple Imputation Diagnostics" date: "`r Sys.Date()`" link-citations: yes bibliography: amelia.bib output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Multiple Imputation Diagnostics} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, include = FALSE} knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ``` Amelia currently provides a number of diagnostic tools to inspect the imputations that are created. To illustrate these, we use the `freetrade` data from the package: ```{r amelia, results = "hide"} library(Amelia) data(freetrade) a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country", p2s = 0) ``` \subsubsection{Comparing Densities} One check on the plausibility of the imputation model is check the distribution of imputed values to the distribution of observed values. Obviously we cannot expect, *a priori*, that these distribution will be identical as the missing values may differ systematically from the observed value--this is fundamental reason to impute to begin with! Imputations with strange distributions or those that are far from the observed data may indicate that imputation model needs at least some investigation and possibly some improvement. The `plot.amelia()` method works on output from `amelia()` and, by default, shows for each variable a plot of the relative frequencies of the observed data with an overlay of the relative frequency of the imputed values. ```{r plot_amelia} plot(a.out, which.vars = 3:6) ``` where the argument `which.vars` indicates which of the variables to plot (in this case, we are taking the 3rd through the 6th variables). The imputed curve (in red) plots the density of the *mean* imputation over the $m$ datasets. That is, for each cell that is missing in the variable, the diagnostic will find the mean of that cell across each of the $m$ datasets and use that value for the density plot. The black distributions are the those of the observed data. When variables are completely observed, their densities are plotted in blue. These graphs will allow you to inspect how the density of imputations compares to the density of observed data. Some discussion of these graphs can be found in @AbaGelLev08. Minimally, these graphs can be used to check that the mean imputation falls within known bounds, when such bounds exist in certain variables or settings. We can also use the function `compare.density()` directly to make these plots for an individual variable: ```{r compare_density} compare.density(a.out, var = "signed") ``` ## Overimpute {#sec_overimpute} *Overimputing* is a technique we have developed to judge the fit of the imputation model. Because of the nature of the missing data mechanism, it is impossible to tell whether the mean prediction of the imputation model is close to the unobserved value that is trying to be recovered. By definition this missing data does not exist to create this comparison, and if it existed we would no longer need the imputations or care about their accuracy. However, a natural question the applied researcher will often ask is how accurate are these imputed values? Overimputing involves sequentially treating each of the *observed* values as if they had actually been missing. 
For each observed value in turn we then generate several hundred imputed values of that observed value, *as if it had been missing*. While $m=5$ imputations are sufficient for most analysis models, this large number of imputations allows us to construct a confidence interval of what the imputed value would have been, had any of the observed data been missing. We can then graphically inspect whether our observed data tends to fall within the region where it would have been imputed had it been missing. For example, we can run the overimputation diagnostic on our data by running ```{r} overimpute(a.out, var = "tariff") ``` Our overimputation diagnostic runs this procedure through all of the observed values for a user selected variable. We can graph the estimates of each observation against the true values of the observation. On this graph, a $y=x$ line indicates the line of perfect agreement; that is, if the imputation model was a perfect predictor of the true value, all the imputations would fall on this line. For each observation, `overimpute()` also plots 90\% confidence intervals that allows the user to visually inspect the behavior of the imputation model. By checking how many of the confidence intervals cover the $y=x$ line, we can tell how often the imputation model can confidently predict the true value of the observation. Occasionally, the overimputation can display unintuitive results. For example, different observations may have different numbers of observed covariates. If covariates that are useful to the prediction are themselves missing, then the confidence interval for this observation will be much larger. In the extreme, there may be observations where the observed value we are trying to overimpute is *the only* observed value in that observation, and thus there is nothing left to impute that observation with when we pretend that it is missing, other than the mean and variance of that variable. In these cases, we should correctly expect the confidence interval to be very large. An example of this graph is show here: ```{r overimp_bad, echo = FALSE, results = "hide"} dd <- Amelia:::rmvnorm(50, mu = c(0.5,0.5), vcv = matrix(c(0.25^2,.06, .06,0.25^2),2,2)) ddmiss <- sample(1:50, replace = FALSE, size = 10) is.na(dd) <- ddmiss aa.out <- amelia(dd, m = 5) overimpute(aa.out, var = 2, main = "Observed versus Imputed Values") ``` In this simulated bivariate dataset, one variable is overimputed and the results displayed. The second variable is either observed, in which case the confidence intervals are very small and the imputations (yellow) are very accurate, or the second variable is missing in which case this variable is being imputed simply from the mean and variance parameters, and the imputations (red) have a very large and encompassing spread. The circles represent the mean of all the imputations for that value. As the amount of missing information in a particular pattern of missingness increases, we expect the width of the confidence interval to increase. The color of the confidence interval reflects the percent of covariates observed in that pattern of missingness, as reflected in the legend at the bottom. ## Overdispersed Starting Values {#sec_overdisperse} If the data given to `amelia()` has a poorly behaved likelihood, the EM algorithm can have problems finding a global maximum of the likelihood surface and starting values can begin to effect imputations. 
Because the EM algorithm is deterministic, the point in the parameter space where you start it can impact where it ends, though this is irrelevant when the likelihood has only one mode. However, if the starting values of an EM chain are close to a local maximum, the algorithm may find this maximum, unaware that there is a global maximum farther away. To make sure that our imputations do not depend on our starting values, a good test is to run the EM algorithm from multiple, dispersed starting values and check their convergence. In a well behaved likelihood, we will see all of these chains converging to the same value, and reasonably conclude that this is the likely global maximum. On the other hand, we might see our EM chain converging to multiple locations. The algorithm may also wander around portions of the parameter space that are not fully identified, such as a ridge of equal likelihood, as would happen for example, if the same variable were accidentally included in the imputation model twice. Amelia includes a diagnostic to run the EM chain from multiple starting values that are overdispersed from the estimated maximum. The overdispersion diagnostic will display a graph of the paths of each chain. Since these chains move through spaces that are in an extremely high number of dimensions and can not be graphically displayed, the diagnostic reduces the dimensionality of the EM paths by showing the paths relative to the largest principle components of the final mode(s) that are reached. Users can choose between graphing the movement over the two largest principal components, or more simply the largest dimension with time (iteration number) on the $x$-axis. The number of EM chains can also be adjusted. Once the diagnostic draws the graph, the user can visually inspect the results to check that all chains convergence to the same point. For our original model, this is a simple call to `disperse()`: ```{r displd} disperse(a.out, dims = 1, m = 5) disperse(a.out, dims = 2, m = 5) ``` where `m` designates the number of places to start EM chains from and `dims` are the number of dimensions of the principal components to show. In one dimension, the diagnostic plots movement of the chain on the $y$-axis and time, in the form of the iteration number, on the $x$-axis. The first plot shows a well behaved likelihood, as the starting values all converge to the same point. The black horizontal line is the point where `amelia()` converges when it uses the default method for choosing the starting values. The diagnostic takes the end point of this chain as the possible maximum and disperses the starting values away from it to see if the chain will ever finish at another mode. ## Time-series Plots {#sec_tscsplots} As discussed above, information about time trends and fixed effects can help produce better imputations. One way to check the plausibility of our imputation model is to see how it predicts missing values in a time series. If the imputations for the Malaysian tariff rate were drastically higher in 1990 than the observed years of 1989 or 1991, we might worry that there is a problem in our imputation model. Checking these time series is easy to do with `tscsPlot()`. Simply choose the variable (with the `var` argument) and the cross-section (with the `cs` argument) to plot the observed time-series along with distributions of the imputed values for each missing time period. 
For instance, we can get the plot of the `tariff` variable for Malaysia with the following commands:

```{r tsplot1}
a.out.time <- amelia(freetrade, ts = "year", cs = "country",
                     polytime = 1, intercs = TRUE, p2s = 0)
tscsPlot(a.out.time, cs = "Malaysia",
         main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

Here, the black points are the observed tariff rates for Malaysia from 1980 to 2000. The red points are the mean imputation for each of the missing values, along with their 95% confidence bands. We draw these bands by imputing each of the missing values 100 times to get the imputation distribution for that observation. In this plot, we can see that the imputed 1990 tariff rate is quite in line with the values around it. Notice also that values toward the beginning and end of the time series have slightly higher imputation variance. This occurs because the fit of the polynomials of time in the imputation model has higher variance at the beginning and end of the time series. This is intuitive because these points have fewer neighbors from which to draw predictive power.

A word of caution is in order. As with comparing the histograms of imputed and observed values, there could be reasons that the missing values are systematically different from the observed time series. For instance, if there had been a major financial crisis in Malaysia in 1990 which caused the government to close off trade, then we would expect that the missing tariff rates should be quite different from the observed time series. If we have this information in our imputation model, we might expect to see out-of-line imputations in these time-series plots. If, on the other hand, we did not have this information, we might see "good" time-series plots that fail to point out this violation of the MAR assumption. Our imputation model would produce poor estimates of the missing values since it would be unaware that both the missingness and the true unobserved tariff rate depend on another variable. Hence, `tscsPlot()` is useful for finding obvious problems in the imputation model and comparing the efficiency of various imputation models, but it cannot speak to the untestable assumption of MAR.

## Missingness maps {#sec_missmaps}

One useful tool for exploring the missingness in a dataset is a *missingness map*. This is a map that visualizes the dataset as a grid and colors the grid by missingness status. The columns of the grid are the variables and the rows are the observations, as in any spreadsheet program. This tool allows for a quick summary of the patterns of missingness in the data.

We can simply call the `missmap()` function on our output from `amelia()`:

```{r mmap1}
missmap(a.out)
```

The `missmap()` function arranges the columns so that the variables are in decreasing order of missingness from left to right. If the `cs` argument was set in the `amelia` function, the labels for the rows will indicate where each of the cross-sections begins. In this missingness map, it is clear that the tariff rate is the variable most missing in the data and it tends to be missing in blocks of a few observations. Gross international reserves (`intresmi`) and financial openness (`fivop`), on the other hand, are missing mostly at the end of each cross-section. This suggests *missingness by merging*, when variables with different temporal coverages are merged to make one dataset.
Sometimes this kind of missingness is an artifact of the date at which the data was merged and researchers can resolve it by finding updated versions of the relevant variables. The missingness map is an important tool for understanding the patterns of missingness in the data and can often indicate potential ways to improve the imputation model or data collection process. ## References Amelia/inst/doc/diagnostics.R0000644000176200001440000000322714335756315015636 0ustar liggesusers## ----setup, echo = FALSE, include = FALSE------------------------------------- knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ## ----amelia, results = "hide"------------------------------------------------- library(Amelia) data(freetrade) a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country", p2s = 0) ## ----plot_amelia-------------------------------------------------------------- plot(a.out, which.vars = 3:6) ## ----compare_density---------------------------------------------------------- compare.density(a.out, var = "signed") ## ----------------------------------------------------------------------------- overimpute(a.out, var = "tariff") ## ----overimp_bad, echo = FALSE, results = "hide"------------------------------ dd <- Amelia:::rmvnorm(50, mu = c(0.5,0.5), vcv = matrix(c(0.25^2,.06, .06,0.25^2),2,2)) ddmiss <- sample(1:50, replace = FALSE, size = 10) is.na(dd) <- ddmiss aa.out <- amelia(dd, m = 5) overimpute(aa.out, var = 2, main = "Observed versus Imputed Values") ## ----displd------------------------------------------------------------------- disperse(a.out, dims = 1, m = 5) disperse(a.out, dims = 2, m = 5) ## ----tsplot1------------------------------------------------------------------ a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1, intercs = TRUE, p2s = 0) tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)", var = "tariff", ylim = c(-10, 60)) ## ----mmap1-------------------------------------------------------------------- missmap(a.out) Amelia/inst/doc/ameliaview.Rmd0000644000176200001440000003355214335240021015755 0ustar liggesusers--- title: "AmeliaView GUI Guide" date: "`r Sys.Date()`" link-citations: yes bibliography: amelia.bib output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{AmeliaView GUI Guide} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, echo = FALSE, include = FALSE} knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ``` Below is a guide to the AmeliaView menus with references back to the users's guide. The same principles from the user's guide apply to AmeliaView. The only difference is how you interact with the program. Whether you use the GUI or the command line versions, the same underlying code is being called, and so you can read the command line-oriented discussion above even if you intend to use the GUI. ## Loading AmeliaView The easiest way to load AmeliaView is to open an R session and type the following two commands: ```{r load_av, eval = FALSE} library(Amelia) AmeliaView() ``` This will bring up the AmeliaView window on any platform. ![AmeliaView welcome screen](assets/splash.png) ## Loading data into AmeliaView AmeliaView loads with a welcome screen that has buttons which can load a data in many of the common formats. Each of these will bring up a window for choosing your dataset. 
Note that these buttons are only a subset of the possible ways to load data in AmeliaView. Under the File menu (shown below), you will find more options, including the datasets included in the package (`africa` and `freetrade`). You will also find import commands for Comma-Separated Values (.CSV), Tab-Delimited Text (.TXT), Stata v.5-10 (.DTA), SPSS (.DAT), and SAS Transport (.XPORT). Note that when using a CSV file, Amelia assumes that your file has a header (that is, a row at the top of the data indicating the variable names). ![AmeliaView File and import menu.](assets/import.png) You can also load data from an RData file. If the RData file contains more than one `data.frame`, a pop-up window will ask to you find the dataset you would like to load. In the file menu, you can also change the underlying working directory. This is where AmeliaView will look for data by default and where it will save imputed datasets. ## Variable Dashboard ![Main variable dashboard in AmeliaView](assets/main.png) Once a dataset is loaded, AmeliaView will show the variable dashboard. In this mode, you will see a table of variables, with the current options for each of them shown, along with a few summary statistics. You can reorder this table by any of these columns by clicking on the column headings. This might be helpful to, say, order the variables by mean or amount of missingness. ![Variable options via right-click menu on the variable dashboard](assets/context-menu.png) You can set options for individual variables by the right-click context menu or through the "Variables" menu. For instance, clicking "Set as Time-Series Variable" will set the currently selected variable in the dashboard as the time-series variable. Certain options are disabled until other options are enabled. For instance, you cannot add a lagged variable to the imputation until you have set the time-series variable. Note that any `factor` in the data is marked as a ID variable by default, since a `factor` cannot be included in the imputation without being set as an ID variable, a nominal variable, or the cross-section variable. If there is a `factor` that fails to meet one of these conditions, a red flag will appear next to the variable name. 1. **Set as Time-Series Variable** - Sets the currently selected variable to the time-series variable. Disabled when more than one variable is selected. Once this is set, you can add lags and leads and add splines of time. The time-series variable will have a clock icon next to it. 2. **Set as Cross-Section Variable** - Sets the currently selected variable to the cross-section variable. Disabled when more than one variable is selected. Once this is set, you can interact the splines of time with the cross-section. The cross-section variable will have a person icon next to it. 3. **Unset as Time-Series Variable** - Removes the time-series status of the variable. This will remove any lags, leads, or splines of time. 4. **Unset as Cross-Section Variable** - Removes the cross-section status of the variable. This will remove any intersection of the splines of time and the cross-section. 5. **Add Lag/Lead** - Adds versions of the selected variables either lagged back ("lag") or forward ("lead"). 6. **Remove Lag/Lead** - Removes any lags or leads on the selected variables. 7. **Plot Histogram of Selected** - Plots a histogram of the selected variables. This command will attempt to put all of the histograms on one page, but if more than nine histograms are requested, they will appear on multiple pages. 8. 
**Add Transformation...** - Adds a transformation setting for the selected variables. Note that each variable can only have one transformation and the time-series and cross-section variables cannot be transformed.
9. **Remove Transformation** - Removes any transformation for the selected variables.
10. **Add or Edit Bounds** - Opens a dialog box to set logical bounds for the selected variable.

## Amelia Options

![Options menu](assets/options.png)

The "Variable" menu and the variable dashboard are the place to set variable-level options, but global options are set in the "Options" menu. For more information on these options, see `vignette("using-amelia")`.

1. **Splines of Time with...** - This option, if activated, will have Amelia use flexible trends of time with the specified number of knots in the imputation. The higher the number of knots the greater the variation in the trend structure, yet it will take more degrees of freedom to estimate.
2. **Interact with Cross-Section?** - Include an interaction of the cross-section with the time trends. This interaction is a way of allowing the trend of time to vary across cases as well. Using a 0-level spline of time and interacting with the cross section is the equivalent of using fixed effects.
3. **Add Observational Priors...** - Brings a dialog window to set prior beliefs about ranges for individual missing observations.
4. **Numerical Options** - Brings a dialog window to set the tolerance of the EM algorithm, the seed of the random number generator, the ridge prior for numerical stability, and the maximum number of redraws for the logical bounds.
5. **Draw Missingness Map** - Draws a missingness map.
6. **Output File Options** - Brings a dialog to set the prefix of the imputed data files and the number of imputations. If you set the prefix to `mydata`, your output files will be `mydata1.csv, mydata2.csv...` etc.
7. **Output File Type** - Sets the format of the imputed data. If you would like not to save any output data sets (if you wanted, for instance, to simply look at diagnostics), set this option to "(no save)." Currently, you can save the output data as: Comma Separated Values (.CSV), Tab Delimited Text (.TXT), Stata (.DTA), R save object (.RData), or to hold it in R memory. This last option will only work if you have called AmeliaView from an R session and want to return to the R command line to work with the output. Its name in the R workspace will be the file prefix. The stacked version of the Stata output will work with their built-in `mi` tools.

### Numerical options

![Numerical options menu](assets/numopts.png)

1. **Seed** - Sets the seed for the random number generator used by Amelia. Useful if you need to have the same output twice.
1. **Tolerance** - Adjusts the level of tolerance that Amelia uses to check convergence of the EM algorithm. In very large datasets, if your imputation chains run a long time without converging, increasing the tolerance will allow a lower threshold to judge convergence and end chains after fewer iterations.
1. **Empirical Prior** - A prior that adds observations to your data in order to shrink the covariances. A useful place to start is around 0.5\% of the total number of observations in the dataset.
1. **Maximum Resample for Bounds** - Amelia fits logical bounds by rejecting any draws that do not fall within the bounds. This value sets the number of times Amelia should attempt to resample to fit the bounds before setting the imputation to the bound.

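
These numerical options correspond to arguments of the command-line `amelia()` function. As a rough sketch only (the specific values below are placeholders, not recommendations), the settings above map onto a call such as:

```{r numopts_cli_sketch, eval = FALSE}
## Illustrative sketch: command-line counterparts of the GUI's numerical
## options. The values shown are placeholders.
library(Amelia)
data(freetrade)
set.seed(1234)                                        # Seed
a.out.num <- amelia(freetrade, m = 5,
                    ts = "year", cs = "country",
                    tolerance = 0.0001,               # Tolerance
                    empri = 0.005 * nrow(freetrade),  # Empirical (ridge) prior
                    max.resample = 100)               # Maximum resample for bounds
```
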
### Add Distributional Prior

![Detail for Add Distributional Prior dialog](assets/distpri.png)

1. **Current Priors** - A table of current priors in distributional form, with the variable and case name. You can remove priors by selecting them and using the right-click context menu.
1. **Case** - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation and the value of the time series variable of the observation.
1. **Variable** - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
1. **Mean** - The mean value of the prior. The textbox will not accept letters or out of place punctuation.
1. **Standard Deviation** - The standard deviation of the prior. The textbox will only accept positive non-zero values.

### Add Range Prior

![Detail for Add Range Prior dialog](assets/rangepri.png)

1. **Case** - Select the case name or number you wish to set the prior about. You can also choose to make the prior for the entire variable, which will set the prior for any missing cell in that variable. The case names are generated from the row name of the observation, the value of the cross-section variable of the observation and the value of the time series variable of the observation.
1. **Variable** - The variable associated with the prior you would like to specify. The list provided only shows the missing variables for the currently selected observation.
1. **Minimum** - The minimum value of the prior. The textbox will not accept letters or out of place punctuation.
1. **Maximum** - The maximum value of the prior. The textbox will not accept letters or out of place punctuation.
1. **Confidence** - The confidence level of the prior. This should be between 0 and 1, non-inclusive. This value represents how certain your priors are. This value cannot be 1, even if you are absolutely certain of a given range. This is used to convert the range into an appropriate distributional prior.

## Imputing and checking diagnostics

![Output log showing Amelia output for a successful imputation.](assets/output-log.png)

Once you have set all the relevant options, you can impute your data by clicking the "Impute!" button in the toolbar. In the bottom right corner of the window, you will see a progress bar that indicates the progress of the imputations. For large datasets this could take some time. Once the imputations are complete, you should see a "Successful Imputation!" message appear where the progress bar was. You can click on this message to open the folder containing the imputed datasets.

If there was an error during the imputation, the output log will pop up and give you the error message along with some information about how to fix the problem. Once you have fixed the problem, simply click "Impute!" again. Even if there was no error, you may want to view the output log to see how Amelia ran. To do so, simply click the "Show Output Log" button. The log also shows the call to the `amelia()` function in R. You can use this code snippet to run the same imputation from the R command line. You will have to replace the `x` argument in the `amelia()` call with the name of your dataset in the R session.

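
For example, such a call might look roughly like the sketch below; the options shown are placeholders for whatever you selected in the GUI, not the exact text Amelia prints:

```{r gui_call_sketch, eval = FALSE}
## Hypothetical example of re-running a GUI imputation from the R command
## line. Here the package's `freetrade` data stands in for your dataset;
## substitute whatever `x` was in your own session.
library(Amelia)
data(freetrade)
a.out <- amelia(x = freetrade, m = 5, ts = "year", cs = "country")
```
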
### Diagnostics Dialog

![Detail for the Diagnostics dialog](assets/diag.png)

Upon the successful completion of an imputation, the diagnostics menu will become available. Here you can use all of the diagnostics available at the command line.

1. **Compare Plots** - This will display the relative densities of the observed (black) and imputed (red) data. The density of the imputed values is the average of the imputations across all of the imputed datasets.
1. **Overimpute** - This will run Amelia on the full data with one cell of the chosen variable artificially set to missing and then check the result of that imputation against the truth. The resulting plot shows average imputations against true values along with 90% confidence intervals. These are plotted over a $y=x$ line for visual inspection of the imputation model.
1. **Number of overdispersions** - When running the overdispersion diagnostic, you need to run the imputation algorithm from several overdispersed starting points in order to get a clear idea of how the chains are converging. Enter the number of imputations here.
1. **Number of dimensions** - The overdispersion diagnostic must reduce the dimensionality of the paths of the imputation algorithm to either one or two dimensions due to graphical constraints.
1. **Overdisperse** - Run the overdispersion diagnostic to visually inspect the convergence of the Amelia algorithm from multiple start values that are drawn randomly.

## Sessions

It is often useful to save a session of AmeliaView to save time if you have to impute the same data again. Using the **Save Session** button will do just that, saving all of the current settings (including the original and any imputed data) to an RData file. You can then reload your session, on the same computer or any other, simply by clicking the **Load Session** button and finding the relevant RData file. All of the settings will be restored, including any completed imputations. Thus, if you save the session after imputing, you can always load up those imputations and view their diagnostics using the sessions feature of AmeliaView.
Amelia/inst/doc/using-amelia.Rmd0000644000176200001440000012477614335240021016212 0ustar liggesusers---
title: "Using Amelia"
date: "`r Sys.Date()`"
link-citations: yes
bibliography: amelia.bib
output:
  rmarkdown::html_vignette:
    keep_md: true
vignette: >
  %\VignetteIndexEntry{Using Amelia}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, echo = FALSE, include = FALSE}
knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center")
options(digits = 4, show.signif.stars = FALSE)
set.seed(12345)
```

## Data

We now demonstrate how to use Amelia using data from @MilKub05, which studies the effect of democracy on trade policy. For the purposes of this user's guide, we will use a subset restricted to nine developing countries in Asia from 1980 to 1999[^freetrade]. This dataset includes the following variables:

| Variable   | Description                                          |
|:-----------|:-----------------------------------------------------|
| `year`     | year                                                 |
| `country`  | country                                              |
| `tariff`   | average tariff rates                                 |
| `polity`   | Polity IV Score[^polity]                             |
| `pop`      | total population                                     |
| `gdp.pc`   | gross domestic product per capita                    |
| `intresmi` | gross international reserves                         |
| `signed`   | dummy variable if signed an IMF agreement that year  |
| `fivop`    | measure of financial openness                        |
| `usheg`    | measure of US hegemony[^hegemony]                    |

These variables correspond to the variables used in the analysis model of @MilKub05 in table 2.

[^freetrade]: We have artificially added some missingness to these data for presentational purposes. You can access the original data at [https://scholar.princeton.edu/hvmilner/data](https://scholar.princeton.edu/hvmilner/data).

[^polity]: The Polity score is a number between -10 and 10 indicating how democratic a country is. A fully autocratic country would be a -10 while a fully democratic country would be a 10.

[^hegemony]: This measure of US hegemony is the US imports and exports as a percent of the world total imports and exports.

We first load the Amelia package and the data:

```{r load_data, results = "hide"}
library(Amelia)
data(freetrade)
```

We can check the summary statistics of the data to see that there is missingness on many of the variables:

```{r summarize_data}
summary(freetrade)
```

In the presence of missing data, most statistical packages use *listwise deletion*, which removes any row that contains a missing value from the analysis. Using the base model of @MilKub05 Table 2, we run a simple linear model in R, which uses listwise deletion:

```{r mk_lm}
summary(lm(tariff ~ polity + pop + gdp.pc + year + country,
           data = freetrade))
```

Note that 60 of the 171 original observations are deleted due to missingness. These observations, however, are partially observed, and contain valuable information about the relationships between those variables which are present in the partially completed observations. Multiple imputation will help us retrieve that information and make better, more efficient inferences.

## Multiple Imputation

When performing multiple imputation, the first step is to identify the variables to include in the imputation model. It is crucial to include at least as much information as will be used in the analysis model. That is, any variable that will be in the analysis model should also be in the imputation model. This includes any transformations or interactions of variables that will appear in the analysis model.

In fact, it is often useful to add more information to the imputation model than will be present when the analysis is run. Since imputation is predictive, any variables that would increase predictive power should be included in the model, even if including them in the analysis model would produce bias in estimating a causal effect (such as for post-treatment variables) or collinearity would preclude determining which variable had a relationship with the dependent variable (such as including multiple alternate measures of GDP). In our case, we include all the variables in `freetrade` in the imputation model, even though our analysis model focuses on `polity`, `pop` and `gdp.pc`. We're not incorporating time or spatial data yet, but we do below.

To create multiple imputations in Amelia, we can simply run

```{r amelia}
a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country")
a.out
```

Note that our example dataset is deliberately small both in variables and in cross-sectional elements. Typical datasets may often have hundreds or possibly a couple thousand steps to the EM algorithm. Long chains should remind the analyst to consider whether transformations of the variables would more closely fit the multivariate normal assumptions of the model (correct but omitted transformations will shorten the number of steps and improve the fit of the imputations), but do not necessarily denote problems with the imputation model.

The output gives some information about how the algorithm ran. Each of the imputed datasets is now in the list `a.out$imputations`.
Thus, we could plot a histogram of the `tariff` variable from the 3rd imputation, ```{r} hist(a.out$imputations[[3]]$tariff, col = "grey", border = "white") ``` ### Saving imputed datasets If you need to save your imputed datasets, one direct method is to save the output list from `amelia`, ```{r save, eval = FALSE} save(a.out, file = "imputations.RData") ``` As in the previous example, the ith imputed datasets can be retrieved from this list as `a.out$imputations[[i]]`. In addition, you can save each of the imputed datasets to its own file using the `write.amelia()` command, ```{r write_amelia, eval = FALSE} write.amelia(obj = a.out, file.stem = "outdata") ``` This will create one comma-separated value file for each imputed dataset in the following manner: outdata1.csv outdata2.csv outdata3.csv outdata4.csv outdata5.csv The `write.amelia` function can also save files in tab-delimited and Stata (`.dta`) file formats. For instance, to save Stata files, simply change the `format` argument to `"dta"`, ```{r write_dta, eval = FALSE} write.amelia(obj = a.out, file.stem = "outdata", format = "dta") ``` Additionally, `write.amelia()` can create a "stacked" version of the imputed dataset which stacks each imputed dataset on top of one another. This can be done by setting the \code{separate} argument to `FALSE`. The resulting matrix is of size $(N \cdot m) \times p$ if the original dataset is excluded (`orig.data = FALSE`) and of size $(N \cdot (m+1))\times p$ if it is included (`orig.data = TRUE`). The stacked dataset will include a variable (set with `impvar`) that indicates to which imputed dataset the observation belongs. ## Combining multiple calls to `amelia()` The EMB algorithm is what computer scientists call *embarrassingly parallel*, meaning that it is simple to separate each imputation into parallel processes. With Amelia it is simple to run subsets of the imputations on different machines and then combine them after the imputation for use in analysis model. This allows for a huge increase in the speed of the algorithm. Output lists from different Amelia runs can be combined together into a new list. For instance, suppose that we wanted to add another ten imputed datasets to our earlier call to `amelia()`. First, run the function to get these additional imputations, ```{r more_amelia} a.out.more <- amelia(freetrade, m = 10, ts = "year", cs = "country", p2s = 0) a.out.more ``` then combine this output with our original output using the `ameliabind()` function, ```{r ameliabind} a.out.more <- ameliabind(a.out, a.out.more) a.out.more ``` This function binds the two outputs into the same output so that you can pass the combined imputations easily to analysis models and diagnostics. Note that `a.out.more` now has a total of 15 imputations. A simple way to execute a parallel processing scheme with Amelia would be to run `amelia()` with `m` set to 1 on $m$ different machines or processors, save each output using the `save()` function, load them all on the same R session using `load()` command and then combine them using `ameliabind()`. In order to do this, however, make sure to name each of the outputs a different name so that they do not overwrite each other when loading into the same R session. Also, some parallel environments will dump all generated files into a common directory, where they may overwrite each other. 
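
The following is a minimal sketch of that save-and-combine workflow; the file names, the worker index `i`, and the helper `run_one()` are hypothetical choices for illustration, not part of the Amelia API:

```{r parallel_combine_sketch, eval = FALSE}
library(Amelia)
data(freetrade)

## Hypothetical sketch: each worker i (a separate machine or process)
## produces one imputation and saves it under a unique file name,
## e.g. by calling run_one(i) for its own value of i.
run_one <- function(i) {
  a.part <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 0)
  save(a.part, file = paste0("amelia-part-", i, ".RData"))
}

## Later, in a single R session, load each file into its own environment
## (so the saved objects do not overwrite each other) and combine them.
imps <- lapply(1:5, function(i) {
  e <- new.env()
  load(paste0("amelia-part-", i, ".RData"), envir = e)
  e$a.part
})
a.all <- do.call(ameliabind, imps)
```
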
If it is convenient in a parallel environment to run a large number of `amelia()` calls from a single piece of code, one useful way to avoid overwriting is to create the `file.stem` with a random suffix. For example: ```{r rand_stem, eval = FALSE} b <- round(runif(1, min = 1111, max = 9999)) random.name <- paste("am", b, sep = "") amelia <- write.amelia(obj = a.out, file.stem = random.name) ``` ### Screen output Screen output can be adjusted with the "print to screen" argument, `p2s`. At a value of 0, no screen printing will occur. This may be useful in large jobs or simulations where a very large number of imputation models may be required. The default value of 1, lists each bootstrap, and displays the number of iterations required to reach convergence in that bootstrapped dataset. The value of 2 gives more thorough screen output, including, at each iteration, the number of parameters that have significantly changed since the last iteration. This may be useful when the EM chain length is very long, as it can provide an intuition for many parameters still need to converge in the EM chain, and a sense of the time remaining. However, it is worth noting that the last several parameters can often take a significant fraction of the total number of iterations to converge. Setting `p2s` to 2 will also generate information on how EM algorithm is behaving, such as a `!` when the current estimated complete data covariance matrix is not invertible and a `*` when the likelihood has not monotonically increased in that step. Having many of these two symbols in the screen output is an indication of a problematic imputation model. Problems of non-invertible matrices often mean that current guess for the covariance matrix is singular. This is a sign that there may be two highly correlated variables in the model. One way to resolve is to use a ridge prior (see \@ref(sec_prior)). An example of the output when `p2s` is 2 would be ```{r p2s} a.out.p2s <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 2) ``` ## Parallel Imputation {#sec:parallel} Each imputation in the above EMB algorithm is completely independent of any other imputation, a property called embarrassingly parallel. This type of approach can take advantage of the multiple-core infrastructure of modern CPUs. Each core in a multi-core processor can execute independent operations in parallel. Amelia can utilize this parallel processing internally via the `parallel` and the `ncpus` arguments. The `parallel` argument sets the parallel processing backend, either with `"multicore"` or `"snow"` (or `"no"` for no parallel processing). The `"multicore"` backend is not available on Windows systems, but tends to be quicker at parallel processing. On a Windows system, the `"snow"` backend provides parallel processing through a cluster of worker processes across the CPUs. You can set the default for this argument using the `"amelia.parallel"` option. This allows you to run Amelia in parallel as the default for an entire R session without setting arguments in the `amelia()` call. For each of the parallel backends, Amelia requires a number of CPUs to use in parallel. This can be set using the `ncpus` argument. It can be higher than the number of physical cores in the system if hyperthreading or other technologies are available. You can use the `parallel::detectCores()` function to determine how many cores are available on your machine. The default for this argument can be set using the `"amelia.ncpus"` option. 
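
As a minimal sketch (assuming a Unix-alike system where the `"multicore"` backend is available), the same imputation could be run in parallel like this:

```{r parallel_cores_sketch, eval = FALSE}
## Run the bootstrapped imputations across the local cores. On Windows,
## parallel = "snow" would be used instead of "multicore".
library(Amelia)
data(freetrade)
a.out.par <- amelia(freetrade, m = 10, ts = "year", cs = "country", p2s = 0,
                    parallel = "multicore", ncpus = parallel::detectCores())
```
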
On Unix-alike systems (such as macOS and Linux distributions), the `"multicore"` backend automatically sets up and stops the parallel workers by forking the process. On Windows, the `"snow"` backend requires more attention. Amelia will attempt to create a parallel cluster of worker processes (since Windows systems cannot fork a process) and will stop this cluster after the imputations are complete. Alternatively, Amelia also has a `cl` argument, which accepts a predefined cluster made using the `parallel::makePSOCKcluster()`. For more information about parallel processing in R, see the documentation for the `parallel` package that ships along with R or the CRAN Task View on [Parallel Computing with R](https://cran.r-project.org/view=HighPerformanceComputing) ## Improving Imputations via Transformations {#sec:trans} Social science data commonly includes variables that fail to fit to a multivariate normal distribution. Indeed, numerous models have been introduced specifically to deal with the problems they present. As it turns out, much evidence in the literature [discussed in @KinHonJos01] indicates that the multivariate normal model used in Amelia usually works well for the imputation stage even when discrete or non-normal variables are included and when the analysis stage involves these limited dependent variable models. Nevertheless, Amelia includes some limited capacity to deal directly with ordinal and nominal variables and to modify variables that require other transformations. In general nominal and log transform variables should be declared to Amelia, whereas ordinal (including dichotomous) variables often need not be, as described below. (For harder cases, see [@Schafer97], for specialized MCMC-based imputation models for discrete variables.) Although these transformations are taken internally on these variables to better fit the data to the multivariate normal assumptions of the imputation model, all the imputations that are created will be returned in the original untransformed form of the data. If the user has already performed transformations on their data (such as by taking a log or square root prior to feeding the data to `amelia()`) these do not need to be declared, as that would result in the transformation occurring *doubly* in the imputation model. The fully imputed data sets that are returned will always be in the form of the original data that is passed to the `amelia()` routine. ### Ordinal {#sec:ord} In much statistical research, researchers treat independent ordinal (including dichotomous) variables as if they were really continuous. If the analysis model to be employed is of this type, then nothing extra is required of the of the imputation model. Users are advised to allow Amelia to impute non-integer values for any missing data, and to use these non-integer values in their analysis. Sometimes this makes sense, and sometimes this defies intuition. One particular imputation of 2.35 for a missing value on a seven point scale carries the intuition that the respondent is between a 2 and a 3 and most probably would have responded 2 had the data been observed. This is easier to accept than an imputation of 0.79 for a dichotomous variable where a zero represents a male and a one represents a female respondent. However, in both cases the non-integer imputations carry more information about the underlying distribution than would be carried if we were to force the imputations to be integers. 
Thus whenever the analysis model permits, missing ordinal observations should be allowed to take on continuously valued imputations. In the `freetrade` data, one such ordinal variable is `polity` which ranges from -10 (full autocracy) to 10 (full democracy). If we tabulate this variable from one of the imputed datasets, ```{r polity_tab} table(a.out$imputations[[3]]$polity) ``` we can see that there is one imputation between -4 and -3 and one imputation between 6 and 7. Again, the interpretation of these values is rather straightforward even if they are not strictly in the coding of the original Polity data. Often, however, analysis models require some variables to be strictly ordinal, as for example, when the dependent variable will be modeled in a logistical or Poisson regression. Imputations for variables set as ordinal are created by taking the continuously valued imputation and using an appropriately scaled version of this as the probability of success in a binomial distribution. The draw from this binomial distribution is then translated back into one of the ordinal categories. For our data we can simply add `polity` to the `ords` argument: ```{r polity_ord} a.out1 <- amelia(freetrade, m = 5, ts = "year", cs = "country", ords = "polity", p2s = 0) table(a.out1$imputations[[3]]$polity) ``` Now, we can see that all of the imputations fall into one of the original polity categories. ### Nominal {#sec:nom} Nominal variables[^binary] must be treated quite differently than ordinal variables. Any multinomial variables in the data set (such as religion coded 1 for Catholic, 2 for Jewish, and 3 for Protestant) must be specified to Amelia. In our \code{freetrade} dataset, we have `signed` which is 1 if a country signed an IMF agreement in that year and 0 if it did not. Of course, our first imputation did not limit the imputations to these two categories ```{r binary_tab} table(a.out1$imputations[[3]]$signed) ``` In order to fix this for a $p$-category multinomial variable, Amelia will determine $p$ (as long as your data contain at least one value in each category), and substitute $ p-1$ binary variables to specify each possible category. These new $p-1$ variables will be treated as the other variables in the multivariate normal imputation method chosen, and receive continuous imputations. These continuously valued imputations will then be appropriately scaled into probabilities for each of the $p$ possible categories, and one of these categories will be drawn, where upon the original $p$-category multinomial variable will be reconstructed and returned to the user. Thus all imputations will be appropriately multinomial. [^binary]: Dichotomous (two category) variables are a special case of nominal variables. For these variables, the nominal and ordinal methods of transformation in Amelia agree. For our data we can simply add `signed` to the `noms` argument: ```{r noms} a.out2 <- amelia(freetrade, m = 5, ts = "year", cs = "country", noms = "signed", p2s = 0) table(a.out2$imputations[[3]]$signed) ``` Note that Amelia can only fit imputations into categories that exist in the original data. Thus, if there was a third category of signed, say 2, that corresponded to a different kind of IMF agreement, but it never occurred in the original data, Amelia could not match imputations to it. Since Amelia properly treats a $p$-category multinomial variable as $p-1$ variables, one should understand the number of parameters that are quickly accumulating if many multinomial variables are being used. 
If the square of the number of real and constructed variables is large relative to the number of observations, it is useful to use a ridge prior as in section \@ref(sec_prior). ### Natural log {#sec:log} If one of your variables is heavily skewed or has outliers that may alter the imputation in an unwanted way, you can use a natural logarithm transformation of that variable in order to normalize its distribution. This transformed distribution helps Amelia to avoid imputing values that depend too heavily on outlying data points. Log transformations are common in expenditure and economic variables where we have strong beliefs that the marginal relationship between two variables decreases as we move across the range. For instance, we can show the `tariff` variable clearly has positive (or, right) skew while its natural log transformation has a roughly normal distribution. ```{r tarrif_hist} hist(freetrade$tariff, col="grey", border="white") hist(log(freetrade$tariff), col="grey", border="white") ``` ### Square root {#sec:sqrt} Event count data is often heavily skewed and has nonlinear relationships with other variables. One common transformation to tailor the linear model to count data is to take the square roots of the counts. This is a transformation that can be set as an option in Amelia. ### Logistic {#sec:lgstc} Proportional data is sharply bounded between 0 and 1. A logistic transformation is one possible option in Amelia to make the distribution symmetric and relatively unbounded. ## Identification Variables {#sec:idvars} Datasets often contain identification variables, such as country names, respondent numbers, or other identification numbers, codes or abbreviations. Sometimes these are text and sometimes these are numeric. Often it is not appropriate to include these variables in the imputation model, but it is useful to have them remain in the imputed datasets (However, there are models that would include the ID variables in the imputation model, such as fixed effects model for data with repeated observations of the same countries). Identification variables which are not to be included in the imputation model can be identified with the argument `idvars`. These variables will not be used in the imputation model, but will be kept in the imputed datasets. If the `year` and `country` contained no information except labels, we could omit them from the imputation: ```{r idvars} amelia(freetrade, idvars = c("year", "country")) ``` Note that Amelia will return with an error if your dataset contains a factor or character variable that is not marked as a nominal or identification variable. Thus, if we were to omit the factor `country` from the `cs` or `idvars` arguments, we would receive an error: ```{r idvars_error} a.out2 <- amelia(freetrade, idvars = c("year")) ``` In order to conserve memory, it is wise to remove unnecessary variables from a data set before loading it into Amelia. The only variables you should include in your data when running Amelia are variables you will use in the analysis stage and those variables that will help in the imputation model. While it may be tempting to simply mark unneeded variables as IDs, it only serves to waste memory and slow down the imputation procedure. ## Time Series, or Time Series Cross Sectional Data {#sec:tscs} Many variables that are recorded over time within a cross-sectional unit are observed to vary smoothly over time. 
In such cases, knowing the observed values of observations close in time to any missing value may enormously aid the imputation of that value. However, the exact pattern may vary over time within any cross-section. There may be periods of growth, stability, or decline, in each of which the observed values would be used in a different fashion to impute missing values. Also, these patterns may vary enormously across different cross-sections, or may exist in some and not others.

Amelia can build a general model of patterns within variables across time by creating a sequence of polynomials of the time index. If, for example, tariffs vary smoothly over time, then we make the modeling assumption that there exists some polynomial that describes the economy in cross-sectional unit $i$ at time $t$ as:

\[
\textrm{tariff}_{ti} = \beta_0 + \beta_1 t + \beta_2 t^2 + \beta_3 t^3 + \ldots
\]

Thus, if we include enough higher-order terms of time, the pattern between observed values of the tariff rate can be estimated. Amelia will create polynomials of time up to the user-defined $k$-th order ($k\leq3$). We can implement this with the `ts` and `polytime` arguments. If we thought that a second-order polynomial would help predict the missing values, we could run

```{r polytime, results = "hide"}
a.out2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 2)
```

With this input, Amelia will add covariates to the model that correspond to time and its polynomials. These covariates will help better predict the missing values.

If cross-sectional units are specified, these polynomials can be interacted with the cross-section unit to allow the patterns over time to vary between cross-sectional units. Unless you strongly believe all units have the same patterns over time in all variables (including the same constant term), this is a reasonable setting. When $k$ is set to 0, this interaction simply results in a model of *fixed effects* where every unit has a uniquely estimated constant term. Amelia does not smooth the observed data, and only uses this functional form, or one you choose, with all the other variables in the analysis and the uncertainty of the prediction, to impute the missing values.

In order to impute with trends specific to each cross-sectional unit, we can set `intercs` to `TRUE`:

```{r intercs, results = "hide"}
a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                     intercs = TRUE, p2s = 2)
```

Note that attempting to use `polytime` without the `ts` argument, or `intercs` without the `cs` argument will result in an error.

Using the `tscsPlot()` function (discussed below), we can see that we have a much better prediction about the missing values when incorporating time than when we omit it:

```{r tcomp1}
tscsPlot(a.out, cs = "Malaysia", main = "Malaysia (no time settings)",
         var = "tariff", ylim = c(-10, 60))
tscsPlot(a.out.time, cs = "Malaysia",
         main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))
```

### Lags and leads {#sec:lags}

An alternative way of handling time-series information is to include lags and leads of certain variables into the imputation model. *Lags* are variables that take the value of another variable in the previous time period while *leads* take the value of another variable in the next time period. Many analysis models use lagged variables to deal with issues of endogeneity, so using leads may seem strange. It is important to remember, however, that imputation models are predictive, not causal.
Thus, since both past and future values of a variable are likely correlated with the present value, both lags and leads should improve the model. If we wanted to include lags and leads of tariffs, for instance, we would simply pass this to the `lags` and `leads` arguments:

```{r lags_leads}
a.out2 <- amelia(freetrade, ts = "year", cs = "country", lags = "tariff",
                 leads = "tariff")
```

## Including Prior Information

Amelia has a number of methods of setting priors within the imputation model. Two of these are commonly used and discussed below: ridge priors and observational priors.

### Ridge priors for high missingness, small samples, or large correlations {#sec_prior}

When the data to be analyzed contain a high degree of missingness or very strong correlations among the variables, or when the number of observations is only slightly greater than the number of parameters $p(p+3)/2$ (where $p$ is the number of variables), results from your analysis model will be more dependent on the choice of imputation model. This suggests more testing of alternative specifications under Amelia in these cases. This can happen when the polynomials of time interacted with the cross section are included in the imputation model. For example, in our data, if we used a polynomial of degree 2 with unit-specific trends and there are 9 countries, it would add $3 \times 9 - 1 = 26$ more variables to the imputation model (dropping one of the fixed effects for identification). When these are added, the EM algorithm can become unstable. You can detect this by inspecting the screen output under `p2s = 2` or by observing that the number of iterations per imputation varies widely.

In these circumstances, we recommend adding a ridge prior which will help with numerical stability by shrinking the covariances among the variables toward zero without changing the means or variances. This can be done by including the `empri` argument. Including this prior as a positive number is roughly equivalent to adding `empri` artificial observations to the data set with the same means and variances as the existing data but with zero covariances. Thus, increasing the `empri` setting results in more shrinkage of the covariances, thus putting more a priori structure on the estimation problem: like many Bayesian methods, it reduces variance in return for an increase in bias that one hopes does not overwhelm the advantages in efficiency. In general, we suggest keeping the value on this prior relatively small and increasing it only when necessary. A recommendation of 0.5 to 1 percent of the number of observations, $n$, is a reasonable starting value, and often useful in large datasets to add some numerical stability. For example, in a dataset of two thousand observations, this would translate to a prior value of 10 or 20, respectively. A prior of up to 5 percent is moderate in most applications and 10 percent is a reasonable upper bound.

For our data, it is easy to code up a 1 percent ridge prior:

```{r empri}
a.out.time2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                      intercs = TRUE, p2s = 0,
                      empri = .01 * nrow(freetrade))
a.out.time2
```

### Observation-level priors {#sec:obspri}

Researchers often have additional prior information about missing data values based on previous research, academic consensus, or personal experience. Amelia can incorporate this information to produce vastly improved imputations.

The Amelia algorithm allows users to include informative Bayesian priors about individual missing data cells instead of the more general model parameters, many of which have little direct meaning. The incorporation of priors follows basic Bayesian analysis where the imputation turns out to be a weighted average of the model-based imputation and the prior mean, where the weights are functions of the relative strength of the data and prior: when the model predicts very well, the imputation will down-weight the prior, and vice versa [@HonKin10].

The priors about individual observations should describe the analyst's belief about the distribution of the missing data cell. This can either take the form of a mean and a standard deviation or a confidence interval. For instance, we might know that 1986 tariff rates in Thailand were around 40%, but we have some uncertainty as to the exact value. Our prior belief about the distribution of the missing data cell, then, centers on 40 with a standard deviation that reflects the amount of uncertainty we have about our prior belief.

To input priors you must build a priors matrix with either four or five columns. Each row of the matrix represents a prior on either one observation or one variable. In any row, the entry in the first column is the row of the observation and the entry in the second column is the column of the observation. In the four-column priors matrix, the third and fourth columns are the mean and standard deviation of the prior distribution of the missing value.

For instance, suppose that we had some expert prior information about tariff rates in Thailand. We know from the data that Thailand is missing tariff rates in many years,

```{r thailand}
freetrade[freetrade$country == "Thailand", c("year", "country", "tariff")]
```

Suppose that we had expert information that tariff rates were roughly 40% in Thailand between 1986 and 1988 with about a 6% margin of error. This corresponds to a standard deviation of about 3. In order to include this information, we must form the priors matrix:

```{r build_prior}
pr <- matrix(
  c(158, 159, 160, 3, 3, 3, 40, 40, 40, 3, 3, 3),
  nrow = 3, ncol = 4
)
pr
```

The first column of this matrix corresponds to the row numbers of Thailand in these three years, the second column refers to the column number of `tariff` in the data and the last two columns refer to the actual prior. Once we have this matrix, we can pass it to `amelia()`,

```{r amelia_prior}
a.out.pr <- amelia(freetrade, ts = "year", cs = "country",
                   priors = pr)
```

In the five-column matrix, the last three columns describe a confidence range of the data. The columns are a lower bound, an upper bound, and a confidence level between 0 and 1, exclusive. Whichever format you choose, it must be consistent across the entire matrix. We could get roughly the same prior as above by utilizing this method. Our margin of error implies that we would want imputations between 34 and 46, so our matrix would be

```{r build_prior2}
pr.2 <- matrix(
  c(158, 159, 160, 3, 3, 3, 34, 34, 34, 46, 46, 46, 0.95, 0.95, 0.95),
  nrow = 3, ncol = 5
)
pr.2
```

These priors indicate that we are 95% confident that these missing values are in the range 34 to 46.

If a prior has the value 0 in the first column, this prior will be applied to all missing values in this variable, except for explicitly set priors.
Thus, we could set a prior for the entire `tariff` variable of 20, but still keep the above specific priors with the following code:

```{r build_prior3}
pr.3 <- matrix(
  c(158, 159, 160, 0, 3, 3, 3, 3, 40, 40, 40, 20, 3, 3, 3, 5),
  nrow = 4, ncol = 4)
pr.3
```

### Logical bounds

In some cases, variables in the social sciences have known logical bounds. Proportions must be between 0 and 1 and duration data must be greater than 0, for instance. Many of these logical bounds can be handled by using the correct transformation for that type of variable (see \@ref(sec:trans) for more details on the transformations handled by Amelia). In the occasional case that imputations must satisfy certain logical bounds not handled by these transformations, Amelia can take draws from a truncated normal distribution in order to achieve imputations that satisfy the bounds.

Note, however, that this procedure imposes extremely strong restrictions on the imputations and can lead to lower variances than the imputation model implies. The mean value across all the imputed values of a missing cell is the best guess from the imputation model of that missing value. The variance of the distribution across imputed datasets correctly reflects the uncertainty in that imputation. It is often the mean imputed value that should conform to any known bounds, even if individual imputations are drawn beyond those bounds. The mean imputed value can be checked with the diagnostics presented in the next section. In general, building a more predictive imputation model will lead to better imputations than imposing bounds.

Amelia implements these bounds by rejection sampling. When drawing the imputations from their posterior, we repeatedly resample until we have a draw that satisfies all of the logical constraints. You can set an upper limit on the number of times to resample with the `max.resample` argument. If, after `max.resample` draws, the imputations are still outside the bounds, Amelia will set the imputation at the edge of the bounds. Thus, if the bounds were 0 and 100 and all of the draws were negative, Amelia would simply impute 0.

As an extreme example, suppose that we know, for certain, that tariff rates had to fall between 30 and 40. This, obviously, is not true, but we can generate imputations from this model. In order to specify these bounds, we need to generate a matrix of bounds to pass to the `bounds` argument. This matrix will have 3 columns: the first is the column for the bounded variable, the second is the lower bound and the third is the upper bound.
Thus, to implement our bound on tariff rates (the 3rd column of the dataset), we would create the matrix,

```{r build_bounds}
bds <- matrix(c(3, 30, 40), nrow = 1, ncol = 3)
bds
```

which we can pass to the `bounds` argument to `amelia()`:

```{r amelia_bounds}
a.out.bds <- amelia(freetrade, ts = "year", cs = "country", bounds = bds,
                    max.resample = 1000)
```

The difference in results between the bounded and unbounded models is not obvious from the output, but inspection of the imputed tariff rates for Malaysia shows that there has been a drastic restriction of the imputations to the desired range:

```{r bounds_plot}
tscsPlot(a.out, cs = "Malaysia", main = "No logical bounds", var = "tariff",
         ylim = c(-10, 60))
tscsPlot(a.out.bds, cs = "Malaysia", main = "Bounded between 30 and 40",
         var = "tariff", ylim = c(-10, 60))
```

Again, analysts should be extremely cautious when using these bounds as they can seriously affect the inferences from the imputation model, as shown in this example. Even when logical bounds exist, we recommend simply imputing variables normally, as the violation of the logical bounds represents part of the true uncertainty of imputation.

## Post-imputation Transformations {#sec_postimptrans}

In many cases, it is useful to create transformations of the imputed variables for use in further analysis. For instance, one may want to create an interaction between two variables or perform a log-transformation on the imputed data. To do this, Amelia includes a `transform()` function for `amelia()` output that adds or overwrites variables in each of the imputed datasets. For instance, if we wanted to create a log-transformation of the `gdp.pc` variable, we could use the following command:

```{r amelia_transform}
a.out <- transform(a.out, lgdp = log(gdp.pc))
head(a.out$imputations[[1]][, c("country", "year", "gdp.pc", "lgdp")])
```

To create an interaction between two variables, we could simply use:

```{r interaction}
a.out <- transform(a.out, pol_gdp = polity * gdp.pc)
```

Each transformation is recorded and the `summary()` command prints out each transformation that has been performed:

```{r sum_trans}
summary(a.out)
```

Note that the updated output is almost exactly the same as the fresh `amelia()` output. You can pass the transformed output back to `amelia()` and it will add imputations and update these imputations with the transformations you have performed.

## Analysis Models {#sec_analysis}

Imputation is most often a data processing step as opposed to a final model in and of itself. To this end, it is easy to pass output from `amelia()` to other functions. The easiest and most integrated way to run an analysis model is to use the `with()` and `mi.combine()` functions. For example, in @MilKub05, the dependent variable was tariff rates. We can replicate table 5.1 from their analysis with the original data simply by running

```{r lm_lwd}
orig.model <- lm(tariff ~ polity + pop + gdp.pc + year + country,
                 data = freetrade)
orig.model
```

Running the same model with imputed data is almost identical. We can run the `lm()` model within each imputed data set by using the `with()` function:

```{r lm_imp}
imp.models <- with(
  a.out,
  lm(tariff ~ polity + pop + gdp.pc + year + country)
)
imp.models[1:2]
```

The result here is simply a list of the output of `lm()` applied to each imputed data set.
We can combine the imputed estimates using the rules described in @KinHonJos01 and @Schafer97 with the `mi.combine()` function:

```{r mi_combine}
out <- mi.combine(imp.models, conf.int = TRUE)
out
```

The combination of the results depends on the [broom](https://broom.tidymodels.org) package, and results can be combined if a `tidy()` method exists for the estimation function passed to `with()`. Other packages such as [Zelig](https://zeligproject.org) can also combine imputed data sets across a number of statistical models. Furthermore, users can easily export their imputations using the `write.amelia()` function as described in \@ref(sec_saving) and use statistical packages other than R for the analysis model.

In addition to the resources available in R, users can draw on Stata to implement their analysis models. As of version 11, Stata has built-in handling of multiply imputed datasets. In order to utilize this functionality, simply export the "stacked" imputations using the `write.amelia()` function:

```{r write_dta_stacked, eval = FALSE}
write.amelia(a.out, separate = FALSE, file.stem = "outdata", format = "dta")
```

Once this stacked dataset is open in Stata, you must tell Stata that it is an imputed dataset using the `mi import flong` command:

```{stata eval = FALSE}
mi import flong, m(imp) id(year country) imp(tariff-usheg)
```

The command takes a few options: `m` designates the imputation variable (set with `impvar` in `write.amelia()`), `id` sets the identifying variables, and `imp` sets the variables that were imputed (or included in the imputation). The `tariff-usheg` range indicates that Stata should treat the variables between `tariff` and `usheg` as imputed. Once we have set the dataset as imputed, we can use the built-in `mi` commands to analyze the data:

```{stata eval = FALSE}
mi estimate: reg tariff polity pop gdp_pc
```

```
Multiple-imputation estimates                   Imputations     =          5
Linear regression                               Number of obs   =        171
                                                Average RVI     =     1.4114
                                                Complete DF     =        167
DF adjustment:   Small sample                   DF:     min     =      10.36
                                                        avg     =      18.81
                                                        max     =      37.62
Model F test:       Equal FMI                   F(   2,   10.4) =      15.50
Within VCE type:          OLS                   Prob > F        =     0.0008

------------------------------------------------------------------------------
      tariff |      Coef.   Std. Err.      t    P>|t|     [95% Conf. Interval]
-------------+----------------------------------------------------------------
      polity |  -.2058115   .3911049    -0.53   0.610    -1.072968    .6613452
         pop |   3.21e-08   8.72e-09     3.68   0.004     1.27e-08    5.14e-08
      gdp_pc |  -.0027561    .000644    -4.28   0.000    -.0040602   -.0014519
       _cons |   32.70461   2.660091    12.29   0.000     27.08917    38.32005
------------------------------------------------------------------------------
```

## The `amelia` class {#sec_out}

The output from the `amelia()` function is an instance of the S3 class `amelia`. Instances of the `amelia` class contain much more than simply the imputed datasets. The `mu` object of the class contains the posterior draws of the means of the complete data. The `covMatrices` object contains the posterior draws of the covariance matrices of the complete data. Note that these correspond to the variables as they are sent to the EM algorithm. Namely, they refer to the variables after being transformed, centered and scaled.

The `iterHist` object is a list of `m` 3-column matrices. Each row of the matrices corresponds to an iteration of the EM algorithm. The first column indicates how many parameters had yet to converge at that iteration.
The second column indicates if the EM algorithm made a step that decreased the number of converged parameters. The third column indicates whether the covariance matrix at this iteration was singular. Clearly, the last two columns are meant to indicate when the EM algorithm enters a problematic part of the parameter space. ## References Amelia/inst/doc/intro-mi.Rmd0000644000176200001440000002262114335240021015363 0ustar liggesusers--- title: "Introduction to Multiple Imputation" date: "`r Sys.Date()`" link-citations: yes bibliography: amelia.bib output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Introduction to Multiple Imputation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r loadpkg, echo = FALSE, include = FALSE} knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") ``` ## Introduction {#sec:intro} Missing data is a ubiquitous problem in social science data. Respondents do not answer every question, countries do not collect statistics every year, archives are incomplete, subjects drop out of panels. Most statistical analysis methods, however, assume the absence of missing data, and are only able to include observations for which every variable is measured. Amelia allows users to impute ("fill in" or rectangularize) incomplete data sets so that analyses which require complete observations can appropriately use all the information present in a dataset with missingness, and avoid the biases, inefficiencies, and incorrect uncertainty estimates that can result from dropping all partially observed observations from the analysis. Amelia performs *multiple imputation*, a general-purpose approach to data with missing values. Multiple imputation has been shown to reduce bias and increase efficiency compared to listwise deletion. Furthermore, ad-hoc methods of imputation, such as mean imputation, can lead to serious biases in variances and covariances. Unfortunately, creating multiple imputations can be a burdensome process due to the technical nature of algorithms involved. \Amelia\ provides users with a simple way to create and implement an imputation model, generate imputed datasets, and check its fit using diagnostics. The Amelia program goes several significant steps beyond the capabilities of the first version of Amelia [@HonJosKin98]. For one, the bootstrap-based EMB algorithm included in Amelia can impute many more variables, with many more observations, in much less time. The great simplicity and power of the EMB algorithm made it possible to write Amelia so that it virtually never crashes --- which to our knowledge makes it unique among all existing multiple imputation software --- and is much faster than the alternatives too. Amelia also has features to make valid and much more accurate imputations for cross-sectional, time-series, and time-series-cross-section data, and allows the incorporation of observation and data-matrix-cell level prior information. In addition to all of this, Amelia provides many diagnostic functions that help users check the validity of their imputation model. This software implements the ideas developed in @HonKin10. ## What Amelia Does {#sec:what} Multiple imputation involves imputing $m$ values for each missing cell in your data matrix and creating $m$ "completed" data sets. Across these completed data sets, the observed values are the same, but the missing values are filled in with a distribution of imputations that reflect the uncertainty about the missing data. 
After imputation with Amelia's EMB algorithm, you can apply whatever statistical method you would have used if there had been no missing values to each of the $m$ data sets, and use a simple procedure, described below, to combine the results[^combine]. Under normal circumstances, you only need to impute once and can then analyze the $m$ imputed data sets as many times and for as many purposes as you wish. The advantage of Amelia is that it combines the comparative speed and ease-of-use of our algorithm with the power of multiple imputation, to let you focus on your substantive research questions rather than spending time developing complex application-specific models for nonresponse in each new data set. Unless the rate of missingness is very high, $m = 5$ (the program default) is probably adequate. [^combine]: You can combine the results automatically by doing your data analyses within [Zelig for R](https://zeligproject.org), or within [Clarify for Stata](https://gking.harvard.edu/clarify). ### Assumptions The imputation model in Amelia assumes that the complete data (that is, both observed and unobserved) are multivariate normal. If we denote the $(n \times k)$ dataset as $D$ (with observed part $D^{obs}$ and unobserved part $D^{mis}$), then this assumption is \begin{equation} D \sim \mathcal{N}_k(\mu, \Sigma), \end{equation} which states that $D$ has a multivariate normal distribution with mean vector $\mu$ and covariance matrix $\Sigma$. The multivariate normal distribution is often a crude approximation to the true distribution of the data, yet there is evidence that this model works as well as other, more complicated models even in the face of categorical or mixed data [see @Schafer97, @SchOls98]. Furthermore, transformations of many types of variables can often make this normality assumption more plausible (see \@ref(sec:trans) for more information on how to implement this in Amelia). The essential problem of imputation is that we only observe $D^{obs}$, not the entirety of $D$. In order to gain traction, we need to make the usual assumption in multiple imputation that the data are *missing at random* (MAR). This assumption means that the pattern of missingness only depends on the observed data $D^{obs}$, not the unobserved data $D^{mis}$. Let $M$ to be the missingness matrix, with cells $m_{ij} = 1$ if $d_{ij} \in D^{mis}$ and $m_{ij} = 0$ otherwise. Put simply, $M$ is a matrix that indicates whether or not a cell is missing in the data. With this, we can define the MAR assumption as \[ p(M|D) = p(M|D^{obs}). \] Note that MAR includes the case when missing values are created randomly by, say, coin flips, but it also includes many more sophisticated missingness models. When missingness is not dependent on the data at all, we say that the data are *missing completely at random* (MCAR). Amelia requires both the multivariate normality and the MAR assumption (or the simpler special case of MCAR). Note that the MAR assumption can be made more plausible by including additional variables in the dataset $D$ in the imputation dataset than just those eventually envisioned to be used in the analysis model. ### Algorithm In multiple imputation, we are concerned with the complete-data parameters, $\theta = (\mu, \Sigma)$. When writing down a model of the data, it is clear that our observed data is actually $D^{obs}$ and $M$, the missingness matrix. Thus, the likelihood of our observed data is $p(D^{obs}, M|\theta)$. 
Using the MAR assumption\footnote{There is an additional assumption hidden here that $M$ does not depend on the complete-data parameters.}, we can break this up, \begin{align} p(D^{obs},M|\theta) = p(M|D^{obs})p(D^{obs}|\theta). \end{align} As we only care about inference on the complete data parameters, we can write the likelihood as \begin{align} L(\theta|D^{obs}) &\propto p(D^{obs}|\theta), \end{align} which we can rewrite using the law of iterated expectations as \begin{align} p(D^{obs}|\theta) &= \int p(D|\theta) dD^{mis}. \end{align} With this likelihood and a flat prior on $\theta$, we can see that the posterior is \begin{equation} p(\theta | D^{obs}) \propto p(D^{obs}|\theta) = \int p(D|\theta) dD^{mis}. \end{equation} The main computational difficulty in the analysis of incomplete data is taking draws from this posterior. The EM algorithm [@DemLaiRub77] is a simple computational approach to finding the mode of the posterior. Our EMB algorithm combines the classic EM algorithm with a bootstrap approach to take draws from this posterior. For each draw, we bootstrap the data to simulate estimation uncertainty and then run the EM algorithm to find the mode of the posterior for the bootstrapped data, which gives us fundamental uncertainty too [see @HonKin10 for details of the EMB algorithm]. Once we have draws of the posterior of the complete-data parameters, we make imputations by drawing values of $D^{mis}$ from its distribution conditional on $D^{obs}$ and the draws of $\theta$, which is a linear regression with parameters that can be calculated directly from $\theta$. ### Analysis In order to combine the results across $m$ data sets, first decide on the quantity of interest to compute, such as a univariate mean, regression coefficient, predicted probability, or first difference. Then, the easiest way is to draw $1/m$ simulations of $q$ from each of the $m$ data sets, combine them into one set of $m$ simulations, and then to use the standard simulation-based methods of interpretation common for single data sets @KinTomWit00. Alternatively, you can combine directly and use as the multiple imputation estimate of this parameter, $\bar{q}$, the average of the $m$ separate estimates, $q_j$ $(j=1,\dots,m)$: \begin{equation} \bar{q}=\frac{1}{m}\sum^{m}_{j=1}q_j. \end{equation} The variance of the point estimate is the average of the estimated variances from *within* each completed data set, plus the sample variance in the point estimates *across* the data sets (multiplied by a factor that corrects for the bias because $m<\infty$). Let $SE(q_j)^2$ denote the estimated variance (squared standard error) of $q_j$ from the data set $j$, and $S^{2}_{q}=\Sigma^{m}_{j=1}(q_j-\bar{q})^2/(m-1)$ be the sample variance across the $m$ point estimates. The standard error of the multiple imputation point estimate is the square root of \begin{equation} SE(q)^2=\frac{1}{m}\sum^{m}_{j=1}SE(q_j)^2+S^2_q(1+1/m). \end{equation} ## References Amelia/inst/doc/using-amelia.html0000644000176200001440000105651614335756316016460 0ustar liggesusers Using Amelia

Using Amelia

2022-11-18

Data

We now demonstrate how to use Amelia using data from Milner and Kubota (2005), which studies the effect of democracy on trade policy. For the purposes of this user’s guide, we will use a subset restricted to nine developing countries in Asia from 1980 to 1999. This dataset includes 10 variables:

Variable Description
year year
country country
tariff average tariff rates
polity Polity IV Score
pop total population
gdp.pc gross domestic product per capita
intresmi gross international reserves
signed dummy variable if signed an IMF agreement that year
fiveop measure of financial openness
usheg measure of US hegemony

These variables correspond to the variables used in the analysis model of Milner and Kubota (2005) in table 2.

We first load the Amelia package and the data:

library(Amelia)
data(freetrade)

We can check the summary statistics of the data to see that there is missingness on many of the variables:

summary(freetrade)
##       year        country              tariff          polity    
##  Min.   :1981   Length:171         Min.   :  7.1   Min.   :-8.0  
##  1st Qu.:1985   Class :character   1st Qu.: 16.3   1st Qu.:-2.0  
##  Median :1990   Mode  :character   Median : 25.2   Median : 5.0  
##  Mean   :1990                      Mean   : 31.6   Mean   : 2.9  
##  3rd Qu.:1995                      3rd Qu.: 40.8   3rd Qu.: 8.0  
##  Max.   :1999                      Max.   :100.0   Max.   : 9.0  
##                                    NA's   :58      NA's   :2     
##       pop               gdp.pc         intresmi         signed     
##  Min.   :1.41e+07   Min.   :  150   Min.   :0.904   Min.   :0.000  
##  1st Qu.:1.97e+07   1st Qu.:  420   1st Qu.:2.223   1st Qu.:0.000  
##  Median :5.28e+07   Median :  814   Median :3.182   Median :0.000  
##  Mean   :1.50e+08   Mean   : 1867   Mean   :3.375   Mean   :0.155  
##  3rd Qu.:1.21e+08   3rd Qu.: 2463   3rd Qu.:4.406   3rd Qu.:0.000  
##  Max.   :9.98e+08   Max.   :12086   Max.   :7.935   Max.   :1.000  
##                                     NA's   :13      NA's   :3      
##      fiveop         usheg      
##  Min.   :12.3   Min.   :0.256  
##  1st Qu.:12.5   1st Qu.:0.262  
##  Median :12.6   Median :0.276  
##  Mean   :12.7   Mean   :0.276  
##  3rd Qu.:13.2   3rd Qu.:0.289  
##  Max.   :13.2   Max.   :0.308  
##  NA's   :18

In the presence of missing data, most statistical packages use listwise deletion, which removes any row that contains a missing value from the analysis. Using the base model of Milner and Kubota (2005) Table 2, we run a simple linear model in R, which uses listwise deletion:

summary(lm(tariff ~ polity + pop + gdp.pc + year + country,
          data = freetrade))
## 
## Call:
## lm(formula = tariff ~ polity + pop + gdp.pc + year + country, 
##     data = freetrade)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -30.764  -3.259   0.087   2.598  18.310 
## 
## Coefficients:
##                     Estimate Std. Error t value Pr(>|t|)
## (Intercept)         1.97e+03   4.02e+02    4.91  3.6e-06
## polity             -1.37e-01   1.82e-01   -0.75     0.45
## pop                -2.02e-07   2.54e-08   -7.95  3.2e-12
## gdp.pc              6.10e-04   7.44e-04    0.82     0.41
## year               -8.71e-01   2.08e-01   -4.18  6.4e-05
## countryIndonesia   -1.82e+02   1.86e+01   -9.82  3.0e-16
## countryKorea       -2.20e+02   2.08e+01  -10.61  < 2e-16
## countryMalaysia    -2.25e+02   2.17e+01  -10.34  < 2e-16
## countryNepal       -2.16e+02   2.25e+01   -9.63  7.7e-16
## countryPakistan    -1.55e+02   1.98e+01   -7.84  5.6e-12
## countryPhilippines -2.04e+02   2.09e+01   -9.77  3.7e-16
## countrySriLanka    -2.09e+02   2.21e+01   -9.46  1.8e-15
## countryThailand    -1.96e+02   2.10e+01   -9.36  3.0e-15
## 
## Residual standard error: 6.22 on 98 degrees of freedom
##   (60 observations deleted due to missingness)
## Multiple R-squared:  0.925,  Adjusted R-squared:  0.915 
## F-statistic:  100 on 12 and 98 DF,  p-value: <2e-16

Note that 60 of the 171 original observations are deleted due to missingness. These observations, however, are partially observed, and contain valuable information about the relationships between those variables which are present in the partially completed observations. Multiple imputation will help us retrieve that information and make better, more efficient, inferences.

Multiple Imputation

When performing multiple imputation, the first step is to identify the variables to include in the imputation model. It is crucial to include at least as much information as will be used in the analysis model. That is, any variable that will be in the analysis model should also be in the imputation model. This includes any transformations or interactions of variables that will appear in the analysis model.

In fact, it is often useful to add more information to the imputation model than will be present when the analysis is run. Since imputation is predictive, any variables that would increase predictive power should be included in the model, even if including them in the analysis model would produce bias in estimating a causal effect (such as for post-treatment variables) or collinearity would preclude determining which variable had a relationship with the dependent variable (such as including multiple alternate measures of GDP). In our case, we include all the variables in freetrade in the imputation model, even though our analysis model focuses on polity, pop and gdp.pc. We’re not incorporating time or spatial data yet, but we do below.

To create multiple imputations in Amelia, we can simply run

a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country")
## -- Imputation 1 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14
## 
## -- Imputation 2 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14
## 
## -- Imputation 3 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16
## 
## -- Imputation 4 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
## 
## -- Imputation 5 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18
a.out
## 
## Amelia output with 5 imputed datasets.
## Return code:  1 
## Message:  Normal EM convergence. 
## 
## Chain Lengths:
## --------------
## Imputation 1:  14
## Imputation 2:  14
## Imputation 3:  16
## Imputation 4:  15
## Imputation 5:  18

Note that our example dataset is deliberately small both in variables and in cross-sectional elements. Typical datasets may often have hundreds or possibly a couple thousand steps to the EM algorithm. Long chains should remind the analyst to consider whether transformations of the variables would more closely fit the multivariate normal assumptions of the model (correct but omitted transformations will shorten the number of steps and improve the fit of the imputations), but do not necessarily denote problems with the imputation model.

The output gives some information about how the algorithm ran. Each of the imputed datasets is now in the list a.out$imputations. Thus, we could plot a histogram of the tariff variable from the 3rd imputation,

hist(a.out$imputations[[3]]$tariff, col = "grey", border = "white")

Saving imputed datasets

If you need to save your imputed datasets, one direct method is to save the output list from amelia,

save(a.out, file = "imputations.RData")

As in the previous example, the i-th imputed dataset can be retrieved from this list as a.out$imputations[[i]].

In addition, you can save each of the imputed datasets to its own file using the write.amelia() command,

write.amelia(obj = a.out, file.stem = "outdata")

This will create one comma-separated value file for each imputed dataset in the following manner:

outdata1.csv
outdata2.csv
outdata3.csv
outdata4.csv
outdata5.csv

The write.amelia function can also save files in tab-delimited and Stata (.dta) file formats. For instance, to save Stata files, simply change the format argument to "dta",

write.amelia(obj = a.out, file.stem = "outdata", format = "dta")

Additionally, write.amelia() can create a “stacked” version of the imputed datasets, which stacks each imputed dataset on top of one another. This can be done by setting the separate argument to FALSE. The resulting matrix is of size \((N \cdot m) \times p\) if the original dataset is excluded (orig.data = FALSE) and of size \((N \cdot (m+1)) \times p\) if it is included (orig.data = TRUE). The stacked dataset will include a variable (set with impvar) that indicates to which imputed dataset the observation belongs.
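
For instance, a rough sketch of writing a single stacked file using the arguments just described (the file name is only illustrative):

write.amelia(obj = a.out, file.stem = "outdata_stacked", separate = FALSE,
             orig.data = FALSE, impvar = "imp")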

Combining multiple calls to amelia()

The EMB algorithm is what computer scientists call embarrassingly parallel, meaning that it is simple to separate each imputation into parallel processes. With Amelia it is simple to run subsets of the imputations on different machines and then combine them after the imputation for use in the analysis model. This allows for a huge increase in the speed of the algorithm.

Output lists from different Amelia runs can be combined together into a new list. For instance, suppose that we wanted to add another ten imputed datasets to our earlier call to amelia(). First, run the function to get these additional imputations,

a.out.more <- amelia(freetrade, m = 10, ts = "year", cs = "country", p2s = 0)
a.out.more
## 
## Amelia output with 10 imputed datasets.
## Return code:  1 
## Message:  Normal EM convergence. 
## 
## Chain Lengths:
## --------------
## Imputation 1:  18
## Imputation 2:  16
## Imputation 3:  14
## Imputation 4:  17
## Imputation 5:  12
## Imputation 6:  21
## Imputation 7:  8
## Imputation 8:  14
## Imputation 9:  20
## Imputation 10:  9

then combine this output with our original output using the ameliabind() function,

a.out.more <- ameliabind(a.out, a.out.more)
a.out.more
## 
## Amelia output with 15 imputed datasets.
## Return code:  1 
## Message:  Normal EM convergence 
## 
## Chain Lengths:
## --------------
## Imputation 1:  14
## Imputation 2:  14
## Imputation 3:  16
## Imputation 4:  15
## Imputation 5:  18
## Imputation 6:  18
## Imputation 7:  16
## Imputation 8:  14
## Imputation 9:  17
## Imputation 10:  12
## Imputation 11:  21
## Imputation 12:  8
## Imputation 13:  14
## Imputation 14:  20
## Imputation 15:  9

This function binds the two outputs into the same output so that you can pass the combined imputations easily to analysis models and diagnostics. Note that a.out.more now has a total of 15 imputations.

A simple way to execute a parallel processing scheme with Amelia would be to run amelia() with m set to 1 on \(m\) different machines or processors, save each output using the save() function, load them all into the same R session using the load() command, and then combine them using ameliabind(). In order to do this, however, make sure to give each of the outputs a different name so that they do not overwrite each other when loaded into the same R session. Also, some parallel environments will dump all generated files into a common directory, where they may overwrite each other. If it is convenient in a parallel environment to run a large number of amelia() calls from a single piece of code, one useful way to avoid overwriting is to create the file.stem with a random suffix. For example:

b <- round(runif(1, min = 1111, max = 9999))
random.name <- paste("am", b, sep = "")
write.amelia(obj = a.out, file.stem = random.name)
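
Putting the pieces together, the workflow described above might look roughly like this (object and file names are hypothetical; the second and third files would be produced by analogous runs on other machines):

# on machine 1 (machines 2 and 3 run the same code with different names)
a.part1 <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 0)
save(a.part1, file = "amelia-part1.RData")

# later, in a single R session
load("amelia-part1.RData")
load("amelia-part2.RData")
load("amelia-part3.RData")
a.all <- ameliabind(a.part1, a.part2, a.part3)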

Screen output

Screen output can be adjusted with the “print to screen” argument, p2s. At a value of 0, no screen printing will occur. This may be useful in large jobs or simulations where a very large number of imputation models may be required. The default value of 1 lists each bootstrap and displays the number of iterations required to reach convergence in that bootstrapped dataset. The value of 2 gives more thorough screen output, including, at each iteration, the number of parameters that have significantly changed since the last iteration. This may be useful when the EM chain length is very long, as it can provide an intuition for how many parameters still need to converge in the EM chain, and a sense of the time remaining. However, it is worth noting that the last several parameters can often take a significant fraction of the total number of iterations to converge. Setting p2s to 2 will also generate information on how the EM algorithm is behaving, such as a ! when the current estimated complete data covariance matrix is not invertible and a * when the likelihood has not monotonically increased in that step. Having many of these two symbols in the screen output is an indication of a problematic imputation model. Problems of non-invertible matrices often mean that the current guess for the covariance matrix is singular. This is a sign that there may be two highly correlated variables in the model. One way to resolve this is to use a ridge prior (see the section on ridge priors below).

An example of the output when p2s is 2 would be

a.out.p2s <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 2)
## 
## amelia starting
## beginning prep functions
## Variables used:  tariff polity pop gdp.pc intresmi signed fiveop usheg 
## running bootstrap
## -- Imputation 1 --
## setting up EM chain indicies
## 
##   1(44)  2(33)  3(27)  4(21)  5(22)  6(22)  7(19)  8(18)  9(14) 10(8) 11(6) 12(2) 13(0)
## 
##  saving and cleaning

Parallel Imputation

Each imputation in the above EMB algorithm is completely independent of any other imputation, a property called embarrassingly parallel. This type of approach can take advantage of the multiple-core infrastructure of modern CPUs. Each core in a multi-core processor can execute independent operations in parallel. Amelia can utilize this parallel processing internally via the parallel and the ncpus arguments. The parallel argument sets the parallel processing backend, either with "multicore" or "snow" (or "no" for no parallel processing). The "multicore" backend is not available on Windows systems, but tends to be quicker at parallel processing. On a Windows system, the "snow" backend provides parallel processing through a cluster of worker processes across the CPUs. You can set the default for this argument using the "amelia.parallel" option. This allows you to run Amelia in parallel as the default for an entire R session without setting arguments in the amelia() call.

For each of the parallel backends, Amelia requires a number of CPUs to use in parallel. This can be set using the ncpus argument. It can be higher than the number of physical cores in the system if hyperthreading or other technologies are available. You can use the parallel::detectCores() function to determine how many cores are available on your machine. The default for this argument can be set using the "amelia.ncpus" option.
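
As a rough illustration (the number of workers here is arbitrary), a parallel run and the session-wide defaults mentioned above might look like:

a.out.par <- amelia(freetrade, m = 10, ts = "year", cs = "country",
                    p2s = 0, parallel = "snow", ncpus = 2)

# or set session-wide defaults for subsequent calls
options(amelia.parallel = "snow", amelia.ncpus = parallel::detectCores())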

On Unix-alike systems (such as macOS and Linux distributions), the "multicore" backend automatically sets up and stops the parallel workers by forking the process. On Windows, the "snow" backend requires more attention. Amelia will attempt to create a parallel cluster of worker processes (since Windows systems cannot fork a process) and will stop this cluster after the imputations are complete. Alternatively, Amelia also has a cl argument, which accepts a predefined cluster made using parallel::makePSOCKcluster(). For more information about parallel processing in R, see the documentation for the parallel package that ships along with R or the CRAN Task View on Parallel Computing with R.
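
On Windows, for example, a sketch of supplying a pre-made cluster through the cl argument might look like the following (the cluster size is arbitrary):

library(parallel)
cl <- makePSOCKcluster(2)
a.out.cl <- amelia(freetrade, m = 10, ts = "year", cs = "country",
                   p2s = 0, parallel = "snow", cl = cl)
stopCluster(cl)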

Improving Imputations via Transformations

Social science data commonly include variables that fail to fit a multivariate normal distribution. Indeed, numerous models have been introduced specifically to deal with the problems they present. As it turns out, much evidence in the literature (discussed in King et al. 2001) indicates that the multivariate normal model used in Amelia usually works well for the imputation stage even when discrete or non-normal variables are included and when the analysis stage involves these limited dependent variable models. Nevertheless, Amelia includes some limited capacity to deal directly with ordinal and nominal variables and to modify variables that require other transformations. In general, nominal and log-transformed variables should be declared to Amelia, whereas ordinal (including dichotomous) variables often need not be, as described below. (For harder cases, see Schafer (1997) for specialized MCMC-based imputation models for discrete variables.)

Although these transformations are performed internally on these variables to better fit the data to the multivariate normal assumptions of the imputation model, all the imputations that are created will be returned in the original untransformed form of the data. If the user has already performed transformations on their data (such as by taking a log or square root prior to feeding the data to amelia()), these transformations should not also be declared, as that would result in the transformation being applied twice in the imputation model. The fully imputed data sets that are returned will always be in the form of the original data that is passed to the amelia() routine.

Ordinal

In much statistical research, researchers treat independent ordinal (including dichotomous) variables as if they were really continuous. If the analysis model to be employed is of this type, then nothing extra is required of the imputation model. Users are advised to allow Amelia to impute non-integer values for any missing data, and to use these non-integer values in their analysis. Sometimes this makes sense, and sometimes this defies intuition. One particular imputation of 2.35 for a missing value on a seven point scale carries the intuition that the respondent is between a 2 and a 3 and most probably would have responded 2 had the data been observed. This is easier to accept than an imputation of 0.79 for a dichotomous variable where a zero represents a male and a one represents a female respondent. However, in both cases the non-integer imputations carry more information about the underlying distribution than would be carried if we were to force the imputations to be integers. Thus whenever the analysis model permits, missing ordinal observations should be allowed to take on continuously valued imputations.

In the freetrade data, one such ordinal variable is polity which ranges from -10 (full autocracy) to 10 (full democracy). If we tabulate this variable from one of the imputed datasets,

table(a.out$imputations[[3]]$polity)
## 
##               -8 -7.4578371590967               -7               -6 
##                1                1               22                4 
##               -5               -4               -2               -1 
##                7                3                9                1 
##                2                3                4                5 
##                7                7               15               26 
##                6                7                8                9 
##               13                5               36               13 
##  9.7802547928345 
##                1

we can see that there is one imputation between -8 and -7 and one imputation between 9 and 10. Again, the interpretation of these values is rather straightforward even if they are not strictly in the coding of the original Polity data.

Often, however, analysis models require some variables to be strictly ordinal, as for example, when the dependent variable will be modeled in a logistic or Poisson regression. Imputations for variables set as ordinal are created by taking the continuously valued imputation and using an appropriately scaled version of this as the probability of success in a binomial distribution. The draw from this binomial distribution is then translated back into one of the ordinal categories.

For our data we can simply add polity to the ords argument:

a.out1 <- amelia(freetrade, m = 5, ts = "year", cs = "country", ords =
                 "polity", p2s = 0)
table(a.out1$imputations[[3]]$polity)
## 
## -8 -7 -6 -5 -4 -2 -1  2  3  4  5  6  7  8  9 
##  1 22  4  8  3  9  1  7  7 15 26 13  5 36 14

Now, we can see that all of the imputations fall into one of the original polity categories.

Nominal

Nominal variables must be treated quite differently from ordinal variables. Any multinomial variables in the data set (such as religion coded 1 for Catholic, 2 for Jewish, and 3 for Protestant) must be specified to Amelia. In our dataset, we have signed, which is 1 if a country signed an IMF agreement in that year and 0 if it did not. Of course, our first imputation did not limit the imputations to these two categories

table(a.out1$imputations[[3]]$signed)
## 
##  -0.58172703933406 -0.417310204209806                  0  0.914087081006918 
##                  1                  1                142                  1 
##                  1 
##                 26

In order to fix this for a \(p\)-category multinomial variable, Amelia will determine \(p\) (as long as your data contain at least one value in each category), and substitute \(p-1\) binary variables to specify each possible category. These new \(p-1\) variables will be treated as the other variables in the multivariate normal imputation method chosen, and receive continuous imputations. These continuously valued imputations will then be appropriately scaled into probabilities for each of the \(p\) possible categories, and one of these categories will be drawn, whereupon the original \(p\)-category multinomial variable will be reconstructed and returned to the user. Thus all imputations will be appropriately multinomial.

For our data we can simply add signed to the noms argument:

a.out2 <- amelia(freetrade, m = 5, ts = "year", cs = "country",
                 noms = "signed", p2s = 0)
table(a.out2$imputations[[3]]$signed)
## 
##   0   1 
## 143  28

Note that Amelia can only fit imputations into categories that exist in the original data. Thus, if there was a third category of signed, say 2, that corresponded to a different kind of IMF agreement, but it never occurred in the original data, Amelia could not match imputations to it.

Since Amelia properly treats a \(p\)-category multinomial variable as \(p-1\) variables, one should understand the number of parameters that are quickly accumulating if many multinomial variables are being used. If the square of the number of real and constructed variables is large relative to the number of observations, it is useful to use a ridge prior, as described in the section on ridge priors below.

Natural log

If one of your variables is heavily skewed or has outliers that may alter the imputation in an unwanted way, you can use a natural logarithm transformation of that variable in order to normalize its distribution. This transformed distribution helps Amelia to avoid imputing values that depend too heavily on outlying data points. Log transformations are common in expenditure and economic variables where we have strong beliefs that the marginal relationship between two variables decreases as we move across the range.

For instance, we can show the tariff variable clearly has positive (or, right) skew while its natural log transformation has a roughly normal distribution.

hist(freetrade$tariff, col="grey", border="white")

hist(log(freetrade$tariff), col="grey", border="white")
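
To have Amelia apply this transformation internally during imputation (and return the imputations on the original scale), the variable can be passed to the logs argument; a minimal sketch:

a.out.log <- amelia(freetrade, ts = "year", cs = "country", logs = "tariff",
                    p2s = 0)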

Square root

Event count data is often heavily skewed and has nonlinear relationships with other variables. One common transformation to tailor the linear model to count data is to take the square roots of the counts. This is a transformation that can be set as an option in Amelia.
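
The relevant argument is sqrts. The freetrade data contain no true event count, so the choice of pop below is purely illustrative:

a.out.sqrt <- amelia(freetrade, ts = "year", cs = "country", sqrts = "pop",
                     p2s = 0)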

Logistic

Proportional data is sharply bounded between 0 and 1. A logistic transformation is one possible option in Amelia to make the distribution symmetric and relatively unbounded.
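
The corresponding argument is lgstc. The freetrade data contain no proportion variable, so the data frame and column names below are hypothetical placeholders:

# 'mydata' and 'prop.var' are hypothetical; prop.var holds values strictly between 0 and 1
a.out.lgstc <- amelia(mydata, lgstc = "prop.var", p2s = 0)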

Identification Variables

Datasets often contain identification variables, such as country names, respondent numbers, or other identification numbers, codes or abbreviations. Sometimes these are text and sometimes these are numeric. Often it is not appropriate to include these variables in the imputation model, but it is useful to have them remain in the imputed datasets (however, there are models that would include the ID variables in the imputation model, such as a fixed effects model for data with repeated observations of the same countries). Identification variables which are not to be included in the imputation model can be identified with the argument idvars. These variables will not be used in the imputation model, but will be kept in the imputed datasets.

If the year and country contained no information except labels, we could omit them from the imputation:

amelia(freetrade, idvars = c("year", "country"))
## -- Imputation 1 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18
## 
## -- Imputation 2 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13
## 
## -- Imputation 3 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14
## 
## -- Imputation 4 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17
## 
## -- Imputation 5 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
## 
## Amelia output with 5 imputed datasets.
## Return code:  1 
## Message:  Normal EM convergence. 
## 
## Chain Lengths:
## --------------
## Imputation 1:  18
## Imputation 2:  13
## Imputation 3:  14
## Imputation 4:  17
## Imputation 5:  15

Note that Amelia will return with an error if your dataset contains a factor or character variable that is not marked as a nominal or identification variable. Thus, if we were to omit the factor country from the cs or idvars arguments, we would receive an error:

a.out2 <- amelia(freetrade, idvars = c("year"))
## Amelia Error Code:  38 
##  The following variable(s) are characters: 
##   country
## You may have wanted to set this as a ID variable to remove it
## from the imputation model or as an ordinal or nominal
## variable to be imputed.  Please set it as either and
## try again.

In order to conserve memory, it is wise to remove unnecessary variables from a data set before loading it into Amelia. The only variables you should include in your data when running Amelia are variables you will use in the analysis stage and those variables that will help in the imputation model. While it may be tempting to simply mark unneeded variables as IDs, it only serves to waste memory and slow down the imputation procedure.
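
For example, a minimal sketch of trimming the data before imputing (the particular columns kept here are only illustrative):

vars.to.keep <- c("year", "country", "tariff", "polity", "pop", "gdp.pc")
a.out.sub <- amelia(freetrade[, vars.to.keep], ts = "year", cs = "country",
                    p2s = 0)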

Time Series, or Time Series Cross Sectional Data

Many variables that are recorded over time within a cross-sectional unit are observed to vary smoothly over time. In such cases, knowing the observed values of observations close in time to any missing value may enormously aid the imputation of that value. However, the exact pattern may vary over time within any cross-section. There may be periods of growth, stability, or decline; in each of which the observed values would be used in a different fashion to impute missing values. Also, these patterns may vary enormously across different cross-sections, or may exist in some and not others. Amelia can build a general model of patterns within variables across time by creating a sequence of polynomials of the time index. If, for example, tariffs vary smoothly over time, then we make the modeling assumption that there exists some polynomial that describes the economy in cross-sectional unit \(i\) at time \(t\) as:

\[ \textrm{tariff}_{ti} = \beta_0 + \beta_1 t + \beta_2 t^2 + \beta_3 t^3 + \cdots \]

Thus, if we include enough higher-order terms of time, the pattern between observed values of the tariff rate can be estimated. Amelia will create polynomials of time up to the user-defined \(k\)-th order (\(k\leq3\)).

We can implement this with the ts and polytime arguments. If we thought that a second-order polynomial of time would help predict the missing values, we could run

a.out2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 2)

With this input, Amelia will add covariates to the model that correspond to time and its polynomials. These covariates will help better predict the missing values.

If cross-sectional units are specified these polynomials can be interacted with the cross-section unit to allow the patterns over time to vary between cross-sectional units. Unless you strongly believe all units have the same patterns over time in all variables (including the same constant term), this is a reasonable setting. When \(k\) is set to 0, this interaction simply results in a model of fixed effects where every unit has a uniquely estimated constant term. Amelia does not smooth the observed data, and only uses this functional form, or one you choose, with all the other variables in the analysis and the uncertainty of the prediction, to impute the missing values.

In order to impute with trends specific to each cross-sectional unit, we can set intercs to TRUE:

a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                 intercs = TRUE, p2s = 2)

Note that attempting to use polytime without the ts argument, or intercs without the cs argument will result in an error.

Using the tscsPlot() function (discussed below), we can see that we have a much better prediction about the missing values when incorporating time than when we omit it:

tscsPlot(a.out, cs = "Malaysia", main = "Malaysia (no time settings)",
         var = "tariff", ylim = c(-10, 60))

tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)",
         var = "tariff", ylim = c(-10, 60))

Lags and leads

An alternative way of handling time-series information is to include lags and leads of certain variables into the imputation model. Lags are variables that take the value of another variable in the previous time period while leads take the value of another variable in the next time period. Many analysis models use lagged variables to deal with issues of endogeneity, thus using leads may seem strange. It is important to remember, however, that imputation models are predictive, not causal. Thus, since both past and future values of a variable are likely correlated with the present value, both lags and leads should improve the model.

If we wanted to include lags and leads of tariffs, for instance, we would simply pass this to the lags and leads arguments:

a.out2 <- amelia(freetrade, ts = "year", cs = "country", lags = "tariff",
                 leads = "tariff")
## -- Imputation 1 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19
## 
## -- Imputation 2 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
##  21 22
## 
## -- Imputation 3 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
##  21 22
## 
## -- Imputation 4 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16
## 
## -- Imputation 5 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
##  21 22 23 24

Including Prior Information

Amelia has a number of methods of setting priors within the imputation model. Two of these are commonly used and discussed below: ridge priors and observation-level priors.

Ridge priors for high missingness, small samples, or large correlations

When the data to be analyzed contain a high degree of missingness or very strong correlations among the variables, or when the number of observations is only slightly greater than the number of parameters \(p(p+3)/2\) (where \(p\) is the number of variables), results from your analysis model will be more dependent on the choice of imputation model. This suggests more testing of alternative specifications under Amelia in these cases. This can happen when polynomials of time interacted with the cross section are included in the imputation model. For example, in our data, if we used a polynomial of degree 2 with unit-specific trends and there are 9 countries, it would add \(3 \times 9 - 1 = 26\) more variables to the imputation model (dropping one of the fixed effects for identification). When these are added, the EM algorithm can become unstable. You can detect this by inspecting the screen output under p2s = 2 or by observing that the number of iterations per imputation varies widely.

In these circumstances, we recommend adding a ridge prior, which will help with numerical stability by shrinking the covariances among the variables toward zero without changing the means or variances. This can be done by including the empri argument. Including this prior as a positive number is roughly equivalent to adding empri artificial observations to the data set with the same means and variances as the existing data but with zero covariances. Thus, increasing the empri setting results in more shrinkage of the covariances, putting more a priori structure on the estimation problem: like many Bayesian methods, it reduces variance in return for an increase in bias that one hopes does not overwhelm the advantages in efficiency. In general, we suggest keeping the value of this prior relatively small and increasing it only when necessary. A recommendation of 0.5 to 1 percent of the number of observations, \(n\), is a reasonable starting value, and is often useful in large datasets to add some numerical stability. For example, in a dataset of two thousand observations, this would translate to a prior value of 10 or 20, respectively. A prior of up to 5 percent is moderate in most applications and 10 percent is a reasonable upper bound.

For our data, it is easy to code up a 1 percent ridge prior:

a.out.time2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 1,
                 intercs = TRUE, p2s = 0, empri = .01 * nrow(freetrade))
a.out.time2
## 
## Amelia output with 5 imputed datasets.
## Return code:  1 
## Message:  Normal EM convergence. 
## 
## Chain Lengths:
## --------------
## Imputation 1:  17
## Imputation 2:  17
## Imputation 3:  13
## Imputation 4:  19
## Imputation 5:  20

Observation-level priors

Researchers often have additional prior information about missing data values based on previous research, academic consensus, or personal experience. Amelia can incorporate this information to produce vastly improved imputations. The Amelia algorithm allows users to include informative Bayesian priors about individual missing data cells instead of the more general model parameters, many of which have little direct meaning.

The incorporation of priors follows basic Bayesian analysis where the imputation turns out to be a weighted average of the model-based imputation and the prior mean, where the weights are functions of the relative strength of the data and prior: when the model predicts very well, the imputation will down-weight the prior, and vice versa (Honaker and King 2010).

The priors about individual observations should describe the analyst’s belief about the distribution of the missing data cell. This can either take the form of a mean and a standard deviation or a confidence interval. For instance, we might know that 1986 tariff rates in Thailand were around 40%, but we have some uncertainty as to the exact value. Our prior belief about the distribution of the missing data cell, then, centers on 40 with a standard deviation that reflects the amount of uncertainty we have about our prior belief.

To input priors you must build a priors matrix with either four or five columns. Each row of the matrix represents a prior on either one observation or one variable. In any row, the entry in the first column is the row of the observation and the entry in the second column is the column of the observation. In the four-column priors matrix, the third and fourth columns are the mean and standard deviation of the prior distribution of the missing value.

For instance, suppose that we had some expert prior information about tariff rates in Thailand. We know from the data that Thailand is missing tariff rates in many years,

freetrade[freetrade$country == "Thailand", c("year", "country", "tariff")]
##     year  country tariff
## 153 1981 Thailand   32.3
## 154 1982 Thailand     NA
## 155 1983 Thailand     NA
## 156 1984 Thailand     NA
## 157 1985 Thailand   41.2
## 158 1986 Thailand     NA
## 159 1987 Thailand     NA
## 160 1988 Thailand     NA
## 161 1989 Thailand   40.8
## 162 1990 Thailand   39.8
## 163 1991 Thailand   37.8
## 164 1992 Thailand     NA
## 165 1993 Thailand   45.6
## 166 1994 Thailand   23.3
## 167 1995 Thailand   23.1
## 168 1996 Thailand     NA
## 169 1997 Thailand     NA
## 170 1998 Thailand   20.1
## 171 1999 Thailand   17.1

Suppose that we had expert information that tariff rates were roughly 40% in Thailand between 1986 and 1988 with about a 6% margin of error. This corresponds to a standard deviation of about 3. In order to include this information, we must form the priors matrix:

pr <- matrix(
  c(158, 159, 160, 3, 3, 3, 40, 40, 40, 3, 3, 3),
  nrow = 3, ncol = 4
)
pr
##      [,1] [,2] [,3] [,4]
## [1,]  158    3   40    3
## [2,]  159    3   40    3
## [3,]  160    3   40    3

The first column of this matrix corresponds to the row numbers of Thailand in these three years, the second column refers to the column number of tariff in the data and the last two columns refer to the actual prior. Once we have this matrix, we can pass it to amelia(),

a.out.pr <- amelia(freetrade, ts = "year", cs = "country", priors = pr)
## -- Imputation 1 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17
## 
## -- Imputation 2 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12
## 
## -- Imputation 3 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12
## 
## -- Imputation 4 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
## 
## -- Imputation 5 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14

In the five-column matrix, the last three columns describe a confidence range for the data. The columns are a lower bound, an upper bound, and a confidence level between 0 and 1, exclusive. Whichever format you choose, it must be consistent across the entire matrix. We could get roughly the same prior as above by utilizing this method. Our margin of error implies that we would want imputations between 34 and 46, so our matrix would be

pr.2 <- matrix(
  c(158, 159, 160, 3, 3, 3, 34, 34, 34, 46, 46, 46, 0.95, 0.95, 0.95),
  nrow = 3, ncol = 5
)
pr.2
##      [,1] [,2] [,3] [,4] [,5]
## [1,]  158    3   34   46 0.95
## [2,]  159    3   34   46 0.95
## [3,]  160    3   34   46 0.95

These priors indicate that we are 95% confident that these missing values are in the range 34 to 46.
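
The five-column matrix is passed to amelia() in exactly the same way as the four-column version. A minimal sketch (the object name a.out.pr2 is arbitrary; p2s = 0 only suppresses the printed progress):

a.out.pr2 <- amelia(freetrade, ts = "year", cs = "country", priors = pr.2, p2s = 0)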

If a prior has the value 0 in the first column, it will be applied to all missing values in that variable, except for cells that have their own explicitly set priors. Thus, we could set a prior of 20 (with a standard deviation of 5) for the entire tariff variable, while still keeping the specific priors above, with the following code:

pr.3 <- matrix(
  c(158, 159, 160, 0, 3, 3 , 3, 3, 40, 40, 40, 20, 3, 3, 3, 5),
  nrow = 4, ncol = 4)
pr.3
##      [,1] [,2] [,3] [,4]
## [1,]  158    3   40    3
## [2,]  159    3   40    3
## [3,]  160    3   40    3
## [4,]    0    3   20    5

Logical bounds

In some cases, variables in the social sciences have known logical bounds. Proportions must be between 0 and 1 and duration data must be greater than 0, for instance. Many of these logical bounds can be handled by using the correct transformation for that type of variable (see the section on transformations above for more details on the transformations handled by Amelia). In the occasional case that imputations must satisfy certain logical bounds not handled by these transformations, Amelia can take draws from a truncated normal distribution in order to achieve imputations that satisfy the bounds. Note, however, that this procedure imposes extremely strong restrictions on the imputations and can lead to lower variances than the imputation model implies. The mean value across all the imputed values of a missing cell is the best guess from the imputation model of that missing value. The variance of the distribution across imputed datasets correctly reflects the uncertainty in that imputation. It is often the mean imputed value that should conform to any known bounds, even if individual imputations are drawn beyond those bounds. The mean imputed value can be checked with the diagnostics presented in the next section. In general, building a more predictive imputation model will lead to better imputations than imposing bounds.

Amelia implements these bounds by rejection sampling. When drawing the imputations from their posterior, we repeatedly resample until we have a draw that satisfies all of the logical constraints. You can set an upper limit on the number of times to resample with the max.resample argument. If, after max.resample draws, the imputations are still outside the bounds, Amelia will set the imputation at the edge of the bounds. For example, if the bounds were 0 and 100 and all of the draws were negative, Amelia would simply impute 0.

As an extreme example, suppose that we know for certain that tariff rates had to fall between 30 and 40. This, obviously, is not true, but we can generate imputations from this model. In order to specify these bounds, we need to generate a matrix of bounds to pass to the bounds argument. This matrix has three columns: the first is the column number of the bounded variable, the second is the lower bound, and the third is the upper bound. Thus, to implement our bound on tariff rates (the 3rd column of the dataset), we would create the matrix,

bds <- matrix(c(3, 30, 40), nrow = 1, ncol = 3)
bds
##      [,1] [,2] [,3]
## [1,]    3   30   40

which we can pass to the bounds argument to amelia():

a.out.bds <- amelia(freetrade, ts = "year", cs = "country", bounds = bds,
                    max.resample = 1000)
## -- Imputation 1 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18
## 
## -- Imputation 2 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16
## 
## -- Imputation 3 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12
## 
## -- Imputation 4 --
## 
##   1  2  3  4  5  6  7  8  9 10 11 12 13
## 
## -- Imputation 5 --
## 
##   1  2  3  4  5  6  7  8  9 10

The difference in results between the bounded and unbounded models is not obvious from the output, but inspection of the imputed tariff rates for Malaysia shows that there has been a drastic restriction of the imputations to the desired range:

tscsPlot(a.out, cs = "Malaysia", main = "No logical bounds", var = "tariff",
         ylim = c(-10, 60))

tscsPlot(a.out.bds, cs = "Malaysia", main = "Bounded between 30 and 40",
         var = "tariff", ylim = c(-10, 60))

Again, analysts should be extremely cautious when using these bounds as they can seriously affect the inferences from the imputation model, as shown in this example. Even when logical bounds exist, we recommend simply imputing variables normally, as the violation of the logical bounds represents part of the true uncertainty of imputation.

Post-imputation Transformations

In many cases, it is useful to create transformations of the imputed variables for use in further analysis. For instance, one may want to create an interaction between two variables or perform a log-transformation on the imputed data. To do this, Amelia includes a transform() function for amelia() output that adds or overwrites variables in each of the imputed datasets. For instance, if we wanted to create a log-transformation of the gdp.pc variable, we could use the following command:

a.out <- transform(a.out, lgdp = log(gdp.pc))
head(a.out$imputations[[1]][,c("country", "year","gdp.pc", "lgdp")])
##    country year gdp.pc  lgdp
## 1 SriLanka 1981  461.0 6.133
## 2 SriLanka 1982  473.8 6.161
## 3 SriLanka 1983  489.2 6.193
## 4 SriLanka 1984  508.2 6.231
## 5 SriLanka 1985  525.6 6.264
## 6 SriLanka 1986  538.9 6.290

To create an interaction between two variables, we could simply use:

a.out <- transform(a.out, pol_gdp = polity * gdp.pc)

Each transformation is recorded and the summary() command prints out each transformation that has been performed:

summary(a.out)
## 
## Amelia output with 5 imputed datasets.
## Return code:  1 
## Message:  Normal EM convergence. 
## 
## Chain Lengths:
## --------------
## Imputation 1:  14
## Imputation 2:  14
## Imputation 3:  16
## Imputation 4:  15
## Imputation 5:  18
## 
## Rows after Listwise Deletion:  96 
## Rows after Imputation:  171 
## Patterns of missingness in the data:  8 
## 
## Fraction Missing for original variables: 
## -----------------------------------------
## 
##          Fraction Missing
## year              0.00000
## country           0.00000
## tariff            0.33918
## polity            0.01170
## pop               0.00000
## gdp.pc            0.00000
## intresmi          0.07602
## signed            0.01754
## fiveop            0.10526
## usheg             0.00000
## lgdp              0.00000
## pol_gdp           0.01170
## 
## Post-imputation transformed variables: 
## -----------------------------------------
## 
##           Transformations
## lgdp =        log(gdp.pc)
## pol_gdp = polity * gdp.pc

Note that the updated output is almost exactly the same as the original amelia() output. You can pass the transformed output back to amelia() and it will add imputations and update them with the transformations you have performed.
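
For example, re-running the imputation on the transformed object is a single call. This is only a sketch; the object name a.out.new is arbitrary, and the run reuses the settings from the original call:

a.out.new <- amelia(a.out)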

Analysis Models

Imputation is most often a data processing step rather than a final model in and of itself. To this end, it is easy to pass output from amelia() to other functions. The easiest and most integrated way to run an analysis model is to use the with() and mi.combine() functions. For example, in Milner and Kubota (2005), the dependent variable was tariff rates. We can replicate Table 5.1 from their analysis with the original data simply by running

orig.model <- lm(tariff ~ polity + pop + gdp.pc + year + country, data = freetrade)
orig.model
## 
## Call:
## lm(formula = tariff ~ polity + pop + gdp.pc + year + country, 
##     data = freetrade)
## 
## Coefficients:
##        (Intercept)              polity                 pop              gdp.pc  
##           1.97e+03           -1.37e-01           -2.02e-07            6.10e-04  
##               year    countryIndonesia        countryKorea     countryMalaysia  
##          -8.71e-01           -1.82e+02           -2.20e+02           -2.25e+02  
##       countryNepal     countryPakistan  countryPhilippines     countrySriLanka  
##          -2.16e+02           -1.55e+02           -2.04e+02           -2.09e+02  
##    countryThailand  
##          -1.96e+02

Running the same model with imputed data is almost identical. We can run the same lm() model within each imputed data set by using the with() function:

imp.models <- with(
  a.out,
  lm(tariff ~ polity + pop + gdp.pc + year + country)
)
imp.models[1:2]
## [[1]]
## 
## Call:
## lm(formula = tariff ~ polity + pop + gdp.pc + year + country)
## 
## Coefficients:
##        (Intercept)              polity                 pop              gdp.pc  
##           2.44e+03            3.54e-01           -7.78e-08           -2.41e-04  
##               year    countryIndonesia        countryKorea     countryMalaysia  
##          -1.17e+00           -7.97e+01           -9.96e+01           -1.00e+02  
##       countryNepal     countryPakistan  countryPhilippines     countrySriLanka  
##          -9.41e+01           -5.53e+01           -9.26e+01           -9.38e+01  
##    countryThailand  
##          -8.69e+01  
## 
## 
## [[2]]
## 
## Call:
## lm(formula = tariff ~ polity + pop + gdp.pc + year + country)
## 
## Coefficients:
##        (Intercept)              polity                 pop              gdp.pc  
##           2.43e+03           -1.66e-02           -1.40e-07            1.23e-03  
##               year    countryIndonesia        countryKorea     countryMalaysia  
##          -1.13e+00           -1.28e+02           -1.72e+02           -1.67e+02  
##       countryNepal     countryPakistan  countryPhilippines     countrySriLanka  
##          -1.54e+02           -1.08e+02           -1.51e+02           -1.55e+02  
##    countryThailand  
##          -1.49e+02

The result here is simply a list of the lm() output for each imputed data set. We can combine the imputed estimates using the rules described in King et al. (2001) and Schafer (1997) with the mi.combine() function:

out <- mi.combine(imp.models, conf.int = TRUE)
out
## # A tibble: 13 × 10
##    term   estimate std.e…¹ stati…² p.value    df     r miss.…³ conf.low conf.h…⁴
##    <chr>     <dbl>   <dbl>   <dbl>   <dbl> <dbl> <dbl>   <dbl>    <dbl>    <dbl>
##  1 (Inte…  2.72e+3 6.33e+2   4.31  2.21e-4 25.2  0.662   0.441  4.03e+3  1.42e+3
##  2 polity  1.61e-1 3.29e-1   0.488 6.29e-1 36.9  0.491   0.363  8.28e-1 -5.07e-1
##  3 pop    -9.80e-8 5.30e-8  -1.85  1.91e+0 12.0  1.37    0.634  1.74e-8 -2.13e-7
##  4 gdp.pc  2.20e-4 1.35e-3   0.163 8.71e-1 35.8  0.502   0.368  2.96e-3 -2.52e-3
##  5 year   -1.30e+0 3.33e-1  -3.89  2.00e+0 23.3  0.708   0.459 -6.09e-1 -1.99e+0
##  6 count… -9.72e+1 3.97e+1  -2.45  1.97e+0 10.1  1.70    0.686 -8.75e+0 -1.86e+2
##  7 count… -1.25e+2 4.82e+1  -2.59  1.97e+0  8.93 2.02    0.725 -1.54e+1 -2.34e+2
##  8 count… -1.25e+2 4.73e+1  -2.64  1.97e+0  9.85 1.76    0.694 -1.92e+1 -2.30e+2
##  9 count… -1.17e+2 4.67e+1  -2.50  1.97e+0 10.8  1.56    0.666 -1.36e+1 -2.20e+2
## 10 count… -7.34e+1 4.27e+1  -1.72  1.88e+0 10.1  1.69    0.685  2.15e+1 -1.68e+2
## 11 count… -1.13e+2 4.52e+1  -2.51  1.97e+0 10.0  1.72    0.689 -1.27e+1 -2.14e+2
## 12 count… -1.15e+2 4.75e+1  -2.41  1.96e+0 10.1  1.69    0.685 -8.80e+0 -2.20e+2
## 13 count… -1.08e+2 4.57e+1  -2.36  1.96e+0  9.74 1.79    0.697 -5.66e+0 -2.10e+2
## # … with abbreviated variable names ¹​std.error, ²​statistic, ³​miss.info,
## #   ⁴​conf.high

Combining the results depends on the broom package: results can be combined whenever a tidy() method exists for the estimation function passed to with(). Other packages such as Zelig can also combine imputed data sets across a number of statistical models. Furthermore, users can easily export their imputations using the write.amelia() function, as described in the section on saving imputations above, and use statistical packages other than R for the analysis model.
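
Because the fits stored by with() here are ordinary lm() objects, you can also call tidy() on them directly to see the per-imputation estimates that mi.combine() pools. A minimal sketch, assuming the broom package is installed:

library(broom)
# tidy() returns the term, estimate, and standard error for one imputed data set
tidy(imp.models[[1]])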

In addition to the resources available in R, users can draw on Stata to implement their analysis models. As of version 11, Stata has built-in handling of multiply imputed datasets. In order to utilize this functionality, simply export the “stacked” imputations using the write.amelia() function:

write.amelia(a.out, separate = FALSE, file.stem = "outdata", format = "dta")

Once this stacked dataset is open in Stata, you must tell Stata that it is an imputed dataset using the command:

mi import flong, m(imp) id(year country) imp(tariff-usheg)

The command takes a few options: m designates the imputation variable (set with impvar in write.amelia()), id sets the identifying variables, and imp sets the variables that were imputed (or included in the imputation). The tariff-usheg range indicates that Stata should treat the variables between tariff and usheg as imputed. Once we have set the dataset as imputed, we can use the built-in mi commands to analyze the data:

mi estimate: reg tariff polity pop gdp_pc
Multiple-imputation estimates                     Imputations     =          5
Linear regression                                 Number of obs   =        171
                                                  Average RVI     =     1.4114
                                                  Complete DF     =        167
DF adjustment:   Small sample                     DF:     min     =      10.36
                                                          avg     =      18.81
                                                          max     =      37.62
Model F test:       Equal FMI                     F(   2,   10.4) =      15.50
Within VCE type:          OLS                     Prob > F        =     0.0008

------------------------------------------------------------------------------
      tariff |      Coef.   Std. Err.      t    P>|t|     [95% Conf. Interval]
-------------+----------------------------------------------------------------
      polity |  -.2058115   .3911049    -0.53   0.610    -1.072968    .6613452
         pop |   3.21e-08   8.72e-09     3.68   0.004     1.27e-08    5.14e-08
      gdp_pc |  -.0027561    .000644    -4.28   0.000    -.0040602   -.0014519
       _cons |   32.70461   2.660091    12.29   0.000     27.08917    38.32005
------------------------------------------------------------------------------

The amelia class

The output from the amelia() function is an instance of the S3 class amelia. Instances of the amelia class contain much more than simply the imputed datasets. The mu object of the class contains the posterior draws of the means of the complete data. The covMatrices object contains the posterior draws of the covariance matrices of the complete data. Note that these correspond to the variables as they are sent to the EM algorithm; namely, they refer to the variables after being transformed, centered, and scaled.
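
As a minimal sketch of how to inspect these components (using the a.out object created above; the exact dimensions depend on which variables enter the EM step):

# posterior draws of the complete-data means, one set per imputation
str(a.out$mu)
# posterior draws of the complete-data covariance matrices
str(a.out$covMatrices)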

The iterHist object is a list of m 3-column matrices. Each row of the matrices corresponds to an iteration of the EM algorithm. The first column indicates how many parameters had yet to converge at that iteration. The second column indicates if the EM algorithm made a step that decreased the number of converged parameters. The third column indicates whether the covariance matrix at this iteration was singular. Clearly, the last two columns are meant to indicate when the EM algorithm enters a problematic part of the parameter space.
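
A short sketch of how you might scan the iteration history for these warning signs, assuming the columns are stored as 0/1 indicators as described above:

# each element of iterHist is a 3-column matrix with one row per EM iteration
head(a.out$iterHist[[1]])
# flag imputations where EM ever lost converged parameters or hit a singular covariance
sapply(a.out$iterHist, function(h) any(h[, 2] != 0 | h[, 3] != 0))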

References

Honaker, James, and Gary King. 2010. “What to Do about Missing Values in Time Series Cross-Section Data.” American Journal of Political Science 54 (2): 561–81.
King, Gary, James Honaker, Anne Joseph, and Kenneth Scheve. 2001. “Analyzing Incomplete Political Science Data: An Alternative Algorithm for Multiple Imputation.” American Political Science Review 95 (1): 49–69.
Milner, Helen, and Keiko Kubota. 2005. “Why the Move to Free Trade? Democracy and Trade Policy in the Developing Countries.” International Organization 59 (1): 107–43.
Schafer, Joseph L. 1997. Analysis of Incomplete Multivariate Data. London: Chapman & Hall.

  1. We have artificially added some missingness to these data for presentational purposes. You can access the original data at https://scholar.princeton.edu/hvmilner/data.↩︎

  2. The Polity score is a number between -10 and 10 indicating how democratic a country is. A fully autocratic country would be a -10, while a fully democratic country would be a 10.↩︎

  3. This measure of US hegemony is the US imports and exports as a percent of the world total imports and exports.↩︎

  4. Dichotomous (two category) variables are a special case of nominal variables. For these variables, the nominal and ordinal methods of transformation in Amelia agree.↩︎

Amelia/inst/doc/intro-mi.html0000644000176200001440000004471714335756315015641 0ustar liggesusers Introduction to Multiple Imputation

Introduction to Multiple Imputation

2022-11-18

Introduction

Missing data is a ubiquitous problem in social science data. Respondents do not answer every question, countries do not collect statistics every year, archives are incomplete, subjects drop out of panels. Most statistical analysis methods, however, assume the absence of missing data, and are only able to include observations for which every variable is measured. Amelia allows users to impute (“fill in” or rectangularize) incomplete data sets so that analyses which require complete observations can appropriately use all the information present in a dataset with missingness, and avoid the biases, inefficiencies, and incorrect uncertainty estimates that can result from dropping all partially observed observations from the analysis.

Amelia performs multiple imputation, a general-purpose approach to data with missing values. Multiple imputation has been shown to reduce bias and increase efficiency compared to listwise deletion. Furthermore, ad-hoc methods of imputation, such as mean imputation, can lead to serious biases in variances and covariances. Unfortunately, creating multiple imputations can be a burdensome process due to the technical nature of the algorithms involved. Amelia provides users with a simple way to create and implement an imputation model, generate imputed datasets, and check its fit using diagnostics.

The Amelia program goes several significant steps beyond the capabilities of the first version of Amelia (Honaker et al. 1998-2002). For one, the bootstrap-based EMB algorithm included in Amelia can impute many more variables, with many more observations, in much less time. The great simplicity and power of the EMB algorithm made it possible to write Amelia so that it virtually never crashes — which to our knowledge makes it unique among all existing multiple imputation software — and is much faster than the alternatives too. Amelia also has features to make valid and much more accurate imputations for cross-sectional, time-series, and time-series-cross-section data, and allows the incorporation of observation and data-matrix-cell level prior information. In addition to all of this, Amelia provides many diagnostic functions that help users check the validity of their imputation model. This software implements the ideas developed in Honaker and King (2010).

What Amelia Does

Multiple imputation involves imputing \(m\) values for each missing cell in your data matrix and creating \(m\) “completed” data sets. Across these completed data sets, the observed values are the same, but the missing values are filled in with a distribution of imputations that reflect the uncertainty about the missing data. After imputation with Amelia’s EMB algorithm, you can apply whatever statistical method you would have used if there had been no missing values to each of the \(m\) data sets, and use a simple procedure, described below, to combine the results1. Under normal circumstances, you only need to impute once and can then analyze the \(m\) imputed data sets as many times and for as many purposes as you wish. The advantage of Amelia is that it combines the comparative speed and ease-of-use of our algorithm with the power of multiple imputation, to let you focus on your substantive research questions rather than spending time developing complex application-specific models for nonresponse in each new data set. Unless the rate of missingness is very high, \(m = 5\) (the program default) is probably adequate.

Assumptions

The imputation model in Amelia assumes that the complete data (that is, both observed and unobserved) are multivariate normal. If we denote the \((n \times k)\) dataset as \(D\) (with observed part \(D^{obs}\) and unobserved part \(D^{mis}\)), then this assumption is

\[\begin{equation} D \sim \mathcal{N}_k(\mu, \Sigma), \end{equation}\]

which states that \(D\) has a multivariate normal distribution with mean vector \(\mu\) and covariance matrix \(\Sigma\). The multivariate normal distribution is often a crude approximation to the true distribution of the data, yet there is evidence that this model works as well as other, more complicated models, even in the face of categorical or mixed data (Schafer and Olsen 1998). Furthermore, transformations of many types of variables can often make this normality assumption more plausible (see the discussion of transformations in the Amelia vignette for more information on how to implement this in Amelia).

The essential problem of imputation is that we only observe \(D^{obs}\), not the entirety of \(D\). In order to gain traction, we need to make the usual assumption in multiple imputation that the data are missing at random (MAR). This assumption means that the pattern of missingness only depends on the observed data \(D^{obs}\), not the unobserved data \(D^{mis}\). Let \(M\) be the missingness matrix, with cells \(m_{ij} = 1\) if \(d_{ij} \in D^{mis}\) and \(m_{ij} = 0\) otherwise. Put simply, \(M\) is a matrix that indicates whether or not a cell is missing in the data. With this, we can define the MAR assumption as

\[ p(M|D) = p(M|D^{obs}). \]

Note that MAR includes the case when missing values are created randomly by, say, coin flips, but it also includes many more sophisticated missingness models. When missingness is not dependent on the data at all, we say that the data are missing completely at random (MCAR). Amelia requires both the multivariate normality and the MAR assumption (or the simpler special case of MCAR). Note that the MAR assumption can be made more plausible by including additional variables in the dataset \(D\) in the imputation dataset than just those eventually envisioned to be used in the analysis model.
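
As a concrete illustration, the missingness matrix is easy to construct in R. This is only a sketch, using the freetrade data shipped with the Amelia package:

library(Amelia)
data(freetrade)
# M has a 1 wherever a cell of the data is missing and a 0 otherwise
M <- 1 * is.na(freetrade)
M[1:4, c("tariff", "polity", "pop")]
# fraction of missing cells in each variable
colMeans(M)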

Algorithm

In multiple imputation, we are concerned with the complete-data parameters, \(\theta = (\mu, \Sigma)\). When writing down a model of the data, it is clear that our observed data is actually \(D^{obs}\) and \(M\), the missingness matrix. Thus, the likelihood of our observed data is \(p(D^{obs}, M|\theta)\). Using the MAR assumption, we can break this up,

\[\begin{align} p(D^{obs},M|\theta) = p(M|D^{obs})p(D^{obs}|\theta). \end{align}\]

As we only care about inference on the complete data parameters, we can write the likelihood as

\[\begin{align} L(\theta|D^{obs}) &\propto p(D^{obs}|\theta), \end{align}\]

which we can rewrite using the law of iterated expectations as

\[\begin{align} p(D^{obs}|\theta) &= \int p(D|\theta) dD^{mis}. \end{align}\]

With this likelihood and a flat prior on \(\theta\), we can see that the posterior is

\[\begin{equation} p(\theta | D^{obs}) \propto p(D^{obs}|\theta) = \int p(D|\theta) dD^{mis}. \end{equation}\]

The main computational difficulty in the analysis of incomplete data is taking draws from this posterior. The EM algorithm (Dempster, Laird, and Rubin 1977) is a simple computational approach to finding the mode of the posterior. Our EMB algorithm combines the classic EM algorithm with a bootstrap approach to take draws from this posterior. For each draw, we bootstrap the data to simulate estimation uncertainty and then run the EM algorithm to find the mode of the posterior for the bootstrapped data, which gives us fundamental uncertainty too (see Honaker and King 2010 for details of the EMB algorithm).

Once we have draws of the posterior of the complete-data parameters, we make imputations by drawing values of \(D^{mis}\) from its distribution conditional on \(D^{obs}\) and the draws of \(\theta\), which is a linear regression with parameters that can be calculated directly from \(\theta\).

Analysis

In order to combine the results across \(m\) data sets, first decide on the quantity of interest to compute, such as a univariate mean, regression coefficient, predicted probability, or first difference. Then, the easiest way is to draw \(1/m\) simulations of \(q\) from each of the \(m\) data sets, combine them into one set of \(m\) simulations, and then use the standard simulation-based methods of interpretation common for single data sets (King, Tomz, and Wittenberg 2000).

Alternatively, you can combine directly and use as the multiple imputation estimate of this parameter, \(\bar{q}\), the average of the \(m\) separate estimates, \(q_j\) \((j=1,\dots,m)\):

\[\begin{equation} \bar{q}=\frac{1}{m}\sum^{m}_{j=1}q_j. \end{equation}\]

The variance of the point estimate is the average of the estimated variances from within each completed data set, plus the sample variance in the point estimates across the data sets (multiplied by a factor that corrects for the bias because \(m<\infty\)). Let \(SE(q_j)^2\) denote the estimated variance (squared standard error) of \(q_j\) from the data set \(j\), and \(S^{2}_{q}=\Sigma^{m}_{j=1}(q_j-\bar{q})^2/(m-1)\) be the sample variance across the \(m\) point estimates. The standard error of the multiple imputation point estimate is the square root of

\[\begin{equation} SE(q)^2=\frac{1}{m}\sum^{m}_{j=1}SE(q_j)^2+S^2_q(1+1/m). \end{equation}\]
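
These combination rules are straightforward to compute by hand. The sketch below uses hypothetical values, where est and se are length-\(m\) vectors of point estimates and standard errors from the \(m\) separate analyses:

est <- c(1.10, 0.95, 1.02, 1.08, 0.99)   # hypothetical point estimates, m = 5
se  <- c(0.21, 0.19, 0.22, 0.20, 0.21)   # hypothetical standard errors
m <- length(est)
q.bar <- mean(est)                                   # multiple imputation point estimate
se.q  <- sqrt(mean(se^2) + var(est) * (1 + 1 / m))   # combined standard error
c(estimate = q.bar, std.error = se.q)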

References

Dempster, Arthur P., N. M. Laird, and D. B. Rubin. 1977. “Maximum Likelihood Estimation from Incomplete Data via the Em Algorithm.” Journal of the Royal Statistical Society B 39: 1–38.
Honaker, James, Anne Joseph, Gary King, Kenneth Scheve, and Naunihal Singh. 1998-2002. “Amelia: A Program for Missing Data.”
Honaker, James, and Gary King. 2010. “What to Do about Missing Values in Time Series Cross-Section Data.” American Journal of Political Science 54 (2): 561–81.
King, Gary, Michael Tomz, and Jason Wittenberg. 2000. “Making the Most of Statistical Analyses: Improving Interpretation and Presentation.” American Journal of Political Science 44 (2): 341–55.
Schafer, Joseph L. 1997. Analysis of Incomplete Multivariate Data. London: Chapman & Hall.
Schafer, Joseph L., and Maren K. Olsen. 1998. “Multiple Imputation for Multivariate Missing-Data Problems: A Data Analyst’s Perspective.” Multivariate Behavioral Research 33 (4): 545–71.

  1. You can combine the results automatically by doing your data analyses within Zelig for R, or within Clarify for Stata.↩︎

Amelia/inst/doc/using-amelia.R0000644000176200001440000001531114335756316015700 0ustar liggesusers## ----setup, echo = FALSE, include = FALSE------------------------------------- knitr::opts_chunk$set(fig.width = 5, fig.height = 4, fig.align = "center") options(digits = 4, show.signif.stars = FALSE) set.seed(12345) ## ----load_data, results = "hide"---------------------------------------------- library(Amelia) data(freetrade) ## ----summarize_data----------------------------------------------------------- summary(freetrade) ## ----mk_lm-------------------------------------------------------------------- summary(lm(tariff ~ polity + pop + gdp.pc + year + country, data = freetrade)) ## ----amelia------------------------------------------------------------------- a.out <- amelia(freetrade, m = 5, ts = "year", cs = "country") a.out ## ----------------------------------------------------------------------------- hist(a.out$imputations[[3]]$tariff, col = "grey", border = "white") ## ----save, eval = FALSE------------------------------------------------------- # save(a.out, file = "imputations.RData") ## ----write_amelia, eval = FALSE----------------------------------------------- # write.amelia(obj = a.out, file.stem = "outdata") ## ----write_dta, eval = FALSE-------------------------------------------------- # write.amelia(obj = a.out, file.stem = "outdata", format = "dta") ## ----more_amelia-------------------------------------------------------------- a.out.more <- amelia(freetrade, m = 10, ts = "year", cs = "country", p2s = 0) a.out.more ## ----ameliabind--------------------------------------------------------------- a.out.more <- ameliabind(a.out, a.out.more) a.out.more ## ----rand_stem, eval = FALSE-------------------------------------------------- # b <- round(runif(1, min = 1111, max = 9999)) # random.name <- paste("am", b, sep = "") # amelia <- write.amelia(obj = a.out, file.stem = random.name) ## ----p2s---------------------------------------------------------------------- a.out.p2s <- amelia(freetrade, m = 1, ts = "year", cs = "country", p2s = 2) ## ----polity_tab--------------------------------------------------------------- table(a.out$imputations[[3]]$polity) ## ----polity_ord--------------------------------------------------------------- a.out1 <- amelia(freetrade, m = 5, ts = "year", cs = "country", ords = "polity", p2s = 0) table(a.out1$imputations[[3]]$polity) ## ----binary_tab--------------------------------------------------------------- table(a.out1$imputations[[3]]$signed) ## ----noms--------------------------------------------------------------------- a.out2 <- amelia(freetrade, m = 5, ts = "year", cs = "country", noms = "signed", p2s = 0) table(a.out2$imputations[[3]]$signed) ## ----tarrif_hist-------------------------------------------------------------- hist(freetrade$tariff, col="grey", border="white") hist(log(freetrade$tariff), col="grey", border="white") ## ----idvars------------------------------------------------------------------- amelia(freetrade, idvars = c("year", "country")) ## ----idvars_error------------------------------------------------------------- a.out2 <- amelia(freetrade, idvars = c("year")) ## ----polytime, results = "hide"----------------------------------------------- a.out2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 2) ## ----intercs, results = "hide"------------------------------------------------ a.out.time <- amelia(freetrade, ts = "year", cs = "country", polytime = 1, intercs = TRUE, p2s = 2) ## 
----tcomp1------------------------------------------------------------------- tscsPlot(a.out, cs = "Malaysia", main = "Malaysia (no time settings)", var = "tariff", ylim = c(-10, 60)) tscsPlot(a.out.time, cs = "Malaysia", main = "Malaysia (with time settings)", var = "tariff", ylim = c(-10, 60)) ## ----lags_leads--------------------------------------------------------------- a.out2 <- amelia(freetrade, ts = "year", cs = "country", lags = "tariff", leads = "tariff") ## ----empri-------------------------------------------------------------------- a.out.time2 <- amelia(freetrade, ts = "year", cs = "country", polytime = 1, intercs = TRUE, p2s = 0, empri = .01 * nrow(freetrade)) a.out.time2 ## ----thailand----------------------------------------------------------------- freetrade[freetrade$country == "Thailand", c("year", "country", "tariff")] ## ----build_prior-------------------------------------------------------------- pr <- matrix( c(158, 159, 160, 3, 3, 3, 40, 40, 40, 3, 3, 3), nrow = 3, ncol = 4 ) pr ## ----amelia_prior------------------------------------------------------------- a.out.pr <- amelia(freetrade, ts = "year", cs = "country", priors = pr) ## ----build_prior2------------------------------------------------------------- pr.2 <- matrix( c(158, 159, 160, 3, 3, 3, 34, 34, 34, 46, 46, 46, 0.95, 0.95, 0.95), nrow = 3, ncol = 5 ) pr.2 ## ----build_prior3------------------------------------------------------------- pr.3 <- matrix( c(158, 159, 160, 0, 3, 3 , 3, 3, 40, 40, 40, 20, 3, 3, 3, 5), nrow = 4, ncol = 4) pr.3 ## ----build_bounds------------------------------------------------------------- bds <- matrix(c(3, 30, 40), nrow = 1, ncol = 3) bds ## ----amelia_bounds------------------------------------------------------------ a.out.bds <- amelia(freetrade, ts = "year", cs = "country", bounds = bds, max.resample = 1000) ## ----bounds_plot-------------------------------------------------------------- tscsPlot(a.out, cs = "Malaysia", main = "No logical bounds", var = "tariff", ylim = c(-10, 60)) tscsPlot(a.out.bds, cs = "Malaysia", main = "Bounded between 30 and 40", var = "tariff", ylim = c(-10, 60)) ## ----amelia_transform--------------------------------------------------------- a.out <- transform(a.out, lgdp = log(gdp.pc)) head(a.out$imputations[[1]][,c("country", "year","gdp.pc", "lgdp")]) ## ----interaction-------------------------------------------------------------- a.out <- transform(a.out, pol_gdp = polity * gdp.pc) ## ----sum_trans---------------------------------------------------------------- summary(a.out) ## ----lm_lwd------------------------------------------------------------------- orig.model <- lm(tariff ~ polity + pop + gdp.pc + year + country, data = freetrade) orig.model ## ----lm_imp------------------------------------------------------------------- imp.models <- with( a.out, lm(tariff ~ polity + pop + gdp.pc + year + country) ) imp.models[1:2] ## ----mi_combine--------------------------------------------------------------- out <- mi.combine(imp.models, conf.int = TRUE) out ## ----write_dta_stacked, eval = FALSE------------------------------------------ # write.amelia(a.out, separate = FALSE, file.stem = "outdata", format = "dta") Amelia/inst/CITATION0000644000176200001440000000150714335240021013551 0ustar liggesuserscitHeader("To cite Amelia in publications use:") bibentry( bibtype = "Article", title = "{Amelia II}: A Program for Missing Data", author = c(person("James", "Honaker", email = "james@hona.kr", role="aut"), person("Gary", "King", 
email="king@harvard.edu", role="aut"), person("Matthew", "Blackwell", email="mblackwell@gov.harvard.edu", role="aut")), journal = "Journal of Statistical Software", year = "2011", volume = "45", number = "7", pages = "1--47", doi = "10.18637/jss.v045.i07", textVersion = paste("James Honaker, Gary King, Matthew Blackwell (2011).", "Amelia II: A Program for Missing Data.", "Journal of Statistical Software, 45(7), 1-47.", "URL https://www.jstatsoft.org/v45/i07/.") ) Amelia/inst/test/0000755000176200001440000000000014335240021013370 5ustar liggesusersAmelia/inst/test/transform.R0000644000176200001440000000132214335240021015524 0ustar liggesuserslibrary(Amelia) data(africa) a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") a.out2 <- transform(a.out, lgdppc = log(gdp_pc), newinfl = infl*100) a.out2 <- transform(a.out2, newclib = civlib *100, newtrade = trade/100) summary(a.out2) summary(transform(a.out, lgdppc = log(gdp_pc), newinfl = infl*100)) a.out3 <- amelia(a.out2) a.out4 <- amelia(a.out) africa <- transform(africa, ivar = gdp_pc * trade) a.out <- amelia(x = africa, cs = "country", ts = "year", logs = "gdp_pc") a.out2 <- transform(a.out, ivar = gdp_pc *trade, lgdppc = log(gdp_pc)) summary(a.out2) a.out3 <- amelia(a.out2) compare.density compare.density(a.out2, "lgdppc")