tmvtnorm/0000755000176200001440000000000014533645532012152 5ustar liggesuserstmvtnorm/NAMESPACE0000644000176200001440000000074214216070274013365 0ustar liggesusersimportFrom("methods", "as")
useDynLib(tmvtnorm)
import(stats)
import(utils)
import(mvtnorm)
import(stats4)
import(gmm)
import(Matrix)
export(ptmvnorm)
export(rtmvnorm)
export(rtmvnorm2)
export(rtmvnorm.sparseMatrix)
export(dtmvnorm)
export(dtmvnorm.marginal)
export(dtmvnorm.marginal2)
export(qtmvnorm.marginal)
export(ptmvnorm.marginal)
export(mtmvnorm)
export(dtmvt)
export(rtmvt)
export(ptmvt)
export(ptmvt.marginal)
export(mle.tmvnorm)
export(gmm.tmvnorm)tmvtnorm/demo/0000755000176200001440000000000014360222632013064 5ustar liggesuserstmvtnorm/demo/demo1.R0000644000176200001440000000376011212736062014222 0ustar liggesusersrequire(tmvtnorm)
library(utils)

# Example 1 from Horrace (2005)
x1 <- seq(-2, 3, by=0.1)
x2 <- seq(-2, 3, by=0.1)

density <- function(x)
{
  sigma = matrix(c(1, -0.5, -0.5, 1), 2, 2)
  z = dtmvnorm(x, mean=c(0,0), sigma=sigma, lower=c(-1,-1))
  z
}

fgrid <- function(x, y, f)
{
  z <- matrix(nrow=length(x), ncol=length(y))
  for(m in 1:length(x)){
    for(n in 1:length(y)){
      z[m,n] <- f(c(x[m], y[n]))
    }
  }
  z
}

# compute the density function
d = fgrid(x1, x2, density)

# plot the density function as a contour plot
contour(x1, x2, d, nlevels=5, main="Truncated Multivariate Normal Density",
  xlab=expression(x[1]), ylab=expression(x[2]))
abline(v=-1, lty=3, lwd=2)
abline(h=-1, lty=3, lwd=2)

# Example 2:
X = rtmvnorm(n=100, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2),
  lower=c(-Inf,-Inf), upper=c(0,0))
plot(X, xlim=c(-3,3), ylim=c(-3,3),
  main="Samples from Multivariate Normal Distribution",
  xlab=expression(x[1]), ylab=expression(x[2]))
abline(v=0, lty=2)
abline(h=0, lty=2)

# Example 3: Profiling of rejection sampling: 10000 samples ~ 0.8 seconds
Rprof("rtmvnorm.out")
X = rtmvnorm(n=10000, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2),
  lower=c(-Inf,-Inf), upper=c(0,0))
Rprof(NULL)
summaryRprof("rtmvnorm.out")

# Example 4: Profiling of Gibbs sampling: 10000 samples ~ 0.8 seconds
Rprof("rtmvnorm.gibbs.out")
m = 10
a = rep(-1, m)
b = rep(1, m)

# create the mean vector and the covariance matrix
mu    = rep(0, m)
sigma = matrix(0.8, m, m)
diag(sigma) = rep(1, m)

# compute the acceptance rate
alpha = pmvnorm(lower=a, upper=b, mean=mu, sigma=sigma)
alpha

X = rtmvnorm(n=10000, mean=mu, sigma=sigma, lower=a, upper=b, algorithm="gibbs")
Rprof(NULL)
summaryRprof("rtmvnorm.gibbs.out")

# Sampling from the non-truncated normal distribution: 10000 samples ~ 0.02 seconds
Rprof("rmvnorm.out")
X = rmvnorm(n=10000, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2))
Rprof(NULL)
summaryRprof("rmvnorm.out")
tmvtnorm/demo/00Index0000644000176200001440000000017311163722064014221 0ustar liggesusersdemo1   truncated multivariate normal densities
demo2   3D scatterplot from a truncated trivariate normal distribution
tmvtnorm/demo/demo2.R0000644000176200001440000000112511163723476014226 0ustar liggesuserslibrary(tmvtnorm)
library(rgl)

# simulate x1, x2, x3 from a truncated multivariate normal distribution
sigma = matrix(c(1, 0, 0,
                 0, 1, 0,
                 0, 0, 1), 3, 3)

# not truncated
X = rmvnorm(n=2000, mean=c(0,0,0), sigma=sigma)

# truncated
X2 = rtmvnorm(n=2000, mean=c(0,0,0), sigma=sigma,
  lower=c(-Inf,-Inf,-Inf),
  upper=c(0,1,Inf))

# display as 3D scatterplot
open3d()
plot3d(X[,1], X[,2], X[,3], col="black", size=2,
  xlab=expression(x[1]), ylab=expression(x[2]), zlab=expression(x[3]))
plot3d(X2[,1], X2[,2], X2[,3], col="red", size=2, add=TRUE)
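
# Added comparison (not part of the original demo): check the simulated
# moments of the truncated sample X2 against the analytical moments
# computed by mtmvnorm() from this package
moments <- mtmvnorm(mean=c(0,0,0), sigma=sigma,
  lower=c(-Inf,-Inf,-Inf), upper=c(0,1,Inf))
colMeans(X2)   # should be close to moments$tmean
moments$tmean
cov(X2)        # should be close to moments$tvar
moments$tvar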
tmvtnorm/README.md0000644000176200001440000000123414216073376013430 0ustar liggesusers# tmvtnorm

[![CRAN](http://www.r-pkg.org/badges/version/tmvtnorm)](https://cran.r-project.org/package=tmvtnorm)

### tmvtnorm: A package for the Truncated Multivariate Normal Distribution

This package contains a number of useful methods for the truncated multivariate normal distribution.
It covers random number generation with rejection and Gibbs sampling, the computation of marginal
densities as well as the computation of the mean and covariance of the truncated variables.

For a more detailed introduction, see the R Journal (2010) paper
[tmvtnorm: A Package for the Truncated Multivariate Normal Distribution](https://doi.org/10.32614/RJ-2010-005).
tmvtnorm/man/0000755000176200001440000000000014360222632012713 5ustar liggesuserstmvtnorm/man/mtmvnorm.Rd0000644000176200001440000000635614216101440015064 0ustar liggesusers\name{mtmvnorm}
\alias{mtmvnorm}
\alias{moments}
\title{Computation of Mean Vector and Covariance Matrix For Truncated Multivariate Normal Distribution}
\description{
  Computation of the first two moments, i.e. the mean vector and the covariance matrix,
  of the Truncated Multivariate Normal Distribution
  based on the works of Tallis (1961), Lee (1979) and Leppard and Tallis (1989),
  but extended to the double-truncated case with general mean and general covariance matrix.
}
\usage{
mtmvnorm(mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep(Inf, length = length(mean)),
  doComputeVariance=TRUE,
  pmvnorm.algorithm=GenzBretz())
}
\arguments{
 \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.}
 \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.}
 \item{lower}{Vector of lower truncation points,\\
   default is \code{rep(-Inf, length = length(mean))}.}
 \item{upper}{Vector of upper truncation points,\\
   default is \code{rep( Inf, length = length(mean))}.}
 \item{doComputeVariance}{flag whether to compute the variance,
   intended for users who are interested only in the mean.
   Defaults to \code{TRUE} for backward compatibility.}
 \item{pmvnorm.algorithm}{Algorithm used for \code{\link[mvtnorm]{pmvnorm}}}
}
\details{
  Details for the moment calculation under double truncation and the derivation of the formula
  can be found in the Manjunath/Wilhelm (2009) working paper.
  If only a subset of variables are truncated, we calculate the truncated moments only for these
  and use the Johnson/Kotz formula for the remaining untruncated variables.
}
\value{
  \item{tmean}{Mean vector of truncated variables}
  \item{tvar}{Covariance matrix of truncated variables}
}
\references{
  Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution.
  \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229

  Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions.
  \emph{Wiley & Sons}, pp. 70--73

  Lee, L.-F. (1979). On the first and second moments of the truncated multi-normal distribution
  and a simple estimator. \emph{Economics Letters}, \bold{3}, 165--169

  Leppard, P. and Tallis, G. M. (1989). Evaluation of the Mean and Covariance of the Truncated Multinormal.
  \emph{Applied Statistics}, \bold{38}, 543--553

  Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density.
  Working Paper.
  Available at SSRN: \url{https://www.ssrn.com/abstract=1472153}
}
\author{Stefan Wilhelm, Manjunath B G}
\examples{
mu    <- c(0.5, 0.5, 0.5)
sigma <- matrix(c(1,   0.6, 0.3,
                  0.6, 1,   0.2,
                  0.3, 0.2, 2), 3, 3)
a <- c(-Inf, -Inf, -Inf)
b <- c(1, 1, 1)

# compute first and second moments
mtmvnorm(mu, sigma, lower=a, upper=b)

# compare with simulated results
X <- rtmvnorm(n=1000, mean=mu, sigma=sigma, lower=a, upper=b)
colMeans(X)
cov(X)
}
\keyword{distribution}
\keyword{multivariate}
tmvtnorm/man/rtmvnorm2.Rd0000644000176200001440000000735712063442312015161 0ustar liggesusers\name{rtmvnorm2}
\alias{rtmvnorm2}
\title{Sampling Random Numbers From The Truncated Multivariate Normal Distribution
  With Linear Constraints}
\description{
  This function generates random numbers from the truncated multivariate normal
  distribution with mean equal to \code{mean} and covariance matrix \code{sigma}
  and general linear constraints
  \deqn{lower \le D x \le upper}{lower <= D x <= upper}
  with either rejection sampling or Gibbs sampling.
}
\usage{
rtmvnorm2(n, mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep(Inf, length = length(mean)),
  D = diag(length(mean)),
  algorithm = c("gibbs", "gibbsR", "rejection"), ...)
}
\arguments{
  \item{n}{Number of random points to be sampled. Must be an integer \eqn{\ge 1}{>= 1}.}
  \item{mean}{Mean vector (d x 1), default is \code{rep(0, length = ncol(x))}.}
  \item{sigma}{Covariance matrix (d x d), default is \code{diag(ncol(x))}.}
  \item{lower}{Vector of lower truncation points (r x 1),
    default is \code{rep(-Inf, length = length(mean))}.}
  \item{upper}{Vector of upper truncation points (r x 1),
    default is \code{rep( Inf, length = length(mean))}.}
  \item{D}{Matrix for linear constraints (r x d), defaults to the diagonal matrix (d x d), i.e. r = d.}
  \item{algorithm}{Method used, possible methods are the Fortran Gibbs sampler ("gibbs", default),
   the Gibbs sampler implementation in R ("gibbsR") and rejection sampling ("rejection")}
  \item{\dots}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvnorm.gibbs()},
               such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details in \code{\link{rtmvnorm}}}
}
\details{
  This method allows for \eqn{r > d}{r > d} linear constraints, whereas \code{\link{rtmvnorm}}
  requires a full-rank matrix D \eqn{(d \times d)}{(d x d)} and can only handle \eqn{r \le d}{r <= d} constraints at the moment.
  The lower and upper bounds \code{lower} and \code{upper} are \eqn{(r \times 1)}{(r x 1)},
  the matrix \code{D} is \eqn{(r \times d)}{(r x d)} and x is \eqn{(d \times 1)}{(d x 1)}.
  The default case is \eqn{r = d}{r = d} and \eqn{D = I_d}{D = I_d}.
}
\section{Warning}{This method will be merged with \code{\link{rtmvnorm}} in one of the next releases.}
\author{Stefan Wilhelm}
\seealso{
 \code{\link{rtmvnorm}}
}
\examples{
\dontrun{
################################################################################
#
# Example 5a: Number of linear constraints r > dimension d
#
################################################################################

# general linear restrictions a <= Dx <= b with x (d x 1); D (r x d); a,b (r x 1)

# Dimension d=2, r=3 linear constraints
#
# a1 <= x1 + x2    <= b1
# a2 <= x1 - x2    <= b2
# a3 <= 0.5x1 - x2 <= b3
#
# [ a1 ] <= [ 1    1 ] [ x1 ] <= [b1]
# [ a2 ]    [ 1   -1 ] [ x2 ]    [b2]
# [ a3 ]    [ 0.5 -1 ]           [b3]

D <- matrix(
      c(  1,  1,
          1, -1,
        0.5, -1), 3, 2, byrow=TRUE)
a <- c(0, 0, 0)
b <- c(1, 1, 1)

# mark linear constraints as lines
plot(NA, xlim=c(-0.5, 1.5), ylim=c(-1,1))
for (i in 1:3) {
  abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red")
  abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red")
}

### Gibbs sampling for general linear constraints a <= Dx <= b
mean  <- c(0, 0)
sigma <- matrix(c(1.0, 0.2,
                  0.2, 1.0), 2, 2)
x0 <- c(0.5, 0.2)  # Gibbs sampler start value
X  <- rtmvnorm2(n=1000, mean, sigma, lower=a, upper=b, D, start.value=x0)

# show random points within the constraint region
points(X, pch=20, col="black")
}
}
\keyword{distribution}
\keyword{multivariate}
tmvtnorm/man/dmvnorm.marginal.Rd0000644000176200001440000001257514532763577016502 0ustar liggesusers% --- Source file: dtmvnorm-marginal.Rd ---
\name{dtmvnorm.marginal}
\alias{dtmvnorm.marginal}
\title{One-dimensional marginal density functions from a Truncated Multivariate Normal distribution}
\description{
  This function computes the one-dimensional marginal density function
  from a Truncated Multivariate Normal density function
  using the algorithm given in Cartinhour (1990).
}
\usage{
dtmvnorm.marginal(xn, n=1,
  mean= rep(0, nrow(sigma)),
  sigma=diag(length(mean)),
  lower=rep(-Inf, length = length(mean)),
  upper=rep( Inf, length = length(mean)),
  log=FALSE)
}
\arguments{
 \item{xn}{Vector of quantiles to calculate the marginal density for.}
 \item{n}{Index position (1..k) within the random vector x to calculate the one-dimensional marginal density for.}
 \item{mean}{Mean vector, default is \code{rep(0, length = nrow(sigma))}.}
 \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.}
 \item{lower}{Vector of lower truncation points,\\
   default is \code{rep(-Inf, length = length(mean))}.}
 \item{upper}{Vector of upper truncation points,\\
   default is \code{rep( Inf, length = length(mean))}.}
 \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).}
}
\details{
 The one-dimensional marginal density \eqn{f_i(x_i)} of \eqn{x_i} is
 \deqn{f_i(x_i) = \int_{a_1}^{b_1} \ldots \int_{a_{i-1}}^{b_{i-1}} \int_{a_{i+1}}^{b_{i+1}} \ldots \int_{a_k}^{b_k} f(x) dx_{-i}}
 Note that the one-dimensional marginal density is not truncated normal;
 only the conditional densities are truncated normal.
}
\author{Stefan Wilhelm}
\references{
Cartinhour, J. (1990). One-dimensional marginal density functions of a truncated multivariate normal density function.
\emph{Communications in Statistics - Theory and Methods}, \bold{19}, 197--203

Arnold et al. (1993). The Nontruncated Marginal of a Truncated Bivariate Normal Distribution.
\emph{Psychometrika}, \bold{58}, 471--488
}
\examples{
#############################################
#
# Example 1: truncated bivariate normal
#
#############################################

# parameters of the bivariate normal distribution
sigma = matrix(c(1,    0.95,
                 0.95, 1), 2, 2)
mu = c(0,0)

# sample from multivariate normal distribution
X = rmvnorm(5000, mu, sigma)

# truncation in x2 with x2 <= 0
X.trunc = X[X[,2]<0,]

# plot the realisations before and after truncation
par(mfrow=c(2,2))
plot(X, col="gray", xlab=expression(x[1]), ylab=expression(x[2]),
  main="realisations from a\n truncated bivariate normal distribution")
points(X.trunc)
abline(h=0, lty=2, col="gray")

# marginal density for x1 from realisations
plot(density(X.trunc[,1]), main=expression("marginal density for "*x[1]))

# one-dimensional marginal density for x1 using the formula
x <- seq(-5, 5, by=0.01)
fx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma,
  lower=c(-Inf,-Inf), upper=c(Inf,0))
lines(x, fx, lwd=2, col="red")

# marginal density for x2
plot(density(X.trunc[,2]), main=expression("marginal density for "*x[2]))

# one-dimensional marginal density for x2 using the formula
x <- seq(-5, 5, by=0.01)
fx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma,
  lower=c(-Inf,-Inf), upper=c(Inf,0))
lines(x, fx, lwd=2, col="blue")

#############################################
#
# Example 2: truncated trivariate normal
#
#############################################

# parameters of the trivariate normal distribution
sigma = outer(1:3,1:3,pmin)
mu = c(0,0,0)

# sample from multivariate normal distribution
X = rmvnorm(2000, mu, sigma)

# truncation in x2 and x3: x2 <= 0, x3 <= 0
X.trunc = X[X[,2]<=0 & X[,3]<=0,]

par(mfrow=c(2,3))
plot(X, col="gray", xlab=expression(x[1]), ylab=expression(x[2]),
  main="realisations from a\n truncated trivariate normal distribution")
points(X.trunc, col="black")
abline(h=0, lty=2, col="gray")

plot(X[,2:3], col="gray", xlab=expression(x[2]), ylab=expression(x[3]),
  main="realisations from a\n truncated trivariate normal distribution")
points(X.trunc[,2:3], col="black")
abline(h=0, lty=2, col="gray")
abline(v=0, lty=2, col="gray")

plot(X[,c(1,3)], col="gray", xlab=expression(x[1]), ylab=expression(x[3]),
  main="realisations from a\n truncated trivariate normal distribution")
points(X.trunc[,c(1,3)], col="black")
abline(h=0, lty=2, col="gray")

# one-dimensional marginal density for x1 from realisations and formula
plot(density(X.trunc[,1]), main=expression("marginal density for "*x[1]))
x <- seq(-5, 5, by=0.01)
fx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma,
  lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0))
lines(x, fx, lwd=2, col="red")

# one-dimensional marginal density for x2 from realisations and formula
plot(density(X.trunc[,2]), main=expression("marginal density for "*x[2]))
x <- seq(-5, 5, by=0.01)
fx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma,
  lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0))
lines(x, fx, lwd=2, col="red")

# one-dimensional marginal density for x3 from realisations and formula
plot(density(X.trunc[,3]), main=expression("marginal density for "*x[3]))
x <- seq(-5, 5, by=0.01)
fx <- dtmvnorm.marginal(x, n=3, mean=mu, sigma=sigma,
  lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0))
lines(x, fx, lwd=2, col="red")
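
# Added check (not part of the original examples): each one-dimensional
# marginal density should integrate to one; verified here numerically for x1
integrate(function(x) dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma,
  lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0)), lower=-Inf, upper=Inf)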
}
\keyword{distribution}
\keyword{multivariate}
tmvtnorm/man/qtmvnorm-marginal.Rd0000644000176200001440000000657714532766453016703 0ustar liggesusers\name{qtmvnorm-marginal}
\alias{qtmvnorm.marginal}
\title{Quantiles of the Truncated Multivariate Normal Distribution in One Dimension}
\description{
Computes the equicoordinate quantile function of the truncated multivariate normal distribution
for arbitrary correlation matrices
based on an inversion of the algorithms by Genz and Bretz.
}
\usage{
qtmvnorm.marginal(p,
  interval = c(-10, 10),
  tail = c("lower.tail","upper.tail","both.tails"),
  n=1,
  mean=rep(0, nrow(sigma)),
  sigma=diag(length(mean)),
  lower=rep(-Inf, length = length(mean)),
  upper=rep( Inf, length = length(mean)), ...)
}
\arguments{
  \item{p}{ probability.}
  \item{interval}{ a vector containing the end-points of the interval to be searched by \code{\link{uniroot}}.}
  \item{tail}{ specifies which quantiles should be computed.
               \code{lower.tail} gives the quantile \eqn{x} for which
               \eqn{P[X \le x] = p}{P[X <= x] = p}, \code{upper.tail} gives \eqn{x} with
               \eqn{P[X > x] = p} and
               \code{both.tails} leads to \eqn{x}
               with \eqn{P[-x \le X \le x] = p}{P[-x <= X <= x] = p} }
  \item{n}{ Index position (1..n) within the random vector to compute the marginal quantile for.}
  \item{mean}{ the mean vector of length n. }
  \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or
    \code{sigma} can be specified. If \code{sigma} is given, the
    problem is standardized. If neither \code{corr} nor
    \code{sigma} is given, the identity matrix is used
    for \code{sigma}. }
  \item{lower}{Vector of lower truncation points,\\
    default is \code{rep(-Inf, length = length(mean))}.}
  \item{upper}{Vector of upper truncation points,\\
    default is \code{rep( Inf, length = length(mean))}.}
  \item{...}{ additional parameters to be passed to \code{\link{uniroot}}.}
}
\details{
Only equicoordinate quantiles are computed, i.e., the quantiles in each
dimension coincide. Currently, the distribution function is inverted by
using the \code{\link{uniroot}} function which may result in limited accuracy of the
quantiles.
}
\value{
A list with four components: \code{quantile} and \code{f.quantile}
give the location of the quantile and the value of the function
evaluated at that point. \code{iter} and \code{estim.prec} give the number
of iterations used and an approximate estimated precision from
\code{\link{uniroot}}.
}
\seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}}
\examples{
# finite-dimensional distribution of the Geometric Brownian Motion log-returns
# with truncation

# volatility p.a.
sigma=0.4 # risk free rate r = 0.05 # n=3 points in time T <- c(0.5, 0.7, 1) # covariance matrix of Geometric Brownian Motion returns Sigma = sigma^2*outer(T,T,pmin) # mean vector of the Geometric Brownian Motion returns mu = (r - sigma^2/2) * T # lower truncation vector a (a<=x<=b) a = rep(-Inf, 3) # upper truncation vector b (a<=x<=b) b = c(0, 0, Inf) # quantile of the t_1 returns qtmvnorm.marginal(p=0.95, interval = c(-10, 10), tail = "lower.tail", n=1, mean = mu, sigma = Sigma, lower=a, upper=b) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/ptmvt.Rd0000644000176200001440000000452314216143110014351 0ustar liggesusers\name{ptmvt} \alias{ptmvt} \title{Truncated Multivariate Student t Distribution} \description{ Computes the distribution function of the truncated multivariate t distribution } \usage{ ptmvt(lowerx, upperx, mean = rep(0, length(lowerx)), sigma, df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) } \arguments{ \item{lowerx}{ the vector of lower limits of length n.} \item{upperx}{ the vector of upper limits of length n.} \item{mean}{ the mean vector of length n.} \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. } \item{df}{Degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{maxpts}{ maximum number of function values as integer. } \item{abseps}{ absolute error tolerance as double. } \item{releps}{ relative error tolerance as double. } } \value{ The evaluated distribution function is returned with attributes \item{error}{estimated absolute error and} \item{msg}{status messages.} } \references{ Geweke, J. F. (1991) Efficient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities. \url{https://www.researchgate.net/publication/2335219_Efficient_Simulation_from_the_Multivariate_Normal_and_Student-t_Distributions_Subject_to_Linear_Constraints_and_the_Evaluation_of_Constraint_Probabilities} Samuel Kotz, Saralees Nadarajah (2004). Multivariate t Distributions and Their Applications. \emph{Cambridge University Press} } \author{Stefan Wilhelm } \examples{ sigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2) Fx <- ptmvt(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0), sigma=sigma, df=3, lower=c(-1,-1), upper=c(1,1)) } \keyword{ math } \keyword{ multivariate } tmvtnorm/man/gmm.tmvnorm.Rd0000644000176200001440000001123314216101430015453 0ustar liggesusers\name{gmm.tmvnorm} \alias{gmm.tmvnorm} \title{ GMM Estimation for the Truncated Multivariate Normal Distribution } \description{ Generalized Method of Moments (GMM) Estimation for the Truncated Multivariate Normal Distribution } \usage{ gmm.tmvnorm(X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), method=c("ManjunathWilhelm","Lee"), cholesky = FALSE, ...) 
} \arguments{ \item{X}{Matrix of quantiles, each row is taken to be a quantile.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = ncol(X))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = ncol(X))}.} \item{start}{Named list with elements \code{mu} (mean vector) and \code{sigma} (covariance matrix). Initial values for optimizer.} \item{fixed}{Named list. Parameter values to keep fixed during optimization.} \item{method}{Which set of moment conditions used, possible methods are "ManjunathWilhelm" (default) and "Lee".} \item{cholesky}{if TRUE, we use the Cholesky decomposition of \code{sigma} as parametrization} \item{\dots}{Further arguments to pass to \code{\link{gmm}}} } \details{ This method performs an estimation of the parameters \code{mean} and \code{sigma} of a truncated multinormal distribution using the Generalized Method of Moments (GMM), when the truncation points \code{lower} and \code{upper} are known. \code{gmm.tmvnorm()} is a wrapper for the general GMM method \code{\link[gmm]{gmm}}, so one does not have to specify the moment conditions. \bold{Manjunath/Wilhelm moment conditions}\cr Because the first and second moments can be computed thanks to the \code{\link{mtmvnorm}} function, we can set up a method-of-moments estimator by equating the sample moments to their population counterparts. This way we have an exactly identified case. \bold{Lee (1979,1983) moment conditions}\cr The recursive moment conditions presented by Lee (1979,1983) are defined for \eqn{l=0,1,2,\ldots} as \deqn{ \sigma^{iT} E(x_i^l \textbf{x}) = \sigma^{iT} \mu E(x_i^l) + l E(x_i^{l-1}) + \frac{a_i^l F_i(a_i)}{F} - \frac{b_i^l F_i(b_i)}{F} } where \eqn{E(x_i^l)} and \eqn{E(x_i^l \textbf{x})} are the moments of \eqn{x_i^l} and \eqn{x_i^l \textbf{x}} respectively and \eqn{F_i(c)/F} is the one-dimensional marginal density in variable \eqn{i} as calculated by \code{\link{dtmvnorm.marginal}}. \eqn{\sigma^{iT}} is the \eqn{i}-th column of the inverse covariance matrix \eqn{\Sigma^{-1}}. This method returns an object of class \code{gmm}, for which various diagnostic methods are available, like \code{profile()}, \code{confint()} etc. See examples. } \value{ An object of class \code{\link[gmm]{gmm}} } \author{ Stefan Wilhelm \email{wilhelm@financial.com} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Lee, L.-F. (1979). On the first and second moments of the truncated multi-normal distribution and a simple estimator. \emph{Economics Letters}, \bold{3}, 165--169 Lee, L.-F. (1983). The determination of moments of the doubly truncated multivariate normal Tobit model. \emph{Economics Letters}, \bold{11}, 245--250 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. 
Available at SSRN: \url{https://www.ssrn.com/abstract=1472153}
}
\seealso{
 \code{\link[gmm]{gmm}}
}
\examples{
\dontrun{
set.seed(1.234)

# the actual parameters
lower <- c(-1, -2)
upper <- c(3, Inf)
mu    <- c(0, 0)
sigma <- matrix(c(1,   0.8,
                  0.8, 2), 2, 2)

# generate random samples
X <- rtmvnorm(n=500, mu, sigma, lower, upper)

# estimate mean vector and covariance matrix sigma from random samples X
# with default start values
gmm.fit1 <- gmm.tmvnorm(X, lower=lower, upper=upper)

# diagnostic output of the estimated parameters
summary(gmm.fit1)
vcov(gmm.fit1)

# confidence intervals
confint(gmm.fit1)

# choosing a different start value
gmm.fit2 <- gmm.tmvnorm(X, lower=lower, upper=upper,
  start=list(mu=c(0.1, 0.1),
  sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2)))
summary(gmm.fit2)

# GMM estimation with Lee (1983) moment conditions
gmm.fit3 <- gmm.tmvnorm(X, lower=lower, upper=upper, method="Lee")
summary(gmm.fit3)
confint(gmm.fit3)

# MLE estimation for comparison
mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper)
confint(mle.fit1)
}
}
tmvtnorm/man/rtmvnorm.Rd0000644000176200001440000003456414216076734015105 0ustar liggesusers\name{rtmvnorm}
\alias{rtmvnorm}
\alias{rtmvnorm.sparseMatrix}
\title{Sampling Random Numbers From The Truncated Multivariate Normal Distribution}
\description{
  This function generates random numbers
  from the truncated multivariate normal
  distribution with mean equal to \code{mean} and covariance matrix
  \code{sigma} (or alternatively precision matrix \code{H}),
  lower and upper truncation points \code{lower} and \code{upper}
  with either rejection sampling or Gibbs sampling.
}
\usage{
rtmvnorm(n, mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower=rep(-Inf, length = length(mean)),
  upper=rep( Inf, length = length(mean)),
  D = diag(length(mean)),
  H = NULL,
  algorithm=c("rejection", "gibbs", "gibbsR"), ...)
rtmvnorm.sparseMatrix(n, mean = rep(0, nrow(H)),
    H = sparseMatrix(i=1:length(mean), j=1:length(mean), x=1),
    lower = rep(-Inf, length = length(mean)),
    upper = rep( Inf, length = length(mean)), ...)
}
\arguments{
 \item{n}{Number of random points to be sampled. Must be an integer \eqn{\ge 1}{>= 1}.}
 \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.}
 \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.}
 \item{lower}{Vector of lower truncation points,
              default is \code{rep(-Inf, length = length(mean))}.}
 \item{upper}{Vector of upper truncation points,
              default is \code{rep( Inf, length = length(mean))}.}
 \item{D}{Matrix for linear constraints, defaults to diagonal matrix.}
 \item{H}{Precision matrix, default is \code{NULL}.}
 \item{algorithm}{Method used, possible methods are rejection sampling ("rejection", default),
   the Fortran Gibbs sampler ("gibbs")
   and the old Gibbs sampler implementation in R ("gibbsR").}
 \item{...}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvnorm.gibbs()},
            such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details}
}
\details{
The generation of random numbers from a truncated multivariate normal distribution is done
using either rejection sampling or Gibbs sampling.

\bold{Rejection sampling}\cr
Rejection sampling is done from the standard multivariate normal distribution.
We use the function \code{\link[mvtnorm]{rmvnorm}} of the \pkg{mvtnorm} package to generate proposals,
which are accepted if they are inside the support region and rejected otherwise.
In order to speed up the generation of N samples from the truncated distribution,
we first calculate the acceptance rate alpha from the truncation points and then generate N/alpha samples iteratively
until we have N samples. This typically does not take more than 2-3 iterations.
Rejection sampling may be very inefficient when the support region is small (e.g. in higher dimensions),
which results in very low acceptance rates alpha. In this case the Gibbs sampler is preferable.

\bold{Gibbs sampling}\cr
The Gibbs sampler samples from univariate conditional distributions,
so all samples can be accepted except for a burn-in period.
The number of burn-in samples to be discarded can be specified, as well as a start value of the chain.
If no start value is given, we determine a start value from the support region
using either the lower bound or the upper bound if they are finite, or 0 otherwise.

The Gibbs sampler has been reimplemented in Fortran 90 for performance reasons (\code{algorithm="gibbs"}).
The old R implementation is still accessible through \code{algorithm="gibbsR"}.

The arguments to be passed along with \code{algorithm="gibbs"} or \code{algorithm="gibbsR"} are:
\describe{
  \item{\code{burn.in.samples}}{number of samples in Gibbs sampling to be discarded as burn-in phase, must be non-negative.}
  \item{\code{start.value}}{Start value (vector of length \code{length(mean)}) for the MCMC chain. If one is specified,
   it must lie inside the support region (\eqn{lower <= start.value <= upper}).
   If none is specified, the start value is taken componentwise as the finite lower or upper boundaries respectively,
   or zero if both boundaries are infinite. Defaults to NULL.}
  \item{\code{thinning}}{Thinning factor for reducing autocorrelation of random points in Gibbs sampling.
    Must be an integer >= 1. We create a Markov chain of length \code{(n*thinning)} and take only those
    samples \code{j=1:(n*thinning)} where \code{j \%\% thinning == 0}.
    Defaults to 1 (no thinning of the chain).}
}

\bold{Sampling with linear constraints}\cr
We extended the method to also simulate from a multivariate normal distribution
subject to general linear constraints \eqn{lower <= D x <= upper}.
For general D, both rejection sampling and Gibbs sampling according to Geweke (1991)
are available.

\bold{Gibbs sampler and the use of the precision matrix H}\cr
Why is it important to have a random sampler that works with the precision matrix?
Especially in Bayesian and spatial statistics, there are a number of high-dimensional
applications where the precision matrix \code{H} is readily available,
but is sometimes nearly singular and cannot easily be inverted to obtain \code{sigma}.
Additionally, it turns out that the Gibbs sampler formulas are much simpler
in terms of the precision matrix than in terms of the covariance matrix.
See the details of the Gibbs sampler implementation in the package vignette
or for example Geweke (2005), pp.171-172.
(Thanks to Miguel Godinho de Matos from Carnegie Mellon University for pointing me to this.)
Therefore, we now provide an interface for the direct use of the precision matrix \code{H} in \code{rtmvnorm()}.

\bold{Gibbs sampler with sparse precision matrix H}\cr
The size of the covariance matrix \code{sigma} or precision matrix \code{H} -
if expressed as a dense \code{\link[base]{matrix}} -
grows quadratically with the number of dimensions d.
For high-dimensional problems (such as d > 5000),
it is no longer efficient and appropriate to work with dense matrix representations,
as one quickly runs into memory problems.\cr
It is interesting to note that in many applications the precision matrix,
which holds the conditional dependencies, will be sparse,
whereas the covariance matrix will be dense.
Hence, expressing H as a sparse matrix
will significantly reduce the amount of memory to store this matrix
and allows much larger problems to be handled.
In the current version of the package, the precision matrix
(not \code{sigma} since it will be dense in most cases)
can be passed to \code{rtmvnorm.sparseMatrix()}
as a \code{\link[Matrix]{sparseMatrix}} from the \code{Matrix} package.
See the examples section below for a usage example.
}
\section{Warning}{
A word of caution is needed for useRs who are not familiar
with Markov Chain Monte Carlo methods like Gibbs sampling:

Rejection sampling is exact in the sense that we are sampling directly
from the target distribution and the random samples generated are independent.
So it is clearly the default method.

Markov Chain Monte Carlo methods are only approximate methods,
which may suffer from several problems:
\itemize{
  \item{Poor mixing}
  \item{Convergence problems}
  \item{Correlation among samples}
}
Diagnostic checks for Markov Chain Monte Carlo
include trace plots, CUSUM plots and autocorrelation plots like \code{\link{acf}}.
For a survey see for instance Cowles (1996).

In particular, consecutive samples generated from \code{rtmvnorm(..., algorithm=c("gibbs", "gibbsR"))}
are correlated (see also example 3 below).
One way of reducing the autocorrelation among the random samples is "thinning" the Markov chain,
that is, recording only a subset/subsequence of the chain.
For example, one could record only every 100th sample,
which clearly reduces the autocorrelation and "increases the independence".
But thinning comes at the cost of higher computation times,
since the chain has to run much longer.
We refer to autocorrelation plots in order to determine optimal thinning.
}
\author{Stefan Wilhelm, Manjunath B G}
\seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}, \code{\link[mvtnorm]{rmvnorm}}, \code{\link[mvtnorm]{dmvnorm}}}
\references{
Alan Genz, Frank Bretz, Tetsuhisa Miwa, Xuefei Mi, Friedrich Leisch, Fabian Scheipl, Torsten Hothorn (2009).
mvtnorm: Multivariate Normal and t Distributions. R package version 0.9-7.
URL \url{https://CRAN.R-project.org/package=mvtnorm}

Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions.
\emph{Wiley & Sons}, pp. 70--73

Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution.
\emph{Journal of Multivariate Analysis}, \bold{94}, 209--221

Jayesh H. Kotecha and Petar M. Djuric (1999). Gibbs Sampling Approach For Generation of Truncated Multivariate Gaussian Random Variables.
\emph{IEEE Computer Society}, 1757--1760

Cowles, M. and Carlin, B. (1996). Markov Chain Monte Carlo Convergence Diagnostics: A Comparative Review.
\emph{Journal of the American Statistical Association}, \bold{91}, 883--904

Geweke, J. F. (1991). Efficient Simulation from the Multivariate Normal and Student-t Distributions
Subject to Linear Constraints.
\emph{Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface.
Seattle Washington, April 21-24, 1991}, 571--578

Geweke, J. F. (2005).
Contemporary Bayesian Econometrics and Statistics, \emph{Wiley & Sons}, pp.171--172
}
\examples{
################################################################################
#
# Example 1:
# rejection sampling in 2 dimensions
#
################################################################################

sigma <- matrix(c(4,2,2,3), ncol=2)
x <- rtmvnorm(n=500, mean=c(1,2), sigma=sigma, upper=c(1,0))
plot(x, main="samples from truncated bivariate normal distribution",
  xlim=c(-6,6), ylim=c(-6,6),
  xlab=expression(x[1]), ylab=expression(x[2]))
abline(v=1, lty=3, lwd=2, col="gray")
abline(h=0, lty=3, lwd=2, col="gray")

################################################################################
#
# Example 2:
# Gibbs sampler for 4 dimensions
#
################################################################################

C <- matrix(0.8, 4, 4)
diag(C) <- rep(1, 4)
lower <- rep(-4, 4)
upper <- rep(-1, 4)

# acceptance rate alpha
alpha <- pmvnorm(lower=lower, upper=upper, mean=rep(0,4), sigma=C)
alpha

# Gibbs sampler
X1 <- rtmvnorm(n=20000, mean = rep(0,4), sigma=C, lower=lower, upper=upper,
  algorithm="gibbs", burn.in.samples=100)
# Rejection sampling
X2 <- rtmvnorm(n=5000, mean = rep(0,4), sigma=C, lower=lower, upper=upper)

colMeans(X1)
colMeans(X2)

plot(density(X1[,1], from=lower[1], to=upper[1]), col="red", lwd=2,
  main="Kernel density estimates from random samples generated by Gibbs vs. Rejection sampling")
lines(density(X2[,1], from=lower[1], to=upper[1]), col="blue", lwd=2)
legend("topleft",legend=c("Gibbs Sampling","Rejection Sampling"),
  col=c("red","blue"), lwd=2, bty="n")

################################################################################
#
# Example 3:
# Autocorrelation plot for Gibbs sampler
# with and without thinning
#
################################################################################

sigma <- matrix(c(4,2,2,3), ncol=2)
X1 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0),
  algorithm="rejection")
acf(X1)
# no autocorrelation among random points
X2 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0),
  algorithm="gibbs")
acf(X2)
# exhibits autocorrelation among random points
X3 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0),
  algorithm="gibbs", thinning=2)
acf(X3)
# reduced autocorrelation among random points

plot(density(X1[,1], to=1))
lines(density(X2[,1], to=1), col="blue")
lines(density(X3[,1], to=1), col="red")

################################################################################
#
# Example 4: Univariate case
#
################################################################################

X <- rtmvnorm(100, mean=0, sigma=1, lower=-1, upper=1)

################################################################################
#
# Example 5: Linear Constraints
#
################################################################################

mean  <- c(0, 0)
sigma <- matrix(c(10, 0,
                   0, 1), 2, 2)

# Linear Constraints
#
# a1 <= x1 + x2 <= b1
# a2 <= x1 - x2 <= b2
#
# [ a1 ] <= [ 1  1 ] [ x1 ] <= [b1]
# [ a2 ]    [ 1 -1 ] [ x2 ]    [b2]
a <- c(-2, -2)
b <- c( 2,  2)
D <- matrix(c(1, 1, 1, -1), 2, 2)

X <- rtmvnorm(n=10000, mean, sigma, lower=a, upper=b, D=D, algorithm="gibbsR")
plot(X, main="Gibbs sampling for multivariate normal with linear constraints according to Geweke (1991)")

# mark linear constraints as lines
for (i in 1:nrow(D)) {
  abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red")
  abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red")
}
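
# Added sanity check for Example 5 (not part of the original documentation):
# all Gibbs samples should satisfy the linear constraints a <= D x <= b
Y <- X \%*\% t(D)
all(Y >= matrix(a, nrow(Y), ncol(Y), byrow=TRUE) &
    Y <= matrix(b, nrow(Y), ncol(Y), byrow=TRUE))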
################################################################################ # # Example 6: Using precision matrix H rather than sigma # ################################################################################ lower <- c(-1, -1) upper <- c(1, 1) mean <- c(0.5, 0.5) sigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2) H <- solve(sigma) D <- matrix(c(1, 1, 1, -1), 2, 2) X <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbs") plot(X, main="Gibbs sampling with precision matrix and linear constraints") ################################################################################ # # Example 7: Using sparse precision matrix H in high dimensions # ################################################################################ \dontrun{ d <- 1000 I_d <- sparseMatrix(i=1:d, j=1:d, x=1) W <- sparseMatrix(i=c(1:d, 1:(d-1)), j=c(1:d, (2:d)), x=0.5) H <- t(I_d - 0.5 * W) %*% (I_d - 0.5 * W) lower <- rep(0, d) upper <- rep(2, d) # Gibbs sampler generates n=100 draws in d=1000 dimensions X <- rtmvnorm.sparseMatrix(n=100, mean = rep(0,d), H=H, lower=lower, upper=upper, burn.in.samples=100) colMeans(X) cov(X) } } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/dtmvt.Rd0000644000176200001440000000733114216143124014342 0ustar liggesusers\name{dtmvt} \alias{dtmvt} \title{Truncated Multivariate Student t Density} \description{ This function provides the joint density function for the truncated multivariate Student t distribution with mean vector equal to \code{mean}, covariance matrix \code{sigma}, degrees of freedom parameter \code{df} and lower and upper truncation points \code{lower} and \code{upper}. } \usage{ dtmvt(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), log = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mean}{Mean vector, default is \code{rep(0, nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{df}{degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} } \details{ The Truncated Multivariate Student t Distribution is a conditional Multivariate Student t distribution subject to (linear) constraints \eqn{a \le \bold{x} \le b}. The density of the \eqn{p}-variate Multivariate Student t distribution with \eqn{\nu}{nu} degrees of freedom is \deqn{ f(\bold{x}) = \frac{\Gamma((\nu + p)/2)}{(\pi\nu)^{p/2} \Gamma(\nu/2) \|\Sigma\|^{1/2}} [ 1 + \frac{1}{\nu} (x - \mu)^T \Sigma^{-1} (x - \mu) ]^{- (\nu + p) / 2} } The density of the truncated distribution \eqn{f_{a,b}(x)} with constraints \eqn{(a \le x \le b)}{a <= x <= b} is accordingly \deqn{ f_{a,b}(x) = \frac{f(\bold{x})} {P(a \le x \le b)} } } \value{ a numeric vector with density values } \seealso{ \code{\link{ptmvt}} and \code{\link{rtmvt}} for probabilities and random number generation in the truncated case, see \code{\link[mvtnorm]{dmvt}}, \code{\link[mvtnorm]{rmvt}} and \code{\link[mvtnorm]{pmvt}} for the untruncated multi-t distribution. } \references{ Geweke, J. F. (1991) Efficient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities. 
\url{https://www.researchgate.net/publication/2335219_Efficient_Simulation_from_the_Multivariate_Normal_and_Student-t_Distributions_Subject_to_Linear_Constraints_and_the_Evaluation_of_Constraint_Probabilities} Samuel Kotz, Saralees Nadarajah (2004). Multivariate t Distributions and Their Applications. \emph{Cambridge University Press} } \author{Stefan Wilhelm \email{wilhelm@financial.com}} \examples{ # Example x1 <- seq(-2, 3, by=0.1) x2 <- seq(-2, 3, by=0.1) mean <- c(0,0) sigma <- matrix(c(1, -0.5, -0.5, 1), 2, 2) lower <- c(-1,-1) density <- function(x) { z=dtmvt(x, mean=mean, sigma=sigma, lower=lower) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute multivariate-t density d for grid d <- fgrid(x1, x2, function(x) dtmvt(x, mean=mean, sigma=sigma, lower=lower)) # compute multivariate normal density d for grid d2 <- fgrid(x1, x2, function(x) dtmvnorm(x, mean=mean, sigma=sigma, lower=lower)) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate t Density", xlab=expression(x[1]), ylab=expression(x[2])) contour(x1, x2, d2, nlevels=5, add=TRUE, col="red") abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/dtmvnorm.marginal2.Rd0000644000176200001440000001007614216103636016731 0ustar liggesusers\name{dtmvnorm.marginal2} \Rdversion{1.1} \alias{dtmvnorm.marginal2} \title{ Bivariate marginal density functions from a Truncated Multivariate Normal distribution } \description{ This function computes the bivariate marginal density function \eqn{f(x_q, x_r)} from a k-dimensional Truncated Multivariate Normal density function (k>=2). The bivariate marginal density is obtained by integrating out (k-2) dimensions as proposed by Tallis (1961). This function is basically an extraction of the Leppard and Tallis (1989) Fortran code for moments calculation, but extended to the double truncated case. } \usage{ dtmvnorm.marginal2(xq, xr, q, r, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), log = FALSE, pmvnorm.algorithm=GenzBretz()) } \arguments{ \item{xq}{Value \eqn{x_q}} \item{xr}{Value \eqn{x_r}} \item{q}{Index position for \eqn{x_q} within mean vector to calculate the bivariate marginal density for.} \item{r}{Index position for \eqn{x_r} within mean vector to calculate the bivariate marginal density for.} \item{mean}{Mean vector, default is \code{rep(0, length = nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} \item{pmvnorm.algorithm}{Algorithm used for \code{\link[mvtnorm]{pmvnorm}}} } \details{ The bivariate marginal density function \eqn{f(x_q, x_r)} for \eqn{x \sim TN(\mu, \Sigma, a, b)} and \eqn{q \ne r} is defined as \deqn{F_{q,r}(x_q=c_q, x_r=c_r) = \int^{b_1}_{a_1}...\int^{b_{q-1}}_{a_{q-1}}\int^{b_{q+1}}_{a_{q+1}}...\int^{b_{r-1}}_{a_{r-1}}\int^{b_{r+1}}_{a_{r+1}}...\int^{b_{k}}_{a_{k}} \varphi{_{\alpha}}_{\Sigma}(x_s, c_q, c_r) dx_s} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. 
\emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Leppard, P. and Tallis, G. M. (1989). Evaluation of the Mean and Covariance of the Truncated Multinormal \emph{Applied Statistics}, \bold{38}, 543--553 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. Available at SSRN: \url{https://www.ssrn.com/abstract=1472153} } \author{Stefan Wilhelm , Manjunath B G } \examples{ lower = c(-0.5, -1, -1) upper = c( 2.2, 2, 2) mean = c(0,0,0) sigma = matrix(c(2.0, -0.6, 0.7, -0.6, 1.0, -0.2, 0.7, -0.2, 1.0), 3, 3) # generate random samples from untruncated and truncated distribution Y = rmvnorm(10000, mean=mean, sigma=sigma) X = rtmvnorm(500, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbs") # compute bivariate marginal density of x1 and x2 xq <- seq(lower[1], upper[1], by=0.1) xr <- seq(lower[2], upper[2], by=0.1) grid <- matrix(NA, length(xq), length(xr)) for (i in 1:length(xq)) { for (j in 1:length(xr)) { grid[i,j] = dtmvnorm.marginal2(xq=xq[i], xr=xr[j], q=1, r=2, sigma=sigma, lower=lower, upper=upper) } } plot(Y[,1], Y[,2], xlim=c(-4, 4), ylim=c(-4, 4), main=expression("bivariate marginal density ("*x[1]*","*x[2]*")"), xlab=expression(x[1]), ylab=expression(x[2]), col="gray80") points(X[,1], X[,2], col="black") lines(x=c(lower[1], upper[1], upper[1], lower[1], lower[1]), y=c(lower[2],lower[2],upper[2],upper[2],lower[2]), lty=2, col="red") contour(xq, xr, grid, add=TRUE, nlevels = 8, col="red", lwd=2) # scatterplot matrices for untruncated and truncated points require(lattice) splom(Y) splom(X) } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/ptmvnorm.marginal.Rd0000644000176200001440000000653412067563134016674 0ustar liggesusers\name{ptmvtnorm.marginal} \Rdversion{1.1} \alias{ptmvnorm.marginal} \alias{ptmvt.marginal} \title{One-dimensional marginal CDF function for a Truncated Multivariate Normal and Student t distribution} \description{ This function computes the one-dimensional marginal probability function from a Truncated Multivariate Normal and Student t density function using integration in \code{pmvnorm()} and \code{pmvt()}. } \usage{ ptmvnorm.marginal(xn, n = 1, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean))) ptmvt.marginal(xn, n = 1, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean))) } \arguments{ \item{xn}{Vector of quantiles to calculate the marginal probability for.} \item{n}{Index position (1..k) within the random vector xn to calculate the one-dimensional marginal probability for.} \item{mean}{ the mean vector of length k. } \item{sigma}{ the covariance matrix of dimension k. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. 
If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}.} \item{df}{degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} } \details{ The one-dimensional marginal probability for index i is \eqn{F_i(x_i) = P(X_i \le x_i)}{F_i(x_i) = P(X_i <= x_i)} \deqn{F_i(x_i) = \int_{a_1}^{b_1} \ldots \int_{a_{i-1}}^{b_{i-1}} \int_{a_{i}}^{x_i} \int_{a_{i+1}}^{b_{i+1}} \ldots \int_{a_k}^{b_k} f(x) dx = \alpha^{-1} \Phi_k(a, u, \mu, \Sigma)} where \eqn{u = (b_1,\ldots,b_{i-1},x_i,b_{i+1},\ldots,b_k)'}{u = (b_1,...,b_{i-1},x_i,b_{i+1},...,b_k)'} is the upper integration bound and \eqn{\Phi_k} is the k-dimensional normal probability (i.e. functions \code{pmvnorm()} and \code{pmvt()} in R package \code{mvtnorm}). } \value{ Returns a vector of the same length as xn with probabilities. } \author{Stefan Wilhelm } \examples{ ## Example 1: Truncated multi-normal lower <- c(-1,-1,-1) upper <- c(1,1,1) mean <- c(0,0,0) sigma <- matrix(c( 1, 0.8, 0.2, 0.8, 1, 0.1, 0.2, 0.1, 1), 3, 3) X <- rtmvnorm(n=1000, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) x <- seq(-1, 1, by=0.01) Fx <- ptmvnorm.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) plot(ecdf(X[,1]), main="marginal CDF for truncated multi-normal") lines(x, Fx, type="l", col="blue") ## Example 2: Truncated multi-t X <- rtmvt(n=1000, mean=c(0,0,0), sigma=sigma, df=2, lower=lower, upper=upper) x <- seq(-1, 1, by=0.01) Fx <- ptmvt.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) plot(ecdf(X[,1]), main="marginal CDF for truncated multi-t") lines(x, Fx, type="l", col="blue") } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/tmvnorm.Rd0000644000176200001440000001067711330334360014713 0ustar liggesusers% --- Source file: tmvtnorm.Rd --- \name{tmvnorm} \alias{dtmvnorm} \title{Truncated Multivariate Normal Density} \description{ This function provides the joint density function for the truncated multivariate normal distribution with mean equal to \code{mean} and covariance matrix \code{sigma}, lower and upper truncation points \code{lower} and \code{upper}. For convenience, it furthermore serves as a wrapper function for the one-dimensional and bivariate marginal densities \code{dtmvnorm.marginal()} and \code{dtmvnorm.marginal2()} respectively when invoked with the \code{margin} argument. } \usage{ dtmvnorm(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE, margin=NULL) } \arguments{ \item{x}{Vector or matrix of quantiles. 
If \code{x} is a matrix, each row is taken to be a quantile.}
 \item{mean}{Mean vector, default is \code{rep(0, nrow(sigma))}.}
 \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.}
 \item{lower}{Vector of lower truncation points,
              default is \code{rep(-Inf, length = length(mean))}.}
 \item{upper}{Vector of upper truncation points,
              default is \code{rep( Inf, length = length(mean))}.}
 \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).}
 \item{margin}{if \code{NULL} then the joint density is computed (the default),
   if \code{margin=1} then the one-dimensional marginal density in variate \code{q} (\code{q = 1..length(mean)}) is returned,
   if \code{margin=c(q,r)} then the bivariate marginal density in variates \code{q} and \code{r}
   for \code{q,r = 1..length(mean)} and \eqn{q \ne r}{q != r} is returned.}
}
\details{
The computation of truncated multivariate normal probabilities and densities is done using conditional probabilities
from the standard/untruncated multivariate normal distribution.
We therefore refer to the documentation of the mvtnorm package; the methodology is described in Genz (1992, 1993).
}
\author{Stefan Wilhelm}
\seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}, \code{\link[mvtnorm]{rmvnorm}}, \code{\link[mvtnorm]{dmvnorm}},
         \code{\link{dtmvnorm.marginal}} and \code{\link{dtmvnorm.marginal2}} for marginal density functions}
\references{
Genz, A. (1992). Numerical computation of multivariate normal probabilities.
\emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150

Genz, A. (1993). Comparison of methods for the computation of multivariate
normal probabilities. \emph{Computing Science and Statistics}, \bold{25}, 400--405

Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions.
\emph{Wiley & Sons}, pp. 70--73

Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution.
\emph{Journal of Multivariate Analysis}, \bold{94}, 209--221 } \examples{ dtmvnorm(x=c(0,0), mean=c(1,1), upper=c(0,0)) ########################################### # # Example 1: # truncated multivariate normal density # ############################################ x1<-seq(-2, 3, by=0.1) x2<-seq(-2, 3, by=0.1) density<-function(x) { sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2) z=dtmvnorm(x, mean=c(0,0), sigma=sigma, lower=c(-1,-1)) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute density d for grid d=fgrid(x1, x2, density) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate Normal Density", xlab=expression(x[1]), ylab=expression(x[2])) abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) ########################################### # # Example 2: # generation of random numbers # from a truncated multivariate normal distribution # ############################################ sigma <- matrix(c(4,2,2,3), ncol=2) x <- rtmvnorm(n=500, mean=c(1,2), sigma=sigma, upper=c(1,0)) plot(x, main="samples from truncated bivariate normal distribution", xlim=c(-6,6), ylim=c(-6,6), xlab=expression(x[1]), ylab=expression(x[2])) abline(v=1, lty=3, lwd=2, col="gray") abline(h=0, lty=3, lwd=2, col="gray") } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/ptmvnorm.Rd0000644000176200001440000000614212303463400015062 0ustar liggesusers% --- Source file: ptmvnorm.Rd --- \name{ptmvnorm} \alias{ptmvnorm} \title{ Truncated Multivariate Normal Distribution } \description{ Computes the distribution function of the truncated multivariate normal distribution for arbitrary limits and correlation matrices based on the \code{pmvnorm()} implementation of the algorithms by Genz and Bretz. } \usage{ ptmvnorm(lowerx, upperx, mean=rep(0, length(lowerx)), sigma, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) } \arguments{ \item{lowerx}{ the vector of lower limits of length n.} \item{upperx}{ the vector of upper limits of length n.} \item{mean}{ the mean vector of length n.} \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. } \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{maxpts}{ maximum number of function values as integer. } \item{abseps}{ absolute error tolerance as double. } \item{releps}{ relative error tolerance as double. } } \details{ The computation of truncated multivariate normal probabilities and densities is done using conditional probabilities from the standard/untruncated multivariate normal distribution. So we refer to the documentation of the \code{mvtnorm} package and the methodology is described in Genz (1992, 1993) and Genz/Bretz (2009). For properties of the truncated multivariate normal distribution see for example Johnson/Kotz (1970) and Horrace (2005). } \value{ The evaluated distribution function is returned with attributes \item{error}{estimated absolute error and} \item{msg}{status messages.} } \references{ Genz, A. (1992). Numerical computation of multivariate normal probabilities. 
}
\keyword{distribution}
\keyword{multivariate}
tmvtnorm/man/ptmvnorm.Rd0000644000176200001440000000614212303463400015062 0ustar liggesusers% --- Source file: ptmvnorm.Rd ---
\name{ptmvnorm}
\alias{ptmvnorm}
\title{ Truncated Multivariate Normal Distribution }
\description{
Computes the distribution function of the truncated multivariate normal distribution for arbitrary limits and correlation matrices based on the \code{pmvnorm()} implementation of the algorithms by Genz and Bretz.
}
\usage{
ptmvnorm(lowerx, upperx, mean=rep(0, length(lowerx)), sigma,
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  maxpts = 25000, abseps = 0.001, releps = 0)
}
\arguments{
\item{lowerx}{ the vector of lower limits of length n.}
\item{upperx}{ the vector of upper limits of length n.}
\item{mean}{ the mean vector of length n.}
\item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. }
\item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.}
\item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.}
\item{maxpts}{ maximum number of function values as integer. }
\item{abseps}{ absolute error tolerance as double. }
\item{releps}{ relative error tolerance as double. }
}
\details{
The computation of truncated multivariate normal probabilities and densities is done using conditional probabilities from the standard/untruncated multivariate normal distribution. We therefore refer to the documentation of the \code{mvtnorm} package; the methodology is described in Genz (1992, 1993) and Genz/Bretz (2009). For properties of the truncated multivariate normal distribution see for example Johnson/Kotz (1970) and Horrace (2005).
}
\value{
The evaluated distribution function is returned with attributes
\item{error}{estimated absolute error and}
\item{msg}{status messages.}
}
\references{
Genz, A. (1992). Numerical computation of multivariate normal probabilities. \emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150

Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. \emph{Computing Science and Statistics}, \bold{25}, 400--405

Genz, A. and Bretz, F. (2009). Computation of Multivariate Normal and t Probabilities. \emph{Lecture Notes in Statistics}, Vol. \bold{195}, Springer-Verlag, Heidelberg.

Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions. \emph{Wiley & Sons}, pp. 70--73

Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution. \emph{Journal of Multivariate Analysis}, \bold{94}, 209--221
}
\examples{
sigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2)
Fx <- ptmvnorm(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0),
  sigma=sigma, lower=c(-1,-1), upper=c(1,1))
}
\keyword{distribution}
\keyword{multivariate}
tmvtnorm/man/rtmvt.Rd0000644000176200001440000001275414216143070014363 0ustar liggesusers\name{rtmvt}
\alias{rtmvt}
\title{Sampling Random Numbers From The Truncated Multivariate Student t Distribution}
\description{
This function generates random numbers from the truncated multivariate Student-t distribution with mean equal to \code{mean} and covariance matrix \code{sigma}, lower and upper truncation points \code{lower} and \code{upper}, with either rejection sampling or Gibbs sampling.
}
\usage{
rtmvt(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1,
  lower = rep(-Inf, length = length(mean)),
  upper = rep(Inf, length = length(mean)),
  algorithm=c("rejection", "gibbs"), ...)
}
\arguments{
\item{n}{Number of random points to be sampled. Must be an integer >= 1.}
\item{mean}{Mean vector, default is \code{rep(0, nrow(sigma))}.}
\item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.}
\item{df}{Degrees of freedom parameter (positive, may be non-integer).}
\item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.}
\item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.}
\item{algorithm}{Method used, possible methods are rejection sampling ("rejection", default) and the R Gibbs sampler ("gibbs").}
\item{...}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvt.gibbs()}, such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details.}
}
\details{
We sample \eqn{x \sim T(\mu, \Sigma, df)}{x ~ T(mean, Sigma, df)} subject to the rectangular truncation \eqn{lower \le x \le upper}{lower <= x <= upper}. Currently, two random number generation methods are implemented: rejection sampling and the Gibbs sampler.

For rejection sampling \code{algorithm="rejection"}, we sample from \code{\link[mvtnorm]{rmvt}} and retain only samples inside the support region. The acceptance probability will be calculated with \code{\link[mvtnorm]{pmvt}}. \code{\link[mvtnorm]{pmvt}} accepts only integer degrees of freedom \code{df}. For non-integer \code{df}, \code{algorithm="rejection"} will throw an error, so please use \code{algorithm="gibbs"} instead.

The arguments to be passed along with \code{algorithm="gibbs"} are:
\describe{
\item{\code{burn.in.samples}}{number of samples in Gibbs sampling to be discarded as burn-in phase, must be non-negative.}
\item{\code{start.value}}{Start value (vector of length \code{length(mean)}) for the MCMC chain. If one is specified, it must lie inside the support region (\eqn{lower \le start.value \le upper}{lower <= start.value <= upper}).
If none is specified, the start value is taken componentwise as the finite lower or upper boundaries respectively, or zero if both boundaries are infinite. Defaults to NULL.} \item{\code{thinning}}{Thinning factor for reducing autocorrelation of random points in Gibbs sampling. Must be an integer \eqn{\ge 1}{>= 1}. We create a Markov chain of length \code{(n*thinning)} and take only those samples \code{j=1:(n*thinning)} where \code{j \%\% thinning == 0} Defaults to 1 (no thinning of the chain).} } } \section{Warning}{ The same warnings for the Gibbs sampler apply as for the method \code{\link{rtmvnorm}}. } \author{Stefan Wilhelm , Manjunath B G } \references{ Geweke, John F. (1991) Efficient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints. \emph{Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface. Seattle Washington, April 21-24, 1991}, pp. 571--578 An earlier version of this paper is available at \url{https://www.researchgate.net/publication/2335219_Efficient_Simulation_from_the_Multivariate_Normal_and_Student-t_Distributions_Subject_to_Linear_Constraints_and_the_Evaluation_of_Constraint_Probabilities} } \examples{ ########################################################### # # Example 1 # ########################################################### # Draw from multi-t distribution without truncation X1 <- rtmvt(n=10000, mean=rep(0, 2), df=2) X2 <- rtmvt(n=10000, mean=rep(0, 2), df=2, lower=c(-1,-1), upper=c(1,1)) ########################################################### # # Example 2 # ########################################################### df = 2 mu = c(1,1,1) sigma = matrix(c( 1, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1), 3, 3) lower = c(-2,-2,-2) upper = c(2, 2, 2) # Rejection sampling X1 <- rtmvt(n=10000, mu, sigma, df, lower, upper) # Gibbs sampling without thinning X2 <- rtmvt(n=10000, mu, sigma, df, lower, upper, algorithm="gibbs") # Gibbs sampling with thinning X3 <- rtmvt(n=10000, mu, sigma, df, lower, upper, algorithm="gibbs", thinning=2) plot(density(X1[,1], from=lower[1], to=upper[1]), col="red", lwd=2, main="Gibbs vs. Rejection") lines(density(X2[,1], from=lower[1], to=upper[1]), col="blue", lwd=2) legend("topleft",legend=c("Rejection Sampling","Gibbs Sampling"), col=c("red","blue"), lwd=2) acf(X1) # no autocorrelation in Rejection sampling acf(X2) # strong autocorrelation of Gibbs samples acf(X3) # reduced autocorrelation of Gibbs samples after thinning } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/mle.tmvnorm.Rd0000644000176200001440000000716411454422144015471 0ustar liggesusers\name{mle.tmvnorm} \alias{mle.tmvnorm} \title{ Maximum Likelihood Estimation for the Truncated Multivariate Normal Distribution } \description{ Maximum Likelihood Estimation for the Truncated Multivariate Normal Distribution } \usage{ mle.tmvnorm(X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), method = "BFGS", cholesky = FALSE, lower.bounds = -Inf, upper.bounds = +Inf, ...) } \arguments{ \item{X}{Matrix of quantiles, each row is taken to be a quantile.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = ncol(X))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = ncol(X))}.} \item{start}{Named list with elements \code{mu} (mean vector) and \code{sigma} (covariance matrix). Initial values for optimizer.} \item{fixed}{Named list. 
Parameter values to keep fixed during optimization.}
\item{method}{Optimization method to use. See \code{\link{optim}}.}
\item{cholesky}{if \code{TRUE}, the Cholesky decomposition of \code{sigma} is used as the parametrization}
\item{lower.bounds}{lower bounds/box constraints for method "L-BFGS-B"}
\item{upper.bounds}{upper bounds/box constraints for method "L-BFGS-B"}
\item{\dots}{Further arguments to pass to \code{\link{optim}}}
}
\details{
This method performs a maximum likelihood estimation of the parameters \code{mean} and \code{sigma} of a truncated multinormal distribution, when the truncation points \code{lower} and \code{upper} are known. \code{mle.tmvnorm()} is a wrapper for the general maximum likelihood method \code{\link[stats4]{mle}}, so one does not have to specify the negative log-likelihood function.

The log-likelihood function for a data matrix X (T x n) can be written down straightforwardly as
\deqn{ \log L(X | \mu,\Sigma) = -T \log{\alpha(\mu,\Sigma)} - \frac{T}{2} \log{|\Sigma|} - \frac{1}{2} \sum_{t=1}^{T}{(x_t-\mu)' \Sigma^{-1} (x_t-\mu)} }

Like \code{\link[stats4]{mle}}, this method returns an object of class \code{mle}, for which various diagnostic methods are available, like \code{profile()}, \code{confint()} etc. See examples.

In order to adapt the estimation problem to \code{\link[stats4]{mle}}, the named parameters for mean vector elements are "mu_i" and the elements of the covariance matrix are "sigma_ij" for the lower triangular matrix elements, i.e. (j <= i).
}
\value{
An object of class \code{\link[stats4]{mle-class}}
}
\author{ Stefan Wilhelm \email{wilhelm@financial.com} }
\seealso{ \code{\link[stats4]{mle}} and \code{\link[stats4]{mle-class}} }
\examples{
\dontrun{
set.seed(1.2345)

# the actual parameters
lower <- c(-1,-1)
upper <- c(1, 2)
mu <- c(0, 0)
sigma <- matrix(c(1, 0.7, 0.7, 2), 2, 2)

# generate random samples
X <- rtmvnorm(n=500, mu, sigma, lower, upper)
method <- "BFGS"

# estimate mean vector and covariance matrix sigma from random samples X
# with default start values
mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper)

# diagnostic output of the estimated parameters
summary(mle.fit1)
logLik(mle.fit1)
vcov(mle.fit1)

# profiling the log likelihood and confidence intervals
mle.profile1 <- profile(mle.fit1, X, method="BFGS", trace=TRUE)
confint(mle.profile1)

par(mfrow=c(3,2))
plot(mle.profile1)

# choosing a different start value
mle.fit2 <- mle.tmvnorm(X, lower=lower, upper=upper,
  start=list(mu=c(0.1, 0.1),
  sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2)))
summary(mle.fit2)
}
}
tmvtnorm/DESCRIPTION0000644000176200001440000000177714533645532013664 0ustar liggesusersPackage: tmvtnorm
Version: 1.6
Date: 2023-12-05
Title: Truncated Multivariate Normal and Student t Distribution
Authors@R: c(
  person("Stefan", "Wilhelm", email = "wilhelm@financial.com", role = c("aut", "cre")),
  person("Manjunath", "B G", email = "bgmanjunath@gmail.com", role = "aut")
  )
Imports: stats, methods
Depends: R (>= 1.9.0), mvtnorm, utils, Matrix, stats4, gmm
Encoding: UTF-8
Suggests: lattice
Description: Random number generation for the truncated multivariate
  normal and Student t distribution. Computes probabilities, quantiles
  and densities, including one-dimensional and bivariate marginal
  densities. Computes first and second moments (i.e. mean and covariance
  matrix) for the double-truncated multinormal case.
License: GPL (>= 2)
URL: https://www.r-project.org
NeedsCompilation: yes
Packaged: 2023-12-05 08:47:20 UTC; stefan
Author: Stefan Wilhelm [aut, cre],
  Manjunath B G [aut]
Maintainer: Stefan Wilhelm
Repository: CRAN
Date/Publication: 2023-12-05 16:10:02 UTC
tmvtnorm/build/0000755000176200001440000000000014533561630013245 5ustar liggesuserstmvtnorm/build/vignette.rds0000644000176200001440000000033314533561630015603 0ustar liggesusers
#include <R.h>
#include <Rmath.h>

void F77_SUB(rndstart)(void) { GetRNGstate(); }
void F77_SUB(rndend)(void) { PutRNGstate(); }
double F77_SUB(normrnd)(void) { return norm_rand(); }
double F77_SUB(unifrnd)(void) { return unif_rand(); }
double F77_SUB(pnormr)(double *x, double *mu, double *sigma, int *lower_tail, int *log_p) { return pnorm(*x, *mu, *sigma, *lower_tail, *log_p); }
double F77_SUB(qnormr)(double *p, double *mu, double *sigma, int *lower_tail, int *log_p) { return qnorm(*p, *mu, *sigma, *lower_tail, *log_p); }
tmvtnorm/src/linked_list.f900000644000176200001440000000331711700621540015550 0ustar liggesusersmodule linked_list
implicit none

! type matrix row, holds a pointer to the root element of the linked list
type matrixrow
  type(node),pointer :: first   ! pointer to first node in linked list
  type(node),pointer :: last    ! pointer to last node in linked list
end type matrixrow

! matrix element for sparse matrix elements H[i,j]=v
type matrixelem
  integer :: i, j
  double precision :: v
end type matrixelem

! define a linked list of matrix elements
type node
  type(matrixelem) data          ! data
  type(node),pointer::next       ! pointer to the next element
end type node

CONTAINS

! insert the new matrix element H[i,j]=v into the linked list of row "i"
subroutine insert_list_element(row, newelem)
  type(matrixrow) :: row
  type(matrixelem) :: newelem
  if (.not. associated(row%first)) then
    allocate(row%first)
    nullify(row%first%next)
    row%first%data = newelem
    row%last => row%first
    !print *,"added element to linked list i=",newelem%i," j=",newelem%j," v=",newelem%v
  else
    allocate(row%last%next)
    nullify(row%last%next%next)
    row%last%next%data = newelem
    row%last => row%last%next
    !print *,"added element to linked list i=",newelem%i," j=",newelem%j," v=",newelem%v
  endif
end subroutine

! remove all elements of the linked list and free memory
subroutine free_all(row)
  implicit none
  type(matrixrow) :: row
  type(node), pointer :: tmp
  do
    tmp => row%first
    if (associated(tmp) .eqv. .FALSE.) exit
    row%first => row%first%next
    deallocate(tmp)
  end do
end subroutine free_all

end module linked_list
tmvtnorm/src/Makevars0000644000176200001440000000013312567105776014440 0ustar liggesusersPKG_LIBS=$(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)

all: $(SHLIB)
rtmvnormgibbs.o: linked_list.o
tmvtnorm/src/rtmvnormgibbs.f900000644000176200001440000007401412704702124016147 0ustar liggesusers! Gibbs sampling from a truncated multinormal distribution
!
! References
! 1. Kotecha et al. (1999):
!    Kotecha, J. H. & Djuric, P. M.
!    "Gibbs sampling approach for generation of truncated multivariate Gaussian random variables",
!    IEEE Computer Society, 1999, 1757-1760
!
! 2. Geweke (2005): Contemporary Bayesian Econometrics and
!    Statistics. John Wiley and Sons, 2005, pp. 171-172
!
! Code written by Stefan Wilhelm as part of the R package tmvtnorm.
! (http://CRAN.R-project.org/package=tmvtnorm)
!
! To cite package tmvtnorm in publications use:
!
!  Stefan Wilhelm, Manjunath B G (2012). tmvtnorm: Truncated
!  Multivariate Normal Distribution. R package version 1.4-5.
!
! A BibTeX entry for LaTeX users is
!
! @Manual{,
!   title = {{tmvtnorm}: Truncated Multivariate Normal Distribution},
!   author = {Stefan Wilhelm and Manjunath B G},
!   year = {2012},
!   note = {R package version 1.4-5},
!   url = {http://CRAN.R-project.org/package=tmvtnorm},
! }
!
! @param n number of random samples to generate by Gibbs sampling
! @param d dimension (d >= 2)
! @param mean mean vector of dimension d (d x 1)
! @param sigma covariance matrix (d x d)
! @param lower lower truncation points (d x 1)
! @param upper upper truncation points (d x 1)
! @param x0 start vector (d x 1)
! @param burnin number of burn-in samples to be discarded
! @param thinning thinning factor for thinning the Markov chain
! @return return value X --> vector (n * d) --> can be coerced into a (n x d) matrix
subroutine rtmvnormgibbscov(n, d, mean, sigma, lower, upper, x0, burnin, thinning, X)

IMPLICIT NONE

integer :: n, d, i, j, k, l, ind = 0, burnin, thinning
! subindex "-i"
integer, dimension(d-1) :: minus_i
double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2
double precision, dimension(n*d), INTENT(INOUT) :: X
double precision, dimension(d-1) :: s3
double precision, dimension(d) :: x0, xr, mean, lower, upper, sd

! covariance matrix sigma and its partitions Sigma_i, sigma_ii and S
double precision, dimension(d, d) :: sigma
double precision, dimension(d, d-1) :: Sigma_i
double precision :: sigma_ii
double precision, dimension(d-1,d-1) :: S
! S_inv (d-1 x d-1) is the inverse of S
double precision, dimension(d-1,d-1) :: S_inv
! list of d matrices of size 1 x (d-1) = a d x (d-1) matrix
double precision, dimension(d, d-1) :: P

! declarations for matrix inversion with LAPACK routines (dimension d-1)
double precision, dimension( d-1 ) :: work
! ipiv = pivot indices
integer, dimension( d-1 ) :: ipiv
! lda = leading dimension
integer :: m, lda, lwork, info

! initialise R random number generator
call rndstart()

m    = d-1
lda  = d-1
lwork= d-1
ind  = 0

! Partitioning of sigma
! sigma = [ sigma_ii  Sigma_i    ]
! (d x d) [ (1 x 1)   (1 x d-1)  ]
!         [ Sigma_i'  S          ]
!         [ (d-1 x 1) (d-1 x d-1)]
! List of conditional variances sd(i) can be precalculated
do i = 1,d
  ! subindex "-i"
  minus_i = (/ (j, j=1,i-1), (j, j=i+1,d) /)

  S = sigma(minus_i, minus_i)      ! Sigma_{-i,-i} : (d-1) x (d-1)
  sigma_ii = sigma(i,i)            ! Sigma_{i,i}   : 1 x 1
  Sigma_i(i,:) = sigma(i, minus_i) ! Sigma_{i,-i}  : 1 x (d-1)

  ! copy matrix S --> S_inv
  do k=1,(d-1)
    do l=1,(d-1)
      S_inv(k,l)=S(k,l)
    end do
  end do

  ! invert the matrix:
  ! LU factorization (triangular decomposition) of the matrix S_inv
  call dgetrf( m, m, S_inv, lda, ipiv, info )
  ! inverse of the LU-factorized matrix S_inv
  call dgetri( m, S_inv, lda, ipiv, work, lwork, info )

  P(i,:) = pack(matmul(Sigma_i(i,:), S_inv), .TRUE.) ! (1 x d-1) %*% (d-1 x d-1) = (1 x d-1)
  s2 = 0
  do j = 1,d-1
    s2 = s2 + P(i,j) * Sigma_i(i,j)
  end do
  sd(i) = sqrt(sigma(i,i) - s2) ! (1 x d-1) * (d-1 x 1) --> sd[[i]] is (1,1)
end do

! start value
xr = x0

! Actual number of samples to create:
! #burn-in-samples + n * #thinning-factor
! For all samples n times the thinning factor
do j = 1,(burnin + n * thinning)
  ! For all dimensions
  do i = 1,d
    ! compute the conditional mean and the conditional variance:
    ! the conditional variance does not depend on x[-i]!
    ! subindex "-i"
    minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /)
    ! mu_i = mean(i) + P[[i]] %*% (x(-i) - mean(-i))
    s3(1:(d-1))= xr(minus_i) - mean(minus_i)
    s2 = 0
    do k = 1,d-1
      s2 = s2 + P(i,k) * s3(k)
    end do
    mu_i = mean(i) + s2

    Fa = pnormr(lower(i), mu_i, sd(i), 1, 0)
    Fb = pnormr(upper(i), mu_i, sd(i), 1, 0)
    u = unifrnd()
    prob = u * (Fb - Fa) + Fa
    q = qnormr(prob, 0.0d0, 1.0d0, 1, 0)
    xr(i) = mu_i + sd(i) * q

    ! Record samples only for j > burnin; default is thinning = 1.
    ! With thinning, take only every x-th element.
    if (j > burnin .AND. mod(j - burnin,thinning) == 0) then
      ind = ind + 1
      X(ind) = xr(i)
    end if
  end do
end do

! reset R random number generator
call rndend()
end subroutine rtmvnormgibbscov

! Gibbs sampling based on covariance matrix and general linear constraints a <= Cx <= b
! with r >= d linear constraints. C is (r x d), x (d x 1), a,b (r x 1).
! x0 must satisfy the constraints a <= C x0 <= b.
!
! @param n number of random samples to generate by Gibbs sampling
! @param d dimension (d >= 2)
! @param r number of linear constraints
! @param mean mean vector of dimension d (d x 1)
! @param sigma covariance matrix (d x d)
! @param C matrix for linear constraints (r x d)
! @param a lower bound for linear constraints (r x 1)
! @param b upper bound for linear constraints (r x 1)
! @param x0 start vector (d x 1)
! @param burnin number of burn-in samples to be discarded
! @param thinning thinning factor for thinning the Markov chain
! @return return value X --> vector (n * d) --> can be coerced into a (n x d) matrix
subroutine rtmvnormgibbscov2(n, d, r, mean, sigma, C, a, b, x0, burnin, thinning, X)

IMPLICIT NONE

integer :: n, d, r, i, j, k = 1, l, ind = 0, burnin, thinning
! subindex "-i"
integer, dimension(d-1) :: minus_i
double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2
double precision, dimension(n*d), INTENT(INOUT) :: X
double precision, dimension(d-1) :: s3
double precision, dimension(d) :: x0, xr, mean, sd
double precision, dimension(r) :: a, b
double precision, dimension(r, d) :: C
double precision :: bound1, bound2, lower, upper

! covariance matrix sigma and its partitions Sigma_11, Sigma_12 and Sigma_22
double precision, dimension(d, d) :: sigma
double precision, dimension(d, d-1) :: Sigma_12
double precision :: Sigma_11
double precision, dimension(d-1,d-1) :: Sigma_22
! Sigma_22_inv (d-1 x d-1) is the inverse of Sigma_22
double precision, dimension(d-1,d-1) :: Sigma_22_inv
! list of d matrices of size 1 x (d-1) = a d x (d-1) matrix
double precision, dimension(d, d-1) :: P

! declarations for matrix inversion with LAPACK routines (dimension d-1)
double precision, dimension( d-1 ) :: work
! ipiv = pivot indices
integer, dimension( d-1 ) :: ipiv
! lda = leading dimension
integer :: m, lda, lwork, info

INTEGER, DIMENSION(1) :: seed
seed(1) = 12345

! initialise R random number generator
call rndstart()
!CALL RANDOM_SEED
!CALL RANDOM_SEED (SIZE=K)        ! Sets K = N
!CALL RANDOM_SEED (PUT = SEED (1:K)) ! Uses the starting value given by the user

m    = d-1
lda  = d-1
lwork= d-1
ind  = 0

! Partitioning of sigma
! sigma = [ Sigma_11  Sigma_12 ]  = [ Sigma_{i,i}  Sigma_{i,-i} ]
! (d x d) [                    ]    [ (1 x 1)      (1 x d-1)    ]
!         [ Sigma_21  Sigma_22 ]    [ Sigma_{-i,i} Sigma_{-i,-i}]
!         [                    ]    [ (d-1 x 1)    (d-1 x d-1)  ]
! List of conditional variances sd(i) can be precalculated
do i = 1,d
  ! subindex "-i"
  minus_i = (/ (j, j=1,i-1), (j, j=i+1,d) /)

  Sigma_22 = sigma(minus_i, minus_i) ! Sigma_{-i,-i} : (d-1) x (d-1)
  Sigma_11 = sigma(i,i)              ! Sigma_{i,i}   : 1 x 1
  Sigma_12(i,:) = sigma(i, minus_i)  ! Sigma_{i,-i}  : 1 x (d-1)
  !
Matrix Sigma_22 --> Sigma_22_inv umkopieren do k=1,(d-1) do l=1,(d-1) Sigma_22_inv(k,l) = Sigma_22(k,l) end do end do ! Matrix invertieren ! LU-Faktorisierung (Dreieckszerlegung) der Matrix S_inv call dgetrf( m, m, Sigma_22_inv, lda, ipiv, info ) ! Inverse der LU-faktorisierten Matrix S_inv call dgetri( m, Sigma_22_inv, lda, ipiv, work, lwork, info ) P(i,:) = pack(matmul(Sigma_12(i,:), Sigma_22_inv), .TRUE.) ! (1 x d-1) %*% (d-1 x d-1) = (1 x d-1) s2 = 0 do j = 1,d-1 s2 = s2 + P(i,j) * Sigma_12(i,j) end do sd(i) = sqrt(sigma(i,i) - s2) ! (1 x d-1) * (d-1 x 1) --> sd[[i]] ist (1,1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d !print '("i=",I3)',i ! Berechnung von bedingtem Erwartungswert und bedingter Varianz: ! bedingte Varianz hngt nicht von x[-i] ab! ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! mu_i = mean(i) + P[[i]] %*% (x(-i) - mean(-i)) s3(1:(d-1))= xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i,k) * s3(k) end do mu_i = mean(i) + s2 ! TODO: Set to -Inf/+Inf lower = -1000.0d0 upper = 1000d0 ! Determine lower bounds for x[i] using all linear constraints relevant for x[i] do k = 1,r if (C(k,i) == 0 ) then CYCLE end if s2 = dot_product(C(k,minus_i), xr(minus_i)) bound1 = (a(k)- s2) /C(k, i) bound2 = (b(k)- s2) /C(k, i) if (C(k, i) > 0) then lower = max(lower, bound1) upper = min(upper, bound2) else lower = max(lower, bound2) upper = min(upper, bound1) end if end do !print '("mu_i = ",f6.3)', mu_i !print '("sd(i) = ",f6.3)', sd(i) !print '("lower = ",f6.3)', lower !print '("upper = ",f6.3)',upper Fa = pnormr(lower, mu_i, sd(i), 1, 0) Fb = pnormr(upper, mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q !print '("xr(i)=",f6.3)',xr(i) ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbscov2 ! Gibbs sampling based on precision matrix H and a <= x <= b (no linear constraints) ! x,a,b are (d x 1). ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param H precision matrix (d x d) ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbsprec(n, d, mean, H, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d, d) :: H ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix als H[i, -i] double precision, dimension(d, d-1) :: P double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! 
initialise R random number generator call rndstart() ! initialise Fortran random number generator ! CALL RANDOM_SEED ! SW: I do not know why, but we have to reset ind each time!!! ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do i = 1,d minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) H_inv_ii(i) = (1.0d0 / H(i, i)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) P(i,:) = H(i, minus_i) ! 1 x (d-1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! mu_i <- mean[i] (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i]) s3(1:(d-1)) = xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i, k) * s3(k) end do mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) !call intpr("ind=", 4, ind, 1) !call dblepr("X(ind)=", 7, X(ind), 1) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbsprec ! Gibbs sampling based on precision matrix H and general linear constraints a <= Cx <= b ! with r >= d linear constraints. C is (r x d), x (d x 1), a,b (r x 1). ! x0 must satisfy the constraints a <= C x0 <= b. ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param r number of linear constraints ! @param mean mean vector of dimension d (d x 1) ! @param H precision matrix (d x d) ! @param C matrix for linear constraints (r x d) ! @param a lower bound for linear constraints (r x 1) ! @param b upper bound for linear constraints (r x 1) ! @param x0 start value (d x 1) ! @param burnin number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbsprec2(n, d, r, mean, H, C, a, b, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, r, i, j, k, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d, d) :: H ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix als H[i, -i] double precision, dimension(d, d-1) :: P double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, sd double precision, dimension(r) :: a, b double precision, dimension(r, d) :: C double precision :: bound1, bound2, lower, upper ! initialise R random number generator call rndstart() ! initialise Fortran random number generator ! CALL RANDOM_SEED ! 
SW: I do not know why, but we have to reset ind each time!!! ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do i = 1,d minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) H_inv_ii(i) = (1.0d0 / H(i, i)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) P(i,:) = H(i, minus_i) ! 1 x (d-1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! mu_i <- mean[i] (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i]) s3(1:(d-1)) = xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i, k) * s3(k) end do mu_i = mean(i) - H_inv_ii(i) * s2 ! TODO: Set to -Inf/+Inf lower = -1000.0d0 upper = 1000d0 ! Determine lower bounds for x[i] using all linear constraints relevant for x[i] do k = 1,r if (C(k,i) == 0 ) then CYCLE end if s2 = dot_product(C(k,minus_i), xr(minus_i)) bound1 = (a(k)- s2) /C(k, i) bound2 = (b(k)- s2) /C(k, i) if (C(k, i) > 0) then lower = max(lower, bound1) upper = min(upper, bound2) else lower = max(lower, bound2) upper = min(upper, bound1) end if end do !print '("mu_i = ",f6.3)', mu_i !print '("sd(i) = ",f6.3)', sd(i) !print '("lower = ",f6.3)', lower !print '("upper = ",f6.3)',upper Fa = pnormr(lower, mu_i, sd(i), 1, 0) Fb = pnormr(upper, mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) !call intpr("ind=", 4, ind, 1) !call dblepr("X(ind)=", 7, X(ind), 1) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbsprec2 ! populate map (row --> linked list of matrix elements) for with all entries in Hi, Hj and Hv ! if upper_triangular is TRUE, then we assume that only matrix elements with Hi <= Hj are given and we will ! put two elements in the (Hi,Hj,Hv) and (Hj,Hi,Hv) to the list for all Hi <= Hj subroutine populate_map(map, Hi, Hj, Hv, num_nonzero, d, upper_triangular) use linked_list integer :: num_nonzero, d integer, dimension(num_nonzero) :: Hi, Hj double precision, dimension(num_nonzero) :: Hv type(matrixrow), dimension(d), INTENT(INOUT) :: map type(matrixelem) :: newelem integer :: i, k logical :: upper_triangular !allocate(map(d)) ! and allocate our map do i=1,d nullify(map(i)%first) ! "zero out" our list nullify(map(i)%last) enddo ! populate map for with all entries in Hi, Hj and Hv do k=1,num_nonzero i = Hi(k) if (upper_triangular) then !if only upper triangular elements (i,j,v) with (i <= j) are given, !insert element (i, j, v) and (j, i, v) fr i <> j if (Hi(k) <= Hj(k)) then ! (i, j, v) element newelem%i = Hi(k) newelem%j = Hj(k) newelem%v = Hv(k) call insert_list_element(map(Hi(k)), newelem) end if if (Hi(k) < Hj(k)) then ! 
(j, i, v) element newelem%i = Hj(k) newelem%j = Hi(k) newelem%v = Hv(k) call insert_list_element(map(Hj(k)), newelem) end if else ! insert all elements given by (Hi, Hj, Hv) newelem%i = Hi(k) newelem%j = Hj(k) newelem%v = Hv(k) call insert_list_element(map(i), newelem) end if enddo end subroutine ! Gibbs sampling of the truncated multivariate normal distribution using a sparse matrix representation of the precision matrix H, ! represented in triplet form ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param Hi,Hj,Hv are the nonzero elements of the precision matrix H (d, d): H(i, j)=v, each a vector having the same length num_nonzero ! @param num_nonzero number of nonzero elements of the precision matrix H ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnorm_sparse_triplet(n, d, mean, Hi, Hj, Hv, num_nonzero, lower, upper, x0, burnin, thinning, X) use linked_list IMPLICIT NONE integer :: n, d, i, j, k, ind = 0, burnin, thinning, num_nonzero ! matrix representation of concentration matrix H integer, dimension(num_nonzero) :: Hi, Hj double precision, dimension(num_nonzero) :: Hv double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! in this map we store for every row i the non-zero entries (triplets) as a linked list of matrix elements ! example: i=1 --> (i=1,j=1,v=0.8), (i=1,j=2,v=0.2), (i=1,j=5,v=0.3) etc. ! The list will not be sorted ascending in j, so we can only iterate this list... type(matrixrow), dimension(d) :: map type(matrixelem) :: elem type( node ), pointer :: current ! initialise R random number generator call rndstart() ! initialise Fortran random number generator !CALL RANDOM_SEED ! We have to reset ind each time ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! loop through all elements and look for diagonal elements H[i,i], calculate conditional standard deviations sd(i | -i) ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do k=1,num_nonzero i = Hi(k) j = Hj(k) if (i == j) then H_inv_ii(i) = (1.0d0 / Hv(k)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) end if end do ! populate map with linked lists of matrix elements H[i,j]=v and symmetric element H[j,i]=v call populate_map(map, Hi, Hj, Hv, num_nonzero, d, .TRUE.) ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! s2 will represent the term H[i,-i] (x[-i] - mean[-i]) s2 = 0 ! We avoid some n x d x d accesses to hash matrix H even for those elements that are zero... ! For n=30 and d=5000 this results in 30 x 5000 x 5000 = 75 million accesses to matrix H... ! 
Instead of iterating all (d-1) elements H[i,-i] we only iterate all m (m < d) NON-ZERO elements H[i,-i] which will dramatically reduce the number ! of hashtable accesses. This will scale as n x d x m and will be linear in d for a fixed m. current => map(i)%first do while (associated(current)) elem = current%data ! sum only non-zero H[i,-i] elements in H[i,-i] (x[-i] - mean[-i]) ! no summing for i = j elements! if (elem%j .ne. elem%i) then k = elem%j s2 = s2 + elem%v * (xr(k) - mean(k)) !TODO check end if current => current%next end do ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! we only loop through all non-zero elements in H[i,-i] = all indices j .ne. i in sparse matrix representation H[i,j]=v mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! deallocate linked list at the end of the program and free memory do i=1,d call free_all(map(i)) nullify(map(i)%first) ! "zero out" our list nullify(map(i)%last) enddo nullify(current) ! reset R random number generator call rndend() end subroutine rtmvnorm_sparse_triplet ! Gibbs sampling of the truncated multivariate normal distribution using a sparse matrix representation of the precision matrix H (d x d). ! ! Instead of using a triplet representation H(i,j)=v, we use the compressed sparse column (csc) format with 3 vectors ! Hi : integer vector of row index, length num_nonzero; starting from zero ! Hp : integer vector of pointers, length d + 1; starting from zero; non-decreasing vector ! Hv : double vector of values, length num_nonzero ! ! This format is good at accessing all non-zero elements in one column j ! (and -as in our case- for symmetric matrices also to acess all elements in one row i) ! ! To access an element H(i,j), the following steps are necessary ! j ! v = Hv(Hp(j):Hp(j+1)) ! i = Hi(Hp(j):Hp(j+1)) ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param Hi,Hp,Hv are the nonzero elements of the precision matrix H (d, d): H(i, j)=v, each a vector having the same length num_nonzero ! @param num_nonzero number of nonzero elements of the precision matrix H ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnorm_sparse_csc(n, d, mean, Hi, Hp, Hv, num_nonzero, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, r, ind = 0, burnin, thinning, num_nonzero ! compressed sparse column (csc) matrix representation of concentration matrix H integer, dimension(num_nonzero) :: Hi integer, dimension(d+1) :: Hp double precision, dimension(num_nonzero) :: Hv double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! 
initialise R random number generator
call rndstart()
! initialise Fortran random number generator
!CALL RANDOM_SEED

! SW: I do not know why, but we have to reset ind each time!!!
! If we forget this line, ind will be incremented further and then Fortran crashes!
ind = 0

! loop through all elements and look for diagonal elements H[i,i], calculate conditional standard deviations sd(i | -i)
! List of conditional variances sd(i) can be precalculated
! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H))
! does not depend on x[-i] and can be precalculated before running the chain.
do j=1,d
  do k=Hp(j),Hp(j+1)-1            ! k from 0..(d-1)
    i = Hi(k+1) + 1               ! Hi is index from 0..(d-1) --> need index i=1..d
    if (i == j) then
      H_inv_ii(i) = (1.0d0 / Hv(k+1)) ! H^{-1}(i,i) = 1 / H(i,i)
      sd(i) = sqrt(H_inv_ii(i))       ! sd(i) is sqrt(H^{-1}(i,i))
    end if
  end do
end do

! start value
xr = x0

! Actual number of samples to create:
! #burn-in-samples + n * #thinning-factor
! For all samples n times the thinning factor
do j = 1,(burnin + n * thinning)
  ! For all dimensions
  do i = 1,d
    ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i])
    s2 = 0
    ! For H[i,-i] (x[-i] - mean[-i]) we need to sum only all non-zero H[i,-i] elements!
    ! since H is symmetric, we can use the column sparse compressed (csc) format and sum all H[-i,i] elements instead
    do k=Hp(i),Hp(i+1)-1          ! loop all non-zero elements in column i, k is index 0..(d-1)
      r = Hi(k+1) + 1             ! row index r in column i is r=1..d
      if (i .ne. r) then
        s2 = s2 + Hv(k+1) * (xr(r) - mean(r))
      end if
    end do
    mu_i = mean(i) - H_inv_ii(i) * s2

    Fa = pnormr(lower(i), mu_i, sd(i), 1, 0)
    Fb = pnormr(upper(i), mu_i, sd(i), 1, 0)
    u = unifrnd()
    !call RANDOM_NUMBER(u)
    prob = u * (Fb - Fa) + Fa
    q = qnormr(prob, 0.0d0, 1.0d0, 1, 0)
    xr(i) = mu_i + sd(i) * q

    ! Only retain samples for j > burnin. Default is thinning = 1.
    ! If thinning > 1, retain only every x-th element.
    if (j > burnin .AND. mod(j - burnin,thinning) == 0) then
      ind = ind + 1
      X(ind) = xr(i)
      ! call intpr("ind=", 4, ind, 1)
      ! call dblepr("X(ind)=", 7, X(ind), 1)
    end if
  end do
end do

! reset R random number generator
call rndend()
end subroutine rtmvnorm_sparse_csc
tmvtnorm/vignettes/0000755000176200001440000000000014533561630014156 5ustar liggesuserstmvtnorm/vignettes/tmvtnorm.bib0000644000176200001440000001303711701134134016513 0ustar liggesusers% This file was created with JabRef 2.5.
% Encoding: Cp1252

@BOOK{Geweke2005,
  title = {Contemporary Bayesian Econometrics and Statistics},
  publisher = {John Wiley and Sons},
  year = {2005},
  author = {John F. Geweke},
  file = {:John Geweke. Contemporary Bayesian Econometrics and Statistics (Wiley,2005)(ISBN 0471679321)(308s).pdf:PDF},
  owner = {stefan},
  timestamp = {2007.01.30}
}

@ELECTRONIC{Geweke1991,
  author = {John F. Geweke},
  year = {1991},
  title = {Efficient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities},
  howpublished = {http://www.biz.uiowa.edu/faculty/jgeweke/papers/paper47/paper47.pdf},
  file = {:Geweke1991.pdf:PDF},
  owner = {stefan},
  timestamp = {2010.01.22}
}

@INPROCEEDINGS{Geweke1991a,
  author = {John F. Geweke},
  title = {Efficient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints},
  booktitle = {Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface.
Seattle Washington, April 21-24, 1991}, year = {1991}, pages = {571-578}, file = {:Geweke1991a.pdf:PDF}, owner = {stefan}, timestamp = {2010.02.09} } @BOOK{Greene2003, title = {Econometric Analysis}, publisher = {Prentice-Hall}, year = {2003}, author = {William H. Greene}, edition = {5}, file = {Greene - Econometrics.pdf:Greene - Econometrics.pdf:PDF}, owner = {stefan}, timestamp = {2005.12.13} } @UNPUBLISHED{Griffiths2002, author = {William Griffiths}, title = {A {G}ibbs' Sampler for the Parameters of a Truncated Multivariate Normal Distribution}, note = {University of Melbourne}, year = {2002}, file = {:Griffiths2002.pdf:PDF}, institution = {The University of Melbourne}, number = {856}, owner = {stefan}, timestamp = {2012.01.04}, type = {Department of Economics - Working Papers Series}, url = {http://ideas.repec.org/p/mlb/wpaper/856.html} } @INBOOK{Griffiths2004, chapter = {A {G}ibbs' sampler for the parameters of a truncated multivariate normal distribution}, pages = {75 - 91}, title = {Contemporary Issues In Economics And Econometrics: Theory and Application}, publisher = {Edward Elgar Publishing}, year = {2004}, editor = {Ralf Becker and Stan Hurn}, author = {William E. Griffiths}, journal = {Contemporary issues in economics and econometrics}, owner = {stefan}, timestamp = {2009.09.09} } @INPROCEEDINGS{Kotecha1999, author = {Kotecha, J. H. and Djuric, P. M.}, title = {{G}ibbs sampling approach for generation of truncated multivariate Gaussian random variables}, booktitle = {ICASSP '99: Proceedings of the Acoustics, Speech, and Signal Processing, 1999. on 1999 IEEE International Conference}, year = {1999}, pages = {1757--1760}, address = {Washington, DC, USA}, publisher = {IEEE Computer Society}, doi = {http://dx.doi.org/10.1109/ICASSP.1999.756335}, file = {:Kotecha1999.pdf:PDF}, isbn = {0-7803-5041-3}, journal = {IEEE Computer Society}, owner = {stefan}, timestamp = {2009.04.16} } @MANUAL{tmvtnorm-0.7, title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, author = {Stefan Wilhelm}, year = {2009}, note = {R package version 0.7-2}, owner = {stefan}, timestamp = {2009.10.05}, url = {http://www.r-project.org} } @MANUAL{tmvtnorm-1.2, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.2-3}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.3, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.3-1}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.4, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.4-1}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @ARTICLE{RJournal:Wilhelm+Manjunath:2010, author = {Stefan Wilhelm and B. G. 
Manjunath},
  title = {{tmvtnorm: A Package for the Truncated Multivariate Normal Distribution}},
  journal = {The R Journal},
  year = {2010},
  volume = {2},
  pages = {25--29},
  number = {1},
  month = {June},
  owner = {stefan},
  timestamp = {2012.01.04},
  url = {http://journal.r-project.org/archive/2010-1/RJournal_2010-1_Wilhelm+Manjunath.pdf}
}

@MANUAL{tmvtnorm-0.9,
  title = {{tmvtnorm}: Truncated Multivariate Normal Distribution},
  author = {Stefan Wilhelm and B G Manjunath},
  year = {2010},
  note = {R package version 0.9-2},
  owner = {stefan},
  timestamp = {2012.01.04},
  url = {http://CRAN.R-project.org/package=tmvtnorm}
}

@MANUAL{tmvtnorm-1.1,
  title = {{tmvtnorm}: Truncated Multivariate Normal Distribution},
  author = {Stefan Wilhelm and B G Manjunath},
  year = {2010},
  note = {R package version 1.1-0},
  owner = {stefan},
  timestamp = {2012.01.04},
  url = {http://CRAN.R-project.org/package=tmvtnorm}
}

@comment{jabref-meta: selector_publisher:}
@comment{jabref-meta: selector_author:}
@comment{jabref-meta: selector_journal:}
@comment{jabref-meta: selector_keywords:}
tmvtnorm/vignettes/GibbsSampler.Rnw0000644000176200001440000002343414216137300017214 0ustar liggesusers%\VignetteIndexEntry{A short description of the Gibbs Sampler}
\documentclass[a4paper]{article}
\usepackage{Rd}
\usepackage{amsmath}
\usepackage{natbib}
\usepackage{palatino,mathpazo}
\usepackage{Sweave}
%\newcommand{\pkg}[1]{\textbf{#1}}
\newcommand{\vecb}[1]{\ensuremath{\boldsymbol{\mathbf{#1}}}}
\def\bfx{\mbox{\boldmath $x$}}
\def\bfy{\mbox{\boldmath $y$}}
\def\bfz{\mbox{\boldmath $z$}}
\def\bfalpha{\mbox{\boldmath $\alpha$}}
\def\bfbeta{\mbox{\boldmath $\beta$}}
\def\bfmu{\mbox{\boldmath $\mu$}}
\def\bfa{\mbox{\boldmath $a$}}
\def\bfb{\mbox{\boldmath $b$}}
\def\bfu{\mbox{\boldmath $u$}}
\def\bfSigma{\mbox{\boldmath $\Sigma$}}
\def\bfD{\mbox{\boldmath $D$}}
\def\bfH{\mbox{\boldmath $H$}}
\def\bfT{\mbox{\boldmath $T$}}
\def\bfX{\mbox{\boldmath $X$}}
\def\bfY{\mbox{\boldmath $Y$}}
\title{Gibbs Sampler for the Truncated Multivariate Normal Distribution}
\author{Stefan Wilhelm\thanks{wilhelm@financial.com}}
\begin{document}
\SweaveOpts{concordance=TRUE}
\maketitle

In this note we describe two ways of generating random variables with the Gibbs sampling approach for a truncated multivariate normal variable $\bfx$, whose density function can be expressed as:
\begin{eqnarray*}
f(\bfx,\bfmu,\bfSigma,\bfa,\bfb) & = &
\frac{ \exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\}} }
{ \int_{\bfa}^{\bfb}{\exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\} } d\bfx } }
\end{eqnarray*}
for $\bfa \le \bfx \le \bfb$ and $0$ otherwise.\\
\par
The first approach, as described by \cite{Kotecha1999}, uses the covariance matrix $\bfSigma$ and has been implemented in the R package \pkg{tmvtnorm} since version 0.9 (\cite{tmvtnorm-0.9}). The second way is based on the works of \cite{Geweke1991,Geweke2005} and uses the precision matrix $\bfH = \bfSigma^{-1}$. As will be shown below, the usage of the precision matrix offers some computational advantages, since it does not involve matrix inversions and is therefore favorable in higher dimensions and settings where the precision matrix is readily available. Applications are for example the analysis of spatial data, such as from telecommunications or social networks.\\
\par
Both versions of the Gibbs sampler can also be used for general linear constraints $\bfa \le \bfD \bfx \le \bfb$, as we will show in the last section. The function \code{rtmvnorm()} in the package \pkg{tmvtnorm} contains the \R{} implementation of the methods described in this note (\cite{tmvtnorm-1.3}).
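As a small illustration, both parametrizations can be invoked as follows (a minimal sketch, not evaluated here; all arguments are documented in \code{rtmvnorm()}):

<<eval=FALSE>>=
library(tmvtnorm)
sigma <- matrix(c(1, 0.8, 0.8, 2), 2, 2)
# Gibbs sampling parametrized with the covariance matrix sigma
X1 <- rtmvnorm(n=10000, mean=c(0, 0), sigma=sigma,
  lower=c(-1, -1), upper=c(1, 1), algorithm="gibbs")
# the same chain parametrized with the precision matrix H
H <- solve(sigma)
X2 <- rtmvnorm(n=10000, mean=c(0, 0), H=H,
  lower=c(-1, -1), upper=c(1, 1), algorithm="gibbs")
@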
\section{Gibbs Sampler with covariance matrix $\bfSigma$}

We describe here a Gibbs sampler for sampling from a truncated multinormal distribution, as proposed by \cite{Kotecha1999}. It uses the fact that the conditional distributions are truncated normal again. Kotecha and Djuric use the full conditionals $f(x_i | x_{-i}) = f(x_i | x_1,\ldots,x_{i-1},x_{i+1},\ldots,x_{d})$.\\
\par
We use the fact that the conditional density of a multivariate normal distribution is multivariate normal again. We cite \cite{Geweke2005}, p. 171, for the following theorem on the conditional multivariate normal distribution.\\
Let
$\bfz = \left( \begin{array}{c} \bfx \\ \bfy \end{array} \right) \sim N(\bfmu, \bfSigma)$
with
$\bfmu = \left( \begin{array}{c}\bfmu_x \\ \bfmu_y \end{array} \right)$
and
$\bfSigma = \left[ \begin{array}{cc} \bfSigma_{xx} & \bfSigma_{xy} \\ \bfSigma_{yx} & \bfSigma_{yy} \end{array} \right]$\\
Denote the corresponding precision matrix
\begin{equation}
\bfH = \bfSigma^{-1} = \left[ \begin{array}{cc} \bfH_{xx} & \bfH_{xy} \\ \bfH_{yx} & \bfH_{yy} \end{array} \right]
\end{equation}
Then the distribution of $\bfy$ conditional on $\bfx$ is normal with variance
\begin{equation}
\bfSigma_{y.x} = \bfSigma_{yy} - \bfSigma_{yx} \bfSigma_{xx}^{-1} \bfSigma_{xy} = \bfH_{yy}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{y.x} = \bfmu_{y} + \bfSigma_{yx} \bfSigma_{xx}^{-1} (\bfx - \bfmu_x) = \bfmu_y - \bfH_{yy}^{-1} \bfH_{yx}(\bfx - \bfmu_x)
\end{equation}
\par
In the case of the full conditionals $f(x_i | x_{-i})$, which we will denote by $i.-i$, this results in the following formulas:
$\bfz = \left( \begin{array}{c} \bfx_i \\ \bfx_{-i} \end{array} \right) \sim N(\bfmu, \bfSigma)$
with
$\bfmu = \left( \begin{array}{c}\bfmu_i \\ \bfmu_{-i} \end{array} \right)$
and
$\bfSigma = \left[ \begin{array}{cc} \bfSigma_{ii} & \bfSigma_{i,-i} \\ \bfSigma_{-i,i} & \bfSigma_{-i,-i} \end{array} \right]$
Then the distribution of $i$ conditional on $-i$ is normal with variance
\begin{equation}
\bfSigma_{i.-i} = \bfSigma_{ii} - \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} \bfSigma_{-i,i} = \bfH_{ii}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{i.-i} = \bfmu_{i} + \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} (\bfx_{-i} - \bfmu_{-i}) = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i})
\end{equation}

We can then construct a Markov chain which continuously draws from $f(x_i | x_{-i})$ subject to $a_i \le x_i \le b_i$. Let $\bfx^{(j)}$ denote the sample drawn at the $j$-th MCMC iteration. The steps of the Gibbs sampler for generating $N$ samples $\bfx^{(1)},\ldots,\bfx^{(N)}$ are:
\begin{itemize}
\item Since the conditional variance $\bfSigma_{i.-i}$ is independent of the actual realisation $\bfx^{(j)}_{-i}$, we can precalculate it before running the Markov chain.
\item Choose a start value $\bfx^{(0)}$ of the chain.
\item In each round $j=1,\ldots,N$ we go from $i=1,\ldots,d$ and sample from the conditional density $x^{(j)}_i | x^{(j)}_1,\ldots,x^{(j)}_{i-1},x^{(j-1)}_{i+1},\ldots,x^{(j-1)}_{d}$.
\item Draw a uniform random variate $U \sim Uni(0, 1)$. This is where our approach slightly differs from \cite{Kotecha1999}. They draw a normal variate $y$ and then apply $\Phi(y)$, which is basically uniform.
\item We draw from univariate conditional normal distributions with mean $\mu$ and variance $\sigma^2$.
See for example \cite{Greene2003} or \cite{Griffiths2004} for a transformation between a univariate normal variate $y \sim N(\mu,\sigma^2)$ and a univariate truncated normal variate $x \sim TN(\mu,\sigma^2, a, b)$. For each realisation $y$ we can find an $x$ such that $P(Y \le y) = P(X \le x)$:
\begin{equation*}
\frac{ \Phi \left( \frac{x - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) }
{ \Phi \left( \frac{b - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) }
= \Phi \left( \frac{y - \mu}{\sigma} \right) = U
\end{equation*}
\item Draw $\bfx_{i.-i}$ from the conditional univariate truncated normal distribution \\ $TN(\bfmu_{i.-i}, \bfSigma_{i.-i}, a_i, b_i)$ by
\begin{equation}
\begin{split}
\bfx_{i.-i} & = \bfmu_{i.-i} + \\
 & \sigma_{i.-i} \Phi^{-1} \left[ U \left( \Phi \left( \frac{b_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) - \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right) + \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right]
\end{split}
\end{equation}
\end{itemize}

\section{Gibbs Sampler with precision matrix H}
The Gibbs sampler stated in terms of the precision matrix $\bfH = \bfSigma^{-1}$ instead of the covariance matrix $\bfSigma$ is much easier to write and to implement: the distribution of $i$ conditional on $-i$ is normal with variance
\begin{equation}
\bfSigma_{i.-i} = \bfH_{ii}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{i.-i} = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i})
\end{equation}
Most importantly, if the precision matrix $\bfH$ is known, the Gibbs sampler involves only the inversion of $\bfH_{ii}$, which in our case is a diagonal element/scalar. Hence, from a computational and performance perspective, especially in high dimensions, using $\bfH$ rather than $\bfSigma$ is preferable. When using $\bfSigma$ in $d$ dimensions, we have to invert $d$ matrices $\bfSigma_{-i,-i}$ of size $(d-1) \times (d-1)$, $i=1,\ldots,d$, which can be a substantial computation.

\section{Gibbs Sampler for linear constraints}
In this section we present Gibbs sampling for general linear constraints based on \cite{Geweke1991}. We want to sample from $\bfx \sim N(\bfmu, \bfSigma)$ subject to linear constraints $\bfa \le \bfD \bfx \le \bfb$ for a full-rank matrix $\bfD$.\\
Defining
\begin{equation}
\bfz = \bfD \bfx - \bfD \bfmu,
\end{equation}
we have $E[\bfz] = \bfD E[\bfx] - \bfD \bfmu = 0$ and $Var[\bfz] = \bfD Var[\bfx] \bfD' = \bfD \bfSigma \bfD'$. Hence, this problem can be transformed to the rectangular case $\bfalpha \le \bfz \le \bfbeta$ with $\bfalpha = \bfa - \bfD \bfmu$ and $\bfbeta = \bfb - \bfD \bfmu$. It follows that $\bfz \sim N(0, \bfT)$ with $\bfT = \bfD \bfSigma \bfD'$.\\
In the precision matrix case, the corresponding precision matrix of the transformed problem will be $\bfT^{-1} = ( \bfD \bfSigma \bfD' )^{-1} = \bfD'^{-1} \bfH \bfD^{-1}$. We can then sample from $\bfz$ in the way described in the previous sections (either with the covariance or the precision matrix approach) and then transform $\bfz$ back to $\bfx$ by
\begin{equation}
\bfx = \bfmu + \bfD^{-1} \bfz
\end{equation}

\bibliographystyle{plainnat}
\bibliography{tmvtnorm}
\end{document}
tmvtnorm/NEWS0000644000176200001440000001326114533561270012650 0ustar liggesusers# User visible changes in tmvtnorm package

## changes in tmvtnorm 1.6 (2023-12-05)
* Changed package encoding from 'latin1' to 'UTF-8'.
* Converted the non-ASCII content to ASCII.
* Fixed CITATION file ## changes in tmvtnorm 1.5 (2022-03-22) * fixed misleading stop message to "lower bound should be strictly less than the upper bound". Reported by Chao Wang [chao-wang@uiowa.edu] * Added README.md * Fixed two warnings/errors for R 4.2.0 in `tmvtnorm::rtmvnorm` input checks ``` 1: In !is.null(H) && sigma != diag(length(mean)) : 'length(x) = 9 > 1' in coercion to 'logical(1)' 2: In start.value < lower || start.value > upper : 'length(x) = 3 > 1' in coercion to 'logical(1)' ``` ## changes in tmvtnorm 1.4-10 (2015-08-24) * Fixed problem with build process in src/Makevars (parallel make) ## changes in tmvtnorm 1.4-9 (2014-03-03) * Moved package vignette to vignettes/ directory to be consistent with R 3.1.0 ## changes in tmvtnorm 1.4-8 (2013-03-29) * bugfix in dtmvnorm(...,margin=NULL). Introduced in 1.4-7. Reported by Julius.Vainora [julius.vainora@gmail.com] * bugfix in rtmvt(..., algorithm="gibbs"): Algorithm="gibbs" was not forwarded properly to rtmvnorm(). Reported by Aurelien Bechler [aurelien.bechler@agroparistech.fr] * allow non-integer degrees of freedom in rtmvt, e.g. rtmvt(..., df=3.2). Suggested by Aurelien Bechler [aurelien.bechler@agroparistech.fr] Rejection sampling does not work with non-integer df, only Gibbs sampling. ## changes in tmvtnorm 1.4-7 (2012-11-29) * new method rtmvnorm2() for drawing random samples with general linear constraints a <= Dx <= b with x (d x 1), D (r x d), a,b (r x 1) which can also handle the case r > d. Requested by Xiaojin Xu [xiaojinxu.fdu@gmail.com] Currently works with Gibbs sampling. * bugfix in dtmvnorm(...,log=TRUE). Reported by John Merrill [john.merrill@gmail.com] * optimization in mtmvnorm() to speed up the calculations * dtmvnorm.marginal2() can now be used with vectorized xq, xr. ## changes in tmvtnorm 1.4-6 (2012-03-23) * further optimization in mtmvnorm() and implementation of Johnson/Kotz-Formula when only a subset of variables is truncated ## changes in tmvtnorm 1.4-5 (2012-02-13) * rtmvnorm() can be used with both sparse triplet representation and (compressed sparse column) for H * dramatic performance gain in mtmvnorm() through optimization ## changes in tmvtnorm 1.4-4 (2012-01-10) * dramatic performance gain in rtmvnorm.sparseMatrix() through optimization * Bugfix in rtmvnorm() with linear constraints D: (reported by Claudia Köllmann [koellmann@statistik.tu-dortmund.de]) - forwarding "algorithm=" argument from rtmvnorm() to internal methods dealing with linear constraints was corrupt. - sampling with linear constraints D lead to wrong results due to missing t() ## changes in tmvtnorm 1.4-2 (2012-01-04) * Bugfix in rtmvnorm.sparseMatrix(): fixed a memory leak in Fortran code * Added a package vignette with a description of the Gibbs sampler ## changes in tmvtnorm 1.4-1 (2011-12-27) * Allow a sparse precision matrix H to be passed to rtmvnorm.sparseMatrix() which allows random number generation in very high dimensions (e.g. d >> 5000) * Rewritten the Fortran version of the Gibbs sampler for the use with sparse precision matrix H. ## changes in tmvtnorm 1.3-1 (2011-12-01) * Allow for the use of a precision matrix H rather than covariance matrix sigma in rtmvnorm() for both rejection and Gibbs sampling. (requested by Miguel Godinho de Matos from Carnegie Mellon University) * Rewritten both the R and Fortran version of the Gibbs sampler. * GMM estimation in gmm.tmvnorm(,method=c("ManjunathWilhelm","Lee")) can now be done using the Manjunath/Wilhelm and Lee moment conditions. 
## changes in tmvtnorm 1.2-3 (2011-06-04)

* rtmvnorm() now works with general linear constraints a <= Dx <= b,
  with x (d x 1), full-rank matrix D (d x d), a,b (d x 1).
* Implemented with both rejection sampling and Gibbs sampling (Geweke (1991))
* Added GMM estimation in gmm.tmvnorm()
* Bugfix in dtmvt() thanks to Jason Kramer: Using type="shifted" in pmvt()
  (reported by Jason Kramer [jskramer@uci.edu])

## changes in tmvtnorm 1.1-5 (2010-11-20)

* Added Maximum Likelihood estimation method (MLE) mle.tmvtnorm()
* optimized mtmvnorm(): precalculated F_a[i] in a separate loop, which improved
  the computation of the mean, suggested by Miklos.Reiter@sungard.com
* added a flag doComputeVariance (default TRUE), so users who are only interested
  in the mean can skip the variance computation
  (BTW: an analogous flag for the mean would not make sense, since the mean has
  to be calculated anyway.)
* Fixed a bug with LAPACK and BLAS/FLIBS libraries: Prof. Ripley/Writing R extensions:
  "For portability, the macros @code{BLAS_LIBS} and @code{FLIBS} should always be
  included @emph{after} @code{LAPACK_LIBS}."

## changes in tmvtnorm 1.0-2 (2010-01-28)

* Added methods for the truncated multivariate t-distribution: rtmvt(), dtmvt(),
  ptmvt() and ptmvt.marginal()

## changes in tmvtnorm 0.9-2 (2010-01-03)

* Implementation of "thinning technique" for Gibbs sampling:
  Added parameter thinning=1 to rtmvnorm.gibbs() for thinning of Markov chains,
  i.e. reducing autocorrelations of random samples
* Documented the additional arguments "thinning", "start.value" and "burn.in"
  for rtmvnorm.gibbs()
* Added parameters "burn-in" and "thinning" in the Fortran code for discarding
  burn-in samples and thinning the Markov chain.
* Added parameter log=FALSE to dtmvnorm.marginal()
* Added parameter margin=NULL to dtmvnorm() as an interface/wrapper to the marginal
  density functions dtmvnorm.marginal() and dtmvnorm.marginal2()
* Code polishing and review
tmvtnorm/R/0000755000176200001440000000000014360222632012341 5ustar liggesuserstmvtnorm/R/dtmvnorm.R0000644000176200001440000000770414532764510014341 0ustar liggesuserssource("R/rtmvnorm.R")

# Density function of the multivariate truncated normal distribution
# with truncation vectors lower and upper
#
# cf. Horrace (2005) "Some Results on the Multivariate Truncated Normal Distribution"
#
# @param x argument vector of the density of length n, or a (T x n) matrix with T observations
# @param mean mean vector of length n
# @param sigma covariance matrix (n x n)
# @param lower lower truncation vector (n x 1) with lower <= x <= upper
# @param upper upper truncation vector (n x 1) with lower <= x <= upper
# @param margin if NULL then joint density, if MARGIN=1 then first marginal density,
#        if MARGIN=c(1,2) then bivariate marginal density for x_1 and x_2
dtmvnorm <- function(x, mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep( -Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  log = FALSE,
  margin=NULL)
{
  # check of standard tmvnorm arguments
  cargs <- checkTmvArgs(mean=mean, sigma=sigma, lower=lower, upper=upper)
  mean  <- cargs$mean
  sigma <- cargs$sigma
  lower <- cargs$lower
  upper <- cargs$upper

  # Check of optional argument "margin"
  if (!is.null(margin)) {
    # Note: dtmvnorm() also accepts a (T x n) matrix as argument x,
    # while dtmvnorm.marginal() only takes a vector and
    # dtmvnorm.marginal2() takes two vectors of the same length.
    # Be careful with the checks on the length of x
    # and with the log=TRUE argument!
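    # Illustrative sketch (hypothetical values), assuming a bivariate setup:
    #   dtmvnorm(x=0.5, mean=c(0,0), sigma=diag(2), lower=c(-1,-1), margin=1)
    # gives the one-dimensional marginal density f_1(0.5), while
    #   dtmvnorm(x=c(0.5, 0), mean=c(0,0), sigma=diag(2), lower=c(-1,-1), margin=c(1,2))
    # gives the bivariate marginal density f_{1,2}(0.5, 0).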
if (!length(margin) %in% c(1, 2)) stop("Length of margin must be either 1 (one-dimensional marginal density) or 2 (bivariate marginal density).") if (any(margin <= 0) || any(margin > length(mean))) { stop("All elements in margin must be in 1..length(mean).") } # one-dimensional marginal density f_{n}(x_n) if (length(margin) == 1) { return(dtmvnorm.marginal(xn=x, n=margin, mean = mean, sigma = sigma, lower = lower, upper = upper, log = log)) } # for bivariate marginal density f_{q,r}(x_q, x_r) we need q <> r and "x" as (n x 2) matrix if (length(margin) == 2) { if(margin[1] == margin[2]) stop("Two different margins needed for bivariate marginal density.") if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } if(!is.matrix(x) || ncol(x) != 2) stop("For bivariate marginal density x must be either a (n x 2) matrix or a vector of length 2.") # bivariate marginal density f_{q,r}(x_q, x_r) return(dtmvnorm.marginal2(xq=x[,1], xr=x[,2], q=margin[1], r=margin[2], mean = mean, sigma = sigma, lower = lower, upper = upper, log = log)) } } # Check of additional inputs like x if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } # Anzahl der Beobachtungen T <- nrow(x) # check for each row if in support region insidesupportregion <- logical(T) for (i in 1:T) { insidesupportregion[i] = all(x[i,] >= lower & x[i,] <= upper & !any(is.infinite(x))) } if(log) { # density value for points inside the support region dvin <- dmvnorm(x, mean=mean, sigma=sigma, log=TRUE) - log(pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma)) # density value for points outside the support region dvout <- -Inf } else { dvin <- dmvnorm(x, mean=mean, sigma=sigma, log=FALSE) / pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma) dvout <- 0 } f <- ifelse(insidesupportregion, dvin, dvout) return(f) } #dtmvnorm(x=c(0,0)) #dtmvnorm(x=c(0,0), sigma=diag(2)) #dtmvnorm(x=c(0,0), mean=c(0,0), sigma=diag(2)) #dmvnorm(x=c(0,0), mean=c(0,0), sigma=diag(2)) #dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2)) #dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(0.5, 0.5)) #dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(0.5, 0.5), log=TRUE) #dtmvnorm(as.matrix(seq(-1,2, by=0.1), ncol=1), mean=c(0.5), sigma=as.matrix(1.2^2), lower=0) tmvtnorm/R/bivariate-marginal-density.R0000644000176200001440000001601114532764701017707 0ustar liggesusers# SW: This method is private. It is the same as mvtnorm::dmvnorm() function, # but without sanity checks for sigma. We perform the sanity checks before. .dmvnorm <- function (x, mean, sigma, log = FALSE) { if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } distval <- mahalanobis(x, center = mean, cov = sigma) logdet <- sum(log(eigen(sigma, symmetric = TRUE, only.values = TRUE)$values)) logretval <- -(ncol(x) * log(2 * pi) + logdet + distval)/2 if (log) return(logretval) exp(logretval) } # Computation of the bivariate marginal density F_{q,r}(x_q, x_r) (q != r) # of truncated multivariate normal distribution # following the works of Tallis (1961), Leppard and Tallis (1989) # # References: # Tallis (1961): # "The Moment Generating Function of the Truncated Multi-normal Distribution" # Leppard and Tallis (1989): # "Evaluation of the Mean and Covariance of the Truncated Multinormal" # Manjunath B G and Stefan Wilhelm (2009): # "Moments Calculation for the Doubly Truncated Multivariate Normal Distribution" # # (n-2) Integral, d.h. 
zweidimensionale Randdichte in Dimension q und r, # da (n-2) Dimensionen rausintegriert werden. # vgl. Tallis (1961), S.224 und Code Leppard (1989), S.550 # # f(xq=b[q], xr=b[r]) # # Attention: Function is not vectorized at the moment! # Idee: Vektorisieren xq, xr --> die Integration Bounds sind immer verschieden, # pmvnorm() kann nicht vektorisiert werden. Sonst spart man schon ein bisschen Overhead. # Der eigentliche bottleneck ist aber pmvnorm(). # Gibt es Unterschiede bzgl. der verschiedenen Algorithmen GenzBretz() vs. Miwa()? # pmvnorm(corr=) kann ich verwenden # # @param xq # @param xr # @param q index for dimension q # @param r Index for Dimension r # @param mean # @param sigma # @param lower # @param upper # @param log=FALSE dtmvnorm.marginal2 <- function(xq, xr, q, r, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE, pmvnorm.algorithm=GenzBretz()) { # dimensionality n <- nrow(sigma) # number of xq values delivered N <- length(xq) # input checks if (n < 2) stop("Dimension n must be >= 2!") # TODO: Check eventuell rauslassen # SW; isSymmetric is sehr teuer #if (!isSymmetric(sigma, tol = sqrt(.Machine$double.eps))) { #if (!isTRUE(all.equal(sigma, t(sigma))) || any(diag(sigma) < 0)) { # stop("sigma must be a symmetric matrix") #} if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } if (!(q %in% 1:n && r %in% 1:n)) { stop("Indexes q and r must be integers in 1:n") } if (q == r) { stop("Index q must be different than r!") } # Skalierungsfaktor der gestutzten Dichte (Anteil nach Trunkierung) # Idee: dtmvnorm.marginal2() braucht 80% der Zeit von mtmvnorm(). Die meiste Zeit davon in pmvnorm(). # pmvnorm()-Aufrufe sind teuer, daher koennte man das alpha schon vorher berechnen # lassen (nur 2 pmvnorm()-Aufrufe in der Methode, wuerde 50% sparen) # Da Methode jetzt vektorisiert ist, sparen wir die Aufrufe wg. 
alpha alpha <- pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma, algorithm=pmvnorm.algorithm) if (n == 2) { density <- numeric(N) indOut <- xq < lower[q] | xq > upper[q] | xr < lower[r] | xr > upper[r] | is.infinite(xq) | is.infinite(xr) density[indOut] <- 0 # dmvnorm() macht auch viele Checks; Definiere eine private Methode .dmvnorm() ohne Checks density[!indOut] <- .dmvnorm(x=cbind(xq, xr)[!indOut,], mean=mean[c(q,r)], sigma=sigma[c(q,r),c(q,r)]) / alpha if (log == TRUE) { return(log(density)) } else { return(density) } } # standard deviation for normalisation SD <- sqrt(diag(sigma)) # normalised bounds lower.normalised <- (lower - mean) / SD upper.normalised <- (upper - mean) / SD xq.normalised <- (xq - mean[q]) / SD[q] # (N x 1) xr.normalised <- (xr - mean[r]) / SD[r] # (N x 1) # Computing correlation matrix R from sigma (matrix (n x n)): # R = D % sigma %*% D with diagonal matrix D as sqrt(sigma) # same as cov2cor() D <- matrix(0, n, n) diag(D) <- sqrt(diag(sigma))^(-1) R <- D %*% sigma %*% D # # Determine (n-2) x (n-2) correlation matrix RQR # RQR <- matrix(NA, n-2, n-2) RINV <- solve(R) WW <- matrix(NA, n-2, n-2) M1 <- 0 for (i in 1:n) { if (i != q && i != r) { M1 <- M1 + 1 M2 <- 0 for (j in 1:n) { if (j != q && j != r) { M2 <- M2 + 1 WW[M1, M2] <- RINV[i,j] } } } } WW <- solve(WW[1:(n-2),1:(n-2)]) for(i in 1:(n-2)) { for(j in 1:(n-2)) { RQR[i, j] <- WW[i, j] / sqrt(WW[i,i] * WW[j,j]) } } # # Determine bounds of integration vector AQR and BQR (n - 2) x 1 # # lower and upper integration bounds AQR <- matrix(NA, N, n-2) BQR <- matrix(NA, N, n-2) M2 <- 0 # counter = 1..(n-2) for (i in 1:n) { if (i != q && i != r) { M2 <- M2 + 1 BSQR <- (R[q, i] - R[q, r] * R[r, i]) / (1 - R[q, r]^2) BSRQ <- (R[r, i] - R[q, r] * R[q, i]) / (1 - R[q, r]^2) RSRQ <- (1 - R[i, q]^2) * (1 - R[q, r]^2) RSRQ <- (R[i, r] - R[i, q] * R[q, r]) / sqrt(RSRQ) # partial correlation coefficient R[r,i] given q # lower integration bound AQR[,M2] <- (lower.normalised[i] - BSQR * xq.normalised - BSRQ * xr.normalised) / sqrt((1 - R[i, q]^2) * (1 - RSRQ^2)) AQR[,M2] <- ifelse(is.nan(AQR[,M2]), -Inf, AQR[,M2]) # upper integration bound BQR[,M2] <- (upper.normalised[i] - BSQR * xq.normalised - BSRQ * xr.normalised) / sqrt((1 - R[i, q]^2) * (1 - RSRQ^2)) BQR[,M2] <- ifelse(is.nan(BQR[,M2]), Inf, BQR[,M2]) } } # Correlation matrix for r and q R2 <- matrix(c( 1, R[q,r], R[q,r], 1), 2, 2) sigma2 <- sigma[c(q,r),c(q,r)] density <- ifelse ( xq < lower[q] | xq > upper[q] | xr < lower[r] | xr > upper[r] | is.infinite(xq) | is.infinite(xr), 0, { # SW: RQR is a correlation matrix, so call pmvnorm(...,corr=) which is faster than # pmvnorm(...,corr=) # SW: Possibly vectorize this loop if pmvnorm allows vectorized lower and upper bounds prob <- numeric(N) # (N x 1) for (i in 1:N) { if ((n - 2) == 1) { # univariate case: pmvnorm(...,corr=) does not work, will work with sigma= prob[i] <- pmvnorm(lower=AQR[i,], upper=BQR[i,], sigma=RQR, algorithm=pmvnorm.algorithm) } else { prob[i] <- pmvnorm(lower=AQR[i,], upper=BQR[i,], corr=RQR, algorithm=pmvnorm.algorithm) } } dmvnorm(x=cbind(xq, xr), mean=mean[c(q,r)], sigma=sigma2) * prob / alpha } ) if (log == TRUE) { return(log(density)) } else { return(density) } } tmvtnorm/R/ptmvt-marginal.R0000644000176200001440000000255714532765421015450 0ustar liggesusers# Verteilungsfunktion fuer die eindimensionale Randdichte f(x_n) # einer Truncated Multivariate Student t Distribution, # by integrating out (n-1) dimensions. 
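#
# The implementation below computes this marginal cdf as a ratio of two pmvt() calls,
#   F_n(x) = P(lower <= X <= upper, X_n <= x) / P(lower <= X <= upper),
# i.e. the n-th upper integration bound is replaced by x and the result is
# normalised by the total mass alpha of the truncation region.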
# # @param xn Vektor der Laenge l von Punkten, an dem die Verteilungsfunktion ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param df degrees of freedom parameter # @param lower,upper Trunkierungsvektor lower <= x <= upper ptmvt.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), df = 1, lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean))) { # check of standard tmvnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Anzahl der Dimensionen k = length(mean) Fx = numeric(length(xn)) upper2 = upper alpha = pmvt(lower = lower, upper = upper, delta = mean, sigma = sigma, df = df) for (i in 1:length(xn)) { upper2[n] = xn[i] Fx[i] = pmvt(lower=lower, upper=upper2, delta=mean, sigma=sigma, df = df) } return (Fx/alpha) }tmvtnorm/R/rtmvnorm2.R0000644000176200001440000002600014532763352014442 0ustar liggesusers# Checks for lower <= Dx <= upper, where # mean (d x 1), sigma (d x d), D (r x d), x (d x 1), lower (r x 1), upper (r x 1) # Uses partly checks as in mvtnorm:::checkmvArgs! # checkTmvArgs2 <- function(mean, sigma, lower, upper, D) { if (is.null(lower) || any(is.na(lower))) stop(sQuote("lower"), " not specified or contains NA") if (is.null(upper) || any(is.na(upper))) stop(sQuote("upper"), " not specified or contains NA") if (!is.numeric(mean) || !is.vector(mean)) stop(sQuote("mean"), " is not a numeric vector") if (is.null(sigma) || any(is.na(sigma))) stop(sQuote("sigma"), " not specified or contains NA") if (is.null(D) || any(is.na(D))) stop(sQuote("D"), " not specified or contains NA") if (!is.matrix(sigma)) { sigma <- as.matrix(sigma) } if (!is.matrix(D)) { D <- as.matrix(D) } if (NCOL(lower) != NCOL(upper)) { stop("lower and upper have non-conforming size") } checkSymmetricPositiveDefinite(sigma) d <- length(mean) r <- length(lower) if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } if (length(lower) != NROW(D) || length(upper) != NROW(D)) { stop("D (r x d), lower (r x 1) and upper (r x 1) have non-conforming size") } if (length(mean) != NCOL(D)) { stop("D (r x d) and mean (d x 1) have non-conforming size") } if (any(lower>=upper)) { stop("lower must be smaller than or equal to upper (lower<=upper)") } # checked arguments cargs <- list(mean=mean, sigma=sigma, lower=lower, upper=upper, D=D) return(cargs) } # Gibbs sampling with general linear constraints a <= Dx <= b # with x (d x 1), D (r x d), a,b (r x 1) requested by Xiaojin Xu [xiaojinxu.fdu@gmail.com] # which allows for (r > d) constraints! 
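#
# A minimal usage sketch (hypothetical values), mirroring the demo at the end of
# this file: d=2 variables subject to r=3 constraints 0 <= Dx <= 1
#   D <- matrix(c(1, 1,  1, -1,  0.5, -1), 3, 2, byrow=TRUE)
#   X <- rtmvnorm2(n=1000, mean=c(0, 0), sigma=diag(2), lower=rep(0, 3),
#                  upper=rep(1, 3), D=D, start.value=c(0.5, 0.2), algorithm="gibbsR")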
# @param n Anzahl der Realisationen # @param mean Mittelwertvektor (d x 1) der Normalverteilung # @param sigma Kovarianzmatrix (d x d) der Normalverteilung # @param lower unterer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param upper oberer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param D Matrix for linear constraints, defaults to (d x d) diagonal matrix # @param H Precision matrix (d x d) if given # @param algorithm c("rejection", "gibbs", "gibbsR") rtmvnorm2 <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean)), algorithm=c("gibbs", "gibbsR", "rejection"), ...) { algorithm <- match.arg(algorithm) # check of standard tmvtnorm arguments # Have to change check procedure to handle r > d case cargs <- checkTmvArgs2(mean, sigma, lower, upper, D) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper D <- cargs$D # check of additional arguments if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) { stop("n must be a integer scalar > 0") } if (!identical(D,diag(length(mean)))) { # D <> I : general linear constraints if (algorithm == "gibbs") { # precision matrix case H vs. covariance matrix case sigma will be handled inside method retval <- rtmvnorm.gibbs2.Fortran(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } else if (algorithm == "gibbsR") { # covariance matrix case sigma retval <- rtmvnorm.gibbs2(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } else if (algorithm == "rejection") { retval <- rtmvnorm.rejection(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } return(retval) } else { # for D = I (d x d) forward to normal rtmvnorm() method retval <- rtmvnorm(n, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, ...) return(retval) } return(retval) } # Gibbs sampler implementation in R for general linear constraints # lower <= Dx <= upper where D (r x d), x (d x 1), lower, upper (r x 1) # which can handle the case r > d. 
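#
# For fixed x[-i], each constraint row k with D[k,i] != 0 implies scalar bounds on x[i]:
#   (lower[k] - D[k,-i] %*% x[-i]) / D[k,i]  and  (upper[k] - D[k,-i] %*% x[-i]) / D[k,i]
# The two bounds swap roles when D[k,i] < 0; intersecting over all rows k via
# max/min yields the truncation interval for the univariate conditional draw.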
#
# @param n
# @param mean
# @param sigma
# @param D
# @param lower
# @param upper
# @param burn.in.samples
# @param start.value
# @param thinning
rtmvnorm.gibbs2 <- function (n,
  mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  D = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep(Inf, length = length(mean)),
  burn.in.samples = 0,
  start.value = NULL,
  thinning = 1)
{
  if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) {
    stop("thinning must be an integer scalar > 0")
  }
  d <- length(mean)
  S <- burn.in.samples
  if (!is.null(S)) {
    if (S < 0) stop("number of burn-in samples must be non-negative")
  }
  if (!is.null(start.value)) {
    if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size")
    # element-wise comparison: "|" instead of "||", since both sides are vectors
    if (any(D %*% start.value < lower | D %*% start.value > upper)) stop("start value does not satisfy the linear constraints lower <= Dx <= upper")
    x0 <- start.value
  } else {
    x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0))
  }
  if (d == 1) {
    X <- rtnorm.gibbs(n, mu = mean[1], sigma = sigma[1, 1],
      a = lower[1], b = upper[1])
    return(X)
  }

  # number of linear constraints lower/a <= Dx <= upper/b, D (r x n), a,b (r x 1), x (n x 1)
  r <- nrow(D)

  X <- matrix(NA, n, d)
  U <- runif((S + n * thinning) * d)
  l <- 1

  sd <- list(d)
  P  <- list(d)
  # [ Sigma_11 Sigma_12 ] = [ sigma_{i,i}  sigma_{i,-i}  ]
  # [ Sigma_21 Sigma_22 ]   [ sigma_{-i,i} sigma_{-i,-i} ]
  for (i in 1:d) {
    Sigma_11 <- sigma[i, i]    # (1 x 1)
    Sigma_12 <- sigma[i, -i]   # (1 x (d - 1))
    Sigma_22 <- sigma[-i, -i]  # ((d - 1) x (d - 1))
    P[[i]]  <- t(Sigma_12) %*% solve(Sigma_22)
    sd[[i]] <- sqrt(Sigma_11 - P[[i]] %*% Sigma_12)
  }
  x <- x0

  # for all draws
  for (j in (1 - S):(n * thinning)) {
    # for all x[i]
    for (i in 1:d) {
      lower_i <- -Inf
      upper_i <- +Inf
      # for all linear constraints k relevant for variable x[i].
# If D[k,i]=0, then constraint k is irrelevant for x[i]
      for (k in 1:r) {
        if (D[k,i] == 0) next
        bound1 <- lower[k]/D[k, i] - D[k,-i] %*% x[-i] /D[k, i]
        bound2 <- upper[k]/D[k, i] - D[k,-i] %*% x[-i] /D[k, i]
        if (D[k, i] > 0) {
          lower_i <- pmax(lower_i, bound1)
          upper_i <- pmin(upper_i, bound2)
        } else {
          lower_i <- pmax(lower_i, bound2)
          upper_i <- pmin(upper_i, bound1)
        }
      }
      mu_i  <- mean[i] + P[[i]] %*% (x[-i] - mean[-i])
      F.tmp <- pnorm(c(lower_i, upper_i), mu_i, sd[[i]])
      Fa <- F.tmp[1]
      Fb <- F.tmp[2]
      x[i] <- mu_i + sd[[i]] * qnorm(U[l] * (Fb - Fa) + Fa)
      l <- l + 1
    }
    if (j > 0) {
      if (thinning == 1) {
        X[j, ] <- x
      }
      else if (j%%thinning == 0) {
        X[j%/%thinning, ] <- x
      }
    }
  }
  return(X)
}

rtmvnorm.gibbs2.Fortran <- function(n,
  mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  D = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  burn.in.samples = 0,
  start.value = NULL,
  thinning = 1)
{
  # No checks of input arguments, checks are done in rtmvnorm()

  # dimension of X
  d <- length(mean)

  # number of burn-in samples
  S <- burn.in.samples
  if (!is.null(S)) {
    if (S < 0) stop("number of burn-in samples must be non-negative")
  }

  # Take start value given by user or determine from lower and upper
  if (!is.null(start.value)) {
    if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size")
    if (NCOL(D) != length(start.value) || NROW(D) != length(lower) || NROW(D) != length(upper)) stop("D, start.value, lower, upper have non-conforming size")
    # element-wise comparison: "|" instead of "||", since both sides are vectors
    if (any(D %*% start.value < lower | D %*% start.value > upper)) stop("start value must lie in the region defined by lower <= Dx <= upper")
    x0 <- start.value
  } else {
    stop("Must give start.value with lower <= D start.value <= upper")
  }

  # Sample from the univariate truncated normal distribution, which is very fast.
  if (d == 1) {
    X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1])
    return(X)
  }

  # result matrix (n x d)
  X <- matrix(0, n, d)

  # number of linear constraints lower/a <= Dx <= upper/b, D (r x n), a,b (r x 1), x (n x 1)
  r <- nrow(D)

  # Call to Fortran subroutine
  # TODO: Check whether matrix D is passed to Fortran row-wise or column-wise!
  # For sigma this does not matter because of its symmetry.
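  # Note: .Fortran() passes matrices in R's column-major storage order, so the
  # Fortran subroutine must index the (r x d) matrix D (argument C) accordingly.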
ret <- .Fortran("rtmvnormgibbscov2", n = as.integer(n), d = as.integer(d), r = as.integer(r), mean = as.double(mean), sigma = as.double(sigma), C = as.double(D), a = as.double(lower), b = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") X <- matrix(ret$X, ncol=d, byrow=TRUE) return(X) } if (FALSE) { # dimension d=2 # number of linear constraints r=3 > d # linear restrictions a <= Dx <= b with x (d x 1); D (r x d); a,b (r x 1) D <- matrix( c( 1, 1, 1, -1, 0.5, -1), 3, 2, byrow=TRUE) a <- c(0, 0, 0) b <- c(1, 1, 1) # mark linear constraints as lines plot(NA, xlim=c(-0.5, 1.5), ylim=c(-1,1)) for (i in 1:3) { abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") } # Gibbs sampling: # determine lower and upper bounds for each index i given the remaining variables: x[i] | x[-i] ### Gibbs sampling for general linear constraints a <= Dx <= b x0 <- c(0.5, 0.2) sigma <- matrix(c(1, 0.2, 0.2, 1), 2, 2) X <- rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=x0) points(X, pch=20, col="black") X2 <- rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=x0) points(X2, pch=20, col="green") # Rejection sampling (rtmvnorm.rejection) funktioniert bereits mit beliebigen Restriktionen (r > d) X3 <- rtmvnorm.rejection(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b) points(X3, pch=20, col="red") rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=c(-1, -1)) colMeans(X) colMeans(X2) } tmvtnorm/R/dtmvt.R0000644000176200001440000000410311765142062013624 0ustar liggesusers# Density function for the truncated multivariate t-distribution # # Author: stefan ############################################################################### # Density function for the truncated multivariate t-distribution # @param x # @param mean # @param sigma # @param df degrees of freedom parameter # @param log dtmvt <- function(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower= rep( -Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), log = FALSE){ # Check of additional inputs like x if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } # Anzahl der Beobachtungen T = nrow(x) # check for each row if in support region insidesupportregion <- logical(T) for (i in 1:T) { insidesupportregion[i] = all(x[i,] >= lower & x[i,] <= upper & !any(is.infinite(x))) } # density value for points outside the support region dv = if (log) { -Inf } else { 0 } # conditional density f <- ifelse(insidesupportregion, dmvt(x, delta=mean, sigma=sigma, df=df, log=log) / pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df, type="shifted"), dv) return(f) } if (FALSE) { # Example x1<-seq(-2, 3, by=0.1) x2<-seq(-2, 3, by=0.1) mean=c(0,0) sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2) lower=c(-1,-1) density<-function(x) { z=dtmvt(x, mean=mean, sigma=sigma, lower=lower) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute multivariate-t density d for grid d=fgrid(x1, x2, function(x) dtmvt(x, mean=mean, sigma=sigma, lower=lower)) # compute multivariate normal density d for grid d2=fgrid(x1, x2, function(x) dtmvnorm(x, mean=mean, sigma=sigma, lower=lower)) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate t Density", xlab=expression(x[1]), 
ylab=expression(x[2])) contour(x1, x2, d2, nlevels=5, add=TRUE, col="red") abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) } tmvtnorm/R/dtmvnorm-marginal.R0000644000176200001440000001017514532765530016140 0ustar liggesusers# Dichtefunktion und Verteilung einer multivariate truncated normal # # Problem ist die Bestimmung der Randverteilung einer Variablen. # # 1. Im bivariaten Fall kann explizit eine Formel angegeben werden (vgl. Arnold (1993)) # 2. Im multivariaten Fall kann ein Integral angegeben werden (vgl. Horrace (2005)) # 3. Bestimmung der Dichtefunktion ueber das Integral moeglich? # 4. Kann die Verteilungsfunktion pmvnorm() helfen? Kann man dann nach einer Variablen differenzieren? # Literatur: # # Genz, A. (1992). Numerical computation of multivariate normal probabilities. Journal of Computational and Graphical Statistics, 1, 141-150 # Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. Computing Science and Statistics, 25, 400-405 # Horrace (2005). # Jack Cartinhour (1990): One-dimensional marginal density functions of a truncated multivariate normal density function # Communications in Statistics - Theory and Methods, Volume 19, Issue 1 1990 , pages 197 - 203 # Dichtefunktion fuer Randdichte f(xn) einer Truncated Multivariate Normal Distribution, # vgl. Jack Cartinhour (1990) "One-dimensional marginal density functions of a truncated multivariate normal density function" # # @param xn Vektor der Laenge l von Punkten, an dem die Randdichte ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param lower,upper Trunkierungsvektor lower <= x <= upper dtmvnorm.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE) { if (NROW(sigma) != NCOL(sigma)) { stop("sigma must be a square matrix") } if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } # Anzahl der Dimensionen k <- length(mean) if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Univariater Fall, vgl. Greene (2003), S.573 if (k == 1) { prob <- pnorm(upper, mean=mean, sd=sqrt(sigma)) - pnorm(lower, mean=mean, sd=sqrt(sigma)) density <- ifelse( lower[1]<=xn & xn<=upper[1], dnorm(xn, mean=mean, sd=sqrt(sigma)) / prob, 0) if (log == TRUE) { return(log(density)) } else { return(density) } } # Standardize sigma to correlation matrix, mean to zero vector # adjust xn, lower, upper #sd <- sqrt(diag(sigma)) #xn <- (xn - mean) / sd #lower <- (lower - mean) / sd #upper <- (upper - mean) / sd #mean <- rep(0, k) #sigma <- cov2cor(sigma) # Kovarianzmatrix; nach Standardisierung Korrelationsmatrix C <- sigma # Inverse Kovarianzmatrix, Precision matrix A <- solve(sigma) # Partitionierung von A und C A_1 <- A[-n,-n] # (n-1) x (n-1) #a_nn <- A[n, n] # 1x1 #a <- A[-n, n] # (n-1) x 1 A_1_inv <- solve(A_1) C_1 <- C[-n,-n] # (n-1) x (n-1) c_nn <- C[n, n] # 1x1 c <- C[-n, n] # (n-1) x 1 # Partitionierung von Mittelwertvektor mu mu <- mean mu_1 <- mean[-n] mu_n <- mean[n] # Skalierungsfaktor der Dichte p <- pmvnorm(lower=lower, upper=upper, mean=mu, sigma=C) f_xn <- c() for (i in 1:length(xn)) { if (!(lower[n]<=xn[i] && xn[i]<=upper[n]) || is.infinite(xn[i])) { f_xn[i] <- 0 next } # m(x_n) --> (n-1x1) # Aufpassen bei z.B. 
m=c(Inf, Inf, NaN) und c=0 m <- mu_1 + (xn[i] - mu_n) * c / c_nn # SW: Possibly optimize with vectorized version of pmvnorm() which accepts different bounds # for univariate density, pmvnorm() does not accept corr= f_xn[i] <- exp(-0.5*(xn[i]-mu_n)^2/c_nn) * pmvnorm(lower=lower[-n], upper=upper[-n], mean=m, sigma=A_1_inv) } density <- 1/p * 1/sqrt(2*pi*c_nn) * f_xn if (log == TRUE) { return(log(density)) } else { return(density) } } tmvtnorm/R/ptmvnorm-marginal.R0000644000176200001440000000271114532764254016153 0ustar liggesusers# Verteilungsfunktion fuer die eindimensionale Randdichte f(xn) einer Truncated Multivariate Normal Distribution, # vgl. Jack Cartinhour (1990) "One-dimensional marginal density functions of a truncated multivariate normal density function" fuer die Dichtefunktion # # @param xn Vektor der Laenge l von Punkten, an dem die Verteilungsfunktion ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param lower,upper Trunkierungsvektor lower <= x <= upper ptmvnorm.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean))) { # check of standard tmvnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Anzahl der Dimensionen k = length(mean) Fx = numeric(length(xn)) upper2 = upper alpha = pmvnorm(lower = lower, upper = upper, mean = mean, sigma = sigma) for (i in 1:length(xn)) { upper2[n] = xn[i] Fx[i] = pmvnorm(lower=lower, upper=upper2, mean=mean, sigma=sigma) } return (Fx/alpha) } tmvtnorm/R/tmvnorm-estimation.R0000644000176200001440000002344414532763171016360 0ustar liggesusers# estimation methods for the parameters of the truncated multivariate normal distribution # # Literatur: # # Amemiya (1974) : Instrumental Variables estimator # Lee (1979) # Lee (1983) # Griffiths (2002) : # "Gibbs Sampler for the parameters of the truncated multivariate normal distribution" # # Stefan Wilhelm, wilhelm@financial.com #library(tmvtnorm) library(stats4) # Hilfsfunktion : VECH() Operator vech=function (x) { # PURPOSE: creates a column vector by stacking columns of x # on and below the diagonal #---------------------------------------------------------- # USAGE: v = vech(x) # where: x = an input matrix #--------------------------------------------------------- # RETURNS: # v = output vector containing stacked columns of x #---------------------------------------------------------- # Written by Mike Cliff, UNC Finance mcliff@unc.edu # CREATED: 12/08/98 #if(!is.matrix(x)) #{ # #} rows = nrow(x) columns = ncol(x); v = c(); for (i in 1:columns) { v = c(v, x[i:rows,i]); } v } # Hilfsfunktion : Operator fuer Namensgebung sigma_i.j (i <= j), d.h. 
wie vech(), nur Zeilenweise vech2 <- function (x) { # PURPOSE: creates a column vector by stacking columns of x # on and below the diagonal #---------------------------------------------------------- # USAGE: v = vech2(x) # where: x = an input matrix #--------------------------------------------------------- # RETURNS: # v = output vector containing stacked columns of x #---------------------------------------------------------- # Written by Mike Cliff, UNC Finance mcliff@unc.edu # CREATED: 12/08/98 rows = nrow(x) columns = ncol(x); v = c(); for (i in 1:rows) { v = c(v, x[i,i:columns]); } v } # Hilfsfunktion : Inverser VECH() Operator inv_vech=function(v) { #---------------------------------------------------------- # USAGE: x = inv_vech(v) # where: v = a vector #--------------------------------------------------------- # RETURNS: # x = a symmetric (m x m) matrix containing de-vectorized elements of v #---------------------------------------------------------- # Anzahl der Zeilen m = -0.5+sqrt(0.5^2+2*length(v)) x = matrix(0,nrow=m,ncol=m) if (length(v) != m*(m+1)/2) { # error stop("v must have m*(m+1)/2 elements") } for (i in 1:m) { #cat("r=",i:m," c=",i,"\n") x[ i:m, i] = v[((i-1)*(m-(i-2)*0.5)+1) : (i*(m-(i-1)*0.5))] x[ i, i:m] = v[((i-1)*(m-(i-2)*0.5)+1) : (i*(m-(i-1)*0.5))] } x } # 1. Maximum-Likelihood-Estimation of mu and sigma when truncation points are known # # TODO/Idee: Cholesky-Zerlegung der Kovarianzmatrix als Parametrisierung # # @param X data matrix (T x n) # @param lower, upper truncation points # @param start list of start values for mu and sigma # @param fixed a list of fixed parameters # @param method # @param cholesky flag, if TRUE, we use the Cholesky decomposition of sigma as parametrization # @param lower.bounds lower bounds for method "L-BFGS-B" # @param upper.bounds upper bounds for method "L-BFGS-B" mle.tmvnorm <- function(X, lower=rep(-Inf, length = ncol(X)), upper=rep(+Inf, length = ncol(X)), start=list(mu=rep(0,ncol(X)), sigma=diag(ncol(X))), fixed=list(), method="BFGS", cholesky=FALSE, lower.bounds=-Inf, upper.bounds=+Inf, ...) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check if we have at least one sample if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } # verify dimensions of x and lower/upper match n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ",length(lower)," columns.") } # check if lower <= X <= upper for all rows ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } if ((length(lower.bounds) > 1L || length(upper.bounds) > 1L || lower.bounds[1L] != -Inf || upper.bounds[1L] != Inf) && method != "L-BFGS-B") { warning("bounds can only be used with method L-BFGS-B") method <- "L-BFGS-B" } # parameter vector theta = mu_1,...,mu_n,vech(sigma) if (cholesky) { # if cholesky == TRUE use Cholesky decomposition of sigma # t(chol(sigma)) returns a lower triangular matrix which can be vectorized using vech() theta <- c(start$mu, vech2(t(chol(start$sigma)))) } else { theta <- c(start$mu, vech2(start$sigma)) } # names for mean vector elements : mu_i nmmu <- paste("mu_",1:n,sep="") # names for sigma elements : sigma_ij nmsigma <- paste("sigma_",vech2(outer(1:n,1:n, paste, sep=".")),sep="") names(theta) <- c(nmmu, nmsigma) # negative log-likelihood-Funktion dynamisch definiert mit den formals(), # damit mle() damit arbeiten kann # # Eigentlich wollen wir eine Funktion negloglik(theta) mit einem einzigen Parametersvektor theta. # Die Methode mle() braucht aber eine "named list" der Parameter (z.B. mu_1=0, mu_2=0, sigma_1=2,...) und entsprechend eine # Funktion negloglik(mu1, mu2, sigma1,...) # Da wir nicht vorher wissen, wie viele Parameter zu schaetzen sind, definieren wir die formals() # dynamisch um # # @param x dummy/placeholder argument, will be overwritten by formals() with list of skalar parameters negloglik <- function(x) { nf <- names(formals()) # recover parameter vector from named arguments (mu1=...,mu2=...,sigma11,sigma12 etc). # stack all named arguments to parameter vector theta theta <- sapply(nf, function(x) {eval(parse(text=x))}) # mean vector herholen mean <- theta[1:n] # Matrix fuer sigma bauen if (cholesky) { L <- inv_vech(theta[-(1:n)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(theta[-(1:n)]) } # if sigma is not positive definite, return MAXVALUE if (det(sigma) <= 0 || any(diag(sigma) < 0)) { return(.Machine$integer.max) } # Log-Likelihood # Wieso hier nur dmvnorm() : Wegen Dichte = Conditional density f <- -(sum(dmvnorm(X, mean, sigma, log=TRUE)) - nrow(X) * log(pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma))) if (is.infinite(f) || is.na(f)) { # cat("negloglik=",f," for parameter vector ",theta,"\n") # "L-BFGS-B" requires a finite function value, other methods can handle infinte values like +Inf # return a high finite value, e.g. 
integer.max, so optimize knows this is the wrong place to be # TODO: check whether to return +Inf or .Machine$integer.max, certain algorithms may prefer +Inf, others a finite value #return(+Inf) return(.Machine$integer.max) } f } formals(negloglik) <- theta # for method "L-BFGS-B" pass bounds parameter "lower.bounds" and "upper.bounds" # under names "lower" and "upper" if ((length(lower.bounds) > 1L || length(upper.bounds) > 1L || lower.bounds[1L] != -Inf || upper.bounds[1L] != Inf) && method == "L-BFGS-B") { mle.fit <- eval.parent(substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...))) #mle.call <- substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...)) #mle.fit <- mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...) #mle.fit@call <- mle.call return (mle.fit) } else { # we need evaluated arguments in the call for profile(mle.fit) mle.fit <- eval.parent(substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...))) #mle.call <- substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...)) #mle.fit <- mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...) #mle.fit@call <- mle.call return (mle.fit) } } # Beispiel: if (FALSE) { lower=c(-1,-1) upper=c(1, 2) mu =c(0, 0) sigma=matrix(c(1, 0.7, 0.7, 2), 2, 2) # generate random samples X <- rtmvnorm(n=500, mu, sigma, lower, upper) method <- "BFGS" # estimate mu and sigma from random samples # Standard-Startwerte mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper) mle.fit1a <- mle.tmvnorm(X, lower=lower, upper=upper, cholesky=TRUE) mle.fit1b <- mle.tmvnorm(X, lower=lower, upper=upper, method="L-BFGS-B", lower.bounds=c(-1, -1, 0.001, -Inf, 0.001), upper.bounds=c(2, 2, 2, 2, 3)) Rprof("mle.profile1.out") mle.profile1 <- profile(mle.fit1, X, method="BFGS", trace=TRUE) Rprof(NULL) summaryRprof("mle.profile1.out") confint(mle.profile1) par(mfrow=c(2,2)) plot(mle.profile1) summary(mle.fit1) logLik(mle.fit1) vcov(mle.fit1) #TODO: confint(mle.fit1) #profile(mle.fit1) # andere Startwerte, näher am wahren Ergebnis mle.fit2 <- mle.tmvnorm(x=X, lower=lower, upper=upper, start=list(mu=c(0.1, 0.1), sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2))) # --> funktioniert jetzt besser... summary(mle.fit2) # andere Startwerte, nimm mean und Kovarianz aus den Daten (stimmt zwar nicht, ist aber sicher # ein besserer Startwert als 0 und diag(n). 
mle.fit3 <- mle.tmvnorm(x=X, lower=lower, upper=upper,
  start=list(mu=colMeans(X), sigma=cov(X)))
summary(mle.fit3)
}tmvtnorm/R/ptmvt.R0000644000176200001440000000367714532764606013660 0ustar liggesusers
# Distribution function of the truncated multivariate t distribution
#
# @param lower lower truncation vector (k x 1) with lower <= x <= upper
# @param upper upper truncation vector (k x 1) with lower <= x <= upper
ptmvt <- function( lowerx, upperx, mean=rep(0, length(lowerx)), sigma, df = 1,
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0)
{
  # check of standard tmvtnorm arguments
  cargs <- checkTmvArgs(mean, sigma, lower, upper)
  mean  <- cargs$mean
  sigma <- cargs$sigma
  lower <- cargs$lower
  upper <- cargs$upper

  # check of additional arguments lowerx and upperx
  if (is.null(lowerx) || any(is.na(lowerx)))
    stop(sQuote("lowerx"), " not specified or contains NA")
  if (is.null(upperx) || any(is.na(upperx)))
    stop(sQuote("upperx"), " not specified or contains NA")
  if (!is.numeric(lowerx) || !is.vector(lowerx))
    stop(sQuote("lowerx"), " is not a numeric vector")
  if (!is.numeric(upperx) || !is.vector(upperx))
    stop(sQuote("upperx"), " is not a numeric vector")
  if (length(lowerx) != length(lower) || length(lower) != length(upperx))
    stop("lowerx and upperx must have the same length as lower and upper!")
  if (any(lowerx>=upperx))
    stop("lowerx must be strictly smaller than upperx (lowerx < upperx)")

  # Note:
  # We have to guarantee that we only integrate inside the support region
  # lower <= x <= upper; otherwise the result can be >= 1.
  # If single components of lowerx are below lower, integrate from the lower
  # truncation bound instead. Analogously for upperx >= upper.
  f <- pmvt(lower=pmax(lowerx, lower), upper=pmin(upperx, upper), delta=mean, sigma=sigma, df=df,
        maxpts = maxpts, abseps = abseps, releps = releps, type="shifted") /
       pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df,
        maxpts = maxpts, abseps = abseps, releps = releps, type="shifted")
  return(f)
}tmvtnorm/R/rtmvt.R0000644000176200001440000002102014532763264013647 0ustar liggesusers# Sampling from the truncated multivariate t distribution using
#
# a) Rejection sampling
# b) Gibbs sampling
#
# Author: Stefan Wilhelm, Manjunath B G
#
# References:
# (1) Rejection Sampling : None
# (2) Gibbs Sampling :
# Geweke (1991) "Efficient simulation from the multivariate normal and Student-t distributions
# subject to linear constraints and the evaluation of constraint probabilities"
###############################################################################
rtmvt <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1,
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  algorithm=c("rejection", "gibbs"), ...)
{
  algorithm <- match.arg(algorithm)

  # check of standard tmvtnorm arguments
  cargs <- checkTmvArgs(mean, sigma, lower, upper)
  mean  <- cargs$mean
  sigma <- cargs$sigma
  lower <- cargs$lower
  upper <- cargs$upper

  # check of additional arguments: n and df
  if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) {
    stop("n must be an integer scalar > 0")
  }
  if (df < 1 || !is.numeric(df) || length(df) > 1) {
    stop("df must be a numeric scalar >= 1")
  }

  if (algorithm == "rejection") {
    if (df != as.integer(df)) stop("Rejection sampling currently works only for integer degrees of freedom.
Consider using algorithm='gibbs'.") retval <- rtmvt.rejection(n, mean, sigma, df, lower, upper) } else if (algorithm == "gibbs") { retval <- rtmvt.gibbs(n, mean, sigma, df, lower, upper, ...) } return(retval) } # Erzeugt eine Matrix X (n x k) mit Zufallsrealisationen aus einer Trunkierten Multivariaten t Verteilung # mit k Dimensionen # ueber Rejection Sampling aus einer Multivariaten t-Verteilung # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param sigma Kovarianzmatrix (k x k) der Normalverteilung # @param df degrees of freedom parameter # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper rtmvt.rejection <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean))) { # No check of input parameters, checks are done in rtmvnorm()! # k = Dimension k <- length(mean) # mean as (1 x k) matrix mmean <- matrix(mean, 1, k) # Ergebnismatrix (n x k) Y <- matrix(NA, n, k) # Anzahl der noch zu ziehenden Samples numSamples <- n # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- 0 # Akzeptanzrate alpha aus der Multivariaten t-Verteilung bestimmen alpha <- pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df) if (alpha <= 0.01) warning("Acceptance rate is very low and rejection sampling becomes inefficient. Consider using Gibbs sampling.") # Ziehe wiederholt aus der Multivariaten Student-t und schaue, wieviel Samples nach Trunkierung uebrig bleiben while(numSamples > 0) { # Erzeuge N/alpha Samples aus einer multivariaten Normalverteilung: Wenn alpha zu niedrig ist, wird Rejection Sampling ineffizient und N/alpha zu gross. 
Dann nur N erzeugen nproposals <- ifelse (numSamples/alpha > 1000000, numSamples, ceiling(max(numSamples/alpha,10))) X <- rmvt(nproposals, sigma=sigma, df=df) # SW: rmvt() hat keinen Parameter delta # add mean : t(t(X) + mean) oder so: for (i in 1:k) { X[,i] = mean[i] + X[,i] } # Bestimme den Anteil der Samples nach Trunkierung # Bug: ind= rowSums(lower <= X & X <= upper) == k # wesentlich schneller als : ind=apply(X, 1, function(x) all(x >= lower & x<=upper)) ind <- logical(nproposals) for (i in 1:nproposals) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } # Anzahl der akzeptierten Samples in diesem Durchlauf numAcceptedSamples <- length(ind[ind==TRUE]) # Wenn nix akzeptiert wurde, dann weitermachen if (length(numAcceptedSamples) == 0 || numAcceptedSamples == 0) next #cat("numSamplesAccepted=",numAcceptedSamples," numSamplesToDraw = ",numSamples,"\n") numNeededSamples <- min(numAcceptedSamples, numSamples) Y[(numAcceptedSamplesTotal+1):(numAcceptedSamplesTotal+numNeededSamples),] <- X[which(ind)[1:numNeededSamples],] # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- numAcceptedSamplesTotal + numAcceptedSamples # Anzahl der verbliebenden Samples numSamples <- numSamples - numAcceptedSamples } Y } # Gibbs sampler for the truncated multivariate Student-t # see Geweke (1991) # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der t-Verteilung # @param sigma Kovarianzmatrix (k x k) der t-Verteilung # @param df degrees of freedom parameter # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvt.gibbs <- function (n=1, mean=rep(0, ncol(sigma)), sigma = diag(length(mean)), df=1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # dimension of X k = length(mean) # Mean Vector mu = mean # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Ergebnismatrix X (n x k) # Random sample from truncated Student-t density X <- matrix(NA, n, k) # Realisation from truncated multivariate normal Z <- numeric(k) # Chi-Square variable w w <- numeric(1) # x is one realisation from truncated Student-t density conditioned on Z and w x <- numeric(k) # Take start value given by user or use random start value if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(start.valueupper)) stop("start value is not inside support region") Z <- start.value - mu } else { # If no start value is specified, # the initial value/start value for Z drawn from TN(0,\Sigma) # with truncation point a = a-mu and b = b-mu Z <- rtmvnorm(1, mean=rep(0,k), sigma=sigma, lower=lower-mu, upper=upper-mu, algorithm="gibbs") } # Algorithm begins : # Draw from Uni(0,1) U <- runif((S + n*thinning) * k) indU <- 1 # Index for accessing U # List of conditional standard deviations can be pre-calculated sd <- list(k) # List of t(Sigma_i) %*% solve(Sigma) term P <- list(k) for(i in 1:k) { # Partitioning of Sigma Sigma <- sigma[-i,-i] # (k-1) x (k-1) sigma_ii <- sigma[i,i] # 1 x 1 Sigma_i <- sigma[i,-i] # (k-1) x 1 P[[i]] <- t(Sigma_i) %*% solve(Sigma) sd[[i]] <- sqrt(sigma_ii - P[[i]] %*% Sigma_i) } for(i in (1-S):(n*thinning)) { # Step 1: 
Simulation of w conditional on Z from the chi-square distribution by rejection sampling
    # so that (lower - mu) * w <= Z <= (upper - mu) * w
    acceptedW <- FALSE
    while (!acceptedW) {
      w <- (rchisq(1, df, ncp=0)/df)^(1/2)
      acceptedW <- all((lower - mu) * w <= Z & Z <= (upper - mu) * w)
    }

    # Transformed chi-square sample subject to the condition on Z
    alpha <- (lower - mu) * w
    beta  <- (upper - mu) * w

    # Step 2: Simulation from the truncated normal via the Gibbs sampling approach
    for(j in 1:k)
    {
      mu_j <- P[[j]] %*% (Z[-j])
      Fa <- pnorm( (lower[j]-mu[j])*w, mu_j, sd[[j]])
      Fb <- pnorm( (upper[j]-mu[j])*w, mu_j, sd[[j]])
      Z[j] <- mu_j + sd[[j]] * qnorm(U[indU] * (Fb - Fa) + Fa) # changed on 22nd February 2010 by Manju
      indU <- indU + 1
    }

    # Step 3: Student-t transformation
    x <- mu + ( Z / w )

    if (i > 0) {
      if (thinning == 1) {
        # no thinning, take all samples except for the burn-in period
        X[i,] <- x
      }
      else if (i %% thinning == 0){
        X[i %/% thinning,] <- x
      }
    }
  }
  return(X)
}

# Draw from a multivariate t-distribution without truncation and test the
# distribution with a Kolmogorov-Smirnov test:
if (FALSE) {
  X <- rtmvt.rejection(n=10000, mean=rep(0, 3), df=2)
}
tmvtnorm/R/checkTmvArgs.R0000644000176200001440000000313712705241146015053 0ustar liggesuserscheckSymmetricPositiveDefinite <- function(x, name="sigma") {
  if (!isSymmetric(x, tol = sqrt(.Machine$double.eps))) {
    stop(sprintf("%s must be a symmetric matrix", name))
  }
  if (NROW(x) != NCOL(x)) {
    stop(sprintf("%s must be a square matrix", name))
  }
  if (any(diag(x) <= 0)) {
    stop(sprintf("all diagonal elements of %s must be positive", name))
  }
  if (det(x) <= 0) {
    stop(sprintf("%s must be positive definite", name))
  }
}

# Uses partly checks as in mvtnorm:::checkmvArgs!
checkTmvArgs <- function(mean, sigma, lower, upper)
{
  if (is.null(lower) || any(is.na(lower)))
    stop(sQuote("lower"), " not specified or contains NA")
  if (is.null(upper) || any(is.na(upper)))
    stop(sQuote("upper"), " not specified or contains NA")
  if (!is.numeric(mean) || !is.vector(mean))
    stop(sQuote("mean"), " is not a numeric vector")
  if (is.null(sigma) || any(is.na(sigma)))
    stop(sQuote("sigma"), " not specified or contains NA")

  if (!is.matrix(sigma)) {
    sigma <- as.matrix(sigma)
  }

  if (NCOL(lower) != NCOL(upper)) {
    stop("lower and upper have non-conforming size")
  }

  checkSymmetricPositiveDefinite(sigma)

  if (length(mean) != NROW(sigma)) {
    stop("mean and sigma have non-conforming size")
  }

  if (length(lower) != length(mean) || length(upper) != length(mean)) {
    stop("mean, lower and upper must have the same length")
  }

  if (any(lower>=upper)) {
    stop("lower bound should be strictly less than the upper bound (lower<upper)")
  }

  # checked arguments
  cargs <- list(mean=mean, sigma=sigma, lower=lower, upper=upper)
  return(cargs)
}
tmvtnorm/R/qtmvnorm-marginal.R
# Quantile function of the one-dimensional marginal distribution x_n of the
# truncated multivariate normal distribution:
# tail = "lower.tail" gives x with P[X <= x] = p,
# "upper.tail" gives x with P[X > x] = p and both.tails leads to x with P[-x <= X <= x] = p.
#
# @param n
# @param mean
# @param sigma
# @param lower
# @param upper
# @param ... additional parameters to uniroot()
qtmvnorm.marginal <- function (p,
  interval = c(-10, 10),
  tail = c("lower.tail", "upper.tail", "both.tails"),
  n=1,
  mean=rep(0, nrow(sigma)),
  sigma=diag(length(mean)),
  lower=rep(-Inf, length = length(mean)),
  upper=rep( Inf, length = length(mean)), ...)
{
  if (length(p) != 1 || (p <= 0 || p >= 1))
    stop(sQuote("p"), " is not a double between zero and one")

  if (n > length(mean) || n < 1)
    stop(sQuote("n"), " is not an integer between 1 and ", length(mean))

  pfct <- function(q) {
    switch(tail,
      both.tails = {
        low <- lower
        low[n] <- -abs(q)
        upp <- upper
        upp[n] <- abs(q)
      },
      upper.tail = {
        low <- lower
        upp <- upper
        low[n] <- q
      },
      lower.tail = {
        low <- lower
        upp <- upper
        upp[n] <- q
      },
    )
    ptmvnorm(low, upp, mean, sigma, lower, upper) - p
  }

  qroot <- uniroot(pfct, interval = interval, ...)
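  # return the full uniroot() result list; the quantile itself is qroot$root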
qroot } tmvtnorm/R/tmvnorm-estimation-GMM.R0000644000176200001440000002443214532763000016763 0ustar liggesusers# Estimation of the parameters # of the truncated multivariate normal distribution using GMM # and # (1) the moment equations from Lee (1981) and Lee (1983) # (2) Our moment formula and equating mean and covariance matrix #library(gmm) #library(tmvtnorm) #source("rtmvnorm.R") # for checkTmvArgs() #source("tmvnorm-estimation.R") # for vec(), vech() and inv_vech() "%w/o%" <- function(x,y) x[!x %in% y] #-- x without y ################################################################################ # # Multivariater Fall # ################################################################################ # Definition einer Funktion mit Momentenbedingungen fuer gmm() # nach den Lee (1979, 1983, 1981) moment conditions # # N dimensions, K = N + N*(N+1)/2 parameters # number of moment conditions L=(l_max + 1) * N # parameter vector tet = c(mu, vech(sigma)), length K # @param tet named parameter vector theta = c(mu, vech(sigma)) # @param x data matrix (T x N) gmultiLee <- function(tet, fixed=c(), fullcoefnames, x, lower, upper, l_max = ceiling((ncol(x)+1)/2), cholesky=FALSE) { fullcoef <- rep(NA, length(tet) + length(fixed)) names(fullcoef) <- fullcoefnames if (any(!names(fixed) %in% names(fullcoef))) stop("some named arguments in 'fixed' are not arguments in parameter vector theta") fullcoef[names(tet)] <- tet fullcoef[names(fixed)] <- fixed K <- length(tet) # Anzahl der zu schaetzenden Parameter N <- ncol(x) # Anzahl der Dimensionen T <- nrow(x) # Anzahl der Beobachtungen #l_max <- ceiling((N+1)/2) # maximales l fuer Momentenbedingungen X <- matrix(NA, T, (l_max+1)*N) # Rueckgabematrix mit den Momenten # Parameter mean/sigma aus dem Parametervektor tet extrahieren mean <- fullcoef[1:N] # Matrix fuer sigma bauen if (cholesky) { L <- inv_vech(fullcoef[-(1:N)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(fullcoef[-(1:N)]) } #cat("Call to gmultiLee with tet=",tet," sigma=",sigma," det(sigma)=",det(sigma),"\n") #flush.console() # if sigma is not positive definite we return some maximum value if (det(sigma) <= 0 || any(diag(sigma) < 0)) { X <- matrix(+Inf, T, N + N * (N+1) / 2) return(X) } sigma_inv <- solve(sigma) # inverse Kovarianzmatrix F_a = numeric(N) F_b = numeric(N) F <- 1 for (i in 1:N) { # one-dimensional marginal density in dimension i F_a[i] <- dtmvnorm.marginal(lower[i], n=i, mean=mean, sigma=sigma, lower=lower, upper=upper) F_b[i] <- dtmvnorm.marginal(upper[i], n=i, mean=mean, sigma=sigma, lower=lower, upper=upper) } k <- 1 for(l in 0:l_max) { for (i in 1:N) { sigma_i <- sigma_inv[i,] # i-te Zeile der inversen Kovarianzmatrix (1 x N) = entpricht sigma^{i'} a_il <- ifelse(is.infinite(lower[i]), 0, lower[i]^l) b_il <- ifelse(is.infinite(upper[i]), 0, upper[i]^l) # Lee (1983) moment equation for l #X[,k] <- sigma_i %*% mean * x[,i]^l - (x[,i]^l * x) %*% sigma_inv[,i] + l * (x[,i]^(l-1)) + (a_il * F_a[i] - b_il * F_b[i]) / F X[,k] <- sigma_i %*% mean * x[,i]^l - sweep(x, 1, x[,i]^l, FUN="*") %*% sigma_inv[,i] + l * (x[,i]^(l-1)) + (a_il * F_a[i] - b_il * F_b[i]) / F #T x 1 (1 x N) (N x 1) (T x 1) (T x N) (N x 1) (T x 1) (skalar) k <- k + 1 # Zaehlvariable } } return(X) } # Definition einer Funktion mit Momentenbedingungen # mit Mean and Covariance-Matrix bauen anstatt mit Lee Bedingungen # # @param tet named parameter vector theta, should be part of c(vec(mu), vech(sigma)) # @param fixed a named 
list of fixed parameters # @param fullcoefnames # @param x data matrix (T x N) # @param lower # @param upper # @param cholesky flag whether we use Cholesky decompostion Sigma = LL' # of the covariance matrix in order to ensure positive-definiteness of sigma gmultiManjunathWilhelm <- function(tet, fixed=c(), fullcoefnames, x, lower, upper, cholesky=FALSE) { fullcoef <- rep(NA, length(tet) + length(fixed)) names(fullcoef) <- fullcoefnames if (any(!names(fixed) %in% names(fullcoef))) stop("some named arguments in 'fixed' are not arguments in parameter vector theta") fullcoef[names(tet)] <- tet fullcoef[names(fixed)] <- fixed N <- ncol(x) # Anzahl der Dimensionen T <- nrow(x) # Anzahl der Beobachtungen X <- matrix(NA, T, N + N * (N+1) / 2) # Rueckgabematrix mit den Momenten # Parameter mean/sigma aus dem Parametervektor tet extrahieren mean <- fullcoef[1:N] # Matrix f?r sigma bauen if (cholesky) { L <- inv_vech(fullcoef[-(1:N)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(fullcoef[-(1:N)]) } #cat("Call to gmultiManjunathWilhelm with tet=",tet," fullcoef=", fullcoef, " sigma=",sigma," det(sigma)=",det(sigma),"\n") #flush.console() # if sigma is not positive definite we return some maximum value if (det(sigma) <= 0 || any(diag(sigma) < 0)) { X <- matrix(+Inf, T, N + N * (N+1) / 2) return(X) } # Determine moments (mu, sigma) for parameters mean/sigma # experimental: moments <- mtmvnorm(mean=mean, sigma=sigma, lower=lower, upper=upper, doCheckInputs=FALSE) moments <- mtmvnorm(mean=mean, sigma=sigma, lower=lower, upper=upper) # Momentenbedingungen fuer die Elemente von mean : mean(x) for(i in 1:N) { X[,i] <- (moments$tmean[i] - x[,i]) } # Momentenbedingungen fuer alle Lower-Diagonal-Elemente von sigma k <- 1 for (i in 1:N) { for (j in 1:N) { # (1,1), (2, 1), (2,2) if (j > i) next #cat(sprintf("sigma[%d,%d]",i, j),"\n") X[,(N+k)] <- (moments$tmean[i] - x[,i]) * (moments$tmean[j] - x[,j]) - moments$tvar[i, j] k <- k + 1 } } return(X) } # GMM estimation method # # @param X data matrix (T x n) # @param lower, upper truncation points # @param start list of start values for mu and sigma # @param fixed a list of fixed parameters # @param method either "ManjunathWilhelm" or "Lee" moment conditions # @param cholesky flag, if TRUE, we use the Cholesky decomposition of sigma as parametrization # @param ... additional parameters passed to gmm() gmm.tmvnorm <- function(X, lower=rep(-Inf, length = ncol(X)), upper=rep(+Inf, length = ncol(X)), start=list(mu=rep(0,ncol(X)), sigma=diag(ncol(X))), fixed=list(), method=c("ManjunathWilhelm","Lee"), cholesky=FALSE, ... ) { method <- match.arg(method) # check of standard tmvtnorm arguments cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check if we have at least one sample if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } # verify dimensions of x and lower/upper match n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ",length(lower)," columns.") } # check if lower <= X <= upper for all rows ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } # parameter vector theta theta <- c(start$mu, vech2(start$sigma)) # names for mean vector elements : mu_i nmmu <- paste("mu_",1:n,sep="") # names for sigma elements : sigma_i.j nmsigma <- paste("sigma_",vech2(outer(1:n,1:n, paste, sep=".")),sep="") names(theta) <- c(nmmu, nmsigma) fullcoefnames <- names(theta) # use only those parameters without the fixed parameters for gmm(), # since I do not know how to specify fixed=c() in gmm() theta2 <- theta[names(theta) %w/o% names(fixed)] # define a wrapper function with only 2 arguments theta and x (f(theta, x)) # that will be invoked by gmm() gManjunathWilhelm <- function(tet, x) { gmultiManjunathWilhelm(tet=tet, fixed=unlist(fixed), fullcoefnames=fullcoefnames, x=x, lower=lower, upper=upper, cholesky=cholesky) } # TODO: Allow for l_max parameter for Lee moment conditions gLee <- function(tet, x) { gmultiLee(tet = tet, fixed = unlist(fixed), fullcoefnames = fullcoefnames, x = x, lower = lower, upper = upper, cholesky = cholesky) } if (method == "ManjunathWilhelm") { gmm.fit <- gmm(gManjunathWilhelm, x=X, t0=theta2, ...) } else { gmm.fit <- gmm(gLee, x=X, t0=theta2, ...) } return(gmm.fit) } # deprecated # GMM mit Lee conditions gmm.tmvnorm2 <- function (X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), cholesky = FALSE, ...) { cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. Must have ", length(lower), " columns.") } ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i, ] >= lower & X[i, ] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } theta <- c(start$mu, vech2(start$sigma)) nmmu <- paste("mu_", 1:n, sep = "") nmsigma <- paste("sigma_", vech2(outer(1:n, 1:n, paste, sep = ".")), sep = "") names(theta) <- c(nmmu, nmsigma) fullcoefnames <- names(theta) theta2 <- theta[names(theta) %w/o% names(fixed)] gmultiwrapper <- function(tet, x) { gmultiLee(tet = tet, fixed = unlist(fixed), fullcoefnames = fullcoefnames, x = x, lower = lower, upper = upper, cholesky = cholesky) } gmm.fit <- gmm(gmultiwrapper, x = X, t0 = theta2, ...) 
return(gmm.fit) } tmvtnorm/R/mtmvnorm.R0000644000176200001440000002067114532765050014360 0ustar liggesusers# Expectation and covariance matrix computation # based on the algorithms by Lee (1979), Lee (1983), Leppard and Tallis (1989) # and Manjunath and Wilhelm (2009) # # References: # Amemiya (1973) : Regression Analysis When the Dependent Variable is Truncated Normal # Amemiya (1974) : Multivariate Regression and Simultaneous Equations Models When the Dependent Variables Are Truncated Normal # Lee (1979) : On the first and second moments of the truncated multi-normal distribution and a simple estimator # Lee (1983) : The Determination of Moments of the Doubly Truncated Multivariate Tobit Model # Leppard and Tallis (1989) : Evaluation of the Mean and Covariance of the Truncated Multinormal # Manjunath B G and Stefan Wilhelm (2009): # Moments Calculation for the Doubly Truncated Multivariate Normal Distribution # Johnson/Kotz (1972) # Compute truncated mean and truncated variance in the case # where only a subset of k < n x_1,..,x_k variables are truncated. # In this case, computations simplify and we only have to deal with k-dimensions. # Example: n=10 variables but only k=3 variables are truncated. # # Attention: Johnson/Kotz (1972), p.70 only works for zero mean vector! # We have to demean all variables first JohnsonKotzFormula <- function(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean))) { # determine which variables are truncated idx <- which(!is.infinite(lower) | !is.infinite(upper)) # index of truncated variables n <- length(mean) k <- length(idx) # number of truncated variables if (k >= n) stop(sprintf("Number of truncated variables (%s) must be lower than total number of variables (%s).", k, n)) if (k == 0) { return(list(tmean=mean, tvar=sigma)) # no truncation } # transform to zero mean first lower <- lower - mean upper <- upper - mean # partitionining of sigma # sigma = [ V11 V12 ] # [ V21 V22 ] V11 <- sigma[idx,idx] V12 <- sigma[idx,-idx] V21 <- sigma[-idx,idx] V22 <- sigma[-idx,-idx] # determine truncated mean xi and truncated variance U11 r <- mtmvnorm(mean=rep(0, k), sigma=V11, lower=lower[idx], upper=upper[idx]) xi <- r$tmean U11 <- r$tvar invV11 <- solve(V11) # V11^(-1) # See Johnson/Kotz (1972), p.70 formula tmean <- numeric(n) tmean[idx] <- xi tmean[-idx] <- xi %*% invV11 %*% V12 tvar <- matrix(NA, n, n) tvar[idx, idx] <- U11 tvar[idx, -idx] <- U11 %*% invV11 %*% V12 tvar[-idx, idx] <- V21 %*% invV11 %*% U11 tvar[-idx, -idx] <- V22 - V21 %*% (invV11 - invV11 %*% U11 %*% invV11) %*% V12 tmean <- tmean + mean return(list(tmean=tmean, tvar=tvar)) } # Mean and Covariance of the truncated multivariate distribution (double truncation, general sigma, general mean) # # @param mean mean vector (k x 1) # @param sigma covariance matrix (k x k) # @param lower lower truncation point (k x 1) # @param upper upper truncation point (k x 1) # @param doComputeVariance flag whether to compute variance (for performance reasons) mtmvnorm <- function(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), doComputeVariance=TRUE, pmvnorm.algorithm=GenzBretz()) { N <- length(mean) # Check input parameters cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check number of truncated variables; if only a subset of variables is truncated # we can 
use the Johnson/Kotz formula together with mtmvnorm() # determine which variables are truncated idx <- which(!is.infinite(lower) | !is.infinite(upper)) # index of truncated variables k <- length(idx) # number of truncated variables if (k < N) { return(JohnsonKotzFormula(mean=mean, sigma=sigma, lower=lower, upper=upper)) } # Truncated Mean TMEAN <- numeric(N) # Truncated Covariance matrix TVAR <- matrix(NA, N, N) # Verschiebe die Integrationsgrenzen um -mean, damit der Mittelwert 0 wird a <- lower - mean b <- upper - mean lower <- lower - mean upper <- upper - mean # eindimensionale Randdichte F_a <- numeric(N) F_b <- numeric(N) zero_mean <- rep(0,N) # pre-calculate one-dimensial marginals F_a[q] once for (q in 1:N) { tmp <- dtmvnorm.marginal(xn=c(a[q],b[q]), n = q, mean=zero_mean, sigma=sigma, lower=lower, upper=upper) F_a[q] <- tmp[1] F_b[q] <- tmp[2] } # 1. Bestimme E[X_i] = mean + Sigma %*% (F_a - F_b) TMEAN <- as.vector(sigma %*% (F_a - F_b)) if (doComputeVariance) { # TODO: # calculating the bivariate densities is not necessary # in case of conditional independence. # calculate bivariate density only on first use and then cache it # so we can avoid this memory overhead. F2 <- matrix(0, N, N) for (q in 1:N) { for (s in 1:N) { if (q != s) { d <- dtmvnorm.marginal2( xq=c(a[q], b[q], a[q], b[q]), xr=c(a[s], a[s], b[s], b[s]), q=q, r=s, mean=zero_mean, sigma=sigma, lower=lower, upper=upper, pmvnorm.algorithm=pmvnorm.algorithm) F2[q,s] <- (d[1] - d[2]) - (d[3] - d[4]) } } } # 2. Bestimme E[X_i, X_j] # Check if a[q] = -Inf or b[q]=+Inf, then F_a[q]=F_b[q]=0, but a[q] * F_a[q] = NaN and b[q] * F_b[q] = NaN F_a_q <- ifelse(is.infinite(a), 0, a * F_a) # n-dimensional vector q=1..N F_b_q <- ifelse(is.infinite(b), 0, b * F_b) # n-dimensional vector q=1..N for (i in 1:N) { for (j in 1:N) { sum <- 0 for (q in 1:N) { sum <- sum + sigma[i,q] * sigma[j,q] * (sigma[q,q])^(-1) * (F_a_q[q] - F_b_q[q]) if (j != q) { sum2 <- 0 for (s in 1:N) { # this term tt will be zero if the partial correlation coefficient \rho_{js.q} is zero! # even for s == q will the term be zero, so we do not need s!=q condition here tt <- (sigma[j,s] - sigma[q,s] * sigma[j,q] * (sigma[q,q])^(-1)) sum2 <- sum2 + tt * F2[q,s] } sum2 <- sigma[i, q] * sum2 sum <- sum + sum2 } } # end for q TVAR[i, j] <- sigma[i, j] + sum #general mean case: TVAR[i, j] = mean[j] * TMEAN[i] + mean[i] * TMEAN[j] - mean[i] * mean[j] + sigma[i, j] + sum } } # 3. Bestimme Varianz Cov(X_i, X_j) = E[X_i, X_j] - E[X_i]*E[X_j] fuer (0, sigma)-case TVAR <- TVAR - TMEAN %*% t(TMEAN) } else { TVAR = NA } # 4. Rueckverschiebung um +mean fuer (mu, sigma)-case TMEAN <- TMEAN + mean return(list(tmean=TMEAN, tvar=TVAR)) } # Bestimmung von Erwartungswert und Kovarianzmatrix ueber numerische Integration und die eindimensionale Randdichte # d.h. # E[X_i] = \int_{a_i}^{b_i}{x_i * f(x_i) d{x_i}} # Var[x_i] = \int_{a_i}^{b_i}{(x_i-\mu_i)^2 * f(x_i) d{x_i}} # Cov[x_i,x_j] = \int_{a_i}^{b_i}\int_{a_j}^{b_j}{(x_i-\mu_i)(x_j-\mu_j) * f(x_i,x_j) d{x_i}d{x_j}} # # Die Bestimmung von E[X_i] und Var[x_i] # Die Bestimmung der Kovarianz Cov[x_i,x_j] benoetigt die zweidimensionale Randdichte. 
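# A small cross-check sketch (illustrative only; the example inputs are arbitrary):
# the quadrature-based marginal moments should agree with the closed-form
# results of mtmvnorm(), e.g.
#   mom.q <- mtmvnorm.quadrature(mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(1,1))
#   mom.a <- mtmvnorm(mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(1,1))
#   all.equal(mom.q$m, mom.a$tmean, tolerance=1e-4)       # marginal means
#   all.equal(mom.q$v, diag(mom.a$tvar), tolerance=1e-4)  # marginal variances
#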
# # # @param mean Mittelwertvektor (k x 1) # @param sigma Kovarianzmatrix (k x k) # @param lower, upper obere und untere Trunkierungspunkte (k x 1) mtmvnorm.quadrature <- function(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean))) { k = length(mean) # Bestimmung des Erwartungswerts/Varianz ?ber numerische Integration expectation <- function(x, n=1) { x * dtmvnorm.marginal(x, n=n, mean=mean, sigma=sigma, lower=lower, upper=upper) } variance <- function(x, n=1) { (x - m.integration[n])^2 * dtmvnorm.marginal(x, n=n, mean=mean, sigma=sigma, lower=lower, upper=upper) } # Determine expectation from one-dimensional marginal distribution using integration # i=1..k m.integration<-numeric(k) for (i in 1:k) { m.integration[i] <- integrate(expectation, lower[i], upper[i], n=i)$value } # Determine variances from one-dimensional marginal distribution using integration # i=1..k v.integration<-numeric(k) for (i in 1:k) { v.integration[i] <- integrate(variance, lower[i], upper[i], n=i)$value } return(list(m=m.integration, v=v.integration)) } tmvtnorm/R/ptmvnorm.R0000644000176200001440000000361714532765325014371 0ustar liggesusers # Verteilungsfunktion der truncated multivariate normal distribution # # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper ptmvnorm <- function(lowerx, upperx, mean=rep(0, length(lowerx)), sigma, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check of additional arguments lowerx and upperx if (is.null(lowerx) || any(is.na(lowerx))) stop(sQuote("lowerx"), " not specified or contains NA") if (is.null(upperx) || any(is.na(upperx))) stop(sQuote("upperx"), " not specified or contains NA") if (!is.numeric(lowerx) || !is.vector(lowerx)) stop(sQuote("lowerx"), " is not a numeric vector") if (!is.numeric(upperx) || !is.vector(upperx)) stop(sQuote("upperx"), " is not a numeric vector") if (length(lowerx) != length(lower) || length(lower) != length(upperx)) stop("lowerx an upperx must have the same length as lower and upper!") if (any(lowerx>=upperx)) stop("lowerx must be smaller than or equal to upperx (lowerx<=upperx)") # Aufpassen: # Wir muessen garantieren, dass nur innerhalb des Support-Bereichs lower <= x <= upper integriert wird. Sonst kann Ergebnis >= 1 rauskommen. # Wenn einzelne Komponenten von lowerx <= lower sind, dann von der Untergrenze lower integrieren. Analog fuer upperx >= upper f <- pmvnorm(lower=pmax(lowerx, lower), upper=pmin(upperx, upper), mean=mean, sigma=sigma, maxpts = maxpts, abseps = abseps, releps = releps) / pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma, maxpts = maxpts, abseps = abseps, releps = releps) return(f) } tmvtnorm/R/rtmvnorm.R0000644000176200001440000007005114216151574014361 0ustar liggesusers################################################################################ # # Sampling from Truncated multivariate Gaussian distribution using # # a) Rejection sampling # b) Gibbs sampler # # for both rectangular constraints a <= x <= b and general linear constraints # a <= Dx <= b. For D = I this implies rectangular constraints. # The method can be used using both covariance matrix sigma and precision matrix H. 
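# A minimal usage sketch (illustrative only; the example numbers are arbitrary),
# showing the two equivalent parametrizations via the covariance matrix sigma
# and the precision matrix H = sigma^(-1):
#
#   sigma <- matrix(c(4, 2, 2, 3), 2, 2)
#   X1 <- rtmvnorm(n=100, mean=c(1, 2), sigma=sigma,
#                  lower=c(-1, -1), upper=c(1, 1))
#   X2 <- rtmvnorm(n=100, mean=c(1, 2), H=solve(sigma),
#                  lower=c(-1, -1), upper=c(1, 1), algorithm="gibbs")
#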
# # Author: Stefan Wilhelm # # References: # (1) Jayesh H. Kotecha and Petar M. Djuric (1999) : # "GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE GAUSSIAN RANDOM VARIABLES" # (2) Geweke (1991): # "Effcient simulation from the multivariate normal and Student-t distributions # subject to linear constraints and the evaluation of constraint probabilities" # (3) John Geweke (2005): Contemporary Bayesian Econometrics and Statistics, Wiley, pp.171-172 # (4) Wilhelm (2011) package vignette to package "tmvtnorm" # ################################################################################ # We need this separate method rtmvnorm.sparseMatrix() because # rtmvnorm() initialises dense d x d sigma and D matrix which will not work for high dimensions d. # It also does some sanity checks on sigma and D (determinant etc.) which will not # work for high dimensions. # returns a matrix X (n x d) with random draws # from a truncated multivariate normal distribution with d dimensionens # using Gibbs sampling # # @param n Anzahl der Realisationen # @param mean mean vector (d x 1) der Normalverteilung # @param lower lower truncation vector (d x 1) with lower <= x <= upper # @param upper upper truncation vector (d x 1) with lower <= x <= upper # @param H precision matrix (d x d) if given, defaults to identity matrix rtmvnorm.sparseMatrix <- function(n, mean = rep(0, nrow(H)), H = sparseMatrix(i=1:length(mean), j=1:length(mean), x=1), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), ...) { if (is.null(H) || !inherits(H, "sparseMatrix")) { stop("H must be of class 'sparseMatrix'") } rtmvnorm.gibbs.Fortran(n, mean, sigma=NULL, H, lower, upper, ...) } # Erzeugt eine Matrix X (n x d) mit Zufallsrealisationen # aus einer Trunkierten Multivariaten Normalverteilung mit d Dimensionen # ?ber Rejection Sampling oder Gibbs Sampler aus einer Multivariaten Normalverteilung. # If matrix D is given, it must be a (d x d) full rank matrix. # Therefore this method can only cover the case with only r <= d linear restrictions. # For r > d linear restrictions, please see rtmvnorm2(n, mean, sigma, D, lower, upper), # where D can be defined as (r x d). # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (d x 1) der Normalverteilung # @param sigma Kovarianzmatrix (d x d) der Normalverteilung # @param lower unterer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param upper oberer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param D Matrix for linear constraints, defaults to (d x d) diagonal matrix # @param H Precision matrix (d x d) if given # @param algorithm c("rejection", "gibbs", "gibbsR") rtmvnorm <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean)), H = NULL, algorithm=c("rejection", "gibbs", "gibbsR"), ...) { algorithm <- match.arg(algorithm) if (is.null(mean) && (is.null(sigma) || is.null(H))) { stop("Invalid arguments for ",sQuote("mean")," and ",sQuote("sigma"),"/",sQuote("H"),". 
Need at least mean vector and covariance or precision matrix.") } # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (!is.null(H) && !identical(sigma, diag(length(mean)))) { stop("Cannot give both covariance matrix sigma and precision matrix H arguments at the same time") } else if (!is.null(H) && !inherits(H, "sparseMatrix")) { # check precision matrix H if it is symmetric and positive definite checkSymmetricPositiveDefinite(H, name="H") # H explicitly given, we will override sigma later if we need sigma # sigma <- solve(H) } # else sigma explicitly or implicitly given # check of additional arguments if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) { stop("n must be a integer scalar > 0") } # check matrix D, must be n x n with rank n if (!is.matrix(D) || det(D) == 0) { stop("D must be a (n x n) matrix with full rank n!") } if (!identical(D,diag(length(mean)))) { # D <> I : general linear constraints retval <- rtmvnorm.linear.constraints(n=n, mean=mean, sigma=sigma, H=H, lower=lower, upper=upper, D=D, algorithm=algorithm, ...) return(retval) } else { # D == I : rectangular case if (algorithm == "rejection") { if (!is.null(H)) { # precision matrix case H retval <- rtmvnorm.rejection(n, mean, sigma=solve(H), lower, upper, ...) } else { # covariance matrix case sigma retval <- rtmvnorm.rejection(n, mean, sigma, lower, upper, ...) } } else if (algorithm == "gibbs") { # precision matrix case H vs. covariance matrix case sigma will be handled inside method retval <- rtmvnorm.gibbs.Fortran(n, mean, sigma, H, lower, upper, ...) } else if (algorithm == "gibbsR") { if (!is.null(H)) { # precision matrix case H retval <- rtmvnorm.gibbs.Precision(n, mean, H, lower, upper, ...) } else { # covariance matrix case sigma retval <- rtmvnorm.gibbs(n, mean, sigma, lower, upper, ...) } } } return(retval) } # Erzeugt eine Matrix X (n x k) mit Zufallsrealisationen # aus einer Trunkierten Multivariaten Normalverteilung mit k Dimensionen # ?ber Rejection Sampling aus einer Multivariaten Normalverteilung mit der Bedingung # lower <= Dx <= upper # # Wenn D keine Diagonalmatrix ist, dann ist gelten lineare Restriktionen f?r # lower <= Dx <= upper (siehe Geweke (1991)) # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param sigma Kovarianzmatrix (k x k) der Normalverteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param D Matrix for linear constraints, defaults to diagonal matrix rtmvnorm.rejection <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean))) { # No check of input parameters, checks are done in rtmvnorm()! # k = Dimension k <- length(mean) # Ergebnismatrix (n x k) Y <- matrix(NA, n, k) # Anzahl der noch zu ziehenden Samples numSamples <- n # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- 0 # Akzeptanzrate alpha aus der Multivariaten Normalverteilung bestimmen r <- length(lower) d <- length(mean) if (r == d & identical(D, diag(d))) { alpha <- pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma) if (alpha <= 0.01) warning(sprintf("Acceptance rate is very low (%s) and rejection sampling becomes inefficient. 
Consider using Gibbs sampling.", alpha)) estimatedAlpha <- TRUE } else { # TODO: Wie bestimme ich aus lower <= Dx <= upper f?r r > d Restriktionen die Akzeptanzrate alpha? # Defere calculation of alpha. Assume for now that all samples will be accepted. alpha <- 1 estimatedAlpha <- FALSE } # Ziehe wiederholt aus der Multivariaten NV und schaue, wieviel Samples nach Trunkierung ?brig bleiben while(numSamples > 0) { # Erzeuge N/alpha Samples aus einer multivariaten Normalverteilung: Wenn alpha zu niedrig ist, wird Rejection Sampling ineffizient und N/alpha zu gro?. Dann nur N erzeugen nproposals <- ifelse (numSamples/alpha > 1000000, numSamples, ceiling(max(numSamples/alpha,10))) X <- rmvnorm(nproposals, mean=mean, sigma=sigma) # Bestimme den Anteil der Samples nach Trunkierung # Bug: ind= rowSums(lower <= X & X <= upper) == k # wesentlich schneller als : ind=apply(X, 1, function(x) all(x >= lower & x<=upper)) X2 <- X %*% t(D) ind <- logical(nproposals) for (i in 1:nproposals) { ind[i] <- all(X2[i,] >= lower & X2[i,] <= upper) } # Anzahl der akzeptierten Samples in diesem Durchlauf numAcceptedSamples <- length(ind[ind==TRUE]) # Wenn nix akzeptiert wurde, dann weitermachen if (length(numAcceptedSamples) == 0 || numAcceptedSamples == 0) next if (!estimatedAlpha) { alpha <- numAcceptedSamples / nproposals if (alpha <= 0.01) warning(sprintf("Acceptance rate is very low (%s) and rejection sampling becomes inefficient. Consider using Gibbs sampling.", alpha)) } #cat("numSamplesAccepted=",numAcceptedSamples," numSamplesToDraw = ",numSamples,"\n") numNeededSamples <- min(numAcceptedSamples, numSamples) Y[(numAcceptedSamplesTotal+1):(numAcceptedSamplesTotal+numNeededSamples),] <- X[which(ind)[1:numNeededSamples],] # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- numAcceptedSamplesTotal + numAcceptedSamples # Anzahl der verbliebenden Samples numSamples <- numSamples - numAcceptedSamples } Y } # Gibbs Sampler for Truncated Univariate Normal Distribution # # Jayesh H. Kotecha and Petar M. Djuric (1999) : GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE GAUSSIAN RANDOM VARIABLES # # Im univariaten Fall sind die erzeugten Samples unabh?ngig, # deswegen gibt es hier keine Chain im eigentlichen Sinn und auch keinen Startwert/Burn-in/Thinning. # # As a change to Kotecha, we do not draw a sample x from the Gaussian Distribution # and then apply pnorm(x) - which is uniform - but rather draw directly from the # uniform distribution u ~ U(0, 1). # # @param n number of realisations # @param mu mean of the normal distribution # @param sigma standard deviation # @param a lower truncation point # @param b upper truncation point rtnorm.gibbs <- function(n, mu=0, sigma=1, a=-Inf, b=Inf) { # Draw from Uni(0,1) F <- runif(n) #Phi(a) und Phi(b) Fa <- pnorm(a, mu, sd=sigma) Fb <- pnorm(b, mu, sd=sigma) # Truncated Normal Distribution, see equation (6), but F(x) ~ Uni(0,1), # so we directly draw from Uni(0,1) instead of doing: # x <- rnorm(n, mu, sigma) # y <- mu + sigma * qnorm(pnorm(x)*(Fb - Fa) + Fa) y <- mu + sigma * qnorm(F * (Fb - Fa) + Fa) y } # Gibbs Sampler Implementation in R for Truncated Multivariate Normal Distribution # (covariance case with sigma) # Jayesh H. Kotecha and Petar M. 
Djuric (1999) : # GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE # GAUSSIAN RANDOM VARIABLES # # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param sigma Kovarianzmatrix (k x k) der Normalverteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= Dx <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= Dx <= upper # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvnorm.gibbs <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # We check only additional arguments like "burn.in.samples", "start.value" and "thinning" if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) { stop("thinning must be a integer scalar > 0") } # dimension of X d <- length(mean) # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Take start value given by user or determine from lower and upper if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(start.value < lower) || any(start.value > upper)) stop("start value is not inside support region") x0 <- start.value } else { # Start value from support region, may be lower or upper bound, if they are finite, # if both are infinite, we take 0. x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0)) } # Sample from univariate truncated normal distribution which is very fast. if (d == 1) { X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1]) return(X) } # Ergebnismatrix (n x k) X <- matrix(NA, n, d) # Draw from Uni(0,1) U <- runif((S + n*thinning) * d) l <- 1 # List of conditional standard deviations can be pre-calculated sd <- list(d) # List of t(Sigma_i) %*% solve(Sigma) term P <- list(d) for(i in 1:d) { # Partitioning of Sigma Sigma <- sigma[-i,-i] # (d-1) x (d-1) sigma_ii <- sigma[i,i] # 1 x 1 Sigma_i <- sigma[i,-i] # 1 x (d-1) P[[i]] <- t(Sigma_i) %*% solve(Sigma) # (1 x (d-1)) * ((d-1) x (d-1)) = (1 x (d-1)) sd[[i]] <- sqrt(sigma_ii - P[[i]] %*% Sigma_i) # (1 x (d-1)) * ((d-1) x 1) } x <- x0 # Runn chain from index (1 - #burn-in-samples):(n*thinning) and only record samples from j >= 1 # which discards the burn-in-samples for (j in (1-S):(n*thinning)) { # For all dimensions for(i in 1:d) { # Berechnung von bedingtem Erwartungswert und bedingter Varianz: # bedingte Varianz h?ngt nicht von x[-i] ab! 
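      # conditional mean mu(i|-i) = mean[i] + Sigma_{i,-i} Sigma_{-i,-i}^{-1} (x[-i] - mean[-i]);
      # the product P[[i]] = Sigma_{i,-i} Sigma_{-i,-i}^{-1} has been precalculated above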
mu_i <- mean[i] + P[[i]] %*% (x[-i] - mean[-i]) # Transformation F.tmp <- pnorm(c(lower[i], upper[i]), mu_i, sd[[i]]) Fa <- F.tmp[1] Fb <- F.tmp[2] x[i] <- mu_i + sd[[i]] * qnorm(U[l] * (Fb - Fa) + Fa) l <- l + 1 } if (j > 0) { if (thinning == 1) { # no thinning, take all samples except for burn-in-period X[j,] <- x } else if (j %% thinning == 0){ X[j %/% thinning,] <- x } } } return(X) } # R-Implementation of Gibbs sampler with precision matrix H # # @param n number of random draws # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param H Precision matrix (k x k) der Normalverteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvnorm.gibbs.Precision <- function(n, mean = rep(0, nrow(H)), H = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # We check only additional arguments like "burn.in.samples", "start.value" and "thinning" if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) { stop("thinning must be a integer scalar > 0") } # dimension of X d <- length(mean) # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Take start value given by user or determine from lower and upper if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(start.valueupper)) stop("start value is not inside support region") x0 <- start.value } else { # Start value from support region, may be lower or upper bound, if they are finite, # if both are infinite, we take 0. x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0)) } # Sample from univariate truncated normal distribution which is very fast. if (d == 1) { X <- rtnorm.gibbs(n, mu=mean[1], sigma=1/H[1,1], a=lower[1], b=upper[1]) return(X) } # Ergebnismatrix (n x k) X <- matrix(NA, n, d) # Draw from U ~ Uni(0,1) for all iterations we need in advance U <- runif((S + n*thinning) * d) l <- 1 # Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) # does not depend on x[-i] and can be precalculated before running the chain. 
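  # note: the conditional *variance* is H[i,i]^(-1), hence the conditional
  # standard deviation below is sqrt(1 / H[i,i])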
sd <- sqrt(1 / diag(H)) # start value of the chain x <- x0 # Run chain from index (1 - #burn-in-samples):(n*thinning) and only record samples from j >= 1 # which discards the burn-in-samples for (j in (1-S):(n*thinning)) { # For all dimensions for(i in 1:d) { # conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) mu_i <- mean[i] - (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i]) # draw x[i | -i] from conditional univariate truncated normal distribution with # TN(E[i | -i], sd(i | -i), lower[i], upper[i]) F.tmp <- pnorm(c(lower[i], upper[i]), mu_i, sd[i]) Fa <- F.tmp[1] Fb <- F.tmp[2] x[i] <- mu_i + sd[i] * qnorm(U[l] * (Fb - Fa) + Fa) l <- l + 1 } if (j > 0) { if (thinning == 1) { # no thinning, take all samples except for burn-in-period X[j,] <- x } else if (j %% thinning == 0){ X[j %/% thinning,] <- x } } } return(X) } # Gibbs sampler with compiled Fortran code # Depending on, whether covariance matrix Sigma or precision matrix H (dense or sparse format) # is specified as parameter, we call either # Fortran routine "rtmvnormgibbscov" (dense covariance matrix sigma), # "rtmvnormgibbsprec" (dense matrix H) or "rtmvnormgibbssparseprec" (sparse precision matrix H). # # @param H precision matrix in sparse triplet format (i, j, v) # Memory issues: We want to increase dimension d, and return matrix X will be (n x d) # so if we want to create a large number of random samples X (n x d) with high d then # we will probably also run into memory problems (X is dense). In most MCMC applications, # we only have to create a small number n in high dimensions, # e.g. 1 random sample per iteration (+ burn-in-samples). # In this case we will not experience any problems. Users should be aware of choosing n and d appropriately rtmvnorm.gibbs.Fortran <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), H = NULL, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # No checks of input arguments, checks are done in rtmvnorm() # dimension of X d <- length(mean) # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Take start value given by user or determine from lower and upper if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(start.valueupper)) stop("start value is not inside support region") x0 <- start.value } else { # Start value from support region, may be lower or upper bound, if they are finite, # if both are infinite, we take 0. x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0)) } # Sample from univariate truncated normal distribution which is very fast. 
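  # (in one dimension the draws from rtnorm.gibbs() are independent,
  # so no burn-in or thinning is needed here)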
if (d == 1) { if (!is.null(H)) { X <- rtnorm.gibbs(n, mu=mean[1], sigma=1 / sigma[1,1], a=lower[1], b=upper[1]) } else { X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1]) } return(X) } # Ergebnismatrix (n x d) X <- matrix(0, n, d) # Call to Fortran subroutine if (!is.null(H)){ if (!inherits(H, "sparseMatrix")) { ret <- .Fortran("rtmvnormgibbsprec", n = as.integer(n), d = as.integer(d), mean = as.double(mean), H = as.double(H), lower = as.double(lower), upper = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") } else if (inherits(H, "dgCMatrix")) { # H is given in compressed sparse column (csc) representation ret <- .Fortran("rtmvnorm_sparse_csc", n = as.integer(n), d = as.integer(d), mean = as.double(mean), Hi = as.integer(H@i), Hp = as.integer(H@p), Hv = as.double(H@x), num_nonzero = as.integer(length(H@x)), lower = as.double(lower), upper = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") } else { # H is given in sparse matrix triplet representation # Es muss klar sein, dass nur die obere Dreiecksmatrix (i <= j) ?bergeben wird... sH <- as(H, "dgTMatrix") # precision matrix as triplet representation # ATTENTION: sH@i and sH@j are zero-based (0..(n-1)), we need it as 1...n ind <- sH@i <= sH@j # upper triangular matrix elements of H[i,j] with i <= j ret <- .Fortran("rtmvnorm_sparse_triplet", n = as.integer(n), d = as.integer(d), mean = as.double(mean), Hi = as.integer(sH@i[ind]+1), Hj = as.integer(sH@j[ind]+1), Hv = as.double(sH@x[ind]), num_nonzero = as.integer(sum(ind)), lower = as.double(lower), upper = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") } } else { ret <- .Fortran("rtmvnormgibbscov", n = as.integer(n), d = as.integer(d), mean = as.double(mean), sigma = as.double(sigma), lower = as.double(lower), upper = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") } X <- matrix(ret$X, ncol=d, byrow=TRUE) return(X) } # Gibbs sampling f?r Truncated Multivariate Normal Distribution # with linear constraints based on Geweke (1991): # This is simply a wrapper function around our rectangular sampling version... # # x ~ N(mu, sigma) subject to a <= Dx <= b # # alpha <= z <= beta # mit alpha = a - D * mu, beta = b - D * mu # z ~ N(0, T), T = D Sigma D' # x = mu + D^(-1) z # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der t-verteilung # @param sigma Kovarianzmatrix (k x k) der t-Verteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param D Matrix for linear constraints, defaults to diagonal matrix # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvnorm.linear.constraints <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), H = NULL, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean)), algorithm,...) 
{ # dimension of X d <- length(mean) # check matrix D, must be n x n with rank n if (!is.matrix(D) || det(D) == 0) { stop("D must be a (n x n) matrix with full rank n!") } # create truncated multi-normal samples in variable Z ~ N(0, T) # with alpha <= z <= beta # Parameter-Transformation for given sigma: # x ~ N(mean, sigma) subject to a <= Dx <= b # define z = D x - D mu # alpha <= z <= beta # mit alpha = a - D * mu # beta = b - D * mu # z ~ N(0, T), # T = D Sigma D' # x = mu + D^(-1) z # Parameter-Transformation for given H: # x ~ N(mean, H^{-1}) # precision matrix in z is: # T^{-1} = D'^{-1} H D^{-1} # (AB)^{-1} = B^{-1} %*% A^{-1} alpha <- as.vector(lower - D %*% mean) beta <- as.vector(upper - D %*% mean) Dinv <- solve(D) # D^(-1) if (!is.null(H)) { Tinv <- t(Dinv) %*% H %*% Dinv Z <- rtmvnorm(n, mean=rep(0, d), sigma=diag(d), H=Tinv, lower=alpha, upper=beta, algorithm=algorithm, ...) } else { T <- D %*% sigma %*% t(D) Z <- rtmvnorm(n, mean=rep(0, d), sigma=T, H=NULL, lower=alpha, upper=beta, algorithm=algorithm, ...) } # For each z do the transformation # x = mu + D^(-1) z X <- sweep(Z %*% t(Dinv), 2, FUN="+", mean) return(X) } ################################################################################ if (FALSE) { checkSymmetricPositiveDefinite(matrix(1:4, 2, 2), name = "H") lower <- c(-1, -1) upper <- c(1, 1) mean <- c(0.5, 0.5) sigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2) H <- solve(sigma) D <- matrix(c(1, 1, 1, -1), 2, 2) checkSymmetricPositiveDefinite(H, name = "H") # 1. covariance matrix sigma case # 1.1. rectangular case D == I X0 <- rtmvnorm(n=1000, mean, sigma, lower, upper, algorithm="rejection") X1 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) cov(X1) cov(X2) cov(X3) # 1.2. general linear constraints case D <> I X1 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) # 2. precision matrix case H # 2.1. rectangular case D == I X1 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) # 2.2. 
general linear constraints case D <> I X1 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) } tmvtnorm/MD50000644000176200001440000000456714533645532012476 0ustar liggesuserseb1f2f19a3ebdb6ea2aea450bb670080 *DESCRIPTION c480d4327126a0be194507a408d3b1ce *NAMESPACE 48b94cb6443280073a3e66ab2c4867b1 *NEWS ca1eb4bdc33b12c6a59a6908ea43ffa2 *R/bivariate-marginal-density.R 166a71d0a1ce1769eb7dd1a094c45eb7 *R/checkTmvArgs.R 794c0dcf67756b7b9d0de6ab6eba591e *R/dtmvnorm-marginal.R 9d7bfce7a98d353876568da6bdb22416 *R/dtmvnorm.R a17c645dd934398b4640de0fd7f3a54a *R/dtmvt.R bad20aba562fa5350a1b5291fca7e236 *R/mtmvnorm.R c40bf477a1ff02d5f4f70a83b5c2b485 *R/ptmvnorm-marginal.R 391811e35091c605b72f0ae16f355940 *R/ptmvnorm.R 8b709592e4821a482fee717fe7987cbf *R/ptmvt-marginal.R 652d04ae2331c0661cc0b61ff8f39b6c *R/ptmvt.R 2c10f680c0d50127650e636e4613278f *R/qtmvnorm-marginal.R 608ef8db9d92d21acb5a70e39142316c *R/rtmvnorm.R d4f8929005064ca9729b666ba4d1a296 *R/rtmvnorm2.R cb637925ff96579fd3400541333c9d90 *R/rtmvt.R 43cfbe37d3bd1f9bc37b4f37a666c74b *R/tmvnorm-estimation-GMM.R e81d08d5358a28a8d3dfcf5200a8a04d *R/tmvnorm-estimation.R 86d6122a6b1347ad7e01ad45c114f2de *README.md dbad21dbf495decc29548ff411a63738 *build/vignette.rds 05f0324f7a52081a7faca3fe125d2879 *demo/00Index 43befa36b93301eb64f743780e3ceac8 *demo/demo1.R 8cb92af9f083a0e444c58f5c82d79ace *demo/demo2.R 6f0a77f6c83ea4528fca2ba6f7c86f2a *inst/CITATION 4888713acfa7437d004bda25a2881fce *inst/doc/GibbsSampler.Rnw 9606a3b11da7e34d59a626e171b6aabe *inst/doc/GibbsSampler.pdf d1f23c2a2d60f9bffa9ac5e6a6cb0991 *man/dmvnorm.marginal.Rd 8a150cc866ec4050391cfa67b2338991 *man/dtmvnorm.marginal2.Rd 6eaba0c191da0713b906af250775dbbc *man/dtmvt.Rd a920d1818bca742362e105c79ce68442 *man/gmm.tmvnorm.Rd 1827e59a9f4bfa78a87b4115a64c4ea5 *man/mle.tmvnorm.Rd 85ca9833690d110eefb7225485af713e *man/mtmvnorm.Rd b774db13d8f8cd2cc45095571f6db206 *man/ptmvnorm.Rd c48a6ae141dea1f55e97ea86275ee3f5 *man/ptmvnorm.marginal.Rd 34b4439bf4d1b08488f1145d3e13befa *man/ptmvt.Rd 80c6c48214efbd8ad412677db32f5531 *man/qtmvnorm-marginal.Rd af6420244ef46e734fcabca86ecc77fb *man/rtmvnorm.Rd 6679f9d0fc409cd57555b9a2f0af55d2 *man/rtmvnorm2.Rd 819475b0d7403d8675597a99debc3771 *man/rtmvt.Rd 56ff9ee8d7d9d645a83ccf47d5fe5b1e *man/tmvnorm.Rd ae25215cedbedbe2d9de559d8f4f5d66 *src/Fortran2CWrapper.c b250052c55dffdfaf7f35a9f61fb22d0 *src/Makevars 36b6e5d6d9569aa0771c5173f60e7027 *src/linked_list.f90 ce89e576587c7fc5af00ac60244b9bde *src/rtmvnormgibbs.f90 4888713acfa7437d004bda25a2881fce *vignettes/GibbsSampler.Rnw 28065219dbfddba2e85a093c5a456c95 *vignettes/tmvtnorm.bib tmvtnorm/inst/0000755000176200001440000000000014533561630013123 5ustar liggesuserstmvtnorm/inst/doc/0000755000176200001440000000000014533561630013670 5ustar liggesuserstmvtnorm/inst/doc/GibbsSampler.Rnw0000644000176200001440000002343314216137300016726 0ustar liggesusers%\VignetteIndexEntry{A short description of the Gibbs Sampler} \documentclass[a4paper]{article} \usepackage{Rd} \usepackage{amsmath} \usepackage{natbib} \usepackage{palatino,mathpazo} \usepackage{Sweave} %\newcommand{\pkg}[1]{\textbf{#1}} \newcommand{\vecb}[1]{\ensuremath{\boldsymbol{\mathbf{#1}}}} \def\bfx{\mbox{\boldmath $x$}} \def\bfy{\mbox{\boldmath $y$}} \def\bfz{\mbox{\boldmath $z$}} \def\bfalpha{\mbox{\boldmath 
$\alpha$}}
\def\bfbeta{\mbox{\boldmath $\beta$}}
\def\bfmu{\mbox{\boldmath $\mu$}}
\def\bfa{\mbox{\boldmath $a$}}
\def\bfb{\mbox{\boldmath $b$}}
\def\bfu{\mbox{\boldmath $u$}}
\def\bfSigma{\mbox{\boldmath $\Sigma$}}
\def\bfD{\mbox{\boldmath $D$}}
\def\bfH{\mbox{\boldmath $H$}}
\def\bfT{\mbox{\boldmath $T$}}
\def\bfX{\mbox{\boldmath $X$}}
\def\bfY{\mbox{\boldmath $Y$}}
\title{Gibbs Sampler for the Truncated Multivariate Normal Distribution}
\author{Stefan Wilhelm\thanks{wilhelm@financial.com}}
\begin{document}
\SweaveOpts{concordance=TRUE}
\maketitle
In this note we describe two ways of generating random variables with the Gibbs sampling approach for a truncated multivariate normal variable $\bfx$, whose density function can be expressed as:
\begin{eqnarray*}
f(\bfx,\bfmu,\bfSigma,\bfa,\bfb) & = & \frac{ \exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\}} } { \int_{\bfa}^{\bfb}{\exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\} } d\bfx } }
\end{eqnarray*}
for $\bfa \le \bfx \le \bfb$ and $0$ otherwise.\\
\par
The first approach, as described by \cite{Kotecha1999}, uses the covariance matrix $\bfSigma$ and has been implemented in the R package \pkg{tmvtnorm} since version 0.9 (\cite{tmvtnorm-0.9}). The second way is based on the works of \cite{Geweke1991,Geweke2005} and uses the precision matrix $\bfH = \bfSigma^{-1}$. As will be shown below, using the precision matrix offers some computational advantages, since it does not involve matrix inversions and is therefore favorable in higher dimensions and in settings where the precision matrix is readily available. Applications include, for example, the analysis of spatial data, such as from telecommunications or social networks.\\
\par
Both versions of the Gibbs sampler can also be used for general linear constraints $\bfa \le \bfD \bfx \le \bfb$, which we will show in the last section. The function \code{rtmvnorm()} in the package \pkg{tmvtnorm} contains the \R{} implementation of the methods described in this note (\cite{tmvtnorm-1.3}).
\section{Gibbs Sampler with covariance matrix $\bfSigma$}
We describe here a Gibbs sampler for sampling from a truncated multinormal distribution as proposed by \cite{Kotecha1999}. It uses the fact that the full conditional distributions are truncated normal again. \cite{Kotecha1999} use the full conditionals $f(x_i | x_{-i}) = f(x_i | x_1,\ldots,x_{i-1},x_{i+1},\ldots,x_{d})$.\\
\par
We use the fact that the conditional density of a multivariate normal distribution is multivariate normal again.
We cite \cite{Geweke2005}, p.171 for the following theorem on the Conditional Multivariate Normal Distribution.\\
Let $\bfz = \left( \begin{array}{c} \bfx \\ \bfy \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_x \\ \bfmu_y \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{xx} & \bfSigma_{xy} \\ \bfSigma_{yx} & \bfSigma_{yy} \end{array} \right]$\\
Denote the corresponding precision matrix
\begin{equation}
\bfH = \bfSigma^{-1} = \left[ \begin{array}{cc} \bfH_{xx} & \bfH_{xy} \\ \bfH_{yx} & \bfH_{yy} \end{array} \right]
\end{equation}
Then the distribution of $\bfy$ conditional on $\bfx$ is normal with variance
\begin{equation}
\bfSigma_{y.x} = \bfSigma_{yy} - \bfSigma_{yx} \bfSigma_{xx}^{-1} \bfSigma_{xy} = \bfH_{yy}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{y.x} = \bfmu_{y} + \bfSigma_{yx} \bfSigma_{xx}^{-1} (\bfx - \bfmu_x) = \bfmu_y - \bfH_{yy}^{-1} \bfH_{yx}(\bfx - \bfmu_x)
\end{equation}
\par
In the case of the full conditionals $f(x_i | x_{-i})$, which we will denote by $i.-i$, this results in the following formulas: $\bfz = \left( \begin{array}{c} \bfx_i \\ \bfx_{-i} \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_i \\ \bfmu_{-i} \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{ii} & \bfSigma_{i,-i} \\ \bfSigma_{-i,i} & \bfSigma_{-i,-i} \end{array} \right]$
Then the distribution of $i$ conditional on $-i$ is normal with variance
\begin{equation}
\bfSigma_{i.-i} = \bfSigma_{ii} - \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} \bfSigma_{-i,i} = \bfH_{ii}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{i.-i} = \bfmu_{i} + \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} (\bfx_{-i} - \bfmu_{-i}) = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i})
\end{equation}
We can then construct a Markov chain which continuously draws from $f(x_i | x_{-i})$ subject to $a_i \le x_i \le b_i$. Let $\bfx^{(j)}$ denote the sample drawn at the $j$-th MCMC iteration. The steps of the Gibbs sampler for generating $N$ samples $\bfx^{(1)},\ldots,\bfx^{(N)}$ are:
\begin{itemize}
\item Since the conditional variance $\bfSigma_{i.-i}$ is independent of the actual realisation $\bfx^{(j)}_{-i}$, we can precalculate it before running the Markov chain.
\item Choose a start value $\bfx^{(0)}$ of the chain.
\item In each round $j=1,\ldots,N$ we go through $i=1,\ldots,d$ and sample from the conditional density $x^{(j)}_i | x^{(j)}_1,\ldots,x^{(j)}_{i-1},x^{(j-1)}_{i+1},\ldots,x^{(j-1)}_{d}$.
\item Draw a uniform random variate $U \sim Uni(0, 1)$. This is where our approach slightly differs from \cite{Kotecha1999}. They draw a normal variate $y$ and then apply $\Phi(y)$, which is basically uniform.
\item We draw from univariate conditional normal distributions with mean $\mu$ and variance $\sigma^2$. See for example \cite{Greene2003} or \cite{Griffiths2004} for a transformation between a univariate normal variate $y \sim N(\mu,\sigma^2)$ and a univariate truncated normal variate $x \sim TN(\mu,\sigma^2, a, b)$.
For each realisation $y$ we can find an $x$ such that $P(Y \le y) = P(X \le x)$:
\begin{equation*}
\frac{ \Phi \left( \frac{x - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } { \Phi \left( \frac{b - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } = \Phi \left( \frac{y - \mu}{\sigma} \right) = U
\end{equation*}
\item Draw $\bfx_{i.-i}$ from the conditional univariate truncated normal distribution \\ $TN(\bfmu_{i.-i}, \bfSigma_{i.-i}, a_i, b_i)$ by
\begin{equation}
\begin{split}
\bfx_{i.-i} & = \bfmu_{i.-i} + \\ & \sigma_{i.-i} \Phi^{-1} \left[ U \left( \Phi \left( \frac{b_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) - \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right) + \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right]
\end{split}
\end{equation}
\end{itemize}
\section{Gibbs Sampler with precision matrix H}
The Gibbs sampler stated in terms of the precision matrix $\bfH = \bfSigma^{-1}$ instead of the covariance matrix $\bfSigma$ is much easier to write and to implement. The distribution of $i$ conditional on $-i$ is normal with variance
\begin{equation}
\bfSigma_{i.-i} = \bfH_{ii}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{i.-i} = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i})
\end{equation}
Most importantly, if the precision matrix $\bfH$ is known, the Gibbs sampler only involves the inversion of $\bfH_{ii}$, which in our case is a diagonal element/scalar. Hence, from the computational and performance perspective, especially in high dimensions, using $\bfH$ rather than $\bfSigma$ is preferable. When using $\bfSigma$ in $d$ dimensions, we have to solve $d$ linear systems with the $(d-1) \times (d-1)$ matrices $\bfSigma_{-i,-i}$, $i=1,\ldots,d$, which can be computationally substantial.
\section{Gibbs Sampler for linear constraints}
In this section we present Gibbs sampling for general linear constraints based on \cite{Geweke1991}. We want to sample from $\bfx \sim N(\bfmu, \bfSigma)$ subject to linear constraints $\bfa \le \bfD \bfx \le \bfb$ for a full-rank matrix $\bfD$.\\
Defining
\begin{equation}
\bfz = \bfD \bfx - \bfD \bfmu,
\end{equation}
we have $E[\bfz] = \bfD E[\bfx] - \bfD \bfmu = 0$ and $Var[\bfz] = \bfD Var[\bfx] \bfD' = \bfD \bfSigma \bfD'$. Hence, this problem can be transformed to the rectangular case $\bfalpha \le \bfz \le \bfbeta$ with $\bfalpha = \bfa - \bfD \bfmu$ and $\bfbeta = \bfb - \bfD \bfmu$. It follows that $\bfz \sim N(0, \bfT)$ with $\bfT = \bfD \bfSigma \bfD'$.\\
In the precision matrix case, the corresponding precision matrix of the transformed problem will be $\bfT^{-1} = ( \bfD \bfSigma \bfD' )^{-1} = \bfD'^{-1} \bfH \bfD^{-1}$. We can then sample from $\bfz$ in the way described in the previous sections (either with the covariance or the precision matrix approach) and then transform $\bfz$ back to $\bfx$ by
\begin{equation}
\bfx = \bfmu + \bfD^{-1} \bfz
\end{equation}
\bibliographystyle{plainnat}
\bibliography{tmvtnorm}
\end{document}
߾~GoayR_l/JwFI=:-M[Oru8ޮI}9O(J}^MQ^ͨƬ6#˻dv>؎>-AgϏn'fFؼKx"^7x>9]`X/a5KXXzz5փ__?zLj%8/f4z=Fҿ~W=~Y;)׃>6x%laU736vy-⑷ή#Vyzmqj&-bձ_ Ic,P c5fhdڙOݜĄl`σHaierY _I3C{2Df G FOYuE!pnl(`]PUZf E$( 4q ?*"Zh8-Y"]ʂe'gG%)DY.(aVJWєBk+Zm2o]Wḟ|Lƃ{ۻ_}]d]61*Ϣxy.9$鹠v5[u~P@!.8? _^>:PXRĒW7#둹o"V&w_B\H5ϴsa*N^) zRcKӰ:m_f F :`|Aj~2۟)dw53>fmX ?}ZFQ}U^`TӀx81.T]!b1v>ߤ=w lv 7 BD`h~al V~<)YȦI 㠊3~rqX 2A>8hVƯ'ApIyq &脘9.Tʂ$S6pݏD{xx!֥wi2k~H &BG4 3!!=i0!JN+ S,FO2-ʴ( h.Rh}kWȦdjˠ \'1i>7e{WA YQ,ȶ٦s!5AoH+8PxE jn6?n4 S"Ϻ&um0 b_3rKׁ^k!.| GbvB8om)t"խ>0Υq:;>Xuvr{s2ڧ+$HZ&SGU#`ӎI،vpQ{Ӗ6]-oERׄ7bv6&@ƊpEٲ%dZ3AS{$2oJ~4-f̓k?ZךOO5Zo1[ ks$ݧ3 ZBCokQ{2}ƭOۀ֍z=?lxc6%tv OȾCGkIob;L D cj0"16EmM<2 A.4tķtJkR: $SF)tdȞNQ䐭՝C6qtN|MbI[CPGY't?`ht0ϢP7)IT`19![Zrx:Se˧9v i:s6>|ҦC7?spx!bZ47"7Wʙanۓ~qOz5ϑE;l#v-$t+tK;kղKXв@,e6xNMwnRgg/+㐿6GKX;"J)ْ9UeӞ2в:ekR↟Ar'em:%\RrK2%mMJќ^e%٦lҜ0T<)V(Z~Q믠W*rѪ9^-g80#B:]*A4<,\ X_n \Ə[ Y7|Q_)O`{<8UwMHh{ʼ**Ĺw?hWPMyl@6j0#;njpml_.4Jt5B)%Z(ʹ>7mSU~ƣxrRs9ujv ? nM l DW VDHGc^n35T kOӜ{t{;G.Lq9U2Y?')daD ʝYyrҭ9a|ۤ w4]o[rendstream endobj 60 0 obj << /Filter /FlateDecode /Length 3110 >> stream xْ}Bd&*W&;;U~ę-Ӎ ivlWRy"]'\LgJ.n2B3ƒ̚f~mW\廼z;sׄFP͵ݵYu~ەO.CǛLY8#˶ەljJqSFq!r]W嵇RrPla!VkKCIfPj%&Z@b]Tr&&%F-VB} d ́oe-d  ߳)5 u& Òu˻4sLaɡ"8-yrGD+%|u8Lw,|="HO7Qװ@g-.ftE3u|JqZD8OEiMQeҜhs[ù+\mc$V1'R .NTaDp$B@M]:(_0#9̨0#ӧ`K$ˎ@̶{y "6VE#5bU put8v4l$1Pe2l]>R: QΘia]by2rs+ ̂]b-+胛Z̠l9X L!ʔ Rd/)G2´cńp2:'39]C6 .A4 ISoޥ`Yje?pSN(D5v2Kߒs!') 68( )6൧Bh< @6,zhP">2 pv0Nߊ34¢竿2'e߃_=WےP~^TѬʯeV~r'!9A$֥24) Q>P.0K]⺵dC $ݡl ⍘̈м%7.2OIHϋϼ Z JbwͷoݒC6~hG#i+aO!l,%Ζ-?*mI$@[7.aU৔"cbFf)ӹY>8A(j*1-CrlʴulΖSFm ^L{q,b 1Xp/ ..5~~ny9oCsq )e󝼺S~M+< CGChruO]P=I'oI8R3FBSA'ȇ̲ z͡]pm9ـ}:蚮#8@B!T)ڦr,qX3Y}Dq/4ܡU7f>I0V Ёm "~{YQ}?5;WI*{tPgr7ekU za-)t"F*(=b$R 76(|[$Hq[]OĎ?ޘAʐ'QmV'>S6뀄.s6o1Ҝ >p`pÁq4c6bXy~WFm?~%Ŗg n˺#ڔCvkXz^k',Ht8M[I CMd&uAAsWu+̎s_(`j۰ 'uŰ35N:2`aFGF+9hMО@V͝q@4:-؃@HޞȮM>CUx(\*fCW esesl}ilK|l RJKrr8NC1ERuؓ.~-8Q  y4%nWq0d4ة(#op O$یxCY{Tx{ %!d=W쥾!/"px_ѿ<Ak4D4 `ǘ2g*F7t={Z4ct طAm/6UkGADD\*5!KbVCl82`Y? 
‡dS*675'D7" nIv OptD+*s1 i0*) '2Y& q/ɍ\Nzp8'hEoqJH1Ӵ8uIhک`J{zM 5a MkdxF9S( ʙuL3h&SME@Ooq1vX?x:tO\\ :\c;Zw~ؿ.Bf(Ÿ(Q%q<$eS"l,^B4:vs}v߯?9$]^8^ʐj"U_蒙&޿2;)%med8X I|xj035ҧŊ?pQb),5&>ԙ%J۫l0".O<Ң2q2{0x&-F;UĔ?L;9_G :xV/O!2a/W#`/|:|}9lr3ѧSv"Oχ9}:-aTOL4j&3Rχ WE 0tjle93P:z3"˂04;z+Ac誱D+ 8Y7Մ=ڼ1Xyhxendstream endobj 61 0 obj << /Type /ObjStm /Length 2087 /Filter /FlateDecode /N 58 /First 466 >> stream xY]s6}_h3&:N:9I$j%*sAJ(r:Nb//=b94Jüe2 sL+Ti#FǼdNR,W&EQcR Gm?0$i I'5 Ib2ȠA(FҳPѡ TBQ[vdJ͢@FL) 5gѠh220@QCras ص]H6fQHa`axʁ.CRBH.n[jb/o|* [tt&iyХ?"Qh)wզ;ȇoRTge K U03=H(FV(nU/;<pq݀/#_6P>%T]5 W/:-="4`oNI(w _I5%f>O,y9yri#tX@RMQu[ fMq.oEr5?]b(|RN >Yr6grRE9)iH"Bq~` 'O=RtAEY9ܾsOyG89뺘Og@ti2ZCrjѵs[H9Lо)oCp"Ύ =sY&S~ɇ<#>Ք;~)>!@iNJ-8_٘t@2k巈sIa M<ujk]H$8flf;/4 ze[fVK`I{}H'ce=.V//wS$~m i33 Ȑ}ҡIz$]f/tV%~4ĭ^ M䶬ٮ!v ې܀~7UЯ0H?lv&~ 0CI4uW}KnjփEh>n'9]C׹>wv|_iendstream endobj 120 0 obj << /Filter /FlateDecode /Length 663 >> stream xmTMo0WxvB+8l[+ML7RI;onDo3ތ?n~<&YվI|/ŋ;t硋nn\3<:Wj\=?-wn6pGۦ|Tnʽgxt{o $Nߗj s6BK)ɱn=A9n1 9Ң!X-O4$>΃;mc-bB`ew\_7o Y KQ#^''ߛ.?:'4ʫ)ʝCC梅Klkok*ؔb%9v r(CXYh&(9O&"f0H9)Dhbc6a@*&GY52־+vĎ]=\ ~ҟ"t&g>mK z> stream xmTn0+Jb;!B Hv[jWHL7RI7vJwD/3ތ|{M=I|/ųK_I}E77E[^N9prvxO}[Yb; 6ۥrcVn!/פ'|o B\ ?]?m ԽuS ևht4t"EUgy y>ihgz9wvMu*n]h`Ah;R5K__;'4S}}ꢅKlkީr8)9*QK"&VZ 5ID b!"U\H(jm} ۄ @*&"C,jdkknn"cW}O= W¢mu|]CXz :3xl=΀WOTƘ$U ۊu':S9xT>&^s 1eΘOd~`xՌk?s׾G0N-۰o|e>ha>6h Z8sseY1:@++܊psqKoZ׺q=7cMendstream endobj 122 0 obj << /Filter /FlateDecode /Length 664 >> stream xmTn0C6U@"mTt@;olvR3ތm~<&YվI|+œs_IsEWWE[9;Wj|;܉Ǿ-w$mm o\1A+Z7g{uo}m $|{'Bp/u u+$bTy{!y1 GҢSX< {NmmXN;{}y[Dδt d\{?:1 kmn_~߼h!R,6ew*ؔb%k e+Kӄ$a"1x*s.$S56P>Ƅm„A Fs 5577vرϾ+uaя6R:!,əCxg+ѧy*JcL|*m:fvuiWUꧏɩ\g%<Ϛ"sÖ0_:3x0kjhyIYx0aCnOg3$cx0<<v5O#ܵu7A 6*sZ ZcΜ-ܠeYksL ?"@>qh|tngk;dGGMcendstream endobj 123 0 obj << /Filter /FlateDecode /Length 739 >> stream xmUMo0WxvHUdCmU^!1H#x?gx]OTm$|͜s_Iss :L;<Sz==׾f`*_`ɫڟk3'iѴ}=M;7rfnj-eSӵOLg~8 )ok A8 $`I\3`Af<Z]! xNky"7 _㓧q H`nḱRONH=CpB:# =%888QA~!*zƜАT?!~> tw8y*sύ }nFE>7*QύR>7G];~<6OIyktg>O:yұϓN|I/|yIg>O:y҅ϓ.}2 L> stream xmSn0C@c ঊ"Ih&Y`C~=3UW9<߼y3g?>~Z6gg)>k[Zo6˛bo%n*>lSsyoPzLZ#G_[}PǪ9}{PP/]՘W> stream xmMo0 !Rz|UAa۪V&$E 6~=HUAgɯ~uo$ƛLD- t @ZcNt=YNk`T=Ro æeCڕ(>Պ AiZsn[6uc^0Xah\je?0bprOY[AKS|dۙoF)MZ}4W@{YmG;<9`;K (EytbabisbgEjq(po$}Idon-p!J m-O[Lendstream endobj 126 0 obj << /Filter /FlateDecode /Length 663 >> stream xmTMo0WxvB+8l[jWHL7RI;onDo3ތ?n~<&Y$ŝK_IsE77E[^N\5sߖ;7|[lzmS_*7F?h3ONL7u]~l+l+w͛R.bYFr8)9>*QKr7P:MȡQ^s$LD6aȑ*s.$S56`>ƄmÁ#TL 5kd}WXssc*{Rh/#? bE$L|ږ8^y>eSQc̯bV̯cNa'_OAJ195kd3EH@8ܰ%~As*=F 0`{RLPh33Y$LƹǬ oqMsȼ tx\ \cΜ-eksL ?"@>qhx ׷=l~1֍>*]!Ma@endstream endobj 127 0 obj << /Filter /FlateDecode /Length 4336 >> stream x\[s~#51QL^I/ɴ2m%Q]tUd`/{D4r˹~tvFg߾/~i>jg3F)R lv~5{3?Ym\yy7WC__ܕ] W!tZXiS~rZcM6׮Df X|kn~]_r{{A3 %j(j}s7vS0?&b-Vv۾&1 AH!N $0'\ *7v5MK 4iC [ɢ~f\Z]KXGB ;~ڠI SELЄˍn / RbzUfSVDS6EK>Y4V.GMt1ctWB!Dk}`qbaZw+eM#U՜nsU(ESdW;lDwEvq`D}ѫ:B$MDDبDQr#~"il ܣ)a51K Ye&㔇>H⩢4=SwDFܨIM!\~|^rhm00Zc?"c„L/X0_m6KcC;`:Ygb,h!"a혎L!e,81}ֻ\opk2oBjXl rNgaSyʦ7ϗuk5Zmweŵe= }ag|QdOcJ۸ Dێ/{҆03nS ׁzU 0nݯoe+n<|v hah# x *^i;-m10DĪ(CHli| DSDd$7bCD+ .b!S{Y ,6NאtXc Bv\00Έ,ʎ#>ʆoDE,}.qs5EF0&) ![!skæAf;Jh=@̴ε 4'mJ@H$b(s2NAԢ6g !'Nli`j4#Eͺ#".rF]Lb!">tA?"cH*~` <L1@Qy HSNEcp<Gϗ'gd8 QS -)' uztRt7caWaHuD2M#,r6Vdj?]6g 4JfLw.7faȃF) qئr^#mE~B?reU,F6X-DtNZ ~kH:rEa$( 9,ooܽpje^v &cE:ʄ~A:~yl߭~ɢ͐ŕNYıct?~:{zWݼ]m]_۷`]'ku!'vH1HJfD6Itb)xy`Eif"܅mpI6&ؔ(*""Z'm&Y-[Il 0A\c`]u-j/}}BQ:BZ=&h⃡<"ÈOat ĖR2CYW+o[[᲼)9ܬN2" K(Ύdb\Qj-c J JFلvui@ -az=-/˻Ͳ_/Vh/yCr$QWc]H[6_veel徬뛻U>2`ѱ iMdǛWQ.O."*WÃP#6Wv3tBr-lySY>|qL}8VÎ0XS{! 
_`0C)b*_0zp@ř58)%**cC_\ Fau1h("F1xZ,z4桹%СݛG}E _1`gE„)j36lKl9BGR%ǍbOPL5c(}yLW=ٺ#u{V N*PJfcTUe.5\. ?+EM_`"1u^h] [wo _n'+h`%*a84f:Xin (3ߊkV`k +>l>x(>ŷ<B3#j)5#͠ooP0kI*PZ}bUޯ Ko@o .4sF埲9vL~Ó7C}1fl;܊nQFzY9]Ē "Z?_^ƒ+F-> zdbcZ»HFpc_!4[v2ߐj X_y8YJˎ4;Ł/?U/~Of3ю  "L|YDW-s=hF#G 6Z>2 מ!Bq WOJcTq֏ZbF1 -VgNaPgBJrL6PF>tE3M)++2\ X 8̹r6 @C o1}g:? bQbQksE>*aΦӎV =*F9,R0Pq:UU} U`q`\Uph3]|@_:~l w ۞!?u@O3Յ1@d5;&,Ҹ4*0(u8LvErALΆzl 7m"ɔ9b,)FguRŸ1Zor=f 'Csv=bϓxRxlpOry7ʁ=VM#׋x#yE YdU!*2N G;i"-3ُSnyVw38ɘb8BWОS,H0[;( FCpVXn4[Ks5V(9 il]? KȝR0;f-Ҍ3b$YbI$Z 5Ξh#@@C&Bw[]ԫ|   ļ߯.ׇX%wRܑ֊7gkz۶'q(Jm+CGu;FzLSn2Du3w<Vq1@2P 4ik=UE.wa] |HOhNNܒZ%5ɹWO˝v_}uZ‡g7|F߮g >z ^8&S8&KXٓWsFv1-,42&бLvX&cVRS#yeza3O@\[ lyEmxzX hϝ0nU4/3bS3endstream endobj 128 0 obj << /Filter /FlateDecode /Length 443 >> stream xmRn0+EtA1!$B)>DWwK!d`R83sf8X}\'FoG# (ʎ,V @+48@~j(af&7CGT]ݘ4zP:`=[aj}􇏺?`zW=0Ƭce6COI geLb S0Y-; L>\Z4~0FQL?aPe$˨ VWΎ*JR)HXF.#ʛ0C~ !)"I-札?9#.<^[0$gı#?I> stream xmSn0C@c ঊ"Ih&Y`C~=3UW9<߼y3g?>~Z6gg)>k[Zo6˛bo%n*>lSsyoPzLZ#G_[m+> stream xڍtT.)H5  1ҝ"RtJ" HI=x=޷{k֚ߟ71!Re]U @PP_PPˎfAxC01ΦBu0( IťB"R/ Ђ ޸lpy_`.PRR;@A0. y`(2H?ӛpA#7 qji ` 0;#@C`w.>0'p`"Ew?޿AaA`0  \P@_M`N o?9~)@wz!zn̪0'e'U =@~0'_m8x  | *8w&\ H J` z{ڀC!w?A _ 8AH# O;3#k;>g{0'8#?W,jJJp@OHRL Sߜ5ap_ _5 3Nn#(*M濢?xx9"zaIye7B>j"Aws jP vK1M1{C.>aw v{Ad?Sp_&$*!ܻk;w-d? sܵ p#p)N-qF*A]~%Ar PB p'4 䁄#L u Oy@ɿ#v>; qg`(Q t~|c6ͳ>? 2E|F*f /r(,0]6ažybvf8]ŗ 8|& [<4 wGoEb+|#AhPLvVeէ13[bxWU|ɦI6SlEy,| ܤDS'I >2i= K. ZJ\1OcÈ~B~=HklPers׭I8(>}ԚNiH)cO .>J }.^ݯ`FYI=$շs~OUTz"|a$~9o *c]gkLyTjW>Odg6.aE(bݦceԭo8ڕMWQ|z%7 fڎ_oT y& ;-;F v(B%?9nzznǹMb/bUZb+2u_tpsM]|4KH3&\{lX'GlW\:uBq+m`fS?#;ܬ(4^L4Mss( AX)mp9?ko6Q/ ;Űh4ѹ1(GhvoLjfFYMtn¹X'V"h:$&-|-Y21)Qfh .hy@߰VV0#}[jlWvq\ի؝R'O֏2?\ Zg mJ;2NN'dRN^yTNŕs_ Gfi[&Bj0&If5.9!O WH,F'Ϭ뚄/ZmB4/_b0G16TfsMݵՏ?ZJ"?ǪyPǁj#7tmPNvsChD5й{}bckǡ÷Y>q!OJU59t>&;^8WZwvэ.m.!g,ke5hΛT#Z%y;7Q5xĝK._^B}=ѝ^Ff֔3=4ԞNFwwH9SU[,EpV '\S~Kgye=ZX+kDR NkO{1fbB߱ɰMV| %rd`ӥY1\_6y|Ѕ*G .\`zT<{h*x\wn& OOmd!k艹p%hP\һ,(dt۳W)PQՋFc(3c^z5Alrk\&]_2YJ 3_(p̈mб`;$ EK ?1FJf}Y?UVp-p'[8&5?1^/w]bs&PIfY%4E>w` ON=RZ77g`g[GmE'Q}! }:~:;?$&3HI <8аe%$m( T[0d:~q>!C@Or6J}2܄{E]\Pq$wv7tp(g ف >ʡSxkC6|<' \. hځ'a[ݭ8v'iLFΩ{-76uEPY#R߬ >nJѻ8u\ug ~Ь :YGk#Jf]d~kWy'dүb+vwiu/E-$ͺ9{YL!t!džVUW_qk^voX:+ƞҝY[KqRg)3H(DIթ ꊓ$cG-w @S'} ڳQJo1 @#W}7hJ Qav eƬ|j6iNiv{z5OV".23mTv\Xt)Q0VGF!ZbhTK (zͯ<X'B,;ϭX6c?qGJIP:ENX$$ YVj} P^ %drUpfHb`ufq)偫ȩf)1NsH?ĘmL4rGCLqCrIS",/]}duXcRnMxebDvF my&*I͉@`s]琚|<&Ldcu#F =ռY Cb{ϫeeb6 ={HSI\Lo]$}A>Z0ǿ}tceIn}@_3ϸ'uPm̴Ilj'OzHOâFg>o#1q#KշruBN#hL20 sW[Q$be7OP͌WO?$s9xѠd4&HHӃbhnlsáB3B`% `B][YqAe5!&Y-vDg&Cc{&8a }))yӔЊN@i5h"8U oe\^Ð#qQgT!?"XʃFi>ۗrTd>IVu;ۏnC7l ճu=o:6A-F1<tڅ_f GWIh"\ttގnEN,._yY0wkO -i)JzD /!D~|~&(&0.%Ɨh$O?':E1 2mZ~fr f|t]n6ΩSтw 0I?-*.a+N|Df˰=+UR̯ҮMɣs/Z$ՏS$Se\ȈDuB28uH%ީzUM]]ZNaixˈ8bC.9atU{x+yZ2eT"/k*3`v?UiWRUq&TFQK 5Ry"۵au]llU+}_וN@Ւ3F9%N}?%phX5icAE${2h}@)B'=VN~8vJ 5~S3B)jQmO.$| `Ѳcnfdp ?頃?w9'B0˷ U\TI1s4APNO9dTYp; 5W %v~_MBE=qzIJ=9Ý/G'8cfQ|/>mqp_eKq]lnb}h1 F,ˍS~^ [xPo?" tKntʈz#k;aW(AHγ Xq'Wwh]3նfMSQ{τIU8̭ȑtfs=$UV̉ r?Rt*?ς*"7]/2(f6/F.ꐀTA 29c; §qg@/SߒdHVmN&}VQ#vqywO7(HF{w)8a`$z6m0ft:ו `ٵw&@£5;Zp":hl#~ sO{ Pd@fGF)Wʥt^E(iO΍5fO]Zub,Li՝cCB,Y,hm{3&\HGDzk}&>)ŃxY;}&3x~ gAbdxP[8Dާhb) 򨉍+/=}KG8V 1II5r~ENʬp.%U3+ } 1=b᠍8'=;=Th̲GGL 3N\LP~Ӱ:o"l>iGÇ=L^So((%&fE Ζl`Z;}Tڐ!#endstream endobj 131 0 obj << /Filter /FlateDecode /Length1 1478 /Length2 6403 /Length3 0 /Length 7390 >> stream xڍtT>H( R3tww30 /!ҭ04ҡtҠt|ck}ߚ~sγgVc~E'D GRe]# B&P &0 =8(#!`S0~8@  @bR q)  J@JTP'@ xs(#PW昿nGHRRW8@ :]09 #?Rp˸PR>>>`7OEE 7 0@L` #Q>`$`PGw ƚ:}wo>@J'g"(W0A.g( W@`OG0{0W`!!#~Rs˪p'e$Y  q\>#|6PON^pDS "7AD@0: Loe1 g H#{C($(? 
@'# q‰C1GB}V@@߿V6y9!0ꯠ!o))!|"@()2q"i ?eG&]- B5PRS, 5/엙1ݠ0?z0]!Vro& E _TB (Gj21@xB>+~Lkˆ-LJ#_&fry*sĄD`$Gi2f' af KA8 `H`LJi bQ#e c? uyy F_/ q$G8J? o WnW)Iv! 4 l?"͸5#cm]SocXˬ`֯E:jҢq8DN։吴Y+ySŪiƊ.VO]&a +c^z<9KBlu<YKlhoDkbϳ}s %wbWϲX'uh+n_. asxLq;kYf2!e߈@X55_6ūAśZxSZXZ(4g{8S ⻡f-ccwc?TpaS}oX~0XxAB2dL&3XHz-mt2cuo>'|kau۷)4$v}9xVϛ%| dD@cL'XdbuAHm/W4Se, }Z֦%W4SJ0Wb Z7y;k3 kDASKSԠߍn2h =}Egg5`a}aN9ﰜSbG$i0dkYm8{^X1x30Ƃ{ȟ޴mv?U=Jwx=+J_I'[*+i^qw_z %ub9Qղٍdj lٺ/{_CIa5C; Y /C/ޝ)C9=`ު!bDCB/N= 5;/.Wnվf~?oeD⵻A+Y&SdbdgRI/vjxr2vR{\$5P/j4V-vΈt~˷dX7>da+l/NWoηJo],kz|2JRZ=LY>kbSoaZ! civX0;iېp㱁xO(l.Rf-޳­ϑjXH3l"8D|(a$kB滔>s][l)?S|e3 }fm=,Ԅf?~VpdkViaN^[<(u| Y~crX1HZ{SĄ'jD ~6#oJ_$dzO7jbԞ=&[8)V][E?v>1 шȡF`~Q%=T4U&ܘa}RL4~T`ǘȯ09v.A>Ae{@ o2z.MXebjE[U_7lIB']7g/JxͲ#R{;9z=bqf:ATf4>|Dz\4(.UpuTQkJR꫐󇒚ߓ3p?_RNPzrG֖v{Z37[- 7v̐1qO)nhEk .]i`by3te:/E^˧c}::n? Vu2S]~~Uh+X[QՎ{I>pb1"-={CwRAncD^/m3AC1=]&==$t7^'=3Ƌai>;hSsSzv4D:%@ź1רصaKLCi߻OXYnopԢ >_d0!C}m7,fVۄ2OqDͷsդoqqQBc+ [54FțHm+LҮ][+d4.68שԸ&L3ck7 #WJ=Ē$R6z'8lM~}ueE>V]ok|iV`,ERuwT-1Mk# ^3rc$ihF& wM{V2q "~,}Q}-A´0̇lυ2ǨԢӃuѮ$2:$bmue@kUA:cUƔ1R!1m» {$BVj&/2g028nMӅ\B5? u+#bZʮY">?74Eax켌i#yG# mY%cQȓw,w.&_Vl; g+|ߜlP%/GR"K- e #9i/4 F?`[]dh0槟3/4^5/SDsut̞ѾS0o-g޶\1T+mRjYt;&Ui9]W '+wb;#{|UqPv_h6.~/V IRwu:4P4lתjs^&\?u?F,RnѕPKJ Hħ>ÑGj؀ϣ}_O*F!|=]/b:t9M9hN64c'˅i^qKGcJnhT [Q5fB<b]iIM3A9]쁱Â5c{,m_^s[D.7[*\xcܒPrDC3_U/q_j;v4?Ī7|<l佷X=p7m0e_}2)wb\;ǦZ+-iChg@fi/snNN0cl'2*_ wtGrc M.FRF83T7.Ney,Ay RpJR9l0Grԥ +Vv4I@opw:-e.ҝgρʥX{օX6&ǝCZUFe> stream xuW 8mۖ,YR$=ی}lc613f$Y"RT5R,^ɖ]*D(";?9yZ뼮9C6ml&lIr˙hzܠvO[3_ث۵hι\hCl|8Itq8rJ/1Ekٷя;k2}~'iFsi7X]ײ,>q \vgUBV[s>ع8.}QǓa)qڙ=cui+ͣv#,x4V{2N\#!\^Rj|iteȬ%q[>!5 #N8oL:B)Xby~gZ;kH2UO$RF[}CBϑ',,*\m (Rjy+dv#^> ͆r[3_B>Ynn`pI/[^jZ1鹬!U{ڌxUs Js;NJ7[ٍ^2`u6Zdݶo[I/}ZJsW.i:&aĝŶc&=ZeFb[= R!UkoBSXY ~S*J>CrYl"O3#OP >.v_:jp."sO#K65c`-b(:`|qƤ'pJsK]-ٟb\ty 78t?F)?_Inglzg \?ǝg~1|k\6wj֛3y0o&rC>WmXG`ԌWLȑ0O,鯶gKb2ɧX_qq̒r­i[?t97ԯì4aA&dԛ^yWGՍÏKs`@DȰBPȹ$O6">9~!Yv"%>9?jwRSFIq;1 n\9i<ใYPW渾p +li,Va:0Α_ajuN:oV(\"ifPN52ki&#cR`=WoZEKqRs @kJ<5Pr$3E,1txa9__Wَ)zm.#&]0ST]fjW3zWD5Ba@ #oz6μ8B=rպ,sYNnE]FL:w4x>U jYzn'{쵷ןc59:>qmu.+\XZLjV6&&~3՘p/4vG{eëo!GeXߦs<a0-تe>|"W=?m4TjNG:'і}2f;QA"ԛcٙ$-|HzOM5C Â8(b=HǨ*&|aȰsjmoY`HzR܏Qmdrчٓ'C10N^W7>TwjCϮ9+FlO ݕ-#"F'.$;E'6||o:|.<*.8DFi;7;U7-]t||cXWKq>qCqy/ ?Av=pѲ}nKV|b\\&ޞ拻DڄykiF J ;/`oBFZfsݩ0{yRr4LhTjxFHpo1#Vtt\FY ?gtuH {mʵ#AسCڋGS],*zl'Gcm7hަ/.F^ʢoio9c+ +rLYhIA/5>'#fyL!bxVmJ\ #Q3rIf|iHbPmUĤ\4ىK^ݚq"}؀ikآ:Ma꠸K(JX+O[UԺT r[QNڎrl3eJ*cFkҒxUha= DA oz fNfj-şl%~i=0_rCf;X ΑݬZ5H6¡>$ >{;D*kn5{ȩFCԎ}Q7SK̓9q-3E5'kQGRd)b+ *A,/r琾7?q#3W粻c@9f6e"ȥַl˱@^2S5py**Pۙkhln5/YUlB!kNZR:}B^O/A QΧ:fxY䱦̩b!Xtr҉/C/afફ .i9U;T҆e>􏕩{,nKa) xRO$Sؓ2LpXEIV:dƓ+bu_=bvIYŏ?5\<qZ|꒑P86gFޟ 1))9|17]iб̂Y K+'{(gm؟İh/SR坚q(Lw드d^IdeIb4ec'_'?P`0cONoVjzlM=E-D g)jUqX8=VS g:_i&ɗ >Y-^s5YVg5w) #>П t-C. !rL_?3!2^h+r&`w=!-!R`+.Vendstream endobj 133 0 obj << /Filter /FlateDecode /Length1 1144 /Length2 1556 /Length3 0 /Length 2342 >> stream xuS XL&72xHYR7$4[TVCӜ3333gH\B*dɍ%%Pt۳?3{yfmχpč)sm |57DM ##g(] n$ !9Xǖcm˵\6K.I!!á l6X)Q>!3XJ,\@;,|E F&_M 8&0"` D" M  +Bتo.E`Lۥ(2FH@(#8BB|Eťt]EIlYիW3IBJ0&I2|)TQG$JQg BQmT $+)8 Oj@B_L{R瓨 |]܀Ŕ.ېB@G_&Fo> Bq)HJsJ0HNҥ$$ ,ZU!D"UUBׂQeRe"UH"\B&|dJ*(_!?+ƃhq! R|@MNeTCqQs|"Wvk|?D( '̚MřaTnWOJVDA Qz0\`1qSDFE ʛfUK%uPQ9?4K)W}~^Hen\ `cmXΎg$_FUMKZND{nӊ=ԉIY.K[XLr}?sˆ5Mx58xWQM5', gJI.*{FRw3eΔNzFVңÚ/W}\an#]F͉<|AS}a5g\I܎}lrRpW~1vlK2V|]w{|F:X00(zZdbS3ҫO&{K'3-3$ާNW/ɻ;0yδ~q>QQ~SNwV{lTSNjO&z[^$aENN}wPʲ]T@-jxz/oV. xJ^VNk9ܷn3&KjcYVmvpXdLέRT-Fbaɮ^Zit~@ʈ63(Zż:{fvnW}_3_lF6:=iApHѐ ;JngH_:fo6̜9zئ=[y3ƛUYc#uaAW&VɼyQuaONM ^u~n ;H2栳pXW]y ʴ-OΩ! 
IO`a8=RfTд=y}%{ӚW#,[>rBNP55GdW4ny}Fw8}*ɭ5?jvɈm ]ƌALpVwuAXögkOӿ\7o\3E͐ĝo'+fm֔-=ܵBqNܞ6ڔ͒m D𝡾ލL{DV GHڕ?CQYbuV9'.4l_=<*hK 3(b8H&M' uNݳHϳ|1AW`)sӵKe| >[eKc*zeݹ3덯$0Ŗ5f uNc/Ω/Z_zn'GFSqtƻ[^DѩVAY.OP{8jڜ`궉 {A:*opmkŷW}F_to_S nz<5aV}']qShY *-uqM+~1\д|Ix -zղkxMy5W^tm>pXy&p5z=a]wVo|xޒʛhM׈ClvWgO c_/%c/:vkt0lؕ+3Ѵ'q<^ʺ{9o9Q9$#r\ج&?9輐vD "c#z'˻-)endstream endobj 134 0 obj << /Filter /FlateDecode /Length1 1188 /Length2 2758 /Length3 0 /Length 3594 >> stream xuT <cI$%[Jc杅13famDWB(J*B/""[vo;yy96W- <-Q!-6pc!>K!46uV  iTk,6 20c14:0}?% l@>`< LEadz߅ &]4*AeEsd" p5$ "@@߼3lgB r D['7 , *2$PaaaD*K p0$@(" !um `&đ ҹjtLfMfDeh L jA8 d: @ Sh,hT&\`$cwLeȆ< &p(:Ikª <d“e0 )u_:_d R !BLp~b_1?<-DX -#m}3iCfx2#,sm1La=K0h/wHd\[<t$(X/IT rٿo!_mXAk v< @iSiYQ= $w0h#1I&cM <!?S\?c~ d6+Q% Dh-CCO$`X' EÙ ݖyd[WWVGgdvhߖ zW=="Sk @HJrv=xgV%i䵶q+hBe4ef-кi -TNxRz(pߪʞ>l=d.|m*4m/eҡ #Qs6*@(S[Uʝ .]ޠ5)=BcAnH!md>edx =dd_|I$)e_! Ej=|ٰ5]l&ebSoX98hW^ҀwxZ3mcFμru׆# 1:Cv1z t34Wjq֘v?ZǧYN=~\7U"VOorC-^E^J#R6A835>V_בټ[Դ8*-Bڹ7ﺩ{2"T[t\~4ZBJjtrzJ~>Z'ҎW";aBm| [rccm36#Rb̿:.}ڴ+=phi B3}y2یngT 䠻OH$>_{\6=Iw)v+kFHA]c|*Ov;$Ѿ0k:XwfS :9UӖ謡 &av޾(15۱zY){6Va iǙCQQ''UB<@Q+^9'&mʞ+.tAuMUbYlxk Y`o^zpj\O|hKTs3wo@A~ce040 -o mO 1L?yym}x1GRY/:HoqwA=z$:FP>&)e'zcA]fuX3fIX 7=Ps,cQgks2bD7!]S~j `ZAQCZy>MUևS7 epUxrQ7+`\>)r!]CmҘAζW{zӚ]/Nm5j$)8:l|N{WKeW͌4ʢ#jdԙCywf6}-^vO(L04?XuYdb!YQv#PYOM27Znҷ7vr ͬ6<")<@mnUo c#|K t{תORͻ(rkK.Y'jhhtoW DقsBJ\c֤MEDƣ/Vl0pT扶' .|ߤ'un|O!DB /ӶaHq5$TDz'̖N̜<>)endstream endobj 135 0 obj << /Filter /FlateDecode /Length1 1614 /Length2 12956 /Length3 0 /Length 13779 >> stream xڭxct$ܖvl۩c۶mbc۶1;IǶ8޹sgݙ_͏ZƳ}ΪU$Jf&@ {Wzf&GN. 4vr3v4f1) GupttP}bPWL+ {痯@[G;':QZV@$JRA :LlLrV@{ 5`\>]GgtrqrX8ۻ~`eojfO߄>#>}`J..VϪJbjiWmO73}|z]]@׿jfV.ƞ?beo/tg-Ol ֜峦gm +{8ƿVE?}_ꯝ$al`o 01*8~PTf@O5/>;$?GYzO  Esb010h"a4Sr5~No߃331O7wMQJ\YLA)}*I?:w0_(""ozfv=+?T_gycWg+g۟5 F]Qu57\46usvT?/:4[[v0 teu,oR+) uψ2z ehyt@p˖' xUHu0Ѡ1\3zQ'At3#uEdH~( hg'c#C=}y~g$)FwM/_+`<&iUCNO4IjN*[Ȧf\l(~0 E_wGyj6+-`6HVAxIS\ OHKKgvՆ@ [ߓ3D1h]4NPQwF}T0TM…~ h ;);@h>Bnz\Uc&@}53.XMy~#pRuT(?8J%MaetNm3A|܂z1z/Vq qAT+;_@NnHGJ;^UuKi]]db`غW.z *G)LsȤ|qЅ_jEҥ- #T6%JLi^k$wW ::l!0˴psvXh2.P8 ŻǐLAf]#B "ֈc۬ ۼ/ۗo o)S!K9Z*Xzp+{ 8Ð'PItA+_&Y7n7l"$ I dZ%+<@u"Nsޱ( (4 /K9)x1/(Fw,%cbtSʳH9w(R ɻG$R#K> @fInBQuoC>~ пźC>'ch `NaMLkIi![%[P?|CNTc[_#]:iV2}Ƚr4źP41 *AS.f]'\oF2jζFޛWjɇ/zˉ:/o:,Nb$O=}|~ݕpLg/G+0w ךsKYz-kwћ{5/5,7[&y;˦C!Z'#Pt0(6Zyg;[W9ńFzjk_0[[,e;,6a bJTZ(2A-r32~/s%a(Dp!W4W bX.YyB}B8<-lDG` J Zo@@hμ˵OcJ̡HV; ZPa"-7Wl{38!p}0e9dv4E'>^ Amyz*y%މ Nͮ¹?_8.3KD$zYNaþg u3C!D wH7yH4S8i/I4cw[4vbzB8Wh3\n*jy:e@.%iԖ ^ O5~E +q$dv甬<3!+85+qTL` -}`/:n?x<$t^vZ[ȐY")jvV%z'C" 쁤3E?ie-=+ $iۑHVDR@j͌x?]w$ T9Y/n"fv¥mW,UΣ ˚[d'neoh!9XLZJgآ*\>#{LfBPCw,>uy wJ,3S GۮfDRtbnT`|Fn\|c$l"~CfH\s%O/!UV@4.+76s<(+7bj8'z'Il)UHY4{Rן'(d[j ^J o]l&PPYEV+?qߝeڤb)!37:Vs9@:1?N2SKGIxH2HE ,4kO1d~@dɳ5jΙ{``8@:Z}66yl:afV`xot4ڙ DJ>7fiae#ZhP}Symq=WWaunGۢ2E~FTpi/ਁٗf R>1,,rʈ 00ܙߞl]0D]Bǐ=7o K>%h4)5W1 FDhc W d*',Q: `੬I|ị(]T '.$%5a4^iUf2PSJ B+]f6/Nb'sddle@2  ~y9ؼ=vjO5ٽ=ò۔Đj4[QH-sFe_vz UuOg'!I_;Oaj;P'cǴ׭e4"PUľ#F2.vRm$N$ӄP*vWsS+׺QZ &FdTM#tSsjbq5[(M ȗ'CȔ$ k!v'#M$ԝ;–DNB"P\w n{E!ڴ9BEMB/9pέg$1L!#B'Q$K™4fE1A.󡺔iIV S.5ԄR]ѥP| K8V gCu,-vXxZ~3 Z/˱bvyiC{|5)RXKBA0zٺ*ZmuonFڮ#{d/0~&;,CN =|p6}c=yTJpK*>bWœ6l*#;!<BܶAb(|F6LJ]N2ݼH0' 38@ɶGTJЗ)$-fߞ6 cg4^YXr9_|6_`N}A0Ҁx!# %q22igЉE9)dgu1L8WPvS QB'lsT}({nO㼭JPkLzSxƤ]~;vQ`ݺ_;nv| )hߟ "G[I=9hф,,% ĺK DunR @3GF\S&䦰DL0<ê,Q3mq _`#> C bk|T5YQ+ O+pNuQ6媷?7&]6Z1H2#kKwF8G}F6vj^  n{z\"+ٮk0,f<Ff}BsRE!1q>?YŁjf\w,!35oCQlr[ iK6* aӷ.D龏][v*y,_Օ=$/`\,/$[)nϓDZu큆B/j63w OÇ6s_z#BN}>(d.rQw// REoG3ŧSH@?4x!ְ@tf+)j=);?>:t@^eC"soo+Dç$V:>rClX "D= V}))"*%(j/9ߞ#\u85\˳fZ+b|j]}5e~r(L5VI̷Ti>MMz"Ƶ͇`.3< A@s-=lDU߂_eAHl*8b> P/\!O-+#fZ=I/dCWGz 
y˞žK#*4̈M,cCz$!bLU6m]WU %wlۤQEpAKq"=+:bųp GhX 8xNVL%"F} %T%,S5PA%@iěX\BHNA ֠v3PU22Cvvu߰4x9?p[V8f ҧzT@Lofڪ<._50ڭ%׏6೹ ,:/ 9oóm blx:i+em(B1!C EY(۠Lّeja]}ܑ",[e %Tķ ۔6ZQ=F{\z-/*Mw_m)-Ex)id#k(±^bxeRVL-bt'YJ hnv*ļM|+Ml~J0=CqP V[EJ]C*G//oٖfFRArӜU_-@4SXDޫ-N'B5)W N≽w vueM:pސ3wb oJX/[A*dqy܃tlDH>)#AM\ +anYǂ(ͱ,UBB=?$ӽBm>i]r&}Jօ;yp*E55`]{&%P<;uɏ3&CM=9$]l/Pfe~])Km[Kxk_eLo@pD'f-9~tr-!#VECdxlI΄%{o^?0)y>3vg.{x?z{E}΍ |ĤadP3UZM4#>B)Qu^VOt=];n4O@A7,<ׁk._cYB`j^ܗpOBES,k^ze :q*Iz\{ a$:JP±.) )A䣞ջB\ \UIoԘͮŕUKOEd,:n,9KZ&z C"PVG:|2nwSq jO5>lۛvWO+p75:QD»#ʌ3|, b_.XV߿|hc !LL;FF)^`gl۲Zw;e9EOBd{n}knq2ҭ1&!6k?8*6)2HQ5NkAqoZHF<:,ưZYÑ5?~6()Rڃ hBKd+9#Kj;jӪM`{"[Ֆ%ةG8OݯCjSft{2ƠȄTL VKm=1: i`S~sH:N5ja` gsz.Q'BOs.,Wg5IC!| %I5gUǰ ^FݚdsGe.MfUc"qb %ZM_t)f*9x fD&Nawon*4l 'tMP'j߈XO\WR< %82}﷏M,BaO:R4d3MDmEd+z&$=Qe{78dXTBZHmvvX7m+-x0|kbm6ҧ<V}D*3w@ڮQndŦ~0< ͻTʡ3;tLnI3Mq³ګcB"4LN +; |0N* E1>S R nȃ8#ӗ5_ TfTt֮*=wmGE㧩N!lD7$3_R=*)^a^91Xi\9wkAü4 d%]t'})ӟm@  7zӢDA;ʐ4&6գ=qw DKӭ=I٥^nq`cbs eGq0/dA >GĚgu#LiPl[lXG}<^hG?s2IKSA$$+aM<@J^ zk3wpr#l".JNbHCͼf6o\C.y﷍-}L|a1-C#_I@ԴHƈT lv?'C`љU2#Xs "7ъGU y U a{_D ,}RMQ{a;^#.nQ7 ?А.oPuPc˫V6 /`̫$bGL>|r5AV mҽ:yl,[ZY"uGEl؆FN{"1u$k{jC +&uKݭ{7+AA~8ֻ~ +CuU: jH&H ]Uwnv8gk(v;aLlX? H^=0EKNcPh !*^f\LԒ9eY5"vKIS2X(> LFj97Q9⡵(D;|)}M]g0q4TqM xFC}P?Q%*bvww0d-nYy-Zm@Qj.NZP*^:r.C`vF2y!FUfW55YC}'& G~>(*k2iU?b"4Q)AG#)?|垂1+iG jKcuV!ߵDb2mi{dRRG $yڥ$؞_ Izd RlGR3iݖzDt:cȡ_Z1i}P%H?psax1=#z88/[J/UwW Kx,lDŽ }D0V˟phAdq~_>2S)ocf,rf 7DY{}89Ʉq6Բ=jՁN nĪ>N8GwN^e#su()yo:Ð6q/l_:cKxya6]Uݥ Ʒ@˹M7Dz(E .IDP@Gm;~O͞μ%[%"]!tc,S%ٷv)6&`X$S r9]@hl4Eّ#q7MGa}# .%!L|pHmf 6Wh#_ԡAC1jD54m_|,<.`!Lpni}96@U3N|]H8;KHɠ1>܋X&`$<W*h(JFU;)|Cv/ɳP1гE<1uoXJ"3l/Y0PiD([5Laa4C*^BtxV&V"pWkIo%(:4e6aCUvs}:AQ4\kElhG~K&bmLgtdS{> VfETN(LJ"'x$?cZs<33=yxV:UYaM&*x;yA<~fR% }BIb9ypxjaNYNfIbێ}tb {0vZ/*%ګi3kiJPbY@NK:x[2T5"hdU$F^>v"S3{?ll-i?km3O|hkG\R8и]lIɱJ,5x ''HQ6;wAflV :/;WpBd/~V\)⾩L׉au5^3az+gTac3kh}Q\TzqϪ*g v.J ߲i6펲-C=AGO( ޏ̮hUWT̗泽~ha({ּ\}^S46[iQ}a5Swn~<8c3(3apbQþJS;,x$UmOГ*{_8OMr:י3)HT8?ӶN;4#~bhqFfuLW۞Ql*So: )NJIuedq)OJHΨzJձˠ}wN*b ,XziJaYm\/,2QA$ݩ `ʤGaؘy՝TWK˅+{f ,/&$&ɭCUM}#pᨬy.O0)᳑$I)G ?(</i EaFY΢*'J<@q\پ fŒc+O$V))aJJ{/Kn5XYyhi/>"/4W~$[v@/3HqjMTC]I]JW8I.RXyNG\b9(}ʠnâ民y΍؈ER=tX` wdBb?pm|JBE)V(b 0dБ\o !`)68˔,k19FG]K.os\RV_~p4VQU0C,[aȓ G-FtոS?c[zY>WLKM  UU7)m稂]&d883%Vc8pQp=ӾkW7 EնerZڿ.=o atmHuBNsp*C4V} G$zjPfB 4sZRUTQd!%"m(\%SJ l yp T2M沛_m蒠mi[oQ.}OP 1tݦaЕ[W>3r_ ج1[l||Bn(M.kDq oΧoVu?hl endstream endobj 136 0 obj << /Filter /FlateDecode /Length1 1630 /Length2 7141 /Length3 0 /Length 7967 >> stream xڭTeX;%ACRJf!f`fnN鐔.iI~g}_3{{+^KK)@!.>n}C-k''k %u*# , dC!rs!|8LY lg>qpp`!>ANPg@{% 2VP*jA-7'-@ l AlP ~p-! i r q\@0g0 v0k( ur` #}p~ȴpvAj)ao~З@`4(  O\6  wqz@CYÀN 8wwU'To'8%7CN[Cn;0(C^B|فn.A? b3l"PzãE<ߦ`Gg?.>ZI'w~4`c vwQ4%7߱!v3 +=A@-0[ .>^޿az`[G  k<*ʪFu=/C7Yn=ܼ|u2< 7kai ۺ`s ҃@ [YkԌ4D㜁9Ӟ.>ԁzWU΀7_E˭Cߏ>5sS}k܉-(ڏ;x0VE ~ھaiU4!^m; `X߯^1"et!M%k#z`o9%Kߧ'ݛTٱLb֏^Vz_?܅Uuqp^32\*6ЦMl@%,1P!99\?[8 e*+rҋ=>㸌R]c /(K&천5DYMSѰABղ8ů3:Lu-uw-p(" |TM?pϒ_wrJUԙzKHJ;U)!m,w9qog1x|j&e8g]Im>ımǣO[= v_h|aB{s4ʼn{F!$(d؍;912S*F.{w )AJ(,?.;A?Vئ\ŝMz)\$^)fzr2DjJ5)p0wd+.`V ]% Z.EU숪~ 4*lQ*lAbhkkqpXp*Uv 3 @t|a%"`Yec7i5eu~lv͜'F 8qIZɩ= ]j5߯ӴzgTt/@ƜK`zh|%C89PRNa::~JIW'Y"\:BMU eGdz<X*FSp,7HGju@bPV$ ܵI(Yх9lJva\h&.5rY}EOMcT9U>3?4cipgo0[G5L)6W B 8}ގ-osE[S1r ל?7*Id2_zt:04{~U}'qBe-zݫp_'q,zXL͞IYx'?JɶLV-QUu򸦷f\:DX#N?Bj&@$pdX 32kDd!s_7&,qRfŷ\~#)nW"/OM6~RA2pR-8I~Kr0hM rx"m|7yxlۋF~$CmKj}&o__Y7/60ߚ+LPQK' J1b<ֈxjy`%B%#dez|7m@ViF@dI1Z2C mK47kOSYuvoMfeUoWv}֎~1VYR$gh~cb;]Q33uC]=l$JY\:_mp\{8kةVumw\:,7^ؓr񓍜uׁ\3?D!)_johϤ ܻcŐIjiekk=:Q<۞x:Vz>'BBd$%k̪B%_3E#s }[QGCL}djMb`BmjS'7m#flĹ]Dq~ 8Љ?hp7 $϶ׯ=zy^fpZy*'&"? 
jزz1ڜDJ*+1:wʰ\,aJ^ơ-\8<؈Os_`&7畎,E1tP69}=M'V1z[w@ֻZvMhO"}>@̱ߎ,sWo~D2;vM/ۛbG[Z-;{P^R0z,9b^]fԘsw ޜ3Q67!s-Ȇт|!EɇzT9ʜUg!/Hw1 !Q=(=My)]Zbykc9SAḀYW+ზTUClN}QĿ}N*3k%rCو /*]9|uuU҅l%NHq#dbKxPP^'T3DMb)({U![_R.ep4驈2EkƦSX:@b) :V)LUҺ Z 7eV6븎Ħ;kKeRE.^G]}Q+18@\N2*_hc6?eHmQ(v'W .Ce'&ٸO_@YFkf:S1R2[0wC~ҫްT?b4ݚ1 öj2cwaЩY]B ǼZ$V[+ YyKl-?ns>48$@^h<1u%^5ZG8z`=iɢ%P] K| X5f JvFr{r ǎPbexD/_VĶ`nQkё"s]vãR^d$r>) ~8r`oEޙ MNRe`.=fcq;FWϽ"ZUHU͂.:ϿAek uOXL͏tX>QCyFǚBPww9u*3Ww>,}\U=c䣖SpQAǽK2bi}L"4 (%OIhi3%"|^襛7xb:״o(̓dQ6*rUisvIA-5GXCXsy 6tp [xQ/Eniɰ2cPA~/gfϨ#ZN8 Z'C\_=zQӾX__ތeFj_vyy9^ ḡ}0(D]5ZToܔ`7VJ^ŕv? bW NOS h$9=H%6FGS$YI~~Md{L3>e8F 'Qs+`0%0ӫhzoF2 Ui2><3+ѤC.ZaI0a3Y%oJw*O.ִwnkVtҸɻ1PX^|yy)9kYJ?\g$ ǒJpdwJM#ƳHMYкMNܿ=#nF)}bfwv#c:+sn8z2g421a dzhFxR *n>Q.#Bؐ'ᶯNݙ wl;|J]4|]{ElM3ctX=NaŹ%WUzsG{/Lv}㆗QkqI ^jE]&P蒲{-:QH⇰+rqҫ~s2e;Ns x>2,{dޮ1A{UOgs=Lt(Yzdx(ftT-V]] ե2ȏ8ˊokM:"/u]7(8 q)`>Y1{ בgz+G3OߝCݒrߑHNE.jy8禱qm}f#ɼנ@/EWkKHʞ`tQIJ~t<3e _YL k-'1Go"~i}rӰ +𙿬c&u[4T<َ)dWIRRf3d3j0q]b4c>H۱o t'ҀϹ ٹ+DXAF]~Fm$YF)p͊jm='#E v|~()>2 K9ao\Enw)luY&'W Lj?$4_ҁx\f$AEHF7:LyZӄIL4vb) ,V1*@3JĻ**~)ST#+|s2qIZZ{I'Mv.U!ys:E`D,TIF?t n-Ƹcxm](뫲ӆƽqt# 'dYK9-!=e¨ o **]{FҺ6Rk99=SWÔv-GwchiI4{|Sẇr(X_gv0 ߔ>!9vJr*Ӈ3:)~)T*m_,J:g(ەV B$h27$KN1>Y@%ć[cYeO(x%*4p~0cD0]x,k! /rpCp0o9oZz,ȽjK{-B#uxZZ+VEQקq\gW[NZUgPd&i4,6 E iХmІiE=wvܶ@ES/f\"dERw幝VE6 zȨy ɜF6yEݾؙ%;GmKC5d/;r.+#(]iSF˺;*xcY `xvA9J׈H̳NQws/ƴI/d;~`d{!;=Hqy*a"#k'qMnM)ZL:0&LYZ쓧D'*G)k-B2i@+-1!gpr ްR#f9Aa>L0%ϹT`Sc̩31V8 IWusO∱kNsotjCMVz^U$ձP3H1-sJ;މgcܒ#\zz(d=T'x6ӗ<=2P!I1'r#z ~x/"c^5+PtJSwDҲXQ4.['wK8A{1SYdwbئ 瑚Wi_߰}"~s!QGd-@Dn@\eH)b %_nps:&Hj2/&4f;+4ZO`%Z <æ _"•dĔH]_MO5)A*PRaD2sֹgu"e= .ҞS>H "&'@u%Q^}9J }LROQ_Խ.(%7x/xb*{xQwdfxzC.dd3(*I.;}h_~Gk4ߍO3OؚB>Ft3x3,ꊿ;j\GV^حil?!~!ÈeElr㻠m̈zGBڗw+ROِFr-K{n&<^fJLCd6ώ^kl__E$+8w*YWPe uBc8GtnC+ L*wL nendstream endobj 137 0 obj << /Filter /FlateDecode /Length1 1616 /Length2 22407 /Length3 0 /Length 23240 >> stream xڬctek&W̊cbc'ٱmŠm'۶msn߾=nc3Ƣ QTa658102)k(ZYlem N [QC'7 4Xxxx)m@fN4tt%` 35/ p2LAV@wE-)y @ht0(:Y c#`jOiG;1h`t9:}f6N{d [9_ 9 hhs(*y::W 5kibkOI:lN@7b& G;+C9Mdc_f&V@Gǿ0 _7r_he7f xFErgԹm?3C7 C[+w IoHˌH@C;[YZusoֆ +GG)'ÿ1K3#A 7"`jhOlLV _>m%TAƖ64?T@_͜I]I]Q~k{'UwRlM[7' ''ʱxټb lft?Ӣdhcw_^EQ݀+ƼA)Nكڽ,vuy~U])a<o'}/ڽK wcYQu%/s hz6(۸R4~z^nAdV?WR+z!lcsz#sJ`c\َZ^zFdhpg.+'$Ņ#(]%2lr;~ wSE{t;@Mtߞ3u&"8VBb]UnrK=z\jIbkyOn{_p4 e#"<#R3KKx!>gG^ =qJD'~Rوo04ux2 ]$&WOL-մW d# E{.> Z[NZkd/Y]iȯd0d6.Mr>uWa ?NMM!_/VYa䉠ء9=GHCa nc3_cӗijRza[5W19? ~rܬ6x7  GvO [v[H"~]٥OW)9hEI)Z])q5a)<9EyEm[ t句$'=Z&ɶ2OE)S|@#m ܜmnƵ'-2K'u1J*t3VS^T6R^6LBjhG( gւT̀r} wk2V CY SIVX8m:o\TV<;1U^6[H,m=UҞh*|4F_^O쬛Cmov?ER+e}x瓗nv˽!pC(::ל/|xSvu &x,mB*06. 8 D#.q2upnZp{|~wGgLDQdz0ֶY*3T-<ٟ2G"->D9oбe46uFhDIzUѢW):*g M_:l{'u#b~R{(?4tJ|#{#II&]SC}0Gn A\CRàktTk))6icl*7.4;'_I*-1VDjC(K4ds6pGFQ̩[T)(SK rݷV}Lo`N |_%$4f,'1ݼfWkfNEO{ѝ*FV՘5#+(uv;Y&* pX _P=fa#^\W* _:y: &yo_+;h FtxӧH!wi|5@?ckI]8Fhə)=#d9Kenĸtj!Ė%|*E0ӵO-Ɛ?Y}+e0eeXՎS:*|q:O%~ %w~2oR)AIc;I42P3Z^0,1,pF0rI^G(O{e.iEԎ";z0;gpF qRkb?oIrSQA.:VJu2#_)%h;aG#+2FVi줋p~3B^uA/c034L>=rEةV}7W/kPt'QbH⪋= +viNH j62^ .A Q)\V&ß6qǍ]Bh9V31O+%lR3_ql% "WcoOJfZ\u?ﺞfP0bH /gﴎ4ke0*8ֈXx{ԐuHZ7q>s紧>>s{(|J7 آ&f݈ڟOEρ_볽neVGB[XeZ`Y62!5eXCh0gyVRDG=f{?+Abԏ$)7e-dt -X={s@s2x-wYv΁l;m7& SQׯq`__?n&00[e[h; (g )$fF x8,6|n1qO {իyo5fQ 5y24ē uloXyKEΪ ]>c ֧NHNRUV2;sEGVj-]ҽ(=L SC2`gi:oN;{wN,:;fO=@ % Y]l`d4>&Xg-_-< NEw* e}z(23q*f/yU2;-HasWa=v~ ჯ-;˶n\H;V36fHN^\lźJNd}#0F֤=SZcG4ƀ.T⾅5UFL5; *#9ҽ@tr:]6.!pEBurƧ'$*%ޏ= zd\RjYwCRΠEs4ؿX6Kn `Ek|4V%6_~'?Ui2|<@_r} %$ddD5t{!bIȑ6S/yK٧dyX)٦p"ߟ0;pv7LE⹺;3W{Cv9>J_#Č+-W~~]ȯF/3j2R5:a H{[kd.Xݽjnj7컝&8+~kq?_v ghM%!^DrٽHw? 
+شI$C_{Ygگ *VW"#cElPǏlNz?ZoGCd\-Fr j:!6?ɲb9lfF{/r2_ Wg6&[ ]rL*9 cĒsXU``s0g{Qk]|ZѬA,U0dXV.dcIN+pfx6f(1r, \dž>gF`Q] !OaAP>Y7;zG&bCߗ DK>8Mfc\ӥ?/=tv, 8 p|w d1Ii^PCx|f&?f_~!"4LǠ5|/[.{QIa~3h\N%Wi3@ۢ/tALH|j'$20!qE3hOKH'CpsS%#L7b&,_0nޗyO^_9y'a5ܝteܽ}y~6~K*-3"gM(Z6^JqdK50>4~!AsYp{}a`kÊlҍoFMh K!']<ѐЍC &;,tŒq mdv`(%#9|WegH<"U%_H.MNݧbD3rTwGdl65uòzQ$K2ߋNkۤ` #;G@#C-> Ӥ#LKaB.Mo5))*ɛ/7x56JR;u31.6j&Ƭ࿜LV*;:xN ykS=(!y&Hq˕ J[7qW0㨂‰0w]y}܂rr&=7AU3_Y9Փջ\>&8-A\?] 3N]\g8[gIw͸q%ɋg<`i:SY%[N$A@+F6}|ps>n$)ӟBcM-cy{ 4ヶ66aฯElĉ lu]s%)-]󩠁~b#IӾCZ] \|֘lUL(8!Ł^`q ɛ1At~ȐG8?*r-XxxO쨓^X\pR*&LJ´bs =H]C@q U 3${zкs{nl휇X M/~)0 `p[k_-Ǭ݉58DZ۸oNbcE8WdJ/_tҳ M%WXATo_n0+Xp ^2+#ɛs n%[A>.VڟvSP',7hH $?ZOS `OӲh)aWqi4/ M==SQYv^K!JT+dl|% 8Hd3dusOR9J‰Ali8J%]V@?PۆOYrZysO)`k7MAya6*ZyY?]#'0J;˥4#Z iWמrWTn1N XFI%Z̈́-?oYfw$`]5?e}4.~6Sg5DAk*R\ (Nk!g"vZϦ.$ˆrx鍴㗃TXr+mot iiތ*~>P9Ooڝ֓<ΊE/)US,jSyp/Et^"/jb YN~z6YkZ8?Vܲ\\H[CY5o,=Kr{E%i0DK~\zc(3ԯ Yxz8!WMGMjTaA⊴hn" ^ҡ {?s,@[fR!|4\Ձ%r C7t8.Opgl=wFt m Dhn$Mu7IÆ5Uh2n+OBi~A4Mu:BMTO푯JN1/gK*{=gdMw0E캬j9m$.|yf]zyUY}%CUbD.~gu+Wi9 f>|X.H4?1DX١O%宱wg3[z^T @,36-٢Wz/n#|#vpTgj?BweJ Ϲk.~ a,>ى<fs ~ *xb9Kx/=( O2Yr/Yir#" [tpbCoqV̪,S38ҽ[*L PYj&ҩgC8tk=+<?8c0 lr2 N-sfwњKtN (֔4RsS=T[ч`Z9@o H sPZZg4l!uQ+M8bi#N/30ЦwIHb d˜v@@ŊdgWmCLE@ؖ(u*B8=AӚQBE[V)Z!XmZnlB}ݳ`qw|_hD AFeZ&e+A@}MxV0ՊrjF5%{%T>)7j F"o"Y5Zݿx 2JVψ#g*vġເZ千cfoq6糅繜GNt(j6jn~!i~g~_]B*1Of wAYسgZ\Cp>B@JzD"qeR붴4*tܳVlqVϙjeO&kljCI"7AS﵂ҽkS5'?]=6?Kb9\CODu}oY79]N%hi7 ?7ҧH~D"x?>]G3xXr9:/dp^}~=#)%c;N&(9yc((xw}747'lI.!Kr}dp" Y=WilfӳL)`#X \`(Ru-T<i@Mʔ2*dsM &EY59j"nWWn}Cu?F9_7'xC3;V6TG2yE+ Whx! &'V8\~<յh2 7fթVJŋmZͪ*$GUϐz sʌВ}o!ݢ18V_Z}A[lUf w aQ_9F$$ɹR@K)_]@Ab-4ݽ˷(byB1Z=mő8!MU+}[kB0KB& m;] ӫݬ'OB<\'6R ұ H=2P<ݜg \N§OΰÚ D?o_>0,j8H%'ni2^[.Ni!\cm ?w!p<6.Hn-8MBnL@z.b\GUHc no. |Ũ>| @Qh$LX;`4*<*n@tүSXY)aoЧ޷!H^ߌ^[*~ 7欩$ƌc;QJF (MPpFI+=#Ø?7 >Bﺏᬭl`ʓ+?fZ2ŋZ!ZI7xnw6(P=Z=hqK֯e W0>*~ELYHcʙ Nbn-g&/mJ"TE״3;';.q@iGEyABꩠ+>gJG@^?$՝Jq(1Auwv7:D.\a~RcB:IQ?V諀]瓏I.cAnbGB;qTܺ](D03楇LIDS Ō)iA gj9*-i;Că/xȅՄ$ͭ~lAD||6zf| uźg;M-&X /\5M3#&SsX$U+7<֞P>!Tk Em#}+"ӑMs| nMqAkEj߰"TMEwWS˄?4'3A%3E(D$32M f Pu9TQ$:ڸ1z*{ux"vd!"[0+DaqGcKbI9KؔbO2~ưk՛d\Pg@G?7?{,[ ɯj4Ii]g{2 p7vѵr@Cp/H8G0z2|FFP:XiEAۃSkxspim+;ȳ-Vjt(F5 ljL0maКwgp~:#j.@V]IC,辟`99t;Dߝs5 E6@M鲘caߌCMi5ht0g7G)hAw^?oIxgHˡlUt(Ni<=#H!뱨 22 ('/ir)t9,j#~ ?{V%-Al23W6R?w0z Vg.҉<:V],G >4Iϓh,U4{31K ?AjA=<2R!arڥ 3 8K:AT;Dopc!D\t|؂V}&~xp9$zͨ>dw&8Q3y]9zsk:%j*ΟinUӊ;N2Ssޔimf_hm_x+]3 WI5s̲ih,TJΫ [H9m*pyVJr1R)5a]^yIV5!'>Y!!r 0dG?mp)Lc79%CddS#໺(x{}" Adٯب E9vs %3y3kxdur( M+%BԸ:IC\^x d̽x&j1$*pa4U_^FȯWٿ[ď1-ߦ.%)f뾂$yڔ~ *t'Lz"7;\5Awr8CTJշHo3֋!+~D JIXbioGB3!HX'm0<-"SeE-=ޓC d}R {j Q"w+ک+Jtt.ODi4?-QȇQH+ E4i 3\`aEʕGa>elxsh4v`^o4`>~n9 +y<]'@r5ҫ;5I=\^Rp+djfdq԰]1D<,l<,ߞoj4@{!$|o~in$9MCzO~8"4o^tme@2%kƌ ) 4 Η˥mBV LlA[WvP2'9 nrO ~SBo=:.DNj~ GIbT0Us 0ҐXEA*c[SkQC$r^8l _lvRIY-n`V#XTwYT4sVejԍ܂*|ʜF0?c}`/ jt0_Qm2 JrHGh-ٌRTV=GGb)>$zK!R=DsWpSJ*E:8t>T@ joC]4J@EKr$nIʐڳTy*GءM|iVF/+[i6=* b~n/ݖTGꒊ*w8x!E㒍zdyGC%[ Ii5(y^xodI%uUOh,h_F ;rYa[[z?P]7\kbm&ˣ:p-g"-rL +74-_Vom]R _GQ1b~@y :oce.** ]g% -.\t$Mbq N70q4> e0i1S}MGF,Uh:%ށ-tH2GcsE}8fXXneݘpC՜nhy<|oa[O[YeVu.@McP@!͵XFpE~N(lKu돧xַD`f!ι`Rer1]wHє*K T;FD@WClçCꎰ:<n.np4$z"c>޾y GtiDoVw c{ zd0 &]y1"AO 씑ϛCHWN#,MK!fMfv1o!y= MhD6 [~xW "@_9\gj#`75.2~q }~APaP:n upЁ'6$(JdeHr4L':4XM}G,5^)!n4ll[,3"da;qƭonuyd6.cSMGVV"@9IUqZk<二%ͦ)ҭƣq6d:R[P]4Z+f>͝؂@o<Ֆp00BNr˖!)]g`GA+~75A6H0HVolZ+n5UƉq l}ǽ,- hKn?'N4OSQSFO(I ]2 {J:ru'h$-%'tQL1| vu>*s +ipaFlπqqP(?xK7[(ݞ1d,{qDzgl|bjL霤 h{ \{oܳ=yͦ̀^utB*ɞW5 iT[nY$msa۪;'$~NWV#UvHgjQw0ؑ#fn!Tс{(x,Ozh3s9U̳.7Jut_xms?jExrr8jҮWinܲ!3m+jylrϥ2@KpJ4~kwЬ*sdrOpoIjv0FEx}RهF.q;Au"z8ZIšT{J%0`Z} eӑaX~| aٗadN6H_=I kx2 q,KrԟG]~ XZޡ|q| jܘ:ɌH}UqE= H(9cU@:>gA]I9Z?Su’n$Į/KK8 j,-SeTedU00X IJNX=p%{Uc"$Xs  g? 
j I&SF4wimUKE2.D}8ԫ#LWۣR=D~j5X%$|f쭥VyW6jT9K5 jTp{Q0*q3rCJ1ȵ}c|ҋvOI hoEh=q!M ?dU> R6Z[dH0iMgVvvR|%KC?Hown7ӱBN1_ #qON٫FAARqw=6 N]3fugF])H9r~bB dxXz {jh݀_mxqunf q7&[~4~$2J}iDwQʹA\R%c655o l1wJ4;q2ܸ8$g )FwDN S;}  ٪$翿h3MK8 g,sx @=?!u=_22,E*;uN.ќ#GQҿ '1A("Q>j!1cQ CQ pMM,`sYCUҾ@z7CijqN E`~3(+K|Q&HmH{O}[^,3RhA(1 Z| ̬lf;P̒CXd-t1kil?_0k'H nYb&gFx>hg}jI$}[cnGfU0Mf)iɊ3$:Pf4rʦn.?(1?YGprb} ڴj 7PD! vl7@Q0@κׯ%_ PW`NR[?~cMuLSZ!C $|۽*Pѝ?BUC<1KPUkaBџ>ᅬr M8T88 .R(ɉuW32:.41Mh%B¯Hk ϛd%P9g T0T_;*\>  m__'mlx] iI_).SLBk ^O1 l~39V* * g"@կep5Q POf =dḧ?*K諳ҼWޙaX4'Jyt KAk0gkfFgU"]<3 e˞m(~IW=y'NnXkĥ?gW sCNxM'cTfyv PlU'4D+}!|a72I!BzPNMP˓ W}wvHŠM#ȫ fUIC$PY$1Ӡ-cE&h "xZ.%!XQn)߅s/f vV.ऺЯ?Zq:X;&11J$MD>.Wt"⇀c Dw.wzS]}CmX_1䳱ώty\7\P֯/nJcb W'TǩM`[wH\_HŋpC"0c+4حҢ  ]mJ'AmJUDK;1D#kiRl#_vёi^~ q$=d=B|2B 5 rj_ BpkMԅѾ`,qh5V+4lfBzͧ`%!O暑w_s/=ylyyheh\,#hyR96coG$QzrtcX u[LKtXt_ִ4x[%nfy/2nwus_>ߤu5=*jn=vlVO M"WCCkFyy<0! ͻ>Ygfo USItYX c*jqE?kd \b$yRNs>"CP.'f&"TshL}⛪C',LנQlҭP_Te(fqG*F)\xKZ|G,N[\MLj@ȏ츣 s&ɕ!LMeq}]1g}ExM+0i˯\/M:qmHJb]Jѧ11kҵ ZSR &DF=O-+aa\ 4\E}8lҸD ̖ ţEW7؄zaKf0UUژJ!؜jIn|w: fhFb1\٘6:WuĈ*lE|Z8wy-{7u%rW0Pz IDŽ~3q2 :y,ߘW۳\ h@"cQqt%?cpdI,Ms3M=[do PY ˒ 泭ۑɱ?LxdK $3h‘)qX՛R ~c>[5Vһe صZ09Ex@L8ļ5RJjf#[!絩PFVw+Wލݟ~B67 "s"!&&1^ rcW giEzp_o7'33 R"suPEQq^5`!طFDre?L#b Z@&w(dGL#/Q"7tΚfy~sntwʟ]# .P_%sL/l▤E3ljѦȪduDcl!TG"؊(he&Pu9~B hfxv~!oXXXc0~k]`Bu,nq˜qM²/m EfiUa `-Y\ZPw;v/#>bZA0# -[ݲQS{ikJ;%u|7iL|Q3F-}K::OE}H 2 OkuOx$Gr:w#9v/c=~4y("{Wpת1}/رrգݫ+t9? }##Mf]/8*r83FuS錈 1Hbi”XvxCo(h<+[NSk)ӨxLg>%ke^%5 b1`ZKHwBg?=r;-$Z*aĆ$mdn㊆s YqT",,[# sG5gߌv]N y㷦 ~Bxw.CA|1 s*%So  hM캔WJC2ԆRA\'$0w4أʘs'%K~ʞU5ϼ51f"8~T|7EvXCH|vW~T;ɖv!֮3M|~o({DD5kPؼh_{o;-͖p`!IpcvyYIsH@}UI_ik5*endstream endobj 138 0 obj << /Filter /FlateDecode /Length1 1620 /Length2 14328 /Length3 0 /Length 15163 >> stream xڭzcdۖuڶm۶J+ҶmTڶmvVf}_D\k.̽ω3F+ J:330h*Z[襝 aEM vbΦMƿ7tv21011_F#nklg׎Qu65lGۿ{ì-[e;aL1ؗ6pW׆04M|{,aXS^R oQtr2§kF{]/`8ܝTV+ydu~'u-G#{G5NmCBjB+<;H#Nr0p|h4ut}N= U&N4(|#QvCSCXf]P?MwN좚%zx0KϢ?.H qր_~x `ߠY#׫+Fu:e,P+3b7]LL2s] 'MHsZ]Fs*n܌Ϲks"~2ʂiA0Z],_]Ǯ;#m%0wDIJ<-xm7%*kmB+Ŀ[vtĎ74 s!bQ4z(Pi~/sܻUTY8p;z=HR_Xiu,fv1$6 aɿs-;舔)ԉʎGb=D)>:pb;!|{sJ#1S۝_IF)VM]^~r(H7f`rπeQ㼛 G뻟pcK$)jqv2StJBax6[EAV@M owbUV׈1{B4:h'E 3b`N17rbC}&%!QHN+!n^.=泒# [!"}\I 1~N8lutu\zٗ@ɝ{h'KgrC NZo$OzUgrÓ^)fUzyX0br3w+s=Q:R]~ fr]ez,`6nj u sY!:N*dCI&mG>3xYF,$4H:],3yý0ȧTgE@ v/WuSD3(\ Ù*pWG]ԋUr145'l>PW1/[_ 0mUA7=c)ļEt>Kיj0-P ӭyH>Ci;PN$yxZ;jZvcw(+&z#}O?X) @o=RD٨1[)HLX)3oVj#+ƶW]LZ[%ئ&kT腁6(gx2#8Z~l1 Gq*+L Akq)s.{rȨsd8/1Krqcqu І1pxLp1GaF/-BڦƙA :4Vz;%֛Ma5TYe&3o2Ro/D͚l[G$0Ywhx-QyР -7 1\z9V>` XNr2p4SϞu=zUcJw!- FrKsU4e!A@˲e~ۻ/lw -;m͆Я0/iDөr*T:q/ZE:ok .}VPXb4"qH[ۃcDG70c/0{F;җ}ݹlI&@@f%B.ʴK&^hV5~y&W4-;m遚ܙPU+SuvQx5W Iy8H_hKP!}L{H|fȭ) U"1 SPN\)kT_Z3 tKBNַܾ!inPtzuT`gl2mM =(w * ZBqǾ]XQD%Q3RɛBC65LZ2=iC+ͬ9"unc7(CyG.7v]ܷ>aC ib6H0ɢ73_<.,E<ɥːno7IȒm*5RvPմmø F-IS-@VTP23*) Pzzw|9֮c;+a45R1>E=, z N%A`]O=҄j| SK]L|%#n3~g37}B{ξoOkl*x ?P\THS>9bjtQ']퟇F#yt-t|] L\a6 `JF t} W}$+ %5/}H%)̡=m5 '0# :qY-mF?h6KL}6mqzyAgW* 0=qt&zAFvR%ߕjU^qFi=TԎUTl[ A-6|:JBϬI#w7{i v-7_75z:6|HU~YM U-$u~50l 9gZDwDc/ ]Lcel?>GT2bE]?B'W0=J4MkA2eFmi)i)wYzJd6;􅌍VL~ko.O%y3|!BTg[~qHN)j2a v HV'?)-z$OTSQCHYZO8ˀTc B|!w`-J*d|4PMHJ1TvZ3`:ERV;DZu(hHcGRO :!L)C'F׺f7P*(Y bd2ɂ<%;jt8 84R6Pӿ4`A5F"@B#@YldM2"~>U*"\a~Sj 8(TՄӨ[6_*l*[아&ϜѡWq//].|D܀F+^7K{yek4ϐN껌\bQfp dmgo->nmd) 4$g,/Hlwq v0>RJ 9Hf+7/sڢf|?}~׋<\p!d:AerfFs,ǡWWdBI ܼYNHBP˿DQ_HWV%BJX wK`3LmМKw|QfB6uZG|Eߙll߈$AΘ#";+KG}v1a 2@˻ oO|tf} a&1~=+)g`/@AKw e> iG6Y*{)FP SoUb0BkqW`\ĺv8mtI4<ԣ5o.?9,>o%ph?'?((lL<1޵˺H}\&e%y'RBX޳}D[x$snD c;u `K&`;іo4:įqSPa`.E{ɤtiFVJČ.?ڲ]7)1$,v Et`CǛLϑ8T- C'՜" ѴR[i-KH6k9OPү\ L67iK]>x)wuʖeLyuP v}n1ΓJCQ_,pGr}5ExG:UӺ[ҩgi#c:lNܜf~ReVj?]+ -ER]dp^jaݮol}ECjo 'Tbm 
O%IEI/7e^"'v>j+.}%,M>8+JC\Qm#=Vp$cL "02ư}WNj=m){)Vxp|Fܱ^-ߗ| r8^^cI7 s-MU\.բ<VZv@H#]]1ǧ;WtZ/,17Cn#)ʱJ* Ɗ˾z ҇NѼڋ,,D"B{XE[!XqmQbf!a~% [GUWΘSjMoN=]mz P&d>E)}ܝ8SFz*-/JLjZ$hB1+32!IDBZiqz=1 Ds7DT\~0™OحOi`>h~= FY9o@~"O nD6|-҇.D+3Ó0ޣ8fculƴSv8.u.B6i,USeK!eԂG”U@-4r,ԘHt5GkLQuy&h_Q{ohY]>I3=萶d`x<:YA)'mO4cincO'#/zY3EzE$Y/Ÿk%{ T#-扂L>$9 ;=ƽ|1$F؜ X.=(}Q.gzK4+'# _Ѡ֏etܘ1YvJ^ieъB:=ur`"(?S|zWUoy :CZ^n RNM>(M7vRh?UMY%Wh1Be}ZՇ8V&eH#r<0 ߡgVq'*yfbt ,4PX i`;V@\r=zdNi6r2pl #[Wdq,-3Ke8 ~0 Cɵ!E*O6Wt;;E]Vjz^0 g1?j;>De=nvvO.5PBmAxp縢:ݜ `br|-Zh@AJf![@-y-w\P hzKi؊: [HjG1.6TKFOVf߳ 5 ShMLW%P]9 viVpѰ 7.Ѽ-ke' e h #@M;=GI]lҴ}8H0`n I=yfMEV-EI"n*}b܏%4s#/;:s=F_\Q|d@ E>;I}G?=.UV cuA |!&(TetT!px˿)`Qv~bk lxPSwSV~"*#`j h"hRN[ϝ>b".j;wkt(}lcU\|"/3]@q~ 0,1@? j9L8ˑi6.!Β&@_0Cl*XN& CUa`0ueaD;Xm>?3wEJi݄ ]ŰչsI`m2y5D<⯕s95 gD_*w@Ws\$`dۂ-N0پsT)4:?akű^ :Bp ==7.c,c(J=봬t$jPn9>c"°(Ҋ+@S O2~KUmQ.F~ t`%nٽk)_f#qڙ+[SHc~`9 0VU D-&'ф͜!#Nk6|jrP&NAѨO҃XLSRiՎ-ԌC^6/f{erpM^WRQ0oG^rDTQ`jϱ[m""r鋴@dܟ ^iVw؂ @ 봲HV%'PW=¶1F}>.2w^X [~V;~3n f̟3 Zc̦2c8 k$̃H]j Wᛱ)V|~_۳)vRxfR.,UDYv.MPvPBC( d#_2,-jY[yg`4 f]F]P(̓ԉ%v|4V􊄔ZR X2%RW?*?|O0cjV5{2PiǓAO5a^Ѻ1!zCՂp3i[R}:Z\1uŦHVHvؼ XL0.AopH\@Li9$XM!7p .r<7gl ٰ3@1w| = ~_\1 JBOq"J3;%Rr1unm=o-1 }v<:L&E}.̅>?$  Pٴ:~*q\IY囑v;~-4.뺵N) d}tūҪ["#U@'a$.?F\-x1SG1WZmu4ؓ &1 εNf\DjdQ"J)Lr,0c旿?-թ//L&OݬS<분d?#⎱\aq#uQaʡlU{#A%k̂),ʩyWv;=f#褭#Z;P@wjP BuDѐT79ǐ}qk+8c$_g羣905.x6+iDiG#5JF H iCshYXx3&6x4t0RM Ebeqːz&xWk+%5ۑӄ8"w)^[FY=H߰&fKP!:0^D8"R,0tZfͣgM㹖VR;N4I^g*Y_ʟ߇z)EHp?,$n'n&Ty\dNUT/]oR-2\FMzs3/F[b??E(?xQeSBR ͦ0|YcϿԌ~h!:1ЋئZ]a  l_.:m s/j\uXaE4tk&Ckf9G+Ese dq3G孶,`&Ky3uPQ+ I0(dh 79SD6 r<[Fc:Q".rN7nWj(_¹gXubq7̝>1ѡWd84/\ݻ/=oяǔaFʔ_ 9Ou8j\m`@i1aU$e ZԜo" ϣ|,C@v@d:7O#8W9Դ@SAFrТbb"f\/j/@j4SnV?6/JoI'IHY*~~l$~+h=\]UPܪ7}9 I#1c};jvG0(A8"C^BwŶˈ?ynC'ڙ+<Ĝ'[ZBmt3fx )MGJ`=0jE3Xv}p# r|*ߒ\{Ka37\6)hS7x\5OA;CCz3"$9T[z4s O nXwX9nsy3'sϪظ'Hnhe:l@=V:9R@1ߠOÎ%N@| }_j(f. ?q{Wަ!%6}K39+Fp~˼P"ѻPKHN'dS@u8Mh(_i5 *kR `sGoV2awQvr2G̴ܱ#f P$?[(%zX8{sEFtf iJU]AMm xL6($.:Z0U[$&@N%k"GV1H5 )qdve+Bɻs.$/W)J7ӂЁ=R¬v+9_"@]hB]cy _0vƴ&sۼbI޺5Jɘ`3dShϟՆh&"ٲQ]?`'͵_K<)ԯCcŮk$(΂Dk"y)x OCY4*poMR `!lX I-fUP6:R/5PH2?YYḤxQc0츠7uDNq ̞1¥ԘaR껱PPYlԆPnBDL'Bv 5Kh !&P(?! ",lI0|s~*]9_Y+Hn]߳Y'/n\PlhŦӣ}E,KV 6=:v!%=gT̙UTޗqvnL3"/pZԔ5} ԥWv88r%#{ih˺f5FQ䧊zBH('qrm%KINq:ׯ,&)٫i=m캃nЙ.7>|׳OVh(R&H$+6LgkHھgzjʝ4C;sh@l2Sb+G#eX3dj(%WG{׬F\ӵm7}|S9~NLw~7Nb,+TXNU޽rkpH-׽a d: FI:rWY7FB5{Ee׆QIsx7ziv9 1ύSLX "{ 90&(yaӤj԰K/}=x6xU]TZpӽm&It%_f+x8>JV01PBJUWHat@˪zo6KNZ퍶1!+ ְN1K;5 j@ܽjWתwZ=.nԠ'SrZAXǃlEiX #{ z2D$+uܾ+4CN/SuGÞ>M@Z52SM厽aOi-uZxV7m۟ڵƅ\3O0*4?}|Ew"o(suŀwi.1'Xށo"@ 8 U0y|fU(Ӷ~_}o/a<˹ӓtu3pDnEPSI5\A?;DIjQX8exg|nT-5sL#`ދf_ #?}))΢J;4_d΂ +)m/w:KU\W /ἃ9nqY+KEΟ*^cTawJBu\W~pvqw$4񅐅 r-glge2 ͐?JهN9Yy-f\XT0ށ:-! Cayʁ|2?@PcIɌendstream endobj 139 0 obj << /Length 49 /Filter /FlateDecode >> stream xKK/JIKNrLJ*N-I-+I@+2T020R0"Iendstream endobj 140 0 obj << /Filter /FlateDecode /Length 429 >> stream xmRMo0WHbLI !%$r臚%8k<)`vwf]{vqV-! -:2mZ167 r/ô^ޒZԣu(:?wPuuSA!rn?9$_`z=0lbe68CO‹N*%Z#Rh2P|84{]$Mii`= ߍ8u5 HQ mhg+]yWwS]F@j+ʇ0h9OaHʱ cCG9g +?Ljs'sPD>8y' b#='"~Ԯ'gqDY_|}b-G-M ûZSp'춋{Uk;Tݞb#endstream endobj 141 0 obj << /Filter /FlateDecode /Length 900 >> stream xmUMo:W5?$R. d9M eCkmCp;;w~>|3E_?O]5߶w]Occ]=~?}Oyh9%?۹׬B|Ɯ>);vw%g43>\ 6 EJ78 1{~`W(-;]%=xe_,b+-O;q\L}UI--=BKE1p[! 
Mߊyu>.N5K)Wb٬8i[_uʕMzQ)V(Txޢjy!Z2P="Zd0\ÃGR\).2*Шa!U,H`+j.5Nα@VK-x%3%AYӀzΚ>kP#5m0Woþj.ZT$X/)n)#Wo(oRZ $Kp4Z-b\1ܰJ P"GXQi/8k^Zq:Zs9dB )sL-7xJ`aɽ)f$1 dъcCZC<73JgznHȰYɚTa,_-O87}KԴܗLloK+gJ.GZyVc48Wt]:P~`rZq.n1] S/Pu7Ue:?&?!d&1yHn5)yғBx#1ޞ]Go׏M?Xendstream endobj 142 0 obj << /Filter /FlateDecode /Length 664 >> stream xmTMo0WxNB+8l[+ML7RI";onDo3ތ?n~<&yݽIr/ŋ=wWIG77eW]Nm=ij몝m-m3Q/oMq'}vIֿ/ ˺sӵBK)ɱn;A9n1vAxHŢn!XN4$>΃=mc-bB}hjM^Uwww BF˥푊QM]1ʫڞCeݡ}BʥXl6ȶ5R^clFrJՒk ;%9& }8K|y091x&GϹPT#Z%)&!lRvDr䨑\#G|bǚHUʸ4'22| ^Dm=^sS<cLUي_3;S}Ш2?}LN=8g,u..Q/)87l _??q Zqб<4 4谡Цg~ѧ,I 4sY^y?4hv5O#ܵy7S4 &*s0P.9S0׬p~ne8|p\ouqn6|kq_^~& amendstream endobj 143 0 obj << /Filter /FlateDecode /Length 665 >> stream xmTMk0WhFG*! miʲVZCcYy#9햅ļ{3񸟤e&Oo]&C]]Mq>zwt߉Ǯ)n.pCx?nڽVgx=itO"i [\l\WM}'ԭ̚t4pXeȉeU oq yM\-CnCW_Ey}wP dZz891euB)] W-\v\]~[S!8&+Zce"'2Ɍ5I@|"B2AQhSlLء28a}ɑFq5ҍnnbfǮCG= Wܢe$g;A,:sx l=NOTƘ$0_س/vЧQ%~Zx pX2]$^qnaK??q FqMyc0=) &l(mi,3|d &\c ]͹&ӈ9w{d-tx\ \cΜekqLJs?<@>qhx .׷8wl~1V<*m"mmDaendstream endobj 144 0 obj << /Filter /FlateDecode /Length 3525 >> stream xMs۸_Jv&'&6L{$b"Hq~}Pٶ@|x_\M_<9er(%\d\,'f9KRIEy+~_qN$#/T|T.DS1I`nYKRJ(į Vhm܎hծR-#iGIp͘'nٳ(y d-IG7 Ywd\{@a4@dL |b'DO{- kt'&'P'b8Q? E6Pe;$-(Ŕ! )? L:пNZ@A+3aUK^xRIK{v- %a"aVVuV&4c NKYn͘.Λ]%ӱ݌|Q3v0?oۍǡU9u~]; ?]m}GZ)Du!E{.cFx߇̌L7 >j2X{*#l¿HoYW&_b]\^|7Sq6jw|&mh!|:Uyr3жrs` 1U*d;Oꂦ_HFßǀ& bJCda\|.L_: ' 7,wg!MwAHUfyDz[m\7~Tvd "#:C7LoA@ ݴ"GXԈ;J6na=B]!DQua hDC\z |0R ) 1IByP{zvDl[1e;3`2D ^tALY>}$׾lk eH n~XV/ c&plǁEy1T~.7Ek./7M#B1QA*Wn}/j 6LC! 7hq/7!b~|:L 0!,/ݻzfGde~xdm] }Hа> eʌa-@ znsSm9ɽ2X& X=wC8x,J z:;pqWف8!.ྌxk*M`6@6ܡj} # "/gLan'1Nh"E\ Ź7 ;Bݾܯ.|Lm ɀ=_7GIaHc{q)j(@ƳXuLPطE2Hb4LсS΍JƲp#=ʏdG"kQ'qa2?85j1f=1BC\ aLݟ0"P'u,b9H9K&x^AxPa?S/}E=-V@q8 sPP( s%<> !-8βg,!:6 lՖNKf`O9jX};Mpj{~iA{_TspxީC6vClz"4FE[o^ռ۲Y@6l (;{'!K>&ỢNHB"e:ݘfgr~f_OtmJL4N/f 䨻u\)?.3Oĝu̧OY1∃4xDCDDl$ ld@őzD)JoEuJϮ\«;|f!|9SӉսJE|"F/GNxl~'=:/TWD1orD<ޭtG 2b1lIFPl!>%q]cZtQ :8v>KAq-19SF|^˦,y8fJeub?OW'ݟP?./ ITer(M&[iFgԗb/MS |w>}uK-s<é|KlBF>rZ1Tr ÔRy&䅇.su硋M%:Hn$O7.dzL{xS$$MeSɏQ )a-YJgڕl+ݬ(Goz(w-C㩻A>XU *oםnr-Bΰ :>X3 3p;ž=.|r4{LO3 ~z3| ^sNOca_jiZՖmn[=|zeL,>0 fj `3_/}ibؠL{cM\5Lm{wXAg-K1qerCvi:1+sR8y#Oʴq8B^kμyqz~Pf!7jQXHn/KGږ0qD LϷ0?Z]zҬ߬X`{o3K)7zsvvG`ŻMJSlZQ$]߱UY +8!jV!VPx69S?p)<8^@ŧ 27߹4QDR*tY , Z 9RYүN  1jNЭ[ܔ87󦸴q)5:e^b7+6|ڃVQ̤&\4p~Fg ˗]g<^pPˁ@ :Tx9j|/p," ~&ZZ^|8}G>$ 9lTρȮ Z||yɈ{QzH_t$7, S5sF9> stream xڍxT6" ] 00 ] ҭtIt#~9=Yk}ݵu5L:IggC8`R2:jNKJ/M5?6< 2qIlV"eTl+y(밽G2V2U -&UyT0&η߷uGY)L:zwT/>q) k]֜zy:mʒfFD$s[hZwM% ɜ_y),*|8q|~49ÑX$vGu Hx> CI7wEufi] $q *Iޮ4N9zKٶ> 2̬~"OڴQ"=m/M-?;>pv)>.cii/G`=X袍D2VmvM[A(6|!4|0'_@P)Ъ13+\Ewjh[Q^<2k͑'YHߛ㫺C·a\tw<E qr-Ƥpmpl͜D+VBEi}~(3 h9ֆQ}k\1 kz|_*P1L{jD%ndUfwHhgM9AbL>L'%\_l_܉1>Õ ?(ni<9v(H?T_c$"%9*m`2%}iɈuY֬;>Xm |g}X%pzt~pj 2/V޻GmkfߜfCka]6&L;V<|W1yA1\"N qi ,Jw}&"^[Ѧ?h:-{:,źp[FGޫ0zv 9f|IB[k&]͆ۋ8!D7IH<kΓ/thvy#NK0UkUƧQM{6eƺ;>޴̛t "Ux1D'?(y@CpkWE #7i1(^!xW5-, e z{X$6$'Inz_6`]ΉrI=5\A&F> fg#r9/d&,jX=ޘK ,tBl/-7a=Iݧ:AS~R?cPgtzY'% q\xrks&Q :tVݐ>OGçTJK;2 ,HC嬶F2yiu_7f]\O"5kiA?wKt)vΟ+R&8I=TlĪ&,R:tz!/WZA0Ih„C:?ܭ<{8ߗ'rގEҭϢTֻ {\Yw&PٽUL*y@IBaH̹qfqvfګ<7&nS4N=>!"ݕ[2YKfj)ٝtO;&0ogH`{dp~H>r9]7S:a碼yjhpv|D;|ʇ+S&B͙; bū~Krʑ9r򣃶:]+,oi> 8-iwgfX4Uzh2wr%^f` vP;)wEe5VWN* k,E 45[B {5V]*l j5j!< t})5W:Ϲy^T>OUjxܦ>]>e %Vp^Bhi4%qߑr$@z(dbD뗫* km*͞!zVYԄ^(~HSypjD#z=B̪$=/}p>XL޼Suz72P؀gyGi)1AwgoVX& `QH%l_`swW|@[걺uh웣9,(3[yjM˜Ză1!8dM+T1nT Y<ΪLAa$^rruHԤI$сnHiop [gV>Z9|({U_qA: |d6Q$]GRAd,d;'NOi_t%)1Ϲ?N+&N;!^p5չrk%^繁Ƚa:Jʪu׳e[C8y1 nkؽ#,1l~5 ]ѸR7rw 5=6W8%!+Ae.0DHBޘWqUw;&2Z|em"'|+Htf4!1 _'3bPt,..""1L̰\0=U\~qxNr[M8lIۖ-<;@[P8 *~nҢ#=s8%!ӍLO7: (}r6\TPց>lR搾2aɛ9E1 "bP8B?-I$$ՎB[QXk7.F̷4ƥ_ܶ8";ئipWLL^J2N:DkU_Č-ƝK[k% ~Ⓙ'Xk`iZQ'g }1ʓ`q ]ޢM:p`ݩ; Ŷ]wovv+wM^sc#ksj#o˴e Y5Uw 
!34-|=FJC@cΪieM≫sAdDc|?/ xyюyqupaQg8*Ih+"GJ$X%ZD&<\t16#'STs9$USkE \"J[xEg#Գz"Jͤ,51l98^CҕYv4Ǜ8Q Zd nkRoJBg rOCX^`T8/8Kl>Ly9sbh_L>|W @ؖG`hftCؕ Pc׌n"5;W[-^E7 m\d1OaW.:? M6:ܐX-gw)H|ӎyۊAؼ[d">c.voZuadw?`^ie5:>L&ov|Kwbz"{}k77msl ɖ U 9wûQnΚMز`/S3=CQH/A|zW o˙ 4e({j%f4-%_3>oF꾡9?ч9]i|p<шמXhGn]_R*<\ LHiR8y@F %h &#'3u Ѿ:(ĥ|J0e^4??N+'Jbz?C9aؐRt*_Ht* ^Va IF%LYMקfSt\V tUe%c%~m[66]B}ꉧ+h܀,-ӢO ښإij[zߴ>ʏ*].fHx]ѡΕ<܁[3vvJ3]  sE=c2G5]\yvu#"o8qptF=,Bkuk'kgJHmPRqc)k| QǙnev30W$B?*<'<LvfV(4w,U+uNMv#w7𪈢8Huk?܋o[*53}QXͶ΂(;4[?LuEn+>OGnÍ1ނ dyGMƵ@&TŒY.ur/PWԇstùHZiPy[ \k=N3ȓuָxBlyfjYr?Uh0`RZMXJQJdJ<ޛ4R?_MLƒ("#~m!cm#e{"v:! #賓J\RU1rKZ \YVtsq-%h,; 9c" 5qvCڞ 6Y顨59<[.I0N>7κ[}-װOE7Ѽ,-B2Z+Jzf@w|V& 7\tlvZ#7(tۅzlX㘅aR4 6Zl[tn}?18&4N3w#T,HBNpfZ[}Ks񑱞Jaרȴo S*$ΉByqom1qK)YywV,fWR[Yh틩J'Sl`Z#+[KYtzVo=k=i+a ?m5> stream xuS dBx PybQ< smC|2`13 X NٸjEp8S!|D! (f!&6Ƒ,)| 11Oρ2D8/Bs%ʾW@Y\?W  񸢹Xd7?A._qpp\I ;/#⊾"o1N/RP0A|H  \9x\q% 7>P[6BQ!-n`VfDXbhFE"9vEg} $'r15tIB\ܛBB_Xђ-"p1`J3͌d P܍RLNigSXrf{ i͜TuIq;b6j&?O,ҚuG++G4puRivؐ~^Iċ5#NGP񩒹kw?1tO)8vM(W4Ĝ m905%{Øw^|캡<dD=!bylΘp&//GU,g/k EeINi%^y(%5QH[xտ4gM\H|EfH WR1J?-J |ѕ"r:tҹ~>喜^r=ӯ+tX3O(P]r%!AU@L]G 7JƦ=W?ߡNãzk/3U$IlKg2|ui~%O7荚"650j̶۶*K<UfCca \Msf)G%/|?[rkqUj;bf}/o-xxpl_Iv ٭ҲEm Lާz;eEVWkrDrWSH߲e|zP(.c9:PoJ}Lw=V˩cR)c%5L_( yfs+vLO3 5M.kp n]n%w_jʥ->XeqT3l5.j_Y.? =Iкr1θwd{#gͥ{ZQ V/+ WMYxcz4_V첤AetW?Vjyuڕ|)}.zNVP꺠6O? 4r7kH VW}Aاx|?pn|yb[M 0s+c[Uן6R<=_m8Na~wQ<˜ή{$D\> stream xڭweT\[-5]Bܭi44$kp $HnA-C`xV㧒yx*?L#9Zm i rßT? gA/?ئTeE r*plK ]- {hu_W~AjҞ>̡a`%Xbnf6+Mӏ:R]A1X2+Cic]33^8kIZ!V1o_^ti?A\dόdz $MܥKp񴀟ׂPo܄^j>+CKB{d#U" `KΔ:RE ǟvFm[07f& jc-tF+ō( `23aB?i%6NAb͑~ ig|Rg-H!!)r+EvoނoM`uxsSb!%ԔD֖Fksb6! LsA qripەd 6xl,.n+ojZsz5Q:*q@ A"SRRTG|sϣ׺̾&kkw\Fo~wwcm(}l_` hZ vvRepǍc̻z9 o6%2p׆~o}WA ;l%4L6|~t!&aL e,Fz̒J( VyҾL$prK?#Z]*.tXoſr|/@aبn*-'eJ_ʠEWsI8f}=IRkA!ciq1:h_y??JK\`JZm/Bc>mؖ$)REg?Xm6w/:{-<үoM=H[IT9?>}[Wn8Ċ> }mWBI'9@om>Hnv\'v%^65WUXY UZ+^>d%^T|M^5RSpP5BѩQh@4иMsEqKRdGkG' g!oB?t`\!KTaw#(> %v,Ӄ@{dKMV?qR:ŮWdHo ԭdK~*if.~~ֱ\K*i< & פ|{ w㫲  LF&.\#I쵌wGX>R,c|vx_zԸB#= !91=R}h)s@Pavwf:&QH+Nh!Y`  0M3X=hp 1>{y5=uTԕ۬X<_,G~tdBz)Uf٩Vר-@m|iIdk#j#+5u@ e +ґ!F& FVQ_b5VF\<>P1$8wԚ2]ͦtNiY5*jP8w<9r i**:m"zp.KLns4 \H4!6rʪZ8z]v\s (u,kySQ'Bg5uzUKk~'{z.m5~ج?e.u`Hi 2kuE6.޹ }4GoM>9$"^aW#Fѥ*۞ȥ8elW$B/kR>"I {ʎw`=uvp }\1X=] I5/$lϴ#9Zw~Rďv3(b[-XSb%wa3>QۻB#HO3n,9 S}R%Ui-m̓ 60.+fluN1ZRAg8L6A,;rG毖_Xu'}=NSʦ z-駝 bڕS3O iD%@rR`-VGgkUT_~$Y(,Y>J&qmNw>W=?i0嚆(xW ;bWi, ,mE:%F Q~4 Վe$+$2ݼgM_k/9 æt$Z*ؿ"Gӡ P3]VTvT˳^w=":>jm>G2 _ 1Q' {{Kl|OtrF EMRQH˞9`Ix:6S%SNJly{F0/vy3bf8)6 7|LA$o&PA<|}8i2}MSpJ -Mu6/WYw,N=,l_0$|x?Nt{oO[.דbL>|#0`ć׼rGZ y|9V^)균|%L~ :`TKk,)H:Y@noWmZ{ 7u=fTBbҥuCZ/݈ٚLُtZZ%c,l8BP;]G0A v]-ӆ#u(I:%O6#AΊ!$jo>NCwj Z,ǰ\,杔+XjbKwSTSy.ʒfQ%nX*hƔר-e!0\q-~CwUTJtoH|B#L&tybBkd۟v|FPr98Mo3QxH-@a2j uq(c?58Y~&췓gS3bYk,ef. !K44Iien|Ag&l3UQpMz8cmZ0փdc#tyA:V=I5R7Waܽ&~c^&D[2SR;Š7 ur oՇ.rJsPe96 )g%gs~}O3\xu%]gt[+6r#fŨ31BͪccE3./4% Lg&` hƕ &Qn^^шY ${pٛ-$?Dߣg˩>k)j<{nP]<>ӌOI[1եO؆0ƖqMI:=M((G9kǰ-x^ĶΫbM',_'y<^2s['⼶+RGKѹ'h/а~6\?(AYA}kڅ=WFQDYa(AJyKɁ³F̸_ pI*;lə ;7,{?A"ne9Nqύ!"g).ZEBLC?vFb1.oKqWnN^Hb\R 5nu'k h﹯鳀l|G ٵRB};"]ܯu#N"ƶyڊ&ux4'oi<4_>dhn0*NO%ˮf26)'MSpuۃOEc}>y.vTegg+y\u D`ޱCBfG[EE~8\ fB: oz1AT1+CffeY(Uץ F:=d7ng˜eæȰX8+ R 7"Yv,pDB_ҕ7Tg[l?Dm;Lvtec?6ӥ. dϲCB]"GЀ#yKFR[Z'_1ի[ź.5^xzM*\' ?^(]($nǪ0}(FfsQ"l,SQd>]@mIT?cSjbVr~". 
tmvtnorm/inst/CITATION0000644000176200001440000000160014533002733014250 0ustar liggesuserscitHeader("To cite package tmvtnorm in publications use:")

## R >= 2.8.0 passes package metadata to citation().
if(!exists("meta") || is.null(meta)) meta <- packageDescription("tmvtnorm") year <- sub("-.*", "", meta$Date) note <- sprintf("R package version %s", meta$Version) bibentry( bibtype = "Manual", title = "{tmvtnorm}: Truncated Multivariate Normal and Student t Distribution", author = c(as.person("Stefan Wilhelm"), as.person("Manjunath B G")), year = year, note = note, url = "https://CRAN.R-project.org/package=tmvtnorm", textVersion = paste("Stefan Wilhelm, Manjunath B G (", year, "). tmvtnorm: Truncated Multivariate Normal and Student t Distribution. ", note, ".", sep="") )