tmvtnorm/0000755000176200001440000000000015055376632012154 5ustar liggesuserstmvtnorm/MD50000644000176200001440000000464415055376632012474 0ustar liggesusersd17a3e724495e0708857c944e66a5a47 *DESCRIPTION cf4d0a52cf3c76653ebe34e668f383e2 *NAMESPACE ca648e728d8b5043b21a99e6db4205ce *NEWS ca1eb4bdc33b12c6a59a6908ea43ffa2 *R/bivariate-marginal-density.R 166a71d0a1ce1769eb7dd1a094c45eb7 *R/checkTmvArgs.R 794c0dcf67756b7b9d0de6ab6eba591e *R/dtmvnorm-marginal.R 9d7bfce7a98d353876568da6bdb22416 *R/dtmvnorm.R a17c645dd934398b4640de0fd7f3a54a *R/dtmvt.R bad20aba562fa5350a1b5291fca7e236 *R/mtmvnorm.R c40bf477a1ff02d5f4f70a83b5c2b485 *R/ptmvnorm-marginal.R 391811e35091c605b72f0ae16f355940 *R/ptmvnorm.R 8b709592e4821a482fee717fe7987cbf *R/ptmvt-marginal.R 652d04ae2331c0661cc0b61ff8f39b6c *R/ptmvt.R 2c10f680c0d50127650e636e4613278f *R/qtmvnorm-marginal.R 608ef8db9d92d21acb5a70e39142316c *R/rtmvnorm.R d4f8929005064ca9729b666ba4d1a296 *R/rtmvnorm2.R cb637925ff96579fd3400541333c9d90 *R/rtmvt.R 43cfbe37d3bd1f9bc37b4f37a666c74b *R/tmvnorm-estimation-GMM.R e81d08d5358a28a8d3dfcf5200a8a04d *R/tmvnorm-estimation.R 86d6122a6b1347ad7e01ad45c114f2de *README.md cd455640d28bb34f3f67216972f1bffe *build/vignette.rds 05f0324f7a52081a7faca3fe125d2879 *demo/00Index 43befa36b93301eb64f743780e3ceac8 *demo/demo1.R 8cb92af9f083a0e444c58f5c82d79ace *demo/demo2.R 6f0a77f6c83ea4528fca2ba6f7c86f2a *inst/CITATION 4888713acfa7437d004bda25a2881fce *inst/doc/GibbsSampler.Rnw a05ba9e0d2b649d76217a8b2d5434207 *inst/doc/GibbsSampler.pdf d1f23c2a2d60f9bffa9ac5e6a6cb0991 *man/dmvnorm.marginal.Rd 8a150cc866ec4050391cfa67b2338991 *man/dtmvnorm.marginal2.Rd 6eaba0c191da0713b906af250775dbbc *man/dtmvt.Rd df642147d3d90de431be577e52e76cb3 *man/gmm.tmvnorm.Rd 1827e59a9f4bfa78a87b4115a64c4ea5 *man/mle.tmvnorm.Rd 85ca9833690d110eefb7225485af713e *man/mtmvnorm.Rd b774db13d8f8cd2cc45095571f6db206 *man/ptmvnorm.Rd c48a6ae141dea1f55e97ea86275ee3f5 *man/ptmvnorm.marginal.Rd 34b4439bf4d1b08488f1145d3e13befa *man/ptmvt.Rd 
80c6c48214efbd8ad412677db32f5531 *man/qtmvnorm-marginal.Rd af6420244ef46e734fcabca86ecc77fb *man/rtmvnorm.Rd 6679f9d0fc409cd57555b9a2f0af55d2 *man/rtmvnorm2.Rd 819475b0d7403d8675597a99debc3771 *man/rtmvt.Rd 56ff9ee8d7d9d645a83ccf47d5fe5b1e *man/tmvnorm.Rd ae25215cedbedbe2d9de559d8f4f5d66 *src/Fortran2CWrapper.c b250052c55dffdfaf7f35a9f61fb22d0 *src/Makevars 6c686fcd8fa8d9f531ddd7d67484722a *src/init.c 36b6e5d6d9569aa0771c5173f60e7027 *src/linked_list.f90 ce89e576587c7fc5af00ac60244b9bde *src/rtmvnormgibbs.f90 4888713acfa7437d004bda25a2881fce *vignettes/GibbsSampler.Rnw 28065219dbfddba2e85a093c5a456c95 *vignettes/tmvtnorm.bib tmvtnorm/R/0000755000176200001440000000000014360222632012341 5ustar liggesuserstmvtnorm/R/rtmvt.R0000644000176200001440000002102014532763264013647 0ustar liggesusers# Sampling from Truncated multivariate t distribution using # # a) Rejection sampling # b) Gibbs sampling # # Author: Stefan Wilhelm, Manjunath B G # # Literatur: # (1) Rejection Sampling : None # (2) Gibbs Sampling : # Geweke (1991) "Efficient simulation from the multivariate normal and Student-t distributions # subject to linear constraints and the evaluation of constraint probabilities" ############################################################################### rtmvt <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), algorithm=c("rejection", "gibbs"), ...) 
{ algorithm <- match.arg(algorithm) # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check of additional arguments : n and df if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) { stop("n must be a integer scalar > 0") } if (df < 1 || !is.numeric(df) || length(df) > 1) { stop("df must be a numeric scalar > 0") } if (algorithm == "rejection") { if (df != as.integer(df)) stop("Rejection sampling currenly works only for integer degrees of freedom. Consider using algorithm='gibbs'.") retval <- rtmvt.rejection(n, mean, sigma, df, lower, upper) } else if (algorithm == "gibbs") { retval <- rtmvt.gibbs(n, mean, sigma, df, lower, upper, ...) } return(retval) } # Erzeugt eine Matrix X (n x k) mit Zufallsrealisationen aus einer Trunkierten Multivariaten t Verteilung # mit k Dimensionen # ueber Rejection Sampling aus einer Multivariaten t-Verteilung # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param sigma Kovarianzmatrix (k x k) der Normalverteilung # @param df degrees of freedom parameter # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper rtmvt.rejection <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean))) { # No check of input parameters, checks are done in rtmvnorm()! 
# k = Dimension k <- length(mean) # mean as (1 x k) matrix mmean <- matrix(mean, 1, k) # Ergebnismatrix (n x k) Y <- matrix(NA, n, k) # Anzahl der noch zu ziehenden Samples numSamples <- n # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- 0 # Akzeptanzrate alpha aus der Multivariaten t-Verteilung bestimmen alpha <- pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df) if (alpha <= 0.01) warning("Acceptance rate is very low and rejection sampling becomes inefficient. Consider using Gibbs sampling.") # Ziehe wiederholt aus der Multivariaten Student-t und schaue, wieviel Samples nach Trunkierung uebrig bleiben while(numSamples > 0) { # Erzeuge N/alpha Samples aus einer multivariaten Normalverteilung: Wenn alpha zu niedrig ist, wird Rejection Sampling ineffizient und N/alpha zu gross. Dann nur N erzeugen nproposals <- ifelse (numSamples/alpha > 1000000, numSamples, ceiling(max(numSamples/alpha,10))) X <- rmvt(nproposals, sigma=sigma, df=df) # SW: rmvt() hat keinen Parameter delta # add mean : t(t(X) + mean) oder so: for (i in 1:k) { X[,i] = mean[i] + X[,i] } # Bestimme den Anteil der Samples nach Trunkierung # Bug: ind= rowSums(lower <= X & X <= upper) == k # wesentlich schneller als : ind=apply(X, 1, function(x) all(x >= lower & x<=upper)) ind <- logical(nproposals) for (i in 1:nproposals) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } # Anzahl der akzeptierten Samples in diesem Durchlauf numAcceptedSamples <- length(ind[ind==TRUE]) # Wenn nix akzeptiert wurde, dann weitermachen if (length(numAcceptedSamples) == 0 || numAcceptedSamples == 0) next #cat("numSamplesAccepted=",numAcceptedSamples," numSamplesToDraw = ",numSamples,"\n") numNeededSamples <- min(numAcceptedSamples, numSamples) Y[(numAcceptedSamplesTotal+1):(numAcceptedSamplesTotal+numNeededSamples),] <- X[which(ind)[1:numNeededSamples],] # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- numAcceptedSamplesTotal + numAcceptedSamples # Anzahl der 
verbliebenden Samples numSamples <- numSamples - numAcceptedSamples } Y } # Gibbs sampler for the truncated multivariate Student-t # see Geweke (1991) # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der t-Verteilung # @param sigma Kovarianzmatrix (k x k) der t-Verteilung # @param df degrees of freedom parameter # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvt.gibbs <- function (n=1, mean=rep(0, ncol(sigma)), sigma = diag(length(mean)), df=1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # dimension of X k = length(mean) # Mean Vector mu = mean # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Ergebnismatrix X (n x k) # Random sample from truncated Student-t density X <- matrix(NA, n, k) # Realisation from truncated multivariate normal Z <- numeric(k) # Chi-Square variable w w <- numeric(1) # x is one realisation from truncated Student-t density conditioned on Z and w x <- numeric(k) # Take start value given by user or use random start value if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(start.valueupper)) stop("start value is not inside support region") Z <- start.value - mu } else { # If no start value is specified, # the initial value/start value for Z drawn from TN(0,\Sigma) # with truncation point a = a-mu and b = b-mu Z <- rtmvnorm(1, mean=rep(0,k), sigma=sigma, lower=lower-mu, upper=upper-mu, algorithm="gibbs") } # Algorithm begins : # Draw from Uni(0,1) U <- runif((S + n*thinning) * k) indU <- 1 # Index for accessing U # List of conditional 
standard deviations can be pre-calculated sd <- list(k) # List of t(Sigma_i) %*% solve(Sigma) term P <- list(k) for(i in 1:k) { # Partitioning of Sigma Sigma <- sigma[-i,-i] # (k-1) x (k-1) sigma_ii <- sigma[i,i] # 1 x 1 Sigma_i <- sigma[i,-i] # (k-1) x 1 P[[i]] <- t(Sigma_i) %*% solve(Sigma) sd[[i]] <- sqrt(sigma_ii - P[[i]] %*% Sigma_i) } for(i in (1-S):(n*thinning)) { # Step 1: Simulation of w conditional on Z from Chi-square distribution by rejection sampling # so that (lower - mu) * w <= Z <= (upper - mu) * w acceptedW <- FALSE while (!acceptedW) { w <- (rchisq(1, df, ncp=0)/df)^(1/2) acceptedW <- all((lower - mu) * w <= Z & Z <= (upper - mu) * w) } # Transformed Chi-Square sample subject to condition on Z0 alpha <- (lower - mu) * w beta <- (upper - mu) * w # Step 2: Simulation from Truncated normal Gibbs sampling approach for(j in 1:k) { mu_j <- P[[j]] %*% (Z[-j]) Fa <- pnorm( (lower[j]-mu[j])*w, mu_j, sd[[j]]) Fb <- pnorm( (upper[j]-mu[j])*w, mu_j, sd[[j]]) Z[j] <- mu_j + sd[[j]] * qnorm(U[indU] * (Fb - Fa) + Fa) # changed on 22nd February 2010 by Manju indU <- indU + 1 } # Step 3: Student-t transformation x <- mu + ( Z / w ) if (i > 0) { if (thinning == 1) { # no thinning, take all samples except for burn-in-period X[i,] <- x } else if (i %% thinning == 0){ X[i %/% thinning,] <- x } } } return(X) } # Ziehe aus einer multi-t-Distribution ohne Truncation X <- rtmvt.rejection(n=10000, mean=rep(0, 3), df=2) # Teste mit Kolmogoroff-Smirnoff-Test auf Verteilung tmvtnorm/R/mtmvnorm.R0000644000176200001440000002067114532765050014360 0ustar liggesusers# Expectation and covariance matrix computation # based on the algorithms by Lee (1979), Lee (1983), Leppard and Tallis (1989) # and Manjunath and Wilhelm (2009) # # References: # Amemiya (1973) : Regression Analysis When the Dependent Variable is Truncated Normal # Amemiya (1974) : Multivariate Regression and Simultaneous Equations Models When the Dependent Variables Are Truncated Normal # Lee (1979) : On the first 
and second moments of the truncated multi-normal distribution and a simple estimator # Lee (1983) : The Determination of Moments of the Doubly Truncated Multivariate Tobit Model # Leppard and Tallis (1989) : Evaluation of the Mean and Covariance of the Truncated Multinormal # Manjunath B G and Stefan Wilhelm (2009): # Moments Calculation for the Doubly Truncated Multivariate Normal Distribution # Johnson/Kotz (1972) # Compute truncated mean and truncated variance in the case # where only a subset of k < n x_1,..,x_k variables are truncated. # In this case, computations simplify and we only have to deal with k-dimensions. # Example: n=10 variables but only k=3 variables are truncated. # # Attention: Johnson/Kotz (1972), p.70 only works for zero mean vector! # We have to demean all variables first JohnsonKotzFormula <- function(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean))) { # determine which variables are truncated idx <- which(!is.infinite(lower) | !is.infinite(upper)) # index of truncated variables n <- length(mean) k <- length(idx) # number of truncated variables if (k >= n) stop(sprintf("Number of truncated variables (%s) must be lower than total number of variables (%s).", k, n)) if (k == 0) { return(list(tmean=mean, tvar=sigma)) # no truncation } # transform to zero mean first lower <- lower - mean upper <- upper - mean # partitionining of sigma # sigma = [ V11 V12 ] # [ V21 V22 ] V11 <- sigma[idx,idx] V12 <- sigma[idx,-idx] V21 <- sigma[-idx,idx] V22 <- sigma[-idx,-idx] # determine truncated mean xi and truncated variance U11 r <- mtmvnorm(mean=rep(0, k), sigma=V11, lower=lower[idx], upper=upper[idx]) xi <- r$tmean U11 <- r$tvar invV11 <- solve(V11) # V11^(-1) # See Johnson/Kotz (1972), p.70 formula tmean <- numeric(n) tmean[idx] <- xi tmean[-idx] <- xi %*% invV11 %*% V12 tvar <- matrix(NA, n, n) tvar[idx, idx] <- U11 tvar[idx, -idx] <- U11 %*% invV11 %*% V12 
tvar[-idx, idx] <- V21 %*% invV11 %*% U11 tvar[-idx, -idx] <- V22 - V21 %*% (invV11 - invV11 %*% U11 %*% invV11) %*% V12 tmean <- tmean + mean return(list(tmean=tmean, tvar=tvar)) } # Mean and Covariance of the truncated multivariate distribution (double truncation, general sigma, general mean) # # @param mean mean vector (k x 1) # @param sigma covariance matrix (k x k) # @param lower lower truncation point (k x 1) # @param upper upper truncation point (k x 1) # @param doComputeVariance flag whether to compute variance (for performance reasons) mtmvnorm <- function(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), doComputeVariance=TRUE, pmvnorm.algorithm=GenzBretz()) { N <- length(mean) # Check input parameters cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check number of truncated variables; if only a subset of variables is truncated # we can use the Johnson/Kotz formula together with mtmvnorm() # determine which variables are truncated idx <- which(!is.infinite(lower) | !is.infinite(upper)) # index of truncated variables k <- length(idx) # number of truncated variables if (k < N) { return(JohnsonKotzFormula(mean=mean, sigma=sigma, lower=lower, upper=upper)) } # Truncated Mean TMEAN <- numeric(N) # Truncated Covariance matrix TVAR <- matrix(NA, N, N) # Verschiebe die Integrationsgrenzen um -mean, damit der Mittelwert 0 wird a <- lower - mean b <- upper - mean lower <- lower - mean upper <- upper - mean # eindimensionale Randdichte F_a <- numeric(N) F_b <- numeric(N) zero_mean <- rep(0,N) # pre-calculate one-dimensial marginals F_a[q] once for (q in 1:N) { tmp <- dtmvnorm.marginal(xn=c(a[q],b[q]), n = q, mean=zero_mean, sigma=sigma, lower=lower, upper=upper) F_a[q] <- tmp[1] F_b[q] <- tmp[2] } # 1. 
Bestimme E[X_i] = mean + Sigma %*% (F_a - F_b) TMEAN <- as.vector(sigma %*% (F_a - F_b)) if (doComputeVariance) { # TODO: # calculating the bivariate densities is not necessary # in case of conditional independence. # calculate bivariate density only on first use and then cache it # so we can avoid this memory overhead. F2 <- matrix(0, N, N) for (q in 1:N) { for (s in 1:N) { if (q != s) { d <- dtmvnorm.marginal2( xq=c(a[q], b[q], a[q], b[q]), xr=c(a[s], a[s], b[s], b[s]), q=q, r=s, mean=zero_mean, sigma=sigma, lower=lower, upper=upper, pmvnorm.algorithm=pmvnorm.algorithm) F2[q,s] <- (d[1] - d[2]) - (d[3] - d[4]) } } } # 2. Bestimme E[X_i, X_j] # Check if a[q] = -Inf or b[q]=+Inf, then F_a[q]=F_b[q]=0, but a[q] * F_a[q] = NaN and b[q] * F_b[q] = NaN F_a_q <- ifelse(is.infinite(a), 0, a * F_a) # n-dimensional vector q=1..N F_b_q <- ifelse(is.infinite(b), 0, b * F_b) # n-dimensional vector q=1..N for (i in 1:N) { for (j in 1:N) { sum <- 0 for (q in 1:N) { sum <- sum + sigma[i,q] * sigma[j,q] * (sigma[q,q])^(-1) * (F_a_q[q] - F_b_q[q]) if (j != q) { sum2 <- 0 for (s in 1:N) { # this term tt will be zero if the partial correlation coefficient \rho_{js.q} is zero! # even for s == q will the term be zero, so we do not need s!=q condition here tt <- (sigma[j,s] - sigma[q,s] * sigma[j,q] * (sigma[q,q])^(-1)) sum2 <- sum2 + tt * F2[q,s] } sum2 <- sigma[i, q] * sum2 sum <- sum + sum2 } } # end for q TVAR[i, j] <- sigma[i, j] + sum #general mean case: TVAR[i, j] = mean[j] * TMEAN[i] + mean[i] * TMEAN[j] - mean[i] * mean[j] + sigma[i, j] + sum } } # 3. Bestimme Varianz Cov(X_i, X_j) = E[X_i, X_j] - E[X_i]*E[X_j] fuer (0, sigma)-case TVAR <- TVAR - TMEAN %*% t(TMEAN) } else { TVAR = NA } # 4. Rueckverschiebung um +mean fuer (mu, sigma)-case TMEAN <- TMEAN + mean return(list(tmean=TMEAN, tvar=TVAR)) } # Bestimmung von Erwartungswert und Kovarianzmatrix ueber numerische Integration und die eindimensionale Randdichte # d.h. 
# E[X_i] = \int_{a_i}^{b_i}{x_i * f(x_i) d{x_i}} # Var[x_i] = \int_{a_i}^{b_i}{(x_i-\mu_i)^2 * f(x_i) d{x_i}} # Cov[x_i,x_j] = \int_{a_i}^{b_i}\int_{a_j}^{b_j}{(x_i-\mu_i)(x_j-\mu_j) * f(x_i,x_j) d{x_i}d{x_j}} # # Die Bestimmung von E[X_i] und Var[x_i] # Die Bestimmung der Kovarianz Cov[x_i,x_j] benoetigt die zweidimensionale Randdichte. # # # @param mean Mittelwertvektor (k x 1) # @param sigma Kovarianzmatrix (k x k) # @param lower, upper obere und untere Trunkierungspunkte (k x 1) mtmvnorm.quadrature <- function(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean))) { k = length(mean) # Bestimmung des Erwartungswerts/Varianz ?ber numerische Integration expectation <- function(x, n=1) { x * dtmvnorm.marginal(x, n=n, mean=mean, sigma=sigma, lower=lower, upper=upper) } variance <- function(x, n=1) { (x - m.integration[n])^2 * dtmvnorm.marginal(x, n=n, mean=mean, sigma=sigma, lower=lower, upper=upper) } # Determine expectation from one-dimensional marginal distribution using integration # i=1..k m.integration<-numeric(k) for (i in 1:k) { m.integration[i] <- integrate(expectation, lower[i], upper[i], n=i)$value } # Determine variances from one-dimensional marginal distribution using integration # i=1..k v.integration<-numeric(k) for (i in 1:k) { v.integration[i] <- integrate(variance, lower[i], upper[i], n=i)$value } return(list(m=m.integration, v=v.integration)) } tmvtnorm/R/checkTmvArgs.R0000644000176200001440000000313712705241146015053 0ustar liggesuserscheckSymmetricPositiveDefinite <- function(x, name="sigma") { if (!isSymmetric(x, tol = sqrt(.Machine$double.eps))) { stop(sprintf("%s must be a symmetric matrix", name)) } if (NROW(x) != NCOL(x)) { stop(sprintf("%s must be a square matrix", name)) } if (any(diag(x) <= 0)) { stop(sprintf("%s all diagonal elements must be positive", name)) } if (det(x) <= 0) { stop(sprintf("%s must be positive definite", name)) } } # Uses partly 
checks as in mvtnorm:::checkmvArgs! checkTmvArgs <- function(mean, sigma, lower, upper) { if (is.null(lower) || any(is.na(lower))) stop(sQuote("lower"), " not specified or contains NA") if (is.null(upper) || any(is.na(upper))) stop(sQuote("upper"), " not specified or contains NA") if (!is.numeric(mean) || !is.vector(mean)) stop(sQuote("mean"), " is not a numeric vector") if (is.null(sigma) || any(is.na(sigma))) stop(sQuote("sigma"), " not specified or contains NA") if (!is.matrix(sigma)) { sigma <- as.matrix(sigma) } if (NCOL(lower) != NCOL(upper)) { stop("lower and upper have non-conforming size") } checkSymmetricPositiveDefinite(sigma) if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } if (length(lower) != length(mean) || length(upper) != length(mean)) { stop("mean, lower and upper must have the same length") } if (any(lower>=upper)) { stop("lower bound should be strictly less than the upper bound (lower length(mean))) { stop("All elements in margin must be in 1..length(mean).") } # one-dimensional marginal density f_{n}(x_n) if (length(margin) == 1) { return(dtmvnorm.marginal(xn=x, n=margin, mean = mean, sigma = sigma, lower = lower, upper = upper, log = log)) } # for bivariate marginal density f_{q,r}(x_q, x_r) we need q <> r and "x" as (n x 2) matrix if (length(margin) == 2) { if(margin[1] == margin[2]) stop("Two different margins needed for bivariate marginal density.") if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } if(!is.matrix(x) || ncol(x) != 2) stop("For bivariate marginal density x must be either a (n x 2) matrix or a vector of length 2.") # bivariate marginal density f_{q,r}(x_q, x_r) return(dtmvnorm.marginal2(xq=x[,1], xr=x[,2], q=margin[1], r=margin[2], mean = mean, sigma = sigma, lower = lower, upper = upper, log = log)) } } # Check of additional inputs like x if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } # Anzahl der Beobachtungen T <- nrow(x) # check for each row if in support region 
insidesupportregion <- logical(T) for (i in 1:T) { insidesupportregion[i] = all(x[i,] >= lower & x[i,] <= upper & !any(is.infinite(x))) } if(log) { # density value for points inside the support region dvin <- dmvnorm(x, mean=mean, sigma=sigma, log=TRUE) - log(pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma)) # density value for points outside the support region dvout <- -Inf } else { dvin <- dmvnorm(x, mean=mean, sigma=sigma, log=FALSE) / pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma) dvout <- 0 } f <- ifelse(insidesupportregion, dvin, dvout) return(f) } #dtmvnorm(x=c(0,0)) #dtmvnorm(x=c(0,0), sigma=diag(2)) #dtmvnorm(x=c(0,0), mean=c(0,0), sigma=diag(2)) #dmvnorm(x=c(0,0), mean=c(0,0), sigma=diag(2)) #dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2)) #dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(0.5, 0.5)) #dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(0.5, 0.5), log=TRUE) #dtmvnorm(as.matrix(seq(-1,2, by=0.1), ncol=1), mean=c(0.5), sigma=as.matrix(1.2^2), lower=0) tmvtnorm/R/rtmvnorm2.R0000644000176200001440000002600014532763352014442 0ustar liggesusers# Checks for lower <= Dx <= upper, where # mean (d x 1), sigma (d x d), D (r x d), x (d x 1), lower (r x 1), upper (r x 1) # Uses partly checks as in mvtnorm:::checkmvArgs! 
# checkTmvArgs2 <- function(mean, sigma, lower, upper, D) { if (is.null(lower) || any(is.na(lower))) stop(sQuote("lower"), " not specified or contains NA") if (is.null(upper) || any(is.na(upper))) stop(sQuote("upper"), " not specified or contains NA") if (!is.numeric(mean) || !is.vector(mean)) stop(sQuote("mean"), " is not a numeric vector") if (is.null(sigma) || any(is.na(sigma))) stop(sQuote("sigma"), " not specified or contains NA") if (is.null(D) || any(is.na(D))) stop(sQuote("D"), " not specified or contains NA") if (!is.matrix(sigma)) { sigma <- as.matrix(sigma) } if (!is.matrix(D)) { D <- as.matrix(D) } if (NCOL(lower) != NCOL(upper)) { stop("lower and upper have non-conforming size") } checkSymmetricPositiveDefinite(sigma) d <- length(mean) r <- length(lower) if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } if (length(lower) != NROW(D) || length(upper) != NROW(D)) { stop("D (r x d), lower (r x 1) and upper (r x 1) have non-conforming size") } if (length(mean) != NCOL(D)) { stop("D (r x d) and mean (d x 1) have non-conforming size") } if (any(lower>=upper)) { stop("lower must be smaller than or equal to upper (lower<=upper)") } # checked arguments cargs <- list(mean=mean, sigma=sigma, lower=lower, upper=upper, D=D) return(cargs) } # Gibbs sampling with general linear constraints a <= Dx <= b # with x (d x 1), D (r x d), a,b (r x 1) requested by Xiaojin Xu [xiaojinxu.fdu@gmail.com] # which allows for (r > d) constraints! 
# @param n Anzahl der Realisationen # @param mean Mittelwertvektor (d x 1) der Normalverteilung # @param sigma Kovarianzmatrix (d x d) der Normalverteilung # @param lower unterer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param upper oberer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param D Matrix for linear constraints, defaults to (d x d) diagonal matrix # @param H Precision matrix (d x d) if given # @param algorithm c("rejection", "gibbs", "gibbsR") rtmvnorm2 <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean)), algorithm=c("gibbs", "gibbsR", "rejection"), ...) { algorithm <- match.arg(algorithm) # check of standard tmvtnorm arguments # Have to change check procedure to handle r > d case cargs <- checkTmvArgs2(mean, sigma, lower, upper, D) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper D <- cargs$D # check of additional arguments if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) { stop("n must be a integer scalar > 0") } if (!identical(D,diag(length(mean)))) { # D <> I : general linear constraints if (algorithm == "gibbs") { # precision matrix case H vs. covariance matrix case sigma will be handled inside method retval <- rtmvnorm.gibbs2.Fortran(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } else if (algorithm == "gibbsR") { # covariance matrix case sigma retval <- rtmvnorm.gibbs2(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } else if (algorithm == "rejection") { retval <- rtmvnorm.rejection(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } return(retval) } else { # for D = I (d x d) forward to normal rtmvnorm() method retval <- rtmvnorm(n, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, ...) 
return(retval) } return(retval) } # Gibbs sampler implementation in R for general linear constraints # lower <= Dx <= upper where D (r x d), x (d x 1), lower, upper (r x 1) # which can handle the case r > d. # # @param n # @param mean # @param sigma # @param D # @param lower # @param upper # @param burn.in.samples # @param start.value # @param thinning rtmvnorm.gibbs2 <- function (n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), D = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) { stop("thinning must be a integer scalar > 0") } d <- length(mean) S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(D %*% start.value < lower || D %*% start.value > upper)) stop("start value does not suffice linear constraints lower <= Dx <= upper") x0 <- start.value } else { x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0)) } if (d == 1) { X <- rtnorm.gibbs(n, mu = mean[1], sigma = sigma[1, 1], a = lower[1], b = upper[1]) return(X) } # number of linear constraints lower/a <= Dx <= upper/b, D (r x n), a,b (r x 1), x (n x 1) r <- nrow(D) X <- matrix(NA, n, d) U <- runif((S + n * thinning) * d) l <- 1 sd <- list(d) P <- list(d) # [ Sigma_11 Sigma_12 ] = [ sigma_{i,i} sigma_{i,-i} ] # [ Sigma_21 Sigma_22 ] [ sigma_{-i,i} sigma_{-i,-i} ] for (i in 1:d) { Sigma_11 <- sigma[i, i] # (1 x 1) Sigma_12 <- sigma[i, -i] # (1 x (d - 1)) Sigma_22 <- sigma[-i, -i] # ((d - 1) x (d - 1)) P[[i]] <- t(Sigma_12) %*% solve(Sigma_22) sd[[i]] <- sqrt(Sigma_11 - P[[i]] %*% Sigma_12) } x <- x0 # for all draws for (j in (1 - S):(n * thinning)) { # for all x[i] for (i in 1:d) { lower_i <- -Inf upper_i <- +Inf # for all 
linear constraints k relevant for variable x[i]. # If D[k,i]=0 then constraint is irrelevant for x[i] for (k in 1:r) { if (D[k,i] == 0) next bound1 <- lower[k]/D[k, i] - D[k,-i] %*% x[-i] /D[k, i] bound2 <- upper[k]/D[k, i] - D[k,-i] %*% x[-i] /D[k, i] if (D[k, i] > 0) { lower_i <- pmax(lower_i, bound1) upper_i <- pmin(upper_i, bound2) } else { lower_i <- pmax(lower_i, bound2) upper_i <- pmin(upper_i, bound1) } } mu_i <- mean[i] + P[[i]] %*% (x[-i] - mean[-i]) F.tmp <- pnorm(c(lower_i, upper_i), mu_i, sd[[i]]) Fa <- F.tmp[1] Fb <- F.tmp[2] x[i] <- mu_i + sd[[i]] * qnorm(U[l] * (Fb - Fa) + Fa) l <- l + 1 } if (j > 0) { if (thinning == 1) { X[j, ] <- x } else if (j%%thinning == 0) { X[j%/%thinning, ] <- x } } } return(X) } rtmvnorm.gibbs2.Fortran <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), D = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # No checks of input arguments, checks are done in rtmvnorm() # dimension of X d <- length(mean) # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Take start value given by user or determine from lower and upper if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (NCOL(D) != length(start.value) || NROW(D) != length(lower) || NROW(D) != length(upper)) stop("D, start.value, lower, upper have non-conforming size") if (any(D %*% start.value < lower || D %*% start.value > upper)) stop("start value must lie in simplex defined by lower <= Dx <= upper") x0 <- start.value } else { stop("Must give start.value with lower <= D start.value <= upper") } # Sample from univariate truncated normal distribution which is very fast. 
if (d == 1) { X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1]) return(X) } # Ergebnismatrix (n x d) X <- matrix(0, n, d) # number of linear constraints lower/a <= Dx <= upper/b, D (r x n), a,b (r x 1), x (n x 1) r <- nrow(D) # Call to Fortran subroutine # TODO: Aufpassen, ob Matrix D zeilen- oder spaltenweise an Fortran uebergeben wird! # Bei sigma ist das wegen Symmetrie egal. ret <- .Fortran("rtmvnormgibbscov2", n = as.integer(n), d = as.integer(d), r = as.integer(r), mean = as.double(mean), sigma = as.double(sigma), C = as.double(D), a = as.double(lower), b = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") X <- matrix(ret$X, ncol=d, byrow=TRUE) return(X) } if (FALSE) { # dimension d=2 # number of linear constraints r=3 > d # linear restrictions a <= Dx <= b with x (d x 1); D (r x d); a,b (r x 1) D <- matrix( c( 1, 1, 1, -1, 0.5, -1), 3, 2, byrow=TRUE) a <- c(0, 0, 0) b <- c(1, 1, 1) # mark linear constraints as lines plot(NA, xlim=c(-0.5, 1.5), ylim=c(-1,1)) for (i in 1:3) { abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") } # Gibbs sampling: # determine lower and upper bounds for each index i given the remaining variables: x[i] | x[-i] ### Gibbs sampling for general linear constraints a <= Dx <= b x0 <- c(0.5, 0.2) sigma <- matrix(c(1, 0.2, 0.2, 1), 2, 2) X <- rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=x0) points(X, pch=20, col="black") X2 <- rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=x0) points(X2, pch=20, col="green") # Rejection sampling (rtmvnorm.rejection) funktioniert bereits mit beliebigen Restriktionen (r > d) X3 <- rtmvnorm.rejection(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b) points(X3, pch=20, col="red") rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=c(-1, 
-1)) colMeans(X) colMeans(X2) } tmvtnorm/R/ptmvt-marginal.R0000644000176200001440000000255714532765421015450 0ustar liggesusers# Verteilungsfunktion fuer die eindimensionale Randdichte f(x_n) # einer Truncated Multivariate Student t Distribution, # by integrating out (n-1) dimensions. # # @param xn Vektor der Laenge l von Punkten, an dem die Verteilungsfunktion ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param df degrees of freedom parameter # @param lower,upper Trunkierungsvektor lower <= x <= upper ptmvt.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), df = 1, lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean))) { # check of standard tmvnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Anzahl der Dimensionen k = length(mean) Fx = numeric(length(xn)) upper2 = upper alpha = pmvt(lower = lower, upper = upper, delta = mean, sigma = sigma, df = df) for (i in 1:length(xn)) { upper2[n] = xn[i] Fx[i] = pmvt(lower=lower, upper=upper2, delta=mean, sigma=sigma, df = df) } return (Fx/alpha) }tmvtnorm/R/ptmvt.R0000644000176200001440000000367714532764606013670 0ustar liggesusers # Verteilungsfunktion der truncated multivariate t distribution # # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper ptmvt <- function( lowerx, upperx, mean=rep(0, length(lowerx)), sigma, df = 1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, 
sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check of additional arguments lowerx and upperx if (is.null(lowerx) || any(is.na(lowerx))) stop(sQuote("lowerx"), " not specified or contains NA") if (is.null(upperx) || any(is.na(upperx))) stop(sQuote("upperx"), " not specified or contains NA") if (!is.numeric(lowerx) || !is.vector(lowerx)) stop(sQuote("lowerx"), " is not a numeric vector") if (!is.numeric(upperx) || !is.vector(upperx)) stop(sQuote("upperx"), " is not a numeric vector") if (length(lowerx) != length(lower) || length(lower) != length(upperx)) stop("lowerx an upperx must have the same length as lower and upper!") if (any(lowerx>=upperx)) stop("lowerx must be smaller than or equal to upperx (lowerx<=upperx)") # Aufpassen: # Wir muessen garantieren, dass nur innerhalb des Support-Bereichs lower <= x <= upper integriert wird. Sonst kann Ergebnis >= 1 rauskommen. # Wenn einzelne Komponenten von lowerx <= lower sind, dann von der Untergrenze lower integrieren. 
Analog fuer upperx >= upper f <- pmvt(lower=pmax(lowerx, lower), upper=pmin(upperx, upper), delta=mean, sigma=sigma, df=df, maxpts = maxpts, abseps = abseps, releps = releps, type="shifted") / pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df, maxpts = maxpts, abseps = abseps, releps = releps, type="shifted") return(f) }tmvtnorm/R/tmvnorm-estimation-GMM.R0000644000176200001440000002443214532763000016763 0ustar liggesusers# Estimation of the parameters # of the truncated multivariate normal distribution using GMM # and # (1) the moment equations from Lee (1981) and Lee (1983) # (2) Our moment formula and equating mean and covariance matrix #library(gmm) #library(tmvtnorm) #source("rtmvnorm.R") # for checkTmvArgs() #source("tmvnorm-estimation.R") # for vec(), vech() and inv_vech() "%w/o%" <- function(x,y) x[!x %in% y] #-- x without y ################################################################################ # # Multivariater Fall # ################################################################################ # Definition einer Funktion mit Momentenbedingungen fuer gmm() # nach den Lee (1979, 1983, 1981) moment conditions # # N dimensions, K = N + N*(N+1)/2 parameters # number of moment conditions L=(l_max + 1) * N # parameter vector tet = c(mu, vech(sigma)), length K # @param tet named parameter vector theta = c(mu, vech(sigma)) # @param x data matrix (T x N) gmultiLee <- function(tet, fixed=c(), fullcoefnames, x, lower, upper, l_max = ceiling((ncol(x)+1)/2), cholesky=FALSE) { fullcoef <- rep(NA, length(tet) + length(fixed)) names(fullcoef) <- fullcoefnames if (any(!names(fixed) %in% names(fullcoef))) stop("some named arguments in 'fixed' are not arguments in parameter vector theta") fullcoef[names(tet)] <- tet fullcoef[names(fixed)] <- fixed K <- length(tet) # Anzahl der zu schaetzenden Parameter N <- ncol(x) # Anzahl der Dimensionen T <- nrow(x) # Anzahl der Beobachtungen #l_max <- ceiling((N+1)/2) # maximales l fuer Momentenbedingungen X <- 
matrix(NA, T, (l_max+1)*N) # Rueckgabematrix mit den Momenten # Parameter mean/sigma aus dem Parametervektor tet extrahieren mean <- fullcoef[1:N] # Matrix fuer sigma bauen if (cholesky) { L <- inv_vech(fullcoef[-(1:N)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(fullcoef[-(1:N)]) } #cat("Call to gmultiLee with tet=",tet," sigma=",sigma," det(sigma)=",det(sigma),"\n") #flush.console() # if sigma is not positive definite we return some maximum value if (det(sigma) <= 0 || any(diag(sigma) < 0)) { X <- matrix(+Inf, T, N + N * (N+1) / 2) return(X) } sigma_inv <- solve(sigma) # inverse Kovarianzmatrix F_a = numeric(N) F_b = numeric(N) F <- 1 for (i in 1:N) { # one-dimensional marginal density in dimension i F_a[i] <- dtmvnorm.marginal(lower[i], n=i, mean=mean, sigma=sigma, lower=lower, upper=upper) F_b[i] <- dtmvnorm.marginal(upper[i], n=i, mean=mean, sigma=sigma, lower=lower, upper=upper) } k <- 1 for(l in 0:l_max) { for (i in 1:N) { sigma_i <- sigma_inv[i,] # i-te Zeile der inversen Kovarianzmatrix (1 x N) = entpricht sigma^{i'} a_il <- ifelse(is.infinite(lower[i]), 0, lower[i]^l) b_il <- ifelse(is.infinite(upper[i]), 0, upper[i]^l) # Lee (1983) moment equation for l #X[,k] <- sigma_i %*% mean * x[,i]^l - (x[,i]^l * x) %*% sigma_inv[,i] + l * (x[,i]^(l-1)) + (a_il * F_a[i] - b_il * F_b[i]) / F X[,k] <- sigma_i %*% mean * x[,i]^l - sweep(x, 1, x[,i]^l, FUN="*") %*% sigma_inv[,i] + l * (x[,i]^(l-1)) + (a_il * F_a[i] - b_il * F_b[i]) / F #T x 1 (1 x N) (N x 1) (T x 1) (T x N) (N x 1) (T x 1) (skalar) k <- k + 1 # Zaehlvariable } } return(X) } # Definition einer Funktion mit Momentenbedingungen # mit Mean and Covariance-Matrix bauen anstatt mit Lee Bedingungen # # @param tet named parameter vector theta, should be part of c(vec(mu), vech(sigma)) # @param fixed a named list of fixed parameters # @param fullcoefnames # @param x data matrix (T x N) # @param lower # @param upper # 
@param cholesky flag whether we use Cholesky decompostion Sigma = LL' # of the covariance matrix in order to ensure positive-definiteness of sigma gmultiManjunathWilhelm <- function(tet, fixed=c(), fullcoefnames, x, lower, upper, cholesky=FALSE) { fullcoef <- rep(NA, length(tet) + length(fixed)) names(fullcoef) <- fullcoefnames if (any(!names(fixed) %in% names(fullcoef))) stop("some named arguments in 'fixed' are not arguments in parameter vector theta") fullcoef[names(tet)] <- tet fullcoef[names(fixed)] <- fixed N <- ncol(x) # Anzahl der Dimensionen T <- nrow(x) # Anzahl der Beobachtungen X <- matrix(NA, T, N + N * (N+1) / 2) # Rueckgabematrix mit den Momenten # Parameter mean/sigma aus dem Parametervektor tet extrahieren mean <- fullcoef[1:N] # Matrix f?r sigma bauen if (cholesky) { L <- inv_vech(fullcoef[-(1:N)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(fullcoef[-(1:N)]) } #cat("Call to gmultiManjunathWilhelm with tet=",tet," fullcoef=", fullcoef, " sigma=",sigma," det(sigma)=",det(sigma),"\n") #flush.console() # if sigma is not positive definite we return some maximum value if (det(sigma) <= 0 || any(diag(sigma) < 0)) { X <- matrix(+Inf, T, N + N * (N+1) / 2) return(X) } # Determine moments (mu, sigma) for parameters mean/sigma # experimental: moments <- mtmvnorm(mean=mean, sigma=sigma, lower=lower, upper=upper, doCheckInputs=FALSE) moments <- mtmvnorm(mean=mean, sigma=sigma, lower=lower, upper=upper) # Momentenbedingungen fuer die Elemente von mean : mean(x) for(i in 1:N) { X[,i] <- (moments$tmean[i] - x[,i]) } # Momentenbedingungen fuer alle Lower-Diagonal-Elemente von sigma k <- 1 for (i in 1:N) { for (j in 1:N) { # (1,1), (2, 1), (2,2) if (j > i) next #cat(sprintf("sigma[%d,%d]",i, j),"\n") X[,(N+k)] <- (moments$tmean[i] - x[,i]) * (moments$tmean[j] - x[,j]) - moments$tvar[i, j] k <- k + 1 } } return(X) } # GMM estimation method # # @param X data matrix (T x n) # 
@param lower, upper truncation points # @param start list of start values for mu and sigma # @param fixed a list of fixed parameters # @param method either "ManjunathWilhelm" or "Lee" moment conditions # @param cholesky flag, if TRUE, we use the Cholesky decomposition of sigma as parametrization # @param ... additional parameters passed to gmm() gmm.tmvnorm <- function(X, lower=rep(-Inf, length = ncol(X)), upper=rep(+Inf, length = ncol(X)), start=list(mu=rep(0,ncol(X)), sigma=diag(ncol(X))), fixed=list(), method=c("ManjunathWilhelm","Lee"), cholesky=FALSE, ... ) { method <- match.arg(method) # check of standard tmvtnorm arguments cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check if we have at least one sample if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } # verify dimensions of x and lower/upper match n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ",length(lower)," columns.") } # check if lower <= X <= upper for all rows ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } # parameter vector theta theta <- c(start$mu, vech2(start$sigma)) # names for mean vector elements : mu_i nmmu <- paste("mu_",1:n,sep="") # names for sigma elements : sigma_i.j nmsigma <- paste("sigma_",vech2(outer(1:n,1:n, paste, sep=".")),sep="") names(theta) <- c(nmmu, nmsigma) fullcoefnames <- names(theta) # use only those parameters without the fixed parameters for gmm(), # since I do not know how to specify fixed=c() in gmm() theta2 <- theta[names(theta) %w/o% names(fixed)] # define a wrapper function with only 2 arguments theta and x (f(theta, x)) # that will be invoked by gmm() gManjunathWilhelm <- function(tet, x) { gmultiManjunathWilhelm(tet=tet, fixed=unlist(fixed), fullcoefnames=fullcoefnames, x=x, lower=lower, upper=upper, cholesky=cholesky) } # TODO: Allow for l_max parameter for Lee moment conditions gLee <- function(tet, x) { gmultiLee(tet = tet, fixed = unlist(fixed), fullcoefnames = fullcoefnames, x = x, lower = lower, upper = upper, cholesky = cholesky) } if (method == "ManjunathWilhelm") { gmm.fit <- gmm(gManjunathWilhelm, x=X, t0=theta2, ...) } else { gmm.fit <- gmm(gLee, x=X, t0=theta2, ...) } return(gmm.fit) } # deprecated # GMM mit Lee conditions gmm.tmvnorm2 <- function (X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), cholesky = FALSE, ...) { cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ", length(lower), " columns.") } ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i, ] >= lower & X[i, ] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } theta <- c(start$mu, vech2(start$sigma)) nmmu <- paste("mu_", 1:n, sep = "") nmsigma <- paste("sigma_", vech2(outer(1:n, 1:n, paste, sep = ".")), sep = "") names(theta) <- c(nmmu, nmsigma) fullcoefnames <- names(theta) theta2 <- theta[names(theta) %w/o% names(fixed)] gmultiwrapper <- function(tet, x) { gmultiLee(tet = tet, fixed = unlist(fixed), fullcoefnames = fullcoefnames, x = x, lower = lower, upper = upper, cholesky = cholesky) } gmm.fit <- gmm(gmultiwrapper, x = X, t0 = theta2, ...) return(gmm.fit) } tmvtnorm/R/bivariate-marginal-density.R0000644000176200001440000001601114532764701017707 0ustar liggesusers# SW: This method is private. It is the same as mvtnorm::dmvnorm() function, # but without sanity checks for sigma. We perform the sanity checks before. .dmvnorm <- function (x, mean, sigma, log = FALSE) { if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } distval <- mahalanobis(x, center = mean, cov = sigma) logdet <- sum(log(eigen(sigma, symmetric = TRUE, only.values = TRUE)$values)) logretval <- -(ncol(x) * log(2 * pi) + logdet + distval)/2 if (log) return(logretval) exp(logretval) } # Computation of the bivariate marginal density F_{q,r}(x_q, x_r) (q != r) # of truncated multivariate normal distribution # following the works of Tallis (1961), Leppard and Tallis (1989) # # References: # Tallis (1961): # "The Moment Generating Function of the Truncated Multi-normal Distribution" # Leppard and Tallis (1989): # "Evaluation of the Mean and Covariance of the Truncated Multinormal" # Manjunath B G and Stefan Wilhelm (2009): # "Moments Calculation for the Doubly Truncated Multivariate Normal Distribution" # # (n-2) Integral, d.h. zweidimensionale Randdichte in Dimension q und r, # da (n-2) Dimensionen rausintegriert werden. 
# vgl. Tallis (1961), S.224 und Code Leppard (1989), S.550 # # f(xq=b[q], xr=b[r]) # # Attention: Function is not vectorized at the moment! # Idee: Vektorisieren xq, xr --> die Integration Bounds sind immer verschieden, # pmvnorm() kann nicht vektorisiert werden. Sonst spart man schon ein bisschen Overhead. # Der eigentliche bottleneck ist aber pmvnorm(). # Gibt es Unterschiede bzgl. der verschiedenen Algorithmen GenzBretz() vs. Miwa()? # pmvnorm(corr=) kann ich verwenden # # @param xq # @param xr # @param q index for dimension q # @param r Index for Dimension r # @param mean # @param sigma # @param lower # @param upper # @param log=FALSE dtmvnorm.marginal2 <- function(xq, xr, q, r, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE, pmvnorm.algorithm=GenzBretz()) { # dimensionality n <- nrow(sigma) # number of xq values delivered N <- length(xq) # input checks if (n < 2) stop("Dimension n must be >= 2!") # TODO: Check eventuell rauslassen # SW; isSymmetric is sehr teuer #if (!isSymmetric(sigma, tol = sqrt(.Machine$double.eps))) { #if (!isTRUE(all.equal(sigma, t(sigma))) || any(diag(sigma) < 0)) { # stop("sigma must be a symmetric matrix") #} if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } if (!(q %in% 1:n && r %in% 1:n)) { stop("Indexes q and r must be integers in 1:n") } if (q == r) { stop("Index q must be different than r!") } # Skalierungsfaktor der gestutzten Dichte (Anteil nach Trunkierung) # Idee: dtmvnorm.marginal2() braucht 80% der Zeit von mtmvnorm(). Die meiste Zeit davon in pmvnorm(). # pmvnorm()-Aufrufe sind teuer, daher koennte man das alpha schon vorher berechnen # lassen (nur 2 pmvnorm()-Aufrufe in der Methode, wuerde 50% sparen) # Da Methode jetzt vektorisiert ist, sparen wir die Aufrufe wg. 
alpha alpha <- pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma, algorithm=pmvnorm.algorithm) if (n == 2) { density <- numeric(N) indOut <- xq < lower[q] | xq > upper[q] | xr < lower[r] | xr > upper[r] | is.infinite(xq) | is.infinite(xr) density[indOut] <- 0 # dmvnorm() macht auch viele Checks; Definiere eine private Methode .dmvnorm() ohne Checks density[!indOut] <- .dmvnorm(x=cbind(xq, xr)[!indOut,], mean=mean[c(q,r)], sigma=sigma[c(q,r),c(q,r)]) / alpha if (log == TRUE) { return(log(density)) } else { return(density) } } # standard deviation for normalisation SD <- sqrt(diag(sigma)) # normalised bounds lower.normalised <- (lower - mean) / SD upper.normalised <- (upper - mean) / SD xq.normalised <- (xq - mean[q]) / SD[q] # (N x 1) xr.normalised <- (xr - mean[r]) / SD[r] # (N x 1) # Computing correlation matrix R from sigma (matrix (n x n)): # R = D % sigma %*% D with diagonal matrix D as sqrt(sigma) # same as cov2cor() D <- matrix(0, n, n) diag(D) <- sqrt(diag(sigma))^(-1) R <- D %*% sigma %*% D # # Determine (n-2) x (n-2) correlation matrix RQR # RQR <- matrix(NA, n-2, n-2) RINV <- solve(R) WW <- matrix(NA, n-2, n-2) M1 <- 0 for (i in 1:n) { if (i != q && i != r) { M1 <- M1 + 1 M2 <- 0 for (j in 1:n) { if (j != q && j != r) { M2 <- M2 + 1 WW[M1, M2] <- RINV[i,j] } } } } WW <- solve(WW[1:(n-2),1:(n-2)]) for(i in 1:(n-2)) { for(j in 1:(n-2)) { RQR[i, j] <- WW[i, j] / sqrt(WW[i,i] * WW[j,j]) } } # # Determine bounds of integration vector AQR and BQR (n - 2) x 1 # # lower and upper integration bounds AQR <- matrix(NA, N, n-2) BQR <- matrix(NA, N, n-2) M2 <- 0 # counter = 1..(n-2) for (i in 1:n) { if (i != q && i != r) { M2 <- M2 + 1 BSQR <- (R[q, i] - R[q, r] * R[r, i]) / (1 - R[q, r]^2) BSRQ <- (R[r, i] - R[q, r] * R[q, i]) / (1 - R[q, r]^2) RSRQ <- (1 - R[i, q]^2) * (1 - R[q, r]^2) RSRQ <- (R[i, r] - R[i, q] * R[q, r]) / sqrt(RSRQ) # partial correlation coefficient R[r,i] given q # lower integration bound AQR[,M2] <- (lower.normalised[i] - BSQR * 
xq.normalised - BSRQ * xr.normalised) / sqrt((1 - R[i, q]^2) * (1 - RSRQ^2)) AQR[,M2] <- ifelse(is.nan(AQR[,M2]), -Inf, AQR[,M2]) # upper integration bound BQR[,M2] <- (upper.normalised[i] - BSQR * xq.normalised - BSRQ * xr.normalised) / sqrt((1 - R[i, q]^2) * (1 - RSRQ^2)) BQR[,M2] <- ifelse(is.nan(BQR[,M2]), Inf, BQR[,M2]) } } # Correlation matrix for r and q R2 <- matrix(c( 1, R[q,r], R[q,r], 1), 2, 2) sigma2 <- sigma[c(q,r),c(q,r)] density <- ifelse ( xq < lower[q] | xq > upper[q] | xr < lower[r] | xr > upper[r] | is.infinite(xq) | is.infinite(xr), 0, { # SW: RQR is a correlation matrix, so call pmvnorm(...,corr=) which is faster than # pmvnorm(...,corr=) # SW: Possibly vectorize this loop if pmvnorm allows vectorized lower and upper bounds prob <- numeric(N) # (N x 1) for (i in 1:N) { if ((n - 2) == 1) { # univariate case: pmvnorm(...,corr=) does not work, will work with sigma= prob[i] <- pmvnorm(lower=AQR[i,], upper=BQR[i,], sigma=RQR, algorithm=pmvnorm.algorithm) } else { prob[i] <- pmvnorm(lower=AQR[i,], upper=BQR[i,], corr=RQR, algorithm=pmvnorm.algorithm) } } dmvnorm(x=cbind(xq, xr), mean=mean[c(q,r)], sigma=sigma2) * prob / alpha } ) if (log == TRUE) { return(log(density)) } else { return(density) } } tmvtnorm/R/ptmvnorm.R0000644000176200001440000000361714532765325014371 0ustar liggesusers # Verteilungsfunktion der truncated multivariate normal distribution # # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper ptmvnorm <- function(lowerx, upperx, mean=rep(0, length(lowerx)), sigma, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check of additional arguments lowerx and upperx if (is.null(lowerx) || 
any(is.na(lowerx))) stop(sQuote("lowerx"), " not specified or contains NA") if (is.null(upperx) || any(is.na(upperx))) stop(sQuote("upperx"), " not specified or contains NA") if (!is.numeric(lowerx) || !is.vector(lowerx)) stop(sQuote("lowerx"), " is not a numeric vector") if (!is.numeric(upperx) || !is.vector(upperx)) stop(sQuote("upperx"), " is not a numeric vector") if (length(lowerx) != length(lower) || length(lower) != length(upperx)) stop("lowerx an upperx must have the same length as lower and upper!") if (any(lowerx>=upperx)) stop("lowerx must be smaller than or equal to upperx (lowerx<=upperx)") # Aufpassen: # Wir muessen garantieren, dass nur innerhalb des Support-Bereichs lower <= x <= upper integriert wird. Sonst kann Ergebnis >= 1 rauskommen. # Wenn einzelne Komponenten von lowerx <= lower sind, dann von der Untergrenze lower integrieren. Analog fuer upperx >= upper f <- pmvnorm(lower=pmax(lowerx, lower), upper=pmin(upperx, upper), mean=mean, sigma=sigma, maxpts = maxpts, abseps = abseps, releps = releps) / pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma, maxpts = maxpts, abseps = abseps, releps = releps) return(f) } tmvtnorm/R/ptmvnorm-marginal.R0000644000176200001440000000271114532764254016153 0ustar liggesusers# Verteilungsfunktion fuer die eindimensionale Randdichte f(xn) einer Truncated Multivariate Normal Distribution, # vgl. 
Jack Cartinhour (1990) "One-dimensional marginal density functions of a truncated multivariate normal density function" fuer die Dichtefunktion # # @param xn Vektor der Laenge l von Punkten, an dem die Verteilungsfunktion ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param lower,upper Trunkierungsvektor lower <= x <= upper ptmvnorm.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean))) { # check of standard tmvnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Anzahl der Dimensionen k = length(mean) Fx = numeric(length(xn)) upper2 = upper alpha = pmvnorm(lower = lower, upper = upper, mean = mean, sigma = sigma) for (i in 1:length(xn)) { upper2[n] = xn[i] Fx[i] = pmvnorm(lower=lower, upper=upper2, mean=mean, sigma=sigma) } return (Fx/alpha) } tmvtnorm/R/rtmvnorm.R0000644000176200001440000007005114216151574014361 0ustar liggesusers################################################################################ # # Sampling from Truncated multivariate Gaussian distribution using # # a) Rejection sampling # b) Gibbs sampler # # for both rectangular constraints a <= x <= b and general linear constraints # a <= Dx <= b. For D = I this implies rectangular constraints. # The method can be used using both covariance matrix sigma and precision matrix H. # # Author: Stefan Wilhelm # # References: # (1) Jayesh H. Kotecha and Petar M. 
Djuric (1999) : # "GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE GAUSSIAN RANDOM VARIABLES" # (2) Geweke (1991): # "Effcient simulation from the multivariate normal and Student-t distributions # subject to linear constraints and the evaluation of constraint probabilities" # (3) John Geweke (2005): Contemporary Bayesian Econometrics and Statistics, Wiley, pp.171-172 # (4) Wilhelm (2011) package vignette to package "tmvtnorm" # ################################################################################ # We need this separate method rtmvnorm.sparseMatrix() because # rtmvnorm() initialises dense d x d sigma and D matrix which will not work for high dimensions d. # It also does some sanity checks on sigma and D (determinant etc.) which will not # work for high dimensions. # returns a matrix X (n x d) with random draws # from a truncated multivariate normal distribution with d dimensionens # using Gibbs sampling # # @param n Anzahl der Realisationen # @param mean mean vector (d x 1) der Normalverteilung # @param lower lower truncation vector (d x 1) with lower <= x <= upper # @param upper upper truncation vector (d x 1) with lower <= x <= upper # @param H precision matrix (d x d) if given, defaults to identity matrix rtmvnorm.sparseMatrix <- function(n, mean = rep(0, nrow(H)), H = sparseMatrix(i=1:length(mean), j=1:length(mean), x=1), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), ...) { if (is.null(H) || !inherits(H, "sparseMatrix")) { stop("H must be of class 'sparseMatrix'") } rtmvnorm.gibbs.Fortran(n, mean, sigma=NULL, H, lower, upper, ...) } # Erzeugt eine Matrix X (n x d) mit Zufallsrealisationen # aus einer Trunkierten Multivariaten Normalverteilung mit d Dimensionen # ?ber Rejection Sampling oder Gibbs Sampler aus einer Multivariaten Normalverteilung. # If matrix D is given, it must be a (d x d) full rank matrix. # Therefore this method can only cover the case with only r <= d linear restrictions. 
# For r > d linear restrictions, please see rtmvnorm2(n, mean, sigma, D, lower, upper), # where D can be defined as (r x d). # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (d x 1) der Normalverteilung # @param sigma Kovarianzmatrix (d x d) der Normalverteilung # @param lower unterer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param upper oberer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param D Matrix for linear constraints, defaults to (d x d) diagonal matrix # @param H Precision matrix (d x d) if given # @param algorithm c("rejection", "gibbs", "gibbsR") rtmvnorm <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean)), H = NULL, algorithm=c("rejection", "gibbs", "gibbsR"), ...) { algorithm <- match.arg(algorithm) if (is.null(mean) && (is.null(sigma) || is.null(H))) { stop("Invalid arguments for ",sQuote("mean")," and ",sQuote("sigma"),"/",sQuote("H"),". 
Need at least mean vector and covariance or precision matrix.") } # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (!is.null(H) && !identical(sigma, diag(length(mean)))) { stop("Cannot give both covariance matrix sigma and precision matrix H arguments at the same time") } else if (!is.null(H) && !inherits(H, "sparseMatrix")) { # check precision matrix H if it is symmetric and positive definite checkSymmetricPositiveDefinite(H, name="H") # H explicitly given, we will override sigma later if we need sigma # sigma <- solve(H) } # else sigma explicitly or implicitly given # check of additional arguments if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) { stop("n must be a integer scalar > 0") } # check matrix D, must be n x n with rank n if (!is.matrix(D) || det(D) == 0) { stop("D must be a (n x n) matrix with full rank n!") } if (!identical(D,diag(length(mean)))) { # D <> I : general linear constraints retval <- rtmvnorm.linear.constraints(n=n, mean=mean, sigma=sigma, H=H, lower=lower, upper=upper, D=D, algorithm=algorithm, ...) return(retval) } else { # D == I : rectangular case if (algorithm == "rejection") { if (!is.null(H)) { # precision matrix case H retval <- rtmvnorm.rejection(n, mean, sigma=solve(H), lower, upper, ...) } else { # covariance matrix case sigma retval <- rtmvnorm.rejection(n, mean, sigma, lower, upper, ...) } } else if (algorithm == "gibbs") { # precision matrix case H vs. covariance matrix case sigma will be handled inside method retval <- rtmvnorm.gibbs.Fortran(n, mean, sigma, H, lower, upper, ...) } else if (algorithm == "gibbsR") { if (!is.null(H)) { # precision matrix case H retval <- rtmvnorm.gibbs.Precision(n, mean, H, lower, upper, ...) } else { # covariance matrix case sigma retval <- rtmvnorm.gibbs(n, mean, sigma, lower, upper, ...) 
} } } return(retval) } # Erzeugt eine Matrix X (n x k) mit Zufallsrealisationen # aus einer Trunkierten Multivariaten Normalverteilung mit k Dimensionen # ?ber Rejection Sampling aus einer Multivariaten Normalverteilung mit der Bedingung # lower <= Dx <= upper # # Wenn D keine Diagonalmatrix ist, dann ist gelten lineare Restriktionen f?r # lower <= Dx <= upper (siehe Geweke (1991)) # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param sigma Kovarianzmatrix (k x k) der Normalverteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param D Matrix for linear constraints, defaults to diagonal matrix rtmvnorm.rejection <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean))) { # No check of input parameters, checks are done in rtmvnorm()! # k = Dimension k <- length(mean) # Ergebnismatrix (n x k) Y <- matrix(NA, n, k) # Anzahl der noch zu ziehenden Samples numSamples <- n # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- 0 # Akzeptanzrate alpha aus der Multivariaten Normalverteilung bestimmen r <- length(lower) d <- length(mean) if (r == d & identical(D, diag(d))) { alpha <- pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma) if (alpha <= 0.01) warning(sprintf("Acceptance rate is very low (%s) and rejection sampling becomes inefficient. Consider using Gibbs sampling.", alpha)) estimatedAlpha <- TRUE } else { # TODO: Wie bestimme ich aus lower <= Dx <= upper f?r r > d Restriktionen die Akzeptanzrate alpha? # Defere calculation of alpha. Assume for now that all samples will be accepted. 
alpha <- 1 estimatedAlpha <- FALSE } # Ziehe wiederholt aus der Multivariaten NV und schaue, wieviel Samples nach Trunkierung ?brig bleiben while(numSamples > 0) { # Erzeuge N/alpha Samples aus einer multivariaten Normalverteilung: Wenn alpha zu niedrig ist, wird Rejection Sampling ineffizient und N/alpha zu gro?. Dann nur N erzeugen nproposals <- ifelse (numSamples/alpha > 1000000, numSamples, ceiling(max(numSamples/alpha,10))) X <- rmvnorm(nproposals, mean=mean, sigma=sigma) # Bestimme den Anteil der Samples nach Trunkierung # Bug: ind= rowSums(lower <= X & X <= upper) == k # wesentlich schneller als : ind=apply(X, 1, function(x) all(x >= lower & x<=upper)) X2 <- X %*% t(D) ind <- logical(nproposals) for (i in 1:nproposals) { ind[i] <- all(X2[i,] >= lower & X2[i,] <= upper) } # Anzahl der akzeptierten Samples in diesem Durchlauf numAcceptedSamples <- length(ind[ind==TRUE]) # Wenn nix akzeptiert wurde, dann weitermachen if (length(numAcceptedSamples) == 0 || numAcceptedSamples == 0) next if (!estimatedAlpha) { alpha <- numAcceptedSamples / nproposals if (alpha <= 0.01) warning(sprintf("Acceptance rate is very low (%s) and rejection sampling becomes inefficient. Consider using Gibbs sampling.", alpha)) } #cat("numSamplesAccepted=",numAcceptedSamples," numSamplesToDraw = ",numSamples,"\n") numNeededSamples <- min(numAcceptedSamples, numSamples) Y[(numAcceptedSamplesTotal+1):(numAcceptedSamplesTotal+numNeededSamples),] <- X[which(ind)[1:numNeededSamples],] # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- numAcceptedSamplesTotal + numAcceptedSamples # Anzahl der verbliebenden Samples numSamples <- numSamples - numAcceptedSamples } Y } # Gibbs Sampler for Truncated Univariate Normal Distribution # # Jayesh H. Kotecha and Petar M. 
Djuric (1999) : GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE GAUSSIAN RANDOM VARIABLES # # Im univariaten Fall sind die erzeugten Samples unabh?ngig, # deswegen gibt es hier keine Chain im eigentlichen Sinn und auch keinen Startwert/Burn-in/Thinning. # # As a change to Kotecha, we do not draw a sample x from the Gaussian Distribution # and then apply pnorm(x) - which is uniform - but rather draw directly from the # uniform distribution u ~ U(0, 1). # # @param n number of realisations # @param mu mean of the normal distribution # @param sigma standard deviation # @param a lower truncation point # @param b upper truncation point rtnorm.gibbs <- function(n, mu=0, sigma=1, a=-Inf, b=Inf) { # Draw from Uni(0,1) F <- runif(n) #Phi(a) und Phi(b) Fa <- pnorm(a, mu, sd=sigma) Fb <- pnorm(b, mu, sd=sigma) # Truncated Normal Distribution, see equation (6), but F(x) ~ Uni(0,1), # so we directly draw from Uni(0,1) instead of doing: # x <- rnorm(n, mu, sigma) # y <- mu + sigma * qnorm(pnorm(x)*(Fb - Fa) + Fa) y <- mu + sigma * qnorm(F * (Fb - Fa) + Fa) y } # Gibbs Sampler Implementation in R for Truncated Multivariate Normal Distribution # (covariance case with sigma) # Jayesh H. Kotecha and Petar M. 
Djuric (1999) : # GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE # GAUSSIAN RANDOM VARIABLES # # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param sigma Kovarianzmatrix (k x k) der Normalverteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= Dx <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= Dx <= upper # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvnorm.gibbs <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # We check only additional arguments like "burn.in.samples", "start.value" and "thinning" if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) { stop("thinning must be a integer scalar > 0") } # dimension of X d <- length(mean) # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Take start value given by user or determine from lower and upper if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(start.value < lower) || any(start.value > upper)) stop("start value is not inside support region") x0 <- start.value } else { # Start value from support region, may be lower or upper bound, if they are finite, # if both are infinite, we take 0. x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0)) } # Sample from univariate truncated normal distribution which is very fast. 
if (d == 1) { X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1]) return(X) } # Ergebnismatrix (n x k) X <- matrix(NA, n, d) # Draw from Uni(0,1) U <- runif((S + n*thinning) * d) l <- 1 # List of conditional standard deviations can be pre-calculated sd <- list(d) # List of t(Sigma_i) %*% solve(Sigma) term P <- list(d) for(i in 1:d) { # Partitioning of Sigma Sigma <- sigma[-i,-i] # (d-1) x (d-1) sigma_ii <- sigma[i,i] # 1 x 1 Sigma_i <- sigma[i,-i] # 1 x (d-1) P[[i]] <- t(Sigma_i) %*% solve(Sigma) # (1 x (d-1)) * ((d-1) x (d-1)) = (1 x (d-1)) sd[[i]] <- sqrt(sigma_ii - P[[i]] %*% Sigma_i) # (1 x (d-1)) * ((d-1) x 1) } x <- x0 # Runn chain from index (1 - #burn-in-samples):(n*thinning) and only record samples from j >= 1 # which discards the burn-in-samples for (j in (1-S):(n*thinning)) { # For all dimensions for(i in 1:d) { # Berechnung von bedingtem Erwartungswert und bedingter Varianz: # bedingte Varianz h?ngt nicht von x[-i] ab! mu_i <- mean[i] + P[[i]] %*% (x[-i] - mean[-i]) # Transformation F.tmp <- pnorm(c(lower[i], upper[i]), mu_i, sd[[i]]) Fa <- F.tmp[1] Fb <- F.tmp[2] x[i] <- mu_i + sd[[i]] * qnorm(U[l] * (Fb - Fa) + Fa) l <- l + 1 } if (j > 0) { if (thinning == 1) { # no thinning, take all samples except for burn-in-period X[j,] <- x } else if (j %% thinning == 0){ X[j %/% thinning,] <- x } } } return(X) } # R-Implementation of Gibbs sampler with precision matrix H # # @param n number of random draws # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param H Precision matrix (k x k) der Normalverteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvnorm.gibbs.Precision <- function(n, mean = rep(0, nrow(H)), H = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = 
length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1)
{
  # We check only additional arguments like "burn.in.samples", "start.value" and "thinning"
  if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) {
    stop("thinning must be a integer scalar > 0")
  }

  # dimension of X
  d <- length(mean)

  # number of burn-in samples
  S <- burn.in.samples
  if (!is.null(S)) {
    if (S < 0) stop("number of burn-in samples must be non-negative")
  }

  # Take start value given by user or determine from lower and upper
  if (!is.null(start.value)) {
    if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size")
    # FIX: the two comparison operators had been lost here ("start.valueupper"
    # is a syntax error, apparently from markup stripping of "<...>");
    # restored the element-wise support-region check exactly as in
    # rtmvnorm.gibbs() above.
    if (any(start.value < lower) || any(start.value > upper)) stop("start value is not inside support region")
    x0 <- start.value
  } else {
    # Start value from support region, may be lower or upper bound, if they are finite,
    # if both are infinite, we take 0.
    x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0))
  }

  # Sample from univariate truncated normal distribution which is very fast.
  if (d == 1) {
    # NOTE(review): rtnorm.gibbs() documents its 'sigma' argument as a standard
    # deviation, but 1/H[1,1] is a variance -- confirm whether sqrt() is intended.
    X <- rtnorm.gibbs(n, mu=mean[1], sigma=1/H[1,1], a=lower[1], b=upper[1])
    return(X)
  }

  # result matrix (n x d)
  X <- matrix(NA, n, d)

  # Draw from U ~ Uni(0,1) for all iterations we need in advance
  U <- runif((S + n*thinning) * d)
  l <- 1

  # Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H))
  # does not depend on x[-i] and can be precalculated before running the chain.
sd <- sqrt(1 / diag(H)) # start value of the chain x <- x0 # Run chain from index (1 - #burn-in-samples):(n*thinning) and only record samples from j >= 1 # which discards the burn-in-samples for (j in (1-S):(n*thinning)) { # For all dimensions for(i in 1:d) { # conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) mu_i <- mean[i] - (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i]) # draw x[i | -i] from conditional univariate truncated normal distribution with # TN(E[i | -i], sd(i | -i), lower[i], upper[i]) F.tmp <- pnorm(c(lower[i], upper[i]), mu_i, sd[i]) Fa <- F.tmp[1] Fb <- F.tmp[2] x[i] <- mu_i + sd[i] * qnorm(U[l] * (Fb - Fa) + Fa) l <- l + 1 } if (j > 0) { if (thinning == 1) { # no thinning, take all samples except for burn-in-period X[j,] <- x } else if (j %% thinning == 0){ X[j %/% thinning,] <- x } } } return(X) } # Gibbs sampler with compiled Fortran code # Depending on, whether covariance matrix Sigma or precision matrix H (dense or sparse format) # is specified as parameter, we call either # Fortran routine "rtmvnormgibbscov" (dense covariance matrix sigma), # "rtmvnormgibbsprec" (dense matrix H) or "rtmvnormgibbssparseprec" (sparse precision matrix H). # # @param H precision matrix in sparse triplet format (i, j, v) # Memory issues: We want to increase dimension d, and return matrix X will be (n x d) # so if we want to create a large number of random samples X (n x d) with high d then # we will probably also run into memory problems (X is dense). In most MCMC applications, # we only have to create a small number n in high dimensions, # e.g. 1 random sample per iteration (+ burn-in-samples). # In this case we will not experience any problems. 
Users should be aware of choosing n and d appropriately
rtmvnorm.gibbs.Fortran <- function(n,
  mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  H = NULL,
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  burn.in.samples = 0,
  start.value = NULL,
  thinning = 1)
{
  # No checks of input arguments, checks are done in rtmvnorm()

  # dimension of X
  d <- length(mean)

  # number of burn-in samples
  S <- burn.in.samples
  if (!is.null(S)) {
    if (S < 0) stop("number of burn-in samples must be non-negative")
  }

  # Take start value given by user or determine from lower and upper
  if (!is.null(start.value)) {
    if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size")
    # FIX: restored the comparison operators that had been lost
    # ("start.valueupper" is a syntax error, apparently from markup stripping);
    # same support-region check as in rtmvnorm.gibbs() above.
    if (any(start.value < lower) || any(start.value > upper)) stop("start value is not inside support region")
    x0 <- start.value
  } else {
    # Start value from support region, may be lower or upper bound, if they are finite,
    # if both are infinite, we take 0.
    x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0))
  }

  # Sample from univariate truncated normal distribution which is very fast.
if (d == 1) {
    if (!is.null(H)) {
      # FIX: when the precision matrix H is given, the univariate variance is
      # 1/H[1,1]; the previous code used 1/sigma[1,1], i.e. it ignored H
      # entirely (sigma keeps its diag() default in that call path).  This
      # mirrors the d == 1 branch of rtmvnorm.gibbs.Precision().
      # NOTE(review): rtnorm.gibbs() documents 'sigma' as a standard deviation,
      # while a variance is passed here -- confirm whether sqrt() is intended.
      X <- rtnorm.gibbs(n, mu=mean[1], sigma=1/H[1,1], a=lower[1], b=upper[1])
    } else {
      X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1])
    }
    return(X)
  }

  # result matrix (n x d)
  X <- matrix(0, n, d)

  # Call to Fortran subroutine
  if (!is.null(H)){
    if (!inherits(H, "sparseMatrix")) {
      # dense precision matrix H
      ret <- .Fortran("rtmvnormgibbsprec",
        n = as.integer(n),
        d = as.integer(d),
        mean = as.double(mean),
        H = as.double(H),
        lower = as.double(lower),
        upper = as.double(upper),
        x0 = as.double(x0),
        burnin = as.integer(burn.in.samples),
        thinning = as.integer(thinning),
        X = as.double(X),
        NAOK=TRUE, PACKAGE="tmvtnorm")
    } else if (inherits(H, "dgCMatrix")) {
      # H is given in compressed sparse column (csc) representation
      ret <- .Fortran("rtmvnorm_sparse_csc",
        n = as.integer(n),
        d = as.integer(d),
        mean = as.double(mean),
        Hi = as.integer(H@i),
        Hp = as.integer(H@p),
        Hv = as.double(H@x),
        num_nonzero = as.integer(length(H@x)),
        lower = as.double(lower),
        upper = as.double(upper),
        x0 = as.double(x0),
        burnin = as.integer(burn.in.samples),
        thinning = as.integer(thinning),
        X = as.double(X),
        NAOK=TRUE, PACKAGE="tmvtnorm")
    } else {
      # H is given in sparse matrix triplet representation
      # It must be clear that only the upper triangular matrix (i <= j) is passed...
sH <- as(H, "dgTMatrix") # precision matrix as triplet representation # ATTENTION: sH@i and sH@j are zero-based (0..(n-1)), we need it as 1...n ind <- sH@i <= sH@j # upper triangular matrix elements of H[i,j] with i <= j ret <- .Fortran("rtmvnorm_sparse_triplet", n = as.integer(n), d = as.integer(d), mean = as.double(mean), Hi = as.integer(sH@i[ind]+1), Hj = as.integer(sH@j[ind]+1), Hv = as.double(sH@x[ind]), num_nonzero = as.integer(sum(ind)), lower = as.double(lower), upper = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") } } else { ret <- .Fortran("rtmvnormgibbscov", n = as.integer(n), d = as.integer(d), mean = as.double(mean), sigma = as.double(sigma), lower = as.double(lower), upper = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") } X <- matrix(ret$X, ncol=d, byrow=TRUE) return(X) } # Gibbs sampling f?r Truncated Multivariate Normal Distribution # with linear constraints based on Geweke (1991): # This is simply a wrapper function around our rectangular sampling version... 
# # x ~ N(mu, sigma) subject to a <= Dx <= b # # alpha <= z <= beta # mit alpha = a - D * mu, beta = b - D * mu # z ~ N(0, T), T = D Sigma D' # x = mu + D^(-1) z # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der t-verteilung # @param sigma Kovarianzmatrix (k x k) der t-Verteilung # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param D Matrix for linear constraints, defaults to diagonal matrix # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvnorm.linear.constraints <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), H = NULL, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean)), algorithm,...) { # dimension of X d <- length(mean) # check matrix D, must be n x n with rank n if (!is.matrix(D) || det(D) == 0) { stop("D must be a (n x n) matrix with full rank n!") } # create truncated multi-normal samples in variable Z ~ N(0, T) # with alpha <= z <= beta # Parameter-Transformation for given sigma: # x ~ N(mean, sigma) subject to a <= Dx <= b # define z = D x - D mu # alpha <= z <= beta # mit alpha = a - D * mu # beta = b - D * mu # z ~ N(0, T), # T = D Sigma D' # x = mu + D^(-1) z # Parameter-Transformation for given H: # x ~ N(mean, H^{-1}) # precision matrix in z is: # T^{-1} = D'^{-1} H D^{-1} # (AB)^{-1} = B^{-1} %*% A^{-1} alpha <- as.vector(lower - D %*% mean) beta <- as.vector(upper - D %*% mean) Dinv <- solve(D) # D^(-1) if (!is.null(H)) { Tinv <- t(Dinv) %*% H %*% Dinv Z <- rtmvnorm(n, mean=rep(0, d), sigma=diag(d), H=Tinv, lower=alpha, upper=beta, algorithm=algorithm, ...) } else { T <- D %*% sigma %*% t(D) Z <- rtmvnorm(n, mean=rep(0, d), sigma=T, H=NULL, lower=alpha, upper=beta, algorithm=algorithm, ...) 
} # For each z do the transformation # x = mu + D^(-1) z X <- sweep(Z %*% t(Dinv), 2, FUN="+", mean) return(X) } ################################################################################ if (FALSE) { checkSymmetricPositiveDefinite(matrix(1:4, 2, 2), name = "H") lower <- c(-1, -1) upper <- c(1, 1) mean <- c(0.5, 0.5) sigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2) H <- solve(sigma) D <- matrix(c(1, 1, 1, -1), 2, 2) checkSymmetricPositiveDefinite(H, name = "H") # 1. covariance matrix sigma case # 1.1. rectangular case D == I X0 <- rtmvnorm(n=1000, mean, sigma, lower, upper, algorithm="rejection") X1 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) cov(X1) cov(X2) cov(X3) # 1.2. general linear constraints case D <> I X1 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) # 2. precision matrix case H # 2.1. rectangular case D == I X1 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) # 2.2. 
general linear constraints case D <> I X1 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="rejection") X2 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbsR") X3 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbs") par(mfrow=c(2,2)) plot(X1) plot(X2) plot(X3) } tmvtnorm/R/tmvnorm-estimation.R0000644000176200001440000002344414532763171016360 0ustar liggesusers# estimation methods for the parameters of the truncated multivariate normal distribution # # Literatur: # # Amemiya (1974) : Instrumental Variables estimator # Lee (1979) # Lee (1983) # Griffiths (2002) : # "Gibbs Sampler for the parameters of the truncated multivariate normal distribution" # # Stefan Wilhelm, wilhelm@financial.com #library(tmvtnorm) library(stats4) # Hilfsfunktion : VECH() Operator vech=function (x) { # PURPOSE: creates a column vector by stacking columns of x # on and below the diagonal #---------------------------------------------------------- # USAGE: v = vech(x) # where: x = an input matrix #--------------------------------------------------------- # RETURNS: # v = output vector containing stacked columns of x #---------------------------------------------------------- # Written by Mike Cliff, UNC Finance mcliff@unc.edu # CREATED: 12/08/98 #if(!is.matrix(x)) #{ # #} rows = nrow(x) columns = ncol(x); v = c(); for (i in 1:columns) { v = c(v, x[i:rows,i]); } v } # Hilfsfunktion : Operator fuer Namensgebung sigma_i.j (i <= j), d.h. 
wie vech(), nur Zeilenweise vech2 <- function (x) { # PURPOSE: creates a column vector by stacking columns of x # on and below the diagonal #---------------------------------------------------------- # USAGE: v = vech2(x) # where: x = an input matrix #--------------------------------------------------------- # RETURNS: # v = output vector containing stacked columns of x #---------------------------------------------------------- # Written by Mike Cliff, UNC Finance mcliff@unc.edu # CREATED: 12/08/98 rows = nrow(x) columns = ncol(x); v = c(); for (i in 1:rows) { v = c(v, x[i,i:columns]); } v } # Hilfsfunktion : Inverser VECH() Operator inv_vech=function(v) { #---------------------------------------------------------- # USAGE: x = inv_vech(v) # where: v = a vector #--------------------------------------------------------- # RETURNS: # x = a symmetric (m x m) matrix containing de-vectorized elements of v #---------------------------------------------------------- # Anzahl der Zeilen m = -0.5+sqrt(0.5^2+2*length(v)) x = matrix(0,nrow=m,ncol=m) if (length(v) != m*(m+1)/2) { # error stop("v must have m*(m+1)/2 elements") } for (i in 1:m) { #cat("r=",i:m," c=",i,"\n") x[ i:m, i] = v[((i-1)*(m-(i-2)*0.5)+1) : (i*(m-(i-1)*0.5))] x[ i, i:m] = v[((i-1)*(m-(i-2)*0.5)+1) : (i*(m-(i-1)*0.5))] } x } # 1. 
Maximum-Likelihood-Estimation of mu and sigma when truncation points are known # # TODO/Idee: Cholesky-Zerlegung der Kovarianzmatrix als Parametrisierung # # @param X data matrix (T x n) # @param lower, upper truncation points # @param start list of start values for mu and sigma # @param fixed a list of fixed parameters # @param method # @param cholesky flag, if TRUE, we use the Cholesky decomposition of sigma as parametrization # @param lower.bounds lower bounds for method "L-BFGS-B" # @param upper.bounds upper bounds for method "L-BFGS-B" mle.tmvnorm <- function(X, lower=rep(-Inf, length = ncol(X)), upper=rep(+Inf, length = ncol(X)), start=list(mu=rep(0,ncol(X)), sigma=diag(ncol(X))), fixed=list(), method="BFGS", cholesky=FALSE, lower.bounds=-Inf, upper.bounds=+Inf, ...) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check if we have at least one sample if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } # verify dimensions of x and lower/upper match n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ",length(lower)," columns.") } # check if lower <= X <= upper for all rows ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } if ((length(lower.bounds) > 1L || length(upper.bounds) > 1L || lower.bounds[1L] != -Inf || upper.bounds[1L] != Inf) && method != "L-BFGS-B") { warning("bounds can only be used with method L-BFGS-B") method <- "L-BFGS-B" } # parameter vector theta = mu_1,...,mu_n,vech(sigma) if (cholesky) { # if cholesky == TRUE use Cholesky decomposition of sigma # t(chol(sigma)) returns a lower triangular matrix which can be vectorized using vech() theta <- c(start$mu, vech2(t(chol(start$sigma)))) } else { theta <- c(start$mu, vech2(start$sigma)) } # names for mean vector elements : mu_i nmmu <- paste("mu_",1:n,sep="") # names for sigma elements : sigma_ij nmsigma <- paste("sigma_",vech2(outer(1:n,1:n, paste, sep=".")),sep="") names(theta) <- c(nmmu, nmsigma) # negative log-likelihood-Funktion dynamisch definiert mit den formals(), # damit mle() damit arbeiten kann # # Eigentlich wollen wir eine Funktion negloglik(theta) mit einem einzigen Parametersvektor theta. # Die Methode mle() braucht aber eine "named list" der Parameter (z.B. mu_1=0, mu_2=0, sigma_1=2,...) und entsprechend eine # Funktion negloglik(mu1, mu2, sigma1,...) # Da wir nicht vorher wissen, wie viele Parameter zu schaetzen sind, definieren wir die formals() # dynamisch um # # @param x dummy/placeholder argument, will be overwritten by formals() with list of skalar parameters negloglik <- function(x) { nf <- names(formals()) # recover parameter vector from named arguments (mu1=...,mu2=...,sigma11,sigma12 etc). 
# stack all named arguments to parameter vector theta theta <- sapply(nf, function(x) {eval(parse(text=x))}) # mean vector herholen mean <- theta[1:n] # Matrix fuer sigma bauen if (cholesky) { L <- inv_vech(theta[-(1:n)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(theta[-(1:n)]) } # if sigma is not positive definite, return MAXVALUE if (det(sigma) <= 0 || any(diag(sigma) < 0)) { return(.Machine$integer.max) } # Log-Likelihood # Wieso hier nur dmvnorm() : Wegen Dichte = Conditional density f <- -(sum(dmvnorm(X, mean, sigma, log=TRUE)) - nrow(X) * log(pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma))) if (is.infinite(f) || is.na(f)) { # cat("negloglik=",f," for parameter vector ",theta,"\n") # "L-BFGS-B" requires a finite function value, other methods can handle infinte values like +Inf # return a high finite value, e.g. integer.max, so optimize knows this is the wrong place to be # TODO: check whether to return +Inf or .Machine$integer.max, certain algorithms may prefer +Inf, others a finite value #return(+Inf) return(.Machine$integer.max) } f } formals(negloglik) <- theta # for method "L-BFGS-B" pass bounds parameter "lower.bounds" and "upper.bounds" # under names "lower" and "upper" if ((length(lower.bounds) > 1L || length(upper.bounds) > 1L || lower.bounds[1L] != -Inf || upper.bounds[1L] != Inf) && method == "L-BFGS-B") { mle.fit <- eval.parent(substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...))) #mle.call <- substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...)) #mle.fit <- mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...) 
#mle.fit@call <- mle.call return (mle.fit) } else { # we need evaluated arguments in the call for profile(mle.fit) mle.fit <- eval.parent(substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...))) #mle.call <- substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...)) #mle.fit <- mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...) #mle.fit@call <- mle.call return (mle.fit) } } # Beispiel: if (FALSE) { lower=c(-1,-1) upper=c(1, 2) mu =c(0, 0) sigma=matrix(c(1, 0.7, 0.7, 2), 2, 2) # generate random samples X <- rtmvnorm(n=500, mu, sigma, lower, upper) method <- "BFGS" # estimate mu and sigma from random samples # Standard-Startwerte mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper) mle.fit1a <- mle.tmvnorm(X, lower=lower, upper=upper, cholesky=TRUE) mle.fit1b <- mle.tmvnorm(X, lower=lower, upper=upper, method="L-BFGS-B", lower.bounds=c(-1, -1, 0.001, -Inf, 0.001), upper.bounds=c(2, 2, 2, 2, 3)) Rprof("mle.profile1.out") mle.profile1 <- profile(mle.fit1, X, method="BFGS", trace=TRUE) Rprof(NULL) summaryRprof("mle.profile1.out") confint(mle.profile1) par(mfrow=c(2,2)) plot(mle.profile1) summary(mle.fit1) logLik(mle.fit1) vcov(mle.fit1) #TODO: confint(mle.fit1) #profile(mle.fit1) # andere Startwerte, näher am wahren Ergebnis mle.fit2 <- mle.tmvnorm(x=X, lower=lower, upper=upper, start=list(mu=c(0.1, 0.1), sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2))) # --> funktioniert jetzt besser... summary(mle.fit2) # andere Startwerte, nimm mean und Kovarianz aus den Daten (stimmt zwar nicht, ist aber sicher # ein besserer Startwert als 0 und diag(n). 
mle.fit3 <- mle.tmvnorm(x=X, lower=lower, upper=upper,
  start=list(mu=colMeans(X), sigma=cov(X)))
summary(mle.fit3)
}tmvtnorm/R/dtmvt.R0000644000176200001440000000410311765142062013624 0ustar liggesusers# Density function for the truncated multivariate t-distribution
#
# Author: stefan
###############################################################################

# Density function for the truncated multivariate t-distribution
#
# @param x vector or matrix of quantiles (one row per observation)
# @param mean location vector (passed to dmvt()/pmvt() as 'delta')
# @param sigma scale matrix
# @param df degrees of freedom parameter
# @param lower,upper truncation vectors with lower <= x <= upper
# @param log if TRUE, log densities are returned
dtmvt <- function(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1,
  lower= rep( -Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)), log = FALSE){

  # Check of additional inputs like x
  if (is.vector(x)) {
    x <- matrix(x, ncol = length(x))
  }

  # number of observations
  T = nrow(x)

  # check for each row if in support region
  insidesupportregion <- logical(T)
  for (i in 1:T)
  {
    # FIX: the infinity check previously inspected the whole matrix x
    # (!any(is.infinite(x))), so a single infinite entry anywhere marked
    # *every* row as outside the support region; restrict it to row i.
    insidesupportregion[i] = all(x[i,] >= lower & x[i,] <= upper & !any(is.infinite(x[i,])))
  }

  # density value for points outside the support region
  dv = if (log) { -Inf } else { 0 }

  # normalising constant P(lower <= X <= upper)
  p <- as.numeric(pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df, type="shifted"))

  # conditional density
  # FIX: for log = TRUE the previous code divided the *log* density by p;
  # the correct conditional log density is log f(x) - log p.
  if (log) {
    f <- ifelse(insidesupportregion,
      dmvt(x, delta=mean, sigma=sigma, df=df, log=TRUE) - log(p), dv)
  } else {
    f <- ifelse(insidesupportregion,
      dmvt(x, delta=mean, sigma=sigma, df=df, log=FALSE) / p, dv)
  }
  return(f)
}

if (FALSE) {
# Example
x1<-seq(-2, 3, by=0.1)
x2<-seq(-2, 3, by=0.1)

mean=c(0,0)
sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2)
lower=c(-1,-1)

density<-function(x)
{
  z=dtmvt(x, mean=mean, sigma=sigma, lower=lower)
  z
}

fgrid <- function(x, y, f)
{
  z <- matrix(nrow=length(x), ncol=length(y))
  for(m in 1:length(x)){
    for(n in 1:length(y)){
      z[m,n] <- f(c(x[m], y[n]))
    }
  }
  z
}

# compute multivariate-t density d for grid
d=fgrid(x1, x2, function(x) dtmvt(x, mean=mean, sigma=sigma, lower=lower))

# compute multivariate normal density d for grid
d2=fgrid(x1, x2, function(x) dtmvnorm(x, mean=mean, sigma=sigma, lower=lower))

# plot density as contourplot
contour(x1, x2, d, nlevels=5, main="Truncated
Multivariate t Density", xlab=expression(x[1]), ylab=expression(x[2])) contour(x1, x2, d2, nlevels=5, add=TRUE, col="red") abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) } tmvtnorm/R/dtmvnorm-marginal.R0000644000176200001440000001017514532765530016140 0ustar liggesusers# Dichtefunktion und Verteilung einer multivariate truncated normal # # Problem ist die Bestimmung der Randverteilung einer Variablen. # # 1. Im bivariaten Fall kann explizit eine Formel angegeben werden (vgl. Arnold (1993)) # 2. Im multivariaten Fall kann ein Integral angegeben werden (vgl. Horrace (2005)) # 3. Bestimmung der Dichtefunktion ueber das Integral moeglich? # 4. Kann die Verteilungsfunktion pmvnorm() helfen? Kann man dann nach einer Variablen differenzieren? # Literatur: # # Genz, A. (1992). Numerical computation of multivariate normal probabilities. Journal of Computational and Graphical Statistics, 1, 141-150 # Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. Computing Science and Statistics, 25, 400-405 # Horrace (2005). # Jack Cartinhour (1990): One-dimensional marginal density functions of a truncated multivariate normal density function # Communications in Statistics - Theory and Methods, Volume 19, Issue 1 1990 , pages 197 - 203 # Dichtefunktion fuer Randdichte f(xn) einer Truncated Multivariate Normal Distribution, # vgl. 
Jack Cartinhour (1990) "One-dimensional marginal density functions of a truncated multivariate normal density function" # # @param xn Vektor der Laenge l von Punkten, an dem die Randdichte ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param lower,upper Trunkierungsvektor lower <= x <= upper dtmvnorm.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE) { if (NROW(sigma) != NCOL(sigma)) { stop("sigma must be a square matrix") } if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } # Anzahl der Dimensionen k <- length(mean) if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Univariater Fall, vgl. Greene (2003), S.573 if (k == 1) { prob <- pnorm(upper, mean=mean, sd=sqrt(sigma)) - pnorm(lower, mean=mean, sd=sqrt(sigma)) density <- ifelse( lower[1]<=xn & xn<=upper[1], dnorm(xn, mean=mean, sd=sqrt(sigma)) / prob, 0) if (log == TRUE) { return(log(density)) } else { return(density) } } # Standardize sigma to correlation matrix, mean to zero vector # adjust xn, lower, upper #sd <- sqrt(diag(sigma)) #xn <- (xn - mean) / sd #lower <- (lower - mean) / sd #upper <- (upper - mean) / sd #mean <- rep(0, k) #sigma <- cov2cor(sigma) # Kovarianzmatrix; nach Standardisierung Korrelationsmatrix C <- sigma # Inverse Kovarianzmatrix, Precision matrix A <- solve(sigma) # Partitionierung von A und C A_1 <- A[-n,-n] # (n-1) x (n-1) #a_nn <- A[n, n] # 1x1 #a <- A[-n, n] # (n-1) x 1 A_1_inv <- solve(A_1) C_1 <- C[-n,-n] # (n-1) x (n-1) c_nn <- C[n, n] # 1x1 c <- C[-n, n] # (n-1) x 1 # Partitionierung von Mittelwertvektor mu mu <- mean mu_1 <- mean[-n] mu_n <- mean[n] # Skalierungsfaktor der Dichte p <- pmvnorm(lower=lower, upper=upper, 
mean=mu, sigma=C) f_xn <- c() for (i in 1:length(xn)) { if (!(lower[n]<=xn[i] && xn[i]<=upper[n]) || is.infinite(xn[i])) { f_xn[i] <- 0 next } # m(x_n) --> (n-1x1) # Aufpassen bei z.B. m=c(Inf, Inf, NaN) und c=0 m <- mu_1 + (xn[i] - mu_n) * c / c_nn # SW: Possibly optimize with vectorized version of pmvnorm() which accepts different bounds # for univariate density, pmvnorm() does not accept corr= f_xn[i] <- exp(-0.5*(xn[i]-mu_n)^2/c_nn) * pmvnorm(lower=lower[-n], upper=upper[-n], mean=m, sigma=A_1_inv) } density <- 1/p * 1/sqrt(2*pi*c_nn) * f_xn if (log == TRUE) { return(log(density)) } else { return(density) } } tmvtnorm/R/qtmvnorm-marginal.R0000644000176200001440000000267314532763516016163 0ustar liggesusers# Berechnet die Quantile der eindimensionalen Randverteilung ueber uniroot() # # @param p probability # @param interval a vector containing the end-points of the interval to be searched by uniroot. # @param tail specifies which quantiles should be computed. lower.tail gives the quantile x for which P[X <= x] = p, upper.tail gives x with P[X > x] = p and both.tails leads to x with P[-x <= X <= x] = p. # @param n # @param mean # @param sigma # @param lower # @param upper # @param ... additional parameters to uniroot() qtmvnorm.marginal <- function (p, interval = c(-10, 10), tail = c("lower.tail", "upper.tail", "both.tails"), n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), ...) 
{ if (length(p) != 1 || (p <= 0 || p >= 1)) stop(sQuote("p"), " is not a double between zero and one") if (n > length(mean) || n < 1) stop(sQuote("n"), " is not a integer between 1 and ",length(mean)) pfct <- function(q) { switch(tail, both.tails = { low <- lower low[n] <- -abs(q) upp <- upper upp[n] <- abs(q) }, upper.tail = { low <- lower upp <- upper low[n] <- q }, lower.tail = { low <- lower upp <- upper upp[n] <- q }, ) ptmvnorm(low, upp, mean, sigma, lower, upper) - p } qroot <- uniroot(pfct, interval = interval, ...) qroot } tmvtnorm/demo/0000755000176200001440000000000014360222632013064 5ustar liggesuserstmvtnorm/demo/demo1.R0000644000176200001440000000376011212736062014222 0ustar liggesusersrequire(tmvtnorm) library(utils) # Example 1 from Horrace (2005) x1<-seq(-2, 3, by=0.1) x2<-seq(-2, 3, by=0.1) density<-function(x) { sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2) z=dtmvnorm(x, mean=c(0,0), sigma=sigma, lower=c(-1,-1)) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute the density function d=fgrid(x1, x2, density) # plot the density function as Contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate Normal Density", xlab=expression(x[1]), ylab=expression(x[2])) abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) # Example 2: X=rtmvnorm(n=100, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2), lower=c(-Inf,-Inf), upper=c(0,0)) plot(X, xlim=c(-3,3), ylim=c(-3,3), main="Samples from Multivariate Normal Distribution", xlab=expression(x[1]), ylab=expression(x[2])) abline(v=0, lty=2) abline(h=0, lty=2) # Example 3: Profiling of rejection sampling: 10000 samples ~ 0.8 second Rprof("rtmvnorm.out") X=rtmvnorm(n=10000, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2), lower=c(-Inf,-Inf), upper=c(0,0)) Rprof(NULL) summaryRprof("rtmvnorm.out") # Example 4: Profiling of Gibbs sampling: 10000 samples ~ 0.8 second 
Rprof("rtmvnorm.gibbs.out") m = 10 a = rep(-1, m) b = rep(1, m) # Erwartungswert und Kovarianzmatrix erzeugen mu = rep(0, m) sigma = matrix(0.8, m, m) diag(sigma) = rep(1, m) # Akzeptanzrate ausrechnen alpha = pmvnorm(lower=a, upper=b, mean=mu, sigma=sigma) alpha X=rtmvnorm(n=10000, mean=mu, sigma=sigma, lower=a, upper=b, algorithm="gibbs") Rprof(NULL) summaryRprof("rtmvnorm.gibbs.out") # Sampling from non-truncated normal distribution 10000 samples ~ 0.02 second Rprof("rmvnorm.out") X=rmvnorm(n=10000, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2)) Rprof(NULL) summaryRprof("rmvnorm.out") tmvtnorm/demo/demo2.R0000644000176200001440000000112511163723476014226 0ustar liggesuserslibrary(tmvtnorm) library(rgl) # simulate x1, x2, x3 from truncated multivariate normal distribution sigma = matrix(c(1, 0, 0, 0, 1, 0, 0, 0, 1), 3, 3) # not truncated X = rmvnorm(n=2000, mean=c(0,0,0), sigma=sigma) # truncated X2 = rtmvnorm(n=2000, mean=c(0,0,0), sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(0,1,Inf)) # display as 3D scatterplot open3d() plot3d(X[,1], X[,2], X[,3], col="black", size=2, xlab=expression(x[1]), ylab=expression(x[2]), zlab=expression(x[3])) plot3d(X2[,1], X2[,2], X2[,3], col="red", size=2, add=TRUE) tmvtnorm/demo/00Index0000644000176200001440000000017311163722064014221 0ustar liggesusersdemo1 truncated multivariate normal densities demo2 3D scatterplot from a truncated trivariate normal distribution tmvtnorm/NEWS0000644000176200001440000001351115055363550012647 0ustar liggesusers# User visible changes in tmvtnorm package ## changes in tmvtnorm 1.7 (2025-09-01) * Eleminated all CRAN NOTEs during package build process, e.g. Rd \link{} targets missing package anchors ## changes in tmvtnorm 1.6 (2023-12-05) * Changed package encoding from 'latin1' to 'UTF-8'. * Converted the non-ASCII content to ASCII. * Fixed CITATION file ## changes in tmvtnorm 1.5 (2022-03-22) * fixed misleading stop message to "lower bound should be strictly less than the upper bound". 
Reported by Chao Wang [chao-wang@uiowa.edu] * Added README.md * Fixed two warnings/errors for R 4.2.0 in `tmvtnorm::rtmvnorm` input checks ``` 1: In !is.null(H) && sigma != diag(length(mean)) : 'length(x) = 9 > 1' in coercion to 'logical(1)' 2: In start.value < lower || start.value > upper : 'length(x) = 3 > 1' in coercion to 'logical(1)' ``` ## changes in tmvtnorm 1.4-10 (2015-08-24) * Fixed problem with build process in src/Makevars (parallel make) ## changes in tmvtnorm 1.4-9 (2014-03-03) * Moved package vignette to vignettes/ directory to be consistent with R 3.1.0 ## changes in tmvtnorm 1.4-8 (2013-03-29) * bugfix in dtmvnorm(...,margin=NULL). Introduced in 1.4-7. Reported by Julius.Vainora [julius.vainora@gmail.com] * bugfix in rtmvt(..., algorithm="gibbs"): Algorithm="gibbs" was not forwarded properly to rtmvnorm(). Reported by Aurelien Bechler [aurelien.bechler@agroparistech.fr] * allow non-integer degrees of freedom in rtmvt, e.g. rtmvt(..., df=3.2). Suggested by Aurelien Bechler [aurelien.bechler@agroparistech.fr] Rejection sampling does not work with non-integer df, only Gibbs sampling. ## changes in tmvtnorm 1.4-7 (2012-11-29) * new method rtmvnorm2() for drawing random samples with general linear constraints a <= Dx <= b with x (d x 1), D (r x d), a,b (r x 1) which can also handle the case r > d. Requested by Xiaojin Xu [xiaojinxu.fdu@gmail.com] Currently works with Gibbs sampling. * bugfix in dtmvnorm(...,log=TRUE). Reported by John Merrill [john.merrill@gmail.com] * optimization in mtmvnorm() to speed up the calculations * dtmvnorm.marginal2() can now be used with vectorized xq, xr. 
## changes in tmvtnorm 1.4-6 (2012-03-23) * further optimization in mtmvnorm() and implementation of Johnson/Kotz-Formula when only a subset of variables is truncated ## changes in tmvtnorm 1.4-5 (2012-02-13) * rtmvnorm() can be used with both sparse triplet representation and (compressed sparse column) for H * dramatic performance gain in mtmvnorm() through optimization ## changes in tmvtnorm 1.4-4 (2012-01-10) * dramatic performance gain in rtmvnorm.sparseMatrix() through optimization * Bugfix in rtmvnorm() with linear constraints D: (reported by Claudia Köllmann [koellmann@statistik.tu-dortmund.de]) - forwarding "algorithm=" argument from rtmvnorm() to internal methods dealing with linear constraints was corrupt. - sampling with linear constraints D lead to wrong results due to missing t() ## changes in tmvtnorm 1.4-2 (2012-01-04) * Bugfix in rtmvnorm.sparseMatrix(): fixed a memory leak in Fortran code * Added a package vignette with a description of the Gibbs sampler ## changes in tmvtnorm 1.4-1 (2011-12-27) * Allow a sparse precision matrix H to be passed to rtmvnorm.sparseMatrix() which allows random number generation in very high dimensions (e.g. d >> 5000) * Rewritten the Fortran version of the Gibbs sampler for the use with sparse precision matrix H. ## changes in tmvtnorm 1.3-1 (2011-12-01) * Allow for the use of a precision matrix H rather than covariance matrix sigma in rtmvnorm() for both rejection and Gibbs sampling. (requested by Miguel Godinho de Matos from Carnegie Mellon University) * Rewritten both the R and Fortran version of the Gibbs sampler. * GMM estimation in gmm.tmvnorm(,method=c("ManjunathWilhelm","Lee")) can now be done using the Manjunath/Wilhelm and Lee moment conditions. ## changes in tmvtnorm 1.2-3 (2011-06-04) * rtmvnorm() works now with general linear constraints a<= Dx<=b, with x (d x 1), full-rank matrix D (d x d), a,b (d x 1). 
* Implemented with both rejection sampling and Gibbs sampling (Geweke (1991)) * Added GMM estimation in gmm.tmvnorm() * Bugfix in dtmvt() thanks to Jason Kramer: Using type="shifted" in pmvt() (reported by Jason Kramer [jskramer@uci.edu]) ## changes in tmvtnorm 1.1-5 (2010-11-20) * Added Maximum Likelihood estimation method (MLE) mle.tmvtnorm() * optimized mtmvnorm(): precalcuted F_a[i] in a separate loop which improved the computation of the mean, suggested by Miklos.Reiter@sungard.com * added a flag doComputeVariance (default TRUE), so users which are only interested in the mean, can compute only the variance (BTW: this flag does not make sense for the mean, since the mean has to be calculated anyway.) * Fixed a bug with LAPACK and BLAS/FLIBS libraries: Prof. Ripley/Writing R extensions: "For portability, the macros @code{BLAS_LIBS} and @code{FLIBS} should always be included @emph{after} @code{LAPACK_LIBS}." ## changes in tmvtnorm 1.0-2 (2010-01-28) * Added methods for the truncated multivariate t-Distribution : rtmvt(), dtmvt() und ptmvt() and ptmvt.marginal() ## changes in tmvtnorm 0.9-2 (2010-01-03) * Implementation of "thinning technique" for Gibbs sampling: Added parameter thinning=1 to rtmvnorm.gibbs() for thinning of Markov chains, i.e. reducing autocorrelations of random samples * Documenting additional arguments "thinning", "start.value" and "burn.in", for rmvtnorm.gibbs() * Added parameter "burn-in" and "thinning" in the Fortran code for discarding burn-in samples and thinng the Markov chain. 
* Added parameter log=FALSE to dtmvnorm.marginal() * Added parameter margin=NULL to dtmvnorm() as an interface/wrapper to marginal density functions dtmvnorm.marginal() and dtmvnorm.marginal2() * Code polishing and review tmvtnorm/vignettes/0000755000176200001440000000000015055364525014162 5ustar liggesuserstmvtnorm/vignettes/GibbsSampler.Rnw0000644000176200001440000002343314216137300017214 0ustar liggesusers%\VignetteIndexEntry{A short description of the Gibbs Sampler} \documentclass[a4paper]{article} \usepackage{Rd} \usepackage{amsmath} \usepackage{natbib} \usepackage{palatino,mathpazo} \usepackage{Sweave} %\newcommand{\pkg}[1]{\textbf{#1}} \newcommand{\vecb}[1]{\ensuremath{\boldsymbol{\mathbf{#1}}}} \def\bfx{\mbox{\boldmath $x$}} \def\bfy{\mbox{\boldmath $y$}} \def\bfz{\mbox{\boldmath $z$}} \def\bfalpha{\mbox{\boldmath $\alpha$}} \def\bfbeta{\mbox{\boldmath $\beta$}} \def\bfmu{\mbox{\boldmath $\mu$}} \def\bfa{\mbox{\boldmath $a$}} \def\bfb{\mbox{\boldmath $b$}} \def\bfu{\mbox{\boldmath $u$}} \def\bfSigma{\mbox{\boldmath $\Sigma$}} \def\bfD{\mbox{\boldmath $D$}} \def\bfH{\mbox{\boldmath $H$}} \def\bfT{\mbox{\boldmath $T$}} \def\bfX{\mbox{\boldmath $X$}} \def\bfY{\mbox{\boldmath $X$}} \title{Gibbs Sampler for the Truncated Multivariate Normal Distribution} \author{Stefan Wilhelm\thanks{wilhelm@financial.com}} \begin{document} \SweaveOpts{concordance=TRUE} \maketitle In this note we describe two ways of generating random variables with the Gibbs sampling approach for a truncated multivariate normal variable $\bfx$, whose density function can be expressed as: \begin{eqnarray*} f(\bfx,\bfmu,\bfSigma,\bfa,\bfb) & = & \frac{ \exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\}} } { \int_{\bfa}^{\bfb}{\exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\} } d\bfx } } \end{eqnarray*} for $\bfa \le \bfx \le \bfb$ and $0$ otherwise.\\ \par The first approach, as described by \cite{Kotecha1999}, uses the covariance matrix 
$\bfSigma$ and has been implemented in the R package \pkg{tmvtnorm} since version 0.9 (\cite{tmvtnorm-0.9}). The second way is based on the works of \cite{Geweke1991,Geweke2005} and uses the precision matrix $\bfH = \bfSigma^{-1}$. As will be shown below, the usage of the precision matrix offers some computational advantages, since it does not involve matrix inversions and is therefore favorable in higher dimensions and settings where the precision matrix is readily available. Applications are for example the analysis of spatial data, such as from telecommunications or social networks.\\ \par Both versions of the Gibbs sampler can also be used for general linear constraints $\bfa \le \bfD \bfx \le \bfb$, what we will show in the last section. The function \code{rtmvnorm()} in the package \pkg{tmvtnorm} contains the \R{} implementation of the methods described in this note (\cite{tmvtnorm-1.3}). \section{Gibbs Sampler with convariance matrix $\bfSigma$} We describe here a Gibbs sampler for sampling from a truncated multinormal distribution as proposed by \cite{Kotecha1999}. It uses the fact that conditional distributions are truncated normal again. Kotecha use full conditionals $f(x_i | x_{-i}) = f(x_i | x_1,\ldots,x_{i-1},x_{i+1},\ldots,x_{d})$.\\ \par We use the fact that the conditional density of a multivariate normal distribution is multivariate normal again. 
We cite \cite{Geweke2005}, p.171 for the following theorem on the Conditional Multivariate Normal Distribution.\\ Let $\bfz = \left( \begin{array}{c} \bfx \\ \bfy \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_x \\ \bfmu_y \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{xx} & \bfSigma_{xy} \\ \bfSigma_{yx} & \bfSigma_{yy} \end{array} \right]$\\ Denote the corresponding precision matrix \begin{equation} \bfH = \bfSigma^{-1} = \left[ \begin{array}{cc} \bfH_{xx} & \bfH_{xy} \\ \bfH_{yx} & \bfH_{yy} \end{array} \right] \end{equation} Then the distribution of $\bfy$ conditional on $\bfx$ is normal with variance \begin{equation} \bfSigma_{y.x} = \bfSigma_{yy} - \bfSigma_{yx} \bfSigma_{xx}^{-1} \bfSigma_{xy} = \bfH_{yy}^{-1} \end{equation} and mean \begin{equation} \bfmu_{y.x} = \bfmu_{y} + \bfSigma_{yx} \bfSigma_{xx}^{-1} (\bfx - \bfmu_x) = \bfmu_y - \bfH_{yy}^{-1} \bfH_{yx}(\bfx - \bfmu_x) \end{equation} \par In the case of the full conditionals $f(x_i | x_{-i})$, which we will denote as $i.-i$ this results in the following formulas: $\bfz = \left( \begin{array}{c} \bfx_i \\ \bfx_{-i} \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_i \\ \bfmu_{-i} \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{ii} & \bfSigma_{i,-i} \\ \bfSigma_{-i,i} & \bfSigma_{-i,-i} \end{array} \right]$ Then the distribution of $i$ conditional on $-i$ is normal with variance \begin{equation} \bfSigma_{i.-i} = \bfSigma_{ii} - \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} \bfSigma_{-i,i} = \bfH_{ii}^{-1} \end{equation} and mean \begin{equation} \bfmu_{i.-i} = \bfmu_{i} + \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} (\bfx_{-i} - \bfmu_{-i}) = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i}) \end{equation} We can then construct a Markov chain which continously draws from $f(x_i | x_{-i})$ subject to $a_i \le x_i \le b_i$. 
Let $\bfx^{(j)}$ denote the sample drawn at the $j$-th MCMC iteration. The steps of the Gibbs sampler for generating $N$ samples $\bfx^{(1)},\ldots,\bfx^{(N)}$ are: \begin{itemize} \item Since the conditional variance $\bfSigma_{i.-i}$ is independent from the actual realisation $\bfx^{(j)}_{-i}$, we can well precalculate it before running the Markov chain. \item Choose a start value $\bfx^{(0)}$ of the chain. \item In each round $j=1,\ldots,N$ we go from $i=1,\ldots,d$ and sample from the conditional density $x^{(j)}_i | x^{(j)}_1,\ldots,x^{(j)}_{i-1},x^{(j-1)}_{i+1},\ldots,x^{(j-1)}_{d}$. \item Draw a uniform random variate $U \sim Uni(0, 1)$. This is where our approach slightly differs from \cite{Kotecha1999}. They draw a normal variate $y$ and then apply $\Phi(y)$, which is basically uniform. \item We draw from univariate conditional normal distributions with mean $\mu$ and variance $\sigma^2$. See for example \cite{Greene2003} or \cite{Griffiths2004} for a transformation between a univariate normal random $y \sim N(\mu,\sigma^2)$ and a univariate truncated normal variate $x \sim TN(\mu,\sigma^2, a, b)$. 
For each realisation $y$ we can find a $x$ such as $P(Y \le y) = P(X \le x)$: \begin{equation*} \frac{ \Phi \left( \frac{x - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } { \Phi \left( \frac{b - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } = \Phi \left( \frac{y - \mu}{\sigma} \right) = U \end{equation*} \item Draw $\bfx_{i.-i}$ from conditional univariate truncated normal distribution \\ $TN(\bfmu_{i.-i}, \bfSigma_{i.-i}, a_i, b_i)$ by \begin{equation} \begin{split} \bfx_{i.-i} & = \bfmu_{i.-i} + \\ & \sigma_{i.-i} \Phi^{-1} \left[ U \left( \Phi \left( \frac{b_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) - \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right) + \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right] \end{split} \end{equation} \end{itemize} \section{Gibbs Sampler with precision matrix H} The Gibbs Sampler stated in terms of the precision matrix $\bfH = \bfSigma^{-1}$ instead of the covariance matrix $\bfSigma$ is much easier to write and to implement: Then the distribution of $i$ conditional on $-i$ is normal with variance \begin{equation} \bfSigma_{i.-i} = \bfH_{ii}^{-1} \end{equation} and mean \begin{equation} \bfmu_{i.-i} = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i}) \end{equation} Most importantly, if the precision matrix $\bfH$ is known, the Gibbs sampler does only involve matrix inversions of $\bfH_{ii}$ which in our case is a diagonal element/scalar. Hence, from the computational and performance perspective, especially in high dimensions, using $\bfH$ rather than $\bfSigma$ is preferable. When using $\bfSigma$ in $d$ dimensions, we have to solve for $d$ $(d-1) \times (d-1)$ matrices $\bfSigma_{-i,-i}$, $i=1,\ldots,d$, which can be quite substantial computations. \section{Gibbs Sampler for linear constraints} In this section we present the Gibbs sampling for general linear constraints based on \cite{Geweke1991}. 
We want to sample from $\bfx \sim N(\bfmu, \bfSigma)$ subject to linear constraints $\bfa \le \bfD \bfx \le \bfb$ for a full-rank matrix $\bfD$.\\ Defining \begin{equation} \bfz = \bfD \bfx - \bfD \bfmu, \end{equation} we have $E[\bfz] = \bfD E[\bfx] - \bfD \bfmu = 0$ and $Var[\bfz] = \bfD Var[\bfx] \bfD' = \bfD \bfSigma \bfD'$. Hence, this problem can be transformed to the rectangular case $\bfalpha \le \bfz \le \bfbeta$ with $\bfalpha = \bfa - \bfD \bfmu$ and $\bfbeta = \bfb - \bfD \bfmu$. It follows $\bfz \sim N(0, \bfT)$ with $\bfT = \bfD \bfSigma \bfD'$.\\ In the precision matrix case, the corresponding precision matrix of the transformed problem will be $\bfT^{-1} = ( \bfD \bfSigma \bfD' )^{-1} = \bfD'^{-1} \bfH \bfD^{-1}$. We can then sample from $\bfz$ the way described in the previous sections (either with covariance or precision matrix approach) and then transform $\bfz$ back to $\bfx$ by \begin{equation} \bfx = \bfmu + \bfD^{-1} \bfz \end{equation} \bibliographystyle{plainnat} \bibliography{tmvtnorm} \end{document}tmvtnorm/vignettes/tmvtnorm.bib0000644000176200001440000001303711701134134016513 0ustar liggesusers% This file was created with JabRef 2.5. % Encoding: Cp1252 @BOOK{Geweke2005, title = {Contemporary Bayesian Econometrics and Statistics}, publisher = {John Wiley and Sons}, year = {2005}, author = {John F. Geweke}, file = {:John Geweke. Contemporary Bayesian Econometrics and Statistics (Wiley,2005)(ISBN 0471679321)(308s).pdf:PDF}, owner = {stefan}, timestamp = {2007.01.30} } @ELECTRONIC{Geweke1991, author = {John F. Geweke}, year = {1991}, title = {Effcient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities}, howpublished = {http://www.biz.uiowa.edu/faculty/jgeweke/papers/paper47/paper47.pdf}, file = {:Geweke1991.pdf:PDF}, owner = {stefan}, timestamp = {2010.01.22} } @INPROCEEDINGS{Geweke1991a, author = {John F. 
Geweke}, title = {Effcient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints}, booktitle = {Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface. Seattle Washington, April 21-24, 1991}, year = {1991}, pages = {571-578}, file = {:Geweke1991a.pdf:PDF}, owner = {stefan}, timestamp = {2010.02.09} } @BOOK{Greene2003, title = {Econometric Analysis}, publisher = {Prentice-Hall}, year = {2003}, author = {William H. Greene}, edition = {5}, file = {Greene - Econometrics.pdf:Greene - Econometrics.pdf:PDF}, owner = {stefan}, timestamp = {2005.12.13} } @UNPUBLISHED{Griffiths2002, author = {William Griffiths}, title = {A {G}ibbs' Sampler for the Parameters of a Truncated Multivariate Normal Distribution}, note = {University of Melbourne}, year = {2002}, file = {:Griffiths2002.pdf:PDF}, institution = {The University of Melbourne}, number = {856}, owner = {stefan}, timestamp = {2012.01.04}, type = {Department of Economics - Working Papers Series}, url = {http://ideas.repec.org/p/mlb/wpaper/856.html} } @INBOOK{Griffiths2004, chapter = {A {G}ibbs' sampler for the parameters of a truncated multivariate normal distribution}, pages = {75 - 91}, title = {Contemporary Issues In Economics And Econometrics: Theory and Application}, publisher = {Edward Elgar Publishing}, year = {2004}, editor = {Ralf Becker and Stan Hurn}, author = {William E. Griffiths}, journal = {Contemporary issues in economics and econometrics}, owner = {stefan}, timestamp = {2009.09.09} } @INPROCEEDINGS{Kotecha1999, author = {Kotecha, J. H. and Djuric, P. M.}, title = {{G}ibbs sampling approach for generation of truncated multivariate Gaussian random variables}, booktitle = {ICASSP '99: Proceedings of the Acoustics, Speech, and Signal Processing, 1999. 
on 1999 IEEE International Conference}, year = {1999}, pages = {1757--1760}, address = {Washington, DC, USA}, publisher = {IEEE Computer Society}, doi = {http://dx.doi.org/10.1109/ICASSP.1999.756335}, file = {:Kotecha1999.pdf:PDF}, isbn = {0-7803-5041-3}, journal = {IEEE Computer Society}, owner = {stefan}, timestamp = {2009.04.16} } @MANUAL{tmvtnorm-0.7, title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, author = {Stefan Wilhelm}, year = {2009}, note = {R package version 0.7-2}, owner = {stefan}, timestamp = {2009.10.05}, url = {http://www.r-project.org} } @MANUAL{tmvtnorm-1.2, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.2-3}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.3, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.3-1}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.4, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.4-1}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @ARTICLE{RJournal:Wilhelm+Manjunath:2010, author = {Stefan Wilhelm and B. G. 
Manjunath}, title = {{tmvtnorm: A Package for the Truncated Multivariate Normal Distribution}}, journal = {The R Journal}, year = {2010}, volume = {2}, pages = {25--29}, number = {1}, month = {June}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://journal.r-project.org/archive/2010-1/RJournal_2010-1_Wilhelm+Manjunath.pdf} } @MANUAL{tmvtnorm-0.9, title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2010}, note = {R package version 0.9-2}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.1, title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2010}, note = {R package version 1.1-0}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @comment{jabref-meta: selector_publisher:} @comment{jabref-meta: selector_author:} @comment{jabref-meta: selector_journal:} @comment{jabref-meta: selector_keywords:} tmvtnorm/src/0000755000176200001440000000000015055364525012741 5ustar liggesuserstmvtnorm/src/Fortran2CWrapper.c0000644000176200001440000000105011257117724016237 0ustar liggesusers#include #include #include void F77_SUB(rndstart)(void) { GetRNGstate(); } void F77_SUB(rndend)(void) { PutRNGstate(); } double F77_SUB(normrnd)(void) { return norm_rand(); } double F77_SUB(unifrnd)(void) { return unif_rand(); } double F77_SUB(pnormr)(double *x, double *mu, double *sigma, int *lower_tail, int *log_p) { return pnorm(*x, *mu, *sigma, *lower_tail, *log_p); } double F77_SUB(qnormr)(double *p, double *mu, double *sigma, int *lower_tail, int *log_p) { return qnorm(*p, *mu, *sigma, *lower_tail, *log_p); } tmvtnorm/src/init.c0000644000176200001440000000272215055355235014051 0ustar liggesusers#include #include // for NULL #include /* FIXME: Check these declarations against the C/Fortran source code. 
*/ /* .Fortran calls */ extern void F77_NAME(rtmvnorm_sparse_csc)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void F77_NAME(rtmvnorm_sparse_triplet)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void F77_NAME(rtmvnormgibbscov)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void F77_NAME(rtmvnormgibbscov2)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void F77_NAME(rtmvnormgibbsprec)(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); static const R_FortranMethodDef FortranEntries[] = { {"rtmvnorm_sparse_csc", (DL_FUNC) &F77_NAME(rtmvnorm_sparse_csc), 13}, {"rtmvnorm_sparse_triplet", (DL_FUNC) &F77_NAME(rtmvnorm_sparse_triplet), 13}, {"rtmvnormgibbscov", (DL_FUNC) &F77_NAME(rtmvnormgibbscov), 10}, {"rtmvnormgibbscov2", (DL_FUNC) &F77_NAME(rtmvnormgibbscov2), 12}, {"rtmvnormgibbsprec", (DL_FUNC) &F77_NAME(rtmvnormgibbsprec), 10}, {NULL, NULL, 0} }; void R_init_tmvtnorm(DllInfo *dll) { R_registerRoutines(dll, NULL, NULL, FortranEntries, NULL); R_useDynamicSymbols(dll, FALSE); } tmvtnorm/src/rtmvnormgibbs.f900000644000176200001440000007401412704702124016147 0ustar liggesusers! Gibbs sampling from a truncated multinormal distribution ! ! References ! 1. Kotecha et al. (1999): ! Kotecha, J. H. & Djuric, P. M. ! "Gibbs sampling approach for generation of truncated multivariate Gaussian random variables", ! IEEE Computer Society, IEEE Computer Society, 1999, 1757-1760 ! ! 2. Geweke (2005): Contemporary Bayesian Econometrics and ! Statistics. John Wiley and Sons, 2005, pp. 171-172 ! ! ! Code written by Stefan Wilhelm as part of the R package tmvtnorm. ! (http://CRAN.R-project.org/package=tmvtnorm) ! ! To cite package tmvtnorm in publications use: ! ! Stefan Wilhelm, Manjunath B G (2012). tmvtnorm: Truncated ! 
Multivariate Normal Distribution. R package version 1.4-5. ! ! A BibTeX entry for LaTeX users is ! ! @Manual{, ! title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, ! author = {Stefan Wilhelm and Manjunath B G}, ! year = {2012}, ! note = {R package version 1.4-5}, ! url = {http://CRAN.R-project.org/package=tmvtnorm}, ! } ! ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param sigma covariance matrix (d x d) ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbscov(n, d, mean, sigma, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, l, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! Kovarianzmatrix sigma und Partitionen Sigma_i, sigma_ii und S double precision, dimension(d, d) :: sigma double precision, dimension(d, d-1) :: Sigma_i double precision :: sigma_ii double precision, dimension(d-1,d-1) :: S ! S_inv (d-1 x d-1) ist die Inverse von S double precision, dimension(d-1,d-1) :: S_inv ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix double precision, dimension(d, d-1) :: P ! Deklarationen fürs Matrix-Invertieren mit LAPACK-Routinen (Dimension d-1) double precision, dimension( d-1 ) :: work ! ipiv = pivot indices integer, dimension( d-1 ) :: ipiv ! lda = leading dimension integer :: m, lda, lwork, info ! 
initialise R random number generator call rndstart() m =d-1 lda =d-1 lwork=d-1 ind = 0 ! Partitioning of sigma ! sigma = [ sigma_ii Sigma_i ] ! (d x d) [ (1 x 1) (1 x d-1) ] ! [ Sigma_i' S ] ! [ (d-1 x 1) (d-1 x d-1) ] ! List of conditional variances sd(i) can be precalculated do i = 1,d ! subindex "-i" minus_i = (/ (j, j=1,i-1), (j, j=i+1,d) /) S = sigma(minus_i, minus_i) ! Sigma_{-i,-i} : (d-1) x (d-1) sigma_ii = sigma(i,i) ! Sigma_{i,i} : 1 x 1 Sigma_i(i,:) = sigma(i, minus_i) ! Sigma_{i,-i} : 1 x (d-1) ! Matrix S --> S_inv umkopieren do k=1,(d-1) do l=1,(d-1) S_inv(k,l)=S(k,l) end do end do ! Matrix invertieren ! LU-Faktorisierung (Dreieckszerlegung) der Matrix S_inv call dgetrf( m, m, S_inv, lda, ipiv, info ) ! Inverse der LU-faktorisierten Matrix S_inv call dgetri( m, S_inv, lda, ipiv, work, lwork, info ) P(i,:) = pack(matmul(Sigma_i(i,:), S_inv), .TRUE.) ! (1 x d-1) %*% (d-1 x d-1) = (1 x d-1) s2 = 0 do j = 1,d-1 s2 = s2 + P(i,j) * Sigma_i(i,j) end do sd(i) = sqrt(sigma(i,i) - s2) ! (1 x d-1) * (d-1 x 1) --> sd[[i]] ist (1,1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! Berechnung von bedingtem Erwartungswert und bedingter Varianz: ! bedingte Varianz hängt nicht von x[-i] ab! ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! mu_i = mean(i) + P[[i]] %*% (x(-i) - mean(-i)) s3(1:(d-1))= xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i,k) * s3(k) end do mu_i = mean(i) + s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur für j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. 
mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbscov ! Gibbs sampling based on covariance matrix and general linear constraints a <= Cx <= b ! with r >= d linear constraints. C is (r x d), x (d x 1), a,b (r x 1). ! x0 must satisfy the constraints a <= C x0 <= b. ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param r number of linear constraints ! @param mean mean vector of dimension d (d x 1) ! @param sigma covariance matrix (d x d) ! @param C matrix for linear constraints (r x d) ! @param a lower bound for linear constraints (r x 1) ! @param b upper bound for linear constraints (r x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbscov2(n, d, r, mean, sigma, C, a, b, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, r, i, j, k = 1, l, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, sd double precision, dimension(r) :: a, b double precision, dimension(r, d) :: C double precision :: bound1, bound2, lower, upper ! Kovarianzmatrix sigma und Partitionen Sigma_i, sigma_ii und S double precision, dimension(d, d) :: sigma double precision, dimension(d, d-1) :: Sigma_12 double precision :: Sigma_11 double precision, dimension(d-1,d-1) :: Sigma_22 ! Sigma_22_inv (d-1 x d-1) ist die Inverse von Sigma_22 double precision, dimension(d-1,d-1) :: Sigma_22_inv ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix double precision, dimension(d, d-1) :: P ! 
Deklarationen fürs Matrix-Invertieren mit LAPACK-Routinen (Dimension d-1) double precision, dimension( d-1 ) :: work ! ipiv = pivot indices integer, dimension( d-1 ) :: ipiv ! lda = leading dimension integer :: m, lda, lwork, info INTEGER, DIMENSION(1) :: seed seed(1) = 12345 ! initialise R random number generator call rndstart() !CALL RANDOM_SEED !CALL RANDOM_SEED (SIZE=K) ! Sets K = N !CALL RANDOM_SEED (PUT = SEED (1:K)) ! Uses the starting value ! ! given by the user m =d-1 lda =d-1 lwork=d-1 ind = 0 ! Partitioning of sigma ! sigma = [ Sigma_11 Sigma_12 ] = [ Sigma_{i,i} Sigma_{i,-i} ] ! (d x d) [ ] [ (1 x 1) (1 x d-1) ] ! [ Sigma_21 Sigma_22 ] [ Sigma_{-i,i} Sigma_{-i,-i}] ! [ ] [ (d-1 x 1) (d-1 x d-1) ] ! List of conditional variances sd(i) can be precalculated do i = 1,d ! subindex "-i" minus_i = (/ (j, j=1,i-1), (j, j=i+1,d) /) Sigma_22 = sigma(minus_i, minus_i) ! Sigma_{-i,-i} : (d-1) x (d-1) Sigma_11 = sigma(i,i) ! Sigma_{i,i} : 1 x 1 Sigma_12(i,:) = sigma(i, minus_i) ! Sigma_{i,-i} : 1 x (d-1) ! Matrix Sigma_22 --> Sigma_22_inv umkopieren do k=1,(d-1) do l=1,(d-1) Sigma_22_inv(k,l) = Sigma_22(k,l) end do end do ! Matrix invertieren ! LU-Faktorisierung (Dreieckszerlegung) der Matrix S_inv call dgetrf( m, m, Sigma_22_inv, lda, ipiv, info ) ! Inverse der LU-faktorisierten Matrix S_inv call dgetri( m, Sigma_22_inv, lda, ipiv, work, lwork, info ) P(i,:) = pack(matmul(Sigma_12(i,:), Sigma_22_inv), .TRUE.) ! (1 x d-1) %*% (d-1 x d-1) = (1 x d-1) s2 = 0 do j = 1,d-1 s2 = s2 + P(i,j) * Sigma_12(i,j) end do sd(i) = sqrt(sigma(i,i) - s2) ! (1 x d-1) * (d-1 x 1) --> sd[[i]] ist (1,1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d !print '("i=",I3)',i ! Berechnung von bedingtem Erwartungswert und bedingter Varianz: ! bedingte Varianz hängt nicht von x[-i] ab! ! 
subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! mu_i = mean(i) + P[[i]] %*% (x(-i) - mean(-i)) s3(1:(d-1))= xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i,k) * s3(k) end do mu_i = mean(i) + s2 ! TODO: Set to -Inf/+Inf lower = -1000.0d0 upper = 1000d0 ! Determine lower bounds for x[i] using all linear constraints relevant for x[i] do k = 1,r if (C(k,i) == 0 ) then CYCLE end if s2 = dot_product(C(k,minus_i), xr(minus_i)) bound1 = (a(k)- s2) /C(k, i) bound2 = (b(k)- s2) /C(k, i) if (C(k, i) > 0) then lower = max(lower, bound1) upper = min(upper, bound2) else lower = max(lower, bound2) upper = min(upper, bound1) end if end do !print '("mu_i = ",f6.3)', mu_i !print '("sd(i) = ",f6.3)', sd(i) !print '("lower = ",f6.3)', lower !print '("upper = ",f6.3)',upper Fa = pnormr(lower, mu_i, sd(i), 1, 0) Fb = pnormr(upper, mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q !print '("xr(i)=",f6.3)',xr(i) ! Nur für j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbscov2 ! Gibbs sampling based on precision matrix H and a <= x <= b (no linear constraints) ! x,a,b are (d x 1). ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param H precision matrix (d x d) ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! 
@return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbsprec(n, d, mean, H, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d, d) :: H ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix als H[i, -i] double precision, dimension(d, d-1) :: P double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! initialise R random number generator call rndstart() ! initialise Fortran random number generator ! CALL RANDOM_SEED ! SW: I do not know why, but we have to reset ind each time!!! ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do i = 1,d minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) H_inv_ii(i) = (1.0d0 / H(i, i)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) P(i,:) = H(i, minus_i) ! 1 x (d-1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! 
mu_i <- mean[i] (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i]) s3(1:(d-1)) = xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i, k) * s3(k) end do mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur für j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) !call intpr("ind=", 4, ind, 1) !call dblepr("X(ind)=", 7, X(ind), 1) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbsprec ! Gibbs sampling based on precision matrix H and general linear constraints a <= Cx <= b ! with r >= d linear constraints. C is (r x d), x (d x 1), a,b (r x 1). ! x0 must satisfy the constraints a <= C x0 <= b. ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param r number of linear constraints ! @param mean mean vector of dimension d (d x 1) ! @param H precision matrix (d x d) ! @param C matrix for linear constraints (r x d) ! @param a lower bound for linear constraints (r x 1) ! @param b upper bound for linear constraints (r x 1) ! @param x0 start value (d x 1) ! @param burnin number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbsprec2(n, d, r, mean, H, C, a, b, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, r, i, j, k, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d, d) :: H ! 
Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix als H[i, -i] double precision, dimension(d, d-1) :: P double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, sd double precision, dimension(r) :: a, b double precision, dimension(r, d) :: C double precision :: bound1, bound2, lower, upper ! initialise R random number generator call rndstart() ! initialise Fortran random number generator ! CALL RANDOM_SEED ! SW: I do not know why, but we have to reset ind each time!!! ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do i = 1,d minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) H_inv_ii(i) = (1.0d0 / H(i, i)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) P(i,:) = H(i, minus_i) ! 1 x (d-1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! mu_i <- mean[i] (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i]) s3(1:(d-1)) = xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i, k) * s3(k) end do mu_i = mean(i) - H_inv_ii(i) * s2 ! TODO: Set to -Inf/+Inf lower = -1000.0d0 upper = 1000d0 ! 
Determine lower bounds for x[i] using all linear constraints relevant for x[i] do k = 1,r if (C(k,i) == 0 ) then CYCLE end if s2 = dot_product(C(k,minus_i), xr(minus_i)) bound1 = (a(k)- s2) /C(k, i) bound2 = (b(k)- s2) /C(k, i) if (C(k, i) > 0) then lower = max(lower, bound1) upper = min(upper, bound2) else lower = max(lower, bound2) upper = min(upper, bound1) end if end do !print '("mu_i = ",f6.3)', mu_i !print '("sd(i) = ",f6.3)', sd(i) !print '("lower = ",f6.3)', lower !print '("upper = ",f6.3)',upper Fa = pnormr(lower, mu_i, sd(i), 1, 0) Fb = pnormr(upper, mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur für j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) !call intpr("ind=", 4, ind, 1) !call dblepr("X(ind)=", 7, X(ind), 1) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbsprec2 ! populate map (row --> linked list of matrix elements) for with all entries in Hi, Hj and Hv ! if upper_triangular is TRUE, then we assume that only matrix elements with Hi <= Hj are given and we will ! put two elements in the (Hi,Hj,Hv) and (Hj,Hi,Hv) to the list for all Hi <= Hj subroutine populate_map(map, Hi, Hj, Hv, num_nonzero, d, upper_triangular) use linked_list integer :: num_nonzero, d integer, dimension(num_nonzero) :: Hi, Hj double precision, dimension(num_nonzero) :: Hv type(matrixrow), dimension(d), INTENT(INOUT) :: map type(matrixelem) :: newelem integer :: i, k logical :: upper_triangular !allocate(map(d)) ! and allocate our map do i=1,d nullify(map(i)%first) ! "zero out" our list nullify(map(i)%last) enddo ! 
populate map for with all entries in Hi, Hj and Hv do k=1,num_nonzero i = Hi(k) if (upper_triangular) then !if only upper triangular elements (i,j,v) with (i <= j) are given, !insert element (i, j, v) and (j, i, v) für i <> j if (Hi(k) <= Hj(k)) then ! (i, j, v) element newelem%i = Hi(k) newelem%j = Hj(k) newelem%v = Hv(k) call insert_list_element(map(Hi(k)), newelem) end if if (Hi(k) < Hj(k)) then ! (j, i, v) element newelem%i = Hj(k) newelem%j = Hi(k) newelem%v = Hv(k) call insert_list_element(map(Hj(k)), newelem) end if else ! insert all elements given by (Hi, Hj, Hv) newelem%i = Hi(k) newelem%j = Hj(k) newelem%v = Hv(k) call insert_list_element(map(i), newelem) end if enddo end subroutine ! Gibbs sampling of the truncated multivariate normal distribution using a sparse matrix representation of the precision matrix H, ! represented in triplet form ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param Hi,Hj,Hv are the nonzero elements of the precision matrix H (d, d): H(i, j)=v, each a vector having the same length num_nonzero ! @param num_nonzero number of nonzero elements of the precision matrix H ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnorm_sparse_triplet(n, d, mean, Hi, Hj, Hv, num_nonzero, lower, upper, x0, burnin, thinning, X) use linked_list IMPLICIT NONE integer :: n, d, i, j, k, ind = 0, burnin, thinning, num_nonzero ! 
matrix representation of concentration matrix H integer, dimension(num_nonzero) :: Hi, Hj double precision, dimension(num_nonzero) :: Hv double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! in this map we store for every row i the non-zero entries (triplets) as a linked list of matrix elements ! example: i=1 --> (i=1,j=1,v=0.8), (i=1,j=2,v=0.2), (i=1,j=5,v=0.3) etc. ! The list will not be sorted ascending in j, so we can only iterate this list... type(matrixrow), dimension(d) :: map type(matrixelem) :: elem type( node ), pointer :: current ! initialise R random number generator call rndstart() ! initialise Fortran random number generator !CALL RANDOM_SEED ! We have to reset ind each time ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! loop through all elements and look for diagonal elements H[i,i], calculate conditional standard deviations sd(i | -i) ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do k=1,num_nonzero i = Hi(k) j = Hj(k) if (i == j) then H_inv_ii(i) = (1.0d0 / Hv(k)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) end if end do ! populate map with linked lists of matrix elements H[i,j]=v and symmetric element H[j,i]=v call populate_map(map, Hi, Hj, Hv, num_nonzero, d, .TRUE.) ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! s2 will represent the term H[i,-i] (x[-i] - mean[-i]) s2 = 0 ! 
We avoid some n x d x d accesses to hash matrix H even for those elements that are zero... ! For n=30 and d=5000 this results in 30 x 5000 x 5000 = 75 million accesses to matrix H... ! Instead of iterating all (d-1) elements H[i,-i] we only iterate all m (m < d) NON-ZERO elements H[i,-i] which will dramatically reduce the number ! of hashtable accesses. This will scale as n x d x m and will be linear in d for a fixed m. current => map(i)%first do while (associated(current)) elem = current%data ! sum only non-zero H[i,-i] elements in H[i,-i] (x[-i] - mean[-i]) ! no summing for i = j elements! if (elem%j .ne. elem%i) then k = elem%j s2 = s2 + elem%v * (xr(k) - mean(k)) !TODO check end if current => current%next end do ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! we only loop through all non-zero elements in H[i,-i] = all indices j .ne. i in sparse matrix representation H[i,j]=v mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur für j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! deallocate linked list at the end of the program and free memory do i=1,d call free_all(map(i)) nullify(map(i)%first) ! "zero out" our list nullify(map(i)%last) enddo nullify(current) ! reset R random number generator call rndend() end subroutine rtmvnorm_sparse_triplet ! Gibbs sampling of the truncated multivariate normal distribution using a sparse matrix representation of the precision matrix H (d x d). ! ! Instead of using a triplet representation H(i,j)=v, we use the compressed sparse column (csc) format with 3 vectors ! 
Hi : integer vector of row index, length num_nonzero; starting from zero ! Hp : integer vector of pointers, length d + 1; starting from zero; non-decreasing vector ! Hv : double vector of values, length num_nonzero ! ! This format is good at accessing all non-zero elements in one column j ! (and -as in our case- for symmetric matrices also to acess all elements in one row i) ! ! To access an element H(i,j), the following steps are necessary ! j ! v = Hv(Hp(j):Hp(j+1)) ! i = Hi(Hp(j):Hp(j+1)) ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param Hi,Hp,Hv are the nonzero elements of the precision matrix H (d, d): H(i, j)=v, each a vector having the same length num_nonzero ! @param num_nonzero number of nonzero elements of the precision matrix H ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnorm_sparse_csc(n, d, mean, Hi, Hp, Hv, num_nonzero, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, r, ind = 0, burnin, thinning, num_nonzero ! compressed sparse column (csc) matrix representation of concentration matrix H integer, dimension(num_nonzero) :: Hi integer, dimension(d+1) :: Hp double precision, dimension(num_nonzero) :: Hv double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! initialise R random number generator call rndstart() ! initialise Fortran random number generator !CALL RANDOM_SEED ! SW: I do not know why, but we have to reset ind each time!!! ! 
If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! loop through all elements and look for diagonal elements H[i,i], calculate conditional standard deviations sd(i | -i) ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do j=1,d do k=Hp(j),Hp(j+1)-1 ! k from 0..(d-1) i = Hi(k+1) + 1 ! Hi is index from 0..(d-1) --> need index i=1..d if (i == j) then H_inv_ii(i) = (1.0d0 / Hv(k+1)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) end if end do end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) s2 = 0 ! For H[i,-i] (x[-i] - mean[-i]) we need to sum only all non-zero H[i,-i] elements! ! since H is symmetric, we can use the column sparse compressed (csc) format and sum all H[-i,i] elements instead do k=Hp(i),Hp(i+1)-1 ! loop all non-zero elements in column i, k is index 0..(d-1) r = Hi(k+1) + 1 ! row index r in column i is r=1..d if (i .ne. r) then s2 = s2 + Hv(k+1) * (xr(r) - mean(r)) end if end do mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Only retain samples for j > burnin. Default is thinning = 1. ! If thinning>1 do retain only every x-th element if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) ! call intpr("ind=", 4, ind, 1) ! call dblepr("X(ind)=", 7, X(ind), 1) end if end do end do ! 
reset R random number generator call rndend() end subroutine rtmvnorm_sparse_csc tmvtnorm/src/Makevars0000644000176200001440000000013312567105776014440 0ustar liggesusersPKG_LIBS=$(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) all: $(SHLIB) rtmvnormgibbs.o: linked_list.o tmvtnorm/src/linked_list.f900000644000176200001440000000331711700621540015550 0ustar liggesusersmodule linked_list implicit none ! type matrix row, holds a pointer to the root element of the linked list type matrixrow type(node),pointer :: first ! pointer to first node in linked list type(node),pointer :: last ! pointer to last node in linked list end type matrixrow ! matrix element for sparse matrix elements H[i,j]=v type matrixelem integer :: i, j double precision :: v end type matrixelem ! define a linked list of matrix elements type node type(matrixelem) data ! data type(node),pointer::next ! pointer to the ! next element end type node CONTAINS ! insert the new matrix element H[i,j]=v to the linked list of row "i" subroutine insert_list_element(row, newelem) type(matrixrow) :: row type(matrixelem) :: newelem if (.not. associated(row%first)) then allocate(row%first) nullify(row%first%next) row%first%data = newelem row%last => row%first !print *,"added element to linked list i=",newelem%i," j=",newelem%j," v=",newelem%v else allocate(row%last%next) nullify(row%last%next%next) row%last%next%data = newelem row%last => row%last%next !print *,"added element to linked list i=",newelem%i," j=",newelem%j," v=",newelem%v endif end subroutine ! remove all elements of the linked list and free memory subroutine free_all(row) implicit none type(matrixrow) :: row type(node), pointer :: tmp do tmp => row%first if (associated(tmp) .eqv. .FALSE.) 
exit row%first => row%first%next deallocate(tmp) end do end subroutine free_all end module linked_list tmvtnorm/NAMESPACE0000644000176200001440000000077015055355411013367 0ustar liggesusersimportFrom("methods", "as") useDynLib(tmvtnorm, .registration = TRUE) import(stats) import(utils) import(mvtnorm) import(stats4) import(gmm) import(Matrix) export(ptmvnorm) export(rtmvnorm) export(rtmvnorm2) export(rtmvnorm.sparseMatrix) export(dtmvnorm) export(dtmvnorm.marginal) export(dtmvnorm.marginal2) export(qtmvnorm.marginal) export(ptmvnorm.marginal) export(mtmvnorm) export(dtmvt) export(rtmvt) export(ptmvt) export(ptmvt.marginal) export(mle.tmvnorm) export(gmm.tmvnorm)tmvtnorm/inst/0000755000176200001440000000000015055364525013127 5ustar liggesuserstmvtnorm/inst/CITATION0000644000176200001440000000160014533002733014247 0ustar liggesuserscitHeader("To cite package tmvtnorm in publications use:") ## R >= 2.8.0 passes package metadata to citation(). if(!exists("meta") || is.null(meta)) meta <- packageDescription("tmvtnorm") year <- sub("-.*", "", meta$Date) note <- sprintf("R package version %s", meta$Version) bibentry( bibtype = "Manual", title = "{tmvtnorm}: Truncated Multivariate Normal and Student t Distribution", author = c(as.person("Stefan Wilhelm"), as.person("Manjunath B G")), year = year, note = note, url = "https://CRAN.R-project.org/package=tmvtnorm", textVersion = paste("Stefan Wilhelm, Manjunath B G (", year, "). tmvtnorm: Truncated Multivariate Normal and Student t Distribution. 
", note, ".", sep="") ) tmvtnorm/inst/doc/0000755000176200001440000000000015055364525013674 5ustar liggesuserstmvtnorm/inst/doc/GibbsSampler.pdf0000644000176200001440000041124115055376632016746 0ustar liggesusers%PDF-1.5 %¿÷¢þ 1 0 obj << /Type /ObjStm /Length 3192 /Filter /FlateDecode /N 58 /First 451 >> stream xœÅZ×rÛÖ}¿_qÞ®=áô‚;™ÌXÍ–m9²ŠKÊLAbŠPHвóõwíƒ"$¥(JFcõ”µ{L3% 3L[Ï,³R0Çœ3̳  ,õ‚¥LJ¥ÞIå n˜4*à†IpÐLzmñÉðÜÒš옒)Þ{¦”öxÉ”xŸ2eƒÁ ¦¼¥AL…/ÓÂb²fZ œJ{L¶@§0ÉáŒõ•gÚšÄtH±NÊŒ0x)˜‘XGKf´¥E˜±ØOkfphÐ î-3iŠ÷ôZ gì£Î›¥Ì:bŠ`–pa+› ÇŒÂÙaQÍœ2ØÔ0g1ÞXæ˜^;ϰ¤w‹­| ÍS„Å¢‚ŽMX0ÌV,x¼Tl0†¥Æa3 ®§åÀrF@4¸H1Ì|ÅjR(«þóÃŒ?ŸLÊjÆ~,;„,éhãQÕ7.}<†xLã ãIÖ'UŸêed=UÖ ýÆøV9©ò 6rõ,¾ŸŸÙfù ;Ó›ÚDyÚdRšqM1äÇæ³r>å3æ›'Ç߯rvž³¬iatÜfOFE•'¯Ë*]d2MÓ§Œ1þ¢<.1”ñÍrzšO›E„od}ó’ñ=ÚpTá©ò*1à§q&1àœv"QÐ#ã]b\ ñGóÏUÄò¦˜|iqE¾þ›À4òáXp‰†˜¬ºüZMÊéå†H‚ æ8˜¨±*±BCeu"HÕ׈Lj“(2F#kÈÙ¸Dª©ý[<{‘_ç_rÈR>D–6M¬K;\:õ‰†§zl\F¨D¨~¥“ìÁ¸<Óƒ4Ì'Aöp™4ñúA¸: “‰~ÇtBîWk™¤ä}ÝלƒW|0e|â¡ý0—&Θ‡ûG´_‘C H×¼Ž! ©M3<.DJ:\i@L—÷õM Õþã§Ÿ)uI4åFÈ bÉd>c¡fðf6Ëw‘Y0þþÝûƒ??;9üpÇÙiQ¾Ù8,/3Æw&£ò´˜œS†—Ý-¦³jë"›bN³·óÙhZ\U唲©8æMÖ ‘È»nP^‰Sy2)°j#ýd¤Æò¡8­.f”ÙÑ«%¨¯Þì½þ¸u³ŸîUÙøV¸0Þ%¼jˆyð­xåz¼f5Þã÷/Žw¬½+Òà%¬f «¼«ZU¯ÆzôÓÑñá»g[û‡ÈN{pŒX‚cÜ’ú[ÑèõhÔIÿ|´óiûÙAög¹ŸU”‹Q¢Å¶0dÌûVpf=8¹Ü‹W'ŸNÁõaÁ`—`ù¬pËìzTb5ª—;ï¶ß¾_6Ž[•ͬP6¹¤lw²[‹U§}¬ÿJb𦠖mZªŠ{¦[å˜!×Åé¬s¢º©Ç„l+±›²iö¯Fõ bò¯AšC±LÁSRJ+áÈá/ª{D'Tó0Tžêò`/¦y>¡èùüL!/“žz)¨ßàú”E–]×^$Õõ£á²!ñ®‡ ‚uA= WqvVÀ´Í<Œ” 4дFY@½œÇ‡¦JRä>4Ô¾!¤÷ƒ½P¼^P'¬ö{Ò[ÖÆñ]ê=µ¡ 7eíMJݪæ&Hjµ7±ŸÕÞPÆÖÝÖ†]ܤ̷«Á—{ßøA¸i9:ʉP~°½ ðù·ê&-ëzJSUõDU¯¥jºÞSwŽ©kÉfüzDz!uè÷ˆnBÍöևí½.öŠ_‘.¤ƒ€âïˆ'a} ã Š 3~r/ÿqxdÔ—À 2A>È8h›VÆçŒeüšqøÜ`’ržÏ(†`„<Ÿœ“ºàÞ¥Jµ Ilwü”­N¸îG"÷ÔŽ†øÃŽ:ÒNR—ÙÅkúirkä*hµ÷l_Ò:ÓdºŽ a ýâ@zÐþ4MÀÎí9"Ã@:ÓdBwÇ=íH(èžÆëf3:“£mQÆMéyHã|= €Ö"…¦ýélmlÜ3b‘з0ìàq-¥ NÄ{ki ŒÂ¹8OÄ¢æHkzXôtÄž‰n®ÿŠ4:©ôÖ¨õª¿jŽõÔ^„$2šž: ŽB5sÈ"éc‚I|§LdlL¥ÍlšJ×¼ožÅ¹ÍêÚÑ kéI½>ÃO Õmo£0ð†ÐúŠØCblÅD¢ ñ’ÂÑ}T¾Ôýu¶¬PÖÈrã—ô¬5=÷= ôžž§¤ä>ÒØVÙ[Õ‰ï07Ž£µïPUR1i¾·jÚªp;¦§š}i¯Ö¿ÒÜÖ hmziÃÞíÑÿöikÖé ±áAK[hèmí°O=vØgÜZ;$0ˆ^lZ76D÷-sÛëÐÐþ´¿±ÃÆ~ûvØíQXüà$O¨í_ÁÏjYï!:tµ‰Ì&e ¾ÉÏ–ÿÅ÷X3*1ÌËŽ©ÁˆÈ»µåÇ?âUI‚¬ÐÜÓßb.÷¤d-LⓌ—šñì Ø®OZE½—SµVwNÕøžÆ!Ö³•®M×ò<·†Húʉ8¢•¾´HúÒã,@bŽt0‡û×¢èÛD„ÜÞÐ0)IÁTìM£4 
Bl¶´ÔYÈóÙˆ2A—"¶oeW/ëÀQƒ‚’"ÊUŸôCôBx®ó‚›,¥—¤ð«ñ|†:€.Ú`CÒ»ãì|ÆL=mÖêhCEŸH"µ±& ÷»ÅY—nSlæ—|ÂK*XøŸò¯ê’…ªþ•_óo ¼pëx!­bZ4Yuª‡Öh‡ÜXß@]m‘JßébüÐÅxµÈ‘h¢G¶Á‘ãÈoü;ÿsN»–NºŽÁÀëtº%˼õ+ÊjÙ#®ÜI,hKåt?¤,hös¾É·@òߪ°Ç_ñ×ü ßçoùATŒc~Â?€m±‹Ò¬YŠêšª«Öº*æ§P¬¼ŽT±Ôƒ–±ì0?‡ÒÕ…3tïwþ¥Ñ?*;%\Ýb•ÍÿèUëQ[cMu]/hï¼Ö_’m,ËLN`µ€¡âÑM¡d lfhÖ~(àõ_ôV*rø;¢U .Ë¥zµh÷qÖÂ|Ï?òO½…t6Å*A4<_äxù ®.ú…uq¬±K}øÄÕEv†!;×Å[ÇÂü„¬~¾).‹Øð{²]Ž6ŽªlZ=]Õ´FB3)kë^þÕ`–^Ôߨƒí¿èµ¹Ó…‹ÍÜ´?iWPM²¡§ù„bë ²Á'›¬lZ¬²i¨þ¶Ž³|Tå„þl£[œÄ@Û5ÚèYÍÛ˜ÀOWùäyœˆ ¥y4¯ÆP¡YìíÅ'ÔÒܧv?™åÝëæ³NÛ=ºÎ³¯ùV9•ÓÓl2"ÅXhþme}yޙżº€„Ÿ@·¦yF¶³*QÿS©m*¤FkõL¨ÿ ñßvÍy“çÙ5¸Í þùtšŸáõëüû5öžÅ5÷®åŽw>&»pŸ³É$DzÇÅŒáÿ~ñëo\žÑ6&Q2ì×'21"Qþ×§OcOút>¢Iõ`VÞhÆ> stream xÚåË’ã¶ñ>_Á#'±ÝøIqf7I¥r"l »Ño€ ÞÞЉï·÷7¿û3‹iÀb™’Áýð0ÊyiCb£ƒûmð>|›­×åíŠ+Þ%ûÃ.=ºÎCáÕSê÷·††Ç[žòMR¥[7úÃiWeÏÉ1ƒ!7òcqÜ'»Û÷ß_b 4‰¤VœÃ<o²²:fëS•y þPŠ·t4ý "ŒâšhÃÝwUúänÿŸn• ³ÝSºÛ·Ëib¢¨åJÓ 4%±8¡Ô¸¥~³‡•Ö„ X(¢iEz¨Òýºf!ûÊ}9åª]ÔÀÖÍnÃæ )o­]?bÃø² çR÷) u8ž³2%c0uªÀQD±÷6×ÔOœË ;q/iÀxÝÀPéþ«3Ÿ­ûsýÉš!´*”èàøÔÍwooIù+¤O›§Ä/m1†Ñ7¿œŽÙÆC»”Úþ‡ïg— (é,üà±<•©ÇÓ%Ѱڦ°ÙJ¾ñ„îÈ}?.Pl̬43ýc"äæ÷;ʸŸ,¯ ±NÓܵ2Ìó÷iîR,ðMh¼sŸC²ù5yl’¸¿ ìŸ+LÅ&ð€?ÊÌÑ ÍçôXº\ :”lÄU˜fq7¡·° m?$ùxJ ãªÏ«_Ny)íà„…0×N˜SF/N- ¹]©8rB+šn Ä ×„ÜÛ5lÖ“ëÄf8„,ÀoUž‹ã¯¥Ÿ{˜—`üçmzNM/)Ñ d• )ùêú~œRuÁ‡nâèÕ^Ôkª&’cA(õ±O7™“A£¾* ŠÂ¿LØy ÒþõxZ*©Zä …€â¥.p>·$ ë:@Çá7È)ÀìœívŽt[º‘ò©8çõЮ8ß[¡@ƒí„&N%j¤mÚ²€5s\U5WëÿaúÔp¼vƒþ§¯4e±·K0\ûéJ°’X¬A ·ÏI^6%âîÖë:Îe•ÛV. 
•~(»eaþ\ìžýÊ-†0—{#áœ|áz~=Š¥ý"~b’çâh DN#‘3 3OÙã ‚Õ:ìn³=V~ÜÖ­hPë +f~ø\#à ¼› Ö‹aµX?Å¿’m¶óö!yN²"GPòÍá°Ë6ölÍX v|Í6†¤Á–r]ÇpÑÙ%»Oef!µ-°ž-¥Ag›T‰­ZÙ'[¢SÞÙâòÎ ïý²éìÝ~ÊûµñÊ !e±ñ{Ò0O+k¯ >Jø¶ÀÒ$³°°Q–"µc£®mÃ\Y×¶qÜÖñ°‘ìÊÂͯ=ÈÉZil9f”+šîÜè.ËÓ¤^v®ŽI–WåDˆî-y] ÅyøÜ Ÿ Ô&C¼ËTt$tXÏ–DáÏO Æ_JÚ23Wµ½ÂÚ)ײ‘ƒRµÉp—ذ Jmé” mP>´ƒš’ªÛ’ãËðQt’¥apq¹»º°Àî3Ë’ ޼‚G¡´ãêLßïàä» ´aª‰¿_[Nxë5ApÀz¤ÕS±õ;uã\\ ¯ÎüîR[/§D+Bâ©‘0éŠ._ŸØDøÔœ¨Q}ÑÒíãfBȘºÎ°bà™'¯­Üµ¶à¨:¡6 CíÎ&ïmUÿnP:«RÞý„Ú†ò´s ƒ½ÖM`/qŸkÝ1HØq¶&6nÔÝ–àpkpÛ5$0‚$Ys!‚˜toÔl9g”£nLV w_!£ëùüÓæSÐq’ ›Ow½L©`ò5)qþÿ»Ê­ïS+h9…ÆC²±“hÿäY³à@—5~Ö…âjŽ»«>¬s Ú0ïxÁ@ ¶Œvx§Ò âÃi·kd´F¯)9 #ùZx÷3€ìÖTX¯–Öí\([¹^e^²EµC¬1© …úež ›ŒÀ5‘Fw‘èÄß=|ñ®©Åp¶eƒÐ¡»$Þ qmÚta€«3ÌŸÏl°dÌÛ€×såSà5•¶@­t§z{ÅòIÂÑh¶@'àeÁ`ˆ»³°7_#ÈL´L(#ˆúÄŸZ¡ÁRÖHÔ€»Êt¸û™È¢ Å] qÿí—Füßz"&ÆÊ2Õª_íœWBIäü°éˆŽ!²c‰©ÀqÍ{¥ÂÆóÆÒ[Vh8_«ÚØ¡¤êMÊS€¿·)¦¤þÖú6‹¥õ¸ðÙ÷Ÿ¸ÀHcï= ¾Ohü- Ú°¬]ðRÂó¦ÙDÐö‰¶k§"cÚPŽã›¬J¯p௉‚Ì\ÝF*?¶ …Âbæ°°á 6|!‚ü\ ‹³{jB-÷}ƾwÿºE#Â?â±,q¼ÍÑÍ=?²íîÓ¢Éü:"RùÍßÒj"“‹DøÏ©{ÓÔŸænðP„A'UM‰‰óéëFP½˜˜Øs`´JùŠˆõéúÝ!%pÃ@ó‹EN ›ø–«O’˜ðBÄá¼a\ñÈW(ó+¢3³×J¹g*Âþ’Í06ènv]üÀY‚Rñš¬ŽüÍ_”÷…“Qq@í½û,í•pnbxlrÁµ ì…:‚º+²3œŒì£!¤i4¾W šÛQ“˜ƒˆµ°¯ÞQ¨ú篦'ÅJ ñ—lŠ÷ÎvSY L/a˜'SKt`—î(‡;rµÄN Ð?påÍS‰eï:¹‘$Âà ø¤êGBoR_²é>±ÝGÿdð€ÎÐÖ`xP[¾(fÌzgŠU©ö‡3u•"$›ºJA½ä*…)8„}ŒÉ/÷˜@¸”¥³êuÅ›yM)í,Ϩ ¬­ÖÊÇf„K“ˆÇ=áêkíì–†PÓçWïØÑÚÙª°ˆW¬¯µË6õ©zc*úZ{…LŽo-ZØWïøb­U/ÒZ& Ä…ÃèFQVMLÜ{ôÄ(FE u°%Ñ웵Í‚º|÷N;Ž[œš.H4>*Ãws‚°ˆ]¼WGy¥¬“ñû.Ö¸5{Bø:êÙÕÿ€-ò$ÇÛ²)ö=Â&ê¿œÒ »ïh XnE¡Ë¢41—¬endstream endobj 61 0 obj << /Type /ObjStm /Length 2234 /Filter /FlateDecode /N 58 /First 466 >> stream xœíY]sÛ6}ß_Çh;&ˆo ÓÍŒíÄ©»Ž›±Ólº?(2#³‘DU¢²Í¿ï¹ES¢>œíôeÇÈ ^\œ{à^€V²œYÅJÍœaÖ0¡4³–É ˜uLI\ä’Á © dس9ôaÏYèÁ^0èØ•Ð@‹N•Ê?—Li¼9HÓh(rÍ”s"7¸ ôÈ‚.E@Ÿ%'òH ¼C':GÏ …i+ˆÁŒ ;¨5FÒbÆ5(c`‡nð^ˆ7N­Ò€–Öh¢×ƒ~P&Þ÷âoß}Çø ölYŒê²šebÀø-㯪·{þ¼yú £×~ÃøûŸÿMNf ¦Tn²÷f«É„Ý­•OÑsÒ½.~¯i$DáÍpQÌêˆ,ÊoËzRÐà!‰š~,>~ÌsíòÜüdú9•ç¸äW‹Ží=ê Û~÷2=wM;§qõ©ÎRØ´êmjOúѾHõQÇ4ú÷Ž~ÔsÑÖóm’än’”계á•Y ‘K¶Ã’“»Yz³(>Ó<ëRþDÊH/ºnjB²í7T¦ x|.µ›.š$=¾0\3mT/,"(ò]ŠœúQdmr»•G¢¡HtžuG›êP#ÛÄ:Õ›Te O·º¨àf¼ðó7é Ìo)Z ‘jÌs¿h‰ ­€õ,o,j²-²€eϵm°öùuihIN¤åÕ趨áóâ¬Ó¾Û³Z„_«s!Әõl¸,¢ŸüìŸ/ß_}sþúå{¬t d±¬Ï† ¬÷‰ŠÅr´(çuµˆK`´5\+¡†ß®>Ô_æð 
„ü¶úiVŽªû±£qäm|žzüWy_?,ãº-ˆ.Î^¾û‘Ýþ¼‰(ßHnÂ’~Ø(»½º}q{úÃ7×åôÃjùºš]Üãã/g°YÎÆL­_d UïªzäùÃXå¬~7Ö«w—?\^4Xo‡OÁŠXÞǪ·°’Ò!¨êT×…º5J¥¶YLB»ÌøíÕº·X9“!€K§2÷DUŠ•F?M?C:ò4ee\¦ç­gÓélVÕ".dÊH”€Ç‹Œ–SÑDœƒ¯À?Ý\¶ 1 IUÏêzþ-çç7§×ÙÍÉ|QýŠÞ³j1æóáèÓp\ü£ž~®gÕb:8âè{ d&Í2ŸI0«<¦–$òL~³‘Ø$al iÅr¨ŒgØëdž6Æg´z’iâr:P™îM"Ó¸šÂ“• ©øÄL,m²ƒIRjƒè…É%™“‰`É@@¼ˆP)ÏŒ!×Fˆ|„@H”Á§ÎT¦K/àêÖͽe‡4 ›zG¹æ_E’R™ .HI¥S„9•Éù¦ô„ª_:“oüD;l.. Le·•U„ _úÈL¿ lsñÑצĈ ßŽzåpÙè7¶Û>:L{E~`iw4ªÒˆkª„I—CR„8”’‰du_)tTmXØìîÀ%ìhçU2ׯIX§ºc Svy½Kv9¢(H{Ïóáüû¢?Ô´i¦ …ågüÃb8*&ÅÇú¬'aAJkéSQÓÃ帕ãóXQ"*ÃI ŽŒwm¨¢^”Z³=D–J»LByB +¿˜ ÇK¦Sîr–Bü 65'2Xb;7ç$…xR¸('´×l¢<Õ\§Eï ã²NÊÑél mˆ·u1}2»»ÓÎ^—¿o˜ÒJ´©JÂHÓ©C¥E@î’9\ðI±\¿­†>-g«%Ÿ®&u9Ÿ|áóE9-ø²œ–“á¢ë6ïìq;°“è5Ó±ò¦×vÛë­s’®×'Ø¿¯ýοÞo,‰]¿q]¿Où9¿æ7|ÈG|TMª/xâ`ÌøÃ—ùC1ã¿òO|Êg¼âóÇñÑ |Ž¡WÝó_N†Ë^óÏ,ÑëÚÉ’`JÑ¡zN ÐIn›¤}g7»Çˆ8Êrô­1@®’ØruÓuJ µÏ)GCÞÇЊå»ãM·ü¶[ûŽyvº»GÜ2Ø4nW+`UÍQ{NAíiy2ö㪞”3$ÙMË+ ö˜ß§½éz3t‚p<`Ïh„¬Ï¤á¹±O×§ýÉÖ#A-}šÖ°ñõb».Äo<›µr@_¡6ëÈâzßÑó¢ÕÒ]øa~W)ä[æÍ€>pmÖÙ}ìÚ¬súðµYçñËÕfeįX{ϱ»!Bº_KºX×Ï]Cb”‰×‘Õ ~:‹¿S#:XíVRU›U°eÝ>toÝû.ÀNµvþ•XûŸendstream endobj 120 0 obj << /Filter /FlateDecode /Length 698 >> stream xÚmTÁn£0½óÞC¥öƆ@LE"¤¶­šhµ×œ.Rˆ$‡þýΛ1i·Z)A3Ï3óžÆ7?ž·“¬î^Ý$º×êźËP¹Iþsß77EW]Ž®=?:W»z\==¨ç¡«¶î¬nóM±i›óoÚêýR»±êÿE+÷Ö´Ÿ%àQ·;÷{Ò÷ïƒ&ôÓ¨Û5çwZÿ¾¤(W×\qé/7œš®}Pæ^kMÀº­óîݧ`ê¹ÕtTshÚzðÔ+ä&TuS}ÆÏêH yûq:»ã¦=tÁb¡¦/´x:¬ì.˜> µšöMÝ^Uº½Pæ @é`¹Tµ;Ð0ÚëãþèÔôû¦®Ë»Þ©s#jª®v§~_¹aß¾¹`¡õR-Êr¸¶þ¶fbiy=Œµ Õê9ff–¬— Ì Œ¥Ø¤ôÃdF@ˆñ!÷iŠ @E\ ` H Ås¤ µ¨¶` ¥%(Ng ˜*R©0fd9ÏÈ0#üb.Àš€´k©(@».°o¶†ôÕ劊 @u)Â(†A£±©þìòü:,RxÊVús}æ¯e#3¸£cé¿¶¯§gˆWC´ÎÙYÂjÍx©†ùM„­æ#Æ¿p'Èg_r¯ÏÄÿbÐhyƒpßÌ….šŒcÍü+Á™StÑI Æê(ç^ÿZ9öïºBÿîÀŸh‰±?+q ÖðÌ~ØPjà‡å=™¬?Ðfc91ا/sŽù4êlƽ¬Ç®‡ËÃÄ<úÉbî]Ë™·äx©çB}êßÌTz÷\ð'õ\˜“z.œÛ´ßÀ•2Wœsÿ¤À•âŸQñЮþôáÃÅår½ªË0ÐMÁ7ßøþ›Ö]/©¾ëÑÅ¾ÝÆûÙSü/Aržendstream endobj 121 0 obj << /Filter /FlateDecode /Length 699 >> stream xÚmTÁn›@½óÛC¤äàx ^"Ë#ùÐ&Š£ªWÖ)R ãCþ¾ófÖqÚT²ÑìcfÞ›·ËÞ|{ÚN²ºÛ»It¯Õ³;uç¡r“üû®nnŠ®:];þp®võåíéA= ]µu£ºÍ7ŦmÆ;JÞ´ÕÛ¹v—¬ÿ'­ÜkÓ^SÀ£n_ܯIß¿í;Lè§‘øÒŒo”ðå"@]ÅÉ?ÝpjºöA™{­5ë¶Î»#¤Ÿ‚©§WÓ‹ CÓփנöP˜PÕM5ú?«#y€âíûitÇM{è‚ÅBMŸéåiÞYÛ]0}j74í«º½Ê"x{¦¥ƒ¥ƒåRÕî@ÝhÞ»£SÓ/s}¼yï 
ymDOÕÕîÔï*7ìÚW,´^ªEY.×Öÿ¼3±”ì—Ü„rõ33KÖˆKæÆRlRz„a2# Dûë4Å #.°$†â97Ò šZd[°„R’F§3L©d˜ˆ€ =²œ{dè‘¡_1`M@Úµd ]˜[„­!½Dv¹b€b]Š0ŠaÐʼnX_œ©~ïòü:,RxÊ( ýy]ׯeÜѱÔ_[‰WˆS‰3Ä+‰!Zçì¬ aµf<‡TÃü&Âè†ùÈñOÜ Ö³Ok¯ÏÄcÐhÙA¸oæÂ MƱfþ•àÌ)ºè$P cu”s­ßVŽýžCWè÷ü‰–óY‰3h°†{†ðÆ’?,ÏdRh°þP@›åÄ`N+^æóiÔ4ØŒkY]  –†‰¹'ô“Å\»–™·äx©çB~ê÷f*µŒ{.ø“z.ôI=ÎmZˆoàJ™+Î9ÇRàÊ ñϨxè?Wúðáâzù¸ªó0ÐUÁwßøþ›Ö}\S}×£Šÿ|¿].Q¬Ëàõt[endstream endobj 122 0 obj << /Filter /FlateDecode /Length 699 >> stream xÚmTÁn£0½óÞC¥öƆ@LE"¤¶­šjµ×œ.RˆCÿ~çÍ8M»])AãÇ̼7ÏÆW?·“¬î^Ü$ºÕêÉ»ÓP¹Iþs×WWEW®ï«]}~{¼SCWmݨ®óM±i›ñ†’7mõvªÝ9ëÿI+÷Ú´—ð¨ëg÷{Ò÷oCc‡ ý4Ÿ›ñ¾½S¨  8ù—ŽM×Þ)s«µ&`ÝÖyw€ôc0õôjz´oÚzðÔ &TuS~ÅÏê@ xû~ÝaÓî»`±PÓ'zy‡wÖvL†Ú Mûª®/²Þžhé Aé`¹TµÛS7š÷~wpjúm®÷Ïï½S!¯è©ºÚû]å†]ûê‚…ÖKµ(ËeàÚúŸw&–’—ý97¡\=ÇÃÌÌ’€5â’yA€±›”a˜ÌÑ>ä:M1ȈK,‰¡xÎt‚¦Ù,¡”¤Å錓@F*&" C,çzdèWÌXP€v-h׿akH/‘]® ˜d—"Œbtv"Öggª?»|¿‹ž„2JB^G—5Äkdwt,uà×VââTâ ñJbˆÖ9;kBX­Ï!Õ0¿‰0ºaþ0büw‚õìÓÚë3ñW M";÷Í\8á¢É8ÖÌ¿œ9EŠa¬Žr®õÛʱßsè ýÞ?Ñc>+q ÖpÏ~ØPrà‡å™L Ö h³±œÌiÅËœc>:‚›q-ë±+Á¡ÁòÁ01÷„~²˜k×2#ó–Ï 3õ\ÈOý¾ñÁL¥–qÏRÏ…>©ç¹M ñ \)sÅ9çøO \Y!¾ñýçêO>\\/÷Auº*øâ[ßÓºkªïzTñŸï·ó%ŠÕCü¢t«endstream endobj 123 0 obj << /Filter /FlateDecode /Length 739 >> stream xÚmUMoâ0¼çWx•ÚÅvHU„dçCâ°mUªÕ^!1ÝH ý÷ëñ#xÙö?ŸgìÁÜýx]OTÝmÍ$|äìÍœºs_™Iöss îîò®:L;<S›zœ==±×¾«Öf`÷Ù*_µÍð`É«¶ÚŸk3²¾'ióÑ´ž‚}Øý»ù=©½à“í¹ÙM;áà¾7ÃÞr¾›f¶ÆnjÌ-ùeúSÓµOLg~¼À8÷ã ãâþÈ)okà çA„8 ö$`I\èÎ×3`çAfŽã<ÈZ]ƒÂ!‹„ê xNkÇyã¹ãÐð"œ7Á¿ _¥ã“§Ìq âH`òáö•‚nú¥¤kÌÂðRONH=CpB:# =Ñ%8“ˆ88QA~¡!*ÉzÆœøÐäT?!~Ž> étw©8éÄy*ás£¤Ï }nÔÌçFE>7*ö¹Q‰ÏR>7в¢ G]¼;~îó¤ŠÛ<©ò6OšßæI‹¯yÒòkžtèó¤g>O:òyұϓN|žôÜçI/|ž´òyÒÚçIg>O:÷yÒ…Ï“.}ž2îó” Ÿ§Lú> stream xÚmSÁn£0½óÞC¤ô@c ঊ"I¤¶­šhµ×&Y¤`Cþ~=3ÐUW9„<ß¼y3¶g?>~Z6gð£g)>¡k[€¯žZo6Ë›b¨Áôo%”Ón÷*>lS s½Ï÷¦êŸyoŠÛPÂÄzLÊàZ™¬#æGøí_Ú[}®üóPÝúÊøÉǪ¿9ÒÃ}á‚â{PPÒ/°]Õ˜W> stream xÚmRMo«0¼ûW¸‡HébLIÚ !%$‘r臚¨êì%µ8äß×k‡<õ)`vwf™]{v÷qV²-!ˆý„¾€ -:2›mZ16 ‡7 rªö/ôôâçûÍ^«áÞ’÷ZÔ£„‰u›´†“Òÿ(ø:?ÂwPuuSå¨êAé€!÷¨†Úrn•©ÍÑ?9ê$_`zÕê=0Ælb«eÞ68CO‹NÎ*¥¥¹˜¡%Z#§R‰á¹·hì2P|8÷4{]µ$Miøi‹ý`ÎÎá= ߣô‰Îÿ8³•ÃØu5  ÊH–Q •mhg+ á­¯”ã¹Ê]yW¢•Ðw…Sè”±Œ¦»]F@ËÿjÌ+Êʇ–0Áh9•ÄOaHʱ cöCÒÇG‹9g Ä ÌÇ+‡—?Ljs×'ÌsP›D>ÿ„8öyä'‰Ç bìÃ#Ž=“'Ïß"~öÔ®°'g«qâüDÈY_ðÆâ|á}bÏ-G-—™ŸÒM… 
Áû®ZŒÆØSp'ì¶‹{U®— k;T¹ÇÝžé®bô¾#¿ßôendstream endobj 126 0 obj << /Filter /FlateDecode /Length 698 >> stream xÚmTÁn£0½óÞC¥öƆ@LE"¤¶­šjµ×œ.Rˆ$‡þýΛ1i·Z)A3Ï3óžÆ7?ž·“¬îönÝkõâNÝe¨Ü$ÿ¹ëƒ››¢«.Gמ«]=®žÔóÐU[wV·ù¦Ø´ÍùŽŠ7mõ~©ÝXõÿ¢•{kÚÏð¨ÛW÷{Ò÷ï{;Lè§Q÷Úœßiýû’¢\]sÅ¥¿ÜpjºöA™{­5ë¶Î»#tŸ‚©çVÓQÍ¡iëÁ P{È L¨ê¦:ûŒŸÕ‘ @óöãtvÇM{è‚ÅBM_hñt>XÙ]0}j74훺½ª"t{¡ÌAÒÁr©jw a´×ÇÝÑ©é÷M]—_?z§BΨ©ºÚú]å†]ûæ‚…ÖKµ(ËeàÚúÛš‰¥ekªÕs<ÌÌ, X#.˜K±Ié†ÉŒ€ãCîӀЏÀŠç˜©ô2î¹àOê¹0'õ\8·i!¾+e®8çÿI++Ä7>£â¡ÿ\ýéÇ‹ËåzT—a ›‚o ¾ðý7­»^R}×£‹ÿ|»÷'²§2ø ­rNendstream endobj 127 0 obj << /Filter /FlateDecode /Length 4330 >> stream xÚí\[“Û¶~÷¯Ð#w¡¸_2“—&MÒ´É´µ3mÇɃvWëeª•\‰›µûë{" t±§Y‘”œ¾sp<{5ó¯Ÿáöó÷/žýî+¢ðÌ #©œ½¸‹ngcĸœ)Fädöâvö²zq¿\_Í©ÀUs¿ô·õ®ÙÖ×M½i¿ÚÜ]ýôâ[ÛžÚ{9§’Vo»¯pô«n6ëÛÚ¶²Xµí¬í(S½I·SïüëëÍö¡kê©nîýÕ/‹m½Xß,Û÷5p ¼&Ät6§ l<žw/’™BB‘îÅàv&‘–|6'ˆîß †Ýÿì地 ´gÒQ3bš¶«ðˆ)Á“¾¯ÏG†-2L‹ˆ' œÛA›ˆ3 #íoÿˆ1§ˆSJ¡1Eœö ÊÈP#©iHÎó~ imä~û['?1ãˆq0†X‚´"CÀ0=›Kx }“o®æ¢lÀRõŒ".É1:h 0 zdóZE£dL«"[dWß°”¸—.ËÓ?‡JšulŒ#¦XÈбÀþ‘1¯2çØŠOIæ_Y¬o½CyX.ö~J {¡"ð%ÐpÖˆ–£H І°žð%4åK>Η}}>2²Ö—”O@ 9¥>O‰DðZðÖ'ó ât"Nñ üòDŒz¼d|ˆUùq“æ¤otàÛ-¿ñ`$â@%ZAO‚ ´¢lš0¡Z8“SmÒŠÊÊHbÈh1®¨Ùù.VÔ;jªÔSÆ.«;½‰<¥ú¬hðUˆ´æ›‚Þ4˜ŠÎ[VÆÖÜXhn¿EE¬JSåÆÈ’ó(1"Ò²Ô ÜáÐ?Z°MD‹Ã‰¬n»¥ø;üJTw«Uû£cïz ,ÔônlVÝŽŠ#;'0$<Â4íÜ]™EäØ¿õs‚ jEÒÌöZÚ5âÔáüItOaVì`¹<¤0-uBªOA"”VO÷õͽÎS+®§Ú‹KT·Ëõ¦iŸ.£”ìÉ‹£!ÜC‚È@ˆTшäiœl“YÅr1µ½æ,w«¦}P¯ýX¬îé21 ÃÂâ ¨j½ÊÝfµÚ<ÕëWð®áp»}x\-vŸ%Ì›ê? 
eRwS€±ðüEŒÁ¤ÎÚÉŠÒ—x ñ‚¤~.Aq„«1•Ïõ5׈Á¤=‡'&î+£ÙÑcõ:Ç+˜ˆà B`möœbe± AĺÏ!›YÒ}èêû„X‡~'öæ|ïÌc+£`e£À‘¨8FQž1\†…O&$à†ì'–“TÓ0ÄÖ”ff ÐLE½fbrT|[àÃ"ÕÌtªi¤´ZѨ³¬jîƒvQí¤È°Cõ̧·@6t¨œ6¦KäNdR]ÊäÊ)¸;Ë•—„ÿ&´xœÂ¡Cca_]Ð#p“³XiodÔ¿ +ÌHŸŽè‰·ÕŒ.¨"Âajì»;íZéD€Sšh+í·$©›4´ÈK=¯*Ä6Ý­É8ÈesžT:AÏÇW2„Næù;[M5ñ,ïLüB !ô¢ !á,DGg¡‰ÕH-.\ áâý¸ªÉµÝ³ú¿ ž›êçe+{ÿš%ãêymó¸sÆ;k‹AŠÙ>Èä{‘ˆ²pX*iÙò :/‡–å+xL¦¡l²Û³^ß._/áߺñö#ä —›æ±c—ÈbUïœÙ'¼§*óž ‰Š ,J.å>'brƒ°Ç¥;“œ5ˆ [O$ß[#“ÂVŒŒÂI ŽÍ…¹+!írÃrÕ.<¼öL¾Y¬nW‹¦]Q©ÿy½´NÐþÀß; ¹^û2p f6áÚ¡M”·#WVf\vN’7£/î7›]ÛÏ¢õÓÍbÛtë%«Ç噾kˆL±oÅgûV†ˆúVW°r´ˆ‹à…¨«£2ˆá od«Å˜°e{å-ð±_ñŽð¡ P>‡páóñ)\‘pª ጡ™îa0ÇS³(‡ÝVFÙ´WO|>Ba’ãqG».¦=5Í,ø™ê6Q-¢°Ï;[¢÷X)€–w€ð8œ Fì &0 謃e»ºy›`k¡p°D@——uù<Œ« |dÌÌdè b¼Ì¸!Ú†ï¢Ã=Fì@ì°-ÕZüIм¿_šâ]åÈ)JZŠÅ,F–ïYhJÂ6!P*ÊÿMD5¶i0‘žÅ.ÐG~¬0‡Ü*Él榜S™Ìb&r‡#$†*±²NÍG)«ú(Øo™ßZïm¥t!‘2]"!Œ@X{PÅø¨ú"u …Töãq]Û¢[³…kÓ^»¥Iñ‡ÿ¨‘Î\SµžÛ7ç߬¯’Ù³ÎæC0ÕPc«\R…"æ#i¾ÔPJ]½põÌv¼ÝçÓý²ƒ¾övó¸mùõÚcæÇQðd·ª_Ý7«·Ùù:€á·õmàn¹µ]Q3À2 (øCz¶µòò—sbc WÚ4K@†þ5+àâËŸ·õM,fûû—Ù&+bŒ‰^«~Žn9²|뛿õºÂü¶ºÊ%{WžÜîk¿ÚÀ§´]¯_ï©é0Íܺ½P|ÈÖ¯²Å¼ 5„/ßžRÜVïÛXªÍ¨‹¾²ëz±«!¢ryu¸mMk:$"œµXžå­·[°æ×J®D wÐk'w?Ì<Àƒ½m#A ÝÎ?ò%iGª%Lnuªðš©NA¡­(ɶ7ºgƒ•lí €•@JûôX‹Šiåˆjîã0µOÖ*Ì«çKË5r\t¢Z¾ñØ=o¢ðÆ×Þk,×˃„f& ’bÌ ÒS„LôM oïZ`2£ÍýîZ@%€ô¬Xx‚˜%vV±}vÆ}u½lž–Ζá‹ñíӡ醺jW¢ö¥‘Ðd;%2¯||“žs0*3ý0À…[ L´ù%¹Õ€õYPs–è0‘6Å?ªÅSÕ5›ç–•X[åDäxó¿ñ‰žxpëÑK„N¸|’Þ’Ç„L‰ä`‹(† ŽfÎWsÍ}rcÂ-éœ#œˆDø¸- hII\Ÿ8³dW_u >eä€c)ÙØ*%Kòá39.ÃÕe#­·ê*óÓYŒƒ’jVí;“[ä@oÿr @àÕ?SÚŒunÓId¶ˆûšçè}z"ÁÌTÿHQLHŽbš8$ÀaÉœÆÐê³È&9Rî«ñd€Ùr»¡ ¼öu7û* Š ".& !—€p¦7ιybû™ájI»±ìÙ^<û÷3â&K2£¢N€ŸŒ˜ËÔìæáÙËŸðì¾üæR¾àÉýôÁ.4,àr5{þì¯Ý™;fèKÂ>© ˆÃІhQšÊîgŠÜæ"÷0%E5óX:`r¿\Ajn¦¥fˇÁ§Éͦƹ-ŠböHnbTnF#ÍU‘Ü\5D¹Øìm°æX±a¤¤[D:5H»ö9 äH7¦"Ús†IFFK LµÝa/ÇX&;RÂÌ®TZ ·C¾¾œ„Á2©T0€DZLHXRr‚eR­‘êÛz6ñvÉ.o—ìT»$gØ%OÙ¥/Y&µ£íRqsŠÐ¤Lmê˜n ƒ°Šv5Ñ„0-1 ±•©ÝÀ9­‘Rʺ²Ž·É£2H$³Ñ*ˡИà Qâ‘(siRK&˜d@5˜š}1dÔìÎ|Ç1-7ªO‰q3Ü üy J¶9ÏT.ç°jD ÄìæX¦!¬ 3;¾ž­qQgÀõákADT×ç§ü7[‚P»> B£“ƒMŸñ$×0íˆ3ÂI‘ '³ÅôVðôýlš¨8°˜²§¢äP„\½‘-Ò–^Åܰz" èܨ ÂDœRîY@‚*+9ÕÈàñƒq²î+®{Τ$©®KN~𠂍6íÑ—ú\Z5 6¿Ñ±-‘çï}cv[ŸßÈòIAf‚x„¹=Ë”†¢ÃÔz^ ˜ê÷³]<¥ì^ùª¨‚½‰90(¢Ð°qV®È> stream xÚmRËn«0Ýû+ÜE¤tA1¦¤!$B)‹>ÔDWwKì!×d`‘¿¿RµÊ83sf8óXÜ}‚\¶'âF¿ oG# (ÞÊŽ,›VŒ 
èá@‚œ£ý+ý4­8À@—Å~³×j¸·ä½õ(afÝ&­á¬ô7ÿC—GøT]ݘà4ªzP:`È=ª¡¶œ[aj}ô‡º”?`zÕêW=0Ƭc«eÑ6ØCOÂI ge•ÒÒLbè ¥‘ˆS©Ä0Yî-; L>\úš½®Z’¦4ü²Á~0§ðž„F‚QúL—?”ÙÈaìºPe$˨„Ê´½¿— ÐðVƒWÊñÒåÎŽ¼*ÑJè»R€)õHÊXFÓÝ.# å¯ó§Ê›–0C~ ‰¥!)Ç"ŒÙI-札¯Ðç?9ü#.<^[œ0ÏÁÜ$òþgı÷#?I> stream xÚmSÁn£0½óÞC¤ô@c ঊ"I¤¶­šhµ×&Y¤`Cþ~=3ÐUW9„<ß¼y3¶g?>~Z6gð£g)>¡k[€¯žZo6Ë›b¨Áôo%”Ón÷*>lS s½Ï÷¦êŸyoŠÛPÂÄzLÊàZ™¬#æGøí_Ú[m+ÿ> stream xÚtTÔÝÖ>H—( ’R3tww‡äŒ30 ’/!ŠÒ­0‚4Ò¡€t—´Ò t|c¼÷Þ÷þÿk}ßšµ~sγãìçìgVc~E'„D Gñƒ€Re]# BÄ&P ò&æ0ƒ =¡¸Ô8(#!`S£0~º8@Ë  @bR q)  Jþíˆ@JTÀÞP'€®@ ‡xs(#ÜýPW昿—nGHRRœïW8@Ñ ‚„:‚á]0Êâ†9Ñ #¡”ß?Rp˸¢PîR‚‚>>>`7OÒEއàE¹Œ ž¤7Ä ð“0@ìùÍL€˜`â õü#œQ>`$€`PGÜáw‚ ˜ÃÆš:}wü·³Îo>ÀŸ»€@ÿJ÷'úg"(üW0ØÑáæ†ûAá.g( ÐWÓ@ù¢ø`¸ÓOG0̉{ƒ¡0°ÆáWå`€š¢!Œ!ø‡ž§#êŽòð„Â~Rü™s˪p'e„›Žò$þYŸ  qÄ\»ŸàïÎ>„#|à6ÎP¸“óON^p¨‡DSå "þ7æAD@ ¸¤0â€ø:º þLoâçùeý„1 ‚Üîg HÔ‚ù#ð{C(¤$(à? ÿ܃@'¨# àq‰ÿCœï1ÍGB}V@Œö@àÏß¿V6y9!à0¿»ÿꯠ±¾±‰‘!ïoÆÿ²))!|ü"@¿(ú)2qÌ"èŸi ÀÐ?eüG¬&Üü]-æšþ®Øû¸ÿ àŸ¹ôÕBÜÿ¹5Pèˆù€þÏRÿòÿSøÏ,ÿ›Èÿ» 5/ì—™û—ýÿ1ƒÝ 0¿?Ñz¡0 ‹ÀŒü¿]Í!¿‡Vâõrûo«& ŒE¸ ì_×õTƒúBœ  (G×ßjù›þœ21@xB>+~Lkþˆ-LJ˜§Ã#É_&frþy¤*ÜáôsÄ„DÅ`$ìGŒi2f' afÑ âûKÄA8… `èœH⟕`LJÌëçŒúiü ÿ ÿÝÁ¿ b˜ˆ‡¦Q#e ºcø?ˆ€þ ÿ Áäu‡yyþþÁÄÑ ‰ñFýÒ†æßû_/â q$žG8J‡?¨ o´¦ Wá”n§öW™¨À)›I•Âvú·! 4ý¨ ªlÒ?áÆ"͸5÷„#cmç®]SoÙþƒÒc““¼»þ‹XíÃýˬŸ†–Ù`¥Ö¯E:jÒ¢q©8°DNÖ‰å´ÃY+ç˜ÚySŪiÆŠôÑ.ÈÂVO®Ò]‚&a •ÜÀ–  ˜+c^§—êz<9ëÁ’¦ÂKB­l™§uå<Õ™ˆYëKlöùhÓoDƒkbϳ‡}Õs áÝ%wÒbèW åãϲX'†ˆu„°h+¬²Òn_. æasްxLè¸q;–kYfŒ‹ª§2!e߈Ò@X5—5œ_ªâ6×íÅ«AÅ›ZäåxS•ÊZXZ(4g{€8þS± ⻡fƒíƒ-›cãcw’c‹?ÞT›pñaS}–à³oÍX~Š0”XxAB2dÿè½L&3XH˜z-ÓðÕm·ªæt2c¾×uo>'|¡ôkauõÛ·éò½ôöò‡â)›4$v}9xVÏ›%| dD@cL‡²¨Ï'XÓädÒb©uµAHm/ÐWáó4Se, }ZÂÖ¦%WÁ4SJ0Wb Z7ãyŠ;kÜ3¥çËÝ ½kDASKSØÔ ߃n2ÈØhÒÁ ñ=Á}Egg5`a}›¶ûð»aN9™‰Áõï°œSbÈG¾$÷«i†0d°kòYm²ÍþÓð8{^Xá1Äðxü©‚3­˜0ƒÆ‚Ø{ÈŸÞ´ŸÚîmv?U×ýó=ôJwÏx¨â‚=+£J_åI'[*+ó£•i^qwë_z Ë%ubÛ9QÕ²þÙdj Æl»Ùºõêã/{_C¸™Ia5”C; ûY /C/Þå)¶³C9é=¾`ÿÞª!bDCùBŽ/Nð¥çãÛã=ôæ 5ßÏ;£/.Wn‰ñÕ¾f~?ª§oe²ÙÜõD÷âµ»÷A+Y&SdbêÑdg‘RIò/vjx­¥r©2謅vR¸{\$5øP/j4Vû-vΈt~Ë·ÑdX¨Ú7>ÉdœÞaÌ+léÅ/…NWoÚ籓Jo],ÜkŒ‘éz•û|¹“2œãÙòJR¦Z=LYÕ>ÂìkžëbSoµaò€Z! 
©cièvX0;úiÛ°pã±ä„äÖýxOÉ(l.ûåRf²-ä´ù©”޳­Ï‘ÝÏjXðÃH3l™Ìç"8…D|(Šaþ$kì£Bæ»”•>s][ú’×ù«lú¯²)?ö›·ßS£°™|e3¬ Šî}fmø¼íŽÚ=ô,Ô„f?~ÃVpäd—¯kViÌaòN¯^[<¡œ×(u|Ë Y˜~Ñcù£“rÛÒÔX1ÉHZ{S¬Ä„'â«jD– ~Æ6ö#²o¤–žÂJã_ß$±¤dz³O7jbÔž=&[8Û)V][E?v>1ð шȡüF`~Q%=£ÎT4U&ܘaûÓÛÖ}…äRºL¢4ð›ª~ªTÅ`ǘȯ0„9v•.÷A>«æõÝû‰A¸eÀƒ{á@Ø o¦2ûz.MXÎeðbëjÕE[ÏòðUË_îʸƒ7lIB']7õâØgÖì¤/»Ÿœ‘–JxùͲ#RæŸ{;9z´óжú=b½œ³Ë<»sÒ²uÓ”¯»•ó•Ò8ÈiÖè˜á.³ƒè‡~eR3k¾òħ‘{ãµ±ØÁ¯.ýe¿Ã?,+"]ã¾5G‚é¶÷d 3Ù ÿ²-Ýâ'¥x?Ž=“¥Ú÷²te›‹ònTZI'4ü#ÂáÇLÐå …Ù§¡ÎŸÃ.Ö l¦ÿ¸ôæ év àê7^nSi蛂ù]°Ð‰'Óä‘ZþW~WÚéöµÚö}¹º—¨z:ÁIõèÕ99J0n”äj1¾oixQä¯ P)<Úî\E.ÈŸwÍí"éÊÎtVÔ‰m£°«{j?i;\§®}$-Z‚_àÓ½Ù€»¢‰ôØU,vV|¼`yêõ&¤mn¥ý ihwk˜Âyë+“Ê#©„øi'f£Bœ9@²"ê6é&?ú)ÏR=]Ð[ïºôYÒjôµ×5¡ñ:¤v}øííb°ûŠKoò{ý0«¥¾±w<ºÃòýp5¹S®ÜÕyÖlq¬ tenaOôWY¢4ýÜöÔ§Á2ž·éòÍ}©ÝõŽÏ Õ°à)HËnl{¯… ¸o¶X¨Çg£7ªÒºl§Åîìfù Ûƒ÷r)^õÅX•ÄܲnîSÍÊoóahÅÛ3ß!Íõ_ƒè€Ãi)‚||»Þ«pž³ÔìK¾¶kþ uåùä%~ïfÀÍr˜üýbÿÃØç)båj×%¶YÜùu†8{òÏqvµíåöUEmö)y;sz=ÑòõÞŸºl­/þº—D=~iøñK‘žƒsïLcó|x‚(~êµ)Q£ÿСË7—~ðÊ“§^Uj9C©2í”g~sµè7ƒ7Îî5Þèq9º‘·²i{*R¤‘ÖŸ—>qfÛ:è¡ATf¼4Ý>|Dz\˜”4(.·ÅÓUp˜ªšu£¾¨TQœík€J¤‘Œ êR·³ûû©çê«ó‡’šß“œó‹3†p–®?_žóRN½²PÝzr«GàÛÖ–Áv¶{Z3Æâ7[¦-™Ë 7ðvÌ1qO)nh‘ò–¬E¨k ¤˜¦ý‘¢—»Ê.æ]iÌ`byû¯»3te:/®E^ï¬Ë§c}æ:Ú:n? V£u2S×çã]§ñ¤~£~U°h+X[ìQÕŽÛ{„í×Iþæôô>óÿp•b”©ð1„"-={CwRA«ýŠnc´ÜõD^ïõ£/Åm3­±ÀõõäAC¯•‘1=´]&=Š=$Ît—7^æ'=3’Æ‹aiÔ>¹;h’SsSíøÌzáòØv4‡‰‡D:•¨%ð›@źã1™†×¨ØµaçåàK®LCáiß»µOX„YŒû¤ÐnoÏpÔ¢´ å>æê_ïd0!ÍæCÛè}mÞÖá7Êñ,íf·VÛ„À2OqDÍ·¬ÖsÕ¤oñéqqQB‹c¬+ô [54Fû¿²´È›Hm+LªíÒ®ë]“[´Î+dÞð4¸.šã‡6‰8ש©Ô¸&ÌL¾3¹ÁckŒ7ÄÏ÷ ºä#‰¾¯W«J=¨Œý½ü˜Ä’$ÞRíß6z´'Ü8lMü~µ}ueùEÔ>Ví‘]ºok|ŽiV`,—ERuwT¸º¹éø-1Mk–ÔðõÙ#¹ ©^3»ó½rc¯$ïiƒhFª­Ù&© w…M§ý»ö{½VÔ2œqú¦ "~,}äQ}-AþøÂ´0ã̇§lÏ…2æµÇ¨Ô¢Óƒ»…uå³ÔÑ®Ý$…ž2þ:…$bmuñ„e¶Ò@ÃÁ¨kè³UÀýÏÀˆÇAù:cUÆ”1…R!Îè1·Æ×mŠÂ» ã{Æ$B²ô‰ãVïœj&“/´2¬gßÑ0—2Ð8nMÓ…\œ’âB5? 
u¥+®#¯b¨ËZ†Ê®èû´¼Ûø´Y±‰ª">?7›ú4ƒøåEºax•Õ켌i«…#îãéyGÞ#ŽÅ ¢m½†Y%cQÉÈ“°ÖwØñè,¶öñŽŒw.&_çVlŠÍ; ñg¸+ï±â|ðßœlðPÇÄ%/GØRé±"KÛ-ý e #9i‘/¤4•ïØ F‡ó?`‹ù[‚÷]Ædh‹0ÜëÕæ§Ÿ˜3/4^5ˆí/SD«s…uÐt̞ѾîSÝšÔ0o¥-gÞ¶\1ïå¯Të+µmˆRjóYtëÌ;&ÏUåi¦9]ßW ¯ßÀ'+Ž›wbà;#é{¾|U¤qPì”vâ_h6¼¾‚”–.‰Ö~/Ʊ­V °I×âRwÉÂu:Î4´Pñ¾4Šlת£jsí‰ÒÙ^õ¸&ò±\?õu‡?Š·F£,’ÛRÓnÑ•PÑKJõ “HħÝóŽüè>§Ã‘ÌG¥´jýÂúØ€§ÑÏ£ùÄ}_O*Fƒ!ÎÂÏ÷|=ž]/²b‡Ôñ–Öù•÷:t9¯—Mùó9•hN64c'¶Ë…Èi^ŸqKÄêGcJnhTÍä÷ Å[ýüóà“Qõ5“ðêf¦B·<ð·Äb]¯i•žIÔM3A9‚]ì±Ã‚î»5c¢Ÿ{Ô,mãà_ñ^úò»s[³D’Â.7[*É\xcÜ’PûŠŒ¤³¶r¡±DCË3ä_óUµéã¢ý/®q™_®j;vëÏó‰4?‡ŽĪ7|<¹lä½·XŒ¥=Àpé7Ðm0©žeã_š}2Ãõ)w¨b\;ǦžZ+-iCÜ—h¨÷ƒ°¬g·@¶fi/ºŽs°¼n«NÊéÞäæ‹N¥0›ù«cî¢ål'ÿ2Â*œ_ Ú¹¨ýwúåtGërc š°Må.F‰‡áR‚çFÉ8›Þ3ñTÜ7.‰NÄeÈyÖ,Ay RpJ—R9Ìä«êlÑ0¤‹GràÕÔ¥“ +£øVÄëÆv­4I¬@ž”šopw:-¦eº.‚­ÒgãêÞãϩʥXäýê{Ö…ÙX6¼¹&­Çð÷CÉZUFê¦eè> stream xÚuS dBx PÈybQ×< ‡øsmC|€ƒ2`13†Ìä X NÛÙ¸jEp8îÁS¹!|ŒÎD! (àfï°!®ÍÀ&óñ6øÂÆ‘,„)ñø¿|‚ù 11OˆÏÏã2D8/žŠ‡Bs%øÌùʾW•Ã@Y\?—W¬ ž‹‰Ûà‹¹ Âàñ¸¢¹Xdõ7?„ñA.û_óqp¹®Œp\Iñ âÄ;ø/#⊾"ÅÀo1N/ÃæàRPþ0A|H²Ü Œ °\¼Â9»§x¸\q%çš ¨Ê7>Pˆ‹[6úÓåþ±BÿQ!³¾-n®`²£³§çVýÙfÌDXb¹hFÆE"9üvð—EÅgÍ…â} È$Áð€'À¢ñ™ râÓ15ÈtˆÎ›åþIï†óñþÖB\ÏÜ›úõ—ƒBBÀ‚_œX€¹Ï×ÿÿÑ’­-"¢p¹š±1`J3ŒÍŒ¢ÿ‰d PŸúÜãÊüõžÛR‚L¹îN„i±gûÉÔSX±rÆíf{¿¶ë •i´é›ïÍœTËÙuIqµ„é;ÞÞƒ‡£Êbë6‡Èj&?O, Ž¢ÒšuÒG++ýGµ4»¼¾pÿuRií¯vØ~Ó^IÛÔÄ‹Î5·¯…#ŸÚNGöÝÈòñPñ©’¹Ökw?€þÅò1¡…ÆöŽtO)œ8vô´ô<˜w°€jÐjó€±K¡ ¹öSRtÉODíM÷Ö-°’ULY:•!¯N65‡*Zß!1­'³h=t\žqôy†Ëí´Ï9#WÙôW–êë {å&·Ø7=Io½[LÈö;9Bí°ä¦»ó¤E'æù›\=ó9Ò°è=!¾cäÞ˜©§<|wÅÅ[¹¡ƒÂTDS<ŸûþçGõŽœËµóòR—pêhÇö¢ùü()¯Pžýâ˜Ùz÷,q¡VþHi5õ©þçá2¢‘ èü‰lãÔ]Åg>M(W4ÆÄœôõ œ¬ëmý¸ËÞ905ˆã%Ù{¸¨ªÃ˜§¥w^Ò|캡<éádD=!byû¶lúΘüpç›&‹íÃóŽù//³GU,g/k –ŽE™ÈªËeIè³Ni%^Ÿæàúy(%5ÛQ­‚Hñê[“ïÖxÕ¿à4gMï÷ÖïÐ\º¨HÓ|E¢fHÐÓÝŸÕà W„ýºRˆõ1Òòþ¡J£ëºÜ?½òò-²J §Í¥¨üè—êµùЧ£Ò|Ñ•"µ³r:tÒ¹~‹î>‚å–œ™^¢rË=Ó¯+tÝÓÎX»ÈêÚÅ3çOíó(ŒP]µ±r%¯!A¸Uû@‡ÁL]¯G‹§û Êþ7J¼Æ¦ù=W¼?ôß¡Nãzº½½kâ/3U$õï²IlK’ág2Ì|ôu—¶åiì~%O7¢åèš"6¾50¤¹jý„Îíµ”Ž̶‚Û¶¹€³‰*K¾£áȃ³ç‡<ÚUf¯¥‰šñCºca \MÌsf)—†¼¿§›ËG¥%/|˜æ“?•[rkqUÔj;bfÝÇÅ}Ö/óoÊú-xxõpl_˜IðÏv ‡­Ù­Ò²Eßm èLªÞ§¸zý;ÛeÌâEþ…”ˆŽV×µW„ŽÖkrÊöîÂDÊr‡ÕWS˜©½ÓHß²§Àe|‰÷¨zPõ(Ÿ.c¡9:åôPoJ©Ïó}ÃùLô–w¦¢à©=VË©ƒcR)c%Áñ…»ë5ËLÀë_»È( ìö¹yfsÏîè+vLÊêOâµ3 5ÌM.ŠÕk˜pÈ 
n]n%Øw_±jÊ¥->’‘¶Xe±„…‘¯©´ìq’®ÝT–ú3lí5.‡jõµ_ŒYç.ÛÕ?³« =“I÷½ßù±ÔкšäžÀërî1ûθwd{¹‹ï¥#¯gÍ¥{Œ»ô½ZQ Vá/+ ¢ÕWMª”Y½Óxcà÷Özœ4_ V첤³A³žÙó°etàW?V¥äÁjõyíužÏÊÚ•·öÎ|)}.…þzçÔNŒVP꺠Ú6¦O? 4§Áýérë7kH ·üVW}³AاxäéÉ|ÍéˆÑý?¿pn|yb[MËæÙ ¿ø0s‚+cÕÂÒ¯[ÒUºÔן™6R<Ë=_¤–m8‹Nýaðœ~ÄÉÈþë¢wQ<¦ËœÎ®{$õDû\> stream xÚuW 8”mÛ–,YR¢$ä‘=ÛŒ}ß÷l£¬c6£13fž±$Y"ûRT¶5R¢,Ù^É–]*D(Ù";Õ?£÷ûû¿÷ûþã9ŽyæZÎë¼®ó¾î9޲¶•ÑC6÷ml«ö&l¾Ir¨Ë™»hDZž±zÜ ßÅvOëþ[3¨æ_Ø«ÍÛµhιúõ\ôhCl|–8I™ÛtËq¹ìƒ8®üÑ“rJ¹/1»¼EkÙ·êòžÑ£;ökç2}˜~'iFsòiå7ÍX]ײ,>¿ŠðÃqç \vgUB†åVó[öÙÈÉs>ÀÎØ¹8›.Ç}QÇ“­µ«a)qÚ™=†cuúÔi+Í£­±v#íÜ¢,ðòÀx4êÕéãîÇÝVðÔò{2®ýàN\#!\^Rj|›ità´eÃȬ%q[>!5 ±#ÂõN8oLä:½ëBýœ)ÒXb‰óy~gúZ;kûÐóH2UO$ðRã÷ F [·§Á}²Á„CBÔêùÏ‘',Ñ,*œ\ÏÅm (íÄRjy+°d‡åvž#¶ý^°´ï Š> ±Ôÿ͆r[®í3Ì_ëëÝÓíBÙâ¥ãÁ¸>ðYðnÑn­Ðþ`ñpI©åÙ¥ð/˜º™[ô…^jZ1鹬ÜíŽ!œU{ÚŒ±xU°s Js;æNúJ7žë[Ù“^â¼2æ`°†u6’ë¢Z…d®àݶ’o[I/}³ZéJôsãW¬…×Íë.ùèÖi:¬&×a¿‹¬ÄŶòc&™¬=ZeïFûb[=µüŒ R!ÈUko©B–ŽSXY˜Å ~ÓSò*JŽ>šðÆ÷ÕÁC¬rYl"Oç3#æ•O‰P ƒ>œÈ.vê_:jp.½Äñ²"sO#žK6œ ‚Öò½¿5„c`Š-ù®—™bÙ(ä·:`º|Íq󉯤Ô'pJsK´¾»]ÊÉ-ùÙŸæšõ‹b\š±ñ®tìyÔ 78וßâèt“½<ŸZãâ&f}‰vÐò–ºpòhŒÔcÁ]O±.æ‰>ƒ³?­Fá†)?…_èIèn¨glzg «\?Çg~ú1¯|–k\€ÿ³6Û´§®wj¹Ýþ¡³éÖ›3³Åy0o†&rÁCá>WmÃþX–G×`±ÔŒW“LÈ‘0ðO,鯶gKìbÁ§¡2îɧX²Íø_Ðqq»´·Ì’ÁÜr­ëiˆ[?¹t97ÈÔ¯öì4aA&d“ÇÔ›Ç^yéWGÕøíÃø¦¾·ŠKs`@D½È°BPÛê·È¹©$O…§6ã">9~äˆ×Í!ŸY¦‡õû°ùv"%ëÊ>‹‡Ã9™ˆ¦Ú?¹êj²¢w±R§SýFŽ›²I×q†Î;1Î ëòn\°9Þ“iö<Ú–öใ›­£¿«YP“ôW渾’‚óp ‡+©lži,½ÙVa:0Α_aíúÂÐjþuN:oV˜(\ç"’i®fPNº52øêký“i&½ð®#cR…`¬®=W¡‘o†éZE«˜¬ÖKØqR’s•½¤ @¾kJ<5”Pr$3¦EÃ,1ÑÌÞÈtûxÇÝÛÑãÙaƒ9__ÖWÿˆÙŽ)zmæ.©å#&ÈÞ]Á0STíÓ]fjWñ¾¥3zµWD£æ5ìëÇøBµañÀøá@Ãä Å#oz6μ8úB=rÕº,s·é¦YÝæµNnE]­†¯©FL:ÆÅËüw4x>©Uð jâ§Y»zn'¼{óìµ·¼×Ÿ¿§óác59•:>qmu.+\ŒœXÒZLj©ØÊËV6ôúÀ&“&~Ð3Õ˜p/š¥4´¡vèGÍÈ{eáëÊéo®Í!GeX¢ß¦sº©Ž<a0å-تe>|"ßêW=ëà©·Ù?màò÷4òTèjNG:Š'ÿ¼Ñ–ß}2f;œûQA "Ô›ùŒcÂëÙ™$Û-³|ñ‚HzÅOžãM…5C Ÿ‹Ã‚8¦ã³(b=ÎHǨ*”ýšä&ó|aȰsj¤moYÏ`æHzRÀ˜ÜQ²mdrìñâÊчÜÙ“æ ø'Cô<úhµØyžÔÎÌ«²½ŽÎ]?£|j¸Éâc×ÇNZEÍaÜ–ª½\é¹H«ÆÈa¾‘ua3Ï›Î^Ùµ€23Ö„aÕsa<<7Øj‘Z}q >10¸NÑ^W7>ÐT½¨›«ø´ÝãŽÊßú¸wßjCý¯ìÏ®ë9+FlOÝ öàÝ•‡-#"Fâ‡Å'’.­Úå$;žE‹æä'µ6·||oÆ:¶º|.âÁ<žïÇ*.8õDèÝFióœáÐú;7ü;U7-]­Ôütý|™‘›|cïëî›±ÌXWúK¼áqƒ¶>q©Cqyø/Éç ú—¼¹ÿî³?’Av=²p¸Ñ²}nKñVü| ¹b\Ð\õ&ÞžÑñæ‹»„žèDÚ„›½î³ykiF´ J 
è;/è`oŸÀ¿ˆBF’—Zf–sÝ©0Þ{¿yÍR»ér§¦4Lëh“TjòxFHpoïëõ1#VtÌÃtÑ\ý©FY»¾‰± ?µgôtu»“ÂæÍH ‚˜{mö°÷ö»éʵ#AÒØ³ÃC¶‘Ú‹GSÝ],*…zšlë˜õ'GcÕmÝ7Èh¾Þ¦ê’Ï/.F–ç™Ì^•åÊ¢oio9” Ãc+ +rLYh²I™“ñ¸A/æ®å5>'É#f™yïLã!Ÿºbxá¢àV¤mJ×Ú\ #’ÅQþ3rÖIf|·i¬HbP‘ÄmU’šÄ¤¹\4é‡Ù‰ò¡KÜþ^«Ýšq"Á}áØ€§i’k›Ø¢:³Maê ¸K(ÉõŽ€õJX+O[—ÐUÔºôT r[Qô¾ŒNÚŽÃrl3e•ÂJ*côFkÈÒ’ÀÓx½U¤øha= D§A†¼× ¹œoŠþ÷z fNfj-ÅŸœlõ%Ÿ~–iúÍ=0¾_rCfÛ;êÄX Þñ¨Î‘ݬëàZ5¶øH6â¡“¥œ’ëôœË>šï$Õ óêÕ >¦{;Dç*kønüóà«5{îÈ©ð¼²FCÔŽÀ}­Û®Qðòå7SKøÌ“9á½q¿¦î-Ô3ö·E5'»·k¾QóàËÿºÕñöGRd)¥ãÞbÞË+ *AïŠ,/rßç¾7Ã?qü—#Ýø3Wœç²»¸cµêž@¶µø9f²šÞÐ6e"È¥§Ö·l—˱ø@^2S5pÈæyõ€Èõ*Ý*„Pä£Û™ÙÓkÈhºª¥Šl·n5þ•‰/Y‘U†l¨žòBº!ÇkNZR:}·B^˜ö•O/AÇ ¹ ¢Q‹Î§:òfxY„䱦Œ½Ì©¿b!¼Xtþ»ÆÀör­Ò‰ø/C/ýafŸàª«æ— .Œiê9Ïå‰U—;TÒ†ƒ€ðªeŸö>ô•©Ž{,nKÐïa)”úû “ÌçxR³¥ÿãËOÈ$‰±ªSØ“2Lø©pòXEIV—ËÁ:dÆ“Î+buüª½å_æé=bv•IYų»´?¡5ø\€Ê<á™qZ|ê’‘ßP86†ÔgŽÐFÞŸ 1))Ÿ9|1Ô7]iÛб®Ì‚Yâ ì–»K+'´ëâ{(gÚùm؟İh/ÌSžíR¹Ñåšq­ý(L¿wÆë“œïdæìÖô’ø^ÓId¦à¹eÐáIÊÃb4û¡çÕec¼'_¾»¸žòÝÑ'Ú?šÂPò`¨¼ë™0cO»É÷ÈNoVÐj€®z£lM§ž=½ŽE-DŒÑùª »‰À³g’)j»ïU‡¿îqXë8ð˜=ôVSä g³:_ói&É—ë ¯>ËY-Ú^s5ù¼YVÀ÷ñ’gÂ5¶´w) Ÿ#>П tšÏ-Ï÷ëÊC¿. é!ärÿÌLŽä_?3Ø!2^h›ø+¥ó«°þ…–rî&`w=!Å-!ÈR`+¸øüÿ¥.Vendstream endobj 133 0 obj << /Filter /FlateDecode /Length1 1144 /Length2 1556 /Length3 0 /Length 2342 >> stream xÚuS XLû&72xHYR7¿$•4[¥T’V¥”ÕCÓœ33‡3çŒ3gÒH\B—Ð*©ˆd¹É%%•²–ˆP¦tÛ³ó?3¶{Ýûæyfæ÷mïû½ß÷øú›Ï‡‰pÄÀ)s“m |¡5„7D‰›M¿Ù ##g(”À] ±n$ ¼!9àXÇ–cm˵\6›K.ñ–I!!á l6àX)€Q>Â!Š3XJ,\@Žå;,“|óE ¤”F&_ÑM 8&0"`°œ ‰œD…" ˜˜ðM Š à+B³”تo.óç¬E…ò`ò·ìÿŸLÛ¥‰†Ë(2FH@‰à¾(¸#8BBŽ¡|àEÅ¥t]EIlY¬Õ«W3…¸ŒIBŸ†ÂÅJ0&I™2Á|)€€T‚ðQºÉG$JQg BŠQ©ªmT „$„+‘) 8“Á OÛ´j@B´_L{èR¾„”ò瓨„ |]܀ŔÑø.¥ÛB@GÂ_&Fèüo> Bq) HJ‰ŽÐsJ0HNãÒ¥$$ª¢ “¢¸ðú,ZU!DÂ"UÕUªBׂQeRe"öUH"Áäª\Bõ¥¤&ø×|Üd¶ÓJ*·(×ï_!Åä?‚þ+ƃ‚hóq!­û‹ •º¡‘ì‹R|@MNeTÎCq„QŘsØìŸ|"”¿WvkõÕ„|Ù?D(à ò'ÿÌš–MÅ™åâäçìaöýºTnWœOÀJ¸V³D’œA ý²QzÈ0©\`1q‚¢S€DFEÓà Ê›±f–¯UÿÄõ¥çKÈ%ßuPQ½9?Þ4KÙô¡)›W}~üý^œœˆHenÉæ\ `cmæXÎŽþg$_F’ô°U§MKòí­ZN‰DøŒ†{ßnÓŠ´Œ=Ô‰IY•Õ.KëÖÚ[XLârŸ}¼Ô?Çã×s‚ò„߈5Mûx58xÁòWQ…ëMªÃ5¦',Œ 
ÜågJíI.*º¾¬{FRµwûÑúö„3e­Î”ÂìÆæ¡NñzF”Vž¯ëңÚ/îW„è…«Ÿ}ì\êÿan#Þ]F͉<|ÓA§ºÓS³·}a5g\IÜŽ}ÆälôrRpW~“1vò¸lK™2ûV|ÖÉò¹]¹œÐƒ´w‰{|F•:Xìì00ü(z§ÉÇëZàd­bŸS3Ò«øOÄ&{šõK›õì'¦3-¡‹Ç̺3$øÞ§ÀNWýèý—/ëÉ»ä;×0yδ»~qý“¨>ìúQQÜõ•~—œªÐSù‹NwV{—lTÈïS½N§èjO&òzôÏã[^$ºëaEú¼ŽNN¡¡}wPʲ¤]ž†T@-Åj­›xÇzÝÒÂ/oÙV¹. ¤x¿ö•úÅü•üðJ^Âú…VNk9ÉÜ·ÞÞn3ˆ&ÍKjÇcYÖëVmvpÄXÿdLέRT-ÃFûbaìí’ɮׂÇ^ÍèÜÝZi”t¹~é@ʈ§6ò3ßé¶®Óê(æZż:{fáë“vùn¹ö§Ýü§WªÝ}_ 3Ö_ò›¹lF¬6:=·÷¢‰ÏiA¼æpÜHÑ ;JnŸgHíò“ë_:…fë‘o6Ìœ9z‡Åئ”=ûË[y3Æ›UŽþœ«Û—Yc¹#¶ÿêœuaAž Wû&Vɼ²åyQÈùuaON•Mî ^u~nÙ ;H¨Ñ2æ ³pXW]yÅ žÑÖÊ´-O…ËñΩ‰!µ£Àƒ¸ IÙO‹ñÖûù`–»aöŠ‚Œ8=«RfûT·í®¦Ð´=y}£%‰{ÓšWÝ#Ì,ë–[Ÿé>rûBNP55”ÇGdW4nöy޶}·ÈáFõçšwçÎ8}*É­5Ó?ßj…v‰ùɈå½mæÛæù š]ÆŒALÏpæVw¥®„uüŸ­ÏAžXÝø¤ÏögÚkOÎÓ¿» Ù\7¬oÜñ\3EÍôÄýoÃ'¼ù+fÏãmÿ¸Ö”ºÒÁì-ŸÔû=ܵ¿§Bq®NÜž6ñèÊÚ”ÇãÍ’³m Dð¡¾ÞÇÏ”L‹{DVô‰ œüGÝHõÊð¨ë¿Ú•?CQÄýYb†uîV¼9'.4Žl_ÓÙ=<*hK© 3Ú(bÊ8HÅí&ÂöÀ¾M—'° uNݳÊÐ÷HϳÕü æ|é1òAW×`)sÉÓµ±KeœÝËó| >ö[ÃeŠKc*z¶ÝÝàeݹ3ï믛$0”Å–5fû u¯Nê×çíc/ΩÈ/ZÚ_z×ÁnÀÂÂ'©¸ßGFS½ÜqtƒÆ»[^ðÝÄD»·µÑ©VA…ж…ãö–Y.OîP›â{¤8jÚœ`õÂ궉 {Aò–Ì:ë*¬ñþo™©ñ½æýÞpmkÛÚÓÅ·ÞW}Fæ±_³to¹_S nzÔ<5aÜ­‘VÜ}']ïý qSh´ÕYÞ *¾-uü•™åqM+~1\öö´Ð´|ŸIx Ë-¿¥ïzÕ²Žkxö‡¦MÜ®y5×Wð^¯tøÂm>ËpXæï¶ûyÉ&мp5z§É=´a]ý¶ä¾wç¸û†V”oÜ|xøÞ’Ê›hM׈Cœß<’?ïÛâµ(R³"ºÊhN\cž.V÷®(ÐâìŸØ°dI¯yí¡÷üÆÖ”YD»z”®ÅÍÒú¢þœò'3¢îqœ3qÚä­W^ÈþÐÀnì’O‘îÈR»vdx¦æL§à ‡n¹Z4³B7li6ÓÑ™º5>lˆv®ÎÑW–g˜³³çOÿž ‚cÌÁú_/%Õ×¢c/:vk­»tâÎ0ý‹‹ÖlûØ•+©3Ñ´íšì'Úq<^½Êº£{ë¹Ùë9o«9Qµ9$µôž#r\›Ø¬&ä?õ9Áó輦Æ×vD "c#žÛz'Ë»³ª-¬´­êÉíâ€ü)Œ’åendstream endobj 134 0 obj << /Filter /FlateDecode /Length1 1188 /Length2 2758 /Length3 0 /Length 3594 >> stream xÚuT <”ýþìcÈúI$½%[–™±†²‡J¶cæ…13fÞam¾D²ÓWB(”J*’ÒB²/•"¤ø"[÷Ýïvo÷ÞßûûÍ;ÿóÎyÎyÎ9¯ò6W- <-´¡Q!-Œ6ÚpÁ†Óöc!’Ö>K!ã46£u‘ÊÊV  ‘iTk,6 2°Ë0†cŒ14ÖÙè Ñ:0Ð}?‹‰% l@£Œ>÷…Ä`< ‘LE¢¸”û¨€Ñûadzèß…‚ &̨ý“]€¹ñ4*…àAeE£sd" ÔÔpê€5$Ò«Éͽþ«£ý«— "ã@@íß¼ÿ¿3lgB r ñ‹ŠD['7À¤‚ ,ö à*ä¥2á¸$¢£PaaaÚD*K›Æ ¢p0$@("¢ ±!umÀ‚ `&Ä‘á Ò¹¢jtLf®—MfD–Êe†h™Š£°ð LÛ °jAƒïƒá8” ¹âd:Ѐ‹µ @ S¸h,àhT&\ `$ž†cƒ°ÿßw–LeȆ¸< Ü&‚åÀ¼p(:ƒ¼ž‹I¦²kª± <d®ÇåªÇ“¹e0¹Ž” ‚¥Ó)œu_Ú:ê_üdˆ RÿÕ…â„ †•ä!ÀBàÇþL¦p~b¹Ð_1?<-¨DX -#m}ô3™iCfƒx2„#,ÎsÝîÆm1…La=×K´0hô/w‡Hd\•[øßá<Àƒt$²(XÆ/I€Tü¯™Ã ®çrÙ¿o¯ó!_÷mµ—Š£á¹ÊéèXËAÂkŸô Üv<ÈæŽ @iSiìÐYÐQ¸= 
$w‹0h#Ìâœ1úŠI&c×Mÿ™‘ <Ð!Ð?S\?c~žá d6à†·‘+Ëúóóÿ‘ÿQ¥¥% Dh¢-CCÀ­êêýO$ŽÅ`À±¾ÿ°XŸ×'Ù ÙÛEÙœ ¼™Ý–ÍyÚdíݹ[WWVGgdµvÞhß– ÂãäØz¾·ÛW==íü§"îïSk @ìHöJº¡ØrÊëvó§=xðÜgV%µiÿäµ¶Éäòúq+hBãe4efÒ-ûк§÷ƒi -úTçNxRðz(p¿ßªíˆëÊž>êl=dÄ.|m*×4m/ùeÒ¡ #Qs6á’*Ã@Žü(ÕóSÉ[UÊâ .]Þ 5)çÎã=ŸB¯c޼AnH!ŽÅmÚd>eÓdxéú =d¹ýdãô_|ªêI‹Î$)e×ú_™! ÷÷´ÏÆEŒ´j‰=|ÕÙ°5]l&ebÓÞÎS‘o®—±îX†9þþ8hõW½^¦ÒÒ€¶ºw‰xZÝ3mcF­÷ªÎ¼¡r¦u׆—•Œê# 1:¯C¥­Õív1z ’tˆÊ‰3þƒ4½ÚÍWjqÖ˜Çæ‘v?Ûã‚ZòÒè÷¡ǧ¾YíNáÛ=æ~\¾7Uš"V©‚ãÙÔÊOõ´o„ÒrC-·‹À^©Ež^J#ŸR‘Æåþ¯ú6éÕA8“35Ìû>ã…áƒ÷V_בöÙ¼–Ÿ¤[Ô´8*š-B¹Ú¹7¥ïº©¨{û2"«Tô[út€\~4ªúæZÆBšJjÁtùø»íæ­rzÈJªµ~>Z÷ÙÏ'úÒŽW®§"è·;¬œëŸaBm|Ÿ« [rÚÌcÏøcm–Î3ÃÝ»6#Rb–ãá¶ÅÌ¿:.}€Ú´Ú+ª=ÐÑÖph©i ƒ§ÓËB3¢}ñyîó³ä2ÞÛŒnëgT ä »OH$ê¼¼ì>_½{\¯6=IwÌï)õv±Š+šºk²÷›FHA–ƒˆû]c¾|²ø©«*¦öºêOv;˜Ëøí$Ѿí0°k¸:ØXw¯¢f·S :9ŒU™øøÓ–Š¥óÆè¬¡ó¢– &ƒ½avÞ¾¥(15¯Û±èz’ôY)è{ñçÛÈ6¢òVa ¯iÇ™CQ÷Q''¯ÜUB<³@ëQ+ß^9•ò¨'&mìÃÊžŠ+ý»ø¤.tâåA”’uúMãîåíUáb‡ÑYËlÒÕìx±º¥kéñŒ ’áËþ€Y`Ào^¬“zó¯pjø\ñ‡­’Ož|hKTsÂÛ3âwoë@A~c¢e040Š -³oæ¸ …”ŸmOÔ ØÒ1°L ?¼ï‘yÞÖymô‹°æ}¯ƒÐx1ŸG¹RY/ÃîÂ:Hoqwê¨A=Èz$»Ô:ËFPÒ>&¸ñÅ)³·¬·eéÅ'Êz£ÅüŸ±“cAõ¥ãÍ]üfñ²žÍu»X3ìÙf‹IX 7=­Ps,½c¥Qgkõs2ŠØbò±îDã7!ò]”âÓè£S~j ˜ `ÒZAQCZy>MæU¨ÿÖ‡ìS7 ãý¿ôˆð epU°xr°ü’æíQ¢7ñ”+î`\>·ã)r!â˜]CmÒ˜AζWµ{ùzÓš]«…ÚÀ/çŠN˜Ým5j’$Œ¤)š8:«l|£øN±¡{§ÏW¡KŒ²Ñe›W ¿ÍŒ4Ê¢™#þýjßd—ÖýüØÔ™CÃy›wf6}Š-^ž“ììçvÇÒO½(L0â4?X‘ßëuYdñàë÷b!ŽïYQ™v#à£P­YµÝO†MçÙ27ZÜù–n¹Ò·•7ßëvrþ ͬ6<¿Ôøö’ó")<³@âõÍmíïåòn®Uo c#Ï|ª²ÅKÛ Ž× ÙûtÞ{ýתªO§ÉR¯ô“¯õÍ»ø•(‚r‚kK.Yò'j‹hhšt¦oW••® ·ýæ´ðDÙ‚sB¬J\cÛÖ¤MEDÆ£/Vßl0pËT–´æ‰¶'à .½ÚÍ|×ߤ'ãu–ßéœnÝá‘|OÝ!DB» ®/äÓ¶aHq¨5$¾­‰·ÀTDz£'¥Ì–ÔNÌœ£<–>Û)þüßÞÅendstream endobj 135 0 obj << /Filter /FlateDecode /Length1 1614 /Length2 12956 /Length3 0 /Length 13779 >> stream xÚ­xct$Ü–vlÛ©¤cÛ¶m«b£cÛ¶Ý1;IǶíŽÍî8éäËûÞ¹sgÝ™_óÍZ«ÎƳŸ½Ÿ}ΪUä$JªôÂf&@ {Wzf&€ºŠ¦’±­­±™•ƒ½ˆƒ­àÓÌGN.ê 4vµr°3vò4f1 )€…ÀÌÍÍ Gupôt¶²°tP}bPÓÒÒýËòWÀÄóŸžÏL+ {Åç—¯@[G; ½ë'Äÿ:Q¸ZæV¶@€¨¢’¶´‚$€JRA ´:Û”ÜLl­LrV¦@{ 5ÀÜÁ`ûÀÔÁÞÌê¯Ö\>±„]ÆG ©ÕgÐÃèø—‹àt¶³rqùü°rX8Û»~ÎÀÕ`eojëföO»¹Ãß„>#ì>}Ÿ`J.®.¦ÎVŽ®€ÏªJbÿàéjiìúWm«O7ÀÁü3ÒÌÁÔí¯–þö}Â|z]­ì]®@׿j™fV.Ž¶ÆžŸµ?Á­þ¦áæbeoñ/tg …±³™-ÐÅåæû¯éü«OÀéÞØÑÑÖóïl‡¿£þ“ƒ•« ÐÖœޙ峦©ëgm +{8Æ¿VEÚÞÜÀÌô»™›ã?}_Έꯡþ$alæ`oë 
0šÃ1*8¸~–PýïTfø¿ùÿ@âÿÿOäýÿ÷ß5ú/—øÿ÷>ÿ;´„›­­‚±ðï$À?߀à¯Gæ¿ÛYÙzþOáÿ© üÇÿ EÚÕøsÂöŸb010ýÃhå"aå4S²r5µ˜Û~Néo»º½ÐÙÖÊø©æßƒÐ331ý›OÍÒÊÔÆþ¯±³ÿô7ûwêŸýMœQJ\YLAƒö¿¿¨Ç)}*ïªæéøIí?:‘w0ûÏÃ_(""ozfv=+óç…ûäÃÍÊîû?Tüˆù_gycWg+€îgÛŸ™5ÿŸôÿ FÜÞÔÁì¯]Qu5¶7û\¯ÿ4üå6usvþTõïÿÙô?Ï/:è4…[[v0å ±ÎÈÎt­ÇΞÓíïeu,oR+) ¨uèñψØá®2z« ehžæyÿá¹tæøç@†æp´Ë–²' xUHàû…º¯u‹¢ƒ“ö0ˆÑ 1ó\3ÆûzQî'„“ÆáŠAÙát«3Ìõ#uÀ—¯EdŽH~¦éñ˜(Í hõÅgçÉ'”ƒc#ÃC=·}ø´yñ°ä¼ÆØ~©g$)®žFÎwM¦ï/_Ù+`<¹&i·UC£äNÁóO´ÁŒó4ùIjN*ðËý[¼úȦ÷¹fá\¨l¼þ(~0 E±ÿŠ¢_ØwGyjª6˜žŸ+†-Þó`˜6HVÓAxIS\þØ Á•OHKš¡KgŠ÷‰vžšÕ†áÛ@ €[èß““”¸3ÌD¿úé1hµ]4ÂN ÅP¼‰ôQ¿wF‚Ä}T0¸„TMÉõ©»Â…~߃® h £–;);@ô‡üñÛhŽé>ä§ÁÛB»nºz³îé\²ÞæUc&@‰©è}5á3.ÜX³ýª©äŒ÷M¿y~#ýp›¢œÃ”âÝÏRŒuT¥(‡?ò8J%MÔa<šãzE@‘%Q•÷"Ý‹… )æi]#u`ŒéÉv £°óGïï/Y.$ÚdsƒÆ9Bn˜^ñ8KG W‹::¯9s´¦†œdö2/س1Ûäñ¢Ù–ˆx½oâ´J÷Š¢gÖÉ7ütE9¬° ¿A,kBÅ ´5™ ÞC`³Æ ÃBaöÚ‘%߆㎫^kÖð¾>e´Øt•™ÚÇNmã3A|ã„Ü‚ÊÆz1zü³ì/Vq qAT•+æ;_°@N‚œ‚nƒHƒÿG£úû—J©;^ª‡ŒUÉuŸKçé—i]]dÅb`àØºW´–ëëåô.òz¨ý®û¬ •í·Ää… *G)“L—sä±È¤þ|¥q‡ßÐ…_j E…Ò¥Î- „ŠŒÑ#Ðìê™Éé»ü„T–…èÄ6%´†”JLiÐ^îký$á wÍWû ::èálÄäÝÚ!0¢Ë´psÎvXhí2.”óäÿPþ8 Å»Ÿ¾ÇLª³¦Af‹]á#üB "—Öˆ‡cÛ¬ ˆ¢Û¼Ñ/‹’¤Û—o ë³o)§ÁŸSÂ!K¨¼9©Z*ð–Xz¶pÕ+µŠì®{ êòÇ8ÔúÃ'P¨I˜tAŒ+_‡©&ìÀY7¾n7ïúl¤"¯$ú I ¢§ÖÊdZ%+<ήì@ßu"­£ÉNsÈÞ±(¤ñ– ó(Š4 ©»/K9)öx1/ž¡î(ÝFæß×w,%§ÑcøÝüb»ë¯Ù­¹tSƒ¤Ê³›”âH9w(R É»ø…GåÁë‡$R#¼‘K> «@ÓfàInBQ‹¬¬ŽuoÈóCÒ>~½ëëÏ Ð¿źCœ> '¯ch `‰N½¸¹µaMïàLçkI¦ë®i!¶ÿ¼‚Œ[ð%[P?¸Ó|óŽƒC›N÷ÀƒúÃT¨‚cÿ[_»#]:àiV2’}Ƚrü4zÒÌ›“ŒÄPÕð41 *ìAS.f]'Ö\§ÕoF2Çj±Î¶âFÞ›‡WjÂɇ/‚‡‘àz¶Ë‰:¨ûÁ/oýúÚß:±,NbÅ$ò¿OŸ=}|äÀ~Ý•ïp¥³Lg/Gä×+à×ø0w Ò Çòךs´KáöŸŒYÊØûz-kÒw²Ñ›{5/5Ñ,7î[&y;ƒò˦C!¿Z'#ÎP•t0Ç(6ZÖygú;[êWöì9ÇÅ„FzjãÇùk_0ã[[,ìêÁe;—,6Ùa èbÇJûÆT‰ÓÇZ¬(2Aä-Ž™ŽÐøóï²rÝ32~ù†¥/Îs%Âð¢æÓ ¨a°È(DÔp!ç„W4ÂW²à bX.àYáÓñyŸ½B}øé¬B8£<-·lDGò`ý€ J ZoÂØÍ@@h²Î¼<•R*74­MMg¯â  ¢Þrìú°ä(Ãõ}Dšñ\ôøµbz‘t²b«}þ([æ„2Lnn¶ñ½`éLÓnì´*8êýâj©)¼&DÛ«‚´„ƒ&òÀÁw _ÝK.,ø;µznb“'/à)‘ƒK‹²5q§ºax¹E‘}§ yê€`7‚Ƶ~!¸&Œµz—u¯°Ïß&ò¥wv5(;S4¬ÎÍŹRߟôãM›-|‘_/»ÔY*ÂÍ;:uÝV#~.éW›XµUön˵…ÊOcJÌ¡H·VžÉ; Z¾P‰aÖ"-7WØlÚç{3¡É8‰Ž!ÄãðüÕýp}Ù0eÄ9dív4EÕ'>^Ï ’ŸAmyô³z*y%¹Þ‰¬ ‰‰¤ Nͮ¹‰ò?·_8.Ÿ´3KÃÃD$zËYðN˜“‘aîþég ÙßÔï·u3C!žëÕD ëwçH7ÂÉy«ÉHô4S8iž­/I×4þ‰c¦w²•[4vbÀ˜zºB8•ÑW‹áhê3\n*½»j·y:e@.%i¢ôÔ– ¼^¢ çOù5~âE ²Á+qØ$dv甬Óð<3!+â85ê+¾Žq€TL` 
-}¾‘`/:¦n?ƒxÝ<$•’çútÝ’^vZú[¸ŒßÈY¿œ¥"ñá)jÓvÉV›%z•þ¨¨'Žø¡¿ðCö" ÅÏ줡åŽ3¡òE?ie-=„î÷¸ÝËç¬+ääñæ $΂iÛ‘HV˜DŸÅçR£Á@jÍŒ®¤xÆÕ?²Ž]¡w˜$Þµ T9´Y/nìµ"fvÂ¥m„W,ïUË–èý»Î£Àþ ›šËš[d¬¬'¢nð¨ÎeøÓÖohÖ! 9§XLî–Z±§JógõØ¢*\®>#¹{LfBPCw,—ãÃ>uyÈ Ðw”J,ã3S G§›Û®f¤ÁãÝDïR§tÑbn½±àTµ`|¸F¿ŒŽn©Ÿ¡„\|¶c$l"Ê~Cf£HÃ\µs¨§%Oâ/!–UØV@Ñ4.é+7Œ6»óÑøs•×<…(+7ö‹bjÛÒ8'Ùz'ÁI»Ål‹Ñ)U–HèY4{І–§Rןëò'Ø(dÈÀ[åj¾ ^Ëã£J Ô±o]çl¾Î&PÔÖPYE£V+€í?”q¿ßeÚ¤›§¨b툹ÿ¼â)¥¥í´ !37:Vs9@:1ƒŒ®ãÉ?Œ¶ÖãN2SæÌû®«ìKÂ÷øŸGÙÜIxÍì×á™õèHÔü2H’èE ,çàáì4¶ŠïkÏøÆOìÞ1¸d›…~ï@d˜Òð¢ç¯íɳü5jΙ{`ÝÌæéÍÌ`8åÂ@…:ÀÓZîõý}6ºõ6¾•yl:øaªfžV`xožtôýÕõ4þ¤Ú™ DçÕêÂJ>7Éfiða‡Še#Zˆ¨Çh¢Pôõ›ëå}À·¸Sy€Àm®Ùq=WÓùWaøòîunGÿ’Û¢º¤2EêØ~·†×ºŽF<¶z$gBEgÔïß'#™÷&!wá)Ã|qè§Î·5Uw÷§¹ MÏ·lð R€è›®HôM{²zØER‚Ï::–Û‚˜Ð}¼{î.BõªÚXÍ»Á † Ù®+O0‘=«KI!¹q—;v«œYi À1K‹ð¦}8ý¨8ÌËŸFám©õH}¦Ç ê‡/|¸;wÙ†,Ñú³`AŠÓ\«øG(nÇźÓÒ\æÛ~cñW°'¬‹Wëó•ÝHM8ñtãBÜÙ«Co#ñmªÜ7?º+TCwÚm"òB4ì?%hÄÙÎâìNú«TüÃp½H!ÛoˆÁøšÚ¬c¤ã~P¥¸G‰qJÄÑl<ê­¾½mÒmvÝA>âTŽ¢“öËêp—i×/§Þíà¨Ù—f× µRùÛ>áð1,½‰,íÁrÊˆë² 00¨Ü™¦ßžl]è0Dºù]ÃBÖÇëû«=ƒÃ7oÐç úKÁ„«ä>žÓ%hÞ4)‰5W1ú¢ ÁFD¹hºc Á†W ÷Ád–â*ž¯„¡'»,õQ: ÏîÊ`੬I¨|iºÌ£(]T 'õ.$%¹5÷­a4^¥–iUf2áPÈSJ šB+]f6¯ÙÐëÔÆÊæï±Á»/Nb'©sddle@2éÙ Æ ÃáÀîÁ½þ¾~«þ¼yí9ؼ=ÖývÆjìO5Ù½¤=˜ò۔Ęžj¥4[QH-Üýs‚悚Fe¶†_vz –ìÇ¡„U×ïä—uOêg'¾´±!ú‚I©_;OÓaŽj‹Ü;ÛP€“⑘é'Âc¯Ç´“›×­œ‡e¥4åŒ"ȲPÍUûľ¾†#¢¨§ÚFì2.ùv­R³m$ÏÒN$þÓ„P¦*úvßW°‹®sSå+׺úQZ &ƃFdTÃMòÀ#õtS¬•s…jæÙb¾q5Ž[Ñé(³M ©È—'CÈ””ß$ÔÔ ¶k!vÏ'#M»$Ô;øç™Â–DNÑßB"P\wç ºn£{E!Ú´ÿñ¸Ÿ9ãžBEM¿B/ÒÏÀ·9pέg$1µÔL!#B“'«Q¨$˜†ËK™²’4f§E1àA.󡺔iœÙÞþI¾Vš ÉúÑS¾.5•Ô„R]°Ñ¥P¸| ¡ÜK8™ÔV gøC‚uô,³€-vXäx½¢Z¸¦¿~3Ö Z/«¸à’žõé˱Îbvy­iCÍ{|5)‘˜RïXÊüK¼BAþ0…›»zæŽÙº*ZmÊu¢onƒFÚ®#{dþøÄ/0Ù~&à ò¨;,Œ½Œ¨ôçÇÒòCN »ç=ç|p¤6ÿ}cÅ=yÎÚTµ¬•JpKýŠ*>b‰Wžå¦Å“£6lö*¦#½ñ;!š¶<B“ܶAšb(ˆ|ÌF»±¼6ˆžLJ]N—ý2¹Ý¼H0œ¶°—Éã•'Ûþ 38@ôɶñ½GƒúTJЗ¬½)$ý¾Œ-…f¾ßž6øÙ ˜ç› ª…¢cö‘§½g4¶^½¯YXr9_|çã¥á6×_`²¨NÎÒ}A0Ò€x!–ºé# ²%q2½2ÏigЉ¨E9)dÒõÙgŒu°1Lˆ8´æîWP³ö†vçSÝ Qä‚Bµ'•l÷ÑsðT}÷À¤­({nO‹ñ³ã¼­÷J¬²PkðLz½SÓÉx®Æ¤]~;­vQÇä`ݺ_„©ˆ;nÉÌv|ª ) hߟà ¿"ÇGˆ¡©[I=9©hÑ„,ë,²%ú ĺáÜK¥›Œº†þ„‹ Dþžu§°ænR” @–3GãF\S&䦰DL×0<˜Ûê±,Q3ómòq È_ß`§#>¸ C Š<½ý¾—gxb¼3»aÊþ&cCy\ÒÆŒ5Ÿ>bÑûèk|§T5ç™êYÀQé Ø+ ‚O€ñ°+p¬ï¹ôNŠu–ÁÍQ6á€Õ媷ëöƒÄÊ?7ÿ&€]û½6Z1H¤›2#kƒKw’êF8G}ßFöö¸6vj^ ×î»î¨ë  
nš{ìz\"¹+Ù®‚€k‘§ÍÏ0¨,³¶üf<Fïf}ƒBs£ Á£áRE!Ç1q>?Y«øÅòj£œ—õf\wª,â!à35o¹¶…¤¯÷Cµ±Ql£rî[›ˆ ðiéìK®6* aÓ·.Dçé¾]¼‚[Ðv*yè,_¨Õ•=$„/`¬ž¡\,/Í$[)ïßnšÏ“D™£Z¡uí†B/ƒ†j63퇖wÍ ¬OÇ6Åìæs_z#BNç}û>Þ(±¯dù.rÃøQw// ¿¬úRíEôÙoG3¼†•ŸóçàŧSH@?¼ÿ4x!Ö°@áŽ×tf+Ï)“÷…j=);?>ú:tšµ@ŠŽ^eCûš“"”sooƒ+Dç$V:>rÊCõÕlŸX ‘"â‚D=óþÕ ’€VÔ}ìÈ)ö›)"*%(j/9ßž#\¹u¯žÑ8ì5\°Ë³fZ´+bŒÊ|jë]}ª5eÆ~ˆrÌ(ˆLÜ5ü—VIÌ·ìTië†>³¹MöÌMìû‡z"¦Æµ§¸”›Í‡ñ¨`ø©.3ü< A@Ýá¬Ìîá÷ës‹÷-=lD‹Uß‚_eAûHªž—ýÂë¼l’ù¸×*Ï8ÁöÞbÑ> Pê£/ä˜õ\¨!¢ÜßćO-×+#fZÅ=õäéIª/dÙC¶ºÀ›ÄWÍGz ï¤yËžúëÏôž”K#´·*…4̈šM,ŠcCz$©‚õ!bLËí®½²üÊU6 m]WU “ßß À±%¼ÒwlÛ¤ÈQ§„¯Eîp¼›ÐAK²îq"=+Ü:¹bųp´ GähX 8xƒNÍVËÊL²%"F} µ®Â%Tì%ª™,…ÍSÇÙ5PAÿð¿Öã%@¡òiõšÄ›¯ÉX\BHÿNAƒ ´Ö àŽöv3PÄUƒÜ2Èõ2C”ºãü®ÏvüvÚúËü©uß°µ4£xÞ9­?ûp[VÆ×8f ‡¾Ò§zTöŒ‚ˆ¼@L»ÍãofÚª‡<‡._ðÅ5…£0©Ú­©%ƒ×ôÀš6À‰šà³¹ ¢¨ž¡äÈñö,:û/ 9Úoï¾Ã³…mâ ™·®b¤lÛx:ßõiý+em(¥âB1Ý×ÑÕ!Cž‹Í …²öEY(¶Û LÙ‘’•eƒŠja]}’ó²Ü‘…Ó"úÎÒ,ί[°e %TÄ· ¨Û”÷6Z¨Q„ ™žÉì=â×Fµ{\z-/*M—w²‘Í_mª)þ-Ex)ìiödïÊ#kº(±^bxeôRØVÅèL-bt'¢´YðJ— Œhn µv*ļM•–|ê¸Ï+Ml~€Jä0œ×=©ñCÖq°ˆƒ¢’P Vþ[EøJ]»î­‹C*G//æ³ûoµó Ù–ìßïfÅFÄÆRArÓœŸØ¹Ã¨àU׃_œÀ-Ò@4üS„ÛXåDÞ«´ê-N'úB5)W N≽w²Ò ªvueM:pÞ3w·bû o–¡­ÙJÚX/êõ[•µA*ÖdqyÌ܃°Œ‘èt€lDª–€›H>ݧ—ý)#AM\ µ+·Ìaù—ÛnŠøúÌY”ù Ç‚ƒ(Ûõͱ,™UBB£Æ=?$×ÅÓ½BmÈØ>‰øi]r&}±£JÖ…;Ü÷´yáp*E5ƒ5Ó`]º³¹{&%ÙçPû´Ç<¹Îü¸;uþÇÉþšº3‚&CM=‘¥9 $ù÷§š]˜l¡¯/€Pfe~üÖ])ÝKÄm“[”Œ›KÎx¢ôk_e›L¨o°þ@pD—“'ŒÄfñ-ƒîû9³Æ~¹øtr-Š­í!ýÛ#ÞVE×C–ª…·ýdæx÷lI΄%{èoÿ^í?·þ½0Œƒ)y¹>„¸3évg•çÂ.{x?z÷‡{E}ΑΠ|˜Ä¤adPß»áÀ3U´´Z¸®†MÀ4#¨>B)²Q´uª^VOt¾=]ÒËñ;³ñá«nê„4ßû¥æ·ÞO ìÁ£Œ@õA7,Ì<Òè‚×k°¾»._c»»YB`ƒšÐÑÛjÚ÷^Ü—pÕÀOBŠÚñEžãŒÿ×åS,‹ék^zíÇÇe :q¢*‰Izì\{ò a$:«JPÞüù±.¡¨¿§ãÒõ) )Aï”Ü䣞ջµ¬ýðB¹\œ \ËUóÁšÒI¸o›Ô˜¢¼¢æÍ®Å•Á–UKìOEÑÃÄdºã,:ÕÊøn,9ý˜³KZ&z ù–˜…CÍÅ"ˆ‚ÕäPVÈÖáÒG¦þ¹:|ê2™¯nwSqÓ —ójO5>l¢ÉÛ›Öv°WOè+p75:¼ÔQü“÷ÛD»±#ÃäÊŒ3™|ù°­,òà bûìÚÓ_ëø.XVß¿†ÌÀ·|h”có­“ !LL;ŽFЇF˜)Ñ^ë›`glÛ²Zïw;e9EOB™­dâ¸{ën—Ê}ÞËk°“´æßäñnqú‡Çâ2Ò­¸í£1&!Ð6k?8*6)Ìöåò»2HQ5N¦Ëk­Aq¨‰oü¡ZHFì<¦:´,ưú§ZüYÛÑ5?~6()RýÚƒÁ ®h©BK™d+9Ù#K’öj;†jžÌÕõÓªM`{–"[Õ–%ÕØ©¾ÜG8ÏOòÉݯÝÙCj’SfÃît{©2Æ ¢ÍÈ„T‡èL ØVKm¬=1¢µÿ:£ùÁ iÇ`S~s«H¶:NÏõ½Þ5÷·jÝã÷–ñaÑÆÙ` gßsz.Q“'BOsœ.»,îWgΑ5¶‚IC!¾ú|ÿ %‹†„I5gÞUǰ‘ì× Å^çFð»ûìÝšdsGüe.Má½fâUcü"qb Â%ZMœà_t)¾fíÛ*Ž9›Ðx ¼¨ÁfúD°&Na§wˆ¿“Çon*£«À„¹4l› 
™ú'štÅ‚M­P'“j߈XO¡\WR< %¸¸¾82ÌÿÞ}üæáÊï·„ÙÄM,«˜Bƒ²ô¨aÆûÿ£íO–:šR4d3MìáDm¼Ed+z¹Á&$=Qe{7Ÿÿ²Ö8d°£XTBöµ—öÈÓZH•mvùŠvÎëí§Xï7mÕÒ+¹Ý- xû0Þ|€¤kbþm6Ò§Þ<ÑèV}D»*”3w@Ú®œQˆ‘¯ndËŦ~0<ÌÒ Í»’¤Tè±Ê¡3Ç;„tõáçLnIø÷¯3Mq³ګcB"˜4îLº­Nˆ +; |0“N* ÌEÂÈ1—¶½>Ž®ˆüS ¤ŠùR n¾…‹ȃ8#Ó—¿5–¾ïÅ_ó ƒµT„fŸüÿTõ†tÖ®*=wÍmGÒîÖç’Eœã§©õN!ólDñ7$äÎ3¬_»Rò”´À=ò*)^Ða¸^Å9ô°‘1ÃXúüûi\9ŠÛúwkA’ü‘©¨šÖ4Ô °‡«† d%]tðÄÆ'})ÓŸm§á@ ÍŠ ¥¹7zÓ¢ÍD¾ÎAí;ÊÙö4&­Å6ˆÕ£=qØw DKÓ­±¸ƒ‹=IÙ¥à¾Ö^Ý쬟«³nÞéÀq`cÕbs eÎÈGq»0Ø/ëdAà >¥ùGÄšÄgÚu#Løi†PÕl[lXG–¦}‘©<^hÙGß?Ñàõ¼s¦¹2IñÈKSAÆ$$¶+aš‘M„<Ü@J^ñ žz†œk3ŸwÐprÖ#ê¯là"û.JN¡½ábHCͼfÓ6oº\©ÄðCê.y½µ®ï·-„}L|a1-C«™Ø#_Iš@Ô´HŸÆˆóªT—þú l‰v?'C²`Ñ™U2ªµ#ùÂXÓs¨ðÐÑ "7ÑŠG©òUÇ yÞ U a{_½D Ä,}RMQØ{Ìa;^#û.n¸ŒëQ7ç“ ?Ð¥.oØPíuPcË«VýÕåÄ6µ¼ /Ÿ`©…‘ú¥ùÌ«$b¥ž³G‚L…Õ>Þ| rÀê—5â×ÄAV½Îö ÏmÒ½°:™çëyl,[Zï…Y"uGEl¼Ä؆FN{"1uó$k{œËèjC +&Æu±è¢K²Ý­è{7+ûAç¬õA~£8´Ö»~ +C¥‘uU: jú•ùH…Òó&H Ø]ø×Uwnv8ë“gk(«Ýv;až²L¾¼ ÷ô¾ˆlXè? H±òÚ^=0÷EKNc„P¤×h ·øâ!*^f\¾LÌãúÐðÔ’9eY5¯‰"ä³vÆKI®ÉíîS2X(> LF¹€jº97ÅQ9⡵( ùD¢;ÒȈ|)Î}M]g¼¡0qä†4ÈÔT½q¼M ŠxøFéCÊò}ÓP?Q¨–›%‡*¨»§bÓvwwž0då‚â-nYðÄÍèŠyÉèÂÌ-ÓãZm@Qj.ØNZžP*^:žrú’.ÚC¸¶‹ð`ªãvìF¥Ú2y!èF¿ƒU¡fW5°5—YC‘…}'&Ê G~‘æ>(ù­Œ*k2i–ÎU?êÊbµªî"4Q…)A°œÔùG¥®™ñÛ#ûÔ)å?|åž‚1×+ãþiÙG±µ ª‚ŠjK†c©°í½÷°ÛéuV!¯ßµDb­2m¶…i‘{Ždå—Rð¬ÑâþøRÆG Ï$yŠÚ¥÷¢è·ð‡$Éø¿™ÅØžðì_ Içzd‰ ³RËlGÄÖR¼‡¾<¯Ûx×…X¬í A¡pö- ¸3Ql‡Œ§±·§“/Ô0–ÊxáõÛbÀóB«îˆ„@³½ŠIÖp³Ó4­5H6š4H»ÔŠ6™-!/”+©ÕöåQRGY(µ$9‚IèUµ®Y MººäâXY’‘ðÿÌnÊöÀw |S*ºìYHýU6C¢§³³bã†Í§Ï á§L8p¾ ¾CÀ®¤@±üƒ*ÞÇ¡²‡ˆ<9xÔƒñ›)òüŠf«–O¢ÆÖ}„\|„É®‘‰IÔÊr¨`•Ð~Å¥ìb˜6ŒBÈ‘“Îî#j94_ïÏí×£ÆÜ̧¥áö®zl‹Ã¬JwÛß’¨C”^|—TÜñgF|*Ù“ /ñéƒ&Ù#ΚôÖµ¶¹ÐôÌY\íó"·¿\¦Ý}’GHev× w® ¦.w–Íð ?‹װȯŠ`‹V¸è…™Š*Ë)ILè’7BEüÊå F#$o Ó°þÕ¤Ìà‰Û'¢&ûT'ËË iK:My`á¹&ÑžŒŠŒÏkõ¨Ù4ß´išV3"äÅyrâÒäÝôóÆIh*1é%—»ù¦J¡h¤çí3né NâKÜåD኿$V‘AhQ »uѼ ˆ—4ðp©|ŸôTÀ£‹`S‡Æ•#:ßc&&™&ý®m&[þòûÇ-gíµmx¿.ÿ{0þ Øú··ó¾† ¤Û´JŠO\Ûg'¯3vK_€€«-H™µ…bÐT£6˜÷FA5>¼ Ù3›i¢ùÝ–°z€DüÏtì:Îcú¹È¡…_×åZ¶öÛ1i}PÐ%Hë?psax1¯Ã=ô#™z8å°¶8/[J/UwW šKòÎ×xؘý©ã,·ˆâëÕlëÇ„© ®à}ÕDâ0ºöV¡ÙËŸpáíòçhùAdq~†Ù_>…¬2SÚ)¯‡îÌo—c‰fÝâ«,rf 7DYÚ{˜É}®€Ù8ô¹9¾‚É„q6Ô²=™ÝjüÕÿ¨Ní ÛÙìnĪ>þëN8Ûã£G«w–N´ ½ê^e#sžõÎu()ñy†»”o:‰‡Ã·Û6˜q/¼l„_‹:¦„ÄcKxy‘aˆ€6è]ÈUÅÐÝ¥ Æ·@­É˹‚àõæMÃ7D×z(¦îE 
€.ID•P–@á¤GÜmŒž™Ÿ;ž~OÍžÛμ%¿[%"±]!Ätc,þšÌS—ø¬%ÝÙ·vŸ¹)Ó6›ž&`X$S‹¬­ Ó„r9­]@òhlúš4ÞEÙ‘#q7MžÌ‘ò£G÷æaå”Û}á#ã ˆ¯Œ”¤.%ü­¶!œL¶|pÝÊHÂmèÇÑÓf ›úò6ÃWh#_·‰Ô¡A²½º—C÷1jDèÂ54£ÄÌmßÊâ‚_”÷¡õòúúäÇÝ| ,<Ä.`¼Œ¿Û!³çàêLpn¨i}9‚ϱ6ø@ÿUóÜ3NŽ•|šòµ]H8;ÍK˜HÊø“—É 1>Ü‹X&`€Õåµ$í¢èæÆ<W*hÁÍ(JFåUŒ;)|CÚv¶/ɳߦÜP1гÎE<Øá1ŒÁº·u…o¡XJ"…3«l‡/×Y0ÖÐPŸÂiã‚DÊ([ÿÌÁ5L¬aa®4ôçC¬Ä*µ˜^ÐÌÌB„›tõxVÙË&ÐV"pWk»ÚÜIo‰%¯–Ñ(ð:ò¡‚4âe6a®’CUvsû’ñ}‘þ:ð—éAQ4 \kEˆ±lŽ×hù÷†èÁá²üG­î~ùK¿&ƒƒ›båˆûÞm²÷Lšã‰éõgòt¦dØÚS¢{‚Ä>ÉÝò óô—ôVófE“TN(›ÑÁ¶Ç‡"'ÎäøÛxœ$?cþŽó‡úßZåsª<à33=yÍx”à‚V:£ÙU¥YaMýî&*ÁìÆÑx;y«A½<ßÞÜ~ñfRïÊ%¯Œ ç}“BìÇI‰ýb9…ypöx‚jaÛžÖNÀ‚õY»ˆÿNfIbÛŽ}tåbè ô{0åçóvZÎ/‹¯¥À*%º´Ú«·i†–¸®Œ3ÎÆkšiJPbYâàµ@á™NKÿ:x[2¢„T5"h°´ád°U›$F^×>¹vïžö"’S3èíŽ{îýÇ?ÓlËlîÁ-iÏ?·kýÁþm3O|•×h—kG\R8ˆÂи]¡ÊlI´É±ÎâªJ,Í5²xÕ Â''àHÒÀ»éQ6çý;w¦£›AflïÇVÓ :ï/¶°;ØWˆÇpªBd/²~ž’¸V¦\)⾩´ñæ¨ê‹LÝ׉au5§–â^3añêßz½Ñ+­gèýTa’ˆc¯3kh¬‡}Q‹Î\T‰zq¼Ϫô•*Íç„ÅÙ–g Äv.¾ÉÈJ ß²iñ6펲Î-õ™°C=Aóº­¢…×GO¢•( ÞÌ®¦hýÀÌØUWT©Ì—徿³½~h£a­(˜¯{ÈÖ¼Õ\}Œ^™æüS¿žÅì46Ñß×[ÅiQµ}a¥ê5ÙSÆw©n~æÏ<»8c¯3(3a­pbQŠ¢þJ<žQ’\™¯2à¥E³gýá ˜Ã4‹ïd]qïjl[iïÅ'±‡ÇÑ 7ZþPº#Ï&÷üµˆ‡7¸óä@‚!óü%N0Âj©úEV–›ø\‡>ëSø;,x$Uå¹mØýÌýÌOГ*°®{˜_–8Oâ´ÌMr:×™3Õ)HçT8?…Ó¶NŸ;4#~b³ëíhùqFáfµ¦ÿ‚uÕÐLWÛžQl*ÐSoÇ: )N‰JŠIããued†qͳ̸)ØOJ’HÂöΨˆzJձˠ}§éwþN*Ób ,Xziÿ åòøJßÖáa™Œ’öYm½ßõíÇ\/¢Â,˜2Q¾´A$Ý© þ³Àô¡`·õžùʤëðG‰ÑÈaؘêyÕT§W¶† KË…+ðœ°ëˆôŽ{ä§fª ,/¬°í&¥×Þ$ʶ­Ä&É­­CUM}#çpÜᨬyíõ.ÁÁO0˜)¨×º¹á³‘$ÏÌô×I·)G ?¶(</˜†‚§ùi E™a¥F²¹Y¶Î¢*ìÀ'JÏ<@qß\Ù¾ †f‰ºÅ’cŽª+O$ßV¥))aJíJ{™/‡¶K·n£¶5‘XŸYyµªÛhiÎ/èþ>"/Ûì4W~¦$[µv@®àª/3ÖHqíjM TøÞC]I”]·€JW8IÇ¡Óù.Rý‹÷XyÀ¢áâNøê©G\¢º¤ÌÏb9€(›‚}¶þÊ œ¸nâœÏæ°‘€yøð—ÎØˆúER=ƒtŸX` ‰´úœ¡ w¤ðÆÉç÷d©Bb?ípmá¼|ŽJBµE)V(bË 0ºdÁБé\oÕ ¤!`¾é)”6„8õË”îû,ãkãŠ1¨9°FG]K¼õ.Îosˆ\R›Á ÷V‘²™¥‚‰_~†p4…VQíU0C™,[âaÈ“ G-FïtÕ¸ÞS»†?c…[×èzY>WLKM â  UΓñU›‡7)­¢múù稂]&d8¤83%‘ÊÇVc8ªpìQp±¤é¾=Ó¾àûòkWï7¤Ìõ ÍñÞäEÕ¶³’eÓÇrZ‘Ú¿ý.ÔþÃä=o³ °€Àat¢mÄÂHuŽBN—sp*ÄôCš¯¥4»ÑóºýöýÕVÄ}ã G¸$z‹™j¨ÁPfB Ë4û°Ç×sZ—ÌÃR«ÑUTð—ÀáQ®Ôdæ!ì%"ÞmØ·(Ê\¼Õ%SJ lÅ yp ¬T2ÝãâM½æ²›Ü_ðmè’ mi—©Ó[oQ.}O›P¿ 1˜t¯ù§Ý¦aÝЕ¹[Wíê«ÑÚ>3r¡Ž_¤Í ج1[·ûül||B²n„(M¿º.kDqˆ  o‹Î§äoóŠŠÍVuÁûþ?hl endstream endobj 136 0 obj << /Filter /FlateDecode /Length1 1630 /Length2 7141 /Length3 0 /Length 7967 >> stream 
xÚ­TeX”í¶¦;%©AºCRJº»»f„!f`fènéNé”.îiI锃~gï}®ïì_çì3×û¬{­{Ý+ž‡‰^K—Kµ)@!.>nÞç}C-k''k ªÆ%u*#¬ “, dC!rÖÐs€!Ùøù|¢¢¢8LY¨‹ lg°>ð°qppþËòÛ`ãõä!¶ƒ˜>ÜANPgñ@ñÔ{à%Ø ÕÔ2VÖP°*jèAì¡-7'°-@ l ‚ÀAl€—PÀé¯À ‚—ç~à’†¬p-ø! äi rù q\@0g0þð Ãv0kâ¡( ±urþð` ý#È}ðp~ÀÈ´ pÜvA²jÉ)ü¥aoø~€Зž@¨­Ûï’þ`4(  OÄï\6  wq²özÈý@æÿ‘áCìþ¥€ÙYÀN 8üæûwwþU'àToíââäõ'úÇëŸÀ8Èé%7ÿCN[ÄCn;0‡ç÷º(C^B|¼Ùn.ÿÀÜA°? bý½3l"¬Pˆ“z‰Ã£E<¤°þߦÌýŸò`Äÿ‘ÿGÆûÿîßgô?.ñÿ÷>ÿZÁÍÉIÃÚô'ðw øýÐ~¿4`Ûÿcí vòúwQ÷4ý%õ7Ùß±¿¸¥!v3áåýË †+€=A@-0ÂÖðÒÚé¡[ìú 憀¦ú§¡.>^Þ¿azö`[GÈïö þ À¿kÔå<*ÚúʪFÿþuýã«õ°=/࿪Cÿ<üf’‘z|¸ø„y\Ïäðñ ?ˆ>ôû7Yÿñýë¬n€=¦¼Ü¼¼|€‡ÿüþu2ÿ<Ä ü½7ºkðaÕþiø ÛºÁ`þsû ÿÇùÏÒƒ@ž [œ…Y¨­Øk‡ÔŒ4DÍãœ9Óž.>Ô—’z½ÂüWUÐ΀Ô7_EË­îªC¸ß>ÿÕâ5³çòsS…}k°‹Ü‰¥ó-è(Ú­;Ÿx…ùƒ0ÇVE ~Ú¾a”Ï÷iµU4!^ƒ­µm‹â; šÑÏ`X߯Ø^1¸ç¿"e¼t!ð·M©‹%k#zô¨¦`oŸ9ñÛÕ%Kß§ý'èÝ›TÙ±ØLbÖý“÷è“^V°ózÛ_è?Ü…áUuq˜½ÔŽp´^3®2\Ç*6’ЦMlõ@%ôíœ,§1Pã‰Ë!9ÑäË9å©\†ú’³íÂ?°[±8 eÑ*+rÒ‹ú=þ>㸌RËÜ]c /(K&천5ëDYáMŸSѰ¦ABÕ²™8ů3:Lu‚-îuw-pÂ(€ß" ®•|´÷£ë¬TM?pÏ’_Ÿwr²“›JUÔ™zé¨KšHJ;UýÈ)ÂÕ!÷ºém,w‹9qoì²g1å‡ÏÃx|«jãºÙ&e·8Ög]I”ÈmŽ>öımÇ£OÒ[=ï ïv_âãÍhÛ|aB{”s4ʼn{‹úF!ôš$(dØò;¤í9ã1Ï2Sÿ¸Ù*FÝ.ë{w Ÿš¥Öâ„)A•øà©J›–(,?.†®;A±„…?šVئ\ÅMÊ‘zç§)\$^›õ)fz¤r2Dƒ™jÏJ‰Ë5Ü)Üp0äwõdšø«+.`ŒÉV »í]% šÒZØ.œE¢ýU숪~Ú 4*lQ*ÆlA…bhkÌk Çúqî™âpXp*þûUòõvÿç º –3ë ׂ@ét±|¤òaæ%æ—"¹`àYeôcÀ»Ý7½ÎÌi5e“¦…ÿ•u³~¬lvÍœãá'Fˆ Á8¶qI³ãÅZÉ©ª=ü²ÈÊ Ã]j5߯¯Ó´zgTªt/@Æœ¹K”¸`©«†’·†zh|ó¼%C8×Õð9øPò³RçÏNaƒ¦:Ñûý:~JIW'ûY"™\ÿ:ßBÑÊMU e¸Gµÿdz<¼ÊX*›€¤ïFÑêSp,‚¼7HGÿjuˆ@b¤ÍPìV$ ܵI(YÑ…9½l•ÀJv¹øa\h&ï.¡5ãÐrY}•ÕEŽO™¼²MÒcÍÏT×9U¡ŽãÂ>3Ÿ?4¿ciápÛÿ‘¬go0¥Ü[ºùáG5L)6W·Õ ÖB 8í}·Þޏ-æôâÙo‡ÇsE[S1£r†È ªš×œ?Ü7†*IÕd•2_zt:Œ04{ß~U}Ç'µqB•…eâ÷-zóÝ«p¯º_'qÈ,zXÓé×L¡ÍžÙIYãçx'?JßÊçɶLVœû¼-„QŸ¨©„îU†uÖò¸¦·f\¥—…:Dø²²X#ÿÚøŠN?Bjø&@$pðd·øX 32Ýëk‹Dd!s_¡¨7&ð¬¿,qR…§ÈÈfÅ·’†\~#­)n©WÊ"/OªîM…6Õ~á•ÎÌêRñA2»¦pR¤-³8I~›Ï—½Kr0hŒùM¥ž r„x¨Šä"mÿô¦|7yxŠÒlåÛ‹àìFô¯ä~Œèì‹ü$Cm¹×K²ÊÅÏÁj±Ñ}š­˜²&Öo__ ŒYä´7/ì‹ø¶õÖ6Ï0»Œš¹Õßšç+LP±QK¥' Jñ1èb<ÖˆÇxjòy`ží%ƒ±”îBüî%ã#d¤¤ezêºá¤|7ÆÂm‡@ÑViF¼÷¦Â@dê€ñ±I1Z2áC mÚK4ÿí 7kOÍS©YÞuvo¿¤¨MÏfeÙUoWýv} ˆÖŽ~³1õ„£¥VYÅÀ¿Rà$ŒÑé³g¿h~§óäcb;ë]Q3±¼Ú3‡uC]ø=˜ÛÏùl÷«ØÈÞ$JY««òæ²\:_mÏp½©\¦{ÚÀí8¯´kØ©Vuæmw\:,7úñ^Ø“rñ“œ»u·±”˜ûŠØ×\¬¡‚3È?DŒ!´Á)_Ûj‡¨äo¯½hϤ¥• 
ØÜ»ÔcÅ‚¡´Ij†iežkk²Ð=:Qý<Ûžèx³›«§:¹V°ÆÊz>'BݪBõª„òŸdÍá$%k̪±B’%û²_3ÝûE#ßsÇ }[÷QÑGÉC LØ}d§jMãb`BmˆjÝSÆ'òÒ7m#èØëæ…fã÷ëlĹˆÉ÷]ˆDÈàqè~í Á×8ÊàÖäЉ?hùÄp„7 ü$ÜÐ϶Þׯ=‰»z•y^fÚpZÍyÀ*¸'¸ã&"?è— ôjزz1óÚœDJ*+Êç°1½:‹wʰÊ\,a…J”^úÆ¡‘-Õö\Û×8‘ƒ<ŸØˆ…O…s_`&É7畎´Ã,E1étP69}öêç=M'V1•„zÍ[øwÍ@Ö»ZvMÎhO"å‡}­>Œš©š@¾Ì±¾ð‘÷Œþߎ•„,ÞsÚWo¢~D2;ÝvÐØMžîŸ/µçÛ› bçGûˆÊ[‡µZ-ô;¦ã²Ç{P^€R´0zÒ,¿9÷Œ¢é™úª“˜b­˜à»Ü^™]ËÝáÅf¦Ô˜sµwû »ÞœÔõ3Qû6¶7Á!•¤§±÷s¾Ÿ-ȆéÑ‚|¬š!«»EɇñzTòîª9Êœ¸UÓg!È/„Hw1¿ !Q=þ(=ÃÖßMµùyñ)]ZbykŸ­c9‘‰S¨šÊùA†á¸€ãYïÅW+ზTUClNº}QÄ¿“}ÊN*°3k%rCýÙˆ /§÷ñ*]ï9…|îuuU°Ò…ñl%릩†Nü·›—ÍH¬q×#¯dbKÏxP›P^•'„ÑT²±3DM¹bå“à Ö)(Öã{U![_ÒÔRæ.ep¿ùÏ4驈2ÙEkƒÆ¦SŠ®Äí‘XÎ:@bà£) :›V)Lô‰Uºÿ˜æŸ ÄðÒº úÚøŠãúZá 7 ²eVñ‹6븎Ħš¦;ýkàŽ—ÿKñeš·“ƒ®¹RE.¯ªÌ^ØG]éå»ç}Q+18¤@ž\ûNð2èîÄ*‘§¹_hÁc 6?ùøeHmQ(vÕ'Wî ¿.®ºœCe’Œã¢'&Ù¸ŽÁOõ_¯¢é@Y¹Fkf:éS1ÏRò2î[0wCú~øôÒ«Þ°TÓ?ËÅb4¸Ýš1ï üÖËö–ãçj2cwaЩY]ˆïBî®ãÓ ¸ ë÷”ǼZ$V[‹÷+ YyKòÜl-?n©s>â4¤8$€@^Ÿh <1uü%^øš½™5Z¨G8z“ž`=¦iÉ¢%P ¹] £K| ïXçèïâ5f JvêF©ùr{ü­žîÎà²rùÓ ÇŽ˜ˆÆúáPÄÔbe¸±“èxD/¸ž™ß_VͲ`nQkˆÑ‘"š¡sÚ]‰¦¦vãÛR^d¬$r>‹§) ~8rº`o“EÞ™È ²žMNôRe`.Ö=”fcq;F§ËâWäϽ"ŽÈZUÇÇHUüÍ‚¡µÄ×.…:Ï¿­AeØküÞ uOX™L¦ÑÍt™X>ýõ³QC¦éyF¤ÇšëB›ÿPÀ‡wŸw9u*3£Wœ˜w>,}â\U=c䣖çSpÝQAǽK—À2ëªbi}L“"4 è(Æ%ù³ø¥ïOåI†hýi3%"|^ù襛½7úxb:×´o(̓dÉàQ6ò*ùr§Uis¦v¿µIA•ôÑ-»¼5G¹XC»½Xs¸¬y 6õ¨¢tpŠ £[üxQ/ôEƒnˆi‡É°°À2ècPáç§A~/Úgf”ϨÇ#ZîN8 Z'½C‚ôå\_=zQ¨Ó¾X_è_«ÞŒ°eFjÊÆ_vy¦y9÷^áÒç gÿÌ„}Ÿ0–(D]Ùç5ZTo²ªÜ”‘Ž`Á­7ü´šœ†¢V¥€ÐJ^Å•þv¥õæØ? 
bW ÇÃN»¶Oªæò–S ïh$9Ê=ªH%¼6±F’GSé$Y”I~~MôÌd{¨L3€>e…®Ù8Fàþ 'QØÝs€ž¸…+¿é¼Ò`0É%ÖàÅ£²0Ó«hšz¼oîF2 U™i2°>ÑçÛ<3é+ѤC<¦s%Œ?ùº™íò‹éWÀ‡ c4`5r’jþŒ)Y-#Ö6~,bxÔèe~­Â»‚fxb¬'¢Ÿ—Âìfw„š?dýEf.â;²r˜ ›Ù}I x^ÜöDç ITSóУ±!·b;ì±ÇÁ½¡ Fgdv¯¥„Ô¬›_Å•ªq7øßÛEgcIÑG„=žJX¹Ó‹‚øåˬ¦Ñê)ü÷Ac&ñì uï"5©üŒ`?áÈ1Á%Y›pJ±w)&Ý+(…b³ms ·?±ë¿ì–J´ÓÖšfѤDœôÎ?$¦]yÅ­%Éûæ¾w˜eÅÓ vÓh{OÏš¿ç{ÚT©sô‚Ï0~ÚävøÓù Åg{ ‚b1Ÿ%àjƒG»;ö½.ZaI0é°Áaä±3Y%î†ïoJÂw£*ççÞO.Ö´¶¾¤wëçÖnäkÅVtÒÒ¸–ë®øÉ»1P¾X^|y¶¼¬´y)¦9º÷kY¿¤ý ðJ¢É?†\ùíg$ ×Ç’JÓïpüdýŒü—w›JMÆË#êÇÆ³ò’HÛûž“¤ë³“MYкMøÅÙÊéžN¹óÀÜ¿šíÈ=Á#ð¬únF)}bfçwvûü#Õ×éØc :¡ÅÉ+sŠÏn8³z³2g4ý21a®Äë Èdzéhü—FxRùã *£n³>Q§Ó.#B·úŽØœ…¹âŠ'ᶯNÝ™½¶ ‹wl;ž|õJž‹]È4|]×ô±{·El„Mð3ctXÍ=NaŹ%WÂØÐ„Uz˜±ÚëËsG{Í/ÎLïví}㆗QkqI ^Êðô—ÜjÉEÕ]&™Pñè’²¨{-:QåH‹â‡°+¹r­qÒ«Ÿ¬~Ís2³e¯;»¥„Ns ƒ©¬¢Æx>Ö×2ò,{§d· Þ®1»A{Uîâ…Og¤·¼›s¨=¾žLt(Yæ÷ózdxÊ(ftT-V]]­ª Õ¥›2ï’»È8˜ÚËŠãoµäk†Ù–M¼™§Ÿì¦:¼£"/Ÿu¢»]¤–÷7À(öŸ8 qè)Ô`ä¤>›Yì³1{ ב¨gz‹+GÎ3Úö–O߈ò—C’Ý’¿ðr˜¸¹ß‘Hž“ŸÞNÂE.j£ûy8û³¶ç¦±q»Ÿø¸¯m}f÷#ɼנ@/EWkµKÞH…æáÊžž`ÔtQŸºIJ³~¡¨t<®3eœÃ _Y§ÅL› k¬©´«Ç-Ç'1ãGoüú"~i}°ùØrÓ° +𙿬™âc&¬u‹¬°[“˜4T<ÙŽÁú)dÁWIÂRRf˜Œœ3dšË3•Çj0¬q€]bÙ«Ä4c”•ŒÍ>¤¼HÛ±oê¬ täú'öÒ€€¨ž¨Ï¹ öÙ¹‡ø+DXô«A¢Fó ]~F—•‚õòm$YÑFŸ‹)§pÍŠµj¤·mùæ·Ð=œãò'Š#µ»šE v|¯~(Ù)>™ƒ2 ”KÊÜîð9Ìao¢œ\œ‰ÎEnßw)luŠà¿ßY¡&À'W ÷–´‰ÎÇðLj?Ùð$4½_ÿÒx©\×f$A†E°ÚHúý¢Âà¼FÇ7:ÑLyZâÓ„žIôëL4Òváb)ê ü,µVü1èòœ*@3ØÀ¹J­Ä»üÑ**~)ST¼#+|s2qIúZZ–{I'æMƒv¶.U¢™!éÛys:E­ôöâô·ñà`D¹,¦TI×F?‹t Â—ã ÃÑÖn„ßãõ-Ƹcxm]â(ä¬ë«²Ó†ƽ‰qŽt#Á®ï ©'ùdYœ«Öôœýª¦K9±-!=eøÌ¨þ o ñüÔ**¶]¡{ÿÖF˜šÒº«†±Ë6ÙÝRk9ÖØ9=SWÔývüÄ-£ˆÈGwchi—I4áÑ{íïÎ|Sẇr(¿X©ï¡¬_éÈgëúv†0›¯ ªß”>!9v´øJ°²rÜ*ñ ßÓ‡Ø3:)Ë~)T*m©‹_û¬ù,J:•ú©ØÁgœ‹(²Û•ñ—V ž£B$¸çh2ºŒ7$ßóêKN¥1ß>YƒÏ@£%ć[cÒYŒŸe…¿ÙO(x®%*4½pœ~0¡ŽcDƒ§0ôð]x…Ôñ´Ã,k! 
±/§r¹¤´œpCp0o9oZz,úýȽ§jK{-˜BÎ#uì—¦xçZZœ¥ý+VE¼QÇù×§©£¤q\ógÆW[NZÎUg†ãPd&i4Á,6µ¥ E ¹iòúÇÐ¥³ÚmІ¨i•å†ñE©=£wvÈܶþ@ES/”£f…Ñ\ÝÇ"‡ýƒdE˜Rø‹w“þôå¹VEµé‘6Ú zȨy ÉœFë6y‡Eݾؙ%Î;¦GòmKC5æÓd/;r.¿+#õ²¬£(]iSF˺Æ;*xcY `xÄÐvAî9J׈Hˆ˜Ì³N„QìËé²¥»wšés/Æ´‹ÝI/—†¹d;Šû¢„~`dž·œŽÕâ{ó³ÑÌ !;=ßÂHäíqyÌ*a"#k'ÊqÙM†¿nëMÈ)Z‰L:ñ‚Õ0&˜L²šYÒè£ÇÔßZì“§D'*G)ïâkû-B°¤2Ôð©ÐiÍ@éá+Ž©è-…켡1 !g‚prŽ ãÞ°¶R#f9Aaß>L0%Ϲ²ç‹Tê¨`ó’ScÞÌ©¬œ31V½¶8â IWu¡sßÌO∱kNs’o¤¥tjC°MV¬z¬ýëº^ÒÈÛûU$¿¶Õ±éP3ÅHò§ô1”-sœ¸¸J;ÙÞ‰‹Šg™cÜ’Ž#„\²—·Ìàzäz€â(d=T'xÆ6ú“‚Ó—<=ÞÇ2ÚîP³!‹I1³'Ôr#z¹ ~xÚ/"c^5+²Pÿè½tJSwDËÒ²Ã<½ŸpUüø™3Aeø»Á”F›pZÊ*ªù[ >’ÚX–μŸQ4žþä‘ä.['w”Kå8ìA‡À{1ÆÁSYd‰Ðw‡bØ¦Ü Ôåéšç‘šWi_ß°}â"~ós®áÞôé!QGíÝéØd¢-É@ÍDn@œƒ\e«H)bÂì°É %åŒ_¯äƒnps:¥¤í&Hƒjå2/å´&4f;‹ˆ+4œ¾ZO¤äÕÓ`%Z Ã<©Ã¦ _"•Ÿdæ¯çÄ”•³Hô ÀŒ]´·_‰éMO¦5)ËA*PªÍR†aDÂ2sÖ¹Šgu¤"e= ã.µíïÒžÃS>H³¾¨ "&›'ë»Û@u%±Q^}’ð9J Œ ó}LRO–£¢ûQ_Ô½¾.Å(%7x‹›/xÃbõ’¿Ò*¿œ{xQ…èwdÎêfx¦ñzâCñ.ŒÙî©dd3ú(*IŸÉ.;}¦Ñhš_¿~GµÖk4ßÜO3OÖØš×Bœ>Fê”ùÆï€t€3xµŠÆ3ø,çÕÿꊿÏ;jô\G‘÷VŸ¥^ŸëÈØ­˜€¶¤il?!~»!ÈeEýl¥r°ã» Ôm̈zÐìG¬šBÚ—wö£‡’š+ROÄÙŸFrÌ-ÎÁK{ºnÝô&×<^äfùžJLCdÈþ6ÇÏŽÊ^klÓÔ_«_E$+úú«¾ËÈ8w*«ù³ö¶YWÖPƒìe êã†üï·uBcà8ŽGtäÈnŸÈ¯CÝ+˜ôã® L*ë¦wL’Éþ nÜÁendstream endobj 137 0 obj << /Filter /FlateDecode /Length1 1616 /Length2 22407 /Length3 0 /Length 23240 >> stream xÚ¬·ctek´&WÌŠµcÛ§bc'Ù±m£ÂŠm'Û¶mÛó«snß¾=n÷¯þúÇc½ÏÄ3ç»Æ¢ QTa6±5ŠÛÚ81°02¨)k(ZYš€le”m­ Åð߀†N [QC'à7€Ð 4°²Xxxxà)ßmíÜ@fæNê¿4ttôÿ%ùÇ`äþŸš¿žŽ 3åß •­5ÐÆé/Äÿµ£ p2LAV@ÀwE-)y µ„¼@ht0´(:YŒ² c #`jë°úÀØÖÆôOiŽŒ±„†G; 1è¯ÐÍh÷Š`t°9:þ}€f†6N{àd Ù[9›ü“À_¹©í¿ Ù9Øþµ°þ«û ¦hëèähì²süª(*þy:™:ýÛôW °5ýkibkìüOIÿêþÂüÕ:‚lN@7§b& G;+C÷¿±ÿ‚Ù9€þMÃÙdcö_Ѐf†&V@GÇ¿0±ÿéÎÕ ø_ª7´³³rÿ×Ûö_«ÿ™ÈÉheÊÏÂú7¦±Óߨf x¦FEÊÆÔÀÂürg»ÿÔ¹þmõ?3Có7 C[+w€ ОIÞÖéoHõÿËŒÿïHþ@ñÿ‚ÿŸÐûÿÜÿÎÑÿ²Äÿ÷ù¿C‹;[YÉZÿuüçüsÉØüoÖ†Ö +÷ÿ“ý·ÔþG’ÿG)'ÿ­¶1ûK3#óAŽâ 7 ‰"ÈÉØ`jhõ·OÿÊÕlL€V à_>ÿm%€…™ù¿éTÍAÆ–6ÿ4žã?T@“ÿžû_ŠþÍœI]I]QãÝÿ~§þk§ø—{'Uw»¿©ýRälMþçá[7€' ''€•›íïʱ²xØÙ¼ÿÿbù¯³œ¡“È  ý·lf–‹ÿÏtÿŒ˜±­É?Ó¢âdhcòwÀþ§àµ±³ƒÃ_^ÿÝù¿EÿçùßQÝ€Æð+‹¶Æ¼A)é©NÕØÙƒã¢Ú½Ý,ƒÁvÅuªy~U¶]¾)aÛ<åo¿ƒë'¿}´¸/œÚ½ïKÓ wcYQu%/s ¼ÉhzòÐ6(Û¸è˜ôŠ‘RÏ4~z^ÍËnAýàdV?ØWRÖ+zƒ!œlcs€½z¤ñ#sÉóûJþ`‡ìcœ\ƒÙŽZ†^zFùëøñªdhp 
ëºgŸ.+Ž‚×Û'ñ”$ÁÉÝÀá®ÎøúÅ…#öÉ(Ž©]£´%Õã2ª–ÌlËËr;~ wSE¥ú{t;›²@º¼MÑtõìßž3Õu&Š Íæ"àæÍØÈ8VÎBæbø]UŸnÁ¹rK²=‰©z\ÏjÆIbäkÄyOnÎþ©³{™_p4ûÕ eì#©"<#R3©K‰K«x!À³‘Æ>©úÕõgGž^ ¬=ùqÚä¨JíD'~´R¯Ùˆæo0ç4uúéxÒ2 ýØ]$&W’¢ÄOL‹-Õ´ÀWÃçÊö dé#¡¼ÁÝ E{í.> ZÓ[—ÚNZkd/Š‹üY]i¡È¯ôd0d«6ª¨.ë÷MÓr>±uWa •?¤NMM°!_/‰VYa䉠ء9=G„HCa† üî¢ ncùúÌ3_¹cî–Ó—iòjÛÑR¦“Ñz»a[5žW1ˆ9ù? ª’Æ~rܬ6è±xðŽ7ß«÷ ‘ GvO Æ[’é¿Öìv[Hµ"~ÜÛ]Ù¥íOW‘)9h EIê©)ßZ©])qž5a)<»¶9”²ÐEëyEmà[ tå¥$'ó=‹ÞèZ¶—&ɶ2Oü²¨Eþ®è)S|@#m ÜœmÚn·›½óƵ'á•-Ë2K¹Ÿä'uÏþ1J*t3VSØ^ìTÒ6R^6L¢öþ¾ËBjhG(ñ gŽª³öÖ‚TÌ€„r”} wÒõöŸkùº2V¶ýü C§YÏ SIó²”èVX¨8ám:Ùo\ù˜ÔTV<;¦œ‚Ò1U^¶Ê×6[ý£ÇßHù,¢m=ŽUÒžìh*|í4ÚFù¯ä_^«O쬛ÁˆC‚ßmoèv?E±¶âR’+÷¦¤e˜}xç“—nv˽!pC(ѧ‹:ø¾:’ל/|ïxS‚êªØvÝuÐ &x,mBž*06.ì É8 ÝDÌ#.µqÜ2upn“Z‘p{®è¼Ï|Ï~w˜àGÍg³ÝLD²QdõzÑ0ÜÖ¶ìY*‹3¢ïTº‘ Úô´-¨<ÜÞûäÙŸ©¦Ë2G"-’>D£9oбe46u·FhDIzUëÑ¢áàW)„ :*´‚gÂÓ ÆM©õÎì_ð:l{'¡u#b~÷R{¹÷¢(ë?4tJ|¤ù#¡{‰±#I†º‚ŽI&“â]SC¿}0GnÆ ¹ãAÒý\CÝRà²ktTk))¥6ƒiþóc²l*7.4§¢êç;'ß_I*Ò†›Û-Î1ÓV“DjžC(Kì4díÈs6«pGFßQÌ©[TÍÐ)(—SK rŽãÝ·V}–L–¼Ýo›Öñ`N |²_ÐÑ%‹µ‡ê£$Ü4f,'1çÁÒáݼ†• fW“¼k³ÆfNE¡ ¸û¥O{ÙÑ*²FVÕ˜ðŒî–†5¹#ö+©(uÙv;Y—&* ‹pX¨ä _Pæ=f„aÕï#^½“¡\šÈW”* _É:Öy:¯ &Õàý–yãˆo‰›º_+;íÇh¿ FÁÒôt‹xÌÊÓ§ºH!ˆw´ði|5ž×@’¿›?ÿcîÌîÕkµ»Êù÷IÛçº]ç“éü8FhÉ™í¤·)=#d¦9Ke¸n«ĸtj‚!ƒÄ–%³|*EÄ0ÓµO-Æ?ºY}+©®e0ÝeeÑáßXªòÕŽS:£*ü|Óq:“O¶%~‹¼¶‹ ¡%w~±—2oú„µ ÿòRØÆ)¤AÞIÓÛc»¨§;IÜ42P›‘3÷ZœÕ„^ñ°0,1,¢òpïÆßF0rßIñ³^G(O{…eÖ†ªÍåð¸.iEÔŽ å¥Á"øç;™z0;ñgp›²âë«F qðR©þ¢Ìkb­?½oIâúØrñÜSQíAÏÚ.:V†ŠýÈJu¢2–#†_·)%h‘;‚àaøG#”Ì+á2FVi줋²‰»Ñp¯~ùˆ’–ªü¸¥3ƒÙï°B¨¢^uA/¼Àc0‘š‡34·Lä>•µ=rE¦¶°Ø©îVº}Ë7W/ÒkPÚtåè'±QbH⪋ä»ú¤=íç˜ +ƒviNšüH j6Û2ç^ .AÖÞ Q)\ŒVç³&ß6qÇ噈]Bâ’hó9¢ÜV3Ï1Èý˜Oª+%lRª3_ql% è"WÿÔc‹oÈOJÎfZ¶ò\µu?ﺞÐÂfžœPç0bH¢ /gñ“öãï´Žñ“ãº4ÄËke‡ëû„üýó0À²…Ä*8ÖˆXxÐ{â¨ÂÔ•uÉHZ7æq>sÎç´§ÎÅ>Œõ>s{(|³J7 Ø¢&f¾Á³ÝˆÚŸOEÏ_ë³½nÚeÊVGðÓBØý[ÒXÂeùäZë`Y·ªà£­êýÂ6å­2¹ÐÓì!ï5e¿X»ýCñhò0gy¾ÓVÄRD ÓG=f¸ï{à?Ä+’Ab‰¿Ôò¸$Â)´Á‘7¤eñ-¼dåÝt º-X=•{sÄ@õs2¼x…-wËYvΉl;mÚ7­¯&÷² S½QׯÇq惜ƒ`·ßÔ__?‚Ün&À0‘÷²‘˜ùŠ»0[œÆeƒ[hµƒã;¡ (‡·ýgæ¡ )ƒ$Öf‹F¢ ¾x»8,6|¢«n¿1øq»O {Õ«æžøy«o5‰fQ °5•íçy24ÖìÄ“ßè u¡lÎÅoXÞ‡y½£ðKEÖòΪ Öõ]>ä¯ÜÕc Ö§NHNRÆUÓøV’2Ê;sEÚGV”j-]Ò½—ÀÙ(=ÒïL±Ž¡è ³´ÎSC2þ`­•gi›Á¿†²:÷oNß;³ó±{µÇwNŒ,É:;fØOç=Å@´ ½„% YŠ]l`d4¼óÕ>Ë&Xg-µá_ƒ-™< ±åN¹¬EÌw*¢ 
e}³Ìz(ž2‡ç3ºûq„*÷”Öf/›šyU2;-äHÂas†Õâ÷êWa=íŸv¤~ ჯ‰-‘Éù;˶ÇëÎÂçn\’Hë…û š;Vó3©Ç6f«„‘HNÝ^Ã\lzÌÿJNëd’}ª#0 ÑFÖ¤=S‡Z‚cG4Æ€š.¤Tþâ¾…«5ñ§æU…ÙFLçÜ5‚‡;à Ã*”#9Ò½@¸áîtr:º]“¸6.!âô°ä÷pEäBuÏrƧ'’Â$±*Áø%ÞÙ=‘ zd\óRÿ„jÉY¦wŒßCRΠE‚¹sÿí4…Ø¿ÝÎòX6â‡Kó»ÚÁn „`Ek|4äžÖÙ—V%›6_~½'ä?½ææ¨ÉUi2‡|æ<í@_ò˜r} Çû³ä“%àô$‡­ddDð5µ™Ìåt {ƒŸ!bIá÷·‘È‘è6«º¯Sé/‹yK€™Ù§‘À÷dy¹Xà)ª†þÙ¦p"ºäߟ0;”™pv7L¬ÊE¬⹺;3ÛÞWµ{Cv±9>JÓòâÍ_´#ÄŒ+-W~~”¾•]·ŠêȯFå/ï3Åéj©2äÝRÙ5†ù:aˆ H{[›kdÃ.XݽæjãÁÇŒ7ì»&Ÿ8¤ê+”¿é~kƒÄq?¸_øÐv†ù gh¾MÓþ±Ç%!^DröÙ½ˆH®w?˜› +ö»Š’Ø´¼I$±C’_¹{YªgÚ¯ *VWò"¼#cÅE÷ølPÇlNz?ÿZo¦GòCd\-á†Fr jà¨:¶³!Ç6â®?ÉɲbÎ9«lŠfFÑ{/³rä2_ ˆ£ŠWg6&çÕ[¼è¸ ]ÄrìèLåÈîÒ*Á‡9 ˆú–ˆ™‰‡нcöÄ’˜–sˆ·X¾UåÈ``ûs®Ã0ŠŠƒg{¹QÏk]|ZѬA²,U»0d¨XV.dcIN+pfåÙûx6©ðfÌçú(1©ér¬,‹ Ø\í¼Ç†ª>gF`Q] à¼æ!ëO†aAP¯Úíöï>Y¼7;zë½G‘ÏÑì¬ç÷&ÝÇbCåßß—Û D¨K>ýî8MâfÚà¼cœš\Ó¥…åóï?¢/=tv,‡Ïáñ´‹ Ç8„ ¨Úp|‘wÂú· d1Ii^£P¶Cx|¬°f&?ŽÁf_Ù~ñ¶!ª™"íª4LÇ 5|/¤¡[žþœî.{¹QI˜ãaŸ~‡¤ü3h\µN†¤%Wi3@Û¢/Ó<ÕÊRÊëÅ~NÒGì ¨1$3Ws;H!?`Ž Vÿ–²!ºösÄ”îT¤vUBꛪ 4“^_ê†J€X³Ö·WŠ0:ŠšÈŸ‡ÙA0Lcº’`.ªKŒXŸ™ú3»dB©û`»¹¾yU`äÔÆ¯c0ôWÔlôC!ÉÆW @†Y•éƒsnv¬ÌSjGFþÉO~¨n2ÅKÝkßá&áãs\jìÆuº~ÍöÚ¸Ú²˜oOBa +Ô¢j‚<Ê€úL€N¯@Mð̵Á´Äþp^ÿyeK £Jt$ÖTgÐh" ¹i,Û”RwsbxT5º\ø²òâ¸)¬/ªƒîá{Ù,í>t”ØAâLH|Íj…¢ÛÏ'Ÿ‚û$¾•§2›ª0ò”˜!Ýq÷áE®“3¹¬hçOKHàü'CpsšÞS%á€#L›7¤¾é—ØbˆÍ&,º_0½nƒ­Þ—¶ŽyO‰óíñõ—¸Ù^ˆ˜…Œ_9¬ÒÂy½'aª€5ÜËÝÔtúeðܽ}¶»ðy™~6~¼K*-3Ú"gËMü(âïZŸÊ6^JqúdK5¢0¦ø«>ÄÏ4~ÉÔ!A—sŒùYºpÖ{}þ‰§a`kÊÍlÍõÒoFMÕh KÊÉÚ!'çœçÃ]´þ<ÑÐC –€&Ü;Œ,àîtü‹²Å’ã¸Âq mçdèvó`(¶%#9Ï|¸WeÆgØH<ò"U%_H.‰Mî÷N¡Ý§übÊDš3r½T¸wÝæGûdlóÂ65ÓuòzþQ$”K2¡³¬ªŽß‹š˜NækÛ¤` #âò;®G…@#Cƒ->‡¶ô µ€ãØÓ¤#ÒþÊL¯ÏK’aûB.èMo5))*ÈúÉ›/»7x5¤6¤ƒÒJR;u3™1á.ÎÏþïŠù6j¬²Ã&¿Æ¬à¿œLV‡¼ÖÀÆ*äÏ;è•:ŽÆâxì¼úN y¹ÈkŸS§=¥(!y&ÈHÞ÷ÏqØÐ½Ë• ”J[¨¿–7÷Ñòq¯W¥0õ㨂ŠÂ‰ø¥0°w]Óyø}ìüæÑäÜ‚ørަrº&‹=©7ªøA”¯U3ÿ_Y9Õ“ÂüÕ»\Í>&8-A\‡žô¥?] 
„3–Ÿ‰çN—žÛë·]\gÝû8[gIw͸“q%ßÉ‹g·<`Þi:ÎS¸YÌæ%ð[ÿ’¶´¶N$A¼ÈÈ@+F6œû–}|ñ‚ýpsªÇø“>»ðí´çÛnØ$)ÓŸBc¼ð…M-©cy’Ùê{¨ —4ヶ6ÒæØ6±Áða”ฯÊÞÛEßlÄ‰Ä ÿlu]s%)-§¬ÃñÔï]•¿îó© ¸~•bëŽ#I˜ªÓ¾æC‰…’‡ZÌË]ÿ‚ © \|Ö˜lÛé’UƒèL(8‹!Å^îÃÏ`qÃà ¨˜×ÑÉ›×1˜ÌêÉþAt~ÈG8?*ò¨r-X©xxÎOÄÑõÅ쨓êð­^ÅÊøX\³¤päRÞï*Ü&íïLJ´bs =ê·H]äì½C›úùÇ@߯q ³UË ž3ü$®{zœ­Ðºs{öÁÐÛÆnl휇X ‚MÉù/ã~)›0 ‘–`p[k_-´ŒÆÇ¬Ý‰¹ÕÙ58߃DZŒ€“Û¸oNÊbcE8ËÖÀÎWødé÷J¼íÇ/¿ò…_ØÂtÒ³ MË%WXAÁëT¼äoé_n0¤+½XóÙÅÁpùÊä¹ ^ßûµ2+#É›ós úné%Š[§A>äûÝ.VœÇÚŸèv¦S–Œ…P',7hšH·³ ލÈð$?öÕZOSÄ `ŽOÓ²h)aòW–Ð×öq§‡³¡i4‘›/ M=¿=ÌõSQYv^¡¤K!ÍJÜT+Ðdl|% 8ò’öçH§d¢3Ãdus¯žOR9J»í§Â‰œAl“—iø8J%ÈÚì]V@ü®?äPçÛ†OŠ…ÉYr¦ÕZ’ys€O)ÌëƒÀ¥`k7MAyúa6*‡Zð½ïy‰ç¤Yý?]#ª'0J;¿Ë¥í4ï#„ ¥Z iW«×žrë£WTn1£NÖ XFIÖ%ZÍ„þ-?‚o¨ÁYfÖw$`]¼Ôñ5<¶KAT5Upu—ø !»!ïr{A}Wâ0pêó†\›/Ì:2è;äÛÌäêÍ«ÆÛcEÏ”Í>?e}˜Ì4.~Ïù§í…÷6Sg5DA‡k€Žƒ*R†óö¡\êâ (Nk™ !àg"‡vZµÙÕϦàŒ.$™Âˆrx¯é´ã—ƒ„­TÆö£Xr+æmo€ìtí iÈiÞŒ*˜~>¬P9¼¥ÆåO‹oÚ÷Ö“½“Îé<ΊE/„)™Uò®S«,õjS½ypÁ/–Et^¹õ"/jb… YïN›~z6îY„kZ Œ°˜8ò?½…ÅõÊVܲ…\\ºH[äCä¶ÖÃYáÇ5Ýìïoœ,™=¢KÃáŒr{E%ýÁiî0ú™ŽDKŠ~\z†ÇÇ›c“(Ð3Ô¯ Yx¥zñ8ž!²ÞW¼MGýMþjŽæT–à°aõ¡þÚA⊴»h’nš‹àÑé"ðÓ ¹êÇ^ƒÒ¡úñ÷ žá{?s,@[fR!ÓÀ©|4ä\²Õ—%rÿ‘ ç°çÓõCÐ7t8.¿®O’ë»Öpgl=‡w‘Ft m Dhnþ$Ëãå†M³u7ÍIÁ§Ã†5UõØhéÀ2‰n+OèBi~ÖA4M’u:BâMT¬OÁ푯¦JN1/g‘K*{Ü=gd™áMÄøèŒÇw0EÔýç캬°j9ÿm$•.| yÅf]À‘âzyõ—U¥Y}%Cª±ãUbá¬D.~gëõáÏu+ßWÊi±9¸ äÂþïâf×áõ>|•ôšX.ØàéH4ú?1DÈX«öÙ¡†îO%å®±wÅÔÔg3[z€^›ÆïÍT¬… @,ü3µ¥É6-Ù¢¨‘WzÉ/n#÷§|É#évpêTÞg£jµ?Büµƒ¾wåøeì–J Ϲk».¼~ðßçèãä a,´ú>Ù‰ŒØ< ýÀfs ~ *xbËà9Kxúú°/àöøö˜ñ=äè½(º OöŸò®¯2°•áY›Ÿ¤r»…/YŒiìr¾#²µú…Ó"¸ ¾ë [øÄéÐtpbøÊCžÔoçqÙV̪,S38ҽͿ‚[*®LŸ PYj&Ò©gC8tk¸¤=¢ú+<ˆù?8ƒØc0 ¡ßlr2× N-‡ÒsfwÑšüÃK‰tÀN ËïÀ(¾£ýÖ”‹ÌÍ4ðRòÐsS=T[ч¥`ÜZ…£÷˜9@ßo  ×H sPZùZÂøg4Ôl!ÔuQ±ª+M8µbiÛ#NŠ/æò3ã0™Ð¦í䉖wIHb õdËœv@ž@ØÚò…ÅŠd†½ågWçm™C¿ÐL…Eõ@Ø–(u*BÖ8=¸ÖA¹§óÓšQBE[VÌ)Zÿ!ÀX©mŠZnç…lBðª}…ݳ`qwÿ±|_÷µhÒD AFÃæe¿°…¦ïZ&eÛ+A@÷}Mxà“âV0ÖÕŠ±rjF5%{%§¹ÉTœº>¼ÑåŸú)è¡êô7j ÔF"ªo¼×"öY5Z’Ý¿ÓxóŒŽ 2J¬V‡Ëψ ŒÛß#g*vġເZþÄåƒcüŠf½Áo¼êËúqÿ6ç³…Æç¹œGüˆNØîþt(ñòj6—jnàï~ô!i~Ñg~_ª]ÕB²”²Á*1ŠíO ºfÙ ªw¹A«±Yسgß×ËZë’êÎ\ñÜCp>B•@J¼À×zDæ"q¢²eR×ÍÌøúë¶´4*t“»Ü³VlqøVÏ™jÃeOñÖ&kl–j±CüIº"7ASÍ﵂ҽk‹S5廎'î€É?]=6¼Á?Kb9\Cú›Ð‡“Ýð…÷ODuÏ}o°Y79È]«N%»¤hói7 
?Ô7—³Ò§Hû×~D"x?­>]G3ŽxXÞrÕšÎ9µ:/„ÌïdúÑp^Ç}~=¶#)%cÖâÄ;ãíÜNÖ&Ö(•½Ð9y–c(ð(xwáß}747'ú޼Â÷lŽI§Á³.!§ø³KýrŸ}dpæ"à YÄè=Wåilð‰çóÍñ‚fì„Ó³¡íÈÏL²Ü)÷`#X±˜Û ë\`(ÕRþ‡uÜêÝ-–žT<i@MÊÊ”¶2ÏÀ*øÝd¶sMÕ &EY59j‘"½nWW܃Çnªžéˆ}Cu?üÀFñ9_ÝÜ7'ÒxºCèè³3;ôíî€V6TG2›yE©+ …WÜhØxíï! —&Ò'V8\é~<ÉöÕµh2 7f„Õ©ý­˜VJ‚Å‹ÒmZͪ*ä$GUŒÏµz÷Û× sàŒ£ú§˜ÊŒ²Ð’}‹Áo£“ã!Ý¢1†å‹8ñò¸«º¿×ÍV_Z}©AÒÀ¢§”±øâ‰[lUf ž¸w £aQ©Óò ò_ÍØ9­êÕàùÄÜF$Ö$Ô<Þ?œ¬z¥Ï<™Ž[&ã^Á\s˨*+›ÑÉ÷>ɹR @…Kšƒ‰»ŒÉ)û”´_]@¿Ašb-ÚØ4…ð×ݽ˷(¦b°ÙyèâB›1ZÐÎ÷ÔÁ=âmÅ‘8â!ÇMðUøŠ+î}[ߟkêÃB0Kè­B& Óm;ï]°˜€úâ Ó«ݬÿ'Oõ—B<‡\'’6îÀûR Ò±‹ˆ¸¢ê Hø=2š¡P<ÝœƒîgÏ “\N¹ÿðñ¼Ä§OΰÚ ”ðD?Öo‘_>Œ0ÜèÍ¡,j¬8ð§H%â'ni¬×܇£2^[.¾íÎÁƒNÂi!‘Œ¸°\ùc±½m ?çw²£ãŸ!p§»<6.H³Õn-šÿИ·8ªÙM¾Bë×nL†ÊÊ@éïzßñ.büÅ\“G”·‰UHòc ž³n±öo‚ì.é Ù|óŨ>³| °@±Q€Àh$LXª;`4*<‘´ž*nñÓàÆ@tÍÒ¯SãXíY)ŠaoЧ”Þ·¬!šH^¦î˜ñ£†õïߌ^[*î~ ß7欩¿ªÇÉ$ÆŒôc¼;¹ôQJFÛ Ôç(ÎMPpõFýI€¬+”»=#íØ?7Ü >‰ÊBôì™Îïºá¬­îäªlØæ`Ê“+?¯fZ2Å‹‡Z!û²è´Z›IŸò7²xÙéÁôšµnw6Ê(P=Z=hÊqKÖ¯œeƒá ¤…¼W0ËíÃ>Û÷*~žEáÈLYèH¿cÆÊ™÷ N’¡bn-ég&ô/mµJ"TEÍó×´3µ;';ÀÔðø°î.òq@‰ì¢iGEŸyABŽªµê© +øšúàÐ>gJGðŸî„ƒ˜@^‹Ë?«$ÕÂJq(1AÑuñwv7†:D.\¯a€Ö~RcB¬¥:­ŸšIŽ…Q?Vè«€]ºç“èI.cAâ™n³ÖbGžB“‡;q¢ÉTܺ]˜(øDþ03楇LâI¢DS ÅŒ¡”)ŒãiA Ÿ©ºgj9*‹äÞ¸-i§ñ;ìÌC£Äƒš/x•È…Õ„¤$õÍ­è~lAD|Î|6¿Æüèz…ˆÆfœ|ƒ½®ò Äuź‚g;M-”&·‹X /\5MË3–¯#áª&•Sæ±±ºsX±$Ö×Uüë¹+ŠÛ7ð‹ÍØ<êÖžÃÆ×P¤>!äT‡k›Ç E¨®™¶¿‘m#}+"Ó‘MsÉâÌï§| nMqAkŽàEjÑÂß°‹âŒóÍ"ÊTMã¬EÞwW”SÛË„÷?4à'÷ç3A%3”E(D$3ãª2M £f•÷ ¦ŒÉP®uÖùÕ9÷TQ$äÖ:Ú¸·ß1øâz*Ì{ˆšu—Ïâx¡"”vd!"[¿é0¨ä+¢DaqGcKøbÇI9Kº‰“Ø”ÅbO2–µ~ưçkÕíÕ›d\á§íPg@§§¢G£?7?‘{£,[ ɯºjó4I¶i]g{í£Ô2À…û¬ pÁ7v€õѵ…¸­r@âCp/Hä8Gî…0zÎ2”‰ë´|§FFÂPîæ:XiÁEAÛƒSákxÎúé•sˆp½¨i޹ÛÊØm+»;ȳ»©-†Vj‹Ðtä§(¯F’ˆ5 ¯ÝljL™Ø0¼ ¢¤˜m˜“aК¯Íwgp×~:›#j¥.@„üV]ÓICò,Ì辟`9§9tÕ;‹ÊâôDÏüàßÀs5 E6@øM–鲘ùcaߌûCƒMãi¿5htì°üÙ0¶Ôg7àõGüÙÓ)hãA§w·¼^£ÓëÌ?ƒoÃI¢xg½H×Ë¡lUìšt(NëÂÝÇôˆi<ù=#HÀÕ!é뱨 22Ë Ø('’£/ðiùèrŠ[ùÆFœŽ)t9,Øjø˜ö¸²îï#á¥Íù~¸ ¯?{Vý%õ-œ¬úûÁ AŽl2Â3—WÜó6R?w0º·¿º¹‰z ¼Vg.‚Ò‰Ù<:н×V],‹G >4ÿù§‰ÑIÏ“húî,Uì4¦{31×KæÒʬ ?åã»ÔA¦jÇ›ìA=<2R¥¾!°aÄrÚ¥ Ò3Ü Áãë8æKÎ:»AT;DÈÊ­üo«pc!«‚D\ìÍtÑ|°»‰÷“ŸØ‚äá®Vµ}ù&~xp·¹9$zëã¾Í¨>©dw‚€¾ß&„8äQ3×yÊ]Ñã9zsýk:§%j*ÉΟi­nUÓŠäû§;NÂ2Ss‹ð½þÞ”iÍmf‚_hm“_x+Û]ë3ŽøùÉ „WöI¡ôÅò5‹s̲”ƒáihÁ,´TéJ•‹¿Î« 
[µHƒ9ÃÕmî*ÄãÜæpœyVJ“r1´æ˜Rº)5Îa]^†yIØØV5ë!ÞÔä'>Yíé!ú¥¡!Çr —¯þ´0‚äÊdƒGÖ?máp‚‹æŸØ)îL¾³cãž79%³ýÜC¬Üèò¿äÄdÓdý¢S#໺ÅâèËùõäËæ(xð{­}²"»¾‹ ç«A„ÑdÙ¯ب ´Á¬E9 –œvs %˜î3y™3kö»xíd§ur‚˜(§ë M +§Á%BÔ¸ :IÆCÊç\»^x÷»®§ûô öÎd«Ù̽…ãÑåxà·þ&Õj1$*öpa4²‘üUÔ_‹^›FȯÙ÷WÙ¿[Ä1×-Œ¬ïߦ.%â)f뾂$yÚ”¿Þç~ *‰tÍ'L’z"7è;\Á¶«Ù5öAwr8·ÇØøCT÷J–Õ·Hoò3Ö‹î!Ñ+º~D J—IXbïi”oGÝB3!HøX÷§'Äm0<à„-"SeŠEÒÍ-=Þ“ÏC¾ d}®†¶R {jÓ Q"Üw+•Ú©+¤’íJtÔtˆ.OÎD Íç¯ì¦iÊí4?-øçQȇúQ±°…ïH+ ‚E—4i ¸Ÿ3ù\°½çà`Ôº™¦aÌåE¨Ê•Ga>œâæel™¸x°s«¯h÷Û4v­`—ß^oÎÑõ‡÷•›—ÀÌ4±`>òÕý~n9 +y<¾ÏÎ]†Êå'@ò…îr5Ò«à;Ÿ³Ùû5ãIÊ=Ê\^øRp+dïö…jÀõfÆdqÔ°ûÉ]úµ1¿D¯·<–,lÛ<ñ,ßžoj4@²{!ñ$Ô|o~iœn$”9Mè™ÖCÜzOÝ~½8¶Š"ù4o^úÞÃçØtmþeôª@2›’%Ôkƒ²þÆŒ ) 4Æ öΗ˥“ðmBŸV‹ž LŠl¬A[WÁvPœ2ýôÞ'Œ9Œ¿œ ønrO Û~SŒóB”o=:.´ÙÊDˆÛNj~’ G‚IbT0Us 0ûöÐÒX•E¶·Aÿ¦*cè[¬±ª½éœSk¶ÄðŒ¼Q³ÀC$ãŠôär^ô¯8ŒÞl _lÕÊÍý”ŽØþvRI®Y-³n`ÉVãŠä#XTwYTé4ísVÌe˜ƒÖj£Ô·Ü‚ý*™|êàÊœï·F0?ûßc£ÿÛ}`/ ›žjt0_QþÔmÃ2 JØr˜H½€Gh-ýÙŒ¾RTV=G¢«Gëb)ùˆ>$zÞK!±R‹úØ=Dâÿ˜¦s‚ŠWžpSJ*E:¼8ét>T@ ë†öæï«Âjo›§C]ú…4¢JÌóò@ßåE¾ŠãKr$nôÝIÊÅÖïÚ³Töy*çîGØ¡Mª|«ç‹iV§F/Þ+Åë[i¥¨6ý×=*ô b~”n/ÏüÝ–TGÔê’Šš²¨*–wš8Ñx£!E¥Âû·Ûã’öÑâzâdy‘öê²ãGC¨%[ IÙÍi5†•(y^xodIÞ%uUOh‹,Ìh_”F «;r´Y¼îaÆ[³¼[z?PÌ]7\òkb¼m&Ë£Ùèž:p-«ìg—"-íé£rL +7‡ùힸ×4-Äç_Vom]£RÏ È_˜GQ¸1b¢~îˆ@§ëyˆŠ :oce.‡** ]ì†g¿% á¯-’.øíî\tÇ$MŠbq  ÇN70Ÿñq4‚ø> eÐ0i1ÉÝS»}ÚMеÓçÇóG‹ˆFÉÙÍ,°Uhú:Æ%Þ-øt¹¦Hèµï2GcsEÐÀ}Ñ8÷¨¦‰ñÝÄð›fXX×néêÍeݘÄpô—óC¦ÁÀÕœínhy<ê|Áoa[êO[›ä鲊YeìÜV·»u¶Õ.ó@ØM‡Òc¹P@!͵ÅXÒÇõñFpE~Š©NÔ(lÂKÚuëë§ì„xÖ·D`›f!– °Î¹ï¤“º°`Re¨Ôr1]”w€îÕHú³Ñ”ß*œ½˜³­éK òT°;FœõD‹ù@ÚWCl‡çC˜êް:<”³ç«Ònò³.npàºÙå4½ÀÂãíí­$Õz¿"cáù>’Þ¾y ±GtiDê´oVù¼w §Öc š’•£{ zdƒ0à ¾çÈ&]‚y…1‹ƒ¼"ÉAO• 씑µÏ›CâÙHÇWNð#,œMK¹!ºö¦f¬‡ÐMúªfÜv1o!öÀÿç×àyÉù= ¦¼MžhD6 ãõ[É~ÁŠ¥x¹W‘ ‰"û¤ª€@¡_9\gj³Ëæ#Ö`75.2…~¿ÙðÅÌq ¾}~°ÚÙA“P°úaP:æn upÐ'6$Ê(ÍJ–deúHr4ÃL':4XÇM‚ŠÔé÷}G¤ß,æ5¬^)»!n4³³lë÷l¾Ûó[,â3"†da;qóÆ­on¤u¿yÂd6ãé.c©S”M®G·´V¸V“"@9IU”ÖqZ˜Þk<二%Ü÷ÕñͦÛ)åëÒ­ü¤å’߸ƣq•¯Ê6ðdÕü:RÞ[¸Pû]á·4ýZËÎ+fÇ>—ÍØ‚˜¾@öo³<Õ–·ÇÆp00ºµBø‚÷NrÆÒË–µ!¨)ðÃ]†Ägó`¥GA+Ó~7â5¿AÓ6ßH0ÈHüøVo«lZ+ÝnÐ5ÕUƉ±q làð¸}ǽ,œ- Óh¥K÷n¿?'¾˜ùºNÁå4þOSQSÏâ•Fó×Ož(I îÀ<ÔWß0·€0²cÖp¡ÆôÇš,lF—7< ô¯ß ß™{ü;•E¡m‚´Òö´]…2­ þ{JâÅ:r§u'ݳhÕç$„ùÕ-%î'†t†QåÍL×ñ1ì|ª vƒÚÍu>*s¿Ö 
ÇÜ+ipa°F´lÏ€¦–qÏq„¥P(ë?xKô¯7[ž›ø×éœ(Ýž‡í1™dÈ,{šqDz¥gl‚|b‚Äj¡»L霤 Á­h{ \{¥oܳÕ=yͦÍ€^utÆB*ûÀÉžøW—é5 §îi<ûýÝeUõëÄôò;>Tºú[nªÎY$ëmsaÛªãÕ;'$~NýWïÁV#ßUœvHgjQw0Ø‘þâ#Ûfn!ôTÑ{¡¯(©¶x,OÞÍz¬Âh3‘s9Uš¿ˆ¤̳.®7áJþÇutë_x‘ÊÏmîsÐÅ?ø±jEïx¹r‚r8jûÒ®Winåܲ¯!ë3çÛm+ãjylrøìÏ¥ª2¶²îÐ@ÙK­pJ×ïÅÀŸÕ4¥×Ò~åkw´¾Ð¬*sïÐødærO˜p„oIjv0FÜEŠx®}ÖRÕÙ‡å£F¹ÿÝ.qÃ;ûƒùAu"÷†z³8ZI«¡Å¡‚²¡´ŠT{س²àJ%Õ0`ÁZ§} eó¼©Ë×Ó‘a¿àöçXÀ~à|û˜‚–† áa€Ù—šadNöÆ6¡HÄ_=èÓûI káx2 ´q¿ô,K÷rÔŸGí]~… XZÞ¡¬|—©Àq|÷™ jêܘ:ÉŒŽÖH¹ü}ÜÃËÝÿš¼˜UqÏÝ‹ßE†= ÆH(9<¿USPÖëFCEÈñ™^“¹*˜'E\§œ¢£‘á«?ÇÑ«=áÓ•‰³N§ü-¯lwÇ·‡²—'Ô„QSTn0t–O”D˜!åÑC·]ñ6(òhSCòà>ßõœcÁ‡Ž‚UÖ@•üç:ô>ŽÞ½œð›çgŽAß]¢IÄ9Z?SšøuÂ’nþ$„Ä®ˆ™¬/ýÒKK8ó j½,-SeæTþÉedØUù00ËX ï˜IJ­N”X=ñp%{ÕUc"$êXésÒó¡  ÄÈ›­g—? ×já I›&½SF4ÓwiÅmUKª°°žE¿2†.DŸ}Á8ÓÔ«#»LWÛ£R=üD˜~j5¾X%€¸$Á|fì­¥â¼VyW6ƒjTè•Äæ9¿äK5 jTp{¥Q0*åqæí3¶rCJ¶1Ðȵ€}c|Ò‹v”ãOI hŒžoøE”h=qõ!M ?¶ädUøü‹> ¨R6Z[dH0iMgVÅv®évRŠŠ|±ç%KC?HowÛnŒÀ”7©Ó±B†šN1Õ_ #ýqONÙ«¶F•‚AÀ¿A¶Rºq¿w¼Í=€Š6é‡ ë÷±½¥N]€3 fÜugŠF°·ï¡ú])ÜH»¹9r~bBç ªd¾öÕÕùxXz  {jÀ¹hÝ€Œü_§mx¾qµuØ”nf †—úq¯ß7„Ü&¸[µð~¨4~$­2ßJ}iD­wQ˜×èöÍ´Aó\RºŽ%Ö¢¬c655o› l¬1ŒýwºÅJýÒ4ê;¤™¨q2ôܸ8$güÕ )FÒwåÓD”NªÝ ¾šæ§S¾;®à} ¶„ ¥“÷Ùª$Ïç¿¿ºhÒ3ž°MÆùÛßK¹ÏÛ8Œ g,s³x @ò=“Œ?!u=_Æ2Ãß2,ÒE*¬ö;üáuŠ’N.áÑœ#GQÀÍÒ¿ ·'…1ŸA(¥"QÛ>½j!Ÿ1cŒQ ÆCQ ºÁpMM,`–sYÞCû¥ó˜UҾNJ@ ôz7ŸCijqN ÂõE³`~€3‡±(é¯õ‰Œ±ñ×+K|QùÝ&H™mŠHêí{íO}[Û^©,í3R¶hA(Þ1Œ Z| ̬±ªl«Ž¦Öf;PÌ’CXd-¡·Ôt–†1™”•kil?¥è½_õþ0ßèk'<ÑtóŽ |ÈM+•· Up“Áe|§-È^úœ›öæTÇÚ_  ÆÎõÁqlš?ƒ€ÁwrÎ/K3:Z«é¶ã]êvUkVÏVÒuÓ%ŸÎûìŸó (¯ôÿä2 WwSZÆ­ˆCÚ1ƒ ‡Ýwyš ¦ˆºþë~<2ˆ¹“\Œ_Hv¨[Žtã s&Ž¨Ã¤t"âZÆÚŽŸ¦–H†P›aØÆ;µ÷Ü BåÇs-í>ºHö nïYb&gûFx>‚hg}ŸjI$}[†“ýÇîcÁnêÐGfUé0Âß‚MåäÆf)éáài± ÉŠ3“$”:Pf†4rìʦûƒ¤¨ûŒáùÍn.?(«1?¸YGëpÉrbº¤}Í ½Ú´ïj 7PD!° vlûý7@Á¶Q¶0@κóׯ%_ PWÍê`ÊNR[á´?ÚÏë»à“Øâ•~›cMÑúuLS½Z³ÿî!ôC ¡‡¤¥$|ø§Û½Ùì¯*PÑÎë?ÀBU±C<1ŒKPUÐkaBÑŸé>·ï¿r M×8T‹£‘88ô ‘.RÐîÚË—šÌî™ì(Ôô€É‰uW÷Ý3Ç2:ð.ˆ41MÝìh%‰B¯H…²k§ ÁÏ›Šd%P9ÃËÜg úÕèŸØT0ÌTß_;Ã*·\‘ÖÜí>å  óÌm“µˆ__Ÿ'ÒmlxÄ] iI²_¦ÃÜ)“.’ÎS‹•LBk ±·ü«^O1 lÐ~39V* *Ü ïg"è@Õ¯àùëˆ×epÔ5QëŸ …P…éµÃOàfÜ òº=dâḧË?*¤Kï諳Ҽ¢WÞ™a¬Xº4óò'JyâtÞÎú æòKAéÊkÄ0gkfFg·U"µ]<3 îeÿËžm (~û‘“œIÀW‰àš’=y'N¸nXk–úÎÖçÄ¥?gW¥ sÀÇCçÌNƦÇÔxøM'—¿ŠcÎÃTòf£yv ÛúÖPŽ›älU'4D¤“+õ}Åâ·óùÃá!|Áa72¾I!BzØP®™²NÞMòàPË“ 
¤Wá}Ôw©ž¬v“èHÁÅ MÄ#ȫ⠘f«§óUICÞÈñ°â$èŒPYÐ$ƒï¤1”Ó -²cE&hÇ³Æ "Îà™Àð¼xZ¶Þ.%!X¿˜¨Qn)ß…s/Êûöf vÑÆ†ŽV²´ô.ÙऺÉЯõ÷Á?ÚZ¢ÊqÈÏ:ÌX×Í;&1¯¸1å‡Jšå³ˆ‡$‡ŽŒ‰MD>.Wt"⇀c Dw.Æàw ù¶z¿S]Î}”ÂCm£XŸâ€_†¾1䳱ώ‡t†y\7’Þê\PÖ¯Öî”×/Ãóõãn…†Jcb ‹ õW'ÝTôÇ©‚ª¤Mù `–[wHïï\_H¨Å‹p¢´ÅC·Á…"¥ñ0˜cùö+Óµ±4Ø­Ò¢É  ±]ŸÎýmÅûñœJ‰'ù±´µAmJU£DKõÀ­;ü­1D‡Á¢#‰k‘ýiRàêl#_óvÑ‘§•ÝiØ^~ q$ñ=d˜=‚BùÐ|òí2ÒBç —¦­«¶5 ë¡ò ™Œrjÿä_  ·BÉp“òkMÔ…ÌëѾâÔõ`œ­¶,qh˜ ƒ›5VœÑ+‹¦Ç4ÿl›fB–¼€z±Í§•”›¦`%ñî!ÝO暑¦w_s/âÈ=yl“y±y‹ÐÈìÁhŸehÐ\,#Ðh°yRø96coG”–$ªQõ¡zãÛrtƤc÷X‡ êu‘æ[LÇä‘Kå‚tXÌöà¶t•_Ö´éñ4ˆ‚Œïxº›ÿÄË[%nf¤y/2÷ÏnÂwœus_ç>Øÿ–šß¤„šÙu¨ƒ5ò=*jn­=â«ÈÅøvlVOè Mô"¿WCCÌøÃkF³¸yÀyÿ<ú²Þýæ0!‚øß ùÍ»Æ>è±è®ØYÐgfžo UÚSäIt °ŠûÊYˆäX ‰cç*jqE?Çk¡£–d \öËb$yúRN¨ì€s>ÊüÞ"C©·P¨.ï'fñ§Ÿ&È"ûTshL•}⛪C'Ø,ÃÁÔõL× ‚Ž©êQ¿lîé€Ò­±PÏÞ_ØöTøe£ÅÀÏÍ(¯ ¤è´èfqêG•*F²ƒ“)‹\ñ˜x²ÍúK¶îÁ»½Z¦|õG,åÓòN[¿\ML¹«ù¶j@È츣¹ s&¨ŸÉ•ï¸ñ!LÑÁôñ›öñMªÙÖeq}]»ýî1¸g´}ExM˜¡+0ìi†Ë¯\/ÿþM:–ëÔqm·óHJbÈÁÂ]Jѧ1•1ÓkÒµ ZSáR &ßDF=O-“°+íé¬aa\ «Ó4\ÅE}8l·Ò¸D ãÌ– ÇÅ£ˆÖEW7–¹Ø„¡ÃzÎæaàKf0þUÿÇUÚ˜„J!ÕØœü¶üjIn|w: fŽhFb¨»ô1\Ù˜³½ÄÜ6:WuÐĈú*—lE|ÝêZ8wíy-{7Üu%©r‡µü‹W0þ¬P­z´ ðI¿ÑÑÆÇ„óçúæ~3Îqäë2 Ù:yÙ,¾¬üòˆºëÑüߘ¸œÃÝWÛ³\Ÿ‘üëŠã½íá“ö h@ó™ê"¼àcþQ¯¥½q¡t%¡?‰ÎcpdIä,MÀšèsÚ3üMÐ=ö´–[—™¥d›o P†¢½Y Ë’ö ú†æ³­ÎÛ‘Âɱþ?¸LÄxÛdK $3³hú¦ñ‘)ÄqXÕ›ûR ¤À~ýÏÐcŠ«>[ãÇ5VÒ»ö¨æe• ãØµÐã÷Z›0®9âEx@°’L8¯Ä¼Ž­°á5ÙõRJj…¾f#[”!絩PFVw+•²èW€ÞýÝŸ~àB6å7Š "s"§«!¤¶&&1^ ržc¨ñW gìÆi¤îEzp‚Ç_Üßo7ü'ô‹ÇÁ¹ÌÎ3ì3» ÓR"òŠs–uõP¸EŸQàÉÙîq¼^5¡±—çÉ`Ê!áè©ûÙ¡˜–¦®”Ø·øÇFõìD„rÎe?L#b¯òÑÀ Z@&w(€õd‚GLÊû#/Q"7tΚfy~ôµªî“ÿs¥ntwÊŸµ‡Ö]³# .P¹‰_©ô„â®% sLû/l±øâ–¤ÄEý3lõæjѦȪÔdàuDècý—l³¤!Tá„Gœ"ªØŠº(he…&·Pu9~ñB’ hù§fxv‚~!oXXXcû0Ì~ÑÚk—]ø`Û÷B¯®ýó—Œu,äˆýŠînq˜ŒÊq©€ÍMܲ÷/mú ÞEÐf÷iÁUþaÇØ `-Y º\ZPæwÇ;v/#>‰¯bˆóZÕA0#å -[™Ý²QºSä{êiŸkJ;%ƒuÿ|7Äi¯àL•|®Q½óÝ3å”F…©- º§}KàÝ:ïýˆÉ:‰OEÊì}¹ÕHŒÝì×óÔ ‹2 »¬O€õ·«èkuOîx$G³ºr:w#¬š9¥vÑà/ËýëÍc=~4yï("{×Wˆªpת˜1žÚ}Ç/رrգݫ+¯t9?žà È}´##M˜f]ü/ßã8¹*ßr8âŸ3F—uûÆS錈 µÿ±¿Î1H£bÚüi›”XŽÞvî­xˆŒCo•¥èÀ(×h<¬Æ+Á½ðèÿ[¢§ÂNSºköÚ)Ðý±þæÖÓ¨¸ƒx¯LÖg>ñþÀ%Óke^%5¾ b1²É`š‹ZKðÛèHœwBg?À¥§ã=¨rõ£¤þñ;º-ó$–þZŠ*¸½aŸŠÄ†$mdn‘Ì㊆ÿ‰s¤’ YqT»",,†[#ÎÀ ½s‰G5gߌv]ð¥N õyþã·¦ä ~Bˆªßý€x wî¶.ÛØCA|1Å s*«%‰So  
ñÁ¶h˜Mžë캔óWµ¿ûJC2†Ô†›€RöÀAÎ\¸ÛÔå'¨±$¢0ùw½§¼à4أʘäðÓs'Ž%«ÈðÕÚK~ÊžUïö¢5ϼ5«1°f"8èä~à™…T|šàÀ´›7†˜ÑÆEØv­™XæCšHÿ£¾Ðñþ|vW~ÈT;ŽÉ–švåã™Û!Ö®3M|~…o({DD¯5kP蔸“‹Ø¼à½çÐh«_Ñ{úo;¾-Í–éŒÀþp`š»!IÐïáípcðäõvyY­Iåú¹sH¥@}¨UI_þ¦¿ik5¹*ƒíúendstream endobj 138 0 obj << /Filter /FlateDecode /Length1 1620 /Length2 14328 /Length3 0 /Length 15163 >> stream xÚ­zc”dÛ–uÚ¶mÛ¶í¬J+Ò¶mTÚ¶m»ÒvVÚf¥¿º÷}¯_×ý«»DŒ³×\k.̽ω3F+©Ò ›Ø™JØÙ:Ó330ñ©«h*Z[šìäè¥ ­‰þ˜ÙaÈÉEM v¶b†Î¦ÿMÆ¿‰˜ÿµ–7tv¸é21011ýùþçç_«ïÿF#nklgò׎Qu6´5ù³ÉþÃðlìâèøGÛ¿ÏýŸ¦ÿ¹þ{»›šº›ì-Ûó[¦e¦;×aæŽLŠéô1ƒŽ„Ø—6ªø×Øõú¥…ïpW¼×†04Mó|¶{,ÙÈÐŽõaXSö¦˜^åãùR÷ oQtrÒ2ê•§ŸkF{]/ÊýÓá`Ò8ÜTVÑ+y‡ÀŸîdu„º~¦ö'u-ðG#{²Gð5NmˆCïBjB©+<;§H#Nrö0p|h4þuåt˜†}íÁN³ª’=ÓõÄ ¼UýÅß&Nû4(©|#Q’ÒÙvþCSCþý¢ÝX‡f]P•¿?àMwN­Ñ좚%Ûzx0õ°“ÃKÏ¢Æ?„ÍÝ.H qÖÕÂ<àŒhùÓR?¢#‚Í×zì†PN„E{Ðv|pè‰æI%uq¹+Aô‡‰n­L¢ •"|\ ß±*ür/âuÙµÙÅxtÓT?ÁaÜ8f²º­8a±óJñ꿹Åðÿ®|˜d€ª'4Ãä§sÚ.€Ä|YX‘míf8²„;CMòýscèmÛE×ýW-E‚`‘²o… †X ðA³¿R+Pc'ª=¦á•j›5ºFðUdøø– q8fmÊü†Nt Ò¢dìvK³Z衟BõJN*œMå̆é{WõÆtQ”m6àРy)=ÜNøŽ©JÞ» œZéû°>‰ÉÖ€‚ù€_~xÓ Œº`õ¼ß ßéëYû³#׫¯+’Fúu”:e,Pƒ+§3b€ÌÔ7¨¾]¯LØL2¸s–…ÕÙ] ¨'ª¶²à¤æ¢MHsŒãZ]F<ÔðS@ßh5‹rëV"ô$ ð%¦Èè¤zËŠ`ʹ–L«$>•sÄ*nÜŒ÷Ϲkí†s"~í2Ê‚iA„ÎÇ0õ°½½ZôÓ]ù,Ó_]Ç®·;#­mæÀù¯%0wDIJ<‹-Êxm7ÓÔ%*kÈmB+Ä¿[ŸÎv±¶tÄŽú74É s™¥!bQ4zø(ÑP×iÅ~/Ñs°Ü»ˆUT•­—Yó8pÌ;z=HR“‚_Ÿòè¬Xžiu,Áfv˜1œ™‡â$¡¼†›á6Ö aÉ¿Ÿós-¢;¼舔üÒ)Ô‰ÖÊŽGˆµ²´b=©ÛDÊ)ƒ  >:pb;!Ø|¯{¶sJ‚ІÁ#ì—1ŒSÀÛ¾_IFÿ)VÊMëÿ]^~´rÃ(ç»H‡Ü7f`rÏ€¥óe™Qã¼›† …G뻟pºcïKó$)¿jqv2˜·ˆStJ‡ðBaíƒxÈ6¡[ËEùAVžáš«@Mš oûwb‘ÛU™ÚV׈1{B4:Òhž'÷E €˜3žßÛÃbùˆñ«`N1ã7rbC}ý&Õð%Š!ïQHN+·!‡n^Åï».û=æ³’# [Í!"}\­˜ËI ¥1€è~N8Žlu±‚t´u\áz€²ÀÊÔáÙ—@É©»{h'ÊK­grCØ NZo$õÂOòºzU¢g§‘ê³rÓƒ^Ö)‚fU–ßzyãX0br3wä+s•=Q:¢R]~ ýfr]eèÇzÎ,`¿6ÊÄÁˆÇŒ u ¾sYˆ™!£:Nª*dCI&mïâÇG>3x¢º÷YFàò,$4HŠ:]ï ñÝ,3yýÍ0»È§TágEœñÈ@© v/‹WµuŸ‘¤SD­ï3(\ ¿¾Ù›*pñWG­]¯×Ô‹ÓU¥“’rÀ¶1Ò4‰¬5Ðï‡'l>PW´1£/[›_ 0»m¢U£ÛA•­á7óŸ¢Û=c×)”ļEtÚ>ˆ‡üKŠ××™j0é-P« Ó­½çÛyH>Ciƒ‚œ;PðíöÁN™¢žÉ$yè‹xZ›;jZŸßv‘cŠêÛó­õñÏw³ð(+–Ï&z#}¢O¨ö?X‘) ø@Áüæ ”ño=RDÙ¨1[ÃÎ)‰HLXÅ)›3oV”¶ j#+£Æ¶øì–WÜ÷]LZ’[%ئ—&¤k»ÍTè…×ï6(±g†™Óîøx2Ê‹#8çZ~l¿1 ÃGq*÷+üL ç‰AÉÂkq²)ˆsÔ÷¹èÔ.÷Ë{µrȨsdç8À/1ÇKrÖqc­q²u„ ¬Ð†1p¸xLºáp1G–aˆñ‹F/-BڦƙõÇÌA· : 
4ý°VûýšÜz;%´õªÖ›M¬âˆì‹šˆaŸõ5ùÝTYšeˆ&€·ã3o2Ro©þ/DÍšl‚[GŒ$0áYwhx-³QÐÆyР -á7 ý1üޏÙ\åz9£ÇVòú³“>çÃ`äŠöç îñŒáXNrþ2p„ˆ4SžÏž‡uÔ=zUcJw!- FrÕK„sŒUÚ4eœ!A‡‰º@˲®eâ~Û»/lw ¬-˜˜ ¼;¶m«‰Í†…ÕЯ0/iàD¢ïÓ©rÜ*ÐT®¢Ÿ´Õ:‚q”/¨ZE¼:åÂok ß.íÄ}VPÜXžÑbß4âÎù"qH[ÛƒcàDG70c‚ìûŽ/å0Ã{£FÎñ;Ò—‡Ô×}Ùݹ‡lœòIÏ‹Þ×&@@f%BØÙ.Ê´Kâ&—^€ˆhV5Í~y&W›“4Ùë¦-ÑÄ;þmäéš·™Ü™PU+×÷øS±uvQ‘äõúx5 W Iy8ÁHã_hKP!}Lµ{®æHÂåÎÏù|fÈ­) U¯"–¤1÷ ÝáSëPN·Û\Å)ë©kæÒæ¶T¹_Z´3‰¾µ t¡ÖK¿Bî´Nܾ‚Ö·!inƒ‹¿Pt€«îžzÁuT„´’`gðÊæl2èmMÈ÷ㆠ=Æ(Æw * îûZBqǾ]XçQÄÂD%ßQ—3íRÖÉ›BCø65ÌLZÇ2=i•C+µÍ¬9±"­²Üuóén³c7Ü(áCyG.7Ývˆ]®Ü·¸>’ŠßaCë ói¸bý6ËHÔ0É¢ó7½3_<óþ÷ó³ÿá.Ýî—,“ÛàE<º­¾É¥Ë‹¸nàþ›o±7IÈ’mÔÚÂ*Ùž5ÑRëv³PµÕ´mõ• ­›Ã¸ FäèÕâŽÊ-¬º¼IS-¦“@VôÖT£P–¤ø2Ëö3*)ŸÀó× ˆPzzw|µú9Ö®cßò;+a4•±ˆÛî5R«1êÏ>EöþÀ=, žˆ zäí N´Ùõ%ùA•`æ]ÝO§=Ò„j|ø üÅSœK¿]ÞÃL¶•|%Ç#n3~g†ÝÆê3ÿú“Ï7ý}BÚæ{ξoOòŸœÂkl*ÇxÒ ?³P\T¯šHŒéS>¬ƒ9b¥¬áðƒÆjtº¹†Q']¬íŸ‡Fµ«#y¤¶tÚ-Éêèæt|]ô ›Lò˜Ø\®ç‹aý¨6 `á“J”FÁÆÄÎ t}–£‡û¦› W¤§”÷Ò}$+ %Ð5Š£/„´¼àÒ}²H¶%©Ýì¸)Ì¡=ÇmæëÜú5 ŽÅ'0# :ùq¤¥Y–¯Ü-mˆF›?ýh6K¿ÿ¬Lè}Ž6mqzyAgâå¯WáÀ* ª0ö‚ì=q™çt&z°¿øAFõvR†í%½ß•³jU^qFÑÏ÷i±–þ=TÔŽÉUTlò¶ýÀ‡[ AÖÄ-6|:€ó€JBÓϬI#w7{Æi˜ vë-Ù7ß_75„z:ˆ—à¢6É×|£HU~YM U-üë$u³~™5×0ÌËl þ9‹g—ÔÃæ²ZDw½DÉc/ ]¸Lceîl¸?ê>GöT2ÓbŸ°E]ù‡›£?B'ÄW0=Já4ÇMkA2eŽFÉmö¢ñïëiÎ)³£›åi)‹³¡ˆwÜY¿zâ“Ü‹Jd6½•;ô…ŒÀVL~‘kìîžo—.OÝ%åþŠ…üyùØ3|¾Û!ûBçTg§§[~ÊíqöÖH©N)·j2•¹»a 鸊v¤ ÛH“V'?)-æz$OTSQûËøCÄH¤YæõZO8ãË€ñTù¹c ˜‰š×âB|¼þØç­õ!ýw`-JÙ*Ød|ì4Ñù—£êäPM£H†J1ŠTvZ3í½·`©:ÌEŠÉR‰øV;µÇ±uúð(­Õàhá†HcGRO¸ô ŽËÞ¾”:!¾L¸)C'¸¶×F׺fº7‰P›*Ë(Y b•d¥»ý±2³É‚<º±é% ™å;¶ûßÇÜjtˆ8„¬ 8ªÍ4R6ÄPÓ¿4Û`Aî5çÂÙ­F"©óå@óØÃB¿#î­@µ’Y™ƒ‹l÷dM2Ù"ØòÈ~Š>U*"\a~ÔãøëîS£j ½”8ÎÕ(åÁTÕ„Ó¨Áú[6_´¼ì*œl*Ê[–†ì•„&ϜѡWq­­á//þü]»÷ï¬.|DíÒîÜ€£ŸF¢³+”¿¨ù^7K{ŠèÓyðekêÿ4ƒÏ×N껌\×b…Qfp dâ„mègoÉÖü-Þ>ÖÊnmdã) 4$ãgŽßôÃ,/HåŽlw–q ÂËvóÀ0ˆ>RJ€àÜê 9H²f­þæ+7/sÚ¢‰fê|?}®~׋<‹\p !ÙdÛ:ôAƒe„•r±fFsö,Ç¡WœWdBI ܼ£YÜN£¯úHBËÈÛÈPË¿DÁQœ_HWñ´V‡þ%Õ¸B¾J«X´Ñ wK`3—¢LmÑМKw½€À|¸Q·¦fßËBúÚ6uZG‰|E‡¤ß™llè߈$A®–ΘýŸ#±";ñ+ÃKøÖG}v1íÂa Ç2@ÛË» Øð oßçŒO|Ïtf„}¶ aˆÊß&1~=+õ)g`ð/ÆÉÄ@AKw󯋲¼ eËÚó> òi±ÚG­è6Y¥*{)ä©FP 
So²âÒU¬òŽªb0í•ÚB‰ºkïÊ­qW”`\ĺv†8•mtI÷Òý¨†4<åÔ£5½ÒoÇöï.˜Â?9,â>ñoü%àpÜhË?ƒ'?Þ(Úé(lêL†À<È1›Êñ¥øÞµ<ïò”SóR­Ô·õcŸzfh¯ÑÖ^Q‡!½tbÚ¼õEõ›l“øçg'¦ÊĨj’ý¨±Yæñußóüúˆïµ‰tRQw¡Yå—¼buñ£Ìçmã ULà¹`ÆÞú š¸1Â0q“,îe~î”PÃTði ”SI‹.)‘Jù0†·—¤aFN¶L5g«“àÃ*îä(\&š]¢ƒU»'HëÀ +1CÚpv22¶!fä!˜m3Ûð¢dÑ+?)ý““2l¨æ(Ê¿w*Ìæü†'¤Sòyñ_˜³ô#ÝÓΘôÒµÄá”îg£yJ"¼<¦Ø?FØéMsKîíG3PAÅèÄüHLÀÇMõ&0,,Gë&„;ƒ0E6ð% 1¢¦JÛyíà „ñ[éKõÛbœ=*­TbœªÊÚvàBÙF—ØîøB„3©á1Pó”7B´îŽðÚ÷¾Ftª´Ä¿[j|ø‡ÎêÊ _º˜cRUëfJ+z‘¤:7r@A ½VvóǤG©Iya\Otª­Hz8è‚V†qÚñx¨ÜŠK"8ñÔŽG·z}Reç`um¶6”ÓZ®v5‰(bÁ¿EÙ*u¯?ôgô{Ý}Õ í²½Ò-³"‡KÁX7÷«Ç?žÆ‰Þ­è0'!„³×tL}Ž(]-AèÝ@½óõ˲9 4½3`“kåRxKüžþÖA„–©ü[§¤à2Þ\ÝÀ6ʉ5¹â4MŸŠ>˺øHííÓ}×Á\&ÃìeºàÆ%Áy‘úñä'ÑRBÈXÞ³}Dè[ôx˜$›sÆnD ­ù÷c·;ÚÐã¥u ©`¶K&`€;ÊÑ–¢o÷„¤4:²ûœÄ¯Åq—S““Pa`¦¯÷©åí.E{´É¤ˆti¡„¤æ¬éFVJÄŒó…û.ñݧ„„œ?Ú²]úˆ¼7—)1Ð$,Øí£­vš ·Et`Š¥CÇ›LÏ‘ì8T‰§-  ŒC'øÅÕœÐ"î ¬Ñ´ŸRì[i¾Ëá-­òöKHƒ6ýº’⌬¿kޱà9OPÒ¯ˆ\•«® 僧LÀÿ67°iê¢Áž•K]ø>éúx)wÄÆÔuåÝÊ–eLyuP v‘‡}n¿¥—îÔ1Γ«—ÄæJÞüCQÕþ´_Æ,œpGñé¶Æûr}5ŸEÜùçÜx¡»˜G:UÓº[Ò©g¬iû“#c:lNÜœóÆ´œfÈòÇÏ~ReVîj?]ìµ+ã -ÝEÊRá]dpð^ja˜ø˜¦Ý®šçolõ}EC¶îj’¨o•·¿˜çìâ ’'ô”Tbmé O«%I•Á“èEI‹/‘7e«åæÎ^Ú"—Ë'í¥öv©>jóá+Ú.}ù%,M>8ñ+ÀJCô‹è\ìQ‹m#=Vp$cö‹ÒæœåLé "02ÊÆ°}ùWNj=mð){Þç)VÊxp|Fܱ^-®¹í€ˆß—¾á|ðà ‹¬r„8^^¤cI7 …sï¹-M¤Uÿ\…Ê.Õ¢Ñ<ÖÇùé¢öVZñvžè@H#Ýÿ]Ï×Â]Å1þêǧ;´WtZõ/,óð1¹7ÞCn#)ÄØâʱ§¯ÝùåJµÇ*† ÈÆŠË¾ºzÌ ÐÒ‡NÄѼèËÚ‹à,,°µD"B{«XE[ô¯ƒ!‚Xùqm¾´Qbf’!¹a~% [²GUÍWΘSjìMo˜N=]´mâz£ †­Pµ&d™µ>œðÚE)¬}Ü8¹™SÃFzŠð‡Ü*»-/JûL‹jZ$hñB1¸¨+ë32™®!¸×IËDBçÝZi¢Œq­©Üz=1 D¡s„7D´T\û~Í0Š™’OØ­æ¿êOi`>h~Ø= §ƒFY9oÅ@À~Â"”O¡Ü ƒnÞÛDÈç6|-Ò‡À.àDÉø+3Ó0âÞ£ˆ8f cÎö“u’lÆ´S¸vç–êò©8.u.B®¹á6¹i,³ÌUçSþeK!ƒêeÔ‚G”Ã×U@-4†‡r,Ô˜Hìt5ÜGkL†êôQuÿy&Æh_Q{ohY©]>I3=è¶’d«ð•ÖÔ`x<ðÛü:YA)'Ñ­mÕO€4Øc¥i³n™”cçO'Ò# /zŸYòö3ÀEàÀözE—Ú$åY’/Ÿk§%Ü{ÚÌ T#-扂LÌ>$9 ;æ•=ÚÆ½|1ªä³$FµØœ X¢.=(œ}Q.gzK4+òÅ'#í _«Ñ ´’í˜òÂÖˆe‰tܘÅ1YøvJ‚Ãñ^«·iÈeÑŠB:=šurÉèÉ`"(²Š?ˆS²ü|ýú˜zWU½oyå ¨:ÝCZ¢^¬n õRNM²Ä>í(Mª7×vR¨•Àh?ÙUMY%Wh1Be}¬²…óZ¥ÄÕ‡8V&eÐHª#r…Ö<0Ü ³ß¡ægØVq¹î'²* yfþbt Ù,4PX¬ i®`Û;›øÖ¾‡V¼@\r=zd±ƒ™Ni¯‹6‚‚r–óÝæ2Åpl† È ·å#[WdÐèq©,Ìà-3ÀïKe8 Ð~0 CɵŸ!¿EÊ*ÎO6û¸W«tâ;;ÍÑõE©Ý]VùÒjøz^³0æ¿Ä g1?j;‹>D•e=Æn–vv©O.5—PBm‰ýÉAx†p縢:Ýœ¡ ˜•‰Ô`bú‘r|¦—-ùªZh@AJf!Ñ[•@½-y÷å-©wä©â\P hëòzKiØŠ£: ™§Õ†å[Hj¬ŸG1.6Tý˜…¤K„½FOªVfß³ì 5§ 
SÀhMýLWÞ”úƒ%Þÿ¶šPä]9© viVpѰ Ã7ÿ.±Ѽé-ke' üÜížÅÓe €hêä§ #@M;=GI]lÒ´ç}8H‡—Ѝ§0ÎËË`Ãn‚ ­¬I‰ø=yfMEVœ-ÓEÆI"Ün*à}¾bù¾ü„Ü%4çs™#/¢¯å©²;¶:s¬=F•_·ŸîµÜ\Q|dÚ@ èE>‚;º×æIï}G?=.¤‰UV… cšuA¶­£š É|!ø&¨£Ÿ„(TeÜÍâ tT“Šã!Àœpx‚³Ë¿)´à¾Ö`QvÛ~Öb k»£ Þlx¦PSˆÄw‹SŸVÂä~ò"Û ™ÿý*#Ä`j hŸ"h·RžÃìNšì[²ÏÌ>bÈùýÊ".¨Êj;ìîíwkœt(}™lcÖâU\³|"/±¼3ã]Ì@q~ ™0,1@¶Œ?“Å £ŒÛ ìËj9L²8åË‘½­iÁ6.û¡!Î’&@_ý0ùŸÀC»ÅÀlŸ†­*ÉXëNÆ& åCšU¿a`¨0uûeÇaD®Ÿä;XmÖ>½?3wEJiÝ„«„ò¦ ]ŰÐÕ¹‚sI` m2yåÜÌ5D<§Ó⯕s9„5¹ gD_*îw@Ws\$Ü`dÛ‚¼-N’ã0âÙ¾ŽŒsT²›)õ©¬4:Š?à’Ðakű^¼ :ÙÓB„Ç÷öp¡ =–ÆÕÂÖåöå=7Ó.¨c,c(JŒ=ë´¬t²$jêPn9>c"å÷´†Â°(ÒŠ+µ@S¨ Íô£O2úûš~KŠUÿ°mQÂ.ôF~¨¤ t`%nÙ½k)›ŽÀì_áf #qÚ™+[µSÇHc~`9 ×òê0ìVU¼ ûD‰-&åÜ'Åúìф͜®ÛÂøãá!¹#úýNÃk6|ßjrPÄ&°þªöÞùÜÿ¹¢N®‘ž¡A¼Ñ¨OÒƒXLîŒS¼î±RiÕŽ£²ü-ÔŒC­Ä^6š/f{ÅeÿržpƒM^WÕRËQ0žo³¾ûöG§^¬réDTQž`€ßäjáϱ[m¥"ã"rÌé‹´”ÿ­@dÜŸƒ Êû^ÑiVÿƒwØ‚ ³@ ë´²®¶Hœ¸V%'úPÝèW³=¶³š×1“ˆF}>.2…±w¬©^´äX¤ û[~V×;~º­à3ìÓn ÖfêÌŸ3¥ Zc̦ƒ2cÞ8 k$»êÜÏÚݘ̃‹H™•”]òj㬠‰ìWÁ®á›±)VÖë|æ~¹_ôÚÎú¬Û³ã)v½—RxfR.˜êþÍÕî±É,îõU”­D‡¤‹Y÷‰vŒ.MPŸ è›°ªvóPBCà(Çõå dªÛ#_2Ì,”-jY¦[äyíÖg`Ó4¶ü ¸Ýf]F]P½(Ì“ñôšßÔ‰%´vÐ|4ŠV¹°‡ôŠ„”ZRœ• X2è%ƒRWÏæ?Ò*öÄÊÉ?|õÅO0½cjº¨V¶¿5{2ÝñPiÇ“AOÉ5a^Ѻ1!¼z<‚F3Fcd‡Ùq͵(²ï4"Nàí†Ó8×-@…2³}y»+ Ö×ZÈ\)gÛ[–Q'EãKT^ä& ·¹¸;/"óû¯&µ`EDzM»VØ©Øm™!Íg#ç¢Übc =•ù¡Èhãy»"ÍÎ.MeoЬ%ºê'pDnÔϳ­†Y!ßÖ~Gªx¯÷Œ8׳{C,GIþ<6üXƧì üÖxMãí\µr6bƒÃ<¼YC„õ;¨Â_nÒöM0 6ß<ý>ƒîCÕ‚p3™i[Rˆ·ô}®ÙË:Z\Õ1“óàëuŽÅ¦H¦ VŒ¯¬½H¾Ôvؼ X¥ðÛLâ’úÔèÃ0.éA³opŒHÆ\@¦ù¤Li¥9âº$XM!7p .r<˜òë7¸gµ¨l Ù°ú3¿öñ@âƒÀ1Š”£ïwú| èí= ”~è‹ï_\Þü’¶1ê‡ ÔöÃJBùOÈìq"™J£3õ–;ï%æRr1êóuÚ¶nú´ëå—m¼=o-·¾1§¶ÈÊç }v<:ÅþL&E×}.¸¡Ì…>½?•$û— ’Ð PÙ´:~*Öq\IåÑY囑ªv;~-4ç.뺵¬«µN) Îd‰}ýtçūҪ“[ç"#ƒU@'Ía$.?F\Ç-ÿx¤ê1‘SG÷Á1ƒWÒøZþmu¶4Ø“ &Áñ¼Ð1 —εNf\Dj›ÍdQ"J°æ)LòÎrˆè,ô0ñcòæ—¿?-Õ©//ô»¹›¾²ŒµŠL&OݬÚSö<ë¶„ÆÁd?#⎱¤ÿœæöä\aòq#ÂíuQýaÑÊ¡Êl£U{ü#âA%kÌ‚µ),Ê©þÑæðŒ¹yèâWv;=f­#™½´Øí褭ïáÊÝçà#èZô;P@ìªôàw±ÑjP º÷ÍúÙBª—uDçäÑTÕ7Ã9Ç—½}âqk­¶ˆÐú+8c$™_gç¾£90Š5.öÈx6+„‰iÇDïiØG#5ËJF ˜¬HßÜ ’i ô÷ŠÎCà‰×s»hY­Xê³xÕáûëº3&ç ß¼®6xë4šøt0R¯‰éM 
EÐbôeÌžÖqËöz&xùWk+ßÈë·Ã›%—5Û‘¹§ÓÓ„8­Õ"w¬)øï^[˜FóÕÔYƒ=Hß°&fKÎP!:0Ä^ÀDá8"…RÃ,øï0tåÙZfÍ£ÉgÁMÊã¹–³¥VR;éN4®I^äªÝg®·*ã—Y_íÊŸí¶Ä߇ûáz)„Œ¢Š–—EèHp?†ö…,$õn§'n&ÖTy\­‰dNóºUàÉT/]”oR³-2\ãFMózs3óá/Áõ­ôìF[bš€¥Æ?¹×Ô?…ËE(?Å󱤀œxQÅ•ežSº¶BR¥Áæ¨Ç ͦ0|Y¼c”Ï¿ûÔŒ~éh!ø:1åЋ†Ø¦¬î®Zª]Ïõ›a× ›×ûÀ l²è_.:mƒ÷ô› s…/ºj\ÒuXaòƒEÎ4Ët¦Ì½k&‹¼Cð²kf9Gù+E¹s¹˜e »dßÙõqœ¿3Gå­¶±‹¸,`¼&ÐÓK±y3øŽuPÿ“QàޤŒ+ü© I˜Ü©È0(dh•É÷ 7´9SºD6˯Π½â—ý ñr<[Fc¡:·úÍáQ©±"íñ.ƒîr—â¬ôN7²n¿œÎÀèüW¢¨j(_¹™‡g·ö×XuãÃbq7±ŠÆÌ>É1Ñ¡ Wd8†4Ý/\Ý»ç/µ=oÑÇ”a²FÊ”Œ_ öá9±ššµ»Ou8Þj\m`®œ@·éôæi®1aU$àªe ZÀµÅÖôÔœo"ï¿åþŠ Ï£³ô|‚,C@àÉ‚v@d:7¾O޳ô#8W9Ô´à@SAFrÄõТÞbb"ÂÂÌf\/j©ñ/@आ…”j4¾„SnÇëüVþÅ?6Ä/J¤oíIˆ'IHYú*¬~ñ¬~l$~+hƒ=\]§ÌUÉâPܪ7}9 I#ã1c};ÜjvG0(A8ý¸¸"C®^®œÂBÃwŶñˈ?âynC'Ú™²Ã+<Äœí'–[ðàZBmtË3†f¤xˆ )¤M¨GôJ¦`ƒ=‡˜¢0žjŽõ¼è€E²3ñX…¢Ïv§}£Ép# þ¹r|ñ*ß’\ÁŠ{ôKa„37\ç6½¨§‚üÂà)ŒhSÔË7x‹”\5OÁA‚¦—‡;ÀІCCz3ÜÜðº"˜Ø$9žT[z4ïs òºO Äøª•ð„‹nâXñðØwXÙ9Ë÷n˜s°Ž¸y3±¶˜'sϪ“ظ'‰ó¶HÉÖnâ¡ÒheÀ©:­lÃ@=V:…9R†ËÍž«@°1ˆß ®OÃŽ€û“îäŠ%Á¾<œTÚ¹ ( m—H°-ZÛ3NÇ9öl÷IÚ Kø§Î¹}lï¨Üw ËϾ2$±Çöb3.kûÙDçÜÛHþÕ=&rì¼)—?¿‰•%à #§Ñz–yíg(‚«~ös=vRÔ K]Ú8Y–éD€Uj3€u ÙfÖÚZÕxhÁàïŒû›ðÛ{~y‚ö´ŠœÍñ[¼ˆõ!Û ïwî™ÐÃui!Ý™éºcÔû« ~;YI¿{$&žc½‹4Ö5ÜÉ0'ŒF9 *˜ä5®éÛ­¹0Sw—³eû“ˆ—*ëw´É|)msú¸+À÷V`¤uÞ•Ù¸llJùgÁŽ#o¨Ó¥šQ>ã…êN@|ƒ Œ½}_j¶(f¯µÂ. 
?ò®îq—{WáÞ¦¦!%6³}ÞK¢¤û3š9ÿ°ùˆ+F°p~˼ÿP"ÁÃþÑ»¿ßíPKH†‘NêÚ'ÝdS·àßÏíß@Çuú8M×h(…_Ÿi·þì5 *kR ¢`s™§÷°GoV2aw™Qvr2ïG¿Ì´´Ü±#fÇ  P$?õ[ð¸(%‡þz«ËüX¯8Ã{sEFÔtf—¥ Àši¢JUÉ]AâM–®­¹ìmÊ ÝæxûнàLŒ íª6($â‹Èë.:Z‘0U¶È[$ãõ&@™ÐéN%À·kµÐ"€¸GV´ø1HÔ5Žï –“ äá)qîÃdvƒe¤+BÉ»söÝ.Šã$›/W±š)ð„àJ7ӂГ=æRô¬vÞ+9_"éý@]ÅàhB]cô‘®ŒyÛ _ô0v²ƒ‚•Æ´ž•&s¿µÛ¼b¨’IÞºö5Jøøáɘ`3ýÔÐdShÏŸÕ††¯€h&"†Ù²€¦Q]?ø`'͵±_K<ø)Ô¯¡CcÅ®†kð‹$ŠÕ¦(©Î‚ÙDêkøÈ"y±•Ó)xéæ Oó‹—C‡ˆY˜4ä*pãoMR Ÿ`!lÊX¦´ ™¤I-fâU¸ñÞP6:R­èí/¿5§PH—2?á¯YŸYḤxá¥ÑQ‹cÀÆ0û츠7º´uDNœq áÌž1ìÂ¥Ô˜aŸRþê»±žˆPPÏ¢YÊlÔ†ôìPñïn‰B®DL'Bv 5ºKh ¹«!&ŠïÄP°à(?!§ í"Ï,´l¾IØ0¹Þ|Þs~´*ò«]9_YÄÉ+œHn]ß³Y'›¸/n\PÈl€hŦ·”‡âÓ£è}ßE,îKVü 6ú=:v¶!¢%’’Û=gTîêÿÌ™U©TìÞ—°q´vnÑËÝL3"“”®ƒñÅâ/ðÔpýÃZ·Ô”Á—ž5§¦} ÒÔ¥®Wv88äÝräô%©ñ#{ih©Ëºf5Ì÷FQä§ŠzBÖH('qˆþ•Ærm%²KIN°qñÖ•¶ˆ<Úcбí­%•D&¹1s¿›L<>:ׯº,&Ïæ)Ù«i™=m¨캃n±ý©ÁÄЙ¥²‚.àï–‚7>Ý|²×³µð×ÌOV•³h´(ßR&Hý $Šø+Œ6L¢žg k¾¤HÚ¾Égz™jÑíÊ4CÒÚ;sžh@†œlÀ2ÒS§b+G#¦‰¥e¼Œ˜X²3d§j“ä(%WGÏ{’׬FÁìÝ\êÀõ¸ÓµÌÑmóü÷Á7}¬®ÝÁ|§SÎ9ž~ºÚN–L›šÆw»~7<ôrägFþ µ@‘‡Uåcd+Xß š3¿¹ÎEø ¹)ŠB\°YÉý ]Ù1€7ù‰¢ENRZCRÌ“]=~NÁöd-+‘Ýõ›ÛÞ©Õ>NbÒæ,åÍ+TXNU£¾Þ½rûkpH-•Œû×½a d: ›FI:r¢WY‘7çF¢B5{°EµeâŸ×†QIðsx7zÇiýv9÷ 1þ’ÏÕSÓéLÚX "{ð ¿9ùº0¿&Û(š¡yaÓ¤¯jÙúþ¢Ô°÷Kó/í}Î=x6ßÕxU]°TZpÂÓ½¶‰ñÞm&It%_f¸+x8§>J¼Vª0é1ûPÚªêBµJUÂW®Hat°á˜@¿Ëªâ¥ü‘çàz”o®Áú§6KNZóºí¶¿1ãÆ!¡+  “üÂÖ°NŠ1î…÷Æ•KÎ;5 åÒÿj¦@ÈܽØj˜W¿×ªéwZÔ=ÍéÈ.núÔ 'SÈrZ©AXÊǃÉñÙl±EiãX éý#{è ¯„–šñ¨z2ÕðD$óðÞ+uܾ+À4¸ÿ™C®N÷Ð/Sôu¿ÛG”ÅÞóœ>M@Z¨52SM§å޽‚´a¢¹OàüiÀý‘‡-užÆZxýVâ7ÿmÛŸ—Á·®Óþڵƅ\3ÂèO0*4?âŠ}|Ew"o(¥¿suÅ€¯wüæiÚ.¿1'XüÞo"¡@• û8 UÕ0y¢¨¹Ý|“fU(ÂÓ¶~…Ù_â}Øéo/áËa<˹ӓtu¹3¶pöDnEPS§I¿5ð‰ê\AÝ?û;ŒDIö“ïjQÄøX8©ex½gÍë„|ÆÔnT-5sL#`Þ‹¶fÇ_ §#?}))΢ÄJ;4ñö›µ_êd΂¸ˆ ù+)¨m/w£:Á³³ÌKìU÷Ü\ŸW /¿Š©æ<„ ÉtŒSq‹´J kTpÌçîè+®Ñ^€…Þ ¢ÚŸ)-^' '{Zà¶w±mp­ýLhË[ï‹ÖÕÕû”kè¹!pn蚥êTS‚üvØãåX%ë€ÕEÑRýþe°i\d³° ó~%Û=éÑÍZa¤$6%Ô{ó¥ð˜·_BŠžq¼ܧ÷ùžé± éc=™Þ_ #irJ‹ÎM]SháÆ k’ØD•Œ÷¹Í\Å Ï ûÒyî¦ëàK÷¨º-·—#½æ£×Š; ¬°DǨC*—¶â ûìT®N±¾ÐcÔqkát³Ž§Ð¶ÔóŠÎQI˜?Ð8c·\ûtµß÷õË…ÁѧRò+!\Ó㎗Y2Ê ‚‹j~ëÒ‰ÚNªþËß@=‘Í0B9Ÿ>²ἃ9nqY¹+KE™ÎŸ*^…cTaŒÄwJBÓðuá\W‹¥ôË~pÐvqëºwº$4´‰ñ……¼Å rä§å‰î-ég¨¼ûÀlg¢e2 ¿îÍû?JÙ‡N½9Yy-Õë¥f¸\š¼X¹T0Þ:ô-Ú! 
Cìašy´‚ßÊ|2?ß@¯ªÇPîc‰½I¦ÉŒôÁçõÿòŒúendstream endobj 139 0 obj << /Length 49 /Filter /FlateDecode >> stream xœKÎÏKÎ/JIÌKNµrÏLJ*NÌ-ÈI-Ò+I­@Ê+·2T020R0"I¾endstream endobj 140 0 obj << /Filter /FlateDecode /Length 494 >> stream xÚm“Mo£0†ïü ï!Rz ˜|U‰ÄAÊaÛª‰V½&ö$E 6ÿ~=HÕUAgÞ¿“ɯ÷½Ÿ«ú~üÌÙ´uo$ø›ßÇÆ›LD-û t÷  @Ùö…½›Zî¡cÓÍNìtÙ=YñNËk¯`T=­áRêo ÞæøôeCîŸúòÚ•Úç(>”ÝÕŠæ™ ²ŸAæŠþ€iËZ¿°ð™sn[­6u…c´^0XaÁhî\je?ì„î¼0bª”ÝprOYÙ÷Åû[ÛAµÓçÚKS|ØdÛ™›óøäoF)õ…MZ³©}ß4W@Œ{YÆœmG;ÿë±<œñ®9Ü`‘;‡äKÖ Úæ(Áõ¼”óŒ¥E‘y Õ¹¡ât¤ba¥bi<Îg®bÌÅw­ü:/]×åvYsäˆâ[ä˜â+䄘#ψ]íœôò‚â9ò’8D^osâyMìîÚGÈ‚X o‰ä‚îBŸÉà5Éà‰<øÇ»’ÁÿÂò k£(Do9Örá,Âq¼B?"tŽýEDqì)bbœW$ÄèYÌèM»>sb×gEìjqÞ(ŒæÃ×po¿$îÝ}IdoŒÝ·œn-p!J ÷ýmê«ÜÏ-þøOÃÓ[áýL‡endstream endobj 141 0 obj << /Filter /FlateDecode /Length 900 >> stream xÚmUMoÛ:¼ëW°‡éÁ5?$R. ¤d9ôMðð®ŽÄä ˆeC¶ù÷³k›m‘CŒÕp¹;;†wŸ~>Î|¿Ž3óEŠ_ñ¸?O]œ5ß¶‡âî®Ýwç]Oßcìc]=~?§}÷Oâ¾yhÆáô9%?ŒÝ۹׬“B|Æœ‚>âþ)þ;ëvÇw%gÏçáí4Œ3‰ä§áô–’>\ ‚‚6ý§ã°¿ õEJ™€õØ7ûÆ8ó 1¿’{Æ~ºðÏ`W(-ú¡;]¾è·Û%=°ùñýxŠ»‡ñe_,—bþ+-OÓ;qü\ÌL}œ†ñUÜÿI--=ž‡·B«•èãKª˜æÿ¾ÝE1ÿpÆ[ÎÓû! Mߊyuû>Û.NÛñ5K)Wb¹Ù¬Š8ö­iÇ[ž_®¹uÊ•MúÑzQ­Š¥Ò)V†€Ú(TØ€àx¿àÞ¢ žjy‹°°!ÀÐÔ•µZÔÀ2àP="¦ZdÔ0\ÃG©R\¡·”).–2*ÎШa!„U¼Ä,†³ÔÛHð° `+jÐÃ.¸5Nα@èâ°èÐVK-àxŸ%ô˜Ü3š% A°YÓ€z¡ÎšÔ>kP#¬³¦õ™5m0W£oš¦Ã¾žj­®§Üý·.†ÐZ¡ŽT$X/©)n)æ#W—„o(æ“oÀRZÞ $K¢p4’ŽZ¶-bâ\­1¦Ü°Jä æP"Gñ‘XÔQ¬‚i/8ºkÉ^€ÂZqŒ:ZsŒ½š9”d š­Bù Ž)ßsLù-ï7½æx˜ÏJ›¡¾Ò`¯ažÉ½)f¥É$†µ’1™¸ dÑŠcªCZCù<£7Ã3JÊgózÌnøþHȰíáÌYÉšäTœ¯a…Šï¯Æ,_»œ-Ÿ—Oë87Ë}êÛKÔ´Ü—Ll¹oKñšò+Êg­JÌâ.¾GZyóº‹Vðc­48¸’ï¼äØWtù]Í:P~`áŒñ±–rZŽq.nÍ1]Ç ÇàSÿæ/©ßP•ýïuö¿7Ùÿ¾Ìþ÷Uö¿·ÙÿÞeÿû:û?Èìÿ ²ÿƒÎþ&û?”Ùÿ!dÿ‡&û¿1y–¦¼ÍH·œn5þ¹ã)º½ÝyšÒ“Bï½x#†1Þž´Ãþ€]ôGoáõñÅ×Mñ?®Xêendstream endobj 142 0 obj << /Filter /FlateDecode /Length 699 >> stream xÚmTÁn£0½óÞC¥öƆ@LE"¤¶­šhµ×œ.Rˆ$‡þýΛ1i·Z)Aãç™yoÆ7?ž·“¬î^Ý$º×êźËP¹Iþsß77EW]Ž®=?:W»zÜ==¨ç¡«¶î¬nóM±i›ó%oÚêýR»1ëÿI+÷Ö´Ÿ)àQ·;÷{ÒWÃ`‡ ý4òvÍùö¿o)Z«ëZqê/7œš®}Pæ^kMÀº­óîݧ`ê¹ÕtTshÚzðÔ+ä&TuSýŠŸÕ‘ @ñöãtvÇM{è‚ÅBM_hót>XÙ]0}j74훺½ª"t{éûwJË¥ªÝšÑ¬û£SÓïC]·w½S!¯¨©ºÚú}å†}ûæ‚…ÖKµ(ËeàÚúÛž‰¥äõ0æ&”«çx˜™Y°F\20/0–b“Ò# “!Ú‡\§)&q)€% 1Ϲ‘NÐÔ"Û‚%”’4¢81`rÈH%ÃDdè‘åÜ#C ýйk Ю%£íºÀÜ"l é%²Ë€ìR„Q ƒF'b=:SýÙäøuX¤ð$”Qúó:ú\C¼–AfpGÇR~m%^!N%ί$†h³³&„ÕšñR 
󛣿#Æ¿p'XϾ¬½>ÿ‹A£Iä Â}3N¸h2Ž5ó¯gNÑE'b«£œkýkåØ¿sè ý»¢%Æ|Vâ ¬áž!ü°¡äÀË3™¬?Ðfc91˜ÓŠ—9Ç|u 6ãZÖcW‚Cƒåƒabî ýd1×®eFæ-9žAg깟ú÷Æ3•ZÆ=üI=ú¤ž ç6-Ä7p¥Ìçœã?)pe…øÆgT<ôŸ«?}øpq¹\¯ƒê2 tSð Ä·¾ÿ¦u×KªïzTñŸo·ñþÄê© þ…çr{endstream endobj 143 0 obj << /Filter /FlateDecode /Length 700 >> stream xÚmTÁn£0½óÞC¥öƆ@LE"¤¶­šjµ×œ©D’•ú÷;oƤÝj¥Ÿgæ½y_ýxÜN²º{q“èV«'wìÎCå&ùÏ]\]]u>¸ötï\íêq÷x§‡®Úº“ºÎ7ŦmN7”¼i«÷síÆ¬ÿ'­ÜkÓ~¦€G]?»ß“þíÏ`‡ ý4òž›Ó;íßR´V—µâÔ_n86]{§Ì­Öš€u[çݺÁÔs«é¨fß´õà¨È L¨ê¦:ù?«€âíÇñä›vß‹…š>Ñæñ4|°²›`ú0ÔnhÚWu}QEèöÜ÷ï ”–KU»=5£Yïw§¦ß‡ºl?ôN…¼6¢¦êjwìw•ví« Z/Õ¢,—këo{&–’—ý˜›P®žãaffIÀqÉÀ¼ ÀXŠMJ0Lf„hr¦˜dÄ¥–€ÄP<çF:AS‹l –PJÒˆâtÆ€É #• ¡G–s =2ô+æ¬ (@»–Œ´ës‹°5¤—È.W PL²KF1 ˆõèLõ¶Èðë°HáI(£$ôçuô¹†x-ƒÌàŽŽ¥üÚJ¼BœJœ!^I Ñ:ggM«5ã9¤æ7F7ÌFŒáN°ž}Y{}&þƒF“È„ûf.œpÑdkæ_ Μ¢‹NÅ0VG9×ú×ʱçÐúwþDKŒù¬Ä4XÃ=CøaCÉ–g2)4X( ÍÆrb0§/sŽù4êlƵ¬Ç®‡ËÃÄÜúÉb®]ËŒÌ[r<ƒÎÔs!?õïf*µŒ{.ø“z.ôI=ÎmZˆoàJ™+Î9ÇRàÊ ñϨxè?Wúðáâr¹\Õyè¦àˆo|ÿMë.—Tßõ¨â?ßnãý‰ÕCü_er¨endstream endobj 144 0 obj << /Filter /FlateDecode /Length 3245 >> stream xÚÅK“›8úž_áÛàÚ˜ÖAªæÐIz’žÝ¤RížÍ!™¶±M‚Ákp;_¿ß' hä¦3“Ýò !é{?MF«½yFìóåí³‹ß¨"£ÈŒn—éˆâsŒ§~ èèv1úäÅùbû4aÄóëó"X ÃÈ®Pæ}&„ô\†‹êx`ÈUŸ‡”Z8šó |*Ùá_{І9aƒ0ƒÈÔx5@  Is>~H[€4°Sa &•÷¶‡*]"ÑÈÁɔʇg¶ ä¤Лà7M”pV£ø¶¥¥ 9H à(©2{߸2ô…@ó©ó¢òü§Š ‡¡àøL¤ƒA‚zßð'ÅãMÌÎ@#¤qÈF½gvDšÑè$ÒçÄt„Š–Hÿ äÙ¢-ï’ž8Û”2†šÜ¡ë#ÁÈW åDùQ-šïвéä@¼Í¶ØUq^e÷cJ)HÊ„ â¥K³\­3ØîÆ@êdž–i‘›W›¸Ú¥ßšFGîÍ ÷ÒÒñ5/¹¾’ž®z“Îfv½Œ7Û,ٙɢHìë"h øù]‘Ý%½˜SáG@ɶ 60kS{“¢Sšy±ìÇ…:H£Œ·[ÚkÎx8†«QcëaÎ×5 À=RBRo—‰]²°Çæ±HãU‘Ç™™%Y²IòꢜÇY¼+.cÂÉo“|ž<7-5c‹™&àÅfÒOU¢À=ZªZUÚJíÅœYoÉ©·MvËb·‰á:³/Êm2¯Ò;}?—^‚ó4Î G™A¶®ÓÕÚ¼Y¤€…æ“ݲ/Ó|å’;æ’;…Þ.wæ‚j}ôäàk[_ áMðŒ´eKóBµª,“]<Ë$9‰¼ë$7ËMè»˰÷b»”æ')l¯EÞ½­M?¡¼Cb YÇwvTæYjeÒCà›ë¶ðx[ÛrÁZ@šÖ¿»Sœ6FDu|pk#˜ ê¸Rû°£í{xn€ç2¼zùÿïv)c¢æ`êŠHWbˆOY'Èœð[‰âÉîµE?U„ƒÌ!Dh’E oýÜ%ŒA¿[Õ ¯NAkׂ6P¡M¤5¦*r]‡KM£Ü³µa¹ç±5Ý3k¯ÿ³O+;,÷³oZ›k0¹µ!-ëØ^O˜Ž0K¤æœ{!cˆðYh)Å ÿÑÂÉÓ£Eƒ_ØA–æI¼«ïÎËj§yU3|ŒùŠØhâ‘ThQµ „Q‰&¾°¯µ¹Q¬¶‹%¸¤úûÄ jPq'‚ŠRÏ ¬0X%9˜ÓÌLŽ€+Ú\ß2߸0k…¶—úÁÏG;ÌÍðæÍ3”¼:9$_ûI1óŸÎêÑ(¢­ãàÝŸ U"”ÞÇ1HXbÂ!F2pjã‹Oì˜Õ£WîH òùæ°L< ~ÜaÒ cðÞ;ôEŠVä.»¶pã BOÚÒvg4$'{ÔUNqÆ r0. 
_@Š 9jÕœÇqÈv©@\䢒„û™p¤)œx¯ÇÉ¡²s?ÄX3jÌJ5¢÷}Ó·å>Ë&»8ÿjëgƒn`÷<~,œ»ìu‚Î2ï¯d­Jú¯¶"ÁïN[LO¶¸Ã&Æ!6£ä\ÒÏ£ÖÚ„t ­¶Ö4Ä}äÌŸŽX6Ò'cѤ  zõ “œ+·èr "ÜÞŸ 4€‘‹ŒAx$C›§Ã"üèƒÅŪè«D8€UÝëZHw´‰+^`ÎÑ"(åý{•áˆû¡°2O/ˆH± rÜ6„.¢+ Sðrð\ú„ö~.¤p&Zcâ>û˜+ƒq…g ! \æâmŒÎO•áv& }";)óÌ&ÄàÁòsd´à‹Â>×ö [xPqµÏtØ2¡+G-œ^­°Ûka%÷ûÀåeÊåïœCZ­]ÀʰOpT9q¤Ú³;-cOwÊ£y颒<û€Ø³ÿ ”kyow]±ZYVÊ~¾'Ëû/û8Ö„endstream endobj 145 0 obj << /Filter /FlateDecode /Length1 1597 /Length2 7227 /Length3 0 /Length 8256 >> stream xÚtTÚ.)ÝÝHçÐÝÝÝ5  1ƒÃÐÒ"R‚tJ£€" ‚€´€´’‚HI=ôxï=çÞ·Ö{kÖš™ýߟûû7³1Ÿ¢Ü¢‡!ù€ü‚Re]U   @PP˜_PP—ÍŠô€üËŽËfAxCá0©¿1”òΦBÞuá0€–( ŠIÅ¥B‚‚’ÿ"ÂR/Ô  ËЂà ޸lÊp¯ÔÅy—ç_œ`.PRRœ÷·;@Ñ‚€‚A0€.é ñ¼ËyŒá`(ðœ2®H¤—”€€ŸŸ?ÈÓ›Žp‘ãâøA‘®#ˆ7á qüj ò„üi— `â õþ 0†;#ý@àÎàC`Þw.>0'p—`¬©Ð÷‚Àþ"ëüEàüÈüw¸?Þ¿Aa¿A`0ÜÓ  €Â\ÎP@_M‡éä€`N¿ˆ oø?Èõ9Þ~—¨)@wþéÏŒ€z!½ù½¡¿zøæn̪0'e¸§'†ôÆýUŸ ßÍ=@àÏåºÃà~° œ¡0'ç_m8ùx ˜Â | š*8w&ÜÿØ\ H€¨ ¤¸˜òñ» üJ`àù ™ïzò‚{œïÚ€C!w?¸AÞ _‰ðýøç 8AÁH€#Ä ÃýOô;3Äù¯óÝý# þkÁ;ù‚¿>ÿþg{§0'8Ì#à?ôßW, ¤­¦j¡Ãó§åƒJJp@Ÿ€OHRLˆ‹ ‚ÿÈýSÈßœ5aÎp€ä_õÞ ê_5ûþçŸ áü3–üNºç”n#(*¾ûþëý·ËÿMæ¿¢ü?•þß©ùxxüÆ9ÿ"üò„züaÜI×y·ºð»e€ý7Õò×îêBœ >žÿj"Awë sñø÷ ¡ÞjPˆ“ výK1ÙMíš1€{C½.>  àaw v¿{A¼ïdù‚ÜíÏ?SªÂÀp§_‹&$*! Ü»k¾;‰‚€wéñÿ-d€? ޼sܵ p†#pÝ)ðN-ŽâqF*A]~Áàä÷%þúããAþòr„ºü þûí÷PB p'4ˆ 䄸#ÿ“L àu÷Áþ§ ‰¿ÿˆÿƒüOy@É¿#ÿvúÇÜÀ>ˆ;ò·¶ï†ú¯óï× ñ‡€qçgà`é(·Qíç Št~|›c²Ù6ͳ¹ø‚æ>? ±2¸êŸE|Fœ*f õ/®«rž(,0]í¶6ažybØvùðÊþ±ÑÄfîÜ8åÀ‡Ò]Å—ý 8ô|& [¯<4 wGoEíÒb+|à#AhPLvî÷VÝÿeÕ§÷13›†[õbÚxWU“|ɦI6áSlEŽyÓÔ,÷| ØÜ¤‡þDS'§I >Ü2i=æÁ ÞK. 
²ZJ¹˜\ª1òOcEÍ€~Bú~‚=Hék¦ÕlPeÙrÉsг׭º¼õ©I8ÒÝç‡(¥>¾ø}Ôš¿NiH)cO .˜>JðÞ Ž}¨.^òݯ`—î¤FY×I‹=†ÝÆÒ$ˆÕ·†ÀsÐ~OUTüzÈç"|žaëþ²èñ$áþ¾~9o ’*æcÎ]ØgkL¯ÈyTjÍËúW>O½dÜgØœ6Èó—.a³Eä‘(”® bݦ©ceÔ­„ío†8ÿÔ‰Ú•˜»òMWÓýQ|z¾öŠ%7¤æ fÚŽ_ŽoÍÚàTì °y÷æí®& ¾§–…;-Ž;F ùôv¿(B%?9n™zÀzÄîå±nǹÿMšbÒ/bUÆüZßb‘§’Ò+Ì2uï¥_ÑàìªÂtpÖsMЉýôŸ]¤Å¯Âû¹–|4KH3&°\à{lX'°ÌÍG¬lWÔ\:õuB”q¯+ì÷¨m³`f¼S?ÏÈè#;ܬ§(4^L½4‹ÖMss”(Õ AžX)mæpž…®–9?ãko¶6ÀQ/åê ;—ŰÇý–¼h4ѹÅ1Ìä¦(Gh£voæÇˆfà™Fôó†€ûYÅ÷˜øMtóüþn£¹ÄX'ŸVå"åh·:ˆ$Ïô&žùò-|÷-Yáº21)QáfhÓÔ .hy@ß°VùÉÑVîì”Á‡0¸#µŽ}[jlWÖÃvšqïé\êÕ«¦ØRÕ'‰µÐOûÖí2?\ŸÄ Zg mªJ¿;2ÚN–N'dãìþR›N^™yTNÚÅ•«s_Ôú Gføœi©[&Bjë0&“ŒIf5.9—î!O Wð´ÁÀH,¹ûóF'ÆÌžà«Ï¬ëš„®/Z¾ömªB4/ùâŒ_b0äÏãG1Á6¾¼TfŠsðM¬»ìݵõÕ•?ZJ´"ý?¬ÇªyPÇj#¹è7tÙmP‡îõèøóÃN´vƒsChD“5й¬ø{}’bûðckùÇ¡÷ÁY¡Åõ>qõó!O¸JíªU59íÓt­‰>&;^ïš8WZwîvÑ.m.â!ˆ•g„êº,ÁkÊý¥ƒ˜eý5ŒµhΛ×T¾#°Z“%—y;ç7Qú䥿5xÑÄÂÚK._^B}¿Î=ÈÑ^èµF§Îfœ“Ö”3=¨ÎóÌóŒï4ÔžNFwÆwöH9SU[èö,îEp’V„ ï…ø'½ýú\ÉSå ~¼áKÎòŒgyËe=Áƒ˜ZšÍýëX+kDRŽï NÄk¶OºÏ{1ÓÄfbô‚B£ß±½É°M¦V¹|á ½%§¹Ûþr«d`Ó¥…ùY‘²1ôë\_6Üy|Ð…*ÁƒÚG .\É`z¸T<{h*À¤ì‘ÆÜãx²ï\­°šûÔwn­&áÆêºÞ OOé…mdù!Àk艹pß%ŽhP\Ò»û,(dÒtÛ³–W¶Õ)…ÌPàQÐÕ‹F«¨ÑÃc”š(3c^ßz±Åþ5þAçý‡ëläór“kû\&¡Ê]_2§ÜYJ 3‘¯Ã_(p™Îò¦ÌˆÅmÈбŠôŠ`;$ù« ¾E…K©Î ?1éF‰±JÅfÅÔåØú}YÎ?ÉUV¬ÊÏàp³-’½¼Ô‰ÃüÁp ªØ'[Ú8&5¢«?˜ä1³²^/wä]bðÍs·&P§ŠIfY%4Ø—¶µÇE>¬w`þ ON™ˆô=RûZì77†âg`ŠgœØà[G×÷mEï'éQ}ž! °}Ã:~‹:;ñîâÕ?$&å3ÎHIàŒÊ <…é8¨äü†öаe%$¸·¸m²Æ(î¡ ¾T›[í0d—°:È~’Ýq>ß!C¿@‹¸Or6J}2øöç‹Ü„{Eù]’\íÂPüqïàÃ$wéÃó®v7œ—tüpé(g€ó Ù«• >Ê¡SxkæÁC–ý­6|Ÿ’<'ÞÇÞ •Î\. 
±hêúÚ'¸a[Ý­8ÆÅv‡é'“i¼LFÓΩ{-7ª½6¹u·EPÔßç¦Y#ÒR›ß¬ Œ>nJÎÑ»£8uÆ\øug Û~ú‘ÍЬ‰Ó :®Y²õœšGºåkÄ#J”f]íd~›ŠðkWôñy'dÒ¯b+þævwèÐiu/èÉE-ö$ͺ9’â{¤®YL!t!œ“¬èŒÇ†—ŠÀËÊVâUW_qk­ØÃ^•voîªûõX˜:+ÝÅìÔÇÆžÞÒÚY[KqRîg)3ˆHó¹áâÇ(DÿIÕ© ꊓ®$c÷G-w›Á @S'} Ú³•ÇÏÂQÕÛJoü1ˆ Äø§ @#W}7ÌóhJìè ›³ìQ¥°av eáÀ°€ÒÜüñ£Æ¬|jÿ6iN†iv¹×ö{Õz¢¯åü5šOêVÀ".23ÛmTŸ”vÞ\XíÚt¡)Q0VG‹ºFä!ZbˆhTK £(œìÈúýªzÐæ¡Í¯<ÅÒX'BøÔ,òý;¸¶ö”Ï­ÔXŒ¨6Ûc?ü²Ÿq…ÓGJ²I’Pç:E‹…NÑX$ËÇ$ñ¬çë YVj}° P^ %dìrº™¾ ½‡¼ ñ¬ÞçUpÚÈfH˜b`ѧ•ïuÃfqš)„»å«ßäáÈ©f)1ªNòósã¾H?Ęå…m¥Lã‹4rG… ÷C¥ŠL¨üÆqCÎrIðîS’"±,¥/]é}ýdúuÅÕÒXŒÁc¬ÄRn›¸MâxeÝbÍDô–ÝãvF¾ ˆ¦myÌ&*½ó³I͉ù@`s¹¿]žâöØôíçš­ó|í<&Ldšñíc«Èu#F“ á=‘Õ¼YŽ ˆ¶CÕb¸{Ï«êešÕeb·‡«­â‹ð•6 =ƒ{HãSI´\Æï‰L†îo¯”]°$}AÓÃ>ÂZÒú¶0Ç¿}tŒ‰þceœIn}@æÛø˜_3ϸ'uP’˜ÂmÌ´Ilj'®ù‰³OzûHÙÛOâFgÆ>oõ#Ý1Çq®#K¡Õ·ruBNñ²¯#ÇhÚL‰2Ž0Š û»sWß[Q$b¸e¸å›7¶˜O¥òïØ¸¦PéÍŒWOÉÞ?ëû¶…°$s9xÑ ›Òd4¸&HHºÓƒûÃÞbhnl£­sááøÙËBÆ3Bë`‰%÷ `þÝB½]Ìé[ÜY§qA×e5!„àÙ&Yì-vDúÉÑÏg&­çÅCûßìc£{Ýìù¶&8¬a¥° }«)ý›)yÓ”ôªÐŠÔNá@i5hµŠ£"8UÀž —oe\¥€þ‚™^Ã#q¸QgìT!ÿù?³©ÿø÷"ˆ‘›¬©ìX™Êƒ­FíòÏiìù>’Û—£rÇTd¤>šëIVu;ø…ÛnC£À7˜l þ¶Õ³ñÒuµ=oÑñ©:6Aû-ÙF1Ÿ¬¤¦“û<§tžÚ…_Àf‰ ™GW¦IåhÓå"\ý®ú¯Òt±tÞŽ·n¥…ÞÌàïENš,.ã_yƒ¨Y0«wkšO ˆ§«-i)èJzD …ˆ‰¼‰¥Ë/!ˆÉD×Ä~ù°ë|~&óÏ(&0.¹%Æ—¤hý$âŽO?Á¥¤'‹Áˆ:E1 ²2mZ~fr¾ ˜f¦|t]n·6¿‹Î©SÑ‚žwó üáš0I?½-¹ý*½.a+‹N|†ÌêDÙòôêfò˰Ö÷ú=+UR¼Ì¯Ò®MÉ£•ïs/¸Z«‡÷²¡Š$ÕùÑÉÊSð$SŒe\ÎȈDŠuÈB‡­Ë2Ÿ¢8uH%Þ©º–×zùÞUM]]¬Z±ÉNaixœËˆ8b¼±«Cª”.9›atU{xÛîÊæ+y¤ÄZ2eT"/k*3ìÏ`ü©vëÕ?UöiŠWÂÃRUóöqÕ¢Þù¬ü&÷T€ÄF‚QK 5R—÷—‚y"ÜÛµæé¡ù¹§˜aËu¦Ž]ÏllU+}½±öÉë¸Ã_”¦èוNÝ@Õ’ãü3F9%â«N‘}?%ü°pøóhÃXž5°icÛAE£û${”žÙ2h}Ù@£)òBºõ'ô=VNÏ~›ž8Šû¶–vJ ò5~S€ÀˆÎúç³3ˆB—)Æñ»j£àQmO.Ú$–©|½ `ÕѲôcÇnì™fd©çpÖ ?é ƒ?Íwž9²ˆ'B¬¤0ÜÖË·‘æºÔ ÚUÒõåŠÏð½÷Ø\ÙÀT¾I1sÃ4°Aë„ÝÃÓPîÃNŽO¨9Èûd“TY¹ûpÍÍÌð; 5W %v©Ã~¬_£ËM­„Bñ§E¨=qzIJ=¡—¡›9Ãü/ÌG'8„Žc·fQÆ|/ý<øm!‰OÇ#B÷.–ú™ë,³²sgXF®#Õ>»>m™Ûä­Íqp_eKÌÖôq]ÁlnŒb}¨h–1 F¥‹ò¯ö,ËS~^ ˜[xPêo?"¬³í ªtKÅíntÎʈ³÷z˜¾¹#k¥Ÿ;Èù§êÚaW÷(AHγ ÆXq'Wìwh]3Õ¶fŸ€MÓSQ{Ï„ŠIÆU8ÇÌ­ûÈ‘ê¸æ€tÎf‚½ÀÂísŒú™ä¯=$UÂV̉ r?Rt*?›Ï‚Õ*¤ë"üŠ—7À]/¿2(f6¤/Fì.ê€ÃÛTAÒë ø2ˆ9c; §Ïqg@¼/SÑâß’dÇH–ÚìµôV‘måÒN¡¶&Êú†Žñ}çØVQ#vq”õ yw©O¿…ã7(ýHF{¿ÜÂw<„pý‘¥Æ…À =c5óMÂÍ€Ú‡½ªõ^Ôc¢¡ÖL£ý\xšÉ”ñ¡)²;¦)ywóoc..›Çöõiä-…·OTÔ~èY䟹ðJíl¹E=ý ðämo¶Ôž¨|¤Ü³×§¦šè;<Ö< 
}œtðJlÈŒtlç‡òk“x°9²zó¦"YÜóÇéüCS•„å§áå“!Ðïê͹ñ Ûu‡WRî÷óbÛRÛy±6” =lŠ G:_ù¡äô–†Ž²ïͯ)î1‘üþ{ p[êû„z°åχ:_t¸$r l¢w|Yþ&É_¤5l›ÇD¡¢nv ª PØeU˜Y* “Æ\jS¢Aé€+…ÇÔ­Oc› ⺠3< ð-ß|˜Æ”pÕ@ìi{¨„Óíˆ! [þ»×EsãmƒýfòüƆ{G‡-L±d(•[é jà—˜­éµZ3jÄù£‰I[׳…ÕÏÈÏÞÂgp‡Šk"±’eáG¨|›Uç/™;æ†c—ÅEÚunt¿e­Ð¹å¸ÃUÔ›EÃ4ˆ-&Ù¼_?¦ö{Ç}tžU”Ö;²!M·³áÍY›Ìj 4ß p…ë#Ù´µõáôQûL œ.óK]ýÈŒûIpËYßôfô®W-ؽÏà~zc°“ë›Î]æn‹-zŠ¢r)¶•,_ã¿%¼ô‰íÕx½êqe3Ôì¶»ðÖmàce}kEºAI$£ž¿uÎ)Þ·ñç°¡$³ @Vs¬qß²\E‰å.±"§À'¯ótöꎢWy~¼üyX]ÚîUG'ÝŒ«IÍw Š\Kù)™)€èõ73„Ês`Ì=x^m.&~ (õEñ›ô%YÇíÀï©ñÒŽÒLK…êä–`tÁ„ É+¥±ûÃYS2Ì )û-ë+ƒ²éÒ+‘ÔÆ!à¼1}Å%‹Ïµ—êúÝG£ÙÁfodIPÜ}åÆ媇)a"²”¬+]¯C.–3D¾ݳ—­ŸÜ|CˆÍаOÛ Ö6²`ù0è\6‚ø¹„Þ/7ð®sŠÓú“GÄü¢;ftµýM!hN¼ÜÁ„¢úÉT‘~½ ‘~w6a’}?N¼Cªb‚›´ÞKÍX Aqã£5–:þŠb\9|ˆÛžÈ‡–tãý2EÓ£:6]¶Oß+ciŒZÈRaR…,ò©øO4óYÈ ‡nÃNmy§Œh:É„{‰ºËð°Óü]÷ª æÒí~äCFøµccÙk¨æüÐ ¶wÀ,Lk(ÃW9‰{H$%8@å8/e#„íbáynRýB| Ýñ¥ICp9[˜]E©ªq73<ê=ùæ Ž§&^VÉyŸ0s޹ ™÷¿¤€‚coÄSד¸^‘-ñ`÷×åVyžè«è¸¿Òš¹ÉÞgJYÊ*:ÚyùÇ…Æêè|·#¨3s«êhõ®ŒLNÍ··Ï6g­·6ñ±g¾÷ýÆ¢Uz{‘ËV2$±ù²}Œð»if/a‚PÚ:ÔÊK@EïÃÂ.ÄL3H kà#Uo9Ðú½`»ƒ¢w ßNT}TòLZ0iO²Î•íÆv mÂf›Ê¦2­ñæ‡3.¡&¼r¿u,h4bJñ÷))¨·“Úµ ³-qºUÚ ì“=Æ+0];i§d]š{ûèÊ<~ᄃI¶ÚdYV{áfß<ðÉ¥ IïÖãIÔO<¯\™ÍU^ÃßK Ñ‹´•§Ñyä£"ÎîÛh\~·çÀ†£†Œœ|€mK¼+3.vÉácVé0$öš^{’qg&ñ o#êéoMýC7oD˱ÊPF )Ï™Ø9d‹ìÚéÔ»-ø‘Ø[nÙG€Ø˜ Dúfr‚ˆj{~ôé]Äõ·'AÒ \×p{o°²‹)%@åZÔÅ®¼k}-¨—¯ûæ;–g=¼™Û^âí²VW³«O…‚)8a`$z6Ám÷0»Çft„:ו ñ`ÙµÍý¨½wáÕ&†’@£5;´“½âÃZp"‡·:hl¯#~Ú sO{êÊ Á‰ŽÄPÞ×ûª‹d¥@ëf¼ò¼GùFŸ)¦WÊ¥Ñt^º­ËÔE(øÑÉiO¤Ð붉5f±½Oä´]Z’uØÉb,L¢iÕ¹cCB¨ƒ¡á,Yˆ,›h½—Õôím{ö3&¹\H¾íGÇD¾zkí}›×&>)ŽÅƒx¬ïY;ïÑÑ“¿©£ýüý}&«Ü3½öÏxÇ~ gÑAbÓdxPÊåï[™‹8‰èD€Þ§höìbý¹)Èü ›™ò¨‰Ö+/Éèå=ÎÅ}KGŒ­8V– 1›™ŒñµÌII5÷¯Ò×r~žE‚²¨Nʬp.%U¤3•+ } 1=º¡bá 8'=²;=Th̲GGL 3ªN˜Á\LÉP~Ó°…:«ˆoÎþ±Ž"l>iGë‹Ã‡òÑ=°òL^S¢þÂo§(Œ(Çá%&íªfÊøE ÝΖl`ZÜ;}Ãú’œTÚ!ëÿØô#endstream endobj 146 0 obj << /Filter /FlateDecode /Length1 1476 /Length2 6641 /Length3 0 /Length 7644 >> stream xÚxT”ÝÚ6Ò"ÒÒ ]ÒÒÝ Â0ÀÀ0ƒÌÐ Ò]‚ Ò­¤tI‰€”t# ~£¯ï9ç=ÿ¿Ö÷­YkæÙ×}ݵ÷uï™5¬L:ú¬Ig‘g­¶C»Ê8ª„æõ`R2°ë:jNÃKJ/M5Ð?6Æà< ´ú2qÿêIlV"¿€eªTlàó+y(ƒë°½ñÇëG©2V2U -&§U÷áy¿¾êƒT˜0&Ãη›ß·uGÃYé)¦‚L:²özw›T…/>q)Š kÕçà]Öœñ¹ñÓzy¹˜:mÊ’f²âóÌñF‚Dù$s[hZwM%Â䧉 ÉœËè_¬y)Ü,•*|¶8q|ç±î~ΠKÁ¯ 
уAûúC†:ªuö¾êce£š0üÃQÓêöü¾}wñãæù-?æš5Xµóe÷2UB“I-ë°Ç©½^ ðHœyØÏRÏìhX¢)s^·'³šìÊ5*êA8G%µ¨Ãð_W&J–-ÔJ¶:L2ج³o ÐxëG„´hs†ÖTE§¿L±†Ý UÆ/üž¤çóŠììÌ~Ç7bŠä±ã§óG¾™™kò©Ã»³þßßö/ä"ìÙμ—×j¬^O¿–µ1ˆ%×ÂhDŸ²‡P¹£y-ä¬-Eup[!;kH*ý}¸¤W|¾LõÅýC&¡(îS óLØû—ß›Ææ·§-àmÁVs,Ml1›g§–$bÐÔø&D [hè;¾„µ5+Î,&ƒÍÞ@‚3ôÕÊÃ…‰ºUÊ }æ|ÉøíÔkw+4ý(<9áÓLy)’ÅØb¦Á:nUf|íx;sØäe™%ö(O€¿oädÿ lÞt÷sº,á›2͘+lpq½~úóŒo9VY0Ú‘#+â[ùÎq.­enU«2xc³ÒèãÆØ@H—MY–åù¶ÌÌC|ôƒS[d>49ÑX$ñ§v¤G·u ¶ÞÎHx‚>À Cç€I×õòî»ÚË7‘Ëöw’EufiÛ]ýó âÎøÞ$q” *IÞ®’4çÖÉNÔü9Çz¡–•KÙ¶>Ê 2̬Á~à¥"O¦íÚ´Q‘Øá"=m–/M-?;>pvÄ)>˜ß.²c¯ìÙì¢åiiÄ/€ÆGÛ`=‰óX袙ÿD…ï2VmvMÓ[ A(6þö|!4|Ö0'_@¯ŒÎÖP)úЪþéü¬1—3ã+ÎÓ\î¹E©wjßh[Q^ö<2kÍ‘˜›'<²u8@R#ìZöóœñÚ‰‹2¤~é`!‘ÌR­ &GˆiHX拾–qƒéÛ¥ùGÛòþui;Îu‹=8+zgÆ­æY‰èr×7û.¡mh1cD;P?—øôûý2!jÝÍVŠªÐ|ïd~Éñ=I¸Ÿ%æ1ô¦(6€â>¼YÀHÒìߛ㫺 §ÜC·a\«€³Âtêw<´·˜•Eüñ‹÷ qòr-øóå¨ÞƤpûmÕpl´Íœ¬D+—VBEƒÕíiÉ}–Ýò¬¤~(3û²— ‡›hŠ9ºî÷¤Ö†Qûð}ªk\ð™Òè1 õk‰z|‡¡_ˆ‡èÛ*PñÀµ1šÆLÿ{ó™ÜÀjD%ndÝûUêùf¹”–wHhgí˜ÊM¾9šAbL³€>L'Š«þ%\_l_܉æéü1â>Õ ?§(ni<„9v¢ç(˜H?T_Ñc$é"%ò9Ûëïóë*’¶ûéämÔÓ`ä2ð%}•úiɈƒÂuâYâÑüÛÖ¬;â>Xm›‘ Ë|½Èg­‰}áX%pz®ƒtñ˜~·ñÕçùp§jÃõ 2/§üVÞ»±G¯¸mùkfßœà÷¨fCkéša]6êðÀàçí§ì&L;V°Á<ûü|W„1y¢A1\ŠÂÒí"N áqi¾– åÒ,·Jw}˜&"¹^Á[Ѧ±œ¢?ðhý:-­‚ŸŠÅ÷©{ê:,źæpñ[‘óÎFG¾ëÞ«Å×0Šzvþþçõ• 9ÔfŒÀ’•|Ÿ—¹øIB[™õkÝ&]͆ëÛ‹¾8†!Dº°7¯IH<ÝÔèkΓ/tùhÅvüêyÎ#ÑNK¸´0 U¹òkúUƧQMñ{6eƺ;¬>¡Ó÷ÕÞ´Ì›Òt "U½½¯x1§Dõ™'?(¤yü@ñCp’«kµWEé×Úå #7íi»1˜(ì^!Ɇ½xW5í¾‰Ä×Î-éö, ®Ñe z{ôÑXŠñ$6›$'Inz_Š6`Ä]ΉrI=5ê\»A&êÝè‹Fì> fg#rô9ïÂ/ÆïÕÍ´§ôd&,jŒ­ªíîX=†”Þ˜K ½øÔ,ÐÍtB¾lñÝ/-íö7a£Ð=ûIÃèݧ:Aþ—×S~R?c²P¿gº…µýtz°·Yâ'% qþ›É\xôÜ®rk¾“òsª&Q Íþ:ƒŸt”VÝ>OGÀ»Ã§TJK;Ø2 ,HCê嬶F£ó2Óy‚ñi¨»u_‚7f]\OÕ"àõ²5µkiA?­wå¦KÖt)vºäŒÎŸ±‹+´R&8Iá=·”Tl¿ðõζ&¼,R:t˜îªz!Ÿ¹½‘½/WZAºÚñôÍøñ0ãÊÛåI¡œh†ˆÂ„‰Cþ×:ö÷?Ü­<{8«ß—'r¾ÞŽE÷Ò­°ÄÏ¢TÖ» â{®µ\YâwÐ&PìÙ½ôUL¢*êyî´@½šIÂËBa´ºåHƒÌ¹ðqfqvf±Ú«¦À<Ù7ç¯&nSÀ4N“=>û!øâ"Ý•ª[ý2Y÷Kf¦íj)ŸëÙ†³—tÉO;•&Å0­ogäH’º`{dºp~¸¨Çù²ùH>šéÊÅr9]7‡ˆŽŒSÖïå¼:a碼yjhpvŠ|D;ã|ʇ+S&àBÍ™; bÅ«åÜ~K¾‚»rÆíù‘âÊ‘9rò£ƒ¶:]¬î+–ï,o¤Èi>ÏåÈìÚÉ ‚8‰-ËiÒwgèfÐX¹4ýUèzh2wŠr%ì^­f` º¹vP×ð“;Ýä)‘ÐwE‚ÂôÈÔeöË5‹´VWNÌ*Î ìÛkÆ,’ÎÚŽ–ïÙE ÷45[ñêB žö{°5V]*¬l j¡5j˜!ê< tÜ})í­ã5W:¹Ï¹y^T•å>O¯Uújx¥Ãܦ>Î]Ñ>eÚ %“Vp¤^àBìhi³4ê%œqÄß‘r˜ªò$Ä@Ÿ¼þz(ùd†bë‹Ü®ŠDýë—«¦*ô¶üÉ 
km*¾Íž!zV¦“YÔ„ñ^²(•~HSy¼pjD#ãzŠã=™ÞB®³Ìª$ÕÜ=Ð/}±p‘¯¬>¯ãX÷L«Þ¼SuÈz72P÷Ø€ÒgyÊþîÖÝGÓi)1×Awž¡¾•ùgÕØÛoV—XžøëÝÅ& Æù`QH²¦%àl_`swW|@¼[걺Ûu ¾h웣9½,(3[¦yjùM˜Z’œÄƒ1!Œ8ËdM+Tž1énT‰ Y<¯Îªª¤L“Aa$€÷^‚rãÂÐrãuH’Ô¤Iôí½›$áú·ÑnšHio©êp®õ [ŒÎgVÈ>Z9·Ú|({ãU_qêõÙ‹óËù¼A: ô½|d6Q$’ýÂ]ÙGRÝAèãÙd,ƒd“;'óNä÷Oú©iÝÃÇ×_žÕÉt©…´ñ%)1‹òÖ¥·°ÑϹ?©ëN‡©™‡¦+&¥ö‚N;Ë!^p5ËÕ¹r¦Ïkí%^ÿç¹È½áa’㣵á²:îJ’¢¦Êª·×ô¢Ñu†ˆà¬×³eýÞ[›C½É8y1 Â×Ênk†Ø½ÄÂ#,ñé1l~5ñ ]ѸÏR—”Ë7æÅrÓw ò«±5=6W8œ¨%!ë+‚“ñØÉA¶å˜eŸ.úˆñý0íùDšHBÞ˜WqUÇwõÌ;©&2àZœ|Á½eÞêmç¯"›'|+éÏHtf¡ð4Ë!1ޝ•¹ _'3ËbPtˆÀ,ëÛ.ì.Û""“É1¨L̰Û\0³ö«û=ÑU÷ÿ\ð~ýÔýqxNr¡ÿ[àM¯˜8ïÍlIÛ–-‡<ˆ;@±‹[P¸8’ *~nÒ¢#=s8¿«ª´%!ÓL™OÀÖ7Ôõ: (¸˜}®Ð•r6þî\TPÖ>lRÍò澘2‡ˆ¢û‘aÉ›9­E1 ¸Ó"bŽP8B?ó-ÍIÚ$’Ý$ÕŽBœ[Q´öXk7Ó.FÌ·Ïò4Æ¥å_õ½þâܶ¹8Š";êç¦èáØ¦i·ŒžpW²óL¦LÅ^¢J2ðûã½N:Dâk§UÌÙï’å£_ÄŒõ-èÆKÊ[éÆk—% ~ëâ’¿'XâkÃ`iZQí'Üý™g­ ¾}ò1ƒåíÊ“`q ]¾Þ¢MÍ:p`®Ìü»ÁÝ©š†; ÄûŽ–ÑŶÇ]wovÚv+¬wM^¦õscï#éºËkÌÇsj#©ÕòoË´e Y5Uƒ—wÅ óØ!á34-|¶ë“=§šFJC@áŸcÜΪ›ÆâÒöiþeMâ‰«ÞÆsAdDcÅÏÖ|§òðâ?æ’ú/ç‡ ³xyƒæÓÑŽyèq¦¤uœópaQgŠ•Ûä¬8*Ih+½ÆüÎÕ¯ƒ"•ÜGJê$ëóX£Ž%÷ZÌD&¹<\žtÔç16#'ÄS»ÏÞTs9$UºñÉSkóE ´\"ŠåJ[xEg#Ô³Çz"Jͤ¾,51Ìlµ9ùÍáé­8°Ö^±ŸCÒ•YêvÈ4Ç›í¬Œ8˜QÞ Z¾«ýídï én‰kR†oóJ©ª•BšúŸgÍ rœŠOCúX^ëþ`˜TÐ8/8Kl¦ôÊð¼û¹³˜>L©y9‹àsè†b²ôh¸_£äL¢>|¬WŒ« Ì@³Ø–GÁ`ôhfåtC§Ø• ìPïèc¤‡×Œ„»n"¥»™·5ù;æäòW¶Ü[ï´Ã-ç¡Õ^ŸÙEß7 š—¿–Èm\dß1ÏöñOšaÁóW.:ϧÛéÉí?áè “õMÂò6–î:ŽÜþžX½å-û¥ïg‹áêwÜß)ô¨H€|ÓŽyËÛŠ–A™Ø¼â[±dË">¥âòØåc.v¸oìZ u¢aθdw?ò£`’^ü¬¥êie5ó:å>çL&o“v|Köìwb½Òîz"¯÷Û{¸Œ‘º}¸çžò‰kí7ú7m¹ÙÌsåÐlÛã É–Ú Uï Å9¬wûQ®n¥ÎšøM²Ø²Ý`¶·/S3 =CQHýË/½¬ôAœÛ|®zWË oË™ï 4¸ãe(‚“„¯¸{j%f§ì4¸äµ-%_3>oÙF꾡9?®Ñ‡’æ9õ]¨i|p<¨Ñˆ—מ±ò’©ûXh‘çG‡n]¯_R*îâ<\ L‘HiRÁÉ8Éy‚Ô@FŸ %¦h &›¥ù#çÞ'3ué·¾ Ѿù:™(Ä¥íó£•Ô|æJ©0e»^ù4??N¿¿+µ'Jšbz?³þâêýñîC9aØêŠRt*_ŽÌõHtæ*¥ ‚·^‹ïVa¦ IõéF%LYè妷ÇM×§fû–S‰»Ütëü\¡V ˜ˆtŽ®UeÑ%c%Ä~˜³m[6Ø6Îø–]B}ꉧ+hÜ€,-žéÓ¢õOü • Úš«è–Ø¥èðij[zß´>Êþ·*].fHxÊç„å]Ñ¡ŠÎ•Ä<ÝܦÎ[ˆô§3톗¡ÑÉv­vJ3]  sEÆ=¿c2G5]\yv‹u£ä¬#"Ýo·«Õ8q䂨ÞptŸF•¿=,¹BkŠuƒ­­kì'kg¾ÓJH‘mPôÙRqc)³ãðk½|×À…íù QêÄÀÇ™‰Ìne«˜½­ˆñv30¯á¤W¸$B†?*€Ù<'«Ä<ÜL¯³­ v¹ëå›°fV½ê(4w,˜U¼+uNMv#»Ûõøä³à¾éw·›7𪈢©¨œ8çH×î骮÷¼u¥k?Ü‹¯o²Í[*53}¢QX›Í¶œúî΂Á(;4[?L˜ÿuˆEÐn†+>€OGnÃû1Þ‚ó‹ ìídyGMƵÒ@&ä‚TÀÂŒ—ÿY.ur/P´©¢W¸Ô‡sŸtðÆÃ¹åëHØþêZiÒPy¿[¿ 
é\kË=N3·¨È“u²°Ö¸ÎxêB¥³lyÜfjYç§r?–Uh0`Šã»RZûMX¼JÅQªJêdJ<“Þ›ã4æúÜR?_¥MËÑLƒö("#ç~šm­ž!c¦m#ÒeëÏø{Ò"ó¸ôv:! #ä´ï©¤JÁ\RUõ1rKZ• ¤\YVtsq-¥%h‰Œ“Â,;ަ É9Ücá§È" é¥ä5ƒqívØí÷CÚž 6·¿¢YÔ顨5Ô9<[Ç.I0N>Ñ7áκ[}–°-×°OE7Êü©Ѽ,-™BÔ2Z”+ýâ¥üJzÿøf@w©°’|éVô&² 7\üä¦ÃÕtlªvZ#ú°Ü7é(t¡˜‚ÎÛ…¨zãæ‰lÊX㘅ža‹½éRª4 6·Zâl[t§n}?1‹8ø&ÃÍï4NÉ3†Á¦¹ãw#ŒßT,H¹BN“pæçf¿ZÉëß[Ñ}ýÜKsä«ñ‘±žJðaר­È´Éo S‹±¡*$ïΉÎByÔ¯qom‰1ÿqK¡)í°çYywVŽ„,¡ßËfWRÅÞõ”[YÃâhí‹©çJó¡ß'˜S”‰l¾`³ëZ#+îÃæÌÜ[õïK‘YtÖzûV†o=­k=ÌÌi÷+a °™½â?¸ÙÍm5> stream xÚ­weT\[³-î‚5Á]BîÜ­iº¡‘†44$¸kp î$HÐànAÜ-¸ÿC¶¿`xÿµVºÀ¡ã§’yxÿ*ü?ž­Lÿ#9Zý™m Ìêi¼þËðÇ r…ßTýëÄ?üŸë¿ öƒ°ægA/?ئ¤§ºT‘eõÉëàEî r*ªÕÉÏõ¯pl÷K ]-µ¸« âª{hòœÞuº_W~±ÑßAjÏÒž>Ì¡òa`ëÌ%Xbnfßà6+ÂMÝÓð:šR]A1âÑÛXÒÔ2+¼C£iá‡c]²ù3¸åú3^8áù‚’k¢IZŸÕ!Våíî1Ço_^°tïëíi?Aí\§dÏŒÆdz $óMÜ¥Kpñ´€Ÿ×‚PoÜ„Ý×^j>+CæÚKøáÓïB{dØ#Uï…"Õ œÛ`KΔ:®é—ÙíR¬EÊ÷½ ÇŸvF¾žmä[ÏÉ0Àüè7f&“ Ûjc-œØtÅ•ãF£õ+¡Å( Ïëá¿`õ23aB?iº%»6NšéAbû®Í‘ö™~ ˆ•î–òiþ˜ùgÅ|R§gªðüÀ-İH!ïÅç!…êúïö̶)îr•˜+ªEvÕߘÁoÀÞÞ‚oý…M°­`’uxïìsSäb!„±%™Ô”D»”¤ÇÖ–F¿ÎkséÀb6!ß ÆÝLsÍA Œqriö…pÛ•ŸdÙ €6xÊë™ޏlº,.öªn+oŽûjüZsz5¬Q:î*q¬ÄŒ˜@ ÃA"SRí‡ñ‹RÒTóG›ƒ|sÏ£¨×ºå÷̾öë®Ë&ó¼ÊéÚk¿kw\Fo‡ïÂ~ç—ÖÃwwcm(}³l_` hZ àv¦¼výRºeÞÁ¢pÇcÌ»zý9 åÙôµo‘6%«ÍóúÍÚ2¡p¥×†êÈ~oò»}WAšƒ ÏÏ;“lË%4¥L6|«~Œ•‰t!ÿê&­Úa‡L­ ©ûe,F½z¼ÔÌ’J(Þ ªàV¡y¸Ò¾øL$prøáK?Ç÷ü·#ZÙ]*ö.ãÕt¦–XoŒÅ¿ôr³‹©|/´´@aب’n*-š'¸ºÍeûâJ_™º¦¤Ê šéEWˆsI8fò}=³IÒ÷¼Äç´„‹Rk–¨AåŽ!³ëcàÀišà©âq1–‚Ò:âh÷öÞ˯_y?Œ?JKÂ\`JZm/Bcš>mØ–$)REïgœ?ò÷Xùm»6w/:‘{-®<½Ò¯o÷M=H»ÉÉ[IT9?‘û>ëæ}[„˜¯WñnÐ8¨ÄŠ>ì }¼m™WBIÐæ'ðö9@–oÎÕm>„²üHn‘¼“»£®v‚\Œ'v%^6î5Wë÷UX´Y öU´Z+^>–d›%¾Ã^ËT|M^5ý±öRSpP5®µ¬BÑ©Qþh@4·ÑÜиMsóEqKRdG¸kGù' gœö!…ÓoŸ…B?êét`™ó\!þKæ¦Taã´wºÃ#ûŸ”(Ž×ä¯> ˆ¹ƒ%üÂÌîv¾³,ýÓƒû@ÍêàÍ{þdK™ïM¡V?ëqŽ£R:µÅ®–¸íÆÏWd¦¢ÝHo ¼Ô­ùÉdK~§…*‡ëi«à©f.â®~~Ö±\K*¦iž<ò —Â& ¦Ýפ|“{Þ Æw㫲 Ñäê®× LF&.ª\#êÁI¿ìµŒwG™X>ÚR,cÌä|Ãvx_zÔ¸ŸB#=ü Á!9ø1µ=¬«R•úÇ}hë)Šs@P¥a·Ùv„w§ÄÊÃfµ:’&ŸQ¶¬H+ñ”÷¤N±h!Y¹ýË`«´ û°ú ¤0ó¢ÉM÷3âX°=Šhöp ì1>«š{Œy5í=ïÑuTÔ•Û¬Xõ<¨_È,G~ótÝdBz”)âUÛf™§Ù©V¦²ëר¥Üó-‚Ÿ¤¢@m|i·»çÄIÌá™dk#j#ù+† 5—u’@Ô ßë·äóße ¥‘+ìÒ‘!ªF& FŠVQù«¿Ç_b5ÈVö°£Fá\ÁÂ<>P1•$8×wÔš2]ÍͦtNÖiÌY5Ô*jPó8w¿è<¬Û9´ËrŸ‘þ Òç¯i¾½äø*¾*:²m"zƒäpì.¾KëLnösÍ4•Ï \H4˜!…6Ý»ˆrʪZ8z]v\s 
(u,ÓkySQ'õ“B©›ƒgÌž5uz×UØäKk~×'{÷z.m5ëŒ~±šžëج?ýéeŸ.þu`âH©”Ìiù 2ñÀÈýk —u‰•æE6.ŸÞ¹ }4G¸oMÜØú>9ç$"^a‰ÍWö#™F¬½Ñ¥*۞ȥ8Ýeçül½éW$B–/kÏRõ>·"IÛ ‹{­ÝÊŽwø“`¬åÕ=ÕÛÁ›øuv·ÇêÃp }½èñ–\¹1ÞØX=]“ »I5/$ÍælçœÏ´á#9ÕZwå~•RÄÖv3(b[èÄæ-XÂòSŠ­çb¾%ºïwa3¡Åã>Ò×QÛ»B#ÉHÄOí3n,è9ÇÆ ãS}ÔR‹%¼ÞUi- ‰mí²Á̓ÒŠ »60.¥+fluN1ZRAˆ”ëg•8LË6˜ A¥Ê,‡£à;ãrG­æ¯–½_›„¿¦¦X›µuŸ'}ñº=N•SʦË–‰ýâ£ï ÿ˜ûz-é§ Ùb™Ú•SÛÂÉ3ÚO¸ iD†%“ýÊâ@–šrR`-ñóVæGgÙk¶UT_~ìÓ$´Y(û›,Yµ›¥þ>‡Jõ·«±&qmNwÎÄß>›âWš=?i›0嚆½Èû£(x«Ç䇻Wæ µ;Á™bý¦‹žüW¬“£i­¬,…ô‡ Ù,mE‡®:Œï%ÇFŽ ‡Qå~4 •†ÕŽŽeä$í+$Ñ2ݼÂgMä_k/™Œ‚ŒÏ9 Áæ’âÐt›$Z*Ø¿"GÓ¡ø Pì3]VTvT›ï‚˳^½þ¨wß=":á>ú¸ùj¨m€ð‹è>Gû£ß÷2¢ ÂÊ_ÍÖñÏ 1Q™' ’ {{KÝl|OýtörF°¾ ºE­ üøMRQHËžŽ9ü`IxŸ:ÀÒ6™ñS%SáÇŠlªËy{FžÁÞ0/ÓÃvòy×3bf³û8´¥¥)ÓÉß6 í¼7Þè|ŽºçLÅÕýåAÃ$o&PµA¥<|ü¼„}8ÿi2}MSÁüýp×JÅ -«Mu¹6/¶WÀÏYw,N¦=œƒ,‘lŽ_0ÕÎ$|…xé?Ntà{æo¿ÁO[.דbL>|#0èÎ`ć°×¼rG™Z ·y|9çV^)¢ê· †|%±èÈL~×Ç :þŒø¸`·TžÒKõ½šk,)…H:¾¢Y›Üí@noîW䯋´ô·mZç{Õ 7u=¬‚fTB÷bËÒ¥í½uCÂËýÃZ/ˆÝˆÙš†L“âÙúÔtZZÊ%’Ácí®÷á«,ül–˜8‘B…æP†«;]Gô·0Aé ¾³v]-ÃÓ†#Œäu(¡Iƒ:%¯ùOÏ×6Û#ÉAΊ!‡$·jo>¹NC¥Ãí°wªj ZòÕÓ,æÂǰ£\ôÂ,攄+X¨…çjbKwÄùSTSóy³öå.Ê’µÁfŸµQ‚%ënø¶ëXÈ*•hÁÆ”á¨×¨-e¡!0‚Ú\qÏ-Û~üCíÓwé¬üíU÷TÑJtoËH|BÔøÄ#LÌ&€ªÝt‰yb¯¶‰ÆBâÑküdôÛŸŠv|™FP’³ßë¸r98MÝo3Qšxå÷H¼-@a2j ªuÒqÅ(c?5‘8³ØïYú~&º½ì·“gS3©b­ˆ™æY¹®k,Äefã. 
!K44I÷i­œ¯eÄn€|AŒg&l3ÜîÞUQÈËÈpÞMz8ü™cÏmŒ½ÚûZâ0º˜Öƒ‰ëèÕd»cø÷#tyáAô:ÔV=I5ìR7Waܽ&~c^â&¶D‡Ë[ËÜ2SR;äŠ7Õ Åuãù‘rù oÕ‡.r¦è›JsíP¾Úe9ï6™ )Üßgú%gs~}OÑ3Š­\ xòûÐu%ªð]ÝÚêðgÙt¢ãÃ[‚+ÉõÎ6r#fŨ¢ˆ31×áBÚÓòͪÖccúEåï¿÷Ÿ3äì.³›/4’÷¿°¢%­ ‘L•g&`£ ™hÆ• Ò &QÊn^Ú^ÑшýÕõ°äY° Þ ôÇóá${½ÎpÙ›‹È¹-ì$÷?ÒÁ·µDߣgË©>k‰)j°Ìýóâ<{nêP¸““¾ˆ]<£>ÓŒOéIÀÙ´ƒ[1ÆÕ¥O؆‘Â0úÆ–qMÎI”:=M‰¨(üÐ(‡G9kǰ°-x¤^ĶΫbM'´,†_é'™yß<^2säø…['øÏâ¼¶+ÖRGKѹœ¤œ'h/„Áа~º6Üî„\?‹ŠÔ(­AYA}kµóÚ…Ò=ÆäWFñžQDYåa(ŸAÁ딸ç®ý³J¬y‘ò¼ÉKɳ·˜F̸‹ˆ_– pIã*Æ;ÓlÉ™ ©•Ù;7,ü»{ÐõÈ?AÙ"”nŸe9ÆNqÏ!"g)ÆÛ.ZEðçÁÇBë¤LCˆ?vFbÆ1©¦.˜š‘o”KqõWnN݉°¤^¼H‹¾bæŠ\ƒÅÉõR 5Ïn¢u»'k× h﹯é³€õl´¸|G¾¨ –ñÙµR›B£û};"]­¼ü–ܯ›u#NõÇã„"ƶyåØÚŠô›Š&ußx4·޼'Üo©i¹éÖ<¿¢4_>”ùdhòn™çàÓ0*¼NO%¤ÿŽ€Ë®f26ä)Õÿóäå'ÅîMSØõüp³uÄÛƒOç°ÍEc}—>yñ.vTegÂg+‚y\u D‰`Þ±åCBfï„G‹[EöE~Å8×\ ÑfB: o˽z1AªÙÃÍT1Œ+Cþîå¨ÙífófÈeÍY(ð¦¶U×¥ í¾F…Û:á=ðÚ˜â„ÇÃÛd7°ønÇÈg˜¢ÞeæȰX­Ìùòö¶8¥Ç+ µÄR 7ê"Yvƒ,ÑpDæøôB_¬ÝÒ•–7…¤û´Tg[l©?D©m;‡¿LÇvtñecÎöÈ?6ìÓ¥. däϲCB]Ä"GЀû ˜Â#yKˆ–ÓFR[Z'_ßþ1Õ«š©²÷¡ï¾[»Åº.5ðõ^x™zM˜*\' ?ß^(¦]($n§¾ƒ ÔǪ0}(ìFfsŽËQù™žñ"ÞúŽælÓ,ÔS›ÕQdÅ>Ö]@ûm˜I»ïç†ð·TÔ?cÍöž®SjbV„r~£"Û.é ÷ûe0…@ YÀµü{äR=IDÿF£gµ4üد¥béPëQú]GÞꪭé­K‡ç3ßžoM(ì´Sîk/!fܵò¤–ê9Õñ½ò†·ÆâÆDß§aúx¿áŠœÅ*ÔSííל¶L-HáA{¤•ÙÑi½Î 6‚´ Ÿ›Xmê×hšÒ$a:%ì$úËaW«3:ÝXqD¡™Н#þr€“R±þá’=›¢h(Á/!‘eÞÇC|³u•A^ “P[ÄN®§¢Ž|ö~%Xò<ˆtuðz¯• ž×¼!!¡~óC‘ˆ„쵫¬#èªÔ {ûEõ$Š¡ÆliMßææªJWš)ÏsŽèŬv[7zÁÍKTYù·Œ“¿±!}¯j¿Š[ص¬(ö7ßbË9>¬%€Ã ¤Hˆ´ÏvVʯÝ#s#ƒ_út†äÊEšwŸòÅ#JŒþ.­C¸±"j®œÛƒðå23©ÿ²åIªb®ÄƒuǪB;>Sœ„“趇jj Õ…àÉ‚ìº5-ýª ÝíæWúÐ`?ÛäsóŽ]mK”Ø¢š’™Ot¨ð³ñ C³Q'÷„ ë¯Hç¾FR=·‘JUH>-LÖ¡ÿe aôýóz»-Dâ°4’ÆËw#O¿È»_9)äó Y?uÓV3ïfÛ¾½Ãä)ŸÄFâ€øApŸ³mæÊ˜ˆ#ѧ*Ç—>nT×Ugt¤´ôÍE \K þBß æÑ6ì;mÖ8oX º[æÁý1î>ѯçŸEµºoô ô‡‹AÔª„Oñ¦x*Ë»GMâCñ4z©Ä,¤×‘T½g0Šuk h©Ÿ¹IkÌo¦}ˆêfKÓ_â:(-\è¢PRïèS¤Ê²U§¨õDc½ÊÀ‰[†¨P/ËÌï†õ† Ã1ÔäšTèbW§FêÅ:²F—%÷±¸ÝHÔïõöN¼pÕ_¿u!3õÈ MQÇB?Ä”-©!ïÓ~;¯;<¡€ÂTxJúî¶®º´yJ¥Hg{¸>IˆùõÝMµ·)ñÙKnÁ–Êß@j.]kòN¶ïSÁ$ƒ‹ôñ³mœUŒ×¹k ,¹Ž7Y!òŒ7NƒVÂaŽ›üRc©µˆ±ÓHÁôªˆ^ÎÃ,üMøçv7lÓìîÏñò]Ù–Þÿ0S×7ËZElð)2kÖc9IÂïe«ÖƒÄÇøš‰óm>wp„95cçæQpŸã}áÅY–«su;£~áBn3)‰|”ì='÷yîÝ-‡µ¤‚½m•+)£ŽA’¡Fír˜5¢½m'oeµ†Ã燢¬k ¾Õ;úg½õÞ³/І9ò®ãz¬0kj ŽóÁO¢]$“´òÁHµ\°ªã}™ïrñ¿QQ­S0iqç äSZn>T5éSȘp¼X­Ò¼_šÊB6oHVºNŸ¼ÏQÑ£q9¤ØL=öòÅãý­÷s•â¦YÛ¢)ÀÖ73Bù;õ&eç$ï.È+kž+¦,ºšPF\ 
ÑóQSã«I÷²!˨ðtB»Ôºì•W¸|¿ÍÌÄÍQ€=áVŸ‚¹w^*^Ç¿B}Æ]:Ö€àkBÜͰ֗)Ó\Ì—’?—ò)üÒ0j®À Ö"QÓOã” R Õ b ŠÃÃmédw,VIwT½›½š6Tatd†Z¥ÓýÍšûªt&ƒqŒ¡Z)‹!î—÷sX-ïÕrë œr8—¨:\ž”!SD¶¨h ü¨º®‡ìZ+ý%t!Ï1gö+fΤ “¦Pɼ[¥Nx¼#®#nY)®;1ßó®dеŸàÍÎ"£@PáCF»ªšZ"‹Qɇ™OÛ½±·ƒÒªðJ×Î÷ѶÔ|’ó78•Xq>&>‹«¯†jغå½ß—ÒZÎ^/JÍv²øÞŸ+Søg[£|Š\S)lz¥ee¢\L®ã.~¹ÓqÕ¡6Æ$¸3—~ŸNX7ã÷! Rè».ûéCýÉ>a¶TЧµZOrkz5Wïýß:"ØôT`GÝjQ5±0©ú_ó߬Wk_Whk¿h¬¨¨,û…êüòºe‹á ZÛªslFˆ6¤|õ¿ ƒœcÆ› †ØÞtxÃYú=»™3ä %VGgóuTåÂJ6ŒóUR› ÞyÏ}¸†…¿ÁQL™P{îcT‡!Çaúú¿'GpyšfÈÉÙnl9A¦ˆ¹F5NŒ\ߣSEâÈ|¢9qÜ8ìá’»†‹ÅëùïXˆNÎV› 6ë.9²ë1š@¹ºe”ºbsQÎó³œwa·sUÞ¼b(ÇôŽÎKö9ç„rÉ úëMÍ 1ˆ» Z˜ž1®Pímo)ˆµäeD•`<]ëš{7ÓùZLuɆLß4@gOF ²à &nc׸I«87a…ËJBa×téåˆÌãâ›C(¬2l|V»šfØGÐWZ6ÊÅäÅG-¦×ûÀ]É"ñœ˜ï·ïûüìÝ¢Ùò©£mQøuTk²ë÷û«–Ò•„¸t›ŽJ"‰}æâ›9ªg‚icïÈDÄ>–~ð‰j'%Ög¾k8ì(˜«=‰M˽$;9ç~'FW$@ Ç7ÚšÜKä%}ö n%}WíýÝt¡nèØþPÓ° åV&#àÚæV¶/¨Xb'1%®†››ÎÐïš!)¾MÏ,Âò\K(ÉdR`ß‘y²<ҪЅF}ŽV3ÃȶH•}éçÇ‹2 cq KwêÊþê(DÒÀ§÷Æ7ðxð}W÷ g[k¿—›Ñ•^E!g÷Š»ˆr™±-%Ã2IÔòðö>¸Ëè…+Å¥²–Jg«†WÀ-Jë&ÖAôê;ž\_—Ôq Ín#f£øy‚ÁéÖ÷H)SöLß*OÈ¥ûñšDBËêºÎVsxõ¥ ÓŒéP 8 š/-gi‚D¦ÚEw‡>^Ää–Ø3Š"SX¥Ì ÝLa¦l⾨±µó¥R{o´°ÌÚ!v‘uSØ ÀŒaÿ"ñ²Åý2¶¶+·T›\* ö³÷‹­øÙIGð¦U3B}€¯T‰ Ûkå”?”³©™ýͽ B·çoº8{û­(KÒh=˜JŽÙØÏVW4w±YšNCÄSïˆΖÝ8Uñª^T™+?’oè×W¾óËmÇdR1ƒ=xØ@†XYTeQ«{õá7÷Ž,Þ!¯ßZðéóü>½lxt·nÈ ¸[|åE¼°¬ª#·fN™ìê­Ägw¿vÂJ"8ö$]î±ç¿{Ç÷‹ðËõÈ{cµ4Ûì$êô| \˜cY±è„“¦!›ùÃ1bx篧Uøó¯~[ôohnÃÆpåTöê•ࣧ’ a\â…£¾ï¡=ü²ñ-é¿ ºá€O3__´¢o½IŠų@ÜemÅ.•qápP]|ŽE¢ž¿q‹ PJ(ŒyÄ“dQ``j V¦žÑ;ÔŒ™Ç™Gü`ö+g{ÎÃwN>¹s¨2!²æE¥7´UŒ??߸G¶½a)ó‹úÙ£Ùìã}Zô\ëÔØb= ZLJ\ð°[§å`/²,Qa´L{Šg‘ðÈ8!õ IìŒû±1’™E&}‰£ñó>!aùÀ×êÙg_Ùægî°µ#pä ýq}j£nÈOñÑÁN Ã:qô퇼žj‘ËŠði*Î_†¨á”W{ÓJ–Ö£KÜ ]÷f»;x }”ñ2É)È0=ÙÌ!}Ô¸‘Œæ`³+öjvŠddõ;˜$rË ˆ*Qò¯p#fK%ˆÍ~[ñë(×ø:cõ3Ô3Øð|ú<∅ux )m%È6 ‡Kiƒ=I¢ž‰©‹·¸S“øSy¶ùb”#Œ5Ýj*v wÃÿº]=Ü<3²à?nH\&‚Õ I6S!ÿ*·Vó,sÿˆòg{ÀôØ\4"Ãþ\3<µ¿ÕÃË%¡9oÃ^ä⬄‚{Pv'/‰w)øöóÞ(“®¶;NµjÌ1¥}ñÞl¥úå±á·¦ß9¹ô«G”¾¶³ZÌúÀ{Úõ=)"·4íæY*Ô¤[ɈEo7B|–pϬéÛ”ËÒvé("-¦ÚFë¹`¤f;%qÖØx1”XS®׈ب5ïë×ä:ÃÔ6Ìo…Õ8‰ßI µjÛo¾låxdÆÂ|fý*&3‰>Cšµý8ݸļK’°qJjyȱZ¹ÙÅ=á§+ŠwWö2š* ©Ì“—ok*i5âÕÿÖÈ’ò5S¦[:Ÿ¨ï‡&o¼÷9«Ö©´ã[Å–è"L÷~Íð¸ì>±ÉŸr ¸?·Ý(íâ†Ûøn~jÕfn‡æ®Æ#ºHV«åfz¨B˜Ÿ¶$ äª2‚LÄîÉBͧ<Ãû¬S/*&-+ªñ9^_9¤d‚±=ƒ+äòâ5ˆEÊ.•}«q#á A÷ÔÈVe»(í0:‡¶‰A2ÔºZü×÷8˱¶{óÙη×Ç—â´¯„UmÛ¿‚@E¥íþ»e†SàF)ÌfòönM‹óëTl²‡œ 
©N‰ô”ï‰Ùß^õ/~‰µQ·ÜG2 ãYfgý8“jEÁ öò-•âb¦y0E]ã>„‡¹óý2^î-îþ\r.,v=m-„fjR1uï¡}žæ†äG­‹Ûë„xÊt[IÙq=<„ãšHèPä–Ჸ— [×2jܹnmŸË¹aÕ¢—C.Uu–•‚á.'%o¯[€©[Âw ZSUóà{÷<õ…âéËN†©•ÜA°_ë7¹=(ü_¬û2€¨YæÆ¦ÊhŽõ0­]ÕÑŸW_&6BL˜y…€ó7¦-פ3’ §ó×µ3G†Ë#yÔ©ª°ÞŒU¿¨7§Ý]‘  «êÐŒžÿË!ø~endstream endobj 148 0 obj << /Filter /FlateDecode /Length1 1144 /Length2 1528 /Length3 0 /Length 2250 >> stream xÚuSy‚QÊaˆ"AXHñx\dúƒDÄg€"ÀB+û1+à„ðÄ|ˆÍ&üW­Y³ö‚#]ü…œAĆ#t rÞt&T„A>Z4s:–¢­gBÂévŽPÈ#š™ñX4ÅL,Sš­B ]3ÈiâôÌœ!>È@››Í[ŒÄÀq?À,fδÄñÌ6ÀP”twþ'…¿alPà±æXÖ£p+ƒc62@ÌgHÜ4Lƒ™Ûãx`Ѹp;ÄÑŸbœ€ B¾Ü÷¿Ä÷;E`B !@Ùè5|SGa5»÷¦ ùÐV kŠÅâìô÷u‚^(¹âoá>´H0óÚèîáî²fnï_£Tƒ³¶0æ6xÔ)¨"o1W‘Bƒþ©ûí°;ÌBÂláèľ ò¨ “î¾WòA„L¾™d3Eýþp?5ÏwüO-47‡‹ˆËéßd¶qí\xÓ½siüÂi‘Wü“sÁYŸÿw! 1È0›ûuLÀÚ 2)Á™5Æ,¾fμ8‚ é7 `pxÜ.€1"`P @Ý7C0sNÊõ0aB0  QÿÑøÌ¯À4Íñùèxf.=ûeÏ‚ÐAp+ÈP¼/A¶Iág“ê'ϵc0ÏnY´Xm˜ø,Zn©¸—+ûÄt^³fD©¶åñ·6‡r)mûÊ`9o9ÙäLíõ{­c"Ž ¦ jê»Ôæ¹¥i½0=gçCT~µÐ¤ó5¶EkÿÍìcÏÄWÜFW”OÚ;ÝT£&#ñº“›¤ƒQ¾òÚzå‹|%«¿¾ß1âëÆýÍ(ßu½Ñ¾#%[ÉãÒ…ÛSÆ›”.¼x§^Ñ ¶¥[ÒꨂïJvUž}E×*µ&6Þ¼dþžÓàéÐ(Û´ödz¾±t̬]éӣ뫻”å5ì×íS^¬©Ù‘ˆ÷Š´½ÏX¨}¤ûDkÃm“ëÊ6÷0dxË0ýtŒ~zli^¾¦ÈKÁõÑÀƒÉšv󶞆{kŒ²'éêÒúšÖ©#%‘Ëúî„´ýíÔÜèùøI»LÕf=?xäôÑ$äÂÐ6wjØå‚ÃV®u–ár¡Ýhõu(¾±237káò¡<]iu4ôM¨Ñ‚µ”ò‰Ö¸­'ÿ©°" ^…õý&Ç?»¹öøÀ’êS^PZo“#f¹÷n=q-—Þž'ÂI©S£ 6Ɖg'v5+ÂÁ:+E-%F#/7óË삯Où—$1×èwßÔé_èHÒû\Ýå¿äWÏ8™¶·PîA¾›ëóîݓҨý©ç㊻ûØ@BþžT9åÆ>2…§hZJ?UÚ7Ç[qËf¤¶*íLÐ&½\¶û꺪#žo£ÕÎXlñ-¡ÿAòi÷‘Ûh\•õñ¯Fѹw‘ö)¼µ}Ê­ÔDØ¡x5{báÙË ºœ2+î:£ M¯¶°ê%wäû:~ßuxéþÚe[ؤÅé=Ãj×Ì*/Þ§ Þš¨z:…¬žŸV]Òùq[ÓeÅ"Y)¹saÚ@šý¸Ôü&YÔäDtd[Ÿ–ø«¯­Ó~³L²wñp[Í:e…MY1¦ušX|®Æ¹Úª~9qluL,a$+Üo”[{‘$Ýmš—r>[š4øÉçá|½x~âÝp7>QåiíŠÏ¾\ÃîÈXZñ¨T< 0å‹\ž8Øeë@<2}ll¢DùöÓU§Þ­®\™Qý=ðD-£)p#1Íve©9Èk|UßÞ\Ü´3)¨J)´º}©AؾގÄWÜ÷uˆÐ‰<ôÚ¯4kç©l×i3[µ}!òFW½¿7ýå=ÇÛ81ì&A¦ò¯[¹%îúEµ Rˆ9e§tIÐÙçŠ“Šš%?Hdö)‰¯g×ÖÒ{}:Æô†dr—±Þˆ‚ëæó>~¼s¢Ø@ÒžhReQ?©ÿäÝü âý– {Á¥Ìóú#¿†‡èn»q69WæxKëïKÔ‡‘¶ná7ûr§ê²œÊp=í*VømI.xžÕu$ #c|¥œ?µ­M>¯óÕ™e–†:éËïÈÿY©`Ž{Yt2öéCƒ eºÍº÷iÛÔ{6iÁ8UÒææž5 ØKßËÕÖ­^ô]ð᪉Û%´«+Í Ú©#‰õVEó–\î~Eù¥"P•½kù®ì£ÏÎ~%²lž÷¸LÍs+Ä™yŽÕoj† UVÉHFŽª`åØiÝéͶûÈó8Q¿ïŠO ³½6ÿ²ªk“êƒÕK½Z$ÖM‡îžŒ áßôàsS—ƒC]·‘Õó ÷×Ä…h„vöú~¸Bù1‰Jìa:™¬Àœ¼í`:Øö>°LcœKRÍa¿ºƒ-â–†4&¤w(Ù[ðˆnRç·×Ë(U¿ã’KÚÓ}ã5*ñòa㧬æ§'RÊ4¥ï¯>o R:`¬4¤VàñÌ·÷÷(Ö2ë語rn­çÇêx¨‰êåj¦Ï×o 
\sš¿ÌÍ“òôTÖÒ…Š Ø§P²´ÓPËÛhy£Ø`#qRãÈvEjÆÁA fRð[SiN¿óëuC%þeͳNyÕÕ©ÈsîáGƒ©ÝÅ9ÞÄÞ·h{ÜÏܶc¸dE>!¥Gm,)hiþ|êß-MŸèî7ÂQ21dÕˆáœÝÀ²Ç±êh½ÞEÍm ì©’ÿ\hŽendstream endobj 149 0 obj << /Type /XRef /Length 143 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 59 0 R /Root 58 0 R /Size 150 /ID [<7c0861473744d4d74ef6531bb0cd7b07><0eb1fbd9ee81a28a4b4ed87886677472>] >> stream xœcb&F~0ù‰ $À8JR™üÏÀóö8Í› ççOFÙVá¬d gffP3³ ‘:`²D2}c€«4‹€Õ0eåüA$÷/°,ˆäœ"ù«A¤™ˆ”Ÿ$c¬Alëu ’±l‚9ˆd9‹d/+ˆäý"7‚Í÷éUM›lÍþ*= endstream endobj startxref 135421 %%EOF tmvtnorm/inst/doc/GibbsSampler.Rnw0000644000176200001440000002343314216137300016726 0ustar liggesusers%\VignetteIndexEntry{A short description of the Gibbs Sampler} \documentclass[a4paper]{article} \usepackage{Rd} \usepackage{amsmath} \usepackage{natbib} \usepackage{palatino,mathpazo} \usepackage{Sweave} %\newcommand{\pkg}[1]{\textbf{#1}} \newcommand{\vecb}[1]{\ensuremath{\boldsymbol{\mathbf{#1}}}} \def\bfx{\mbox{\boldmath $x$}} \def\bfy{\mbox{\boldmath $y$}} \def\bfz{\mbox{\boldmath $z$}} \def\bfalpha{\mbox{\boldmath $\alpha$}} \def\bfbeta{\mbox{\boldmath $\beta$}} \def\bfmu{\mbox{\boldmath $\mu$}} \def\bfa{\mbox{\boldmath $a$}} \def\bfb{\mbox{\boldmath $b$}} \def\bfu{\mbox{\boldmath $u$}} \def\bfSigma{\mbox{\boldmath $\Sigma$}} \def\bfD{\mbox{\boldmath $D$}} \def\bfH{\mbox{\boldmath $H$}} \def\bfT{\mbox{\boldmath $T$}} \def\bfX{\mbox{\boldmath $X$}} \def\bfY{\mbox{\boldmath $X$}} \title{Gibbs Sampler for the Truncated Multivariate Normal Distribution} \author{Stefan Wilhelm\thanks{wilhelm@financial.com}} \begin{document} \SweaveOpts{concordance=TRUE} \maketitle In this note we describe two ways of generating random variables with the Gibbs sampling approach for a truncated multivariate normal variable $\bfx$, whose density function can be expressed as: \begin{eqnarray*} f(\bfx,\bfmu,\bfSigma,\bfa,\bfb) & = & \frac{ \exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\}} } { \int_{\bfa}^{\bfb}{\exp{\left\{ 
-\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\} } d\bfx } } \end{eqnarray*} for $\bfa \le \bfx \le \bfb$ and $0$ otherwise.\\
\par
The first approach, as described by \cite{Kotecha1999}, uses the covariance matrix $\bfSigma$ and has been implemented in the R package \pkg{tmvtnorm} since version 0.9 (\cite{tmvtnorm-0.9}). The second way is based on the works of \cite{Geweke1991,Geweke2005} and uses the precision matrix $\bfH = \bfSigma^{-1}$. As will be shown below, the usage of the precision matrix offers some computational advantages, since it does not involve matrix inversions and is therefore favorable in higher dimensions and settings where the precision matrix is readily available. Applications are for example the analysis of spatial data, such as from telecommunications or social networks.\\
\par
Both versions of the Gibbs sampler can also be used for general linear constraints $\bfa \le \bfD \bfx \le \bfb$, which we will show in the last section. The function \code{rtmvnorm()} in the package \pkg{tmvtnorm} contains the \R{} implementation of the methods described in this note (\cite{tmvtnorm-1.3}).
\section{Gibbs Sampler with covariance matrix $\bfSigma$}
We describe here a Gibbs sampler for sampling from a truncated multinormal distribution as proposed by \cite{Kotecha1999}. It uses the fact that conditional distributions are truncated normal again. Kotecha et al. use full conditionals $f(x_i | x_{-i}) = f(x_i | x_1,\ldots,x_{i-1},x_{i+1},\ldots,x_{d})$.\\
\par
We use the fact that the conditional density of a multivariate normal distribution is multivariate normal again.
We cite \cite{Geweke2005}, p.171 for the following theorem on the Conditional Multivariate Normal Distribution.\\
Let $\bfz = \left( \begin{array}{c} \bfx \\ \bfy \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_x \\ \bfmu_y \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{xx} & \bfSigma_{xy} \\ \bfSigma_{yx} & \bfSigma_{yy} \end{array} \right]$\\
Denote the corresponding precision matrix
\begin{equation}
\bfH = \bfSigma^{-1} = \left[ \begin{array}{cc} \bfH_{xx} & \bfH_{xy} \\ \bfH_{yx} & \bfH_{yy} \end{array} \right]
\end{equation}
Then the distribution of $\bfy$ conditional on $\bfx$ is normal with variance
\begin{equation}
\bfSigma_{y.x} = \bfSigma_{yy} - \bfSigma_{yx} \bfSigma_{xx}^{-1} \bfSigma_{xy} = \bfH_{yy}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{y.x} = \bfmu_{y} + \bfSigma_{yx} \bfSigma_{xx}^{-1} (\bfx - \bfmu_x) = \bfmu_y - \bfH_{yy}^{-1} \bfH_{yx}(\bfx - \bfmu_x)
\end{equation}
\par
In the case of the full conditionals $f(x_i | x_{-i})$, which we will denote as $i.-i$, this results in the following formulas: $\bfz = \left( \begin{array}{c} \bfx_i \\ \bfx_{-i} \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_i \\ \bfmu_{-i} \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{ii} & \bfSigma_{i,-i} \\ \bfSigma_{-i,i} & \bfSigma_{-i,-i} \end{array} \right]$
Then the distribution of $i$ conditional on $-i$ is normal with variance
\begin{equation}
\bfSigma_{i.-i} = \bfSigma_{ii} - \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} \bfSigma_{-i,i} = \bfH_{ii}^{-1}
\end{equation}
and mean
\begin{equation}
\bfmu_{i.-i} = \bfmu_{i} + \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} (\bfx_{-i} - \bfmu_{-i}) = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i})
\end{equation}
We can then construct a Markov chain which continuously draws from $f(x_i | x_{-i})$ subject to $a_i \le x_i \le b_i$.
Let $\bfx^{(j)}$ denote the sample drawn at the $j$-th MCMC iteration. The steps of the Gibbs sampler for generating $N$ samples $\bfx^{(1)},\ldots,\bfx^{(N)}$ are: \begin{itemize} \item Since the conditional variance $\bfSigma_{i.-i}$ is independent from the actual realisation $\bfx^{(j)}_{-i}$, we can well precalculate it before running the Markov chain. \item Choose a start value $\bfx^{(0)}$ of the chain. \item In each round $j=1,\ldots,N$ we go from $i=1,\ldots,d$ and sample from the conditional density $x^{(j)}_i | x^{(j)}_1,\ldots,x^{(j)}_{i-1},x^{(j-1)}_{i+1},\ldots,x^{(j-1)}_{d}$. \item Draw a uniform random variate $U \sim Uni(0, 1)$. This is where our approach slightly differs from \cite{Kotecha1999}. They draw a normal variate $y$ and then apply $\Phi(y)$, which is basically uniform. \item We draw from univariate conditional normal distributions with mean $\mu$ and variance $\sigma^2$. See for example \cite{Greene2003} or \cite{Griffiths2004} for a transformation between a univariate normal random $y \sim N(\mu,\sigma^2)$ and a univariate truncated normal variate $x \sim TN(\mu,\sigma^2, a, b)$. 
For each realisation $y$ we can find a $x$ such as $P(Y \le y) = P(X \le x)$: \begin{equation*} \frac{ \Phi \left( \frac{x - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } { \Phi \left( \frac{b - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } = \Phi \left( \frac{y - \mu}{\sigma} \right) = U \end{equation*} \item Draw $\bfx_{i.-i}$ from conditional univariate truncated normal distribution \\ $TN(\bfmu_{i.-i}, \bfSigma_{i.-i}, a_i, b_i)$ by \begin{equation} \begin{split} \bfx_{i.-i} & = \bfmu_{i.-i} + \\ & \sigma_{i.-i} \Phi^{-1} \left[ U \left( \Phi \left( \frac{b_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) - \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right) + \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right] \end{split} \end{equation} \end{itemize} \section{Gibbs Sampler with precision matrix H} The Gibbs Sampler stated in terms of the precision matrix $\bfH = \bfSigma^{-1}$ instead of the covariance matrix $\bfSigma$ is much easier to write and to implement: Then the distribution of $i$ conditional on $-i$ is normal with variance \begin{equation} \bfSigma_{i.-i} = \bfH_{ii}^{-1} \end{equation} and mean \begin{equation} \bfmu_{i.-i} = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i}) \end{equation} Most importantly, if the precision matrix $\bfH$ is known, the Gibbs sampler does only involve matrix inversions of $\bfH_{ii}$ which in our case is a diagonal element/scalar. Hence, from the computational and performance perspective, especially in high dimensions, using $\bfH$ rather than $\bfSigma$ is preferable. When using $\bfSigma$ in $d$ dimensions, we have to solve for $d$ $(d-1) \times (d-1)$ matrices $\bfSigma_{-i,-i}$, $i=1,\ldots,d$, which can be quite substantial computations. \section{Gibbs Sampler for linear constraints} In this section we present the Gibbs sampling for general linear constraints based on \cite{Geweke1991}. 
We want to sample from $\bfx \sim N(\bfmu, \bfSigma)$ subject to linear constraints $\bfa \le \bfD \bfx \le \bfb$ for a full-rank matrix $\bfD$.\\ Defining \begin{equation} \bfz = \bfD \bfx - \bfD \bfmu, \end{equation} we have $E[\bfz] = \bfD E[\bfx] - \bfD \bfmu = 0$ and $Var[\bfz] = \bfD Var[\bfx] \bfD' = \bfD \bfSigma \bfD'$. Hence, this problem can be transformed to the rectangular case $\bfalpha \le \bfz \le \bfbeta$ with $\bfalpha = \bfa - \bfD \bfmu$ and $\bfbeta = \bfb - \bfD \bfmu$. It follows $\bfz \sim N(0, \bfT)$ with $\bfT = \bfD \bfSigma \bfD'$.\\ In the precision matrix case, the corresponding precision matrix of the transformed problem will be $\bfT^{-1} = ( \bfD \bfSigma \bfD' )^{-1} = \bfD'^{-1} \bfH \bfD^{-1}$. We can then sample from $\bfz$ the way described in the previous sections (either with covariance or precision matrix approach) and then transform $\bfz$ back to $\bfx$ by \begin{equation} \bfx = \bfmu + \bfD^{-1} \bfz \end{equation} \bibliographystyle{plainnat} \bibliography{tmvtnorm} \end{document}tmvtnorm/README.md0000644000176200001440000000123414216073376013430 0ustar liggesusers# tmvtnorm [![CRAN](http://www.r-pkg.org/badges/version/tmvtnorm)](https://cran.r-project.org/package=tmvtnorm) ### tmvtnorm: A package for the Truncated Multivariate Normal Distribution This package contains a number of useful methods for the truncated multivariate normal distribution. It considers random number generation with rejection and Gibbs sampling, computation of marginal densities as well as computation of the mean and covariance of the truncated variables. For a more detailed introduction, see this RJournal (2010) paper [tmvtnorm: A Package for the Truncated Multivariate Normal Distribution](https://doi.org/10.32614/RJ-2010-005). 
tmvtnorm/build/0000755000176200001440000000000015055364525013251 5ustar liggesuserstmvtnorm/build/vignette.rds0000644000176200001440000000033315055364525015607 0ustar liggesusers‹}O] ‚0i–BøöØ“ÿ!{è%¬‡^§N¨“m ½õ˳;ÓÈ]ØåÜswÎÕG-ãB¶Ú$(О…äi|`i*Ϥn+*¤éŒùneÉ…Â9•™`­b¼Á¼Àª¤x â‘û÷n›Æ\K †úÕ‡póeCj* Ò*¢-mrÝ~üæ[½AYé­ãbâÌvÜqljYE§/L½ ûÅ#´SÃ×ý™~Oð.œ} \examples{ ## Example 1: Truncated multi-normal lower <- c(-1,-1,-1) upper <- c(1,1,1) mean <- c(0,0,0) sigma <- matrix(c( 1, 0.8, 0.2, 0.8, 1, 0.1, 0.2, 0.1, 1), 3, 3) X <- rtmvnorm(n=1000, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) x <- seq(-1, 1, by=0.01) Fx <- ptmvnorm.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) plot(ecdf(X[,1]), main="marginal CDF for truncated multi-normal") lines(x, Fx, type="l", col="blue") ## Example 2: Truncated multi-t X <- rtmvt(n=1000, mean=c(0,0,0), sigma=sigma, df=2, lower=lower, upper=upper) x <- seq(-1, 1, by=0.01) Fx <- ptmvt.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) plot(ecdf(X[,1]), main="marginal CDF for truncated multi-t") lines(x, Fx, type="l", col="blue") } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/tmvnorm.Rd0000644000176200001440000001067711330334360014713 0ustar liggesusers% --- Source file: tmvtnorm.Rd --- \name{tmvnorm} \alias{dtmvnorm} \title{Truncated Multivariate Normal Density} \description{ This function provides the joint density function for the truncated multivariate normal distribution with mean equal to \code{mean} and covariance matrix \code{sigma}, lower and upper truncation points \code{lower} and \code{upper}. For convenience, it furthermore serves as a wrapper function for the one-dimensional and bivariate marginal densities \code{dtmvnorm.marginal()} and \code{dtmvnorm.marginal2()} respectively when invoked with the \code{margin} argument. 
} \usage{ dtmvnorm(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE, margin=NULL) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mean}{Mean vector, default is \code{rep(0, nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} \item{margin}{if \code{NULL} then the joint density is computed (the default), if \code{margin=q} then the one-dimensional marginal density in variate \code{q} (\code{q = 1..length(mean)}) is returned, if \code{margin=c(q,r)} then the bivariate marginal density in variates \code{q} and \code{r} for \code{q,r = 1..length(mean)} and \eqn{q \ne r}{q != r} is returned.} } \details{ The computation of truncated multivariate normal probabilities and densities is done using conditional probabilities from the standard/untruncated multivariate normal distribution. So we refer to the documentation of the mvtnorm package and the methodology is described in Genz (1992, 1993). } \author{Stefan Wilhelm } \seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}, \code{\link[mvtnorm]{rmvnorm}}, \code{\link[mvtnorm]{dmvnorm}}, \code{\link{dtmvnorm.marginal}} and \code{\link{dtmvnorm.marginal2}} for marginal density functions} \references{ Genz, A. (1992). Numerical computation of multivariate normal probabilities. \emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150 Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. \emph{Computing Science and Statistics}, \bold{25}, 400--405 Johnson, N./Kotz, S. (1970).
Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution. \emph{Journal of Multivariate Analysis}, \bold{94}, 209--221 } \examples{ dtmvnorm(x=c(0,0), mean=c(1,1), upper=c(0,0)) ########################################### # # Example 1: # truncated multivariate normal density # ############################################ x1<-seq(-2, 3, by=0.1) x2<-seq(-2, 3, by=0.1) density<-function(x) { sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2) z=dtmvnorm(x, mean=c(0,0), sigma=sigma, lower=c(-1,-1)) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute density d for grid d=fgrid(x1, x2, density) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate Normal Density", xlab=expression(x[1]), ylab=expression(x[2])) abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) ########################################### # # Example 2: # generation of random numbers # from a truncated multivariate normal distribution # ############################################ sigma <- matrix(c(4,2,2,3), ncol=2) x <- rtmvnorm(n=500, mean=c(1,2), sigma=sigma, upper=c(1,0)) plot(x, main="samples from truncated bivariate normal distribution", xlim=c(-6,6), ylim=c(-6,6), xlab=expression(x[1]), ylab=expression(x[2])) abline(v=1, lty=3, lwd=2, col="gray") abline(h=0, lty=3, lwd=2, col="gray") } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/gmm.tmvnorm.Rd0000644000176200001440000001124015055354043015465 0ustar liggesusers\name{gmm.tmvnorm} \alias{gmm.tmvnorm} \title{ GMM Estimation for the Truncated Multivariate Normal Distribution } \description{ Generalized Method of Moments (GMM) Estimation for the Truncated Multivariate Normal Distribution } \usage{ gmm.tmvnorm(X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = 
ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), method=c("ManjunathWilhelm","Lee"), cholesky = FALSE, ...) } \arguments{ \item{X}{Matrix of quantiles, each row is taken to be a quantile.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = ncol(X))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = ncol(X))}.} \item{start}{Named list with elements \code{mu} (mean vector) and \code{sigma} (covariance matrix). Initial values for optimizer.} \item{fixed}{Named list. Parameter values to keep fixed during optimization.} \item{method}{Which set of moment conditions used, possible methods are "ManjunathWilhelm" (default) and "Lee".} \item{cholesky}{if TRUE, we use the Cholesky decomposition of \code{sigma} as parametrization} \item{\dots}{Further arguments to pass to \code{\link[gmm]{gmm}}} } \details{ This method performs an estimation of the parameters \code{mean} and \code{sigma} of a truncated multinormal distribution using the Generalized Method of Moments (GMM), when the truncation points \code{lower} and \code{upper} are known. \code{gmm.tmvnorm()} is a wrapper for the general GMM method \code{\link[gmm]{gmm}}, so one does not have to specify the moment conditions. \bold{Manjunath/Wilhelm moment conditions}\cr Because the first and second moments can be computed thanks to the \code{\link{mtmvnorm}} function, we can set up a method-of-moments estimator by equating the sample moments to their population counterparts. This way we have an exactly identified case. 
\bold{Lee (1979,1983) moment conditions}\cr The recursive moment conditions presented by Lee (1979,1983) are defined for \eqn{l=0,1,2,\ldots} as \deqn{ \sigma^{iT} E(x_i^l \textbf{x}) = \sigma^{iT} \mu E(x_i^l) + l E(x_i^{l-1}) + \frac{a_i^l F_i(a_i)}{F} - \frac{b_i^l F_i(b_i)}{F} } where \eqn{E(x_i^l)} and \eqn{E(x_i^l \textbf{x})} are the moments of \eqn{x_i^l} and \eqn{x_i^l \textbf{x}} respectively and \eqn{F_i(c)/F} is the one-dimensional marginal density in variable \eqn{i} as calculated by \code{\link{dtmvnorm.marginal}}. \eqn{\sigma^{iT}} is the \eqn{i}-th column of the inverse covariance matrix \eqn{\Sigma^{-1}}. This method returns an object of class \code{gmm}, for which various diagnostic methods are available, like \code{profile()}, \code{confint()} etc. See examples. } \value{ An object of class \code{\link[gmm]{gmm}} } \author{ Stefan Wilhelm \email{wilhelm@financial.com} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Lee, L.-F. (1979). On the first and second moments of the truncated multi-normal distribution and a simple estimator. \emph{Economics Letters}, \bold{3}, 165--169 Lee, L.-F. (1983). The determination of moments of the doubly truncated multivariate normal Tobit model. \emph{Economics Letters}, \bold{11}, 245--250 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. 
Available at SSRN: \url{https://www.ssrn.com/abstract=1472153} } \seealso{ \code{\link[gmm]{gmm}} } \examples{ \dontrun{ set.seed(1.234) # the actual parameters lower <- c(-1, -2) upper <- c(3, Inf) mu <- c(0, 0) sigma <- matrix(c(1, 0.8, 0.8, 2), 2, 2) # generate random samples X <- rtmvnorm(n=500, mu, sigma, lower, upper) # estimate mean vector and covariance matrix sigma from random samples X # with default start values gmm.fit1 <- gmm.tmvnorm(X, lower=lower, upper=upper) # diagnostic output of the estimated parameters summary(gmm.fit1) vcov(gmm.fit1) # confidence intervals confint(gmm.fit1) # choosing a different start value gmm.fit2 <- gmm.tmvnorm(X, lower=lower, upper=upper, start=list(mu=c(0.1, 0.1), sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2))) summary(gmm.fit2) # GMM estimation with Lee (1983) moment conditions gmm.fit3 <- gmm.tmvnorm(X, lower=lower, upper=upper, method="Lee") summary(gmm.fit3) confint(gmm.fit3) # MLE estimation for comparison mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper) confint(mle.fit1) } } tmvtnorm/man/rtmvt.Rd0000644000176200001440000001275214216143070014363 0ustar liggesusers\name{rtmvt} \alias{rtmvt} \title{Sampling Random Numbers From The Truncated Multivariate Student t Distribution} \description{ This function generates random numbers from the truncated multivariate Student-t distribution with mean equal to \code{mean} and covariance matrix \code{sigma}, lower and upper truncation points \code{lower} and \code{upper} with either rejection sampling or Gibbs sampling. } \usage{ rtmvt(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), algorithm=c("rejection", "gibbs"), ...) } \arguments{ \item{n}{Number of random points to be sampled. 
Must be an integer >= 1.} \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.} \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.} \item{df}{Degrees of freedom parameter (positive, may be non-integer)} \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{algorithm}{Method used, possible methods are rejection sampling ("rejection", default) and the R Gibbs sampler ("gibbs").} \item{...}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvt.gibbs()}, such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details} } \details{ We sample \eqn{x \sim T(\mu, \Sigma, df)}{x ~ T(mean, Sigma, df)} subject to the rectangular truncation \eqn{lower \le x \le upper}{lower <= x <= upper}. Currently, two random number generation methods are implemented: rejection sampling and the Gibbs Sampler. For rejection sampling \code{algorithm="rejection"}, we sample from \code{\link[mvtnorm]{rmvt}} and retain only samples inside the support region. The acceptance probability will be calculated with \code{\link[mvtnorm]{pmvt}}. \code{\link[mvtnorm]{pmvt}} does only accept integer degrees of freedom \code{df}. For non-integer \code{df}, \code{algorithm="rejection"} will throw an error, so please use \code{algorithm="gibbs"} instead. The arguments to be passed along with \code{algorithm="gibbs"} are: \describe{ \item{\code{burn.in.samples}}{number of samples in Gibbs sampling to be discarded as burn-in phase, must be non-negative.} \item{\code{start.value}}{Start value (vector of length \code{length(mean)}) for the MCMC chain. If one is specified, it must lie inside the support region (\eqn{lower \le start.value \le upper}{lower <= start.value <= upper}). 
If none is specified, the start value is taken componentwise as the finite lower or upper boundaries respectively, or zero if both boundaries are infinite. Defaults to NULL.} \item{\code{thinning}}{Thinning factor for reducing autocorrelation of random points in Gibbs sampling. Must be an integer \eqn{\ge 1}{>= 1}. We create a Markov chain of length \code{(n*thinning)} and take only those samples \code{j=1:(n*thinning)} where \code{j \%\% thinning == 0} Defaults to 1 (no thinning of the chain).} } } \section{Warning}{ The same warnings for the Gibbs sampler apply as for the method \code{\link{rtmvnorm}}. } \author{Stefan Wilhelm , Manjunath B G } \references{ Geweke, John F. (1991) Efficient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints. \emph{Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface. Seattle Washington, April 21-24, 1991}, pp. 571--578 An earlier version of this paper is available at \url{https://www.researchgate.net/publication/2335219_Efficient_Simulation_from_the_Multivariate_Normal_and_Student-t_Distributions_Subject_to_Linear_Constraints_and_the_Evaluation_of_Constraint_Probabilities} } \examples{ ########################################################### # # Example 1 # ########################################################### # Draw from multi-t distribution without truncation X1 <- rtmvt(n=10000, mean=rep(0, 2), df=2) X2 <- rtmvt(n=10000, mean=rep(0, 2), df=2, lower=c(-1,-1), upper=c(1,1)) ########################################################### # # Example 2 # ########################################################### df = 2 mu = c(1,1,1) sigma = matrix(c( 1, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1), 3, 3) lower = c(-2,-2,-2) upper = c(2, 2, 2) # Rejection sampling X1 <- rtmvt(n=10000, mu, sigma, df, lower, upper) # Gibbs sampling without thinning X2 <- rtmvt(n=10000, mu, sigma, df, lower, upper, algorithm="gibbs") # Gibbs sampling with thinning X3 <- 
rtmvt(n=10000, mu, sigma, df, lower, upper, algorithm="gibbs", thinning=2) plot(density(X1[,1], from=lower[1], to=upper[1]), col="red", lwd=2, main="Gibbs vs. Rejection") lines(density(X2[,1], from=lower[1], to=upper[1]), col="blue", lwd=2) legend("topleft",legend=c("Rejection Sampling","Gibbs Sampling"), col=c("red","blue"), lwd=2) acf(X1) # no autocorrelation in Rejection sampling acf(X2) # strong autocorrelation of Gibbs samples acf(X3) # reduced autocorrelation of Gibbs samples after thinning } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/dtmvt.Rd0000644000176200001440000000733114216143124014342 0ustar liggesusers\name{dtmvt} \alias{dtmvt} \title{Truncated Multivariate Student t Density} \description{ This function provides the joint density function for the truncated multivariate Student t distribution with mean vector equal to \code{mean}, covariance matrix \code{sigma}, degrees of freedom parameter \code{df} and lower and upper truncation points \code{lower} and \code{upper}. } \usage{ dtmvt(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), log = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mean}{Mean vector, default is \code{rep(0, nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{df}{degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} } \details{ The Truncated Multivariate Student t Distribution is a conditional Multivariate Student t distribution subject to (linear) constraints \eqn{a \le \bold{x} \le b}. 
The density of the \eqn{p}-variate Multivariate Student t distribution with \eqn{\nu}{nu} degrees of freedom is \deqn{ f(\bold{x}) = \frac{\Gamma((\nu + p)/2)}{(\pi\nu)^{p/2} \Gamma(\nu/2) |\Sigma|^{1/2}} [ 1 + \frac{1}{\nu} (x - \mu)^T \Sigma^{-1} (x - \mu) ]^{- (\nu + p) / 2} } The density of the truncated distribution \eqn{f_{a,b}(x)} with constraints \eqn{(a \le x \le b)}{a <= x <= b} is accordingly \deqn{ f_{a,b}(x) = \frac{f(\bold{x})} {P(a \le x \le b)} } } \value{ a numeric vector with density values } \seealso{ \code{\link{ptmvt}} and \code{\link{rtmvt}} for probabilities and random number generation in the truncated case, see \code{\link[mvtnorm]{dmvt}}, \code{\link[mvtnorm]{rmvt}} and \code{\link[mvtnorm]{pmvt}} for the untruncated multi-t distribution. } \references{ Geweke, J. F. (1991) Efficient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities. \url{https://www.researchgate.net/publication/2335219_Efficient_Simulation_from_the_Multivariate_Normal_and_Student-t_Distributions_Subject_to_Linear_Constraints_and_the_Evaluation_of_Constraint_Probabilities} Samuel Kotz, Saralees Nadarajah (2004). Multivariate t Distributions and Their Applications. 
\emph{Cambridge University Press} } \author{Stefan Wilhelm \email{wilhelm@financial.com}} \examples{ # Example x1 <- seq(-2, 3, by=0.1) x2 <- seq(-2, 3, by=0.1) mean <- c(0,0) sigma <- matrix(c(1, -0.5, -0.5, 1), 2, 2) lower <- c(-1,-1) density <- function(x) { z=dtmvt(x, mean=mean, sigma=sigma, lower=lower) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute multivariate-t density d for grid d <- fgrid(x1, x2, function(x) dtmvt(x, mean=mean, sigma=sigma, lower=lower)) # compute multivariate normal density d for grid d2 <- fgrid(x1, x2, function(x) dtmvnorm(x, mean=mean, sigma=sigma, lower=lower)) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate t Density", xlab=expression(x[1]), ylab=expression(x[2])) contour(x1, x2, d2, nlevels=5, add=TRUE, col="red") abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/dmvnorm.marginal.Rd0000644000176200001440000001257514532763577016512 0ustar liggesusers% --- Source file: dtmvnorm-marginal.Rd --- \name{dtmvnorm.marginal} \alias{dtmvnorm.marginal} \title{One-dimensional marginal density functions from a Truncated Multivariate Normal distribution} \description{ This function computes the one-dimensional marginal density function from a Truncated Multivariate Normal density function using the algorithm given in Cartinhour (1990). 
} \usage{ dtmvnorm.marginal(xn, n=1, mean= rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE) } \arguments{ \item{xn}{Vector of quantiles to calculate the marginal density for.} \item{n}{Index position (1..k) within the random vector x to calculate the one-dimensional marginal density for.} \item{mean}{Mean vector, default is \code{rep(0, length = nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} } \details{ The one-dimensional marginal density \eqn{f_i(x_i)} of \eqn{x_i} is \deqn{f_i(x_i) = \int_{a_1}^{b_1} \ldots \int_{a_{i-1}}^{b_{i-1}} \int_{a_{i+1}}^{b_{i+1}} \ldots \int_{a_k}^{b_k} f(x) dx_{-i}} Note that the one-dimensional marginal density is not truncated normal, but only conditional densities are truncated normal. } \author{Stefan Wilhelm } \references{ Cartinhour, J. (1990). One-dimensional marginal density functions of a truncated multivariate normal density function. \emph{Communications in Statistics - Theory and Methods}, \bold{19}, 197--203 Arnold et al. (1993). The Nontruncated Marginal of a Truncated Bivariate Normal Distribution. 
\emph{Psychometrika}, \bold{58}, 471--488 } \examples{ ############################################# # # Example 1: truncated bivariate normal # ############################################# # parameters of the bivariate normal distribution sigma = matrix(c(1 , 0.95, 0.95, 1 ), 2, 2) mu = c(0,0) # sample from multivariate normal distribution X = rmvnorm(5000, mu, sigma) # truncation in x2 with x2 <= 0 X.trunc = X[X[,2]<0,] # plot the realisations before and after truncation par(mfrow=c(2,2)) plot(X, col="gray", xlab=expression(x[1]), ylab=expression(x[2]), main="realisations from a\n truncated bivariate normal distribution") points(X.trunc) abline(h=0, lty=2, col="gray") #legend("topleft", col=c("gray", "black") # marginal density for x1 from realisations plot(density(X.trunc[,1]), main=expression("marginal density for "*x[1])) # one-dimensional marginal density for x1 using the formula x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma, lower=c(-Inf,-Inf), upper=c(Inf,0)) lines(x, fx, lwd=2, col="red") # marginal density for x2 plot(density(X.trunc[,2]), main=expression("marginal density for "*x[2])) # one-dimensional marginal density for x2 using the formula x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma, lower=c(-Inf,-Inf), upper=c(Inf,0)) lines(x, fx, lwd=2, col="blue") ############################################# # # Example 2 : truncated trivariate normal # ############################################# # parameters of the trivariate normal distribution sigma = outer(1:3,1:3,pmin) mu = c(0,0,0) # sample from multivariate normal distribution X = rmvnorm(2000, mu, sigma) # truncation in x2 and x3 : x2 <= 0, x3 <= 0 X.trunc = X[X[,2]<=0 & X[,3]<=0,] par(mfrow=c(2,3)) plot(X, col="gray", xlab=expression(x[1]), ylab=expression(x[2]), main="realisations from a\n truncated trivariate normal distribution") points(X.trunc, col="black") abline(h=0, lty=2, col="gray") plot(X[,2:3], col="gray", 
xlab=expression(x[2]), ylab=expression(x[3]), main="realisations from a\n truncated trivariate normal distribution") points(X.trunc[,2:3], col="black") abline(h=0, lty=2, col="gray") abline(v=0, lty=2, col="gray") plot(X[,c(1,3)], col="gray", xlab=expression(x[1]), ylab=expression(x[3]), main="realisations from a\n truncated trivariate normal distribution") points(X.trunc[,c(1,3)], col="black") abline(h=0, lty=2, col="gray") # one-dimensional marginal density for x1 from realisations and formula plot(density(X.trunc[,1]), main=expression("marginal density for "*x[1])) x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0)) lines(x, fx, lwd=2, col="red") # one-dimensional marginal density for x2 from realisations and formula plot(density(X.trunc[,2]), main=expression("marginal density for "*x[2])) x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0)) lines(x, fx, lwd=2, col="red") # one-dimensional marginal density for x3 from realisations and formula plot(density(X.trunc[,3]), main=expression("marginal density for "*x[3])) x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=3, mean=mu, sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0)) lines(x, fx, lwd=2, col="red") } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/ptmvnorm.Rd0000644000176200001440000000614212303463400015062 0ustar liggesusers% --- Source file: ptmvnorm.Rd --- \name{ptmvnorm} \alias{ptmvnorm} \title{ Truncated Multivariate Normal Distribution } \description{ Computes the distribution function of the truncated multivariate normal distribution for arbitrary limits and correlation matrices based on the \code{pmvnorm()} implementation of the algorithms by Genz and Bretz. 
} \usage{ ptmvnorm(lowerx, upperx, mean=rep(0, length(lowerx)), sigma, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) } \arguments{ \item{lowerx}{ the vector of lower limits of length n.} \item{upperx}{ the vector of upper limits of length n.} \item{mean}{ the mean vector of length n.} \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. } \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{maxpts}{ maximum number of function values as integer. } \item{abseps}{ absolute error tolerance as double. } \item{releps}{ relative error tolerance as double. } } \details{ The computation of truncated multivariate normal probabilities and densities is done using conditional probabilities from the standard/untruncated multivariate normal distribution. So we refer to the documentation of the \code{mvtnorm} package and the methodology is described in Genz (1992, 1993) and Genz/Bretz (2009). For properties of the truncated multivariate normal distribution see for example Johnson/Kotz (1970) and Horrace (2005). } \value{ The evaluated distribution function is returned with attributes \item{error}{estimated absolute error and} \item{msg}{status messages.} } \references{ Genz, A. (1992). Numerical computation of multivariate normal probabilities. \emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150 Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. \emph{Computing Science and Statistics}, \bold{25}, 400--405 Genz, A. and Bretz, F. (2009). 
Computation of Multivariate Normal and t Probabilities. \emph{Lecture Notes in Statistics}, Vol. \bold{195}, Springer-Verlag, Heidelberg. Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution. \emph{Journal of Multivariate Analysis}, \bold{94}, 209--221 } \examples{ sigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2) Fx <- ptmvnorm(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0), sigma=sigma, lower=c(-1,-1), upper=c(1,1)) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/dtmvnorm.marginal2.Rd0000644000176200001440000001007614216103636016731 0ustar liggesusers\name{dtmvnorm.marginal2} \Rdversion{1.1} \alias{dtmvnorm.marginal2} \title{ Bivariate marginal density functions from a Truncated Multivariate Normal distribution } \description{ This function computes the bivariate marginal density function \eqn{f(x_q, x_r)} from a k-dimensional Truncated Multivariate Normal density function (k>=2). The bivariate marginal density is obtained by integrating out (k-2) dimensions as proposed by Tallis (1961). This function is basically an extraction of the Leppard and Tallis (1989) Fortran code for moments calculation, but extended to the double truncated case. 
} \usage{ dtmvnorm.marginal2(xq, xr, q, r, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), log = FALSE, pmvnorm.algorithm=GenzBretz()) } \arguments{ \item{xq}{Value \eqn{x_q}} \item{xr}{Value \eqn{x_r}} \item{q}{Index position for \eqn{x_q} within mean vector to calculate the bivariate marginal density for.} \item{r}{Index position for \eqn{x_r} within mean vector to calculate the bivariate marginal density for.} \item{mean}{Mean vector, default is \code{rep(0, length = nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} \item{pmvnorm.algorithm}{Algorithm used for \code{\link[mvtnorm]{pmvnorm}}} } \details{ The bivariate marginal density function \eqn{f(x_q, x_r)} for \eqn{x \sim TN(\mu, \Sigma, a, b)} and \eqn{q \ne r} is defined as \deqn{F_{q,r}(x_q=c_q, x_r=c_r) = \int^{b_1}_{a_1}...\int^{b_{q-1}}_{a_{q-1}}\int^{b_{q+1}}_{a_{q+1}}...\int^{b_{r-1}}_{a_{r-1}}\int^{b_{r+1}}_{a_{r+1}}...\int^{b_{k}}_{a_{k}} \varphi{_{\alpha}}_{\Sigma}(x_s, c_q, c_r) dx_s} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Leppard, P. and Tallis, G. M. (1989). Evaluation of the Mean and Covariance of the Truncated Multinormal \emph{Applied Statistics}, \bold{38}, 543--553 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. 
Available at SSRN: \url{https://www.ssrn.com/abstract=1472153} } \author{Stefan Wilhelm , Manjunath B G } \examples{ lower = c(-0.5, -1, -1) upper = c( 2.2, 2, 2) mean = c(0,0,0) sigma = matrix(c(2.0, -0.6, 0.7, -0.6, 1.0, -0.2, 0.7, -0.2, 1.0), 3, 3) # generate random samples from untruncated and truncated distribution Y = rmvnorm(10000, mean=mean, sigma=sigma) X = rtmvnorm(500, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbs") # compute bivariate marginal density of x1 and x2 xq <- seq(lower[1], upper[1], by=0.1) xr <- seq(lower[2], upper[2], by=0.1) grid <- matrix(NA, length(xq), length(xr)) for (i in 1:length(xq)) { for (j in 1:length(xr)) { grid[i,j] = dtmvnorm.marginal2(xq=xq[i], xr=xr[j], q=1, r=2, sigma=sigma, lower=lower, upper=upper) } } plot(Y[,1], Y[,2], xlim=c(-4, 4), ylim=c(-4, 4), main=expression("bivariate marginal density ("*x[1]*","*x[2]*")"), xlab=expression(x[1]), ylab=expression(x[2]), col="gray80") points(X[,1], X[,2], col="black") lines(x=c(lower[1], upper[1], upper[1], lower[1], lower[1]), y=c(lower[2],lower[2],upper[2],upper[2],lower[2]), lty=2, col="red") contour(xq, xr, grid, add=TRUE, nlevels = 8, col="red", lwd=2) # scatterplot matrices for untruncated and truncated points require(lattice) splom(Y) splom(X) } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/rtmvnorm2.Rd0000644000176200001440000000735712063442312015161 0ustar liggesusers\name{rtmvnorm2} \alias{rtmvnorm2} \title{Sampling Random Numbers From The Truncated Multivariate Normal Distribution With Linear Constraints} \description{ This function generates random numbers from the truncated multivariate normal distribution with mean equal to \code{mean} and covariance matrix \code{sigma} and general linear constraints \deqn{lower \le D x \le upper}{lower <= D x <= upper} with either rejection sampling or Gibbs sampling. 
} \usage{ rtmvnorm2(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), D = diag(length(mean)), algorithm = c("gibbs", "gibbsR", "rejection"), ...) } \arguments{ \item{n}{Number of random points to be sampled. Must be an integer \eqn{\ge 1}{>= 1}.} \item{mean}{Mean vector (d x 1), default is \code{rep(0, length = ncol(x))}.} \item{sigma}{Covariance matrix (d x d), default is \code{diag(ncol(x))}.} \item{lower}{Vector of lower truncation points (r x 1), default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points (r x 1), default is \code{rep( Inf, length = length(mean))}.} \item{D}{Matrix for linear constraints (r x d), defaults to diagonal matrix (d x d), i.e. r = d.} \item{algorithm}{Method used, possible methods are the Fortran Gibbs sampler ("gibbs", default), the Gibbs sampler implementation in R ("gibbsR") and rejection sampling ("rejection")} \item{\dots}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvnorm.gibbs()}, such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details in \code{\link{rtmvnorm}}} } \details{ This method allows for \eqn{r > d}{r > d} linear constraints, whereas \code{\link{rtmvnorm}} requires a full-rank matrix D \eqn{(d \times d)}{(d x d)} and can only handle \eqn{r \le d}{r <= d} constraints at the moment. The lower and upper bounds \code{lower} and \code{upper} are \eqn{(r \times 1)}{(r x 1)}, the matrix \code{D} is \eqn{(r \times d)}{(r x d)} and x is \eqn{(d \times 1)}{(d x 1)}. The default case is \eqn{r = d}{r = d} and \eqn{D = I_d}{D = I_d}. 
} \section{Warning}{This method will be merged with \code{\link{rtmvnorm}} in one of the next releases.} \author{ Stefan Wilhelm } \seealso{ \code{\link{rtmvnorm}} } \examples{ \dontrun{ ################################################################################ # # Example 5a: Number of linear constraints r > dimension d # ################################################################################ # general linear restrictions a <= Dx <= b with x (d x 1); D (r x d); a,b (r x 1) # Dimension d=2, r=3 linear constraints # # a1 <= x1 + x2 <= b2 # a2 <= x1 - x2 <= b2 # a3 <= 0.5x1 - x2 <= b3 # # [ a1 ] <= [ 1 1 ] [ x1 ] <= [b1] # [ a2 ] [ 1 -1 ] [ x2 ] [b2] # [ a3 ] [ 0.5 -1 ] [b3] D <- matrix( c( 1, 1, 1, -1, 0.5, -1), 3, 2, byrow=TRUE) a <- c(0, 0, 0) b <- c(1, 1, 1) # mark linear constraints as lines plot(NA, xlim=c(-0.5, 1.5), ylim=c(-1,1)) for (i in 1:3) { abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") } ### Gibbs sampling for general linear constraints a <= Dx <= b mean <- c(0, 0) sigma <- matrix(c(1.0, 0.2, 0.2, 1.0), 2, 2) x0 <- c(0.5, 0.2) # Gibbs sampler start value X <- rtmvnorm2(n=1000, mean, sigma, lower=a, upper=b, D, start.value=x0) # show random points within simplex points(X, pch=20, col="black") } } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/ptmvt.Rd0000644000176200001440000000452314216143110014351 0ustar liggesusers\name{ptmvt} \alias{ptmvt} \title{Truncated Multivariate Student t Distribution} \description{ Computes the distribution function of the truncated multivariate t distribution } \usage{ ptmvt(lowerx, upperx, mean = rep(0, length(lowerx)), sigma, df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) } \arguments{ \item{lowerx}{ the vector of lower limits of length n.} \item{upperx}{ the vector of upper limits of length n.} \item{mean}{ the mean vector of length n.} \item{sigma}{ 
the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. } \item{df}{Degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{maxpts}{ maximum number of function values as integer. } \item{abseps}{ absolute error tolerance as double. } \item{releps}{ relative error tolerance as double. } } \value{ The evaluated distribution function is returned with attributes \item{error}{estimated absolute error and} \item{msg}{status messages.} } \references{ Geweke, J. F. (1991) Efficient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities. \url{https://www.researchgate.net/publication/2335219_Efficient_Simulation_from_the_Multivariate_Normal_and_Student-t_Distributions_Subject_to_Linear_Constraints_and_the_Evaluation_of_Constraint_Probabilities} Samuel Kotz, Saralees Nadarajah (2004). Multivariate t Distributions and Their Applications. \emph{Cambridge University Press} } \author{Stefan Wilhelm } \examples{ sigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2) Fx <- ptmvt(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0), sigma=sigma, df=3, lower=c(-1,-1), upper=c(1,1)) } \keyword{ math } \keyword{ multivariate } tmvtnorm/man/mtmvnorm.Rd0000644000176200001440000000635614216101440015064 0ustar liggesusers\name{mtmvnorm} \alias{mtmvnorm} \alias{moments} \title{Computation of Mean Vector and Covariance Matrix For Truncated Multivariate Normal Distribution} \description{ Computation of the first two moments, i.e. 
mean vector and covariance matrix for the Truncated Multivariate Normal Distribution based on the works of Tallis (1961), Lee (1979) and Leppard and Tallis (1989), but extended to the double-truncated case with general mean and general covariance matrix. } \usage{ mtmvnorm(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), doComputeVariance=TRUE, pmvnorm.algorithm=GenzBretz()) } \arguments{ \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.} \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.} \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{doComputeVariance}{flag whether to compute the variance for users who are interested only in the mean. Defaults to \code{TRUE} for backward compatibility.} \item{pmvnorm.algorithm}{Algorithm used for \code{\link[mvtnorm]{pmvnorm}}} } \details{ Details for the moment calculation under double truncation and the derivation of the formula can be found in the Manjunath/Wilhelm (2009) working paper. If only a subset of variables are truncated, we calculate the truncated moments only for these and use the Johnson/Kotz formula for the remaining untruncated variables. } \value{ \item{tmean}{Mean vector of truncated variables} \item{tvar}{Covariance matrix of truncated variables} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Lee, L.-F. (1979). On the first and second moments of the truncated multi-normal distribution and a simple estimator. 
\emph{Economics Letters}, \bold{3}, 165--169 Leppard, P. and Tallis, G. M. (1989). Evaluation of the Mean and Covariance of the Truncated Multinormal. \emph{Applied Statistics}, \bold{38}, 543--553 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. Available at SSRN: \url{https://www.ssrn.com/abstract=1472153} } \author{Stefan Wilhelm , Manjunath B G } \examples{ mu <- c(0.5, 0.5, 0.5) sigma <- matrix(c( 1, 0.6, 0.3, 0.6, 1, 0.2, 0.3, 0.2, 2), 3, 3) a <- c(-Inf, -Inf, -Inf) b <- c(1, 1, 1) # compute first and second moments mtmvnorm(mu, sigma, lower=a, upper=b) # compare with simulated results X <- rtmvnorm(n=1000, mean=mu, sigma=sigma, lower=a, upper=b) colMeans(X) cov(X) } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/mle.tmvnorm.Rd0000644000176200001440000000716411454422144015471 0ustar liggesusers\name{mle.tmvnorm} \alias{mle.tmvnorm} \title{ Maximum Likelihood Estimation for the Truncated Multivariate Normal Distribution } \description{ Maximum Likelihood Estimation for the Truncated Multivariate Normal Distribution } \usage{ mle.tmvnorm(X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), method = "BFGS", cholesky = FALSE, lower.bounds = -Inf, upper.bounds = +Inf, ...) } \arguments{ \item{X}{Matrix of quantiles, each row is taken to be a quantile.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = ncol(X))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = ncol(X))}.} \item{start}{Named list with elements \code{mu} (mean vector) and \code{sigma} (covariance matrix). Initial values for optimizer.} \item{fixed}{Named list. Parameter values to keep fixed during optimization.} \item{method}{Optimization method to use. 
See \code{\link{optim}}} \item{cholesky}{if TRUE, we use the Cholesky decomposition of \code{sigma} as parametrization} \item{lower.bounds}{lower bounds/box constraints for method "L-BFGS-B"} \item{upper.bounds}{upper bounds/box constraints for method "L-BFGS-B"} \item{\dots}{Further arguments to pass to \code{\link{optim}}} } \details{ This method performs a maximum likelihood estimation of the parameters \code{mean} and \code{sigma} of a truncated multinormal distribution, when the truncation points \code{lower} and \code{upper} are known. \code{mle.tmvnorm()} is a wrapper for the general maximum likelihood method \code{\link[stats4]{mle}}, so one does not have to specify the negative log-likelihood function. The log-likelihood function for a data matrix X (T x n) can be established straightforward as \deqn{ \log L(X | \mu,\Sigma) = -T \log{\alpha(\mu,\Sigma)} + {-T/2} \log{\|\Sigma\|} -\frac{1}{2} \sum_{t=1}^{T}{(x_t-\mu)' \Sigma^{-1} (x_t-\mu)} } As \code{\link[stats4]{mle}}, this method returns an object of class \code{mle}, for which various diagnostic methods are available, like \code{profile()}, \code{confint()} etc. See examples. In order to adapt the estimation problem to \code{\link[stats4]{mle}}, the named parameters for mean vector elements are "mu_i" and the elements of the covariance matrix are "sigma_ij" for the lower triangular matrix elements, i.e. (j <= i). 
} \value{ An object of class \code{\link[stats4]{mle-class}} } \author{ Stefan Wilhelm \email{wilhelm@financial.com} } \seealso{ \code{\link[stats4]{mle}} and \code{\link[stats4]{mle-class}} } \examples{ \dontrun{ set.seed(1.2345) # the actual parameters lower <- c(-1,-1) upper <- c(1, 2) mu <- c(0, 0) sigma <- matrix(c(1, 0.7, 0.7, 2), 2, 2) # generate random samples X <- rtmvnorm(n=500, mu, sigma, lower, upper) method <- "BFGS" # estimate mean vector and covariance matrix sigma from random samples X # with default start values mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper) # diagnostic output of the estimated parameters summary(mle.fit1) logLik(mle.fit1) vcov(mle.fit1) # profiling the log likelihood and confidence intervals mle.profile1 <- profile(mle.fit1, X, method="BFGS", trace=TRUE) confint(mle.profile1) par(mfrow=c(3,2)) plot(mle.profile1) # choosing a different start value mle.fit2 <- mle.tmvnorm(X, lower=lower, upper=upper, start=list(mu=c(0.1, 0.1), sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2))) summary(mle.fit2) } }tmvtnorm/man/qtmvnorm-marginal.Rd0000644000176200001440000000657714532766453016713 0ustar liggesusers\name{qtmvnorm-marginal} \alias{qtmvnorm.marginal} \title{ Quantiles of the Truncated Multivariate Normal Distribution in one dimension} \description{ Computes the equicoordinate quantile function of the truncated multivariate normal distribution for arbitrary correlation matrices based on an inversion of the algorithms by Genz and Bretz. } \usage{ qtmvnorm.marginal(p, interval = c(-10, 10), tail = c("lower.tail","upper.tail","both.tails"), n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{p}{ probability.} \item{interval}{ a vector containing the end-points of the interval to be searched by \code{\link{uniroot}}.} \item{tail}{ specifies which quantiles should be computed. 
\code{lower.tail} gives the quantile \eqn{x} for which \eqn{P[X \le x] = p}{P[X <= x] = p}, \code{upper.tail} gives \eqn{x} with \eqn{P[X > x] = p} and \code{both.tails} leads to \eqn{x} with \eqn{P[-x \le X \le x] = p}{P[-x <= X <= x] = p} } \item{n}{ index (1..n) to calculate marginal quantile for} \item{mean}{ the mean vector of length n. } \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. } \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{...}{ additional parameters to be passed to \code{\link{uniroot}}.} } \details{ Only equicoordinate quantiles are computed, i.e., the quantiles in each dimension coincide. Currently, the distribution function is inverted by using the \code{\link{uniroot}} function which may result in limited accuracy of the quantiles. } \value{ A list with four components: \code{quantile} and \code{f.quantile} give the location of the quantile and the value of the function evaluated at that point. \code{iter} and \code{estim.prec} give the number of iterations used and an approximate estimated precision from \code{\link{uniroot}}. } \seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}} \examples{ # finite dimensional distribution of the Geometric Brownian Motion log-returns # with truncation # volatility p.a. 
sigma=0.4 # risk free rate r = 0.05 # n=3 points in time T <- c(0.5, 0.7, 1) # covariance matrix of Geometric Brownian Motion returns Sigma = sigma^2*outer(T,T,pmin) # mean vector of the Geometric Brownian Motion returns mu = (r - sigma^2/2) * T # lower truncation vector a (a<=x<=b) a = rep(-Inf, 3) # upper truncation vector b (a<=x<=b) b = c(0, 0, Inf) # quantile of the t_1 returns qtmvnorm.marginal(p=0.95, interval = c(-10, 10), tail = "lower.tail", n=1, mean = mu, sigma = Sigma, lower=a, upper=b) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/rtmvnorm.Rd0000644000176200001440000003456314216076734015113 0ustar liggesusers\name{rtmvnorm} \alias{rtmvnorm} \alias{rtmvnorm.sparseMatrix} \title{Sampling Random Numbers From The Truncated Multivariate Normal Distribution} \description{ This function generates random numbers from the truncated multivariate normal distribution with mean equal to \code{mean} and covariance matrix \code{sigma} (or alternatively precision matrix \code{H}), lower and upper truncation points \code{lower} and \code{upper} with either rejection sampling or Gibbs sampling. } \usage{ rtmvnorm(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), D = diag(length(mean)), H = NULL, algorithm=c("rejection", "gibbs", "gibbsR"), ...) rtmvnorm.sparseMatrix(n, mean = rep(0, nrow(H)), H = sparseMatrix(i=1:length(mean), j=1:length(mean), x=1), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), ...) } \arguments{ \item{n}{Number of random points to be sampled. 
Must be an integer \eqn{\ge 1}{>= 1}.} \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.} \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{D}{Matrix for linear constraints, defaults to diagonal matrix.} \item{H}{Precision matrix, default is \code{NULL}.} \item{algorithm}{Method used, possible methods are rejection sampling ("rejection", default), the Fortran Gibbs sampler ("gibbs") and the old Gibbs sampler implementation in R ("gibbsR").} \item{...}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvnorm.gibbs()}, such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details} } \details{ The generation of random numbers from a truncated multivariate normal distribution is done using either rejection sampling or Gibbs sampling. \bold{Rejection sampling}\cr Rejection sampling is done from the standard multivariate normal distribution. So we use the function \code{\link[mvtnorm]{rmvnorm}} of the \pkg{mvtnorm} package to generate proposals which are either accepted if they are inside the support region or rejected. In order to speed up the generation of N samples from the truncated distribution, we first calculate the acceptance rate alpha from the truncation points and then generate N/alpha samples iteratively until we have got N samples. This typically does not take more than 2-3 iterations. Rejection sampling may be very inefficient when the support region is small (i.e. in higher dimensions) which results in very low acceptance rates alpha. In this case the Gibbs sampler is preferable. \bold{Gibbs sampling}\cr The Gibbs sampler samples from univariate conditional distributions, so all samples can be accepted except for a burn-in period. 
The number of burn-in samples to be discarded can be specified, as well as a start value of the chain. If no start value is given, we determine a start value from the support region using either lower bound or upper bound if they are finite, or 0 otherwise. The Gibbs sampler has been reimplemented in Fortran 90 for performance reasons (\code{algorithm="gibbs"}). The old R implementation is still accessible through \code{algorithm="gibbsR"}. The arguments to be passed along with \code{algorithm="gibbs"} or \code{algorithm="gibbsR"} are: \describe{ \item{\code{burn.in.samples}}{number of samples in Gibbs sampling to be discarded as burn-in phase, must be non-negative.} \item{\code{start.value}}{Start value (vector of length \code{length(mean)}) for the MCMC chain. If one is specified, it must lie inside the support region (\eqn{lower <= start.value <= upper}). If none is specified, the start value is taken componentwise as the finite lower or upper boundaries respectively, or zero if both boundaries are infinite. Defaults to NULL.} \item{\code{thinning}}{Thinning factor for reducing autocorrelation of random points in Gibbs sampling. Must be an integer >= 1. We create a Markov chain of length \code{(n*thinning)} and take only those samples \code{j=1:(n*thinning)} where \code{j \%\% thinning == 0} Defaults to 1 (no thinning of the chain).} } \bold{Sampling with linear constraints}\cr We extended the method to also simulate from a multivariate normal distribution subject to general linear constraints \eqn{lower <= D x <= upper}. For general D, both rejection sampling or Gibbs sampling according to Geweke (1991) are available. \bold{Gibbs sampler and the use of the precision matrix H}\cr Why is it important to have a random sampler that works with the precision matrix? 
Especially in Bayesian and spatial statistics, there are a number of high-dimensional applications where the precision matrix \code{H} is readily available, but is sometimes nearly singular and cannot be easily inverted to sigma. Additionally, it turns out that the Gibbs sampler formulas are much simpler in terms of the precision matrix than in terms of the covariance matrix. See the details of the Gibbs sampler implementation in the package vignette or for example Geweke (2005), pp.171-172. (Thanks to Miguel Godinho de Matos from Carnegie Mellon University for pointing me to this.) Therefore, we now provide an interface for the direct use of the precision matrix \code{H} in \code{rtmvnorm()}. \bold{Gibbs sampler with sparse precision matrix H}\cr The size of the covariance matrix \code{sigma} or precision matrix \code{H} - if expressed as a dense \code{\link[base]{matrix}} - grows quadratically with the number of dimensions d. For high-dimensional problems (such as d > 5000), it is no longer efficient and appropriate to work with dense matrix representations, as one quickly runs into memory problems.\cr It is interesting to note that in many applications the precision matrix, which holds the conditional dependencies, will be sparse, whereas the covariance matrix will be dense. Hence, expressing H as a sparse matrix will significantly reduce the amount of memory to store this matrix and allows much larger problems to be handled. In the current version of the package, the precision matrix (not \code{sigma} since it will be dense in most cases) can be passed to \code{rtmvnorm.sparseMatrix()} as a \code{\link[Matrix]{sparseMatrix}} from the \code{Matrix} package. See the examples section below for a usage example. 
} \section{Warning}{ A word of caution is needed for useRs that are not familiar with Markov Chain Monte Carlo methods like Gibbs sampling: Rejection sampling is exact in the sense that we are sampling directly from the target distribution and the random samples generated are independent. So it is clearly the default method. Markov Chain Monte Carlo methods are only approximate methods, which may suffer from several problems: \itemize{ \item{Poor mixing} \item{Convergence problems} \item{Correlation among samples} } Diagnostic checks for Markov Chain Monte Carlo include trace plots, CUSUM plots and autocorrelation plots like \code{\link{acf}}. For a survey see for instance Cowles (1996). That is, consecutive samples generated from \code{rtmvnorm(..., algorithm=c("gibbs", "gibbsR"))} are correlated (see also example 3 below). One way of reducing the autocorrelation among the random samples is "thinning" the Markov chain, that is recording only a subset/subsequence of the chain. For example, one could record only every 100th sample, which clearly reduces the autocorrelation and "increases the independence". But thinning comes at the cost of higher computation times, since the chain has to run much longer. We refer to autocorrelation plots in order to determine optimal thinning. } \author{Stefan Wilhelm , Manjunath B G } \seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}, \code{\link[mvtnorm]{rmvnorm}}, \code{\link[mvtnorm]{dmvnorm}}} \references{ Alan Genz, Frank Bretz, Tetsuhisa Miwa, Xuefei Mi, Friedrich Leisch, Fabian Scheipl, Torsten Hothorn (2009). mvtnorm: Multivariate Normal and t Distributions. R package version 0.9-7. URL \url{https://CRAN.R-project.org/package=mvtnorm} Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution. 
\emph{Journal of Multivariate Analysis}, \bold{94}, 209--221 Jayesh H. Kotecha and Petar M. Djuric (1999). Gibbs Sampling Approach For Generation of Truncated Multivariate Gaussian Random Variables \emph{IEEE Computer Society}, 1757--1760 Cowles, M. and Carlin, B. (1996). Markov Chain Monte Carlo Convergence Diagnostics: A Comparative Review \emph{Journal of the American Statistical Association}, \bold{91}, 883--904 Geweke, J. F. (1991). Efficient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints \emph{Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface. Seattle Washington, April 21-24, 1991}, 571--578 Geweke, J. F. (2005). Contemporary Bayesian Econometrics and Statistics, \emph{Wiley & Sons}, pp.171--172 } \examples{ ################################################################################ # # Example 1: # rejection sampling in 2 dimensions # ################################################################################ sigma <- matrix(c(4,2,2,3), ncol=2) x <- rtmvnorm(n=500, mean=c(1,2), sigma=sigma, upper=c(1,0)) plot(x, main="samples from truncated bivariate normal distribution", xlim=c(-6,6), ylim=c(-6,6), xlab=expression(x[1]), ylab=expression(x[2])) abline(v=1, lty=3, lwd=2, col="gray") abline(h=0, lty=3, lwd=2, col="gray") ################################################################################ # # Example 2: # Gibbs sampler for 4 dimensions # ################################################################################ C <- matrix(0.8, 4, 4) diag(C) <- rep(1, 4) lower <- rep(-4, 4) upper <- rep(-1, 4) # acceptance rate alpha alpha <- pmvnorm(lower=lower, upper=upper, mean=rep(0,4), sigma=C) alpha # Gibbs sampler X1 <- rtmvnorm(n=20000, mean = rep(0,4), sigma=C, lower=lower, upper=upper, algorithm="gibbs", burn.in.samples=100) # Rejection sampling X2 <- rtmvnorm(n=5000, mean = rep(0,4), sigma=C, lower=lower, upper=upper) colMeans(X1) colMeans(X2) 
plot(density(X1[,1], from=lower[1], to=upper[1]), col="red", lwd=2, main="Kernel density estimates from random samples generated by Gibbs vs. Rejection sampling") lines(density(X2[,1], from=lower[1], to=upper[1]), col="blue", lwd=2) legend("topleft",legend=c("Gibbs Sampling","Rejection Sampling"), col=c("red","blue"), lwd=2, bty="n") ################################################################################ # # Example 3: # Autocorrelation plot for Gibbs sampler # with and without thinning # ################################################################################ sigma <- matrix(c(4,2,2,3), ncol=2) X1 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), algorithm="rejection") acf(X1) # no autocorrelation among random points X2 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), algorithm="gibbs") acf(X2) # exhibits autocorrelation among random points X3 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), algorithm="gibbs", thinning=2) acf(X3) # reduced autocorrelation among random points plot(density(X1[,1], to=1)) lines(density(X2[,1], to=1), col="blue") lines(density(X3[,1], to=1), col="red") ################################################################################ # # Example 4: Univariate case # ################################################################################ X <- rtmvnorm(100, mean=0, sigma=1, lower=-1, upper=1) ################################################################################ # # Example 5: Linear Constraints # ################################################################################ mean <- c(0, 0) sigma <- matrix(c(10, 0, 0, 1), 2, 2) # Linear Constraints # # a1 <= x1 + x2 <= b2 # a2 <= x1 - x2 <= b2 # # [ a1 ] <= [ 1 1 ] [ x1 ] <= [b1] # [ a2 ] [ 1 -1 ] [ x2 ] [b2] a <- c(-2, -2) b <- c( 2, 2) D <- matrix(c(1, 1, 1, -1), 2, 2) X <- rtmvnorm(n=10000, mean, sigma, lower=a, upper=b, D=D, algorithm="gibbsR") plot(X, main="Gibbs sampling for multivariate normal with linear 
constraints according to Geweke (1991)") # mark linear constraints as lines for (i in 1:nrow(D)) { abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") } ################################################################################ # # Example 6: Using precision matrix H rather than sigma # ################################################################################ lower <- c(-1, -1) upper <- c(1, 1) mean <- c(0.5, 0.5) sigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2) H <- solve(sigma) D <- matrix(c(1, 1, 1, -1), 2, 2) X <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbs") plot(X, main="Gibbs sampling with precision matrix and linear constraints") ################################################################################ # # Example 7: Using sparse precision matrix H in high dimensions # ################################################################################ \dontrun{ d <- 1000 I_d <- sparseMatrix(i=1:d, j=1:d, x=1) W <- sparseMatrix(i=c(1:d, 1:(d-1)), j=c(1:d, (2:d)), x=0.5) H <- t(I_d - 0.5 * W) %*% (I_d - 0.5 * W) lower <- rep(0, d) upper <- rep(2, d) # Gibbs sampler generates n=100 draws in d=1000 dimensions X <- rtmvnorm.sparseMatrix(n=100, mean = rep(0,d), H=H, lower=lower, upper=upper, burn.in.samples=100) colMeans(X) cov(X) } } \keyword{distribution} \keyword{multivariate}tmvtnorm/DESCRIPTION0000644000176200001440000000200415055376632013656 0ustar liggesusersPackage: tmvtnorm Version: 1.7 Date: 2025-09-01 Title: Truncated Multivariate Normal and Student t Distribution Authors@R: c( person("Stefan", "Wilhelm", email = "wilhelm@financial.com", role = c("aut", "cre")), person("Manjunath", "B G", email = "bgmanjunath@gmail.com", role = "aut") ) Imports: stats, methods Depends: R (>= 1.9.0), mvtnorm, utils, Matrix, stats4, gmm Encoding: UTF-8 Suggests: lattice, rgl Description: Random number generation for the truncated multivariate normal and Student t 
distribution. Computes probabilities, quantiles and densities, including one-dimensional and bivariate marginal densities. Computes first and second moments (i.e. mean and covariance matrix) for the double-truncated multinormal case. License: GPL (>= 2) URL: https://www.r-project.org NeedsCompilation: yes Packaged: 2025-09-01 18:43:33 UTC; stefan Author: Stefan Wilhelm [aut, cre], Manjunath B G [aut] Maintainer: Stefan Wilhelm Repository: CRAN Date/Publication: 2025-09-01 20:10:02 UTC