markovchain/0000755000176200001440000000000014050525163012555 5ustar liggesusersmarkovchain/NAMESPACE0000644000176200001440000000525314050513016013773 0ustar liggesusers# Generated by roxygen2: do not edit by hand export("name<-") export(ExpectedTime) export(absorptionProbabilities) export(assessOrder) export(assessStationarity) export(committorAB) export(createSequenceMatrix) export(ctmcFit) export(expectedRewards) export(expectedRewardsBeforeHittingA) export(firstPassage) export(firstPassageMultiple) export(fitHighOrderMultivarMC) export(fitHigherOrder) export(freq2Generator) export(generatorToTransitionMatrix) export(impreciseProbabilityatT) export(inferHyperparam) export(is.CTMCirreducible) export(is.TimeReversible) export(markovchainFit) export(markovchainListFit) export(markovchainSequence) export(meanAbsorptionTime) export(meanFirstPassageTime) export(meanRecurrenceTime) export(multinomialConfidenceIntervals) export(name) export(noofVisitsDist) export(period) export(predictHommc) export(predictiveDistribution) export(priorDistribution) export(probabilityatT) export(rctmc) export(rmarkovchain) export(seq2freqProb) export(seq2matHigh) export(states) export(transition2Generator) export(verifyEmpiricalToTheoretical) export(verifyHomogeneity) export(verifyMarkovProperty) exportClasses(HigherOrderMarkovChain) exportClasses(ctmc) exportClasses(hommc) exportClasses(markovchain) exportClasses(markovchainList) exportMethods("!=") exportMethods("*") exportMethods("==") exportMethods("[") exportMethods("[[") exportMethods("^") exportMethods(absorbingStates) exportMethods(canonicForm) exportMethods(coerce) exportMethods(communicatingClasses) exportMethods(conditionalDistribution) exportMethods(dim) exportMethods(hittingProbabilities) exportMethods(is.accessible) exportMethods(is.irreducible) exportMethods(is.regular) exportMethods(meanNumVisits) exportMethods(plot) exportMethods(predict) exportMethods(print) exportMethods(recurrentClasses) exportMethods(recurrentStates) exportMethods(show) exportMethods(steadyStates) exportMethods(summary) exportMethods(t) exportMethods(transientClasses) exportMethods(transientStates) exportMethods(transitionProbability) import(Matrix) import(igraph) import(methods) import(parallel) importFrom(Rcpp,evalCpp) importFrom(RcppParallel,RcppParallelLibs) importFrom(expm,"%^%") importFrom(expm,logm) importFrom(grDevices,colors) importFrom(matlab,eye) importFrom(matlab,find) importFrom(matlab,ones) importFrom(matlab,size) importFrom(matlab,zeros) importFrom(stats,aggregate) importFrom(stats,chisq.test) importFrom(stats,pchisq) importFrom(stats,predict) importFrom(stats,rexp) importFrom(stats,sd) importFrom(stats4,plot) importFrom(stats4,summary) importFrom(utils,packageDescription) useDynLib(markovchain, .registration = TRUE) markovchain/demo/0000755000176200001440000000000013762012754013507 5ustar liggesusersmarkovchain/demo/computationTime.R0000644000176200001440000000111213762012754017006 0ustar liggesuserslibrary(microbenchmark) library(markovchain) #using the rain data sequence data(rain) rainSequence<-rain$rain #choosing different sample size sizes<-c(10,50,100,250,500,1096) #estimating microseconds microseconds<-numeric(length(sizes)) for(i in 1:length(sizes)) { mydim<-sizes[i] mysequence<-rainSequence[1:mydim] out<-microbenchmark( myFit<-markovchainFit(data=mysequence) ) microseconds[i]<-mean(out$time) } #plot(sizes, microseconds,type="o",col="steelblue",xlab="character sequence size",ylab="microseconds",main="Computational time vs size") 
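# A minimal follow-up sketch, not part of the original demo: collect the sample
# sizes and the mean timings recorded above in a data frame, so the growth of
# fitting time with sequence length can be inspected (or plotted) directly.
# The objects `sizes` and `microseconds` are the vectors filled in the loop
# above; the names `timings` and `meanTime` below are illustrative only and are
# not used elsewhere in the package.
timings <- data.frame(size = sizes, meanTime = microseconds)
print(timings)
# A rough scaling check could then be, for example:
# summary(lm(meanTime ~ size, data = timings))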
markovchain/demo/quickStart.R0000644000176200001440000000301113762012754015757 0ustar liggesusers# TODO: Add comment # # Author: Giorgio Spedicato ############################################################################### #creates some markovchain objects statesNames=c("a","b") mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames) )) mcB<-new("markovchain", states=c("a","b","c"), transitionMatrix= matrix(c(0.2,0.5,0.3, 0,1,0, 0.1,0.8,0.1),nrow=3, byrow=TRUE)) mcC<-new("markovchain", states=c("a","b","c","d"), transitionMatrix=matrix(c(0.25,0.75,0,0,0.4, 0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) ) mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #apply some methods testConversion<-as(mcC, "data.frame") markovD<-t(mcC) steadyStates(mcC) steadyStates(mcC) #perform some fitting sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit<-markovchainFit(data=sequence,byrow=FALSE) #canonic form #from https://math.dartmouth.edu/archive/m20x06/public_html/Lecture14.pdf P<-zeros(5) P[1,1]<-P[5,5]<-1 P[2,1]<-P[2,3]<-0.5 P[3,2]<-P[3,4]<-0.5 P[4,3]<-P[4,5]<-0.5 mcP<-as(P,"markovchain") mcPCan<-canonicForm(mcP) # coercing markov chains to sparse matrix forth and back require(Matrix) ciauz<-c(0,.5,.5,1,0,0,.2,0,.8) matrix(ciauz, nrow=3, byrow=TRUE) sparse<-as(matrix(ciauz, nrow=3, byrow=TRUE),"sparseMatrix") mc<-as(sparse,"markovchain") sparse2<-as(mc,"sparseMatrix") markovchain/demo/bard.R0000644000176200001440000000213513762012754014543 0ustar liggesusers#Bard PPT examples #Page 8 require(matlab) statesNames=as.character(0:3) pg8<-zeros(4) pg8[1,c(2,3)]<-0.5 pg8[2,1]<-1 pg8[3,4]<-1 pg8[4,c(1,4)]<-0.5 pg8Mc<-new("markovchain",transitionMatrix=pg8,states=statesNames, name="Page 8") summary(pg8Mc) #Page 9 statesNames=as.character(0:4) pg9<-zeros(5) pg9[c(1,2),c(1,2)]<-0.5 pg9[3,3]<-1 pg9[4,c(3,4)]<-1/2 pg9[5,1]<-1 pg9Mc<-new("markovchain", transitionMatrix=pg9, states=statesNames, name="Page 9") summary(pg9Mc) #Page 10 statesNames=as.character(0:3) pg10<-zeros(4) pg10[1,c(2,3)]<-1/2 pg10[c(2,3),4]<-1 pg10[4,1]<-1 pg10Mc<-new("markovchain", transitionMatrix=pg10, states=statesNames, name="Page 10") summary(pg10Mc) #Page 11 statesNames=as.character(1:5) pg11<-zeros(5) pg11[1,c(1,2)]<-c(0.4,0.6) pg11[2,c(1,2)]<-c(0.5,0.5) pg11[3,c(3,4)]<-c(0.3,0.7) pg11[4,c(3,4,5)]<-c(5,4,1)/10 pg11[5,c(4,5)]<-c(0.8,0.2) pg11Mc<-new("markovchain", transitionMatrix=pg11, states=statesNames, name="Page 10") summary(pg11Mc) markovchain/demo/extractMatrices.R0000644000176200001440000000134713762012754017001 0ustar liggesusers#function to extract matrices extractMatrices <- function(mcObj) { require(matlab) mcObj <- canonicForm(object = mcObj) #get the indices of transient and absorbing transIdx <- which(states(mcObj) %in% transientStates(mcObj)) absIdx <- which(states(mcObj) %in% absorbingStates(mcObj)) #get Q, R and I Q <- as.matrix(mcObj@transitionMatrix[transIdx,transIdx]) R <- as.matrix(mcObj@transitionMatrix[transIdx,absIdx]) I <- as.matrix(mcObj@transitionMatrix[absIdx, absIdx]) #get fundamental matrix N <- solve(eye(size(Q)) - Q) #final absorbion probabilities NR <- N %*% R #return out <- list( canonicalForm = mcObj, Q = Q, R = R, I = I, N=N, NR=NR ) return(out) }markovchain/demo/quasiBayesian.R0000644000176200001440000000131513762012754016430 0ustar liggesusers#PSEUDO BAYESIAN ESTIMATOR getAlphaVector <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- 
numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) }markovchain/demo/mathematica.R0000644000176200001440000000232613762012754016112 0ustar liggesusers#STRUCTURAL PROPRIETIES #see mathematica 9 mathematicaAMatr <- matrix(c(0, 1/3, 0, 2/3, 0, 1/2, 0, 0, 0, 1/2, 0, 0, 1/2, 1/2, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0, 0, 1), byrow = TRUE, nrow=5) #should behave like #http://www.wolfram.com/mathematica/new-in-9/markov-chains-and-queues/structural-properties-of-finite-markov-processes.html mathematicaAMc<-as(mathematicaAMatr, "markovchain") summary(mathematicaAMc) canonicForm(mathematicaAMc) is.irreducible(mathematicaAMc) transientStates(mathematicaAMc) absorbingStates(mathematicaAMc) communicatingClasses(mathematicaAMc) recurrentClasses(mathematicaAMc) #first passage time #should behave like #http://www.wolfram.com/mathematica/new-in-9/markov-chains-and-queues/distribution-of-times-to-reach-a-target-state.html mathematicaBMatr= matrix(c( 0, 1/2, 1/2, 1/2, 0, 1/2, 1/2, 1/2, 0),byrow=TRUE, nrow=3) ; mathematicabMc<-as(mathematicaBMatr, "markovchain") firstPassage(mathematicabMc, "s3",9) #if you repeat this more thime results change. No good communicatingClasses(mathematicabMc) recurrentClasses(mathematicabMc)markovchain/demo/examples.R0000644000176200001440000000122213762012754015445 0ustar liggesusers#up and down markov chains mcUpDown<-new("markovchain", states=c("up","down"), transitionMatrix=matrix(c(0,1,1,0),nrow=2, byrow=TRUE),name="UpDown") #gamblers ruin gamblerRuinMarkovChain<-function(moneyMax, prob=0.5) { require(matlab) matr<-zeros(moneyMax+1) states<-as.character(seq(from=0, to=moneyMax, by=1)) rownames(matr)=states; colnames(matr)=states matr[1,1]=1;matr[moneyMax+1,moneyMax+1]=1 for(i in 2:moneyMax) { matr[i,i-1]=1-prob;matr[i,i+1]=prob } out<-new("markovchain", transitionMatrix=matr, name=paste("Gambler ruin",moneyMax,"dim",sep=" ") ) return(out) }markovchain/demo/00Index0000644000176200001440000000074513762012754014647 0ustar liggesusersquickStart Quick start for markovchain package examples Various examples of Markov Chain bard Structural analysis of Markov chains from Bard PPT extractMatrices Given a matrix, returns the canonical form, fundamental matrix etc... 
mathematica Mathematica Markov Chains reliability Simulate reliability in inverting a markovchain quasiBayesian Quasi Bayesian estimator function computationTime Computation time by size of the character sequence markovchain/demo/reliability.R0000644000176200001440000001133613762012754016147 0ustar liggesusers#load required libraries library(parallel) require(MCMCpack) require(markovchain) dimensions2Test<-2:32 numSim=10000 #helper function to create a random stochastic matrix createMatrix<-function(matr_size) { out<-matrix(0, nrow=matr_size, ncol = matr_size) for (i in 1:matr_size) { priors.dirichlet<-runif(n=matr_size) myStochasticRow<-rdirichlet(n=1,alpha=priors.dirichlet) out[i,]<-myStochasticRow } return(out) } createSparseMatrix<-function(matr_size, sparsity=0.75){ out <- matrix(0, nrow=matr_size, ncol = matr_size) nonzeroitems<-ceiling(matr_size*(1-sparsity)) for (i in 1:matr_size) { priors.dirichlet<-runif(n=nonzeroitems) myStochasticRow<-rdirichlet(n=1,alpha=priors.dirichlet) columnsPositions<-sample(x = 1:matr_size,size = nonzeroitems,replace = FALSE) out[i,columnsPositions]<-myStochasticRow } return(out) } #first test: function to simulate the inversion of a matrix of size num checkInversion<-function(i,num){ #simulate the priors myStochasticMatrix<-createMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch(steadyStates(as(myStochasticMatrix, "markovchain")), error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } checkSparseMInversion<-function(i,num){ #simulate the priors myStochasticMatrix<-createSparseMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch(steadyStates(as(myStochasticMatrix, "markovchain")), error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } #performing the simulation successRate<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkInversion") clusterExport(cl, "createMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkInversion,num=dimension) successRate[k]<-mean(simulations) k=k+1 } stopCluster(cl) #summarising first test: #plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate") #abline(h=0.5,col="red") #text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) #dev.off() dimensions2Test = 2^seq(from=3, to=8) successRate<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkSparseMInversion") clusterExport(cl, "createSparseMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkSparseMInversion,num=dimension) successRate[k]<-mean(simulations) k=k+1 } stopCluster(cl) # # # plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate ??? 
sparse matrices") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) #second test: simulating exponentiation checkExponentiation<-function(i,num){ #simulate the priors myStochasticMatrix<-createMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch((as(myStochasticMatrix, "markovchain"))^2, error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } #performing the simulation successRate2<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkExponentiation") clusterExport(cl, "createMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkExponentiation,num=dimension) successRate2[k]<-mean(simulations) k=k+1 } stopCluster(cl) #summarising first test: #par(mfrow=c(1,2)) # plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) # plot(x=dimensions2Test,y=successRate2,type="l",xlab="matrix sixe",ylab="success rate",main="Exponentiation computation success rate") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate2,labels=round(successRate2,digits=2),col="darkred",cex=0.7) markovchain/ChangeLog0000644000176200001440000001317214050510601014322 0ustar liggesusers2021-05-7 0.8.6 Fix a bug in markovchainListFit that made confusion between lists and data.frames 2020-12-04 0.8.5-2 Fixing unavailable software issues and language glitches 2020-09-21 0.8.5-1 Coping with etm unavailability 2020-05-21 0.8.5 Fixed DoF in verify markov property and supported input in compare teorethical 2020-05-04 0.8.4.1 Fixed presentation 2020-03-16 0.8.4 Limiting output lines in vignettes. 2020-03-15 0.8.3 Add small changes in code to cope with upcoming R 4.0.0 (stringsAsFactor=TRUE in data.frame). 2019-12-10 0.8.2 Add small changes in code to cope with upcoming R 4.0.0 (no more check class(x)=='matrix') as well as packages' unavailable. 2019-08-13 0.7.0 * Improves performance and refactors `communicatingClasses`, `recurrentClasses`, `transientStates`, `is.irreducible`, `canonicForm`, `summary` and `steadyStates` methods, porting them to C++ whenever possible and improving the algorithmic complexity of the code. * Solves a bug with `steadyStates` method. * Adds the methods `recurrentStates` and `transientClasses`. * Makes the aforementioned methods work on by column Markov chains. * Improves tests, adding checking of mathematical structural properties and hundreds of random test cases. 
* Fixes documentation for `roxygen` and NAMESPACE file for automatic generation using `devtools::document()` * Bumps Ignacio Cordón as author (ORCID included) 2019-07-01 0.6.9.15 Fixed confidence interval calculation: true confidence intervals are now 1-(1-confidence_interval)/2 Various code refactoring 09-12-2018 0.6.9.14 Added plot from MmgraphR Added meanFirstPassageTime (thanks to Toni Giorgino) Add orcid Add more warning to Statistical Inference Functions 12-08-2018 0.6.9.12 Improved Rcpp performance as suggested by https://www.r-bloggers.com/boost-the-speed-of-r-calls-from-rcpp/ 20-04-2018 0.6.9.9 Fixed typo in vignette MAP method now works also with lists (issue #141) Fix valgrid error 14-08-2017 0.6.9.8-1 Added is.TimeReversible function added gm_to_markovchain example 10-07-2017 0.6.9.5 Added empirical bayesian estimate Various additions from GSOC 2017 (see the new vignette) 31-03-2014 Version 0.6.9 Added sort method Revised numeric tolerance when creating markovchains Added suggestion for which row to fix 16-03-2017 Version 0.6.8 Deep restructuring of statistical tests Add parameter confint to markovchainFit Fixed bug in markovchainFitList Handling of NA 02-02-2017 Version 0.6.6.2 Add parameter confint to markovchainFit 27-01-2017 Version 0.6.6.1 Fixing bug in markovchainListFit 22-01-2017 markovchainFit accepts an uneven list now Added confidence intervals when markovchainFit is given a matrix 08-12-2016 Added patch to divergence test 20-08-2016 Fully parallelized bootstrapped markovchain fit 08-08-2016 Version 0.6 Added multivariate higher order markov chains Better handlign of steady state analysis on non - recurrent Markov Chains Fixed an error in the igraph conversion 08-07-2016 Fixed C++ 11 variables types 24-06-2016 Version 0.4.5 Speeding up rmarkovchain using parallel and RcppParallel library. 14-06-2016 Version 0.4.4.4 Bug fixed for markovchainFit when method = bootstrap 09-06-2016 Version 0.4.4.2 added sanitize=false paramter to markovchainFit 31-05-2016 Version 0.4.4.1 Improvement of the internal method checkSequence. name method to set and get the names of markovchain object. 10-05-2016 Version 0.4.4 rmarkovchain in RCpp (thanks to Deepak and GSOC 2016) Various small fixes 05-03-2016 Version 0.4.3.1 fixed a bug in the states classification added options to save output of random sampler in a matrix 10-10-2015 Version 0.4.3 fixed an error in plot function 08-07-2015 Version 0.3.1 Period to Rcpp (thanks to TAE) communicatingClasses and recurrentClasses (thanks to TAE) Various optimization (thanks to TAE) Initial support for Continuous Time Markov Chains (thanks to SAI) Added new methods: names, != 15-06-2015 Version 0.3 Added a CrashIntro vignette Most probability function rewritten in Rcpp Added standard errors and confidence intervals for MLE (thanks to Tae) Added confidence intervals for bootstap (thanks to Tae) Added bayesian Maximum A Posteriori estimation (thanks to Sai) 12-05-2015 Version 0.2.1 Fixed a compatibility issue with R 3 development 12-04-2015 Version 0.2 This is a milestone for markovchain package, since the package project has been selected within the funded GSOC 2015 projects. Thanks to Tae support now the fitting functions have been rewritten in Rcpp. 
20-03-2015 Version 0.1.3 Fastened the firstpassage time code thanks to Thoralf suggestion 01-03-2015 Version 0.1.2 Add GitHub project url 17-02-2015 Version 0.1.1 Fasten markovchain sequence thanks to Mildenberger Thoralf suggesti 04-01-2015 Version 0.1.0 It is now possible to fit a markovchain and a markovchainList object from a matrix or data.frame Updated vignettes Added tests 21-06-2014 Version 0.0.9.5 Updated vignettes Added a method to convert a square matrix into a markovchain object. 20-04-2014 Version 0.0.9 Updated vignette Added parallel processing for bootstrap estimation 09-02-2014 Version 0.0.8 Minor vignette enhancements Added function to find period of a DTMC 12-01-2014 Version 0.0.7 Deeply improved vignettes Added predict and summary methods Added function to perform probabilistic analysis 31-12-2013 Version 0.0.5 Improved vignettes Add predict methods Add methods for transitory states 04-11-2013 Version 0.0.3 Added various method to easily handle markovchain and markovchainList objects Implemented rmarkovchain and bootstrap fit Improved vignettes markovchain/README.md0000644000176200001440000000145713762012754014051 0ustar liggesusers# markovchain R package providing classes, methods and function for easily handling Discrete Time Markov Chains (DTMC), performing probabilistic analysis and fitting. ## Install the current release from CRAN: ```r install.packages('markovchain') ``` ## Install the development version from GitHub: ```r devtools::install_github('spedygiorgio/markovchain') ``` ![alt tag](https://travis-ci.org/spedygiorgio/markovchain.svg?branch=master) [![Downloads](http://cranlogs.r-pkg.org/badges/markovchain)](https://cran.r-project.org/package=markovchain) [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/markovchain)](https://cran.r-project.org/package=markovchain) [![Research software impact](http://depsy.org/api/package/cran/markovchain/badge.svg)](http://depsy.org/package/r/markovchain) markovchain/data/0000755000176200001440000000000013762012754013474 5ustar liggesusersmarkovchain/data/kullback.rda0000644000176200001440000000040314050513461015741 0ustar liggesusers͑M@+&:QfR<`VnH 3xONFw1_WTU~'`x\ȌǚI2f1t5 1 /%z 5Mpg9]nwZӮh>&Z/֤̹Y1bf܊g_Fus؇OBqsN >{ױ"c hl30ߙy\n markovchain/data/preproglucacon.rda0000644000176200001440000000210614050513461017176 0ustar liggesusersBZh91AY&SYe'!3H0,@/ހ@!B*jia00& @䒥奴DV^TKA{Y$#2"<33Ȣ)5fF n_ -ȕ\M4cHbuMδ:8:ˍZ&ct،kF Nэa94p5lښU"UQƭ8!Ʋc,ur7FcJHukV ^ri妍81f " @n,-&(hC^M3{ߗշwl  kg6y(!<9oS Yg}{TR~@Uߨ?@T?TR?TRT 誁!USUGꦍ4@PJGF`$U*  #Ld&F&CFeTThdUh 4SUTFi416 Mo#FLOQw)KGODDDDDDDDDDDDDDDDDDDDDDc\  .\?K1_|_UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUV7c7sCn}V[qmZ-ֵX6kLhٛ,֍I(6ճ(lY4b*LjUPj K#S6lZ-,Ve "-Ȳ hɶ-RƓRYl Clf&VX1ZM6[ljحhȒfAXSm[[++XiLX4Ōmb6"bH*HєL6U#&,&EQcJ&LYK+dVaJLMjS%Z)+bIDZ,j1fdZ5&ѵM%mZٌbM6 5hQcb&Z6э%m&55RZH3%ECVȲbD&Ͳ%5X,ilb2$bl͓b4m"Yi[(L5l2JJ`Բ5haU ֱBZelj#*$j60) eE)i(,QeAL-bZLeZViQjJDe+5hC5i*+ȋ`eJMXlZ%e&ĊbCRd4E&+ViEM 4!mK6lIAZ-5MBj!RbLƦ-eIFFJFbSfVFkeK+iaE %RTXV5EBسF4[dQfEDFbLVj&bIm!*mccQX4Ph2Vi $Ikkmɵ%2kL6VIhѶ[5T+V 6Ab`RV*+&c2YQj*+mSYP2$͖A,dDZih̵̵0آLFƶ4""Vh RidĆ6f6[dRգ*Ųh3PRYIM`DVldͨ٥A)[4ԉJ!F @.u { {i}fvwpG&V?.<,dGBލ.S#;=Cw//^g`~_@&'PlHF(ii&RJ cLFX ZϱvSh!yc jr\ Z).Ny*B%- 2eOOB 2̣sS*eLUrpY-1^Uw=q/&ʣK|QM_RHET !8dYD;eGwϽ}G;!<99xSCrE"ǝ'$>sZ\@xLw7|(##iE9]\IypT Р"rY#P.\p5;wZ#삧=yU8=}>rQr>끢DH_9}ːrT[KaWguY] ۜm9?>=8 ! 
!"BH7c` V>X7H2rE8Pqmarkovchain/man/0000755000176200001440000000000013762012754013336 5ustar liggesusersmarkovchain/man/setName.Rd0000644000176200001440000000156113762012754015224 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{name<-} \alias{name<-} \alias{name<-,markovchain-method} \title{Method to set name of markovchain object} \usage{ name(object) <- value \S4method{name}{markovchain}(object) <- value } \arguments{ \item{object}{A markovchain object} \item{value}{New name of markovchain object} } \description{ This method modifies the existing name of markovchain object } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) name(markovB) <- "dangerous mc" } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/holson.Rd0000644000176200001440000000236013762012754015130 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{holson} \alias{holson} \title{Holson data set} \format{ A data frame with 1000 observations on the following 12 variables. \describe{ \item{\code{id}}{unique id} \item{\code{time1}}{observed status at i-th time} \item{\code{time2}}{observed status at i-th time} \item{\code{time3}}{observed status at i-th time} \item{\code{time4}}{observed status at i-th time} \item{\code{time5}}{observed status at i-th time} \item{\code{time6}}{observed status at i-th time} \item{\code{time7}}{observed status at i-th time} \item{\code{time8}}{observed status at i-th time} \item{\code{time9}}{observed status at i-th time} \item{\code{time10}}{observed status at i-th time} \item{\code{time11}}{observed status at i-th time} } } \source{ Private communications } \usage{ data(holson) } \description{ A data set containing 1000 life histories trajectories and a categorical status (1,2,3) observed on eleven evenly spaced steps. } \details{ The example can be used to fit a \code{markovchain} or a \code{markovchainList} object. } \examples{ data(holson) head(holson) } \references{ Private communications } \keyword{datasets} markovchain/man/firstPassageMultiple.Rd0000644000176200001440000000236713762012754020004 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{firstPassageMultiple} \alias{firstPassageMultiple} \title{function to calculate first passage probabilities} \usage{ firstPassageMultiple(object, state, set, n) } \arguments{ \item{object}{a markovchain-class object} \item{state}{intital state of the process (charactervector)} \item{set}{set of states A, first passage of which is to be calculated} \item{n}{Number of rows on which compute the distribution} } \value{ A vector of size n showing the first time proabilities } \description{ The function calculates first passage probability for a subset of states given an initial state. 
} \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) firstPassageMultiple(markovB,"a",c("b","c"),4) } \references{ Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains; MIT OCW, course - 6.262, Discrete Stochastic Processes, course-notes, chap -05 } \seealso{ \code{\link{firstPassage}} } \author{ Vandit Jain } markovchain/man/hommc-class.Rd0000644000176200001440000000254413762012754016040 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \docType{class} \name{hommc-class} \alias{hommc-class} \alias{hommc} \title{An S4 class for representing High Order Multivariate Markovchain (HOMMC)} \description{ An S4 class for representing High Order Multivariate Markovchain (HOMMC) } \section{Slots}{ \describe{ \item{\code{order}}{an integer equal to order of Multivariate Markovchain} \item{\code{states}}{a vector of states present in the HOMMC model} \item{\code{P}}{array of transition matrices} \item{\code{Lambda}}{a vector which stores the weightage of each transition matrices in P} \item{\code{byrow}}{if FALSE each column sum of transition matrix is 1 else row sum = 1} \item{\code{name}}{a name given to hommc} }} \examples{ statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/meanFirstPassageTime.Rd0000644000176200001440000000354313762012754017705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanFirstPassageTime} \alias{meanFirstPassageTime} \title{Mean First Passage Time for irreducible Markov chains} \usage{ meanFirstPassageTime(object, destination) } \arguments{ \item{object}{the markovchain object} \item{destination}{a character vector representing the states respect to which we want to compute the mean first passage time. Empty by default} } \value{ a Matrix of the same size with the average first passage times if destination is empty, a vector if destination is not } \description{ Given an irreducible (ergodic) markovchain object, this function calculates the expected number of steps to reach other states } \details{ For an ergodic Markov chain it computes: \itemize{ \item If destination is empty, the average first time (in steps) that takes the Markov chain to go from initial state i to j. (i, j) represents that value in case the Markov chain is given row-wise, (j, i) in case it is given col-wise. 
\item If destination is not empty, the average time it takes us from the remaining states to reach the states in \code{destination} } } \examples{ m <- matrix(1 / 10 * c(6,3,1, 2,3,5, 4,1,5), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) meanFirstPassageTime(mc, "r") # Grinstead and Snell's "Oz weather" worked out example mOz <- matrix(c(2,1,1, 2,0,2, 1,1,2)/4, ncol = 3, byrow = TRUE) mcOz <- new("markovchain", states = c("s", "c", "r"), transitionMatrix = mOz) meanFirstPassageTime(mcOz) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. } \author{ Toni Giorgino, Ignacio Cordón } markovchain/man/firstPassage.Rd0000644000176200001440000000205313762012754016260 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{firstPassage} \alias{firstPassage} \title{First passage across states} \usage{ firstPassage(object, state, n) } \arguments{ \item{object}{A \code{markovchain} object} \item{state}{Initial state} \item{n}{Number of rows on which compute the distribution} } \value{ A matrix of size 1:n x number of states showing the probability of the first time of passage in states to be exactly the number in the row. } \description{ This function compute the first passage probability in states } \details{ Based on Feres' Matlab listings } \examples{ simpleMc <- new("markovchain", states = c("a", "b"), transitionMatrix = matrix(c(0.4, 0.6, .3, .7), nrow = 2, byrow = TRUE)) firstPassage(simpleMc, "b", 20) } \references{ Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains } \seealso{ \code{\link{conditionalDistribution}} } \author{ Giorgio Spedicato } markovchain/man/names.Rd0000644000176200001440000000061213762012754014727 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{names,markovchain-method} \alias{names,markovchain-method} \title{Returns the states for a Markov chain object} \usage{ \S4method{names}{markovchain}(x) } \arguments{ \item{x}{object we want to return states for} } \description{ Returns the states for a Markov chain object } markovchain/man/structuralAnalysis.Rd0000644000176200001440000000630313762012754017543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R, R/probabilistic.R \name{period} \alias{period} \alias{communicatingClasses} \alias{transientStates} \alias{recurrentStates} \alias{absorbingStates} \alias{transientClasses} \alias{recurrentClasses} \alias{canonicForm} \title{Various function to perform structural analysis of DTMC} \usage{ period(object) communicatingClasses(object) recurrentClasses(object) transientClasses(object) transientStates(object) recurrentStates(object) absorbingStates(object) canonicForm(object) } \arguments{ \item{object}{A \code{markovchain} object.} } \value{ \describe{ \item{\code{period}}{returns a integer number corresponding to the periodicity of the Markov chain (if it is irreducible)} \item{\code{absorbingStates}}{returns a character vector with the names of the absorbing states in the Markov chain} \item{\code{communicatingClasses}}{returns a list in which each slot contains the names of the states that are in that communicating class} \item{\code{recurrentClasses}}{analogously to \code{communicatingClasses}, but with recurrent classes} \item{\code{transientClasses}}{analogously to \code{communicatingClasses}, but with 
transient classes} \item{\code{transientStates}}{returns a character vector with all the transient states for the Markov chain} \item{\code{recurrentStates}}{returns a character vector with all the recurrent states for the Markov chain} \item{\code{canonicForm}}{returns the Markov chain reordered by a permutation of states so that we have blocks submatrices for each of the recurrent classes and a collection of rows in the end for the transient states} } } \description{ These functions return absorbing and transient states of the \code{markovchain} objects. } \examples{ statesNames <- c("a", "b", "c") mc <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames)) ) communicatingClasses(mc) recurrentClasses(mc) recurrentClasses(mc) absorbingStates(mc) transientStates(mc) recurrentStates(mc) canonicForm(mc) # periodicity analysis A <- matrix(c(0, 1, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0, 1, 0), nrow = 4, ncol = 4, byrow = TRUE) mcA <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = A, name = "A") is.irreducible(mcA) #true period(mcA) #2 # periodicity analysis B <- matrix(c(0, 0, 1/2, 1/4, 1/4, 0, 0, 0, 0, 1/3, 0, 2/3, 0, 0, 0, 0, 0, 0, 0, 1/3, 2/3, 0, 0, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0, 0, 3/4, 1/4, 1/2, 1/2, 0, 0, 0, 0, 0, 1/4, 3/4, 0, 0, 0, 0, 0), byrow = TRUE, ncol = 7) mcB <- new("markovchain", transitionMatrix = B) period(mcB) } \references{ Feres, Matlab listing for markov chain. } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Alfredo Spedicato, Ignacio Cordón } markovchain/man/rain.Rd0000644000176200001440000000131313762012754014554 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{rain} \alias{rain} \title{Alofi island daily rainfall} \format{ A data frame with 1096 observations on the following 2 variables. \describe{ \item{\code{V1}}{a numeric vector, showing original coding} \item{\code{rain}}{a character vector, showing daily rainfall millilitres brackets} } } \source{ Avery Henderson } \usage{ data(rain) } \description{ Rainfall measured in Alofi Island } \examples{ data(rain) rainMc<-markovchainFit(data=rain$rain) } \references{ Avery Henderson, Fitting markov chain models on discrete time series such as DNA sequences } \keyword{datasets} markovchain/man/is.TimeReversible.Rd0000644000176200001440000000201413762012754017155 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{is.TimeReversible} \alias{is.TimeReversible} \title{checks if ctmc object is time reversible} \usage{ is.TimeReversible(ctmc) } \arguments{ \item{ctmc}{a ctmc-class object} } \value{ Returns a boolean value stating whether ctmc object is time reversible a boolean value as described above } \description{ The function returns checks if provided function is time reversible } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) } \references{ INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. 
DOBROW, Wiley } \author{ Vandit Jain } markovchain/man/freq2Generator.Rd0000644000176200001440000000241113762012754016511 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{freq2Generator} \alias{freq2Generator} \title{Returns a generator matrix corresponding to frequency matrix} \usage{ freq2Generator(P, t = 1, method = "QO", logmethod = "Eigen") } \arguments{ \item{P}{relative frequency matrix} \item{t}{(default value = 1)} \item{method}{one among "QO"(Quasi optimaisation), "WA"(weighted adjustment), "DA"(diagonal adjustment)} \item{logmethod}{method for computation of matrx algorithm (by default : Eigen)} } \value{ returns a generator matix with same dimnames } \description{ The function provides interface to calculate generator matrix corresponding to a frequency matrix and time taken } \examples{ sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) data(tm_abs) tm_rel=rbind((tm_abs/rowSums(tm_abs))[1:7,],c(rep(0,7),1)) ## Derive quasi optimization generator matrix estimate freq2Generator(tm_rel,1) } \references{ E. Kreinin and M. Sidelnikova: Regularization Algorithms for Transition Matrices. Algo Research Quarterly 4(1):23-40, 2001 } markovchain/man/preproglucacon.Rd0000644000176200001440000000152213762012754016650 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{preproglucacon} \alias{preproglucacon} \title{Preprogluccacon DNA protein bases sequences} \format{ A data frame with 1572 observations on the following 2 variables. \describe{ \item{\code{V1}}{a numeric vector, showing original coding} \item{\code{preproglucacon}}{a character vector, showing initial of DNA bases (Adenine, Cytosine, Guanine, Thymine)} } } \source{ Avery Henderson } \usage{ data(preproglucacon) } \description{ Sequence of bases for preproglucacon DNA protein } \examples{ data(preproglucacon) preproglucaconMc<-markovchainFit(data=preproglucacon$preproglucacon) } \references{ Averuy Henderson, Fitting markov chain models on discrete time series such as DNA sequences } \keyword{datasets} markovchain/man/impreciseProbabilityatT.Rd0000644000176200001440000000217213762012754020461 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{impreciseProbabilityatT} \alias{impreciseProbabilityatT} \title{Calculating full conditional probability using lower rate transition matrix} \usage{ impreciseProbabilityatT(C,i,t,s,error,useRCpp) } \arguments{ \item{C}{a ictmc class object} \item{i}{initial state at time t} \item{t}{initial time t. Default value = 0} \item{s}{final time} \item{error}{error rate. 
Default value = 0.001} \item{useRCpp}{logical whether to use RCpp implementation; by default TRUE} } \description{ This function calculates full conditional probability at given time s using lower rate transition matrix } \examples{ states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) } \references{ Imprecise Continuous-Time Markov Chains, Thomas Krak et al., 2016 } \author{ Vandit Jain } markovchain/man/craigsendi.Rd0000644000176200001440000000165213762012754015741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{craigsendi} \alias{craigsendi} \title{CD4 cells counts on HIV Infects between zero and six month} \format{ The format is: table [1:3, 1:3] 682 154 19 33 64 19 25 47 43 - attr(*, "dimnames")=List of 2 ..$ : chr [1:3] "0-49" "50-74" "75-UP" ..$ : chr [1:3] "0-49" "50-74" "75-UP" } \source{ Estimation of the transition matrix of a discrete time Markov chain, Bruce A. Craig and Peter P. Sendi, Health Economics 11, 2002. } \usage{ data(craigsendi) } \description{ This is the table shown in Craig and Sendi paper showing zero and six month CD4 cells count in six brakets } \details{ Rows represent counts at the beginning, cols represent counts after six months. } \examples{ data(craigsendi) csMc<-as(craigsendi, "markovchain") steadyStates(csMc) } \references{ see source } \keyword{datasets} markovchain/man/ExpectedTime.Rd0000644000176200001440000000206613762012754016211 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{ExpectedTime} \alias{ExpectedTime} \title{Returns expected hitting time from state i to state j} \usage{ ExpectedTime(C,i,j,useRCpp) } \arguments{ \item{C}{A CTMC S4 object} \item{i}{Initial state i} \item{j}{Final state j} \item{useRCpp}{logical whether to use Rcpp} } \value{ A numerical value that returns expected hitting times from i to j } \description{ Returns expected hitting time from state i to state j } \details{ According to the theorem, holding times for all states except j should be greater than 0. } \examples{ states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ExpectedTime(ctmc,1,4,TRUE) } \references{ Markovchains, J. R. Norris, Cambridge University Press } \author{ Vandit Jain } markovchain/man/transitionProbability.Rd0000644000176200001440000000227313762012754020224 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{transitionProbability} \alias{transitionProbability} \alias{transitionProbability,markovchain-method} \title{Function to get the transition probabilities from initial to subsequent states.} \usage{ transitionProbability(object, t0, t1) \S4method{transitionProbability}{markovchain}(object, t0, t1) } \arguments{ \item{object}{A \code{markovchain} object.} \item{t0}{Initial state.} \item{t1}{Subsequent state.} } \value{ Numeric Vector } \description{ This is a convenience function to get transition probabilities. 
} \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) transitionProbability(markovB,"b", "c") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/expectedRewards.Rd0000644000176200001440000000210713762012754016756 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{expectedRewards} \alias{expectedRewards} \title{Expected Rewards for a markovchain} \usage{ expectedRewards(markovchain,n,rewards) } \arguments{ \item{markovchain}{the markovchain-class object} \item{n}{no of steps of the process} \item{rewards}{vector depicting rewards coressponding to states} } \value{ returns a vector of expected rewards for different initial states } \description{ Given a markovchain object and reward values for every state, function calculates expected reward value after n steps. } \details{ the function uses a dynamic programming approach to solve a recursive equation described in reference. } \examples{ transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) } \references{ Stochastic Processes: Theory for Applications, Robert G. Gallager, Cambridge University Press } \author{ Vandit Jain } markovchain/man/ictmc-class.Rd0000644000176200001440000000122113762012754016023 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcClassesAndMethods.R \docType{class} \name{ictmc-class} \alias{ictmc-class} \alias{ictmc} \title{An S4 class for representing Imprecise Continuous Time Markovchains} \description{ An S4 class for representing Imprecise Continuous Time Markovchains } \section{Slots}{ \describe{ \item{\code{states}}{a vector of states present in the ICTMC model} \item{\code{Q}}{matrix representing the generator demonstrated in the form of variables} \item{\code{range}}{a matrix that stores values of range of variables} \item{\code{name}}{name given to ICTMC} }} markovchain/man/ctmc-class.Rd0000644000176200001440000000455513762012754015667 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcClassesAndMethods.R \docType{class} \name{ctmc-class} \alias{ctmc-class} \alias{dim,ctmc-method} \alias{initialize,ctmc_method} \alias{states,ctmc-method} \alias{steadyStates,ctmc-method} \alias{plot,ctmc,missing-method} \title{Continuous time Markov Chains class} \arguments{ \item{states}{Name of the states. Must be the same of \code{colnames} and \code{rownames} of the generator matrix} \item{byrow}{TRUE or FALSE. Indicates whether the given matrix is stochastic by rows or by columns} \item{generator}{Square generator matrix} \item{name}{Optional character name of the Markov chain} } \description{ The S4 class that describes \code{ctmc} (continuous time Markov chain) objects. } \note{ \enumerate{ \item \code{ctmc} classes are written using S4 classes \item Validation method is used to assess whether either columns or rows totals to zero. Rounding is used up to 5th decimal. 
If state names are not properly defined for a generator \code{matrix}, coercing to \code{ctmc} object leads to overriding states name with artificial "s1", "s2", ... sequence } } \section{Methods}{ \describe{ \item{dim}{\code{signature(x = "ctmc")}: method to get the size} \item{initialize}{\code{signature(.Object = "ctmc")}: initialize method } \item{states}{\code{signature(object = "ctmc")}: states method. } \item{steadyStates}{\code{signature(object = "ctmc")}: method to get the steady state vector. } \item{plot}{\code{signature(x = "ctmc", y = "missing")}: plot method for \code{ctmc} objects } } } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") steadyStates(molecularCTMC) \dontrun{plot(molecularCTMC)} } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. Anderson, University of Wisconsin at Madison. Sai Bhargav Yalamanchi, Giorgio Spedicato } \seealso{ \code{\link{generatorToTransitionMatrix}},\code{\link{rctmc}} } \keyword{classes} markovchain/man/probabilityatT.Rd0000644000176200001440000000243613762012754016623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{probabilityatT} \alias{probabilityatT} \title{Calculating probability from a ctmc object} \usage{ probabilityatT(C,t,x0,useRCpp) } \arguments{ \item{C}{A CTMC S4 object} \item{t}{final time t} \item{x0}{initial state} \item{useRCpp}{logical whether to use RCpp implementation} } \value{ returns a vector or a matrix in case \code{x0} is provided or not respectively. } \description{ This function returns the probability of every state at time t under different conditions } \details{ The initial state is not mandatory, In case it is not provided, function returns a matrix of transition function at time \code{t} else it returns vector of probaabilities of transition to different states if initial state was \code{x0} } \examples{ states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") probabilityatT(ctmc,1,useRCpp = TRUE) } \references{ INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley } \author{ Vandit Jain } markovchain/man/multinomialConfidenceIntervals.Rd0000644000176200001440000000237313762012754022032 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{multinomialConfidenceIntervals} \alias{multinomialConfidenceIntervals} \title{A function to compute multinomial confidence intervals of DTMC} \usage{ multinomialConfidenceIntervals( transitionMatrix, countsTransitionMatrix, confidencelevel = 0.95 ) } \arguments{ \item{transitionMatrix}{An estimated transition matrix.} \item{countsTransitionMatrix}{Empirical (conts) transition matrix, on which the \code{transitionMatrix} was performed.} \item{confidencelevel}{confidence interval level.} } \value{ Two matrices containing the confidence intervals. 
} \description{ Return estimated transition matrix assuming a Multinomial Distribution } \examples{ seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcfit<-markovchainFit(data=seq,byrow=TRUE) seqmat<-createSequenceMatrix(seq) multinomialConfidenceIntervals(mcfit$estimate@transitionMatrix, seqmat, 0.95) } \references{ Constructing two-sided simultaneous confidence intervals for multinomial proportions for small counts in a large number of cells. Journal of Statistical Software 5(6) (2000) } \seealso{ \code{markovchainFit} } markovchain/man/markovchain.Rd0000644000176200001440000000343313762012754016132 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/markovchain.R \docType{package} \name{markovchain-package} \alias{markovchain-package} \title{Easy Handling Discrete Time Markov Chains} \description{ The package contains classes and method to create and manage (plot, print, export for example) discrete time Markov chains (DTMC). In addition it provide functions to perform statistical (fitting and drawing random variates) and probabilistic (analysis of DTMC proprieties) analysis } \details{ \tabular{ll}{ Package: \tab markovchain\cr Type: \tab Package\cr Version: \tab 0.8.2\cr Date: \tab 2020-01-5\cr License: \tab GPL-2\cr Depends: \tab R (>= 3.6.0), methods, expm, matlab, igraph, Matrix\cr } } \examples{ # create some markov chains statesNames=c("a","b") mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames))) statesNames=c("a","b","c") mcB<-new("markovchain", states=statesNames, transitionMatrix= matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, byrow=TRUE, dimnames=list(statesNames, statesNames))) statesNames=c("a","b","c","d") matrice<-matrix(c(0.25,0.75,0,0,0.4,0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) mcC<-new("markovchain", states=statesNames, transitionMatrix=matrice) mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #operations with S4 methods mcA^2 steadyStates(mcB) absorbingStates(mcB) markovchainSequence(n=20, markovchain=mcC, include=TRUE) } \references{ Discrete-Time Markov Models, Bremaud, Springer 1999 } \author{ Giorgio Alfredo Spedicato Maintainer: Giorgio Alfredo Spedicato } \keyword{package} markovchain/man/expectedRewardsBeforeHittingA.Rd0000644000176200001440000000203013762012754021524 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{expectedRewardsBeforeHittingA} \alias{expectedRewardsBeforeHittingA} \title{Expected first passage Rewards for a set of states in a markovchain} \usage{ expectedRewardsBeforeHittingA(markovchain, A, state, rewards, n) } \arguments{ \item{markovchain}{the markovchain-class object} \item{A}{set of states for first passage expected reward} \item{state}{initial state} \item{rewards}{vector depicting rewards coressponding to states} \item{n}{no of steps of the process} } \value{ returns a expected reward (numerical value) as described above } \description{ Given a markovchain object and reward values for every state, function calculates expected reward value for a set A of states after n steps. } \details{ The function returns the value of expected first passage rewards given rewards coressponding to every state, an initial state and number of steps. 
} \author{ Sai Bhargav Yalamanchi, Vandit Jain } markovchain/man/noofVisitsDist.Rd0000644000176200001440000000201113762012754016606 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{noofVisitsDist} \alias{noofVisitsDist} \title{return a joint pdf of the number of visits to the various states of the DTMC} \usage{ noofVisitsDist(markovchain,N,state) } \arguments{ \item{markovchain}{a markovchain-class object} \item{N}{no of steps} \item{state}{the initial state} } \value{ a numeric vector depicting the above described probability density function. } \description{ This function would return a joint pdf of the number of visits to the various states of the DTMC during the first N steps. } \details{ This function would return a joint pdf of the number of visits to the various states of the DTMC during the first N steps. } \examples{ transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") } \author{ Vandit Jain } markovchain/man/steadyStates.Rd0000644000176200001440000000241413762012754016303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{steadyStates} \alias{steadyStates} \title{Stationary states of a \code{markovchain} object} \usage{ steadyStates(object) } \arguments{ \item{object}{A discrete \code{markovchain} object} } \value{ A matrix corresponding to the stationary states } \description{ This method returns the stationary vector in matricial form of a markovchain object. } \note{ The steady states are identified starting from which eigenvectors correspond to identity eigenvalues and then normalizing them to sum up to unity. When negative values are found in the matrix, the eigenvalues extraction is performed on the recurrent classes submatrix. } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) steadyStates(markovB) } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/markovchainSequence.Rd0000644000176200001440000000261413762012754017623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{markovchainSequence} \alias{markovchainSequence} \title{Function to generate a sequence of states from homogeneous Markov chains.} \usage{ markovchainSequence( n, markovchain, t0 = sample(markovchain@states, 1), include.t0 = FALSE, useRCpp = TRUE ) } \arguments{ \item{n}{Sample size} \item{markovchain}{\code{markovchain} object} \item{t0}{The initial state} \item{include.t0}{Specify if the initial state shall be used} \item{useRCpp}{Boolean. Should RCpp fast implementation being used? Default is yes.} } \value{ A Character Vector } \description{ Provided any \code{markovchain} object, it returns a sequence of states coming from the underlying stationary distribution. } \details{ A sequence of size n is sampled. 
} \examples{ # define the markovchain object statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) # show the sequence outs <- markovchainSequence(n = 100, markovchain = mcB, t0 = "a") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainFit}} } \author{ Giorgio Spedicato } markovchain/man/tm_abs.Rd0000644000176200001440000000131113762012754015066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{tm_abs} \alias{tm_abs} \title{Single Year Corporate Credit Rating Transititions} \format{ The format is: num [1:8, 1:8] 17 2 0 0 0 0 0 0 1 455 ... - attr(*, "dimnames")=List of 2 ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... } \usage{ data(tm_abs) } \description{ Matrix of Standard and Poor's Global Corporate Rating Transition Frequencies 2000 (NR Removed) } \examples{ data(tm_abs) } \references{ European Securities and Markets Authority, 2016 https://cerep.esma.europa.eu/cerep-web/statistics/transitionMatrice.xhtml } \keyword{datasets} markovchain/man/rctmc.Rd0000644000176200001440000000341613762012754014741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{rctmc} \alias{rctmc} \title{rctmc} \usage{ rctmc(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, out.type = "list") } \arguments{ \item{n}{The number of samples to generate.} \item{ctmc}{The CTMC S4 object.} \item{initDist}{The initial distribution of states.} \item{T}{The time up to which the simulation runs (all transitions after time T are not returned).} \item{include.T0}{Flag to determine if start state is to be included.} \item{out.type}{"list" or "df"} } \value{ Based on out.type, a list or a data frame is returned. The returned list has two elements - a character vector (states) and a numeric vector (indicating time of transitions). The data frame is similarly structured. } \description{ The function generates random CTMC transitions as per the provided generator matrix. } \details{ In order to use the T0 argument, set n to Inf. } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, T = 1) rctmc(n = 5, ctmc = molecularCTMC, initDist = statesDist, include.T0 = FALSE) } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. 
Anderson, University of Wisconsin at Madison } \seealso{ \code{\link{generatorToTransitionMatrix}},\code{\link{ctmc-class}} } \author{ Sai Bhargav Yalamanchi }
markovchain/man/absorptionProbabilities.Rd0000644000176200001440000000222113762012754020513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{absorptionProbabilities} \alias{absorptionProbabilities} \title{Absorption probabilities} \usage{ absorptionProbabilities(object) } \arguments{ \item{object}{the markovchain object} } \value{ A matrix with the absorption probabilities from each transient state to each recurrent state (oriented as described below) } \description{ Computes the absorption probability from each transient state to each recurrent one: the (i, j) entry (or the (j, i) entry, for a stochastic matrix by columns) is the probability that the first recurrent state reached from the transient state i is j, i.e. the probability of being absorbed in the communicating recurrent class of j } \examples{ m <- matrix(c(1/2, 1/2, 0, 1/2, 1/2, 0, 0, 1/2, 1/2), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) absorptionProbabilities(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. } \author{ Ignacio Cordón }
markovchain/man/hommc-show.Rd0000644000176200001440000000064113762012754015707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{show,hommc-method} \alias{show,hommc-method} \title{Function to display the details of an hommc object} \usage{ \S4method{show}{hommc}(object) } \arguments{ \item{object}{An object of class hommc} } \description{ This is a convenience function to display the slots of an hommc object in a proper format }
markovchain/man/inferHyperparam.Rd0000644000176200001440000000440313762012754016762 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{inferHyperparam} \alias{inferHyperparam} \title{Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set} \usage{ inferHyperparam(transMatr = matrix(), scale = numeric(), data = character()) } \arguments{ \item{transMatr}{A valid transition matrix, with dimension names.} \item{scale}{A vector of scaling factors, each element corresponds to the row names of the provided transition matrix transMatr, in the same order.} \item{data}{A data set from which the hyperparameters are inferred.} } \value{ Returns the hyperparameter matrix in a list. } \description{ Since the Bayesian inference approach implemented in the package is based on conjugate priors, hyperparameters must be provided to model the prior probability distribution of the chain parameters. The hyperparameters are inferred from a given a priori matrix under the assumption that the matrix provided corresponds to the mean (expected) values of the chain parameters. A scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred from a data set. } \details{ transMatr and scale need not be provided if data is provided. } \note{ The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, and the elements in the matrix are correspondingly permuted.
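For instance, a minimal sketch (assuming the \code{weatherMatrix} object defined in the examples below) to inspect that ordering is:
\preformatted{
# the states of the returned hyperparameter matrix are sorted
# alphanumerically: "cloudy", "rain", "sunny"
hp <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10))
str(hp)
}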
} \examples{ data(rain, package = "markovchain") inferHyperparam(data = rain$rain) weatherStates <- c("sunny", "cloudy", "rain") weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = TRUE, nrow = 3, dimnames = list(weatherStates, weatherStates)) inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) } \references{ Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} } \author{ Sai Bhargav Yalamanchi, Giorgio Spedicato } markovchain/man/markovchain-class.Rd0000644000176200001440000002100313762012754017226 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \docType{class} \name{markovchain-class} \alias{markovchain-class} \alias{*,markovchain,markovchain-method} \alias{*,markovchain,matrix-method} \alias{*,markovchain,numeric-method} \alias{*,matrix,markovchain-method} \alias{*,numeric,markovchain-method} \alias{==,markovchain,markovchain-method} \alias{!=,markovchain,markovchain-method} \alias{absorbingStates,markovchain-method} \alias{transientStates,markovchain-method} \alias{recurrentStates,markovchain-method} \alias{transientClasses,markovchain-method} \alias{recurrentClasses,markovchain-method} \alias{communicatingClasses,markovchain-method} \alias{steadyStates,markovchain-method} \alias{meanNumVisits,markovchain-method} \alias{is.regular,markovchain-method} \alias{is.irreducible,markovchain-method} \alias{is.accessible,markovchain,character,character-method} \alias{is.accessible,markovchain,missing,missing-method} \alias{absorptionProbabilities,markovchain-method} \alias{meanFirstPassageTime,markovchain,character-method} \alias{meanFirstPassageTime,markovchain,missing-method} \alias{meanAbsorptionTime,markovchain-method} \alias{meanRecurrenceTime,markovchain-method} \alias{conditionalDistribution,markovchain-method} \alias{hittingProbabilities,markovchain-method} \alias{canonicForm,markovchain-method} \alias{coerce,data.frame,markovchain-method} \alias{coerce,markovchain,data.frame-method} \alias{coerce,table,markovchain-method} \alias{coerce,markovchain,igraph-method} \alias{coerce,markovchain,matrix-method} \alias{coerce,markovchain,sparseMatrix-method} \alias{coerce,sparseMatrix,markovchain-method} \alias{coerce,matrix,markovchain-method} \alias{coerce,msm,markovchain-method} \alias{coerce,msm.est,markovchain-method} \alias{coerce,etm,markovchain-method} \alias{dim,markovchain-method} \alias{initialize,markovchain-method} \alias{names<-,markovchain-method} \alias{plot,markovchain,missing-method} \alias{predict,markovchain-method} \alias{print,markovchain-method} \alias{show,markovchain-method} \alias{summary,markovchain-method} \alias{sort,markovchain-method} \alias{t,markovchain-method} \alias{[,markovchain,ANY,ANY,ANY-method} \alias{^,markovchain,numeric-method} \title{Markov Chain class} \arguments{ \item{states}{Name of the states. Must be the same of \code{colnames} and \code{rownames} of the transition matrix} \item{byrow}{TRUE or FALSE indicating whether the supplied matrix is either stochastic by rows or by columns} \item{transitionMatrix}{Square transition matrix} \item{name}{Optional character name of the Markov chain} } \description{ The S4 class that describes \code{markovchain} objects. } \note{ \enumerate{ \item \code{markovchain} object are backed by S4 Classes. 
\item Validation method is used to assess whether either columns or rows totals to one. Rounding is used up to \code{.Machine$double.eps * 100}. If state names are not properly defined for a probability \code{matrix}, coercing to \code{markovhcain} object leads to overriding states name with artificial "s1", "s2", ... sequence. In addition, operator overloading has been applied for \eqn{+,*,^,==,!=} operators. } } \section{Creation of objects}{ Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}. } \section{Methods}{ \describe{ \item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects} \item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication} \item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication } \item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain} \item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication } \item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... } \item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object} \item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object} \item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object} \item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states } \item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form } \item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}} \item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state} \item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}} \item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} } \item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} } \item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} } \item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} } \item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} } \item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects } \item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects } \item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects } \item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one } \item{dim}{\code{signature(x = "markovchain")}: method to get the size} \item{names}{\code{signature(x = "markovchain")}: method to get the names of states} \item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states} 
\item{initialize}{\code{signature(.Object = "markovchain")}: initialize method } \item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects } \item{predict}{\code{signature(object = "markovchain")}: predict method } \item{print}{\code{signature(x = "markovchain")}: print method. } \item{show}{\code{signature(object = "markovchain")}: show method. } \item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. } \item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. } \item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. } \item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain } \item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. } \item{t}{\code{signature(x = "markovchain")}: transpose matrix } \item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability } } } \examples{ #show markovchain definition showClass("markovchain") #create a simple Markov chain transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") #power simpleMc^4 #some methods steadyStates(simpleMc) absorbingStates(simpleMc) simpleMc[2,1] t(simpleMc) is.irreducible(simpleMc) #conditional distributions conditionalDistribution(simpleMc, "b") #example for predict method sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit<-markovchainFit(data=sequence) predict(mcFit$estimate, newdata="b",n.ahead=3) #direct conversion myMc<-as(transMatr, "markovchain") #example of summary summary(simpleMc) \dontrun{plot(simpleMc)} } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainSequence}},\code{\link{markovchainFit}} } \author{ Giorgio Spedicato } \keyword{classes} markovchain/man/predictHommc.Rd0000644000176200001440000000162013762012754016242 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{predictHommc} \alias{predictHommc} \title{Simulate a higher order multivariate markovchain} \usage{ predictHommc(hommc,t,init) } \arguments{ \item{hommc}{a hommc-class object} \item{t}{no of iterations to predict} \item{init}{matrix of previous states size of which depends on hommc} } \value{ The function returns a matrix of size s X t displaying t predicted states in each row coressponding to every categorical sequence. } \description{ This function provides a prediction of states for a higher order multivariate markovchain object } \details{ The user is required to provide a matrix of giving n previous coressponding every categorical sequence. Dimensions of the init are s X n, where s is number of categorical sequences and n is order of the homc. } \author{ Vandit Jain } markovchain/man/is.accessible.Rd0000644000176200001440000000251613762012754016340 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.accessible} \alias{is.accessible} \title{Verify if a state j is reachable from state i.} \usage{ is.accessible(object, from, to) } \arguments{ \item{object}{A \code{markovchain} object.} \item{from}{The name of state "i" (beginning state).} \item{to}{The name of state "j" (ending state).} } \value{ A boolean value. 
} \description{ This function verifies if a state is reachable from another, i.e., if there exists a path of positive probability leading from state i to state j } \details{ It wraps an internal function named \code{reachabilityMatrix}. } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) ) ) is.accessible(markovB, "a", "c") } \references{ James Montgomery, University of Madison } \seealso{ \code{is.irreducible} } \author{ Giorgio Spedicato, Ignacio Cordón }
markovchain/man/sales.Rd0000644000176200001440000000131713762012754014736 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{sales} \alias{sales} \title{Sales Demand Sequences} \format{ An object of class \code{matrix} (inherits from \code{array}) with 269 rows and 5 columns. } \usage{ data("sales") } \description{ Sales demand sequences of five products (A, B, C, D, E). Each column corresponds to a sequence: the first column contains the sequence of product A, the second that of product B, and so on. } \details{ The data set can be used to fit a higher order multivariate Markov chain. } \examples{ data("sales") # fitHighOrderMultivarMC(seqMat = sales, order = 2, Norm = 2) } \keyword{datasets}
markovchain/man/meanAbsorptionTime.Rd0000644000176200001440000000174013762012754017427 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanAbsorptionTime} \alias{meanAbsorptionTime} \title{Mean absorption time} \usage{ meanAbsorptionTime(object) } \arguments{ \item{object}{the markovchain object} } \value{ A named vector with the expected number of steps to go from a transient state to any of the recurrent ones } \description{ Computes the expected number of steps to go from any of the transient states to any of the recurrent states. The Markov chain should have at least one transient state for this method to work } \examples{ m <- matrix(c(1/2, 1/2, 0, 1/2, 1/2, 0, 0, 1/2, 1/2), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) times <- meanAbsorptionTime(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. } \author{ Ignacio Cordón }
markovchain/man/markovchainList-class.Rd0000644000176200001440000000516713762012754020077 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \docType{class} \name{markovchainList-class} \alias{markovchainList-class} \alias{[[,markovchainList-method} \alias{dim,markovchainList-method} \alias{predict,markovchainList-method} \alias{print,markovchainList-method} \alias{show,markovchainList-method} \title{Non homogeneous discrete time Markov Chains class} \arguments{ \item{markovchains}{Object of class \code{"list"}: a list of markovchains} \item{name}{Object of class \code{"character"}: optional name of the class} } \description{ A class to handle non homogeneous discrete Markov chains } \note{ The class consists of a list of \code{markovchain} objects. It is aimed at working with non homogeneous Markov chains. } \section{Objects from the Class}{ A \code{markovchainList} is a list of \code{markovchain} objects. They can be used to model non homogeneous discrete time Markov Chains, when transition probabilities (and possible states) change over time.
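As an illustration, a small sketch (assuming the \code{mcList} object built in the examples below) of how such a list is typically used:
\preformatted{
dim(mcList)      # number of markovchain objects in the list
mcList[[2]]      # extract the second markovchain
# sample 5 non-homogeneous trajectories, one per row
rmarkovchain(n = 5, object = mcList, what = "matrix")
}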
} \section{Methods}{ \describe{ \item{[[}{\code{signature(x = "markovchainList")}: extract the i-th \code{markovchain} } \item{dim}{\code{signature(x = "markovchainList")}: number of \code{markovchain} objects contained in the list } \item{predict}{\code{signature(object = "markovchainList")}: predict from a \code{markovchainList} } \item{print}{\code{signature(x = "markovchainList")}: prints the list of markovchains } \item{show}{\code{signature(object = "markovchainList")}: same as \code{print} } } } \examples{ showClass("markovchainList") #define a markovchainList statesNames=c("a","b") mcA<-new("markovchain",name="MCA", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9), byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames)) ) mcB<-new("markovchain", states=c("a","b","c"), name="MCB", transitionMatrix=matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, byrow=TRUE)) mcC<-new("markovchain", states=c("a","b","c","d"), name="MCC", transitionMatrix=matrix(c(0.25,0.75,0,0,0.4,0.6, 0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) ) mcList<-new("markovchainList",markovchains=list(mcA, mcB, mcC), name="Non - homogeneous Markov Chain") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } \keyword{classes}
markovchain/man/markovchainListFit.Rd0000644000176200001440000000240313762012754017425 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{markovchainListFit} \alias{markovchainListFit} \title{markovchainListFit} \usage{ markovchainListFit(data, byrow = TRUE, laplacian = 0, name) } \arguments{ \item{data}{Either a matrix or a data.frame or a list object.} \item{byrow}{Indicates whether distinct stochastic process trajectories are shown in distinct rows.} \item{laplacian}{Laplacian correction (default 0).} \item{name}{Optional name.} } \value{ A list containing two slots: estimate (the fitted \code{markovchainList} object) and name (its name) } \description{ Given a data frame or a matrix (rows are observations, columns the temporal sequence), it fits a non - homogeneous discrete time Markov chain process (one trajectory stored per row). In particular, a markovchainList of size = ncol - 1 is obtained by estimating transitions from the n samples given by consecutive column pairs. } \details{ If \code{data} contains \code{NAs} then the transitions containing \code{NA} will be ignored. } \examples{ # using holson dataset data(holson) # fitting a single markovchain singleMc <- markovchainFit(data = holson[,2:12]) # fitting a markovchainList mclistFit <- markovchainListFit(data = holson[, 2:12], name = "holsonMcList") }
markovchain/man/fitHigherOrder.Rd0000644000176200001440000000227613762012754016541 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitHigherOrder.R \name{fitHigherOrder} \alias{fitHigherOrder} \alias{seq2freqProb} \alias{seq2matHigh} \title{Functions to fit a higher order Markov chain} \usage{ fitHigherOrder(sequence, order = 2) seq2freqProb(sequence) seq2matHigh(sequence, order) } \arguments{ \item{sequence}{A character list.} \item{order}{Markov chain order} } \value{ A list containing lambda, Q, and X. } \description{ Given a sequence of states arising from a stationary state, it fits the underlying Markov chain distribution with higher order. } \note{ This function is written in Rcpp.
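A short sketch of how the fitted object might be inspected (reusing the \code{sequence} vector defined in the examples below; element names as given in the Value section):
\preformatted{
fit <- fitHigherOrder(sequence, order = 2)
fit$lambda   # lag weights (lambda, as listed in the Value section)
fit$Q        # transition matrices, one per lag
}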
} \examples{ sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b", "c", "a", "b", "c", "a", "b", "a", "b") fitHigherOrder(sequence) } \references{ Ching, W. K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov chains. In Markov Chains (pp. 141-176). Springer US. Ching, W. K., Ng, M. K., & Fung, E. S. (2008). Higher-order multivariate Markov chains and their applications. Linear Algebra and its Applications, 428(2), 492-507. } \author{ Giorgio Spedicato, Tae Seung Kang } markovchain/man/fitHighOrderMultivarMC.Rd0000644000176200001440000000205413762012754020150 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{fitHighOrderMultivarMC} \alias{fitHighOrderMultivarMC} \title{Function to fit Higher Order Multivariate Markov chain} \usage{ fitHighOrderMultivarMC(seqMat, order = 2, Norm = 2) } \arguments{ \item{seqMat}{a matrix or a data frame where each column is a categorical sequence} \item{order}{Multivariate Markov chain order. Default is 2.} \item{Norm}{Norm to be used. Default is 2.} } \value{ an hommc object } \description{ Given a matrix of categorical sequences it fits Higher Order Multivariate Markov chain. } \examples{ data <- matrix(c('2', '1', '3', '3', '4', '3', '2', '1', '3', '3', '2', '1', c('2', '4', '4', '4', '4', '2', '3', '3', '1', '4', '3', '3')), ncol = 2, byrow = FALSE) fitHighOrderMultivarMC(data, order = 2, Norm = 2) } \references{ W.-K. Ching et al. / Linear Algebra and its Applications } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/meanRecurrenceTime.Rd0000644000176200001440000000215013762012754017400 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanRecurrenceTime} \alias{meanRecurrenceTime} \title{Mean recurrence time} \usage{ meanRecurrenceTime(object) } \arguments{ \item{object}{the markovchain object} } \value{ For a Markov chain it outputs is a named vector with the expected time to first return to a state when the chain starts there. States present in the vector are only the recurrent ones. If the matrix is ergodic (i.e. irreducible), then all states are present in the output and order is the same as states order for the Markov chain } \description{ Computes the expected time to return to a recurrent state in case the Markov chain starts there } \examples{ m <- matrix(1 / 10 * c(6,3,1, 2,3,5, 4,1,5), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) meanRecurrenceTime(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. 
} \author{ Ignacio Cordón }
markovchain/man/transition2Generator.Rd0000644000176200001440000000145213762012754017752 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{transition2Generator} \alias{transition2Generator} \title{Return the generator matrix for a corresponding transition matrix} \usage{ transition2Generator(P, t = 1, method = "logarithm") } \arguments{ \item{P}{transition matrix between time 0 and t} \item{t}{time of observation} \item{method}{"logarithm" returns the matrix logarithm of the transition matrix} } \value{ A matrix that represents the generator of P } \description{ Calculates the generator matrix for a corresponding transition matrix } \examples{ mymatr <- matrix(c(.4, .6, .1, .9), nrow = 2, byrow = TRUE) Q <- transition2Generator(P = mymatr) expm::expm(Q) } \seealso{ \code{\link{rctmc}} }
markovchain/man/markovchainFit.Rd0000644000176200001440000001113413762012754016572 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{createSequenceMatrix} \alias{createSequenceMatrix} \alias{markovchainFit} \title{Function to fit a discrete Markov chain} \usage{ createSequenceMatrix( stringchar, toRowProbs = FALSE, sanitize = FALSE, possibleStates = character() ) markovchainFit( data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, confint = TRUE, hyperparam = matrix(), sanitize = FALSE, possibleStates = character() ) } \arguments{ \item{stringchar}{It can be a {n x n} matrix or a character vector or a list} \item{toRowProbs}{converts a sequence matrix into a probability matrix} \item{sanitize}{put 1 in all rows having rowSum equal to zero} \item{possibleStates}{Possible states which are not present in the given sequence} \item{data}{It can be a character vector or a {n x n} matrix or a {n x n} data frame or a list} \item{method}{Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace"} \item{byrow}{It tells whether the output Markov chain should show the transition probabilities by row.} \item{nboot}{Number of bootstrap replicates in case "bootstrap" is used.} \item{laplacian}{Laplacian smoothing parameter, default zero. It is only used when "laplace" method is chosen.} \item{name}{Optional character for name slot.} \item{parallel}{Use parallel processing when performing Bootstrap estimates.} \item{confidencelevel}{\eqn{\alpha} level for confidence interval width. Used only when \code{method} is equal to "mle".} \item{confint}{a boolean to decide whether to compute Confidence Interval or not.} \item{hyperparam}{Hyperparameter matrix for the a priori distribution. If none is provided, default value of 1 is assigned to each parameter. This must be of size {k x k} where k is the number of states in the chain and the values should typically be non-negative integers.} } \value{ A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix of standard deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method is used, the lower and upper confidence bounds are returned along with the standard error. The "map" method also returns the expected value of the parameters with respect to the posterior distribution.
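For example, a minimal sketch (reusing the \code{sequence} vector from the examples below) of how the returned list might be inspected:
\preformatted{
fit <- markovchainFit(data = sequence)
fit$estimate   # the fitted markovchain object
str(fit)       # the remaining components (bounds, standard error, ...)
}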
} \description{ Given a sequence of states arising from a stationary state, it fits the underlying Markov chain distribution using either MLE (also using a Laplacian smoother), bootstrap or by MAP (Bayesian) inference. } \details{ Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} contain \code{NAs}, the related \code{NA} containing transitions will be ignored. } \note{ This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". In addition, parallel facility is not complete, involving only a part of the bootstrap process. When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is currently available. } \examples{ sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) mcFitMLE <- markovchainFit(data = sequence) mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") na.sequence <- c("a", NA, "a", "b") # There will be only a (a,b) transition na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) mcFitMLE <- markovchainFit(data = na.sequence) # data can be a list of character vectors sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) mcFitMap <- markovchainFit(sequences, method = "map") mcFitMle <- markovchainFit(sequences, method = "mle") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, Alfred Hubler, Santa Fe Institute Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} } \author{ Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi } markovchain/man/HigherOrderMarkovChain-class.Rd0000644000176200001440000000047513762012754021263 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitHigherOrder.R \docType{class} \name{HigherOrderMarkovChain-class} \alias{HigherOrderMarkovChain-class} \title{Higher order Markov Chains class} \description{ The S4 class that describes \code{HigherOrderMarkovChain} objects. } markovchain/man/is.regular.Rd0000644000176200001440000000155013762012754015701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.regular} \alias{is.regular} \title{Check if a DTMC is regular} \usage{ is.regular(object) } \arguments{ \item{object}{a markovchain object} } \value{ A boolean value } \description{ Function to check wether a DTCM is regular } \details{ A Markov chain is regular if some of the powers of its matrix has all elements strictly positive } \examples{ P <- matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25, 0.25, 0.5), nrow = 3) colnames(P) <- rownames(P) <- c("R","N","S") ciao <- as(P, "markovchain") is.regular(ciao) } \references{ Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. 
Corollary 8.5.8, Theorem 8.5.9 } \seealso{ \code{\link{is.irreducible}} } \author{ Ignacio Cordón } markovchain/man/states.Rd0000644000176200001440000000177413762012754015141 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{states} \alias{states} \alias{states,markovchain-method} \title{Defined states of a transition matrix} \usage{ states(object) \S4method{states}{markovchain}(object) } \arguments{ \item{object}{A discrete \code{markovchain} object} } \value{ The character vector corresponding to states slot. } \description{ This method returns the states of a transition matrix. } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) states(markovB) names(markovB) } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/is.irreducible.Rd0000644000176200001440000000177513762012754016542 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.irreducible} \alias{is.irreducible} \title{Function to check if a Markov chain is irreducible (i.e. ergodic)} \usage{ is.irreducible(object) } \arguments{ \item{object}{A \code{markovchain} object} } \value{ A boolean values. } \description{ This function verifies whether a \code{markovchain} object transition matrix is composed by only one communicating class. } \details{ It is based on \code{.communicatingClasses} internal function. } \examples{ statesNames <- c("a", "b") mcA <- new("markovchain", transitionMatrix = matrix(c(0.7,0.3,0.1,0.9), byrow = TRUE, nrow = 2, dimnames = list(statesNames, statesNames) )) is.irreducible(mcA) } \references{ Feres, Matlab listings for Markov Chains. } \seealso{ \code{\link{summary}} } \author{ Giorgio Spedicato } markovchain/man/predictiveDistribution.Rd0000644000176200001440000000406713762012754020372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{predictiveDistribution} \alias{predictiveDistribution} \title{predictiveDistribution} \usage{ predictiveDistribution(stringchar, newData, hyperparam = matrix()) } \arguments{ \item{stringchar}{This is the data using which the Bayesian inference is performed.} \item{newData}{This is the data whose predictive probability is computed.} \item{hyperparam}{This determines the shape of the prior distribution of the parameters. If none is provided, default value of 1 is assigned to each parameter. This must be of size kxk where k is the number of states in the chain and the values should typically be non-negative integers.} } \value{ The log of the probability is returned. } \description{ The function computes the probability of observing a new data set, given a data set } \details{ The underlying method is Bayesian inference. The probability is computed by averaging the likelihood of the new data with respect to the posterior. Since the method assumes conjugate priors, the result can be represented in a closed form (see the vignette for more details), which is what is returned. 
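For instance, a sketch of how the returned log-probability might be used (with the \code{sequence} and \code{hyperMatrix} objects from the examples below):
\preformatted{
logProb <- predictiveDistribution(sequence[1:10], sequence[11:17],
                                  hyperparam = hyperMatrix)
exp(logProb)   # back-transform from the log scale
}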
} \examples{ sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) predProb2==predProb } \references{ Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, Alfred Hubler, Santa Fe Institute Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainFit}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/statisticalTests.Rd0000644000176200001440000000564713762012754017210 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/statisticalTests.R \name{verifyMarkovProperty} \alias{verifyMarkovProperty} \alias{assessOrder} \alias{assessStationarity} \alias{verifyEmpiricalToTheoretical} \alias{verifyHomogeneity} \title{Various functions to perform statistical inference of DTMC} \usage{ verifyMarkovProperty(sequence, verbose = TRUE) assessOrder(sequence, verbose = TRUE) assessStationarity(sequence, nblocks, verbose = TRUE) verifyEmpiricalToTheoretical(data, object, verbose = TRUE) verifyHomogeneity(inputList, verbose = TRUE) } \arguments{ \item{sequence}{An empirical sequence.} \item{verbose}{Should test results be printed out?} \item{nblocks}{Number of blocks.} \item{data}{matrix, character or list to be converted in a raw transition matrix} \item{object}{a markovchain object} \item{inputList}{A list of items that can coerced to transition matrices} } \value{ Verification result a list with following slots: statistic (the chi - square statistic), dof (degrees of freedom), and corresponding p-value. In case a cell in the empirical transition matrix is >0 that is 0 in the theoretical transition matrix the null hypothesis is rejected. In that case a p-value of 0 and statistic and dof of NA are returned. a list of transition matrices? } \description{ These functions verify the Markov property, assess the order and stationarity of the Markov chain. This function tests whether an empirical transition matrix is statistically compatible with a theoretical one. It is a chi-square based test. In case a cell in the empirical transition matrix is >0 that is 0 in the theoretical transition matrix the null hypothesis is rejected. 
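A sketch of how the returned list might be used (reusing the \code{sequence} and \code{theoreticalMc} objects defined in the examples below; the slot names follow the Value section):
\preformatted{
res <- verifyEmpiricalToTheoretical(data = sequence, object = theoreticalMc,
                                    verbose = FALSE)
res$statistic   # chi-square statistic
res$pvalue      # corresponding p-value
}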
Verifies that the s elements in the input list belongs to the same DTMC } \examples{ sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit <- markovchainFit(data = sequence, byrow = FALSE) verifyMarkovProperty(sequence) assessOrder(sequence) assessStationarity(sequence, 1) #Example taken from Kullback Kupperman Tests for Contingency Tables and Markov Chains sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) } \references{ Anderson and Goodman. } \seealso{ \code{markovchain} } \author{ Tae Seung Kang, Giorgio Alfredo Spedicato } \concept{statisticalTests} markovchain/man/meanNumVisits.Rd0000644000176200001440000000167613762012754016441 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanNumVisits} \alias{meanNumVisits} \title{Mean num of visits for markovchain, starting at each state} \usage{ meanNumVisits(object) } \arguments{ \item{object}{the markovchain-class object} } \value{ a matrix with the expect number of visits to each state } \description{ Given a markovchain object, this function calculates a matrix where the element (i, j) represents the expect number of visits to the state j if the chain starts at i (in a Markov chain by columns it would be the element (j, i) instead) } \examples{ M <- matlab::zeros(5, 5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 mc <- new("markovchain", transitionMatrix = M) meanNumVisits(mc) } \references{ R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 } \author{ Ignacio Cordón } markovchain/man/rmarkovchain.Rd0000644000176200001440000000547313762012754016322 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{rmarkovchain} \alias{rmarkovchain} \title{Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.} \usage{ rmarkovchain( n, object, what = "data.frame", useRCpp = TRUE, parallel = FALSE, num.cores = NULL, ... ) } \arguments{ \item{n}{Sample size} \item{object}{Either a \code{markovchain} or a \code{markovchainList} object} \item{what}{It specifies whether either a \code{data.frame} or a \code{matrix} (each rows represent a simulation) or a \code{list} is returned.} \item{useRCpp}{Boolean. Should RCpp fast implementation being used? Default is yes.} \item{parallel}{Boolean. Should parallel implementation being used? Default is yes.} \item{num.cores}{Number of Cores to be used} \item{...}{additional parameters passed to the internal sampler} } \value{ Character Vector, data.frame, list or matrix } \description{ Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of states coming from the underlying stationary distribution. } \details{ When a homogeneous process is assumed (\code{markovchain} object) a sequence is sampled of size n. 
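A sketch for this homogeneous case (assuming the \code{mcB} object defined in the examples below):
\preformatted{
sampled <- rmarkovchain(n = 100, object = mcB, what = "list")
head(sampled)   # first few sampled states
}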
When a non - homogeneous process is assumed, n samples are taken but the process is assumed to last from the begin to the end of the non-homogeneous markov process. } \note{ Check the type of input } \examples{ # define the markovchain object statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) # show the sequence outs <- rmarkovchain(n = 100, object = mcB, what = "list") #define markovchainList object statesNames <- c("a", "b", "c") mcA <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcC <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) # show the list of sequence rmarkovchain(100, mclist, "list") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainFit}}, \code{\link{markovchainSequence}} } \author{ Giorgio Spedicato } markovchain/man/ctmcFit.Rd0000644000176200001440000000257513762012754015227 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{ctmcFit} \alias{ctmcFit} \title{Function to fit a CTMC} \usage{ ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) } \arguments{ \item{data}{It is a list of two elements. The first element is a character vector denoting the states. The second is a numeric vector denoting the corresponding transition times.} \item{byrow}{Determines if the output transition probabilities of the underlying embedded DTMC are by row.} \item{name}{Optional name for the CTMC.} \item{confidencelevel}{Confidence level for the confidence interval construnction.} } \value{ It returns a list containing the CTMC object and the confidence intervals. } \description{ This function fits the underlying CTMC give the state transition data and the transition times using the maximum likelihood method (MLE) } \details{ Note that in data, there must exist an element wise corresponding between the two elements of the list and that data[[2]][1] is always 0. 
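A minimal sketch of inspecting the result (using the \code{data} list defined in the examples below):
\preformatted{
fit <- ctmcFit(data)
str(fit)   # fitted ctmc object together with the confidence intervals
}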
} \examples{ data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) } \references{ Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 } \seealso{ \code{\link{rctmc}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/hittingProbabilities.Rd0000644000176200001440000000145013762012754020004 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{hittingProbabilities} \alias{hittingProbabilities} \title{Hitting probabilities for markovchain} \usage{ hittingProbabilities(object) } \arguments{ \item{object}{the markovchain-class object} } \value{ a matrix of hitting probabilities } \description{ Given a markovchain object, this function calculates the probability of ever arriving from state i to j } \examples{ M <- matlab::zeros(5, 5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 mc <- new("markovchain", transitionMatrix = M) hittingProbabilities(mc) } \references{ R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 } \author{ Ignacio Cordón } markovchain/man/blanden.Rd0000644000176200001440000000150713762012754015233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{blanden} \alias{blanden} \title{Mobility between income quartiles} \format{ An object of class \code{table} with 4 rows and 4 columns. } \source{ Personal reworking } \usage{ data(blanden) } \description{ This table show mobility between income quartiles for father and sons for the 1970 cohort born } \details{ The rows represent fathers' income quartile when the son is aged 16, whilst the columns represent sons' income quartiles when he is aged 30 (in 2000). } \examples{ data(blanden) mobilityMc<-as(blanden, "markovchain") } \references{ Jo Blanden, Paul Gregg and Stephen Machin, Intergenerational Mobility in Europe and North America, Center for Economic Performances (2005) } \keyword{datasets} markovchain/man/conditionalDistribution.Rd0000644000176200001440000000213013762012754020524 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{conditionalDistribution} \alias{conditionalDistribution} \title{\code{conditionalDistribution} of a Markov Chain} \usage{ conditionalDistribution(object, state) } \arguments{ \item{object}{A \code{markovchain} object.} \item{state}{Subsequent state.} } \value{ A named probability vector } \description{ It extracts the conditional distribution of the subsequent state, given current state. 
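For a chain that is stochastic by rows, this is simply the row of the transition matrix associated with the current state; a sketch (assuming the \code{markovB} object from the examples below):
\preformatted{
conditionalDistribution(markovB, "b")
# equals markovB@transitionMatrix["b", ] for a by-row chain
}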
} \examples{ # define a markov chain statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1),nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) conditionalDistribution(markovB, "b") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/is.CTMCirreducible.Rd0000644000176200001440000000165613762012754017207 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{is.CTMCirreducible} \alias{is.CTMCirreducible} \title{Check if CTMC is irreducible} \usage{ is.CTMCirreducible(ctmc) } \arguments{ \item{ctmc}{a ctmc-class object} } \value{ a boolean value as described above. } \description{ This function verifies whether a CTMC object is irreducible } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) } \references{ Continuous-Time Markov Chains, Karl Sigman, Columbia University } \author{ Vandit Jain } markovchain/man/generatorToTransitionMatrix.Rd0000644000176200001440000000201413762012754021353 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{generatorToTransitionMatrix} \alias{generatorToTransitionMatrix} \title{Function to obtain the transition matrix from the generator} \usage{ generatorToTransitionMatrix(gen, byrow = TRUE) } \arguments{ \item{gen}{The generator matrix} \item{byrow}{Flag to determine if rows (columns) sum to 0} } \value{ Returns the transition matrix. } \description{ The transition matrix of the embedded DTMC is inferred from the CTMC's generator } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) generatorToTransitionMatrix(gen) } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. Anderson, University of Wisconsin at Madison } \seealso{ \code{\link{rctmc}},\code{\link{ctmc-class}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/kullback.Rd0000644000176200001440000000064613762012754015423 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{kullback} \alias{kullback} \title{Example from Kullback and Kupperman Tests for Contingency Tables} \format{ A list containing two 6x6 non - negative integer matrices } \usage{ data(kullback) } \description{ A list of two matrices representing raw transitions between two states } \keyword{datasets} markovchain/man/priorDistribution.Rd0000644000176200001440000000372013762012754017362 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{priorDistribution} \alias{priorDistribution} \title{priorDistribution} \usage{ priorDistribution(transMatr, hyperparam = matrix()) } \arguments{ \item{transMatr}{The transition matrix whose probability is the parameter of interest.} \item{hyperparam}{The hyperparam matrix (optional). 
If not provided, a default value of 1 is assumed for each and therefore the resulting probability distribution is uniform.} } \value{ The log of the probabilities for each state is returned in a numeric vector. Each number in the vector represents the probability (log) of having a probability transition vector as specified in the corresponding row of the transition matrix. } \description{ Function to evaluate the prior probability of a transition matrix. It is based on conjugate priors and therefore a Dirichlet distribution is used to model the transitions of each state. } \details{ The states (dimnames) of the transition matrix and the hyperparam may be in any order. } \note{ This function can be used in conjunction with inferHyperparam. For example, if the user has a prior data set and a prior transition matrix, he can infer the hyperparameters using inferHyperparam and then compute the probability of their prior matrix using the inferred hyperparameters with priorDistribution. } \examples{ priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), matrix(c(2, 2, 2, 2), nrow = 2, dimnames = list(c("a", "b"), c("a", "b")))) } \references{ Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} } \author{ Sai Bhargav Yalamanchi, Giorgio Spedicato }
markovchain/man/committorAB.Rd0000644000176200001440000000220313762012754016042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{committorAB} \alias{committorAB} \title{Calculates the committor of a markovchain object with respect to sets A and B} \usage{ committorAB(object,A,B,p) } \arguments{ \item{object}{a markovchain class object} \item{A}{a set of states} \item{B}{a set of states} \item{p}{initial state (default value: 1)} } \value{ Returns a vector of probabilities if the initial state is not provided, otherwise a single number } \description{ Returns the probability of hitting states from set A before set B for different initial states } \details{ The function solves a system of linear equations to calculate the probability that the process hits a state from set A before any state from set B } \examples{ transMatr <- matrix(c(0,0,0,1,0.5, 0.5,0,0,0,0, 0.5,0,0,0,0, 0,0.2,0.4,0,0, 0,0.8,0.6,0,0.5), nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr) committorAB(object,c(5),c(3)) }
markovchain/man/getName.Rd0000644000176200001440000000142413762012754015206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{name} \alias{name} \alias{name,markovchain-method} \title{Method to retrieve name of markovchain object} \usage{ name(object) \S4method{name}{markovchain}(object) } \arguments{ \item{object}{A markovchain object} } \description{ This method returns the name of a markovchain object } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) name(markovB) } \author{ Giorgio Spedicato, Deepak Yadav }
markovchain/DESCRIPTION0000644000176200001440000000525314050525162014267 0ustar liggesusersPackage: markovchain Type: Package Title: Easy Handling Discrete Time Markov Chains Version: 0.8.6 Date: 2021-05-17
Authors@R: c( person("Giorgio Alfredo", "Spedicato", role = c("aut", "cre"), email = "spedicato_giorgio@yahoo.it",comment = c(ORCID = "0000-0002-0315-8888")), person("Tae", "Seung Kang", role = "aut"), person("Sai", "Bhargav Yalamanchi", role = "aut"), person("Mildenberger", "Thoralf", role = "ctb", comment = c(ORCID = "0000-0001-7242-1873")), person("Deepak", "Yadav", role = "aut"), person("Ignacio", "Cordón", role = "aut", comment = c(ORCID = "0000-0002-3152-0231")), person("Vandit", "Jain", role = "ctb"), person("Toni", "Giorgino", role="ctb", comment = c(ORCID = "0000-0001-6449-0596")), person("Richèl J.C.", "Bilderbeek", role = "ctb", comment = c(ORCID = "0000-0003-1107-7049")), person("Daniel", "Ebbert", email = "daniel@ebbert.nrw", role = "ctb", comment = c(ORCID = "0000-0003-3666-7205")), person("Shreyash", "Maheshwari", email = "coolshreysh1000@gmail.com", role = "ctb") ) Maintainer: Giorgio Alfredo Spedicato Description: Functions and S4 methods to create and manage discrete time Markov chains more easily. In addition functions to perform statistical (fitting and drawing random variates) and probabilistic (analysis of their structural proprieties) analysis are provided. See Spedicato (2017) . License: GPL-2 Depends: R (>= 3.6.0), methods Imports: igraph, Matrix, matlab, expm, stats4, parallel, Rcpp (>= 1.0.2), RcppParallel, utils, stats, grDevices Suggests: knitr, testthat, diagram, DiagrammeR, msm, Rsolnp, rmarkdown, ctmcd, bookdown, rticles Enhances: etm VignetteBuilder: utils, knitr LinkingTo: Rcpp, RcppParallel, RcppArmadillo (>= 0.9.600.4.0) SystemRequirements: GNU make LazyLoad: yes ByteCompile: yes Encoding: UTF-8 BugReports: https://github.com/spedygiorgio/markovchain/issues URL: https://github.com/spedygiorgio/markovchain/ RoxygenNote: 7.1.1 NeedsCompilation: yes Packaged: 2021-05-17 16:17:53 UTC; giorgio Author: Giorgio Alfredo Spedicato [aut, cre] (), Tae Seung Kang [aut], Sai Bhargav Yalamanchi [aut], Mildenberger Thoralf [ctb] (), Deepak Yadav [aut], Ignacio Cordón [aut] (), Vandit Jain [ctb], Toni Giorgino [ctb] (), Richèl J.C. 
Bilderbeek [ctb] (), Daniel Ebbert [ctb] (), Shreyash Maheshwari [ctb] Repository: CRAN Date/Publication: 2021-05-17 17:40:02 UTC markovchain/build/0000755000176200001440000000000014050513453013653 5ustar liggesusersmarkovchain/build/vignette.rds0000644000176200001440000000051114050513453016207 0ustar liggesusersRK0v D$PԋᘂQBh6MJ9&ڍQC^~~!G.P߁33_9}A%Њ-B(I ER-$FJ8r˫BUSJ"&J3(Rj&qp xS}zTpHS bW\r1G5\03qd,'%AU q % 'iʭtg"^D8әy,۝)ϸdve} VK߂Uz,ˏMFABsG5U*markovchain/tests/0000755000176200001440000000000013762012754013725 5ustar liggesusersmarkovchain/tests/testthat/0000755000176200001440000000000014050525162015556 5ustar liggesusersmarkovchain/tests/testthat/setupData.R0000644000176200001440000000443013762012754017643 0ustar liggesusersnumInstances <- 150 # Instances of markov chains with all transitions positive smallNumInstances <- 20 numByRow <- numInstances / 2 maxDim <- 80 set.seed(1234567) transposed <- function(markovChains) { lapply(markovChains, function(mc) { t(mc) }) } randomDims <- sample(1:maxDim, numByRow, replace = TRUE) # Make positive matrices smaller, since it is costly to make computations on them randomPositiveDims <- sample(1:20, smallNumInstances, replace = TRUE) # Get 1:[maxDim] identity by-row-markov-chains .diagonalMCs <- lapply(1:numByRow, function(n) { new("markovchain", transitionMatrix = diag(n)) }) .colDiagonalMCs <- transposed(.diagonalMCs) # Append by-columns MarkovChains .allDiagonalMCs <- append(.diagonalMCs, .colDiagonalMCs) # Get [numByRow] random by-row-markov-chains, # with dimensions ranging from 1 to 100 .MCs <- lapply(randomDims, randomMarkovChain) .colMCs <- lapply(randomDims, function(s) { randomMarkovChain(s, byrow = FALSE) }) # Append by-columns MarkovChains .allMCs <- append(.MCs, .colMCs) mcsIndexes <- seq_along(.allMCs) diagonalIndexes <- seq_along(.allDiagonalMCs) # Markov chains with transition matrix P > 0 .positiveMCs <- lapply(randomPositiveDims, function(n) { randomMarkovChain(n, zeroProb = 0) }) .colPositiveMCs <- transposed(.positiveMCs) .allPositiveMCs <- append(.positiveMCs, .colPositiveMCs) ################################################################# # Classes and states pre-computed data ################################################################# allMCs <- lapply(.allMCs, markovchain:::precomputeData) subsetAllMCs <- sample(allMCs, smallNumInstances, replace = FALSE) steadyStatesMCs <- lapply(knownSteadyStatesMCs, markovchain:::precomputeData) allDiagonalMCs <- lapply(.allDiagonalMCs, markovchain:::precomputeData) allAndDiagonalMCs <- append(allMCs, allDiagonalMCs) allPositiveMCs <- lapply(.allPositiveMCs, function(mc) { list(object = mc, byrow = mc@byrow, states = mc@states, communicatingClasses = communicatingClasses(mc), canonicForm = canonicForm(mc), transitionMatrix = mc@transitionMatrix, regular = is.regular(mc), irreducible = is.irreducible(mc) ) }) allAndPositiveMCs <- append(allMCs, allPositiveMCs)markovchain/tests/testthat/testMarkovChainsClassification.R0000644000176200001440000000425013762012754024052 0ustar liggesuserscontext("Checking is.regular") #https://www.math.ucdavis.edu/~gravner/MAT135B/materials/ch13.pdf mc1Matrix <- matrix(c(0.5, 0.5, 0, 0.5, 0.25, 0.25, 0, 1/3, 2/3), nrow = 3, byrow = TRUE) mc1 <- as(mc1Matrix, "markovchain") test_that("Markov chains with strictly positive transition matrices are regular", { for (mc in allPositiveMCs) { expect_true(mc$regular) } }) test_that("Regularity implies ergodicity", { for (mc in allAndPositiveMCs) if (mc$regular) expect_true(mc$irreducible) }) # Perron–Frobenius 
theorem: a non negative matrix is primitive # (i.e.regular) iff 1 is the maximal unique eigen value test_that("Regularity iff a single eigen value of modulo |1|", { for (mc in allMCs) { if (mc$irreducible) { # Compute the number of eigen values greater or equal than 1 eigenValues <- eigen(mc$transitionMatrix, only.values = TRUE)$values eigenValues <- sapply(eigenValues, abs) maxEigenValues <- sapply(eigenValues, function(e) {isTRUE(all.equal(e, 1)) || e > 1 }) numMaxEigenValues <- length(which(maxEigenValues)) if (numMaxEigenValues == 1) expect_true(mc$regular) else expect_false(mc$regular) } } }) context("Checking canonicForm and is.irreducible") test_that("Markov chain is irreducible iff there is a single communicating class", { for (mc in allAndPositiveMCs) { states <- mc$states commClasses <- mc$communicatingClasses numCommClasses <- length(commClasses) irreducible <- mc$irreducible if (irreducible) expect_equal(numCommClasses, 1) if (numCommClasses == 1) expect_true(irreducible) } }) test_that("If the matrix is irreducible then the canonic form equals the Markov chain", { for (mc in allAndPositiveMCs) { canonic <- mc$canonicForm irreducible <- mc$irreducible canonicEqual <- canonic == mc$object if (irreducible) expect_true(canonicEqual) } }) test_that("Check known Markov chain is irreducible", { expect_true(is.irreducible(mc1)) }) markovchain/tests/testthat/testPlot.R0000644000176200001440000000570113762012754017531 0ustar liggesusers#library(markovchain) require(diagram) require(DiagrammeR) # P <- matrix(c(0, 0.5, 0.5, # 0.5, 0, 0.5, # 0.5, 0.5, 0), byrow=T, ncol=3) # P # k <- new("markovchain", transitionMatrix=P) # plot(k) # P <- matrix(c(0, 0.5, 0.5, # 0.5, 0, 0.5, # 0.4, 0.6, 0), byrow=T, ncol=3) # P # k <- new("markovchain", transitionMatrix=P) # plot(k) # P <- matrix(c(0, 0.5, 0.5, # 0.5, 0, 0.5, # 0.5, 0.4, 0.1), byrow=T, ncol=3) # P # k <- new("markovchain", transitionMatrix=P) # plot(k) # P <- matrix(c(0.1, 0.4, 0.5, # 0.4, 0.1, 0.5, # 0.5, 0.3, 0.2), byrow=T, ncol=3) # P # k <- new("markovchain", states = c("1", "2", "3"), transitionMatrix=P, name = "test") # plot(k) # mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), # transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, # 0.3, 0.4, 0.3, # 0.2, 0.45, 0.35), byrow = T, nrow = 3), # name = "Weather") # mcWeather # plot(mcWeather) # plot(mcWeather, package = "diagram", box.size = 0.06, main = "Weather transition matrix") # plot(mcWeather, package = "DiagrammeR", label ="Weather transition matrix", labelloc="t") # # # # # ### TESTS FOR CTMC PLOTTING # energyStates <- c("sigma", "sigma_star") # byRow <- TRUE # gen <- matrix(data = c(-3, 3, # 1, -1), nrow = 2, # byrow = byRow, dimnames = list(energyStates, energyStates)) # molecularCTMC <- new("ctmc", states = energyStates, # byrow = byRow, generator = gen, # name = "Molecular Transition Model") # plot(molecularCTMC) # plot(molecularCTMC, package = "diagram", box.size = 0.06, main = "Energy States CTMC") # plot(molecularCTMC, package = "DiagrammeR", label ="Energy States CTMC", labelloc="t") # curves <- matrix(nrow = ncol(mat), ncol = ncol(mat), 0) # curves[3, 1] <- curves[1, 6] <- -0.35 # curves[4, 6] <- curves[6, 4] <- curves[5, 6] <- curves[6, 5] <- 0.08 # curves[3, 6] <- 0.35 # plotmat(mat, pos = c(3, 2, 1), curve = curves, # name = colnames(mat), lwd = 1, box.lwd = 2, # cex.txt = 0.8, box.cex = 0.8, box.size = 0.08, # arr.length = 0.5, box.type = "circle", box.prop = 1, # shadow.size = 0.01, self.cex = 0.6, my = -0.075, mx = -0.01, # relsize = 0.9, 
self.shiftx = c(0, 0, 0.125, -0.12, 0.125, 0), # self.shifty = 0, main = "Diagram") test_that("plotting with a certain vertex color, should show that color", { markov_chain <- new( "markovchain", states = c("X"), transitionMatrix = matrix(c(1.0), nrow = 1) ) expect_silent( markovchain::plot(markov_chain, vertex.color = "white", vertex.size = 100 ) ) }) markovchain/tests/testthat/testmsm.R0000644000176200001440000000120313762012754017400 0ustar liggesusers#library(markovchain) library("msm") context("Checking conversion of objects to msm") # cav cav <- msm::cav Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) # cavmsm # qmatrix.msm(cavmsm) # qmatrix.msm(cavmsm, covariates = list(sex=1)) # pmatrix.msm(cavmsm) # .msm2Mc(cavmsm) msmMc <- as(cavmsm, "markovchain") # print(msmMc) test_that("Conversion of objects", { expect_equal(class(msmMc)=="markovchain",TRUE) })markovchain/tests/testthat/testStatisticalTests.R0000644000176200001440000000574613762012754022133 0ustar liggesuserscontext("Checking verifyEmpiricalToTheoretical") example_results <- list(statistic = 6.551795, dof = 6, pvalue = 0.3642899) test_that("Sequence data input is computed correctly", { sequence <- c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc <- matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8), byrow = TRUE, nrow = 3) rownames(mc) <- colnames(mc) <- 0:2 theoreticalMc <- as(mc, "markovchain") result <- verifyEmpiricalToTheoretical(data = sequence, object = theoreticalMc, verbose = FALSE) expect_equivalent(example_results, result) }) # TODO: check this # test_that("Matrix data input is computed correctly", { # matrix <- matrix(c(51, 11, 8, # 12, 31, 9, # 6, 11, 10), # byrow = TRUE, # nrow = 3) # rownames(matrix) <- colnames(matrix) <- 0:2 # # mc <- matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8), # byrow = TRUE, # nrow = 3) # rownames(mc) <- colnames(mc) <- 0:2 # theoreticalMc <- as(mc, "markovchain") # # result <- verifyEmpiricalToTheoretical(data = matrix, object = theoreticalMc, verbose = FALSE) # # expect_equivalent(example_results, result) # }) test_that("Input data sequences can contain missing states", { mc <- matrix(c( 1 / 10, 7 / 10, 1 / 10, 1 / 10, 1 / 10, 1 / 10, 4 / 10, 4 / 10, 1 / 10, 5 / 10, 1 / 10, 3 / 10, 1 / 10, 5 / 10, 3 / 10, 1 / 10 ), byrow = TRUE, nrow = 4 ) rownames(mc) <- c(1:4) colnames(mc) <- c(1:4) theoreticalMc <- as(mc, "markovchain") sequence <- c(1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1) result <- verifyEmpiricalToTheoretical(data = sequence, object = theoreticalMc, verbose = FALSE) expect_true(exists("result")) }) #TODO: review this part @DanielEbbert # test_that("Null hypothesis is rejected when 0 in the object matrix is not 0 in the data matrix", { # example <- matrix(c( # 0.6105, 0.1665, 0.0393, 0.1837, # 0.1374, 0.5647, 0.0637, 0.2342, # 0.3010, 0.1142, 0.3218, 0.2630, # 0.2595, 0.3109, 0.0000, 0.4296 # ), # byrow = TRUE, # nrow = 4 # ) # rownames(example) <- c(1:4) # colnames(example) <- c(1:4) # # mc <- matrix(c( # 0.00, 1.00, 0.00, 0.00, # 0.00, 0.00, 0.50, 0.50, # 0.00, 0.75, 0.00, 0.25, # 0.00, 0.75, 0.25, 0.00 # ), # byrow = TRUE, # nrow = 4 # ) # rownames(mc) <- c(1:4) # 
colnames(mc) <- c(1:4) # theoreticalMc <- as(mc, "markovchain") # # result <- verifyEmpiricalToTheoretical(data = example, object = theoreticalMc, verbose = FALSE) # # expect_equivalent(result$pvalue, 0) # })markovchain/tests/testthat/testctmc.R0000644000176200001440000000733713762012754017550 0ustar liggesuserslibrary(markovchain) context("Checking that ExpectedTime function works as expected") # Example from the book Markovchains, J. R. Norris, Cambridge University Press states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") test_that("Check Expected hitting time from one state to another",{ expect_equal(ExpectedTime(ctmc,1,4),7) expect_equal(ExpectedTime(ctmc,2,4),5.5) }) context("Checking that probabilityatT function works as expected") # TESTS for probabilityatT function # Example taken from the book INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley states <- c("a","b","c","d","e") # taken exactly from book ansMatrix <- matrix(data = c(0.610, 0.290, 0.081, 0.016, 0.003, 0.232, 0.443, 0.238, 0.071, 0.017, 0.052, 0.190, 0.435, 0.238, 0.085, 0.008, 0.045, 0.191, 0.446, 0.310, 0.001, 0.008, 0.054, 0.248, 0.688),nrow = 5,byrow = T,dimnames = list(states,states)) byRow <- TRUE gen <- matrix(c(-1/4,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-1/5), nrow=5,byrow=byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") test_that("Check probabilityatT using a ctmc object:",{ expect_equal(round(probabilityatT(ctmc,2.5),3),ansMatrix) }) ### Adds tests for impreciseprobabilityatT function context("Checking that impreciseprobabilityatT function works as expected:") states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = T,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) test_that("Check impreciseProbabilityatT function using an ictmc object:",{ expect_equal(round(impreciseProbabilityatT(ictmc,2,0,1,error = 10^-3),4),c(0.0083,0.1410)) }) ### Adds tests for freq2Generator function sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) answer <- matrix(c( -0.024, 0.015, 0.009, 0, 0.007, -0.018, 0.012, 0, 0.013 , 0.007, -0.021, 0, 0.000, 0.000, 0.000, 0 ),nrow = 4,byrow = TRUE) test_that("Check if ",{ expect_equal(round(freq2Generator(sample_rel,1),3),answer) }) ### tests for is.CTMCirreducible fcuntion energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") test_that("is.CTMCirreducible works", { expect_equal(is.CTMCirreducible(molecularCTMC),TRUE) }) ### tests for is.TimeReversible function energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") 
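# Hand-check of the detailed-balance criterion behind is.TimeReversible
# (illustrative sketch, assuming the usual characterisation: a CTMC with
# generator Q and stationary distribution pi is time reversible iff
# pi_i * Q[i, j] == pi_j * Q[j, i] for all i, j).
# For the generator above pi = (1/4, 3/4), so pi_1 * q_12 = 3/4 = pi_2 * q_21.
statDist <- as.numeric(steadyStates(molecularCTMC))  # c(0.25, 0.75)
probFlux <- statDist * gen                           # probFlux[i, j] = pi_i * q_ij
detailedBalance <- isTRUE(all.equal(probFlux, t(probFlux)))  # TRUE, consistent with the expectation below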
test_that("is.TimeReversible works", { expect_equal(is.TimeReversible(molecularCTMC),TRUE) }) markovchain/tests/testthat/testFits.R0000644000176200001440000000165213762012754017521 0ustar liggesuserscontext("Checking that fitting works") #load & prepare data data(rain) data(holson) myHolson<-as.matrix(holson[,-1]); rownames(myHolson)<-holson$id test_that("Check createSequenceMatrix", { expect_equal(createSequenceMatrix(rain$rain), checksAlofiRawTransitions) }) #data preparation ciao<-c("a","a","b","b","a",NA,"b","a","b","a","a") test_that("Check markovchainFit & listFit", { expect_equal(markovchainFit(ciao), simpleMcCiaoFit) expect_equal(markovchainListFit(data=myHolson), checkmarkovchainFitList) }) #### tests for noofVisitsDist function transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") answer <- c(0.348148, 0.651852) names(answer) <- c("a","b") test_that("Check noofVisitsDist works", { expect_equal(noofVisitsDist(simpleMc,5,"a"),answer) }) markovchain/tests/testthat/testOptimization.R0000644000176200001440000000147113762012754021301 0ustar liggesuserslibrary(markovchain) #create basic markov chains require(matlab) mathematicaMatr <- zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) # ####end creating DTMC # test .gcdRcpp function, .commClassesKernelRcpp function, .commStatesFinderRcpp function context("Optimization of functions") test_that("Optimized functions should satisfy", { expect_equal(.gcdRcpp(9, 12), 3) # .gcdRcpp function is also tested in testPeriod.R }) markovchain/tests/testthat/testHittingProbabilities.R0000644000176200001440000000414513762012754022733 0ustar liggesuserscontext("Checking hittingProbabilities method") test_that("Hitting probabilities of identity markov chain is identity", { for (mc in allDiagonalMCs) { states <- mc$states numStates <- length(states) result <- diag(numStates) rownames(result) <- states colnames(result) <- states hittingProbabilities <- mc$hittingProbabilities expect_equal(hittingProbabilities, result) } }) test_that("Hitting probabilities hold their characteristic system and are non negative", { # Check that the following recurrence holds, # naming p = probs, f = hitting, it checks: # # f(i, j) = p(i, j) + ∑_{k ≠ j} p(i, k) f(k, j) for (mc in allMCs) { probs <- mc$transitionMatrix byrow <- mc$byrow hitting <- mc$hittingProbabilities #expect_true(all(hitting >= 0)) expect_true(.testthatAreHittingRcpp(probs, hitting, byrow)) } }) test_that("All hitting probabilities are 1 iff the Markov chain is irreducible", { for (mc in allMCs) { hitting <- mc$hittingProbabilities hittingOne <- .testthatHittingAreOneRcpp(hitting) irreducible <- mc$irreducible if (irreducible) expect_true(hittingOne) if (hittingOne) expect_true(irreducible) } }) # Test with a matrix with known hitting probabilities # Taken from the book Procesos Estocásticos, Ricardo Vélez & Tomás Prieto test_that("Tests hitting probabilities for a known markov chain", { # For mcHitting defined in data-raw/db4Tests.R result <- matlab::zeros(5, 5) result[1,1] <- result[5,5] <- 1 result[2,1] <- 4/5 result[3,1] <- 3/5 result[4,1] <- 2/5 result[2,2] <- 3/8 result[3,2] <- 3/4 result[4,2] 
<- 1/2 result[2,3] <- 1/2 result[3,3] <- 3/8 result[4,3] <- 1/4 result[2,4] <- 1/3 result[3,4] <- 2/3 result[4,4] <- 1/6 result[2,5] <- 1/5 result[3,5] <- 2/5 result[4,5] <- 3/5 rownames(result) <- mcHitting@states colnames(result) <- mcHitting@states expect_equal(hittingProbabilities(mcHitting), result) expect_equal(hittingProbabilities(t(mcHitting)), t(result)) })markovchain/tests/testthat/testMultinomCI.R0000644000176200001440000000327013762012754020632 0ustar liggesusers#library(markovchain) seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcfit<-markovchainFit(data=seq,byrow=TRUE) # print(mcfit) seqmat<-createSequenceMatrix(seq) seqmat mCI <- .multinomialCIRcpp(mcfit$estimate@transitionMatrix, seqmat, 0.95) # print(mCI) ####end of creating multinomialCI context("Multinomial confidence interval") test_that("multinomial CI statisfay", { # expect_equal(mCI$lowerEndpointMatrix, matrix(c(0.2222222,0.3333333, # 0.5714286,0.1428571),nrow=2, byrow=TRUE, dimnames=list(c("a","b"), # c("a","b")) # )) # expect_equal(mCI$upperEndpointMatrix, matrix(c(0.8111456,0.9222567, # 1,0.6839473),nrow=2, byrow=TRUE, dimnames=list(c("a","b"), # c("a","b")) # )) expect_equal(mCI$upperEndpointMatrix[2,1],1) }) # Multinomial distribution with 3 classes, from which 79 samples # were drawn: 23 of them belong to the first class, 12 to the # second class and 44 to the third class. Punctual estimations # of the probabilities from this sample would be 23/79, 12/79 # and 44/79 but we want to build 95% simultaneous confidence intervals # for the true probabilities # m = multinomialCI(c(23,12,44), 0.05) # print(paste("First class: [", m[1,1], m[1,2], "]")) # print(paste("Second class: [", m[2,1], m[2,2], "]")) # print(paste("Third class: [", m[3,1], m[3,2], "]")) # seq<-c(4, 5) # m = multinomialCI(seq, 0.05) # m markovchain/tests/testthat/testStatesClassification.R0000644000176200001440000001477113762012754022741 0ustar liggesuserscontext("Checking classification of states: recurrentStates, transientStates, absorbingStates") A <- structure(c(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.3, 0.3, 0, 0, 0, 0, 0, 0.5, 0.7, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.4, 0, 0, 0, 0, 0, 0.4, 0, 0.5, 0, 0, 0, 0, 0, 0.6, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 1), .Dim = c(8L, 8L), .Dimnames = list(c("1", "2", "3", "4", "5", "6", "7", "8"), c("1", "2", "3", "4", "5", "6", "7", "8"))) mchain <- new("markovchain", transitionMatrix = A) mc1Matrix <- matrix(c(0, 0, 1/2, 1/2, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0), ncol = 4, byrow=TRUE) mc1 <- as(mc1Matrix, "markovchain") mc2Matrix <- matrix(c(0, 1, 0, 0, 0, 0, 0.4, 0.6, 0, 0, 0, 0, 0.3, 0, 0.4, 0.2, 0.1, 0, 0, 0, 0, 0.3, 0.7, 0, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0, 0.3, 0, 0.7), nrow = 6, byrow=TRUE) mc2 <- as(mc2Matrix,"markovchain") mc3Matrix <- matlab::zeros(5) mc3Matrix[1:2,1:2] <- 0.5*matlab::ones(2) mc3Matrix[5,1] <- 1 mc3Matrix[3,3] <- 1 mc3Matrix[4,3:4] <- 0.5 mc3 <- as(mc3Matrix,"markovchain") test_that("Test recurrent / transient / absorbing states for known Markov chains", { expect_equal(recurrentClasses(mc1), list(c("s1","s2","s3","s4"))) expect_equal(recurrentClasses(mc2), list(c("s1","s2"),c("s4","s5","s6") )) expect_equal(transientStates(mc2), "s3") expect_equal(recurrentClasses(mc3), list(c("s1","s2"),c("s3"))) expect_equal(absorbingStates(mc3), "s3") expect_equal(transientStates(mc3), c("s4","s5")) expect_equal(recurrentClasses(mchain), list(c("3", "4"), c("8"))) expect_equal(transientStates(mchain), c("1", "2", "5", "6", "7")) 
expect_equal(absorbingStates(mchain), "8") }) test_that("A state is absorbing iff it is singleton recurrent class", { for (mc in allMCs) { states <- mc$states classes <- mc$recurrentClasses absorbing <- mc$absorbingStates expect_true(.testthatAbsorbingAreRecurrentClassRcpp(absorbing, classes)) } }) test_that("Recurrent states and transient states are a partition of states", { for (mc in allMCs) { states <- mc$states recurrentStates <- mc$recurrentStates transientStates <- mc$transientStates states <- mc$states statesUnion <- sort(unique(append(recurrentStates, transientStates))) expect_equal(statesUnion, sort(states)) } }) test_that("hittingProb(i,i) < 1 for i a transient state", { for (mc in allMCs) { transStates <- mc$transientStates hitting <- mc$hittingProbabilities transientHittingLessOne <- all(sapply(transStates, function(s){ hitting[s, s] < 1})) expect_true(transientHittingLessOne) } }) test_that("All states are recurrent in a identity Markov chain", { for (mc in allDiagonalMCs) { states <- mc$states recurrentStates <- mc$recurrentStates expect_true(setequal(recurrentStates, states)) } }) test_that("If Markov chain is irreducible then all states are recurrent", { for (mc in allMCs) { states <- mc$states recurrent <- mc$recurrentStates irreducible <- mc$irreducible allRecurrent <- setequal(states, recurrent) if (irreducible) expect_true(allRecurrent) } }) test_that("If there are transient states then Markov chain is not irreducible", { for (mc in allMCs) { states <- mc$states transient <- mc$transientStates irreducible <- mc$irreducible if (length(transient) > 0) expect_false(irreducible) } }) context("Checking recurrentClasses method") P <- matlab::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") test_that("Checking recurrent classes for known Markov chains", { expect_equal(recurrentClasses(probMc), list(c("a", "c") , c("b", "g", "i") , c("f"))) }) test_that("hittingProb(i,j) = 1 for i, j in same recurrent class, hittingProb(i, k) = 0 for k otherwise", { for (mc in allMCs) { byrow <- mc$byrow states <- mc$states hitting <- mc$hittingProbabilities recurrentClasses <- mc$recurrentClasses expect_true(.testthatRecurrentHittingRcpp(recurrentClasses, hitting, states, byrow)) } }) test_that("Union of recurrentClasses is recurrentStates", { for (mc in allMCs) { recClasses <- mc$recurrentClasses recStates <- as.character(sort(unlist(recClasses))) target <- sort(mc$recurrentStates) expect_equal(recStates, target) } }) test_that("Recurrent classes are disjoint", { for (mc in allMCs) { recClasses <- mc$recurrentClasses lengthRecClasses <- sapply(recClasses, function(c){ length(c) }) numRecurrentStates <- ifelse(length(recClasses) > 0, sum(lengthRecClasses), 0) numUnion <- length(unique(unlist(recClasses))) expect_equal(numRecurrentStates, numUnion) } }) context("Checking transientClasses method") test_that("Checking recurrent classes for known Markov chains", { expect_equal(transientClasses(probMc), list(c("d", "e"), c("h"), c("j"))) }) test_that("Union of transientClases is transientStates", { for (mc in allMCs) { transClasses <- mc$transientClasses # as.character forces the result to be a char vector when it is empty transStates <- as.character(sort(unlist(transClasses))) target <- 
sort(mc$transientStates) expect_equal(transStates, target) } }) test_that("Transient classes are disjoint", { for (mc in allMCs) { transClasses <- mc$transientClasses lengthTransClasses <- sapply(transClasses, function(c){ length(c) }) numTransientStates <- ifelse(length(transClasses) > 0, sum(lengthTransClasses), 0) numUnion <- length(unique(unlist(transClasses))) expect_equal(numTransientStates, numUnion) } })markovchain/tests/testthat/testPeriod.R0000644000176200001440000000302613762012754020033 0ustar liggesusers# Period examples from http://www.math.wisc.edu/~anderson/605F11/Notes/StochBioChapter3.pdf #library(markovchain) mcPeriodic<-new("markovchain", states=c("0","1","2"), transitionMatrix= matrix(c(0,1,0, 0,0,1, 1,0,0),nrow=3, byrow=TRUE, dimnames=list(c("0","1","2"), c("0","1","2")) )) myMatr<-matrix(c(0 , 0 , 1/2 , 1/4 , 1/4 , 0 , 0, 0 , 0 , 1/3 , 0 , 2/3, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1/3 , 2/3 , 0 , 0 , 0 , 0 , 0 , 1/2 , 1/2 , 0 , 0 , 0 , 0 , 0 , 3/4 , 1/4 , 1/2, 1/2 , 0 , 0 , 0 , 0 , 0 , 1/4, 3/4 , 0 , 0 , 0 , 0 , 0),byrow=TRUE, nrow = 7) mcPeriodic2<-as(myMatr, "markovchain") mcAperiodic<-new("markovchain", states=c("0","1","2","3","4"), transitionMatrix= matrix(c(1/2,1/2,0,0,0, 1/2,0,1/2,0,0, 0,1/2,0,1/2,0, 0,0,1/2,0,1/2, 0,0,0,1,0),nrow=5, byrow=TRUE, dimnames=list(c("0","1","2","3","4"), c("0","1","2","3","4")) )) ####end creating DTMC context("Basic DTMC proprieties") test_that("States are those that should be", { expect_equal(period(mcPeriodic),3) expect_equal(period(mcPeriodic2),3) expect_equal(period(mcAperiodic),1) })markovchain/tests/testthat/testSteadyStates.R0000644000176200001440000000305013762012754021223 0ustar liggesuserscontext("Checking steadyStates method") testMCs <- append(allAndDiagonalMCs, steadyStatesMCs) test_that("Num of steady states is the same as num of recurrent classes", { for (mc in testMCs) { byrow <- mc$byrow steady <- mc$steadyStates numSteadyStates <- ifelse(byrow, nrow(steady), ncol(steady)) numRecurrentClasses <- length(mc$recurrentClasses) expect_equal(numSteadyStates, numRecurrentClasses) } }) test_that("Steady states are prob vectors", { for (mc in testMCs) { byrow <- mc$byrow steady <- mc$steadyStates margin <- ifelse(byrow, 1, 2) steadyAreProbVectors <- all(apply(steady, MARGIN = margin, .isProbabilityVector)) expect_true(steadyAreProbVectors) } }) test_that("Steady states are linearly independent vectors", { for (mc in testMCs) { byrow <- mc$byrow steady <- mc$steadyStates rank <- rankMatrix(steady)[[1]] expect_equal(rank, min(nrow(steady), ncol(steady))) } }) test_that("Steady states v are eigen vectors, i.e. 
vP = v (by rows) or Pv = v (by cols)", { for (mc in testMCs) { byrow <- mc$byrow steady <- mc$steadyStates P <- mc$transitionMatrix margin <- ifelse(byrow, 1, 2) areEigenVectors <- apply(steady, MARGIN = margin, function(v) { v <- as.numeric(v) if (byrow) result <- as.numeric(v %*% P) else result <- as.numeric(P %*% v) all.equal(result, v) }) expect_true(all(areEigenVectors)) } }) markovchain/tests/testthat/testetm.R0000644000176200001440000000263313762012754017401 0ustar liggesusers#library(markovchain) context("Checking conversion of objects to etm") check_etm_availability <- function(){ is_available <- require("etm") if (!is_available) { skip("etm package unavailable") } } get_etm_transition_matrix <- function() { require(etm) data(sir.cont) # Modification for patients entering and leaving a state # at the same date # Change on ventilation status is considered # to happen before end of hospital stay sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } ### Computation of the transition probabilities # Possible transitions. tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE # print(tra) # etm tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) return(tr.prob) } # tr.prob # df<-tr.prob$trans # df # summary(tr.prob) # etm2mc test_that("Conversion of objects", { check_etm_availability() #check package availablity obj_to_test <- get_etm_transition_matrix() #obtain the etm obj etm2mc<-as(obj_to_test, "markovchain") #try to convert expect_equal(class(etm2mc)=="markovchain",TRUE) }) markovchain/tests/testthat/testBasic.R0000644000176200001440000004437613762012754017647 0ustar liggesusers#library(markovchain) #create basic markov chains markov1<-new("markovchain", states=c("a","b","c"), transitionMatrix= matrix(c(0.2,0.5,0.3, 0,1,0, 0.1,0.8,0.1),nrow=3, byrow=TRUE, dimnames=list(c("a","b","c"), c("a","b","c")) )) require(matlab) mathematicaMatr <- zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ####end creating DTMC context("Basic DTMC proprieties") test_that("States are those that should be", { expect_equal(absorbingStates(markov1), "b") expect_equal(transientStates(markov1), c("a","c")) expect_equal(is.irreducible(mathematicaMc), FALSE) expect_equal(transientStates(mathematicaMc), c("a","b")) expect_equal(is.accessible(mathematicaMc, "a", "c"), TRUE) expect_equal(.recurrentClassesRcpp(mathematicaMc), list(c("c", "d"), c("e"))) # expect_equal(summary(mathematicaMc), list(closedClasses = list(c("c", "d"), c("e")), # recurrentClasses = list(c("c", "d"), c("e")), # transientClasses = list(c("a", "b")))) }) ###testing proper conversion of objects context("Conversion of objects") provaMatr2Mc<-as(mathematicaMatr,"markovchain") test_that("Conversion of objects", { expect_equal(class(provaMatr2Mc)=="markovchain",TRUE) }) ### Markovchain Fitting sequence1 <- c("a", "b", "a", "a", NA, "a", "a", NA) sequence2 <- c(NA, "a", "b", NA, "a", "a", "a", NA, "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a", NA) mcFit <- markovchainFit(data = sequence1, byrow = FALSE, sanitize = 
TRUE) mcFit2 <- markovchainFit(c("a","b","a","b"), sanitize = TRUE) test_that("Fit should satisfy", { expect_equal((mcFit["logLikelihood"])[[1]], log(1/3) + 2*log(2/3)) expect_equal(markovchainFit(data = sequence2, method = "bootstrap")["confidenceInterval"] [[1]]["confidenceLevel"][[1]], 0.95) expect_equal(mcFit2$upperEndpointMatrix, matrix(c(0,1,1,0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) }) ### Markovchain Fitting for bigger markov chain bigseq <- rep(c("a", "b", "c"), 500000) bigmcFit <- markovchainFit(bigseq) test_that("MC Fit for large sequence 1", { expect_equal(bigmcFit$logLikelihood, 0) expect_equal(bigmcFit$confidenceLevel, 0.95) expect_equal(bigmcFit$estimate@transitionMatrix, bigmcFit$upperEndpointMatrix) }) bigmcFit <- markovchainFit(bigseq, sanitize = TRUE) test_that("MC Fit for large sequence 2", { expect_equal(bigmcFit$logLikelihood, 0) expect_equal(bigmcFit$confidenceLevel, 0.95) expect_equal(bigmcFit$estimate@transitionMatrix, bigmcFit$upperEndpointMatrix) }) ### Markovchain Fitting For dataframe or matrix as an input matseq <- matrix(c("a", "b", "c", NA ,"b", "c"), nrow = 2, byrow = T) # for matrix as input test_that("Markovchain Fit for matrix as input", { # for matrix as input expect_equal(markovchainFit(matseq)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(markovchainFit(matseq, sanitize = TRUE)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) # for data frame as input expect_equal(markovchainFit(as.data.frame(matseq))$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(markovchainFit(as.data.frame(matseq), sanitize = TRUE)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) ### Markovchain Fitting(mle) with sanitize parameter mle_sequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") mle_fit1 <- markovchainFit(mle_sequence) mle_fit2 <- markovchainFit(mle_sequence, sanitize = TRUE) test_that("MarkovchainFit MLE", { expect_equal(mle_fit1$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(mle_fit2$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(mle_fit1$logLikelihood, mle_fit2$logLikelihood) expect_equal(mle_fit1$confidenceInterval, mle_fit2$confidenceInterval) expect_equal(mle_fit2$standardError, mle_fit2$standardError) }) ### Markovchain Fitting(laplace) with sanitize parameter lap_sequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") lap_fit1 <- markovchainFit(lap_sequence, "laplace") lap_fit2 <- markovchainFit(lap_sequence, "laplace", sanitize = TRUE) test_that("Markovchain Laplace", { expect_equal(lap_fit1$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(lap_fit2$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames 
= list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(lap_fit1$logLikelihood, lap_fit2$logLikelihood) }) ### Markovchain Fitting when some states are not present in the given sequence mix_seq <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") mix_fit1 <- markovchainFit(mix_seq, "mle", sanitize = TRUE, possibleStates = c("d")) mix_fit2 <- markovchainFit(mix_seq, "laplace", sanitize = TRUE, possibleStates = c("d")) mix_fit3 <- markovchainFit(mix_seq, "map", sanitize = TRUE, possibleStates = c("d")) test_that("Mixture of Markovchain Fitting", { expect_equal(mix_fit2$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) expect_equal(mix_fit1$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) expect_equal(mix_fit3$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) }) ### Test for createSequenceMatrix rsequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") test_that("createSequenceMatrix : Permutation of parameters",{ expect_equal(createSequenceMatrix(rsequence, FALSE, FALSE), matrix(c(4, 4, 0, 3, 3, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, FALSE, TRUE), matrix(c(4, 4, 0, 3, 3, 1, 1, 1, 1), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, TRUE, FALSE), matrix(c(4/8, 4/8, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, TRUE, TRUE), matrix(c(4/8, 4/8, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) ### Test for createSequenceMatrix : input {n x n} matrix data <- matrix(c("a", "a", "b", "a", "b", "a", "b", "a", NA, "a", "a", "a", "a", "b", NA, "b"), ncol = 2, byrow = TRUE) test_that("createSequenceMatrix : input as matrix",{ expect_equal(createSequenceMatrix(data), matrix(c(2, 1, 3, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) expect_equal(createSequenceMatrix(data, toRowProbs = TRUE), matrix(c(2/3, 1/3, 3/3, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) expect_equal(createSequenceMatrix(data, toRowProbs = TRUE, possibleStates = "d", sanitize = TRUE), matrix(c(2/3, 1/3, 0, 1, 0, 0, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "d"), c("a", "b", "d")))) }) ### Test for markovchainSequence and rmarkovchain statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) s1 <- markovchainSequence(10, mcB) s2 <- markovchainSequence(10, mcB, include.t0 = TRUE) s3 <- markovchainSequence(10, mcB, t0 = "b", include.t0 = TRUE) s4 <- markovchainSequence(10, mcB, useRCpp = FALSE) s5 <- markovchainSequence(10, mcB, include.t0 = TRUE, useRCpp = FALSE) s6 <- markovchainSequence(10, mcB, t0 = "b", include.t0 = 
TRUE, useRCpp = FALSE) test_that("Output format of markovchainSequence", { expect_equal(length(s1), 10) expect_equal(length(s2), 11) expect_equal(length(s3), 11) expect_equal(s3[1], "b") expect_equal(length(s4), 10) expect_equal(length(s5), 11) expect_equal(length(s6), 11) expect_equal(s6[1], "b") }) statesNames <- c("a", "b", "c") mcA <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcC <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) o1 <- rmarkovchain(15, mclist, "list") o2 <- rmarkovchain(15, mclist, "matrix") o3 <- rmarkovchain(15, mclist, "data.frame") o4 <- rmarkovchain(15, mclist, "list", t0 = "a", include.t0 = TRUE) o5 <- rmarkovchain(15, mclist, "matrix", t0 = "a", include.t0 = TRUE) o6 <- rmarkovchain(15, mclist, "data.frame", t0 = "a", include.t0 = TRUE) test_that("Output format of rmarkovchain", { expect_equal(length(o1), 15) expect_equal(length(o1[[1]]), 3) expect_equal(all(dim(o2) == c(15, 3)), TRUE) expect_equal(all(dim(o3) == c(45, 2)), TRUE) expect_equal(length(o4), 15) expect_equal(length(o4[[1]]), 4) expect_equal(o4[[1]][1], "a") expect_equal(all(dim(o5) == c(15, 4)), TRUE) expect_equal(all(o5[, 1] == "a"), TRUE) expect_equal(all(dim(o6) == c(60, 2)), TRUE) }) ### MAP fit function tests data1 <- c("a", "b", "a", "c", "a", "b", "a", "b", "c", "b", "b", "a", "b") data2 <- c("c", "a", "b") test_that("MAP fits must satisfy", { expect_identical(markovchainFit(data1, method = "map")$estimate@transitionMatrix, markovchainFit(data1, method = "mle")$estimate@transitionMatrix) expect_identical(markovchainFit(data1, method = "map")$estimate@transitionMatrix, matrix(c(0.0, 0.6, 0.5, 0.8, 0.2, 0.5, 0.2, 0.2, 0.0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_identical(markovchainFit(data1, method = "map", hyperparam = matrix(c(2, 1, 3, 4, 5, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c"))))$estimate@transitionMatrix, matrix(c(1/10, 3/10, 3/5, 7/10, 5/10, 2/5, 2/10, 2/10, 0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) test_that("predictiveDistribution must satisfy", { expect_equal(predictiveDistribution(data1, character()), 0) expect_equal(predictiveDistribution(data1, data2, hyperparam = matrix(c(2, 1, 3, 4, 5, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))), log(4 / 13)) }) test_that("inferHyperparam must satisfy", { expect_identical(inferHyperparam(data = data1)$dataInference, matrix(c(1, 4, 2, 5, 2, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_identical(inferHyperparam(transMatr = matrix(c(0.0, 0.6, 0.5, 0.8, 0.2, 0.5, 0.2, 0.2, 0.0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c"))), scale = c(10, 10, 10))$scaledInference, matrix(c(0, 6, 5, 8, 2, 5, 2, 2, 0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) pDRes <- c(log(3/2), log(3/2)) names(pDRes) <- c("a", "b") test_that("priorDistribution must sastisfy", { expect_equal(priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), nrow = 2, dimnames 
= list(c("a", "b"), c("a", "b"))), matrix(c(2, 2, 2, 2), nrow = 2, dimnames = list(c("a", "b"), c("a", "b")))), pDRes) }) energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") test_that("steadyStates must satisfy", { expect_identical(steadyStates(molecularCTMC), matrix(c(1/4, 3/4), nrow = 1, dimnames = list(c(), energyStates))) }) ### Tests for expectedRewards function ### Examples taken from Stochastic Processes: Theory for Applications, Robert G. Gallager,Cambridge University Press transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) test_that("expectedRewards must satisfy", { expect_equal(expectedRewards(simpleMc,1,c(0,1)),c(0.01,1.99)) expect_equal(expectedRewards(simpleMc,2,c(0,1)),c(0.0298,2.9702)) }) ### Tests for committorAB function transMatr <- matrix(c(0,0,0,1,0.5, 0.5,0,0,0,0, 0.5,0,0,0,0, 0,0.2,0.4,0,0, 0,0.8,0.6,0,0.5),nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") answer <- c(0.444,0.889,0.000,0.444,1.000) names <- c("a","b","c","d","e") names(answer) <- names test_that("committorAB must satisfy", { expect_equal(round(committorAB(object,c(5),c(3)),3),answer) }) ### Tests for firstPassageMultiple function statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) answer <- matrix(c(.8000, 0.6000, 0.2540 ),nrow = 3,dimnames = list(c("1","2","3"),"set")) test_that("firstPassageMultiple function satisfies", { expect_equal(firstPassageMultiple(testmarkov,"a",c("b","c"),3),answer) }) # See https://github.com/spedygiorgio/markovchain/issues/171 for context # Test that closed classes = recurrent classes M <- matrix(0,nrow=10,ncol=10,byrow=TRUE) M[1,2]<- 0.7 M[1,3]<- 0.3 M[2,3]<- 1 M[3,4]<- 1 M[4,1]<- 1 M[5,6]<- 1 M[6,7]<- 1 M[7,8]<- 1 M[8,9]<- 1 M[9,10]<- 1 M[10,1]<- 1 markovChain <- new("markovchain",transitionMatrix=M) context("Checking is.accesible") test_that("is accesible is equivalent to reachability matrix", { for (mc in subsetAllMCs) { expect_true(.testthatIsAccesibleRcpp(mc$object)) } })markovchain/tests/testthat/testCommClasses.R0000644000176200001440000000611313762012754021022 0ustar liggesuserscontext("Checking .commClassesKernelRcpp method") # Not very good in efficiency, but it serves its purpose though # O(n³) implementation checkInterchangeability <- function(matrix) { # Matrix should be square matrix n <- ncol(matrix) correctCommClasses <- sapply(1:n, function(i) { currentRow <- matrix[i, ] onesIdx <- which(currentRow) whichEqual <- sapply(onesIdx, function(j) { matrix[j, ] == currentRow}) # Is there any row unequal to the row taken as ref any(!whichEqual) }) any(!correctCommClasses) } test_that("Communicating classes matrix is symmetric", { for (mc in allMCs) { if (mc$byrow) { transitionMatrix <- mc$transitionMatrix communicating <- .commClassesKernelRcpp(transitionMatrix) C <- communicating$classes expect_equal(C, t(C)) } } }) test_that("Rows of the same class are interchangeable in a communicating classes matrix", { for (mc in allMCs) { if (mc$byrow) { transitionMatrix <- mc$transitionMatrix communicating <- 
.commClassesKernelRcpp(transitionMatrix) C <- communicating$classes expect_equal(checkInterchangeability(C), TRUE) } } }) test_that("Communicating classes of identity matrix of size n are {1, ..., n}", { for (mc in allDiagonalMCs) { if (mc$byrow) { transitionMatrix <- mc$transitionMatrix states <- mc$states expected <- as.matrix(apply(transitionMatrix, 1, function(x){ x == 1 })) colnames(expected) <- states rownames(expected) <- states communicating <- .commClassesKernelRcpp(transitionMatrix) C <- communicating$classes expect_equal(C, expected) } } }) test_that("All clasess are closed for identity matrixes", { for (mc in allDiagonalMCs) { if (mc$byrow) { transitionMatrix <- mc$transitionMatrix areClosed <- .commClassesKernelRcpp(transitionMatrix)$closed expect_true(all(areClosed)) } } }) test_that("Communicating class matrix is correct", { for (mc in allMCs) { if (mc$byrow) { # P transitionMatrix <- mc$transitionMatrix n <- ncol(transitionMatrix) # The communicating matrix has a 1 in an entry (i,j) iff # P'^{n - 1} has a positive number in its entries (i,j) and (j,i) # When we say P' we refer to making i always communicate with itself p_n <- (transitionMatrix + diag(n)) %^% (n - 1) > 0 commClasses <- .commClassesKernelRcpp(transitionMatrix)$classes # Correct the diagonal to be always positive # (i always communicates with itself) expectedCommMatrix <- (p_n * t(p_n)) > 0 expect_true(all(commClasses == expectedCommMatrix)) } } }) context("Checking communicatingClasses method") test_that("Communicating classes are a partition of states", { for (mc in allAndDiagonalMCs) { states <- mc$states commClasses <- mc$communicatingClasses expect_true(.testthatIsPartitionRcpp(commClasses, states)) } }) markovchain/tests/testthat/testHigherOrder.R0000644000176200001440000000547613762012754021026 0ustar liggesusers#library(markovchain) library(Rsolnp) # Example from Ching, W. K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov chains. # In Markov Chains (pp. 141-176). Springer US. sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b", "c", "a", "b", "c", "a", "b", "a", "b") # mcFit<-fitHigherOrder(sequence, 3) " $lambda [1] 9.999995e-01 3.242176e-07 2.165397e-07 $Q $Q[[1]] a b c a 0.125 0.4285714 0.75 b 0.750 0.1428571 0.25 c 0.125 0.4285714 0.00 $Q[[2]] a b c a 0.1428571 0.5714286 0.25 b 0.4285714 0.2857143 0.75 c 0.4285714 0.1428571 0.00 $Q[[3]] a b c a 0.7142857 0.0000000 0.25 b 0.2857143 0.6666667 0.25 c 0.0000000 0.3333333 0.50 $X a b c 0.4 0.4 0.2 " mcFit<-fitHigherOrder(sequence) " $lambda [1] 1.000000e+00 1.626306e-08 $Q $Q[[1]] a b c a 0.125 0.4285714 0.75 b 0.750 0.1428571 0.25 c 0.125 0.4285714 0.00 $Q[[2]] a b c a 0.1428571 0.5714286 0.25 b 0.4285714 0.2857143 0.75 c 0.4285714 0.1428571 0.00 $X a b c 0.4 0.4 0.2 " # print(mcFit) # From the original paper by Y.Ye # see the unit tests for more.... 
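# Illustrative sketch of how the fitted pieces combine in the higher-order
# model of Ching et al.: X_{n+1} = sum_i lambda_i * Q_i %*% X_{n-i+1}.
# (Assumption: each fitted Q_i is column-stochastic, as built by seq2matHigh,
# so Q_i %*% x maps a state distribution x to a next-step distribution.)
# Setting every lagged distribution to the fitted frequencies mcFit$X gives
# the one-step-ahead distribution:
nextDist <- Reduce(`+`, Map(function(l, Q) l * (Q %*% mcFit$X),
                            mcFit$lambda, mcFit$Q))
# nextDist sums to 1 and should stay close to mcFit$X when the fit is good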
#--------------------------------------------------------------------------------- # POWELL Problem # x0 = c(-2, 2, 2, -1, -1) # powell=Rsolnp::solnp(x0, fun = fn1, eqfun = eqn1, eqB = c(10, 0, -1)) # print(powell) # Iter: 1 fn: 0.03526 Pars: -1.59385 1.51051 2.07795 -0.81769 -0.81769 # Iter: 2 fn: 0.04847 Pars: -1.74461 1.62029 1.80509 -0.77020 -0.77020 # Iter: 3 fn: 0.05384 Pars: -1.71648 1.59482 1.82900 -0.76390 -0.76390 # Iter: 4 fn: 0.05395 Pars: -1.71713 1.59570 1.82727 -0.76364 -0.76364 # Iter: 5 fn: 0.05395 Pars: -1.71714 1.59571 1.82725 -0.76364 -0.76364 # solnp--> Completed in 6 iterations # $pars # [1] -1.7171436 1.5957097 1.8272457 -0.7636431 -0.7636430 # # $convergence # [1] 0 # # $values # [1] 0.0003354626 0.0352631086 0.0484680325 0.0538408088 0.0539495955 # [6] 0.0539498477 0.0539498478 # # $lagrange # [,1] # [1,] -0.040162744 # [2,] 0.037957783 # [3,] -0.005222642 # # $hessian # [,1] [,2] [,3] [,4] [,5] # [1,] 0.829090663 0.22304599 -0.26485541 -0.002160537 -0.002190363 # [2,] 0.223045985 0.72432381 0.17986818 0.079114683 0.079196221 # [3,] -0.264855413 0.17986818 0.41652769 0.074697268 0.074612806 # [4,] -0.002160537 0.07911468 0.07469727 0.969603684 -0.030376752 # [5,] -0.002190363 0.07919622 0.07461281 -0.030376752 0.969642726 # # $ineqx0 # NULL # # $nfuneval # [1] 172 # # $outer.iter # [1] 6 # # $elapsed # Time difference of 0.03999281 secs # # $vscale # [1] 0.05394985 0.00000001 0.00000001 0.00000001 1.71714345 1.59570955 1.82724598 # [8] 0.76364315 0.76364303 markovchain/tests/testthat/testMeanMeasures.R0000644000176200001440000001614313762012754021202 0ustar liggesuserscontext("Checking meanFirstPassageTime and meanRecurrenceTime") # Prepare matrices with known solution scr <- c("s","c","r") Pmat <- matrix( c(6,3,1, 2,3,5, 4,1,5)/10,3,byrow=T) P <- new("markovchain", states=scr, transitionMatrix=Pmat) # Analytic solutions P_r <- c(s=50,c=30)/11 P_full <- matrix( c( 0, 15/4, 50/11, 10/3, 0, 30/11, 8/3, 5, 0 ), byrow=T, ncol=3) rownames(P_full) <- scr colnames(P_full) <- scr Poz <- new("markovchain", states=scr, transitionMatrix=matrix(c(2,1,1, 2,0,2, 1,1,2)/4, byrow=T, ncol=3)) Poz_full <- matrix( c( 0, 4, 10/3, 8/3, 0, 8/3, 10/3, 4, 0 ), byrow=T, ncol=3) rownames(Poz_full) <- scr colnames(Poz_full) <- scr test_that("meanFirstPassageTime works for known matrices", { expect_equal(meanFirstPassageTime(P,"r"), P_r) expect_equal(meanFirstPassageTime(P), P_full) expect_equal(meanFirstPassageTime(Poz), Poz_full) }) # Given M = (m_{ij}) where m_{ij} is the mean recurrence time from i to j # Given P the transition probabilities # Given C a matrix with all its components as a 1 # Given D a matrix where the diagonal is formed by the recurrence times r_i and # the rest of the elements are 0s # # It must hold M = PM + C - D (by rows equation) test_that("meanFirstPassageTime and recurrenceTime hold their characteristic equation", { for (mc in allPositiveMCs) { P <- mc$transitionMatrix M <- meanFirstPassageTime(mc$object) C <- matlab::ones(ncol(P)) D <- diag(meanRecurrenceTime(mc$object)) if (mc$byrow) expect_true(all.equal(M, P %*% M + C - D)) else expect_true(all.equal(M, M %*% P + C - D)) } }) # Note that meanRecurrenceTimes are the inverse of the steady states elements # which are not negative, # # One steady state: Other: # 0 0 # 0 u > 0 # . v > 0 # . 0 # . . # x > 0 . # y > 0 . # z > 0 . # 0 . 
# # So if we invert the mean recurrenceTimes and fill the positions corresponding # to transient states with 0s, the result should be an eigen vector of the # transition matrix # test_that("We can manufacture an eigen vector with meanRecurrenceTimes", { for (mc in allMCs) { P <- mc$transitionMatrix byrow <- mc$byrow times <- mc$meanRecurrenceTime states <- mc$states inverse <- times ** (-1) v <- sapply(states, function(s) { ifelse(is.na(inverse[s]), 0, inverse[s]) }) v <- unname(v) if (byrow) result <- as.numeric(v %*% P) else result <- as.numeric(P %*% v) expect_true(all.equal(result, v)) } }) context("Checking meanNumVisits method") test_that("Mean number visits of identity markov chain is identity * Inf", { for (mc in allDiagonalMCs) { states <- mc$states numStates <- length(states) result <- diag(numStates) diag(result) <- Inf rownames(result) <- states colnames(result) <- states meanNumVisits <- mc$meanNumVisits expect_equal(meanNumVisits, result) } }) test_that("Mean number of visits hold their characteristic system and are non negative", { # Check that the following recurrence holds, # naming p = probs, f = hitting, E = mean number of visits, it checks: # # E(i, j) = p(i, j) / (1 - f(j, j)) + ∑_{k ≠ j} p(i, k) E(k, j) for (mc in allMCs) { probs <- mc$transitionMatrix byrow <- mc$byrow hitting <- mc$hittingProbabilities numVisits <- mc$meanNumVisits #expect_true(all(numVisits >= 0)) expect_true(.testthatAreMeanNumVisitsRcpp(probs, numVisits, hitting, byrow)) } }) test_that("All mean number of visits are ∞ iff the Markov chain is irreducible", { for (mc in allMCs) { meanNumVisits <- mc$meanNumVisits numVisitsInf <- all(meanNumVisits == Inf) irreducible <- mc$irreducible if (irreducible) expect_true(numVisitsInf) if (numVisitsInf) expect_true(irreducible) } }) # Test mean number of visits with a known matrix # Taken from the book Procesos Estocásticos, Ricardo Vélez & Tomás Prieto test_that("Tests mean number of visits for a known markov chain", { M <- matlab::zeros(5, 5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 markovChain <- new("markovchain", transitionMatrix = M) result <- matlab::zeros(5, 5) result[1:4, 1] <- Inf result[2:5, 5] <- Inf result[1, 2:5] <- 0 result[5, 1:4] <- 0 result[2,2] <- result[3,3] <- 3/5 result[2,3] <- result[4,2] <- result[3,4] <- 4/5 result[2,4] <- result[4,3] <- 2/5 result[3,2] <- 6/5 result[4,4] <- 1/5 rownames(result) <- markovChain@states colnames(result) <- markovChain@states expect_equal(meanNumVisits(markovChain), result) expect_equal(meanNumVisits(t(markovChain)), t(result)) }) context("Checking absorptionProbabilities") test_that("Test mean absorption times for known matrix", { # For mcHitting, defined in data-raw/db4Tests.R M <- matlab::zeros(3, 3) result <- 1/5 * matrix(c(4, 1, 3, 2, 2, 3), nrow = 3, byrow = TRUE) rownames(result) <- c(2, 3, 4) colnames(result) <- c(1, 5) expect_equal(absorptionProbabilities(mcHitting), result) expect_equal(absorptionProbabilities(t(mcHitting)), t(result)) }) # Fs is mean absorption probabilities # N is the fundamental matrix, (I - Q)^{-1} # This equation is by rows, need to transpose left part for by columns matrices test_that("Test that (I - Q) Fs = P[transient, recurrent]", { for (mc in allMCs) { if (length(mc$transientStates) > 0) { recurrent <- mc$recurrentStates transient <- mc$transientStates byrow <- mc$byrow states <- mc$states whichRecurrent <- which(states %in% recurrent) whichTransient <- which(states %in% transient) P <- mc$transitionMatrix Fs <- 
absorptionProbabilities(mc$object) Ninv <- diag(length(transient)) - P[whichTransient, whichTransient, drop = FALSE] if (byrow) { expected <- P[whichTransient, whichRecurrent, drop = FALSE] expect_equal(Ninv %*% Fs, expected) } else { expected <- P[whichRecurrent, whichTransient, drop = FALSE] expect_equal(Fs %*% Ninv, expected) } } } }) context("Checking meanAbsorptionTime") test_that("Mean absorption time for known matrix", { result <- c(3, 4, 3) names(result) <- c(2, 3, 4) expect_equal(meanAbsorptionTime(mcDrunkard), result) expect_equal(meanAbsorptionTime(t(mcDrunkard)), result) }) test_that("All mean absorption times are greater or equal than 1", { for (mc in allMCs) { if (length(mc$transientStates) > 0) { expect_true(all(meanAbsorptionTime(mcDrunkard) > 1)) } } })markovchain/tests/testthat.R0000644000176200001440000000010413762012754015703 0ustar liggesuserslibrary(testthat) library(markovchain) test_check("markovchain") markovchain/src/0000755000176200001440000000000014050513461013342 5ustar liggesusersmarkovchain/src/ctmcFittingFunctions.cpp0000644000176200001440000001057113762012754020226 0ustar liggesusers#include #include using namespace Rcpp; #include List markovchainFit(SEXP data, String method = "mle", bool byrow = true, int nboot = 10, double laplacian = 0, String name = "", bool parallel = false, double confidencelevel = 0.95, bool confint = true, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()); //' @name ctmcFit //' @title Function to fit a CTMC //' @description This function fits the underlying CTMC give the state //' transition data and the transition times using the maximum likelihood //' method (MLE) //' @usage ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) //' @param data It is a list of two elements. The first element is a character //' vector denoting the states. The second is a numeric vector denoting the //' corresponding transition times. //' @param byrow Determines if the output transition probabilities of the //' underlying embedded DTMC are by row. //' @param name Optional name for the CTMC. //' @param confidencelevel Confidence level for the confidence interval //' construnction. //' @return It returns a list containing the CTMC object and the confidence intervals. //' //' @details Note that in data, there must exist an element wise corresponding //' between the two elements of the list and that data[[2]][1] is always 0. 
//' @references Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{rctmc}} //' //' @examples //' data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) //' ctmcFit(data) //' //' @export //' // [[Rcpp::export]] List ctmcFit(List data, bool byrow=true, String name="", double confidencelevel = 0.95) { CharacterVector stateData(as(data[0]).size()); for (int i = 0; i < as(data[0]).size(); i++) stateData[i] = as(data[0])[i]; NumericVector transData = data[1]; CharacterVector sortedStates = unique(as(data[0])).sort(); NumericVector stateCount(sortedStates.size()); NumericVector stateSojournTime(sortedStates.size()); List dtmcData = markovchainFit(stateData, "mle", byrow, 10, 0, name, false, confidencelevel); for (int i = 0; i < stateData.size() - 1; i++){ int idx = std::find(sortedStates.begin(), sortedStates.end(), stateData[i]) - sortedStates.begin(); stateCount[idx]++; stateSojournTime[idx] += transData[i+1] - transData[i]; } S4 dtmcEst = dtmcData["estimate"]; NumericMatrix gen = dtmcEst.slot("transitionMatrix"); for (int i = 0; i < gen.nrow(); i++){ for (int j = 0; j < gen.ncol(); j++){ if (stateCount[i] > 0) gen(i, j) *= stateCount[i] / stateSojournTime[i]; } if (stateCount[i] > 0) gen(i, i) = - stateCount[i] / stateSojournTime[i]; else gen(i, i) = -1; } double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); NumericVector lowerConfVecLambda(sortedStates.size()), upperConfVecLambda(sortedStates.size()); for (int i = 0; i < sortedStates.size(); i++){ if (stateCount[i] > 0){ auto factor = stateCount[i] / stateSojournTime[i] * (1 - zscore / sqrt(stateCount[i])); lowerConfVecLambda(i) = std::max(0., factor); upperConfVecLambda(i) = std::min(1., factor); } else { lowerConfVecLambda(i) = 1; upperConfVecLambda(i) = 1; } } S4 outCtmc("ctmc"); outCtmc.slot("states") = sortedStates; outCtmc.slot("generator") = gen; outCtmc.slot("name") = name; return List::create(_["estimate"] = outCtmc, _["errors"] = List::create( _["dtmcConfidenceInterval"] = List::create( _["confidenceLevel"] = dtmcData["confidenceLevel"], _["lowerEndpointMatrix"] = dtmcData["lowerEndpointMatrix"], _["upperEndpointMatrix"] = dtmcData["upperEndpointMatrix"]), _["lambdaConfidenceInterval"] = List::create( _["lowerEndpointVector"] = lowerConfVecLambda, _["upperEndpointVector"] = upperConfVecLambda)) ); } markovchain/src/classesAndMethods.cpp0000644000176200001440000000073613762012754017470 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include using namespace Rcpp; using namespace arma; using namespace std; // TODO meaning of this method // [[Rcpp::export(.isGenRcpp)]] bool isGen(NumericMatrix gen) { for (int i = 0; i < gen.nrow(); i++) for (int j = 0; j < gen.ncol(); j++) if ((i == j && gen(i, j) > 0) || (i != j && gen(i, j) < 0)) return false; return true; } markovchain/src/fitHigherOrder.cpp0000644000176200001440000000261313762012754016765 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include using namespace Rcpp; // sequence to frequency probability vector //' @export // [[Rcpp::export]] NumericVector seq2freqProb(CharacterVector sequence) { int n = sequence.size(); CharacterVector states = unique(sequence).sort(); int nstates = states.length(); NumericVector v(nstates); v.names() = states; for (int i = 0; i < n; i ++) v[std::string(sequence[i])] = v[std::string(sequence[i])] + 1.0; NumericVector out = v/sum(v); out.names() = 
v.names(); return out; } // sequence to transition matrix for higher order markov chai //' @export // [[Rcpp::export]] NumericMatrix seq2matHigh(CharacterVector sequence, int order) { int n = sequence.size(); CharacterVector states = unique(sequence).sort(); int nstates = states.length(); NumericVector colsums(nstates); NumericMatrix out(nstates); out.attr("dimnames") = List::create(states, states); for (int i = 0; i < n - order; i++) { int from = -1, to = -1; for (int j = 0; j < nstates; j++) { if (sequence[i] == states[j]) from = j; if (sequence[i + order] == states[j]) to = j; } if (from != -1 && to != -1) { out(to, from) ++; colsums[from] ++; } } for (int i = 0; i < nstates; i ++) { for (int j = 0; j < nstates; j ++) out(i, j) /= colsums[j]; } return out; } markovchain/src/probabilistic.cpp0000644000176200001440000013266213762012754016716 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include #include #include using namespace Rcpp; using namespace std; using namespace arma; template T sortByDimNames(const T m); typedef unsigned int uint; // Returns whether a Markov chain is ergodic // Declared in this same file bool isIrreducible(S4 obj); // Declared in utils.cpp bool anyElement(const mat& matrix, bool (*condition)(const double&)); // Declared in utils.cpp bool allElements(const mat& matrix, bool (*condition)(const double&)); // Declared in utils.cpp bool approxEqual(const cx_double& a, const cx_double& b); // Used in commClassesKernel void strongConnect(int v, vector& disc, vector& low, vector& onStack, int& index, stack& exploring, NumericMatrix& P, vector>& components, int numStates) { disc[v] = index; low[v] = index; ++index; exploring.push(v); onStack[v] = true; // For each edge (v, w) that goes out of v for (int w = 0; w < numStates; ++w) { if (P(v, w) > 0) { // If w has not been visited yet, compute [w], and update // the minimum node we can travel to from v if (disc[w] == -1) { strongConnect(w, disc, low, onStack, index, exploring, P, components, numStates); low[v] = min(low[v], low[w]); // Otherwise, if w is on the stack of nodes been explored, // update the minimum node we can travel to from v } else if (onStack[w]) { low[v] = min(low[v], disc[w]); } // Otherwise, (v, w) is a cross edge between components // in the DFS tree, do nothing } } // If v is the root of [v], unwind the strongly connected // component from the stack if (low[v] == disc[v]) { bool remaining = true; unordered_set component; int w; while (remaining) { w = exploring.top(); exploring.pop(); component.insert(w); onStack[w] = false; remaining = w != v; } components.push_back(component); } } // This method is based on Tarjan's algorithm to find strongly // connected components in a directed graph: // https://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm // to compute the communicating classes. 
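//
// Illustration (comment-only sketch, using the exported R-level wrapper rather
// than this internal kernel): for the chain
//   P  <- matrix(c(0.5, 0.5, 0, 1), nrow = 2, byrow = TRUE,
//                dimnames = list(c("a", "b"), c("a", "b")))
//   mc <- new("markovchain", transitionMatrix = P)
//   communicatingClasses(mc)
// there are two singleton classes, {"a"} and {"b"}; only {"b"} is closed, since
// no probability mass leaves it.
//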
// Output: // - classes: a matrix whose entry (i, j) is true iff i and // j are in the same communicating class // - closed: a vector whose i-th entry indicates whether the // class [i] is closed // // [[Rcpp::export(.commClassesKernelRcpp)]] List commClassesKernel(NumericMatrix P) { // The matrix must be stochastic by rows int numStates = P.ncol(); vector disc(numStates, -1); vector low(numStates, -1); vector onStack(numStates, false); vector> components; stack exploring; int index = 0; // If the component [v] has not been computed yet // (disc[v] == -1), compute it for (int v = 0; v < numStates; ++v) { if (disc[v] == -1) strongConnect(v, disc, low, onStack, index, exploring, P, components, numStates); } // Create the output data structures CharacterVector stateNames = rownames(P); LogicalMatrix classes(numStates, numStates); classes.attr("dimnames") = P.attr("dimnames"); std::fill(classes.begin(), classes.end(), false); LogicalVector closed(numStates); closed.names() = stateNames; for (auto component : components) { bool isClosed = true; // The class is closed iff there is no edge going out of the class for (int i : component) { for (int j = 0; j < numStates; ++j) if (P(i, j) > 0 && component.count(j) == 0) isClosed = false; } // Set the communicating matrix and whether it is closed or not for (int i : component) { closed(i) = isClosed; for (int j : component) classes(i, j) = true; } } return List::create(_["classes"] = classes, _["closed"] = closed); } // Wrapper that computes the communicating states from the matrix generated by // commClassesKernel (a matrix where an entry i,j is TRUE iff i and j are in the // same communicating class). It also needs the list of states names from the // Markov Chain List computeCommunicatingClasses(LogicalMatrix& commClasses, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List classesList; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; if (!computed[i]) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } classesList.push_back(currentClass); } } return classesList; } // [[Rcpp::export(.communicatingClassesRcpp)]] List communicatingClasses(S4 object) { // Returns the underlying communicating classes // It is indifferent if the matrices are stochastic by rows or columns NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; return computeCommunicatingClasses(commClasses, states); } // Wrapper that computes the transient states from a list of the states and a // vector indicating whether the communicating class for each state is closed CharacterVector computeTransientStates(CharacterVector& states, LogicalVector& closedClass) { CharacterVector transientStates; for (int i = 0; i < states.size(); i++) if (!closedClass[i]) transientStates.push_back(states[i]); return transientStates; } // Wrapper that computes the recurrent states from a list of states and a // vector indicating whether the communicating class for each state is closed CharacterVector computeRecurrentStates(CharacterVector& states, LogicalVector& closedClass) { CharacterVector recurrentStates; for (int i = 0; i < states.size(); i++) if (closedClass[i]) recurrentStates.push_back(states[i]); 
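  // (for a finite chain a state is recurrent if and only if its communicating
  //  class is closed, which is why the filter above keeps exactly those states)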
return recurrentStates; } // [[Rcpp::export(.transientStatesRcpp)]] CharacterVector transientStates(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commKernel = commClassesKernel(transitionMatrix); LogicalVector closed = commKernel["closed"]; CharacterVector states = object.slot("states"); return computeTransientStates(states, closed); } // [[Rcpp::export(.recurrentStatesRcpp)]] CharacterVector recurrentStates(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commKernel = commClassesKernel(transitionMatrix); LogicalVector closed = commKernel["closed"]; return computeRecurrentStates(states, closed); } // Wrapper that computes the recurrent classes from the matrix given by // commClassesKernel (which entry i,j is TRUE iff i and j are in the same // communicating class), a vector indicating wheter the class for state is // closed and the states of the Markov Chain List computeRecurrentClasses(LogicalMatrix& commClasses, LogicalVector& closedClass, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List recurrentClassesList; bool isRecurrentClass; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; isRecurrentClass = closedClass(i) && !computed[i]; if (isRecurrentClass) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } recurrentClassesList.push_back(currentClass); } } return recurrentClassesList; } // returns the recurrent classes // [[Rcpp::export(.recurrentClassesRcpp)]] List recurrentClasses(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; return computeRecurrentClasses(commClasses, closed, states); } // Wrapper that computes the transient classes from the matrix given by // commClassesKernel (which entry i,j is TRUE iff i and j are in the same // communicating class), a vector indicating wheter the class for state is // closed and the states of the Markov Chain List computeTransientClasses(LogicalMatrix& commClasses, LogicalVector& closedClass, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List transientClassesList; bool isTransientClass; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; isTransientClass = !closedClass(i) && !computed[i]; if (isTransientClass) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } transientClassesList.push_back(currentClass); } } return transientClassesList; } // returns the transient classes // [[Rcpp::export(.transientClassesRcpp)]] List transientClasses(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = 
commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; return computeTransientClasses(commClasses, closed, states); } // Defined in probabilistic.cpp mat matrixPow(const mat& A, int n); // [[Rcpp::export(.reachabilityMatrixRcpp)]] LogicalMatrix reachabilityMatrix(S4 obj) { NumericMatrix matrix = obj.slot("transitionMatrix"); // Reachability matrix int m = matrix.nrow(); mat X(matrix.begin(), m, m, true); mat reachability = eye(m, m) + sign(X); reachability = matrixPow(reachability, m - 1); LogicalMatrix result = wrap(reachability > 0); result.attr("dimnames") = matrix.attr("dimnames"); return result; } // [[Rcpp::export(.isAccessibleRcpp)]] bool isAccessible(S4 obj, String from, String to) { NumericMatrix probs = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); int fromPos = -1, toPos = -1; bool byrow = obj.slot("byrow"); int m = probs.ncol(); // Compute indices for states from and pos for (int i = 0; i < m; ++i) { if (states[i] == from) fromPos = i; if (states[i] == to) toPos = i; } if (fromPos == -1 || toPos == -1) stop("Please give valid states method"); stack toExplore; toExplore.push(fromPos); vector visited(m, false); visited[fromPos] = true; bool isReachable = false; // DFS until we hit 'to' state or we cannot traverse to more states while (!toExplore.empty() && !isReachable) { int i = toExplore.top(); toExplore.pop(); visited[i] = true; isReachable = i == toPos; for (int j = 0; j < m; ++j) if (((byrow && !approxEqual(probs(i, j), 0)) || (!byrow && !approxEqual(probs(j, i), 0))) && !visited[j]) toExplore.push(j); } return isReachable; } // summary of markovchain object // [[Rcpp::export(.summaryKernelRcpp)]] List summaryKernel(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; List recurrentClasses = computeRecurrentClasses(commClasses, closed, states); List transientClasses = computeTransientClasses(commClasses, closed, states); List summaryResult = List::create(_["closedClasses"] = recurrentClasses, _["recurrentClasses"] = recurrentClasses, _["transientClasses"] = transientClasses); return(summaryResult); } //here the kernel function to compute the first passage // [[Rcpp::export(.firstpassageKernelRcpp)]] NumericMatrix firstpassageKernel(NumericMatrix P, int i, int n) { arma::mat G = as(P); arma::mat Pa = G; arma::mat H(n, P.ncol()); //here Thoralf suggestion //initializing the first row for (unsigned int j = 0; j < G.n_cols; j++) H(0, j) = G(i-1, j); arma::mat E = 1 - arma::eye(P.ncol(), P.ncol()); for (int m = 1; m < n; m++) { G = Pa * (G%E); for (unsigned int j = 0; j < G.n_cols; j ++) H(m, j) = G(i-1, j); } NumericMatrix R = wrap(H); return R; } // [[Rcpp::export(.firstPassageMultipleRCpp)]] NumericVector firstPassageMultipleRCpp(NumericMatrix P,int i, NumericVector setno, int n) { arma::mat G = as(P); arma::mat Pa = G; arma::vec H = arma::zeros(n); //here Thoralf suggestion unsigned int size = setno.size(); //initializing the first row for (unsigned int k = 0; k < size; k++) { H[0] += G(i-1, setno[k]-1); } arma::mat E = 1 - arma::eye(P.ncol(), P.ncol()); for (int m = 1; m < n; m++) { G = Pa * (G%E); for (unsigned int k = 0; k < size; k++) { H[m] += G(i-1, setno[k]-1); } } NumericVector R = 
wrap(H); return R; } // [[Rcpp::export(.expectedRewardsRCpp)]] NumericVector expectedRewardsRCpp(NumericMatrix matrix, int n, NumericVector rewards) { // initialises output vector NumericVector out; // gets no of states int no_of_states = matrix.ncol(); // initialises armadillo matrices and vectors arma::vec temp = arma::zeros(no_of_states); arma::mat matr = as(matrix); arma::vec v = arma::zeros(no_of_states); // initialses the vector for the base case of dynamic programming expression for (int i=0;i(matrix); arma::mat temp = as(matrix); arma::vec r = as(rewards); arma::mat I = arma::zeros(1,size); I(0,s0-1) = 1; for (int j = 0; j < n; j++) { arma::mat res = I*(temp*r); result = result + res(0,0); temp = temp*matr; } return result; } // greatest common denominator // [[Rcpp::export(.gcdRcpp)]] int gcd (int a, int b) { int c; a = abs(a); b = abs(b); while ( a != 0 ) { c = a; a = b%a; b = c; } return b; } // function to get the period of a DTMC //' @rdname structuralAnalysis //' //' @export // [[Rcpp::export(period)]] int period(S4 object) { bool irreducible = isIrreducible(object); if (!irreducible) { warning("The matrix is not irreducible"); return 0; } else { NumericMatrix P = object.slot("transitionMatrix"); int n = P.ncol(); std::vector r, T(1), w; int d = 0, m = T.size(), i = 0, j = 0; if (n > 0) { arma::vec v(n); v[0] = 1; while (m>0 && d!=1) { i = T[0]; T.erase(T.begin()); w.push_back(i); j = 0; while (j < n) { if (P(i,j) > 0) { r.insert(r.end(), w.begin(), w.end()); r.insert(r.end(), T.begin(), T.end()); double k = 0; for (std::vector::iterator it = r.begin(); it != r.end(); it ++) if (*it == j) k ++; if (k > 0) { int b = v[i] + 1 - v[j]; d = gcd(d, b); } else { T.push_back(j); v[j] = v[i] + 1; } } j++; } m = T.size(); } } // v = v - floor(v/d)*d; return d; } } //' @title predictiveDistribution //' //' @description The function computes the probability of observing a new data //' set, given a data set //' @usage predictiveDistribution(stringchar, newData, hyperparam = matrix()) //' //' @param stringchar This is the data using which the Bayesian inference is //' performed. //' @param newData This is the data whose predictive probability is computed. //' @param hyperparam This determines the shape of the prior distribution of the //' parameters. If none is provided, default value of 1 is assigned to each //' parameter. This must be of size kxk where k is the number of states in the //' chain and the values should typically be non-negative integers. //' @return The log of the probability is returned. //' //' @details The underlying method is Bayesian inference. The probability is //' computed by averaging the likelihood of the new data with respect to the //' posterior. Since the method assumes conjugate priors, the result can be //' represented in a closed form (see the vignette for more details), which is //' what is returned. //' @references //' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, //' and Out-of-Class Modeling, Christopher C. Strelioff, James P. //' Crutchfield, Alfred Hubler, Santa Fe Institute //' //' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov //' Chains. 
R package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{markovchainFit}} //' @examples //' sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", //' "b", "b", "b", "a") //' hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) //' predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) //' hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] //' predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) //' predProb2==predProb //' @export //' // [[Rcpp::export]] double predictiveDistribution(CharacterVector stringchar, CharacterVector newData, NumericMatrix hyperparam = NumericMatrix()) { // construct list of states CharacterVector elements = stringchar; for (int i = 0; i < newData.size(); i++) elements.push_back(newData[i]); elements = unique(elements).sort(); int sizeMatr = elements.size(); // if no hyperparam argument provided, use default value of 1 for all if (hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(elements, elements); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } // validity check if (hyperparam.nrow() != hyperparam.ncol()) stop("Dimensions of the hyperparameter matrix are inconsistent"); if (hyperparam.nrow() < sizeMatr) stop("Hyperparameters for all state transitions must be provided"); List dimNames = hyperparam.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; int sizeHyperparam = hyperparam.ncol(); CharacterVector sortedColNames(sizeHyperparam), sortedRowNames(sizeHyperparam); for (int i = 0; i < sizeHyperparam; i++) sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); sortedColNames.sort(); sortedRowNames.sort(); for (int i = 0; i < sizeHyperparam; i++) { if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); bool found = false; for (int j = 0; j < sizeMatr; j++) if (elements(j) == sortedColNames(i)) found = true; // hyperparam may contain states not in stringchar if (!found) elements.push_back(sortedColNames(i)); } // check for the case where hyperparam has missing data for (int i = 0; i < sizeMatr; i++) { bool found = false; for (int j = 0; j < sizeHyperparam; j++) if (sortedColNames(j) == elements(i)) found = true; if (!found) stop("Hyperparameters for all state transitions must be provided"); } elements = elements.sort(); sizeMatr = elements.size(); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) if (hyperparam(i, j) < 1.) 
stop("The hyperparameter elements must all be greater than or equal to 1"); // permute the elements of hyperparam such that the row, column names are sorted hyperparam = sortByDimNames(hyperparam); NumericMatrix freqMatr(sizeMatr), newFreqMatr(sizeMatr); double predictiveDist = 0.; // log of the predictive probability // populate frequeny matrix for old data; this is used for inference int posFrom = 0, posTo = 0; for (int i = 0; i < stringchar.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (stringchar[i] == elements[j]) posFrom = j; if (stringchar[i + 1] == elements[j]) posTo = j; } freqMatr(posFrom,posTo)++; } // frequency matrix for new data for (int i = 0; i < newData.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (newData[i] == elements[j]) posFrom = j; if (newData[i + 1] == elements[j]) posTo = j; } newFreqMatr(posFrom,posTo)++; } for (int i = 0; i < sizeMatr; i++) { double rowSum = 0, newRowSum = 0, paramRowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j), newRowSum += newFreqMatr(i, j), paramRowSum += hyperparam(i, j); predictiveDist += lgamma(freqMatr(i, j) + newFreqMatr(i, j) + hyperparam(i, j)) - lgamma(freqMatr(i, j) + hyperparam(i, j)); } predictiveDist += lgamma(rowSum + paramRowSum) - lgamma(rowSum + newRowSum + paramRowSum); } return predictiveDist; } //' @title priorDistribution //' //' @description Function to evaluate the prior probability of a transition //' matrix. It is based on conjugate priors and therefore a Dirichlet //' distribution is used to model the transitions of each state. //' @usage priorDistribution(transMatr, hyperparam = matrix()) //' //' @param transMatr The transition matrix whose probability is the parameter of //' interest. //' @param hyperparam The hyperparam matrix (optional). If not provided, a //' default value of 1 is assumed for each and therefore the resulting //' probability distribution is uniform. //' @return The log of the probabilities for each state is returned in a numeric //' vector. Each number in the vector represents the probability (log) of //' having a probability transition vector as specified in corresponding the //' row of the transition matrix. //' //' @details The states (dimnames) of the transition matrix and the hyperparam //' may be in any order. //' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First //' Order Markov Chains. R package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi, Giorgio Spedicato //' //' @note This function can be used in conjunction with inferHyperparam. For //' example, if the user has a prior data set and a prior transition matrix, //' he can infer the hyperparameters using inferHyperparam and then compute //' the probability of their prior matrix using the inferred hyperparameters //' with priorDistribution. 
//' @seealso \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} //' //' @examples //' priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), //' nrow = 2, //' dimnames = list(c("a", "b"), c("a", "b"))), //' matrix(c(2, 2, 2, 2), //' nrow = 2, //' dimnames = list(c("a", "b"), c("a", "b")))) //' @export // [[Rcpp::export]] NumericVector priorDistribution(NumericMatrix transMatr, NumericMatrix hyperparam = NumericMatrix()) { // begin validity checks for the transition matrix if (transMatr.nrow() != transMatr.ncol()) stop("Transition matrix dimensions are inconsistent"); int sizeMatr = transMatr.nrow(); for (int i = 0; i < sizeMatr; i++) { double rowSum = 0., eps = 1e-10; for (int j = 0; j < sizeMatr; j++) if (transMatr(i, j) < 0. || transMatr(i, j) > 1.) stop("The entries in the transition matrix must each belong to the interval [0, 1]"); else rowSum += transMatr(i, j); if (rowSum <= 1. - eps || rowSum >= 1. + eps) stop("The rows of the transition matrix must each sum to 1"); } List dimNames = transMatr.attr("dimnames"); if (dimNames.size() == 0) stop("Provide dimnames for the transition matrix"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; CharacterVector sortedColNames(sizeMatr), sortedRowNames(sizeMatr); for (int i = 0; i < sizeMatr; i++) sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); sortedColNames.sort(); sortedRowNames.sort(); for (int i = 0; i < sizeMatr; i++) if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); // if no hyperparam argument provided, use default value of 1 for all if (hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(sortedColNames, sortedColNames); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } // validity check for hyperparam if (hyperparam.nrow() != hyperparam.ncol()) stop("Dimensions of the hyperparameter matrix are inconsistent"); if (hyperparam.nrow() != sizeMatr) stop("Hyperparameter and the transition matrices differ in dimensions"); List _dimNames = hyperparam.attr("dimnames"); if (_dimNames.size() == 0) stop("Provide dimnames for the hyperparameter matrix"); CharacterVector _colNames = _dimNames[1]; CharacterVector _rowNames = _dimNames[0]; int sizeHyperparam = hyperparam.ncol(); CharacterVector _sortedColNames(sizeHyperparam), _sortedRowNames(sizeHyperparam); for (int i = 0; i < sizeHyperparam; i++) _sortedColNames(i) = colNames(i), _sortedRowNames(i) = rowNames(i); _sortedColNames.sort(); _sortedRowNames.sort(); for (int i = 0; i < sizeHyperparam; i++) if (sortedColNames(i) != _sortedColNames(i) || sortedRowNames(i) != _sortedRowNames(i)) stop("Hyperparameter and the transition matrices states differ"); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) if (hyperparam(i, j) < 1.) stop("The hyperparameter elements must all be greater than or equal to 1"); transMatr = sortByDimNames(transMatr); hyperparam = sortByDimNames(hyperparam); NumericVector logProbVec; for (int i = 0; i < sizeMatr; i++) { double logProb_i = 0., hyperparamRowSum = 0; for (int j = 0; j < sizeMatr; j++) { hyperparamRowSum += hyperparam(i, j); logProb_i += (hyperparam(i, j) - 1.) 
* log(transMatr(i, j)) - lgamma(hyperparam(i, j)); } logProb_i += lgamma(hyperparamRowSum); logProbVec.push_back(logProb_i); } logProbVec.attr("names") = sortedColNames; return logProbVec; } // [[Rcpp::export(.hittingProbabilitiesRcpp)]] NumericMatrix hittingProbabilities(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); CharacterVector states = object.slot("states"); bool byrow = object.slot("byrow"); if (!byrow) transitionMatrix = transpose(transitionMatrix); int numStates = transitionMatrix.nrow(); arma::mat transitionProbs = as(transitionMatrix); arma::mat hittingProbs(numStates, numStates); // Compute closed communicating classes List commClasses = commClassesKernel(transitionMatrix); List closedClass = commClasses["closed"]; LogicalMatrix communicating = commClasses["classes"]; for (int j = 0; j < numStates; ++j) { arma::mat coeffs = as(transitionMatrix); arma::vec right_part = -transitionProbs.col(j); for (int i = 0; i < numStates; ++i) { coeffs(i, j) = 0; coeffs(i, i) -= 1; } for (int i = 0; i < numStates; ++i) { if (closedClass(i)) { for (int k = 0; k < numStates; ++k) if (k != i) coeffs(i, k) = 0; else coeffs(i, i) = 1; if (communicating(i, j)) right_part(i) = 1; else right_part(i) = 0; } } hittingProbs.col(j) = arma::solve(coeffs, right_part); } NumericMatrix result = wrap(hittingProbs); colnames(result) = states; rownames(result) = states; if (!byrow) result = transpose(result); return result; } // method to convert into canonic form a markovchain object // [[Rcpp::export(.canonicFormRcpp)]] S4 canonicForm(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); bool byrow = obj.slot("byrow"); int numRows = transitions.nrow(); int numCols = transitions.ncol(); NumericMatrix resultTransitions(numRows, numCols); CharacterVector states = obj.slot("states"); unordered_map stateToIndex; unordered_set usedIndices; int currentIndex; List recClasses; S4 input("markovchain"); S4 result("markovchain"); vector indexPermutation(numRows); if (!byrow) { input.slot("transitionMatrix") = transpose(transitions); input.slot("states") = states; input.slot("byrow") = true; transitions = transpose(transitions); } else { input = obj; } recClasses = recurrentClasses(input); // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { string state = (string) states[i]; stateToIndex[state] = i; } int toFill = 0; for (CharacterVector recClass : recClasses) { for (auto state : recClass) { currentIndex = stateToIndex[(string) state]; indexPermutation[toFill] = currentIndex; ++toFill; usedIndices.insert(currentIndex); } } for (int i = 0; i < states.size(); ++i) { if (usedIndices.count(i) == 0) { indexPermutation[toFill] = i; ++toFill; } } CharacterVector newStates(numRows); for (int i = 0; i < numRows; ++i) { int r = indexPermutation[i]; newStates(i) = states(r); for (int j = 0; j < numCols; ++j) { int c = indexPermutation[j]; resultTransitions(i, j) = transitions(r, c); } } rownames(resultTransitions) = newStates; colnames(resultTransitions) = newStates; if (!byrow) resultTransitions = transpose(resultTransitions); result.slot("transitionMatrix") = resultTransitions; result.slot("byrow") = byrow; result.slot("states") = newStates; result.slot("name") = input.slot("name"); return result; } // Function to sort a matrix of vectors lexicographically NumericMatrix lexicographicalSort(NumericMatrix m) { int numCols = m.ncol(); int numRows = m.nrow(); if (numRows > 0 && numCols > 0) { vector> x(numRows, vector(numCols)); for (int i = 0; i < numRows; ++i) 
for (int j = 0; j < numCols; ++j) x[i][j] = m(i,j); sort(x.begin(), x.end()); NumericMatrix result(numRows, numCols); for (int i = 0; i < numRows; ++i) for (int j = 0; j < numCols; ++j) result(i, j) = x[i][j]; colnames(result) = colnames(m); return result; } else { return m; } } // This method computes the *unique* steady state that exists for an // matrix has to be schocastic by rows // ergodic (= irreducible) matrix vec steadyStateErgodicMatrix(const mat& submatrix) { int nRows = submatrix.n_rows; int nCols = submatrix.n_cols; vec rightPart(nRows + 1, fill::zeros); vec result; mat coeffs(nRows + 1, nCols); // If P is Ergodic, the system (I - P)*w = 0 plus the equation // w_1 + ... + w_m = 1 must have a soultion for (int i = 0; i < nRows; ++i) { for (int j = 0; j < nCols; ++j) { // transpose matrix in-place coeffs(i, j) = submatrix(j, i); if (i == j) coeffs(i, i) -= 1; } } for (int j = 0; j < nCols; ++j) coeffs(nRows, j) = 1; rightPart(nRows) = 1; if (!solve(result, coeffs, rightPart)) stop("Failure computing eigen values / vectors for submatrix in steadyStateErgodicMatrix"); return result; } // Precondition: the matrix should be stochastic by rows NumericMatrix steadyStatesByRecurrentClasses(S4 object) { List recClasses = recurrentClasses(object); int numRecClasses = recClasses.size(); NumericMatrix transitionMatrix = object.slot("transitionMatrix"); CharacterVector states = object.slot("states"); int numCols = transitionMatrix.ncol(); NumericMatrix steady(numRecClasses, numCols); unordered_map stateToIndex; int steadyStateIndex = 0; // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { string state = (string) states[i]; stateToIndex[state] = i; } // For each recurrent class, there must be an steady state for (CharacterVector recurrentClass : recClasses) { int recClassSize = recurrentClass.size(); mat subMatrix(recClassSize, recClassSize); // Fill the submatrix corresponding to the current steady class // Note that for that we have to subset the matrix with the indices // the states in the recurrent class ocuppied in the transition matrix for (int i = 0; i < recClassSize; ++i) { int r = stateToIndex[(string) recurrentClass[i]]; for (int j = 0; j < recClassSize; ++j) { int c = stateToIndex[(string) recurrentClass[j]]; subMatrix(i, j) = transitionMatrix(r, c); } } // Compute the steady states for the given submatrix vec steadyState = steadyStateErgodicMatrix(subMatrix); for (int i = 0; i < recClassSize; ++i) { int c = stateToIndex[(string) recurrentClass[i]]; steady(steadyStateIndex, c) = steadyState(i);; } ++steadyStateIndex; } colnames(steady) = states; return steady; } // [[Rcpp::export(.steadyStatesRcpp)]] NumericMatrix steadyStates(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); S4 object("markovchain"); if (!byrow) { object.slot("transitionMatrix") = transpose(transitions); object.slot("states") = states; object.slot("byrow") = true; } else { object = obj; } // Compute steady states using recurrent classes (there is // exactly one steady state associated with each recurrent class) NumericMatrix result = lexicographicalSort(steadyStatesByRecurrentClasses(object)); if (!byrow) result = transpose(result); return result; } // This method is agnostic on whether the matrix is stochastic // by rows or by columns, we just need the diagonal // [[Rcpp::export(.absorbingStatesRcpp)]] CharacterVector absorbingStates(S4 obj) { NumericMatrix transitionMatrix = 
obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); CharacterVector absorbing; int numStates = states.size(); for (int i = 0; i < numStates; ++i) if (approxEqual(transitionMatrix(i, i), 1)) absorbing.push_back(states(i)); return absorbing; } // [[Rcpp::export(.isIrreducibleRcpp)]] bool isIrreducible(S4 obj) { List commClasses = communicatingClasses(obj); // The markov chain is irreducible iff has only a single communicating class return commClasses.size() == 1; } // [[Rcpp::export(.isRegularRcpp)]] bool isRegular(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); int m = transitions.ncol(); mat probs(transitions.begin(), m, m, true); mat reachable; // Let alias this as d int positiveDiagonal = 0; auto arePositive = [](const double& x){ return x > 0; }; // Count positive elements in the diagonal for (int i = 0; i < m; ++i) if (probs(i, i) > 0) ++positiveDiagonal; // Taken from the book: // Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. // Corollary 8.5.8 and Theorem 8.5.9 // // If A is irreducible and has 0 < d positive diagonal elements // A is regular and $A^{2m - d - 1} > 0 // // A is regular iff A^{m²- 2m + 2} > 0 if (positiveDiagonal > 0) reachable = matrixPow(probs, 2*m - positiveDiagonal - 1); else reachable = matrixPow(probs, m*m - 2*m + 2); return allElements(reachable, arePositive); } NumericMatrix computeMeanAbsorptionTimes(mat& probs, CharacterVector& absorbing, CharacterVector& states) { unordered_set toErase; vector indicesToKeep; CharacterVector newNames; string current; for (auto state : absorbing) toErase.insert((string) state); // Compute the states which are not in absorbing // and subset the sub-probability matrix of those // states which are not considered absorbing, Q for (uint i = 0; i < states.size(); ++i) { current = (string) states(i); if (toErase.count(current) == 0) { indicesToKeep.push_back(i); newNames.push_back(current); } } int n = indicesToKeep.size(); uvec indices(indicesToKeep); // Comppute N = 1 - Q mat coeffs = eye(n, n) - probs(indices, indices); vec rightPart = vec(n, fill::ones); mat meanTimes; // Mean absorbing times A are computed as N * A = 1, // where 1 is a column vector of 1s if (!solve(meanTimes, coeffs, rightPart)) stop("Error solving system in meanAbsorptionTime"); NumericMatrix result = wrap(meanTimes); rownames(result) = newNames; return result; } // [[Rcpp::export(.meanAbsorptionTimeRcpp)]] NumericVector meanAbsorptionTime(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); unordered_set allStates; if (!byrow) transitions = transpose(transitions); // Compute recurrent and transient states List commKernel = commClassesKernel(transitions); LogicalVector closed = commKernel["closed"]; CharacterVector transient = computeTransientStates(states, closed); CharacterVector recurrent = computeRecurrentStates(states, closed); // Compute the mean absorption time for the transient states mat probs(transitions.begin(), transitions.nrow(), transitions.ncol(), true); NumericMatrix meanTimes = computeMeanAbsorptionTimes(probs, recurrent, states); NumericVector result; if (meanTimes.ncol() > 0) { result = meanTimes(_, 0); result.attr("names") = transient; } return result; } // [[Rcpp::export(.absorptionProbabilitiesRcpp)]] NumericMatrix absorptionProbabilities(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); string current; bool byrow = 
obj.slot("byrow"); if (!byrow) transitions = transpose(transitions); unordered_map stateToIndex; // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { current = (string) states[i]; stateToIndex[current] = i; } List commKernel = commClassesKernel(transitions); LogicalVector closed = commKernel["closed"]; CharacterVector transient = computeTransientStates(states, closed); CharacterVector recurrent = computeRecurrentStates(states, closed); vector transientIndxs, recurrentIndxs; // Compute the indexes of the matrix which correspond to transient and recurrent states for (auto state : transient) { current = (string) state; transientIndxs.push_back(stateToIndex[current]); } for (auto state : recurrent) { current = (string) state; recurrentIndxs.push_back(stateToIndex[current]); } int m = transitions.ncol(); int n = transientIndxs.size(); if (n == 0) stop("Markov chain does not have transient states, method not applicable"); // Get the indices in arma::uvec s uvec transientIndices(transientIndxs); uvec recurrentIndices(recurrentIndxs); // Compute N = (1 - Q)^{-1} mat probs(transitions.begin(), m, m, true); mat toInvert = eye(n, n) - probs(transientIndices, transientIndices); mat fundamentalMatrix; if (!inv(fundamentalMatrix, toInvert)) stop("Could not compute fundamental matrix"); // Compute the mean absorption probabilities as F* = N*P[transient, recurrent] mat meanProbs = fundamentalMatrix * probs(transientIndices, recurrentIndices); NumericMatrix result = wrap(meanProbs); rownames(result) = transient; colnames(result) = recurrent; if (!byrow) result = transpose(result); return result; } // [[Rcpp::export(.meanFirstPassageTimeRcpp)]] NumericMatrix meanFirstPassageTime(S4 obj, CharacterVector destination) { bool isErgodic = isIrreducible(obj); if (!isErgodic) stop("Markov chain needs to be ergodic (= irreducile) for this method to work"); else { NumericMatrix transitions = obj.slot("transitionMatrix"); mat probs(transitions.begin(), transitions.nrow(), transitions.ncol(), true); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); int numStates = states.size(); NumericMatrix result; if (!byrow) probs = probs.t(); if (destination.size() > 0) { result = computeMeanAbsorptionTimes(probs, destination, states); // This transpose is intentional to return a row always instead of a column result = transpose(result); return result; } else { result = NumericMatrix(numStates, numStates); vec steadyState = steadyStateErgodicMatrix(probs); mat toInvert(numStates, numStates); mat Z; // Compute inverse for (I - P + W), where P = probs, // and W = steadyState pasted row-wise for (int i = 0; i < numStates; ++i) { for (int j = 0; j < numStates; ++j) { toInvert(i, j) = -probs(i, j) + steadyState(j); if (i == j) toInvert(i, i) += 1; } } if (!inv(Z, toInvert)) stop("Problem computing inverse of matrix inside meanFirstPassageTime"); // Set the result matrix for (int j = 0; j < numStates; ++j) { double r_j = 1.0 / steadyState(j); for (int i = 0; i < numStates; ++i) { result(i, j) = (Z(j,j) - Z(i,j)) * r_j; } } colnames(result) = states; rownames(result) = states; if (!byrow) result = transpose(result); return result; } } } // [[Rcpp::export(.meanRecurrenceTimeRcpp)]] NumericVector meanRecurrenceTime(S4 obj) { NumericMatrix steady = steadyStates(obj); bool byrow = obj.slot("byrow"); if (!byrow) steady = transpose(steady); CharacterVector states = obj.slot("states"); NumericVector result; CharacterVector recurrentStates; for (int i = 0; i < steady.nrow(); ++i) { for (int 
j = 0; j < steady.ncol(); ++j) { // This depends on our imlementation of the steady // states, but we have the guarantee that the entry // corresponding to a state in a recurrent class is // only going to be positive in one vector and the // entries corresponding to transient states are // going to be zero if (!approxEqual(steady(i, j), 0)) { result.push_back(1.0 / steady(i, j)); recurrentStates.push_back(states(j)); } } } result.attr("names") = recurrentStates; return result; } // [[Rcpp::export(.minNumVisitsRcpp)]] NumericMatrix meanNumVisits(S4 obj) { NumericMatrix hitting = hittingProbabilities(obj); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); if (!byrow) hitting = transpose(hitting); int n = hitting.ncol(); bool closeToOne; double inverse; NumericMatrix result(n, n); rownames(result) = states; colnames(result) = states; // Lets call the matrix of hitting probabilities as f // Then mean number of visits from i to j are given by // f_{ij} / (1 - f_{jj}) // having care when f_{ij} -> mean num of visits is zero // and when f_{ij} > 0 and f_{jj} = 1 -> infinity mean // num of visits for (int j = 0; j < n; ++j) { closeToOne = approxEqual(hitting(j, j), 1); if (!closeToOne) inverse = 1 / (1 - hitting(j, j)); for (int i = 0; i < n; ++i) { if (hitting(i, j) == 0) result(i, j) = 0; else { if (closeToOne) result(i, j) = R_PosInf; else result(i, j) = hitting(i, j) * inverse; } } } if (!byrow) result = transpose(result); return result; } markovchain/src/Makevars0000644000176200001440000000010013762012754015035 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) CXX_STD = CXX11 markovchain/src/fittingFunctions.cpp0000644000176200001440000015216513762012754017425 0ustar liggesusers#ifndef STRICT_R_HEADERS #define STRICT_R_HEADERS // [[Rcpp::depends(RcppParallel)]] // [[Rcpp::depends(RcppArmadillo)]] #include #include #include using namespace Rcpp; using namespace RcppArmadillo; using namespace RcppParallel; using namespace std; #include "helpers.h" #include "mapFitFunctions.h" #include #include // [[Rcpp::export(.markovchainSequenceRcpp)]] CharacterVector markovchainSequenceRcpp(int n, S4 markovchain, CharacterVector t0, bool include_t0 = false) { // character vector to store the result CharacterVector chain(n); // transition mastrix NumericMatrix transitionMatrix = markovchain.slot("transitionMatrix"); // possible states CharacterVector states = markovchain.slot("states"); // current state CharacterVector state = t0; NumericVector rowProbs(states.size()); CharacterVector outstate; for (int i = 0;i < n;i++) { // extracting row probabilties for the given state from transition matrix int row_no = 0; for (int j = 0;j < states.size();j++) { /* last element of state character vector because of markovchainListRcpp, a seq of length greater than 1 is also passed whose end state is the beginning state here */ if (states[j] == state[state.size()-1]) { row_no = j; break; } } for (int j = 0; j < states.size(); j++) { rowProbs[j] = transitionMatrix(row_no, j); } // calculate next state outstate = sample(states, 1, false, rowProbs); chain[i] = outstate[0]; state = outstate; } if (include_t0) chain.push_front(t0[0]); return chain; } bool checkSequenceRcpp(List object) { bool out = true; int nob = object.size(); // if there is only one markovchain object return true if (nob == 1) return(true); S4 ob0, ob1; CharacterVector statesN1, statesN, intersection; for (int i = 1; i < nob;i++) { CharacterVector statesNm1; ob0 = S4(object[i-1]); ob1 = S4(object[i]); statesN1 = 
ob0.slot("states"); statesN = ob1.slot("states"); NumericMatrix matr = ob0.slot("transitionMatrix"); double csum = 0; for (int j = 0;j < matr.ncol();j++) { csum = 0; for (int k = 0;k < matr.nrow();k++) { csum += matr(k, j); } if (csum != 0) statesNm1.push_back(statesN1[j]); } intersection = intersect(statesNm1, statesN); if (not setequal(intersection, statesNm1)) { out = false; break; } } return(out); } // [[Rcpp::export(.markovchainListRcpp)]] List markovchainListRcpp(int n, List object, bool include_t0 = false, CharacterVector t0 = CharacterVector()) { bool verify = checkSequenceRcpp(object); if (not verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!"); } // size of result vector int sz = n*object.size(); if (include_t0) sz += n; int vin = 0; // useful in filling below vectors NumericVector iteration(sz); CharacterVector values(sz); S4 ob(object[0]); CharacterVector sampledValues, newVals; // Initial State selection if not passed //----------------------------------------------------------------------------- CharacterVector ustates = ob.slot("states"); NumericVector rowProbs; for (int i = 0;i < ustates.size();i++) { rowProbs.push_back(1.0 / ustates.size()); } bool rselect = (t0.size() == 0); if (rselect) { t0 = sample(ustates, 1, false, rowProbs); } //------------------------------------------------------------------------------ // check whether t0 is in unique states or not for (int i = 0;i < ustates.size();i++) { if (ustates[i] == t0[0]) break; else if (i == ustates.size()-1) stop("Error! Initial state not defined"); } // generate n sequence for (int i = 0;i < n;i++) { // random selection of initial state if not passed to the function if (rselect) { t0 = sample(ustates, 1, false, rowProbs); } sampledValues = markovchainSequenceRcpp(1, object[0], t0, include_t0); if (object.size() > 1) { for (int j = 1;j < object.size();j++) { newVals = markovchainSequenceRcpp(1, object[j], sampledValues); sampledValues.push_back(newVals[0]); } } for (int k = 0;k < sampledValues.size();k++) { iteration[vin] = i + 1; values[vin++] = sampledValues[k]; } } return(List::create(iteration, values)); } struct MCList : public Worker { // 3-D matrix where each slice is a transition matrix const arma::cube mat; // number of transition matrices const int num_mat; // matrix where ith row vector store the list of states // names present in ith transition matrix const vector > names; // vector whose ith element store the dimension of ith // transition matrix const vector size_emat; // whether to include first state const bool include_t0; // info about initial state const bool init; // whether initial state is passed to the method const string init_state; // if yes what's the name // each element of list is a sequence list > output; // constructor for initialization MCList(const arma::cube &pmat, const int &pnum_mat, const vector > &pnames, const vector psize_emat, const bool &pinclude_t0, const bool &pinit, const string &pinit_state) : mat(pmat), num_mat(pnum_mat), names(pnames), size_emat(psize_emat), include_t0(pinclude_t0), init(pinit), init_state(pinit_state) {} MCList(const MCList& mclist, Split) : mat(mclist.mat), num_mat(mclist.num_mat), names(mclist.names), size_emat(mclist.size_emat), include_t0(mclist.include_t0), init(mclist.init), init_state(mclist.init_state) {} void operator()(std::size_t begin, std::size_t end) { // to take care of include_t0 unsigned int ci = 0; if (include_t0) ci = 1; // to store single sequence generated each time vector 
temp(num_mat+ci); // initial probability and states indices arma::vec in_probs(size_emat[0]); arma::vec in_states(size_emat[0]); // assume equal chances of selection of states for the first time for (unsigned int i = 0; i < in_probs.size(); i++) { in_probs[i] = 1.0 / size_emat[0]; in_states[i] = i; } // to store the index of the state selected arma::vec istate; string t0; // every time generate one sequence for (unsigned int p = begin; p < end; p++) { if (not init) { // randomly selected state istate = sample(in_states, 1, false, in_probs); t0 = names[0][istate[0]]; } else { t0 = init_state; } // include the state in the sequence if (include_t0) temp[0] = t0; // to generate one sequence for (unsigned int i = 0; i < (unsigned int)num_mat; i++) { // useful for generating rows probabilty vector unsigned int j = 0; for (j = 0; j < (unsigned int)size_emat[i]; j++) { if (names[i][j] == t0) break; } // vector to be passed to sample method arma::vec probs(size_emat[i]); arma::vec states(size_emat[i]); for (unsigned int k=0; k < probs.size(); k++) { probs[k] = mat(j, k, i); states[k] = k; } // new state selected arma::vec elmt = sample(states, 1, false, probs); t0 = names[i][elmt[0]]; // populate sequence temp[i+ci] = t0; } // insert one sequence to the output output.push_back(temp); } } void join(const MCList& rhs) { // constant iterator to the first element of rhs.output list >::const_iterator it = rhs.output.begin(); // merge the result of two parallel computation for (;it != rhs.output.end();it++) { output.push_back(*it); } } }; // Function to generate a list of sequence of states in parallel from non-homogeneous Markov chains. // // Provided any markovchainList object, it returns a list of sequence of states coming // from the underlying stationary distribution. 
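// (more precisely, each of the n sequences is simulated step by step through the
// chain list, starting either from the state passed via init_state or, if none is
// given, from a state drawn uniformly at random among the first chain's states)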
// // @param listObject markovchainList object // @param n Sample size // @param include_t0 Specify if the initial state shall be used // // @return A List // @author Giorgio Spedicato, Deepak Yadav // // @examples // statesNames <- c("a") // mcA <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(1), nrow = 1, byrow = TRUE, // dimnames = list(statesNames, statesNames))) // // statesNames <- c("a","b") // mcB <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(0.5, 0.5, 0.3, 0.7), nrow = 2, byrow = TRUE, // dimnames = list(statesNames, statesNames))) // // statesNames <- c("a","b","c") // mcC <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, // byrow = TRUE, dimnames = list(statesNames, statesNames))) // // mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) // // markovchainSequenceParallelRcpp(mclist, 99999, TRUE) // // // [[Rcpp::export(.markovchainSequenceParallelRcpp)]] List markovchainSequenceParallelRcpp(S4 listObject, int n, bool include_t0 = false, CharacterVector init_state = CharacterVector()) { // list of markovchain object List object = listObject.slot("markovchains"); bool verify = checkSequenceRcpp(object); if (not verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!"); } // store number of transition matrices int num_matrix = object.size(); // maximum of dimension of all transition matrices int max_dim_mat = 0; // to store the dimension of each transition matrices vector size_emat(num_matrix); // to store list of states in a transition matrix CharacterVector states; // calculate max_dim_mat and populate size_emat for (int i = 0; i < num_matrix; i++) { // extract ith markovchain object S4 ob = object[i]; // list of states in ith markovchain object states = ob.slot("states"); // keep track of maximun dimension if (states.size() > max_dim_mat) max_dim_mat = states.size(); // size of ith transition matrix size_emat[i] = states.size(); } // Matrix with ith row store the states in ith t-matrix vector > names(num_matrix, vector(max_dim_mat)); // to store all t-matrix arma::cube mat(max_dim_mat, max_dim_mat, num_matrix); mat.fill(0); for (int i = 0; i < num_matrix;i++) { // ith markovchain object S4 ob = object[i]; // t-matrix and states names NumericMatrix tmat = ob.slot("transitionMatrix"); CharacterVector stat_names = ob.slot("states"); // populate 3-D matrix for (int j = 0;j < tmat.nrow();j++) { for (int k = 0; k < tmat.ncol();k++) { mat(j, k, i) = tmat(j, k); } // populate names of states names[i][j] = stat_names[j]; } } // initial state is passed or not bool init = false; string ini_state; if (init_state.size() != 0) { init = true; ini_state = as(init_state[0]); } // create an object of MCList class MCList mcList(mat, num_matrix, names, size_emat, include_t0, init, ini_state); // start parallel computation parallelReduce(0, n, mcList); // list of sequences return wrap(mcList.output); } // convert a frequency matrix to a transition probability matrix NumericMatrix _toRowProbs(NumericMatrix x, bool sanitize = false) { int nrow = x.nrow(), ncol = x.ncol(); NumericMatrix out(nrow); for (int i = 0; i < nrow; i++) { double rowSum = 0; for (int j = 0; j < ncol; j++) rowSum += x(i, j); for (int j = 0; j < ncol; j++) { // not updating out(i,j) outside sanitize condition as it may lead to runtime error(rowSum=0) if (sanitize == true) { if (rowSum == 0) { out(i, j) = 1.0/ncol; } else { 
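          // rowSum > 0: turn the raw transition counts into relative frequencies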
out(i, j) = x(i, j) / rowSum; } } else { if (rowSum == 0) { out(i, j) = 0; } else { out(i, j) = x(i, j) / rowSum; } } } } out.attr("dimnames") = List::create(rownames(x), colnames(x)); return out; } // Create a frequency matrix //' @rdname markovchainFit //' //' @export // [[Rcpp::export]] NumericMatrix createSequenceMatrix(SEXP stringchar, bool toRowProbs = false, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { //--------------------------------------------------------------------- // check whether stringchar is a list or not if (TYPEOF(stringchar) == VECSXP) { List seqs = as(stringchar); CharacterVector pstates; // possiblestates for (int i = 0;i < seqs.size();i++) { CharacterVector tseq = unique(as(seqs[i])); for (int j = 0;j < tseq.size();j++) { if (tseq[j] != "NA") { pstates.push_back(tseq[j]); } } } for (int i = 0;i < possibleStates.size();i++) { pstates.push_back(possibleStates[i]); } pstates = unique(pstates); pstates = pstates.sort(); int sizeMatr = pstates.size(); NumericMatrix freqMatrix(sizeMatr); freqMatrix.attr("dimnames") = List::create(pstates, pstates); for (int i = 0;i < seqs.size();i++) { NumericMatrix temp = createSequenceMatrix(seqs[i], false, false, pstates); freqMatrix += temp; } if (sanitize == true) { for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) rowSum += freqMatrix(i, j); if (rowSum == 0) for (int j = 0; j < sizeMatr; j++) freqMatrix(i, j) = 1; } } if (toRowProbs == true) return _toRowProbs(freqMatrix, sanitize); return freqMatrix; } //--------------------------------------------------------------------- CharacterVector stringChar = as(stringchar); // may include missing values CharacterVector elements_na = unique(union_(stringChar, possibleStates)); // free from missing values CharacterVector elements = clean_nas(elements_na); elements = elements.sort(); int sizeMatr = elements.size(); // output matrix of dimensions equal to total possible states NumericMatrix freqMatrix(sizeMatr); freqMatrix.attr("dimnames") = List::create(elements, elements); CharacterVector rnames = rownames(freqMatrix); if (Rf_isMatrix(stringchar)) { // coerce to CharacterMatrix CharacterMatrix seqMat = as(stringchar); // number of columns must be 2 if (seqMat.ncol() != 2) { stop("Number of columns in the matrix must be 2"); } // populate frequency matrix int posFrom = 0, posTo = 0; for (long int i = 0; i < seqMat.nrow(); i ++) { if (seqMat(i, 0) != "NA" && seqMat(i, 1) != "NA") { for (int j = 0; j < rnames.size(); j ++) { if (seqMat(i, 0) == rnames[j]) posFrom = j; if (seqMat(i, 1) == rnames[j]) posTo = j; } freqMatrix(posFrom, posTo)++; } } } else { int posFrom = 0, posTo = 0; for (long int i = 0; i < stringChar.size() - 1; i ++) { if (stringChar[i] != "NA" && stringChar[i+1] != "NA") { for (int j = 0; j < rnames.size(); j ++) { if (stringChar[i] == rnames[j]) posFrom = j; if (stringChar[i + 1] == rnames[j]) posTo = j; } freqMatrix(posFrom, posTo)++; } } } // sanitizing if any row in the matrix sums to zero by posing the corresponding diagonal equal to 1/dim if (sanitize == true) { for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) rowSum += freqMatrix(i, j); if (rowSum == 0) for (int j = 0; j < sizeMatr; j++) freqMatrix(i, j) = 1; } } if (toRowProbs == true) return _toRowProbs(freqMatrix, sanitize); return (freqMatrix); } // log-likelihood double _loglikelihood(CharacterVector seq, NumericMatrix transMatr) { // to store the result double out = 0; // states names 
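  // (the value accumulated below is the observed-path log-likelihood,
  //  log L = sum over consecutive pairs (s[t], s[t+1]) of log(transMatr[s[t], s[t+1]]),
  //  with pairs that contain an "NA" observation skipped)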
CharacterVector rnames = rownames(transMatr); // caculate out int from = 0, to = 0; for (long int i = 0; i < seq.size() - 1; i ++) { if (seq[i] != "NA" && seq[i+1] != "NA") { for (int r = 0; r < rnames.size(); r ++) { if (rnames[r] == seq[i]) from = r; if (rnames[r] == seq[i + 1]) to = r; } out += log(transMatr(from, to)); } } return out; } // [[Rcpp::export(.mcListFitForList)]] List mcListFitForList(List data) { int l = data.size(); // length of list // pair of length and index // length of sequence data[index] vector > length_seq(l); for (int i = 0;i < l;i++) { CharacterVector temp = as(data[i]); length_seq[i] = make_pair(temp.size(), i); } // increasing order of the length of sequence in the list sort(length_seq.begin(), length_seq.end()); int i = 1; // ith transition int j = 0; // start from least length sequence List out; // to store result while(j < l) { int len = length_seq[j].first; if (i < len) { // transition from (i-1)th to ith CharacterMatrix temp(l-j, 2); // indicates wheter there is a valid transition for the current time of the // markov chain bool validTransition = false; for (int k = j;k < l;k++) { temp(k-j, 0) = (as(data[length_seq[k].second]))[i-1]; temp(k-j, 1) = (as(data[length_seq[k].second]))[i]; if (temp(k-j,0) != "NA" && temp(k-j, 1) != "NA") validTransition = true; } // frequency matrix if (validTransition) out.push_back(createSequenceMatrix(temp, false, true)); i++; } else { j++; } } return out; } List generateCI(double confidencelevel, NumericMatrix freqMatr) { int sizeMatr = freqMatr.nrow(); // the true confidence level is 1-(1-alpha)/2 float true_confidence_level = 1-(1-confidencelevel)/2.0; // transition matrix NumericMatrix initialMatr(sizeMatr, sizeMatr); // calculation of transition matrix // take care of rows with all entries 0 for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j); } // calculate rows probability for (int j = 0; j < sizeMatr; j++) { if (rowSum == 0) { initialMatr(i, j) = 1.0/sizeMatr; } else { initialMatr(i, j) = freqMatr(i, j)/rowSum; } } } // matrices to store end results NumericMatrix lowerEndpointMatr(sizeMatr, sizeMatr); NumericMatrix upperEndpointMatr(sizeMatr, sizeMatr); NumericMatrix standardError(sizeMatr, sizeMatr); // z score for given confidence interval // double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); double zscore = stats::qnorm_0(true_confidence_level, 1.0, 0.0); // populate above defined matrix double marginOfError, lowerEndpoint, upperEndpoint; for (int i = 0; i < sizeMatr; i++) { for (int j = 0; j < sizeMatr; j++) { if (freqMatr(i, j) == 0) { // whether entire ith row is zero or not bool notrans = true; for (int k = 0; k < sizeMatr; k++) { // if the entire ith row is not zero then set notrans to false if (freqMatr(i, k) != 0) { standardError(i, j) = lowerEndpointMatr(i, j) = upperEndpointMatr(i, j) = 0; notrans = false; break; } } // if entire ith row is zero if (notrans) standardError(i, j) = lowerEndpointMatr(i, j) = upperEndpointMatr(i, j) = 1; } else { // standard error calculation standardError(i, j) = initialMatr(i, j) / sqrt(freqMatr(i, j)); // marginal error calculation marginOfError = zscore * standardError(i, j); // lower and upper end point calculation lowerEndpoint = initialMatr(i, j) - marginOfError; upperEndpoint = initialMatr(i, j) + marginOfError; // taking care that upper and lower end point should be between 0(included) and 1(included) lowerEndpointMatr(i, j) = (lowerEndpoint > 1.0) ? 1.0 : ((0.0 > lowerEndpoint) ? 
0.0 : lowerEndpoint); upperEndpointMatr(i, j) = (upperEndpoint > 1.0) ? 1.0 : ((0.0 > upperEndpoint) ? 0.0 : upperEndpoint); } } } // set the rows and columns name as states names standardError.attr("dimnames") = upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = freqMatr.attr("dimnames"); return List::create(_["standardError"] = standardError, _["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr); } // Fit DTMC using MLE List _mcFitMle(SEXP data, bool byrow, double confidencelevel, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { NumericMatrix freqMatr = createSequenceMatrix(data, false, false, possibleStates); // matrix size = nrows = ncols int sizeMatr = freqMatr.nrow(); // initial matrix = transition matrix NumericMatrix initialMatr(sizeMatr); // set names of states as rows name and columns name initialMatr.attr("dimnames") = freqMatr.attr("dimnames"); // take care of rows with all entries 0 for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j); } // calculate rows probability for (int j = 0; j < sizeMatr; j++) { if (rowSum == 0) { initialMatr(i, j) = (sanitize ? 1.0/sizeMatr : 0); } else { initialMatr(i, j) = freqMatr(i, j)/rowSum; } } } // transpose the matrix if byrow is false if (byrow == false) { initialMatr = transposeMatrix(initialMatr); } // create markov chain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = initialMatr; outMc.slot("name") = "MLE Fit"; List CI = generateCI(confidencelevel, freqMatr); // return a list of important results return List::create(_["estimate"] = outMc, _["standardError"] = CI[0], _["confidenceLevel"] = CI[1], _["lowerEndpointMatrix"] = CI[2], _["upperEndpointMatrix"] = CI[3] ); } // Fit DTMC using Laplacian smooth List _mcFitLaplacianSmooth(CharacterVector stringchar, bool byrow, double laplacian = 0.01, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // create frequency matrix NumericMatrix origNum = createSequenceMatrix(stringchar, false, sanitize, possibleStates); // store dimension of frequency matrix int nRows = origNum.nrow(), nCols = origNum.ncol(); // convert frequency matrix to transition matrix for (int i = 0; i < nRows; i ++) { double rowSum = 0; // add laplacian correction to each entry // also calculate row's sum for (int j = 0; j < nCols; j ++) { origNum(i,j) += laplacian; rowSum += origNum(i,j); } // get a transition matrix and a DTMC for (int j = 0; j < nCols; j ++) { if (rowSum == 0) origNum(i,j) = sanitize ? 
origNum(i,j)/rowSum : 0; else origNum(i,j) = origNum(i,j)/rowSum; } } // transpose transition matrix = columnwise storage if (byrow == false) { origNum = transposeMatrix(origNum); } // create markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = origNum; outMc.slot("name") = "Laplacian Smooth Fit"; return List::create(_["estimate"] = outMc); } // bootstrap a sequence to produce a list of sample sequences List _bootstrapCharacterSequences(CharacterVector stringchar, int n, long int size = -1, CharacterVector possibleStates = CharacterVector()) { // store length of sequence if (size == -1) { size = stringchar.size(); } // frequency matrix NumericMatrix contingencyMatrix = createSequenceMatrix(stringchar, true, true, possibleStates); // many samples from a given a sequence :: bootstrap // res list is helper list List samples, res; // state names CharacterVector itemset = rownames(contingencyMatrix); // number of distinct states int itemsetsize = itemset.size(); // access R sample function Function sample("sample"); for (int i = 0; i < n; i ++) { // charseq to store a fresh sequence CharacterVector charseq, resvec; // select a random number between 0 ans itemsize int rnd = (int)(runif (1)(0) * itemsetsize); // random state String ch = itemset[rnd]; // push start state to charseq charseq.push_back(ch); for (long int j = 1; j < size; j ++) { // store row probability NumericVector probsVector; // populate probsVector for (int k = 0; k < itemsetsize; k ++) { if ((std::string)itemset[k] == (std::string) ch) { probsVector = contingencyMatrix(k, _); break; } } // select next state of sequence res = sample(itemset, 1, true, probsVector); resvec = res[0]; // current state ch = resvec[0]; charseq.push_back(ch); } // every add one sequence samples.push_back(charseq); } // return a list of n sequence of same length as of given sequence return samples; } // estimate from the list of bootstrapped matrices List _fromBoot2Estimate(List listMatr) { // number of transition matrices int sampleSize = listMatr.size(); // first transition matrix NumericMatrix firstMat = listMatr[0]; // dimension of matrix int matrDim = firstMat.nrow(); // matrix to store mean and standard deviation NumericMatrix matrMean(matrDim), matrSd(matrDim); // populate mean and sd matrix for (int i = 0; i < matrDim; i ++) { for (int j = 0; j < matrDim; j ++) { NumericVector probsEstimated; for (int k = 0; k < sampleSize; k ++) { NumericMatrix mat = listMatr[k]; probsEstimated.push_back(mat(i,j)); } matrMean(i, j) = mean(probsEstimated); matrSd(i, j) = Rcpp::sd(probsEstimated); } } // set rows and columns names = states names matrMean.attr("dimnames") = List::create(rownames(firstMat), colnames(firstMat)); matrSd.attr("dimnames") = matrMean.attr("dimnames"); // return list of estimated mean transition matrix and estimated sd matrix return List::create(_["estMu"] = matrMean, _["estSigma"] = matrSd); } struct BootstrapList : public Worker { // transition matrix const RMatrix input; // unique states const vector states; // length of sequence const int len; // list of new sequences list > output; // constructor BootstrapList(const NumericMatrix input, const vector states, const int len) : input(input), states(states), len(len) {} BootstrapList(const BootstrapList& bsList, Split) : input(bsList.input), states(bsList.states), len(bsList.len) {} // generate (end-begin) sequences void operator()(std::size_t begin, std::size_t end) { // number of unique states unsigned int n = states.size(); // initial probability vector 
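    // (iprobs is a uniform distribution over the states, used only to draw the
    // starting state of each bootstrap replicate; ustates holds the state
    // indices passed to sample(); probs is reused below to hold the
    // transition-matrix row of the current state)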
arma::vec iprobs(n); // probability vector (can be any row of transition matrix) arma::vec probs(n); // unique states indices arma::vec ustates(n); // initialization for (unsigned int i = 0;i < n;i++) { iprobs[i] = 1.0/n; ustates[i] = i; } // to store new state generated arma::vec istate; // every time generate one sequence for (unsigned int p = begin; p < (unsigned int)end;p++) { // randomly select starting state vector result(len); istate = sample(ustates, 1, false, iprobs); result[0] = states[istate[0]]; // given a present state generate a future state for (unsigned int j = 1; j < (unsigned int)len;j++) { // row vector corresponding to state istate[0] for (unsigned int k = 0;k < (unsigned int)n;k++) { probs[k] = input(istate[0], k); } // select future state istate = sample(ustates, 1, false, probs); result[j] = states[istate[0]]; } // populate a sequence output.push_back(result); } } void join(const BootstrapList& rhs) { // constant iterator to the first element of rhs.output list >::const_iterator it = rhs.output.begin(); // merge the result of two parallel computation for (;it != rhs.output.end();it++) { output.push_back(*it); } } }; List _bootstrapCharacterSequencesParallel(CharacterVector stringchar, int n, long int size = -1, CharacterVector possibleStates = CharacterVector()) { // store length of sequence if (size == -1) { size = stringchar.size(); } // frequency matrix NumericMatrix contingencyMatrix = createSequenceMatrix(stringchar, true, true, possibleStates); // state names vector itemset = as >(rownames(contingencyMatrix)); // number of distinct states // int itemsetsize = itemset.size(); BootstrapList bsList(contingencyMatrix, itemset, size); parallelReduce(0, n, bsList); return wrap(bsList.output); } // Fit DTMC using bootstrap method List _mcFitBootStrap(CharacterVector data, int nboot, bool byrow, bool parallel, double confidencelevel, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // list of sequence generated using given sequence List theList = (parallel) ? 
_bootstrapCharacterSequencesParallel(data, nboot, data.size()) : _bootstrapCharacterSequences(data, nboot, data.size()); // number of new sequence int n = theList.size(); // to store frequency matrix for every sequence List pmsBootStrapped(n); // populate pmsBootStrapped if (parallel) for (int i = 0; i < n; i++) pmsBootStrapped[i] = createSequenceMatrix(theList[i], true, sanitize, possibleStates); else for (int i = 0; i < n; i++) pmsBootStrapped[i] = createSequenceMatrix(theList[i], true, sanitize, possibleStates); List estimateList = _fromBoot2Estimate(pmsBootStrapped); // transition matrix NumericMatrix transMatr = _toRowProbs(estimateList["estMu"], sanitize); // markovchain object S4 estimate("markovchain"); estimate.slot("transitionMatrix") = transMatr; estimate.slot("byrow") = byrow; estimate.slot("name") = "BootStrap Estimate"; // z score for given confidence interval double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); // store dimension of matrix int nrows = transMatr.nrow(); int ncols = transMatr.ncol(); // matrix to store end results NumericMatrix lowerEndpointMatr(nrows, ncols), upperEndpointMatr(nrows, ncols); NumericMatrix sigma = estimateList["estSigma"], standardError(nrows, ncols); // populate above defined matrix double marginOfError, lowerEndpoint, upperEndpoint; for (int i = 0; i < nrows; i ++) { for (int j = 0; j < ncols; j ++) { // standard error calculation standardError(i, j) = sigma(i, j) / sqrt(double(n)); // marginal error calculation marginOfError = zscore * standardError(i, j); // lower and upper end point calculation lowerEndpoint = transMatr(i, j) - marginOfError; upperEndpoint = transMatr(i, j) + marginOfError; // taking care that upper and lower end point should be between 0(included) and 1(included) lowerEndpointMatr(i, j) = (lowerEndpoint > 1.0) ? 1.0 : ((0.0 > lowerEndpoint) ? 0.0 : lowerEndpoint); upperEndpointMatr(i, j) = (upperEndpoint > 1.0) ? 1.0 : ((0.0 > upperEndpoint) ? 
0.0 : upperEndpoint); } } // set the rows and columns name as states names standardError.attr("dimnames") = upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = transMatr.attr("dimnames"); // return a list of important results List out = List::create(_["estimate"] = estimate, _["standardError"] = standardError, _["confidenceInterval"] = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr), _["bootStrapSamples"] = pmsBootStrapped ); return out; } // convert matrix data to transition probability matrix // [[Rcpp::export(.matr2Mc)]] S4 _matr2Mc(CharacterMatrix matrData, double laplacian = 0, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // dimension of input matrix long int nRows = matrData.nrow(), nCols = matrData.ncol(); // set of states std::set uniqueVals; // populate uniqueVals set for (long int i = 0; i < nRows; i++) for (long int j = 0; j < nCols; j++) { if (matrData(i,j) != "NA") uniqueVals.insert((std::string)matrData(i, j)); } for (int i = 0;i < possibleStates.size();i++) { uniqueVals.insert((std::string)possibleStates[i]); } // unique states int usize = uniqueVals.size(); // matrix of dimension usize NumericMatrix contingencyMatrix (usize); // state names as rows name and columns name contingencyMatrix.attr("dimnames") = List::create(uniqueVals, uniqueVals); // iterator for set of states std::set::iterator it; // populate contingency matrix int stateBegin = 0, stateEnd = 0; for (long int i = 0; i < nRows; i ++) { for (long int j = 1; j < nCols; j ++) { if (matrData(i,j-1) != "NA" && matrData(i,j) != "NA") { // row and column number of begin state and end state int k = 0; for (it = uniqueVals.begin(); it != uniqueVals.end(); ++it, k++) { if (*it == (std::string)matrData(i, j-1)) { stateBegin = k; } if (*it == (std::string)matrData(i,j)) { stateEnd = k; } } contingencyMatrix(stateBegin,stateEnd)++; } } } // add laplacian correction if needed for (int i = 0; i < usize; i++) { double rowSum = 0; for (int j = 0; j < usize; j++) { contingencyMatrix(i,j) += laplacian; rowSum += contingencyMatrix(i, j); } // get the transition matrix and a DTMC for (int j = 0; j < usize; j ++) { if (sanitize == true) { if (rowSum == 0) { contingencyMatrix(i,j) = 1.0/usize; } else { contingencyMatrix(i,j) /= rowSum; } } else { if (rowSum == 0) { contingencyMatrix(i,j) = 0; } else { contingencyMatrix(i,j) /= rowSum; } } } } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = contingencyMatrix; return(outMc); } // convert matrix data to transition probability matrix // [[Rcpp::export(.list2Mc)]] S4 _list2Mc(List data, double laplacian = 0, bool sanitize = false) { // set of states std::set uniqueVals; // populate uniqueVals set for (long int i = 0; i < data.size(); i++) { CharacterVector temp = as(data[i]); for (long int j = 0; j < temp.size(); j++) { uniqueVals.insert((std::string)temp[j]); } } // unique states int usize = uniqueVals.size(); // matrix of dimension usize NumericMatrix contingencyMatrix (usize); // state names as rows name and columns name contingencyMatrix.attr("dimnames") = List::create(uniqueVals, uniqueVals); // iterator for set of states std::set::iterator it; // populate contingency matrix int stateBegin = 0, stateEnd = 0; for (long int i = 0; i < data.size(); i ++) { CharacterVector temp = as(data[i]); for (long int j = 1; j < temp.size(); j ++) { // row and column number of begin state and end state int k = 0; for 
(it = uniqueVals.begin(); it != uniqueVals.end(); ++it, k++) { if (*it == (std::string)temp[j-1]) { stateBegin = k; } if (*it == (std::string)temp[j]) { stateEnd = k; } } contingencyMatrix(stateBegin,stateEnd)++; } } // add laplacian correction if needed for (int i = 0; i < usize; i++) { double rowSum = 0; for (int j = 0; j < usize; j++) { contingencyMatrix(i,j) += laplacian; rowSum += contingencyMatrix(i, j); } // get the transition matrix and a DTMC for (int j = 0; j < usize; j ++) { if (sanitize == true) { if (rowSum == 0) { contingencyMatrix(i,j) = 1.0/usize; } else { contingencyMatrix(i,j) /= rowSum; } } else { if (rowSum == 0) { contingencyMatrix(i,j) = 0; } else { contingencyMatrix(i,j) /= rowSum; } } } } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = contingencyMatrix; return(outMc); } //' @name inferHyperparam //' @title Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set //' @description Since the Bayesian inference approach implemented in the package is based on conjugate priors, //' hyperparameters must be provided to model the prior probability distribution of the chain //' parameters. The hyperparameters are inferred from a given a priori matrix under the assumption //' that the matrix provided corresponds to the mean (expected) values of the chain parameters. A //' scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred //' from a data set. //' //' @param transMatr A valid transition matrix, with dimension names. //' @param scale A vector of scaling factors, each element corresponds to the row names of the provided transition //' matrix transMatr, in the same order. //' @param data A data set from which the hyperparameters are inferred. //' //' @details transMatr and scale need not be provided if data is provided. //' @return Returns the hyperparameter matrix in a list. //' //' @note The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, //' and the elements in the matrix are correspondingly permuted. //' //' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R //' package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi, Giorgio Spedicato //' @seealso \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} //' //' @examples //' data(rain, package = "markovchain") //' inferHyperparam(data = rain$rain) //' //' weatherStates <- c("sunny", "cloudy", "rain") //' weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, //' 0.3, 0.4, 0.3, //' 0.2, 0.4, 0.4), //' byrow = TRUE, nrow = 3, //' dimnames = list(weatherStates, weatherStates)) //' inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) //' //' @export //' // [[Rcpp::export]] List inferHyperparam(NumericMatrix transMatr = NumericMatrix(), NumericVector scale = NumericVector(), CharacterVector data = CharacterVector()) { // stop if there is only one element in the matrix and size of data sequence is zero if (transMatr.nrow() * transMatr.ncol() == 1 && data.size() == 0) stop("Provide the prior transition matrix or the data set in order to infer the hyperparameters"); // to store final result List out; // Number of elements are greater than 1 if (transMatr.nrow() * transMatr.ncol() != 1) { if (scale.size() == 0) { stop("Provide a non-zero scaling factor vector to infer integer hyperparameters"); } // --------begin validity checks for the transition matrix--------- if (transMatr.nrow() != transMatr.ncol()) { stop("Transition matrix dimensions are inconsistent"); } // number of rows in transition matrix int sizeMatr = transMatr.nrow(); // if any element is greater than 1 or less than 0 then raise error // sum of each rows must lie between 1 - eps and 1 + eps for (int i = 0; i < sizeMatr; i++) { double rowSum = 0., eps = 1e-10; for (int j = 0; j < sizeMatr; j++) { if (transMatr(i, j) < 0. || transMatr(i, j) > 1.) stop("The entries in the transition matrix must each belong to the interval [0, 1]"); else rowSum += transMatr(i, j); } if (rowSum <= 1. - eps || rowSum >= 1. + eps) stop("Each rows of the transition matrix must sum to 1"); } // rows and columns name of transition matrix List dimNames = transMatr.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; // sorted rows and columns names CharacterVector sortedColNames(sizeMatr), sortedRowNames(sizeMatr); for (int i = 0; i < sizeMatr; i++) { sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); } sortedColNames.sort(); sortedRowNames.sort(); // rows names vector and columns name vector must be same // and no names in names vectors should be same for (int i = 0; i < sizeMatr; i++) { if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); } // --------end of validity checks for the transition matrix--------- // --------beginning of validity checks for the scale factor vector--------- // length of scale vector must be equal to number of rows in transition matrix if (scale.size() != sizeMatr) stop("The dimensions of the scale vector must match the number of states in the chain"); // if any value in the scale vector is zero for (int i = 0; i < sizeMatr; i++) { if (scale(i) == 0) stop("The scaling factors must be non-zero!"); } // --------end of validity checks for the scale factor vector--------- // Creation of output matrix i.e. 
hyper param matrix NumericMatrix hpScaled(sizeMatr); hpScaled.attr("dimnames") = List::create(rowNames, colNames); // populate hyper param matrix for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) hpScaled(i, j) = scale(i) * transMatr(i, j); /* shift rows and columns so that names of rows and columns names will be in sorted order */ hpScaled = sortByDimNames(hpScaled); // store list of hyper param scaled matrix out = List::create(_["scaledInference"] = hpScaled); } else if (data.size() != 0) { // to store unique states in sorted order CharacterVector elements = data; for (int i = 0; i < data.size(); i++) elements.push_back(data[i]); elements = unique(elements).sort(); // size of hyperparam matrix int sizeMatr = elements.size(); // create hyperparam matrix NumericMatrix hpData(sizeMatr); hpData.attr("dimnames") = List::create(elements, elements); std::fill(hpData.begin(), hpData.end(), 1); // populate hyper param matrix int posFrom = 0, posTo = 0; for (long int i = 0; i < data.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (data[i] == elements[j]) posFrom = j; if (data[i + 1] == elements[j]) posTo = j; } hpData(posFrom,posTo)++; } // ouput data out = List::create(_["dataInference"] = hpData); } return out; } //' @name markovchainFit //' @title Function to fit a discrete Markov chain //' @description Given a sequence of states arising from a stationary state, //' it fits the underlying Markov chain distribution using either MLE (also using a //' Laplacian smoother), bootstrap or by MAP (Bayesian) inference. //' //' @param data It can be a character vector or a {n x n} matrix or a {n x n} data frame or a list //' @param method Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace" //' @param byrow it tells whether the output Markov chain should show the transition probabilities by row. //' @param nboot Number of bootstrap replicates in case "bootstrap" is used. //' @param laplacian Laplacian smoothing parameter, default zero. It is only used when "laplace" method //' is chosen. //' @param name Optional character for name slot. //' @param parallel Use parallel processing when performing Boostrap estimates. //' @param confidencelevel \deqn{\alpha} level for conficence intervals width. //' Used only when \code{method} equal to "mle". //' @param confint a boolean to decide whether to compute Confidence Interval or not. //' @param hyperparam Hyperparameter matrix for the a priori distribution. If none is provided, //' default value of 1 is assigned to each parameter. This must be of size //' {k x k} where k is the number of states in the chain and the values //' should typically be non-negative integers. //' @param stringchar It can be a {n x n} matrix or a character vector or a list //' @param toRowProbs converts a sequence matrix into a probability matrix //' @param sanitize put 1 in all rows having rowSum equal to zero //' @param possibleStates Possible states which are not present in the given sequence //' //' @details Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} //' contain \code{NAs}, the related \code{NA} containing transitions will be ignored. //' //' @return A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix //' of standards deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method //' is used, the lower and upper confidence bounds are returned along with the standard error. 
//' The "map" method also returns the expected value of the parameters with respect to the //' posterior distribution. //' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 //' //' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, //' and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, //' Alfred Hubler, Santa Fe Institute //' //' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R //' package version 0.2.5 //' //' @author Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi //' @note This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". //' In addition, parallel facility is not complete, involving only a part of the bootstrap process. //' When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is //' currently available. //' //' @seealso \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} //' @examples //' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", //' "b", "b", "b", "a") //' sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) //' mcFitMLE <- markovchainFit(data = sequence) //' mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") //' //' na.sequence <- c("a", NA, "a", "b") //' # There will be only a (a,b) transition //' na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) //' mcFitMLE <- markovchainFit(data = na.sequence) //' //' # data can be a list of character vectors //' sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) //' mcFitMap <- markovchainFit(sequences, method = "map") //' mcFitMle <- markovchainFit(sequences, method = "mle") //' @rdname markovchainFit //' //' @export //' // [[Rcpp::export]] List markovchainFit(SEXP data, String method = "mle", bool byrow = true, int nboot = 10, double laplacian = 0, String name = "", bool parallel = false, double confidencelevel = 0.95, bool confint = true, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { if (method != "mle" && method != "bootstrap" && method != "map" && method != "laplace") { stop ("method should be one of \"mle\", \"bootsrap\", \"map\" or \"laplace\""); } // list to store the output List out; // if input data is data frame or matrix if (Rf_inherits(data, "data.frame") || Rf_isMatrix(data)) { // store input data in mat CharacterMatrix mat; // if data is a data.frame force it to matrix if (Rf_inherits(data, "data.frame")) { DataFrame df(data); // matrix : no of rows = no of rows in df : same for number of columns mat = CharacterMatrix(df.nrows(), df.size()); for (long int i = 0; i < df.size(); i++) mat(_,i) = CharacterVector(df[i]); } else { mat = data; } // byrow assumes distinct observations (trajectiories) are per row // otherwise transpose if (!byrow) mat = transposeMatrix(mat); S4 outMc = _matr2Mc(mat, laplacian, sanitize, possibleStates); // whether to compute confidence interval or not if (confint) { // convert matrix to list int nrows = mat.nrow(); List manyseq(nrows); for (int i = 0; i < nrows; i++) manyseq[i] = mat(i, _); out = _mcFitMle(manyseq, byrow, confidencelevel, sanitize, possibleStates); out[0] = outMc; } else { out = List::create(_["estimate"] = outMc); } } else if (TYPEOF(data) == VECSXP) { if (method == "mle") { out = _mcFitMle(data, byrow, confidencelevel, sanitize, possibleStates); 
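      // (for a list of sequences, both the MLE and the MAP fit pool the
      // transition counts across all the sequences before estimating the
      // transition matrix)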
} else if (method == "map") { out = _mcFitMap(data, byrow, confidencelevel, hyperparam, sanitize, possibleStates); } else stop("method not available for a list"); } else { if (method == "mle") { out = _mcFitMle(data, byrow, confidencelevel, sanitize, possibleStates); } else if (method == "bootstrap") { out = _mcFitBootStrap(data, nboot, byrow, parallel, confidencelevel, sanitize, possibleStates); } else if (method == "laplace") { out = _mcFitLaplacianSmooth(data, byrow, laplacian, sanitize, possibleStates); } else if (method == "map") { out = _mcFitMap(data, byrow, confidencelevel, hyperparam, sanitize, possibleStates); } } // markovchain object S4 estimate = out["estimate"]; if (name != "") { estimate.slot("name") = name; } // transition matrix NumericMatrix transMatr = estimate.slot("transitionMatrix"); // data is neither data frame nor matrix if (!Rf_inherits(data, "data.frame") && !Rf_isMatrix(data) && TYPEOF(data) != VECSXP) out["logLikelihood"] = _loglikelihood(data, transMatr); estimate.slot("states") = rownames(transMatr); out["estimate"] = estimate; return out; } // [[Rcpp::export(.noofVisitsDistRCpp)]] NumericVector noofVisitsDistRCpp(NumericMatrix matrix, int i,int N) { // no of states in the process int noOfStates = matrix.ncol(); arma::vec out = arma::zeros(noOfStates); arma::mat Tmatrix = as(matrix); arma::mat temp = Tmatrix; // initial distribution is in the transition matrix itself for (int j = 0; j < noOfStates; j++) out[j] = Tmatrix(i - 1, j); //The distribution for Nth step is calculates after N multiplication of the transition matrix // and adding everytime the coresponding ratios // finally dividing by N for (int p = 0; p < N - 1; p++) { temp = temp * Tmatrix; for (int j = 0; j < noOfStates; j++) out[j] += temp(i - 1, j); } out = out/N; NumericVector R = wrap(out); return R; } #endif markovchain/src/Makevars.win0000644000176200001440000000010013762012754015631 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) CXX_STD = CXX11 markovchain/src/utils.cpp0000644000176200001440000002405713762012754015226 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include #include #include using namespace Rcpp; using namespace arma; using namespace std; // Defined in probabilistic.cpp bool isAccessible(S4 obj, String from, String to); // Defined in probabilistic.cpp LogicalMatrix reachabilityMatrix(S4 obj); // matrix power function // O(log n * m³) where m is the number of rows / cols of A mat matrixPow(const mat& A, int n) { int m = A.n_rows; mat result = eye(m, m); mat partial = A; // We can decompose n = 2^a + 2^b + 2^c ... 
with a > b > c >= 0 // Compute last = a + 1 while (n > 0) { if ((n & 1) > 0) result = result + partial; partial = partial * partial; n >>= 1; } return result; } // check if two vectors are intersected bool intersects(CharacterVector x, CharacterVector y) { if (x.size() < y.size()) return intersects(y, x); else { unordered_set values; bool intersect = false; for (auto value : x) values.insert(as(value)); for (auto it = y.begin(); it != y.end() && !intersect; ++it) intersect = values.count(as(*it)) > 0; return intersect; } } bool anyElement(const mat& matrix, bool (*condition)(const double&)) { int numRows = matrix.n_rows; int numCols = matrix.n_cols; bool found = false; for (int i = 0; i < numRows && !found; ++i) for (int j = 0; j < numCols && !found; ++j) found = condition(matrix(i, j)); return found; } bool allElements(const mat& matrix, bool (*condition)(const double&)) { int numRows = matrix.n_rows; int numCols = matrix.n_cols; bool all = true; for (int i = 0; i < numRows && all; ++i) for (int j = 0; j < numCols && all; ++j) all = condition(matrix(i, j)); return all; } bool approxEqual(const double& a, const double& b) { if (a >= b) return (a - b) <= 1E-7; else return approxEqual(b, a); } bool approxEqual(const cx_double& a, const cx_double& b){ double x = a.real() - b.real(); double y = a.imag() - b.imag(); return (x*x - y*y) <= 1E-14; } // check if prob is probability or not // [[Rcpp::export(.isProbability)]] bool isProb(double prob) { return (prob >= 0 && prob <= 1); } // checks if a matrix is stochastic (by rows or by columns), i.e. all // elements are probabilities and the rows (cols, resp.) sum 1 // [[Rcpp::export(.isStochasticMatrix)]] bool isStochasticMatrix(NumericMatrix m, bool byrow) { if (!byrow) m = transpose(m); int nrow = m.nrow(); int ncol = m.ncol(); bool isStochastic = true; double rowSum; for (int i = 0; i < nrow && isStochastic; ++i) { rowSum = 0; for (int j = 0; j < ncol && isStochastic; ++j) { isStochastic = m(i, j) >= 0; rowSum += m(i, j); } isStochastic = approxEqual(rowSum, 1); } return isStochastic; } // [[Rcpp::export(.isProbabilityVector)]] bool isProbVector(NumericVector prob) { bool result = true; double sumProbs = 0; for (int i = 0; i < prob.size() && result; ++i) { result = prob[i] >= 0; sumProbs += prob[i]; } return result && approxEqual(sumProbs, 1); } // [[Rcpp::export(.testthatIsAccesibleRcpp)]] bool checkIsAccesibleMethod(S4 obj) { CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); LogicalMatrix reachability = reachabilityMatrix(obj); int m = states.size(); bool correct = true; bool reachable; for (int i = 0; i < m && correct; ++i) { for (int j = 0; j < m && correct; ++j) { reachable = (byrow ? 
reachability(i, j) : reachability(j, i)); correct = isAccessible(obj, states(i), states(j)) == reachable; } } return correct; } // [[Rcpp::export(.approxEqualMatricesRcpp)]] bool approxEqual(NumericMatrix a, NumericMatrix b) { int a_ncol = a.ncol(); int b_ncol = b.ncol(); int a_nrow = a.nrow(); int b_nrow = b.nrow(); if (a_ncol != b_ncol || a_nrow != b_nrow) return false; else { bool equal = true; for (int i = 0; i < a_nrow && equal; ++i) for (int j = 0; j < a_ncol && equal; ++j) equal = approxEqual(a(i, j), b(i, j)); return equal; } } // This method receives the output of communicatingClasses(object) and object@states // and checks that in fact the communicating classes are a partition of states // Is a method agnostic on whether the Markov Chain was given by rows or columns // [[Rcpp::export(.testthatIsPartitionRcpp)]] bool isPartition(List commClasses, CharacterVector states) { int n = states.size(); unordered_set used; unordered_set originalStates; int numClassStates = 0; bool partition = true; for (auto state : states) originalStates.insert((string) state); // Check that the union of the classes is // states and they do not overlap for (int i = 0; i < commClasses.size() && partition; ++i) { CharacterVector currentClass = commClasses(i); numClassStates += currentClass.size(); for (int j = 0; j < currentClass.size() && partition; ++j) { string state = (string) currentClass(j); partition = used.count(state) == 0 && originalStates.count(state) > 0; used.insert(state); } } return partition && numClassStates == n; } // This is simply a method that checks the following recurrence, // naming p = probs, f = hitting, it checks: // // f(i, j) = p(i, j) + ∑_{k ≠ j} p(i, k) f(k, j) // // where p are the transitionMatrix probs and hitting are the // hitting probabilities for the Markov Chain associated to // probs. byrow indicates whether probs is an stochastic matrix // by rows or by columns. // [[Rcpp::export(.testthatAreHittingRcpp)]] bool areHittingProbabilities(NumericMatrix probs, NumericMatrix hitting, bool byrow) { if (!byrow) { probs = transpose(probs); hitting = transpose(hitting); } int numStates = probs.nrow(); bool holds = true; double result; for (int i = 0; i < numStates && holds; ++i) { for (int j = 0; j < numStates && holds; ++j) { result = 0; for (int k = 0; k < numStates; ++k) if (k != j) result -= probs(i, k) * hitting(k, j); result += hitting(i, j) - probs(i, j); holds = approxEqual(result, 0); } } return holds; } // This is simply a method that checks the following recurrence, // naming p = probs, E = mean number of visits, f = hitting // probabilities, it checks: // // E(i, j) = p(i, j) / (1 - f(j, j)) + ∑_{k ≠ j} p(i, k) E(k, j) // // Note this recurrence is similar to the one for hitting probabilities // We have to take care when E(i, j) = 0, because we would have to check // that in either p(i, k) = 0 or E(k, j) = 0. If E(i, j) = ∞ we would have // to check that either p(i, j) > 0 or (p(i, k) != 0 and E(k, j) = ∞) // // where p are the transitionMatrix probs, numVisits are the mean // number of visits for each state, and hitting are the hitting // probabilities for the Markov Chain associated to probs. // byrow indicates whether probs is an stochastic matrix // by rows or by columns. 
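// Both recurrences (hitting probabilities and mean number of visits) can also
// be checked interactively from R against the exported methods; a minimal
// sketch, for illustration only (a small irreducible chain, so every hitting
// probability equals 1):
//
//   library(markovchain)
//   P  <- matrix(c(0.5, 0.5, 0.2, 0.8), nrow = 2, byrow = TRUE,
//                dimnames = list(c("a", "b"), c("a", "b")))
//   mc <- new("markovchain", transitionMatrix = P)
//   h  <- hittingProbabilities(mc)
//   # h[i, j] should equal P[i, j] + sum over k != j of P[i, k] * h[k, j]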
// [[Rcpp::export(.testthatAreMeanNumVisitsRcpp)]] bool areMeanNumVisits(NumericMatrix probs, NumericMatrix numVisits, NumericMatrix hitting, bool byrow) { if (!byrow) { probs = transpose(probs); numVisits = transpose(numVisits); hitting = transpose(hitting); } int numStates = probs.ncol(); bool holds = true; double result; double inverse; for (int j = 0; j < numStates && holds; ++j) { if (!approxEqual(hitting(j, j), 1)) { inverse = 1 / (1 - hitting(j, j)); for (int i = 0; i < numStates && holds; ++i) { result = 0; for (int k = 0; k < numStates; ++k) if (k != j) result -= probs(i, k) * numVisits(k, j); result += numVisits(i, j) - probs(i, j) * inverse; holds = approxEqual(result, 0); } } } return holds; } // [[Rcpp::export(.testthatRecurrentHittingRcpp)]] bool recurrentHitting(List recurrentClasses, NumericMatrix hitting, CharacterVector states, bool byrow) { if (!byrow) hitting = transpose(hitting); unordered_map stateToIndex; bool correct = true; int n = states.size(); for (int i = 0; i < n; ++i) stateToIndex[(string) states(i)] = i; for (CharacterVector recClass : recurrentClasses) { unordered_set classIndexes; for (auto state : recClass) classIndexes.insert(stateToIndex[(string) state]); for (int i : classIndexes) { for (int j = 0; j < n; ++j) { if (classIndexes.count(j) > 0) correct = correct && approxEqual(hitting(i, j), 1); else correct = correct && approxEqual(hitting(i, j), 0); } } } return correct; } // [[Rcpp::export(.testthatHittingAreOneRcpp)]] bool hittingProbsAreOne(NumericMatrix matrix) { bool allOne = true; int nrow = matrix.nrow(); int ncol = matrix.ncol(); for (int i = 0; i < nrow && allOne; ++i) for (int j = 0; j < ncol && allOne; ++j) allOne = approxEqual(matrix(i, j), 1); return allOne; } // [[Rcpp::export(.testthatAbsorbingAreRecurrentClassRcpp)]] bool absorbingAreRecurrentClass(CharacterVector absorbingStates, List recurrentClasses) { unordered_set singletonRecurrent; unordered_set absorbing; string current; bool diffEmpty = true; for (CharacterVector recClass : recurrentClasses) if (recClass.size() == 1) singletonRecurrent.insert((string) (*recClass.begin())); for (auto state : absorbingStates) absorbing.insert((string) state); for (int i = 0; i < absorbingStates.size() && diffEmpty; ++i) { current = (string) absorbingStates(i); diffEmpty = singletonRecurrent.count(current) > 0; } for (auto it = singletonRecurrent.begin(); it != singletonRecurrent.end() && diffEmpty; ++it) { current = (string) (*it); diffEmpty = absorbing.count(current) > 0; } return diffEmpty; } markovchain/src/ctmcClassesAndMethods.cpp0000644000176200001440000000304613762012754020274 0ustar liggesusers#include using namespace Rcpp; //' @name generatorToTransitionMatrix //' @title Function to obtain the transition matrix from the generator //' @description The transition matrix of the embedded DTMC is inferred from the CTMC's generator //' //' @usage generatorToTransitionMatrix(gen, byrow = TRUE) //' //' @param gen The generator matrix //' @param byrow Flag to determine if rows (columns) sum to 0 //' @return Returns the transition matrix. //' //' @references //' Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. 
//' Anderson, University of Wisconsin at Madison //' //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{rctmc}},\code{\link{ctmc-class}} //' @examples //' energyStates <- c("sigma", "sigma_star") //' byRow <- TRUE //' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, //' byrow = byRow, dimnames = list(energyStates, energyStates)) //' generatorToTransitionMatrix(gen) //' //' @export // [[Rcpp::export]] NumericMatrix generatorToTransitionMatrix(NumericMatrix gen, bool byrow = true){ NumericMatrix transMatr(gen.nrow()); transMatr.attr("dimnames") = gen.attr("dimnames"); if (byrow) { for (int i = 0; i < gen.nrow(); i++){ for (int j = 0; j < gen.ncol(); j++){ if (i != j) transMatr(i, j) = -gen(i, j) / gen(i, i); } } } else { for (int j = 0; j < gen.ncol(); j++){ for (int i = 0; i < gen.nrow(); i++){ if (i != j) transMatr(i, j) = -gen(i, j) / gen(j, j); } } } return transMatr; } markovchain/src/helpers.h0000644000176200001440000001676113762012754015200 0ustar liggesusers#ifndef HELPERS_H #define HELPERS_H // [[Rcpp::depends(RcppArmadillo)]] #include #include #include using namespace Rcpp; using namespace std; template T sortByDimNames(const T mat){ CharacterVector colNames = colnames(mat); CharacterVector rowNames = rownames(mat); int n = colNames.size(); CharacterVector sortedNames(n); copy(rowNames.begin(), rowNames.end(), sortedNames.begin()); sortedNames.sort(); NumericVector colIdx(n); NumericVector rowIdx(n); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (colNames(j) == sortedNames(i)) colIdx(i) = j; if (rowNames(j) == sortedNames(i)) rowIdx(i) = j; } } T sortedMatrix(n); sortedMatrix.attr("dimnames") = List::create(sortedNames, sortedNames); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) sortedMatrix(i, j) = mat(rowIdx(i), colIdx(j)); return sortedMatrix; } double lbeta(double p, double q){ return lgamma(p) + lgamma(q) - lgamma(p + q); } template T transposeMatrix(T & mat) { int numRows = mat.nrow(); int numCols = mat.ncol(); T transpose(numCols, numRows); // Assign dim names transposed (rows to cols and viceversa) transpose.attr("dimnames") = List::create(colnames(mat), rownames(mat)); for (int i = 0; i < numCols; ++i) { transpose(i, _) = mat(_, i); } return transpose; } /* Purpose: BETAIN computes the incomplete Beta function ratio. Licensing: This code is distributed under the GNU LGPL license. Modified: 25 September 2014 Author: Original FORTRAN77 version by KL Majumder, GP Bhattacharjee. C++ version by John Burkardt. Reference: KL Majumder, GP Bhattacharjee, Algorithm AS 63: The incomplete Beta Integral, Applied Statistics, Volume 22, Number 3, 1973, pages 409-411. Parameters: Input, double X, the argument, between 0 and 1. Input, double P, Q, the parameters, which must be positive. Input, double BETA, the logarithm of the complete beta function. Output, int &IFAULT, error flag. 0, no error. nonzero, an error occurred. Output, double BETAIN, the value of the incomplete Beta function ratio. */ double betain(double x, double p, double q, double beta) { double acu = 0.1E-14; double ai; double cx; bool indx; int ns; double pp; double psq; double qq; double rx; double temp; double term; double value; double xx; value = x; // Special cases. if (x == 0.0 || x == 1.0) { return value; } // Change tail if necessary and determine S. 
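  // (in R terms this routine evaluates the regularized incomplete beta
  // function, so betain(x, p, q, lbeta(p, q)) should agree with
  // pbeta(x, p, q) up to the stated accuracy; e.g. pbeta(0.3, 2, 5) is a
  // quick cross-check for betain(0.3, 2, 5, lbeta(2, 5)))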
psq = p + q; cx = 1.0 - x; if (p < psq * x){ xx = cx; cx = x; pp = q; qq = p; indx = true; } else { xx = x; pp = p; qq = q; indx = false; } term = 1.0; ai = 1.0; value = 1.0; ns = (int) (qq + cx * psq); // Use the Soper reduction formula. rx = xx / cx; temp = qq - ai; if (ns == 0) { rx = xx; } bool loop = true; while (loop) { term = term * temp * rx / (pp + ai); value = value + term;; temp = fabs(term); if (temp <= acu && temp <= acu * value) { value = value * exp (pp * log(xx) + (qq - 1.0) * log(cx) - beta) / pp; if (indx) value = 1.0 - value; loop = false; } ai = ai + 1.0; ns = ns - 1; if (0 <= ns) { temp = qq - ai; if (ns == 0) rx = xx; } else { temp = psq; psq = psq + 1.0; } } return value; } /* Purpose: XINBTA computes inverse of the incomplete Beta function. Discussion: The accuracy exponent SAE was loosened from -37 to -30, because the code would not otherwise accept the results of an iteration with p = 0.3, q = 3.0, alpha = 0.2. Licensing: This code is distributed under the GNU LGPL license. Modified: 25 September 2014 Author: Original FORTRAN77 version by GW Cran, KJ Martin, GE Thomas. C++ version by John Burkardt. Reference: GW Cran, KJ Martin, GE Thomas, Remark AS R19 and Algorithm AS 109: A Remark on Algorithms AS 63: The Incomplete Beta Integral and AS 64: Inverse of the Incomplete Beta Integeral, Applied Statistics, Volume 26, Number 1, 1977, pages 111-114. Parameters: Input, double P, Q, the parameters of the incomplete Beta function. Input, double BETA, the logarithm of the value of the complete Beta function. Input, double ALPHA, the value of the incomplete Beta function. 0 <= ALPHA <= 1. Output, int &IFAULT, error flag. 0, no error occurred. nonzero, an error occurred. Output, double XINBTA, the argument of the incomplete Beta function which produces the value ALPHA. Local Parameters: Local, double SAE, requests an accuracy of about 10^SAE. */ double xinbta ( double p, double q, double beta, double alpha ) { double a; double acu; double adj; double fpu; double g; double h; int iex; bool indx; double pp; double prev; double qq; double r; double s; double sae = -30.0; double sq; double t; double tx; double value; double w; double xin; double y; double yprev; fpu = pow(10.0, sae); value = alpha; // If the answer is easy to determine, return immediately. if (alpha == 0.0 || alpha == 1) return alpha; // Change tail if necessary. if (0.5 < alpha) { a = 1.0 - alpha; pp = q; qq = p; indx = true; } else { a = alpha; pp = p; qq = q; indx = false; } // Calculate the initial approximation. r = sqrt(- log (a * a)); y = r - (2.30753 + 0.27061 * r) / (1.0 + ( 0.99229 + 0.04481 * r) * r); if (1.0 < pp && 1.0 < qq) { r = (y * y - 3.0) / 6.0; s = 1.0 / (pp + pp - 1.0); t = 1.0 / (qq + qq - 1.0); h = 2.0 / (s + t); w = y * sqrt(h + r) / h - (t - s) * (r + 5.0 / 6.0 - 2.0 / (3.0 * h)); value = pp / (pp + qq * exp(w + w)); } else { r = qq + qq; t = 1.0 / (9.0 * qq); t = r * pow(1.0 - t + y * sqrt ( t ), 3); if (t <= 0.0) { value = 1.0 - exp( (log((1.0 - a) * qq) + beta) / qq ); } else { t = (4.0 * pp + r - 2.0) / t; if (t <= 1.0) value = exp( (log(a * pp) + beta) / pp ); else value = 1.0 - 2.0 / (t + 1.0); } } // Solve for X by a modified Newton-Raphson method, // using the function BETAIN. r = 1.0 - pp; t = 1.0 - qq; yprev = 0.0; sq = 1.0; prev = 1.0; if (value < 0.0001) value = 0.0001; if (0.9999 < value) value = 0.9999; iex = max(- 5.0 / pp / pp - 1.0 / pow (a, 0.2) - 13.0, sae); acu = pow(10.0, iex); // Iteration loop. 
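  // Modified Newton-Raphson: each pass evaluates the incomplete beta ratio at
  // the current estimate via betain(), divides the residual (y - a) by the
  // Beta(pp, qq) density (the exp(beta + r*log(xin) + t*log(1 - xin)) factor,
  // with r = 1 - pp and t = 1 - qq), and shrinks the step (g = g / 3) until
  // the update stays inside [0, 1]; the loop exits once the squared step or
  // the squared residual falls below the accuracy threshold acu. When
  // beta = lbeta(p, q), the result should therefore agree with R's
  // qbeta(alpha, p, q).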
for ( ; ; ) { y = betain(value, pp, qq, beta); xin = value; y = (y - a) * exp(beta + r * log (xin) + t * log (1.0 - xin)); if (y * yprev <= 0.0) prev = max(sq, fpu); g = 1.0; for ( ; ; ) { for ( ; ; ) { adj = g * y; sq = adj * adj; if (sq < prev) { tx = value - adj; if (0.0 <= tx && tx <= 1.0) break; } g = g / 3.0; } // Check whether the current estimate is acceptable. // The change "VALUE = TX" was suggested by Ivan Ukhov. if (prev <= acu || y * y <= acu) { value = tx; if (indx) value = 1.0 - value; return value; } if (tx != 0.0 && tx != 1.0) break; g = g / 3.0; } if (tx == value) break; value = tx; yprev = y; } if(indx) value = 1.0 - value; return value; } #endif markovchain/src/mapFitFunctions.h0000644000176200001440000001732713762012754016646 0ustar liggesusers/* * Function to remove NA values from a vector */ CharacterVector clean_nas(CharacterVector elements_na){ CharacterVector elements; for(int i = 0; i < elements_na.size();i++) if(elements_na[i] != "NA") elements.push_back(elements_na[i]); return elements; } List _mcFitMap(SEXP data, bool byrow, double confidencelevel, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { if(TYPEOF(data) != VECSXP) { data = List::create(as(data)); } List seqs = as(data); CharacterVector elements; for(int i = 0;i < (int)seqs.size();i++) { CharacterVector tseq = unique(as(seqs[i])); for(int j = 0;j < (int)tseq.size();j++) { if(tseq[j] != "NA") { elements.push_back(tseq[j]); } } } elements = unique(union_(elements, possibleStates)).sort(); // number of unique states int sizeMatr = elements.size(); // if no hyperparam argument provided, use default value of 1 for all if(hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { // matrix with all entries 1 NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(elements, elements); for(int i = 0; i < sizeMatr; i++) for(int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } //-----------beginning of validity checking of hyperparam matrix---------------------- // validity check for hyperparam matrix if(hyperparam.nrow() != hyperparam.ncol()) { stop("Dimensions of the hyperparameter matrix are inconsistent"); } if(hyperparam.nrow() < sizeMatr) { stop("Hyperparameters for all state transitions must be provided"); } // extract rows and columns name out of hyperparam matrix List dimNames = hyperparam.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; // size of hyperparam matrix int sizeHyperparam = hyperparam.ncol(); // sorted order of hyperparam rows and columns name CharacterVector sortedColNames(sizeHyperparam), sortedRowNames(sizeHyperparam); for(int i = 0; i < sizeHyperparam; i++) { sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); } sortedColNames.sort(); sortedRowNames.sort(); // validity of hyperparam matrix for(int i = 0; i < sizeHyperparam; i++){ // columns names must be different // rows names must be different if(i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) { stop("The states must all be unique"); } // same states should be present in rows and columns else if(sortedColNames(i) != sortedRowNames(i)) { stop("The set of row names must be the same as the set of column names"); } // chech whether any state in column names exists which is not in the given sequence bool found = false; for(int j = 0; j < sizeMatr; j++) { if(elements(j) == sortedColNames(i)) { found = true; break; } } // hyperparam 
may contain states not in stringchar if(!found) { elements.push_back(sortedColNames(i)); } } // check for the case where hyperparam has missing data for(int i = 0; i < sizeMatr; i++){ bool found = false; for(int j = 0; j < sizeHyperparam; j++) { if(sortedColNames(j) == elements(i)) { found = true; break; } } if(!found) stop("Hyperparameters for all state transitions must be provided"); } elements = elements.sort(); sizeMatr = elements.size(); for(int i = 0; i < sizeMatr; i++) for(int j = 0; j < sizeMatr; j++) if(hyperparam(i, j) < 1.) stop("The hyperparameter elements must all be greater than or equal to 1"); //-----------end of validity checking of hyperparam matrix---------------------- // permute the elements of hyperparam such that the row, column names are sorted hyperparam = sortByDimNames(hyperparam); // helper matrices which will help in the calculation of transition matrix // other matrices will be returned as a result NumericMatrix mapEstMatr(sizeMatr), expMatr(sizeMatr); NumericMatrix freqMatr(sizeMatr); mapEstMatr.attr("dimnames") = List::create(elements, elements); expMatr.attr("dimnames") = List::create(elements, elements); // matrices to be returned NumericMatrix lowerEndpointMatr = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); NumericMatrix upperEndpointMatr = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); NumericMatrix stdError = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); // populate frequeny matrix for old data; this is used for inference for(int k = 0;k < seqs.size();k++) { CharacterVector stringchar = as(seqs[k]); int posFrom = 0, posTo = 0; for(long int i = 0; i < stringchar.size() - 1; i ++) { if(stringchar[i] != "NA" && stringchar[i+1] != "NA"){ for (int j = 0; j < sizeMatr; j ++) { if(stringchar[i] == elements[j]) posFrom = j; if(stringchar[i + 1] == elements[j]) posTo = j; } freqMatr(posFrom,posTo)++; } } } // sanitize and to row probs for (int i = 0; i < sizeMatr; i++) { // rowsum of frequency matrix and hyperparam matrix double rowSum = 0, paramRowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j), paramRowSum += hyperparam(i, j); } // toRowProbs for (int j = 0; j < sizeMatr; j++) { // confidence intervals and bounds double p = freqMatr(i, j) + hyperparam(i, j), q = rowSum + paramRowSum - freqMatr(i, j) - hyperparam(i, j); // expected value of the transition parameters expMatr(i, j) = p / (p + q); if(p + q == sizeMatr) { mapEstMatr(i, j) = (sanitize ? 1.0 / sizeMatr : 0); } else { // maximum a posteriori estimate mapEstMatr(i, j) = (p - 1) / (p + q - sizeMatr); } // populate lowerEndPoint, upperEndPoint and stand error matrices double beta = lbeta(p, q); double cdf = betain(double(mapEstMatr(i, j)), p, q, beta); if(cdf + confidencelevel / 2 > 1.) { upperEndpointMatr(i, j) = 1.; lowerEndpointMatr(i, j) = xinbta(p, q, beta, 1 - confidencelevel); } else if(cdf - confidencelevel / 2 < 0.) 
{ lowerEndpointMatr(i, j) = 0.; upperEndpointMatr(i, j) = xinbta(p, q, beta, confidencelevel); } else { lowerEndpointMatr(i, j) = xinbta(p, q, beta, cdf - confidencelevel / 2); upperEndpointMatr(i, j) = xinbta(p, q, beta, cdf + confidencelevel / 2); } stdError(i, j) = sqrt(p * q / (p + q) / (p + q) / (1 + p + q)); } } // transpose the matrix if columwise result is required if(byrow == false) { mapEstMatr = transposeMatrix(mapEstMatr); } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = mapEstMatr; outMc.slot("name") = "Bayesian Fit"; // message("\n\'estimate\' is the MAP set of parameters where as \'expectedValue\' \nis the expectation // of the parameters with respect to the posterior.\nThe confidence intervals are given for \'estimate\'."); return List::create(_["estimate"] = outMc, _["expectedValue"] = expMatr, _["standardError"] = stdError, _["confidenceInterval"] = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr)); } markovchain/src/multinomCI.cpp0000644000176200001440000001135213762012754016140 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include using namespace Rcpp; // returns the column sums of a matrix NumericVector colSums(NumericMatrix m) { NumericVector out; for (int i = 0; i < m.cols(); i ++) out.push_back(sum(m.column(i))); return out; } // poisson distribution double ppois(double n, double lambda) { return R::ppois(n,lambda, true, false); } // moments NumericVector moments(int c, double lambda) { double a = lambda+c; double b = lambda-c; double den = 0, poisA = 0, poisB = 0; if (b < 0) b = 0; if (b > 0) den = ppois(a, lambda) - ppois(b - 1, lambda); if (b == 0) den = ppois(a, lambda); NumericVector mu(4); NumericVector mom(5); for (int r = 1; r <= 4; r ++) { poisA = 0; poisB = 0; if ((a - r) >= 0) poisA = ppois(a, lambda) - ppois(a - r, lambda); if ((a - r) < 0) poisA = ppois(a, lambda); if ((b - r - 1) >= 0) poisB = ppois(b - 1, lambda) - ppois(b - r - 1, lambda); if ((b - r - 1) < 0 && (b - 1) >= 0) poisB = ppois(b - 1, lambda); if ((b - r - 1) < 0 && (b - 1) < 0) poisB = 0; mu[r - 1] = (pow(lambda, r)) * (1 - (poisA - poisB)/den); } mom[0] = mu[0]; mom[1] = mu[1] + mu[0] - pow(mu[0], 2); mom[2] = mu[2] + mu[1] * (3 - 3*mu[0]) + (mu[0] - 3*pow(mu[0], 2) + 2*pow(mu[0], 3)); mom[3] = mu[3] + mu[2] * (6 - 4*mu[0]) + mu[1] * (7 - 12*mu[0] + 6*pow(mu[0], 2)) + mu[0] - 4 * pow(mu[0], 2) + 6 * pow(mu[0], 3) - 3 * pow(mu[0], 4); mom[4] = den; return mom; } // coverage probability for the particular choice of c double truncpoi(int c, NumericVector x, double n, int k) { NumericMatrix m(k,5); for (int i = 0; i < k; i++) { double lambda = x[i]; NumericVector mom = moments(c, lambda); for (int j = 0; j < 5; j ++) m(i, j) = mom[j]; } for (int i = 0; i < k; i ++) m(i, 3) = m(i, 3) - 3 * m(i, 1) * m(i, 1); NumericVector s = colSums(m); double s1 = s[0]; double s2 = s[1]; double s3 = s[2]; double s4 = s[3]; double probn = 1 / (ppois(n, n) - ppois(n - 1, n)); double z = (n - s1) / sqrt(s2); double g1 = s3 / (pow(s2, (3.0 / 2.0))); double g2 = s4 / (pow(s2, 2)); double poly = 1.0 + g1 * (pow(z, 3) - 3 * z) / 6.0 + g2 * (pow(z, 4) - 6.0 * pow(z, 2) + 3.0) / 24.0 + pow(g1, 2) * (pow(z, 6) - 15.0 * pow(z, 4) + 45.0*pow(z, 2) - 15.0)/72.0; double f = poly * exp(-pow(z, 2)/2) / (sqrt(2.0) * R::gammafn(0.5)); double probx=1; for (int i = 0; i < k; i++) probx = probx * m(i, 4); return(probn * probx * f / sqrt(s2)); } // multinomial confidence intervals for a row 
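// The construction above (truncated Poisson moments plus an Edgeworth-style
// correction, followed by a search over the integer c) follows the Sison and
// Glaz (1995) simultaneous confidence intervals for multinomial proportions;
// each row of the transition-count matrix is treated as one multinomial
// sample. From R this is exposed through multinomialConfidenceIntervals();
// a usage sketch with the argument names assumed (check its help page):
//
//   library(markovchain)
//   data(rain, package = "markovchain")
//   seqmat <- createSequenceMatrix(rain$rain)
//   fit    <- markovchainFit(rain$rain)
//   multinomialConfidenceIntervals(transitionMatrix = fit$estimate@transitionMatrix,
//                                  countsTransitionMatrix = seqmat,
//                                  confidencelevel = 0.95)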
// [[Rcpp::export(.multinomialCIForRowRcpp)]] NumericMatrix multinomialCIForRow(NumericVector x, double confidencelevel) { double n = std::accumulate(x.begin(), x.end(), 0.0); int k = x.size(); double c = 0; double p = 0, pold = 0; for (int cc = 1; cc <= n; cc ++) { p = truncpoi(cc, x, n, k); if (p > confidencelevel && pold < confidencelevel) { c = cc; break; } pold = p; } NumericMatrix salida(k, 2); double delta = (confidencelevel - pold) / (p - pold); NumericMatrix out(k,5); NumericMatrix num(k,1); c--; double obsp = 0; for (int i = 0; i < k; i++) { num(i, 0) = i; obsp = x[i] / n; out(i, 0) = obsp; out(i, 1) = obsp - c/n; out(i, 2) = obsp + c/n + 2*delta/n; if (out(i, 1) < 0) out(i, 1) = 0; if (out(i, 2) > 1) out(i, 2) = 1; out(i, 3) = obsp - c/n - 1/n; out(i, 4) = obsp + c/n + 1/n; salida(i, 0) = out(i, 1); salida(i, 1) = out(i, 2); } return salida; } // multinomial confidence intervals // [[Rcpp::export(.multinomialCIRcpp)]] List multinomCI(NumericMatrix transMat, NumericMatrix seqMat, double confidencelevel) { NumericMatrix res; NumericVector v; int nrows = transMat.nrow(); int ncols = transMat.ncol(); NumericMatrix lowerEndpointMatr(nrows, ncols); NumericMatrix upperEndpointMatr(nrows, ncols); double lowerEndpoint, upperEndpoint; for (int i = 0; i < nrows; i ++) { v = seqMat.row(i); res = multinomialCIForRow(v, confidencelevel); for (int j = 0; j < res.rows(); j++) { lowerEndpoint = res(j, 0); lowerEndpointMatr(i,j) = lowerEndpoint; upperEndpoint = res(j, 1); upperEndpointMatr(i,j) = upperEndpoint; } } upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = seqMat.attr("dimnames"); List out = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr); return out; } markovchain/src/ctmcProbabilistic.cpp0000644000176200001440000000704113762012754017515 0ustar liggesusers#include // [[Rcpp::depends(RcppArmadillo)]] #include #include using namespace Rcpp; using namespace RcppArmadillo; using namespace arma; using namespace std; // [[Rcpp::export(.ExpectedTimeRCpp)]] NumericVector ExpectedTimeRcpp(NumericMatrix x, NumericVector y) { NumericVector out; int size = x.nrow(); arma::mat T = arma::zeros(size, size); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) T(i, j) = x(i, j); arma::vec c = arma::zeros(size); for (int i = 0; i < size; i++) c[i] = y[i]; out = wrap(solve(T,c)); return out; } // [[Rcpp::export(.probabilityatTRCpp)]] NumericMatrix probabilityatTRCpp(NumericMatrix y) { int size = y.nrow(); NumericMatrix out(size,size); arma::mat T = arma::zeros(size, size); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) T(i, j) = y(i, j); T = expmat(T); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) out(i, j) = T(i, j); return out; } // [[Rcpp::export(.impreciseProbabilityatTRCpp)]] NumericVector impreciseProbabilityatTRCpp(S4 C, int i,int t, int s, double error) { CharacterVector states = C.slot("states"); int noOfstates = states.size(); NumericMatrix non_Q = C.slot("Q"); NumericMatrix non_range = C.slot("range"); arma::mat Q = arma::zeros(noOfstates, noOfstates); arma::mat range = arma::zeros(noOfstates, 2); // initialises armadillo matrices for (int p = 0; p < noOfstates; p++) for (int q = 0; q < noOfstates; q++) Q(p, q) = non_Q(p, q); // initialises armadillo matrices for (int p = 0; p < noOfstates; p++) for (int q = 0; q < 2; q++) range(p, q) = non_range(p, q); // initialses value of norm of Q double QNorm = -1.0; // calculates norm of Q for 
(int p =0; p < noOfstates; p++) { float sum = 0.0; for (int q = 0; q < noOfstates; q++) { if (Q(p, q) >= 0) sum = sum + Q(p, q); else sum = sum + -Q(p, q); } if (sum * range(p, 1) > QNorm) QNorm = sum * range(p, 1); } // calculates no. of iterations according to error rate, QNorm and other parameters int n; if ((s - t) * QNorm > (s - t) * (s - t) * QNorm * QNorm * 1/ (2 * error)) n = (int)(s - t) * QNorm; else n = (int)(s - t) * (s-t) * QNorm * QNorm * 1/(2 * error); // sets delta value float delta = (s - t) * 1.0/n; // declares and initialises initial f arma::vec Ii(noOfstates); for (int p = 0; p < noOfstates; p++) Ii[p] = 0; Ii[i-1] = 1; // calculation of Qgx vector arma::vec values = Q * Ii; arma::vec Qgx(noOfstates); for (int p = 0; p < noOfstates; p++) Qgx[p] = 0; for (int p = 0; p < noOfstates; p++) { if (values[p] * range(p, 0) < values[p] * range(p, 1)) Qgx[p] = values[p] * range(p,0); else Qgx[p] = values[p] * range(p,1); } Qgx = delta * Qgx; // runs n-1 iterations according to the algorithm // Qgx_i = Qgx_{i-1} + delta*Q*Qgx_{i-1} Qgx = Qgx + Ii; for (int iter = 0; iter < n - 1; iter++) { arma::vec temp = Qgx; values = Q * Qgx; for (int p = 0; p < noOfstates; p++) { // calculating keeping in mind the lower opertaotr values if (values[p] * range(p,0) < values[p] * range(p,1)) Qgx[p] = values[p] * range(p,0); else Qgx[p] = values[p] * range(p,1); } Qgx = delta * Qgx; Qgx = temp + Qgx; } NumericVector out; for (int p = 0; p < noOfstates; p++) out.push_back(Qgx[p]); return out; } markovchain/src/RcppExports.cpp0000644000176200001440000010652313762013513016350 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include #include using namespace Rcpp; // isGen bool isGen(NumericMatrix gen); RcppExport SEXP _markovchain_isGen(SEXP genSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type gen(genSEXP); rcpp_result_gen = Rcpp::wrap(isGen(gen)); return rcpp_result_gen; END_RCPP } // generatorToTransitionMatrix NumericMatrix generatorToTransitionMatrix(NumericMatrix gen, bool byrow); RcppExport SEXP _markovchain_generatorToTransitionMatrix(SEXP genSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type gen(genSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(generatorToTransitionMatrix(gen, byrow)); return rcpp_result_gen; END_RCPP } // ctmcFit List ctmcFit(List data, bool byrow, String name, double confidencelevel); RcppExport SEXP _markovchain_ctmcFit(SEXP dataSEXP, SEXP byrowSEXP, SEXP nameSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); Rcpp::traits::input_parameter< String >::type name(nameSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(ctmcFit(data, byrow, name, confidencelevel)); return rcpp_result_gen; END_RCPP } // ExpectedTimeRcpp NumericVector ExpectedTimeRcpp(NumericMatrix x, NumericVector y); RcppExport SEXP _markovchain_ExpectedTimeRcpp(SEXP xSEXP, SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type x(xSEXP); 
Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(ExpectedTimeRcpp(x, y)); return rcpp_result_gen; END_RCPP } // probabilityatTRCpp NumericMatrix probabilityatTRCpp(NumericMatrix y); RcppExport SEXP _markovchain_probabilityatTRCpp(SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(probabilityatTRCpp(y)); return rcpp_result_gen; END_RCPP } // impreciseProbabilityatTRCpp NumericVector impreciseProbabilityatTRCpp(S4 C, int i, int t, int s, double error); RcppExport SEXP _markovchain_impreciseProbabilityatTRCpp(SEXP CSEXP, SEXP iSEXP, SEXP tSEXP, SEXP sSEXP, SEXP errorSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type C(CSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type t(tSEXP); Rcpp::traits::input_parameter< int >::type s(sSEXP); Rcpp::traits::input_parameter< double >::type error(errorSEXP); rcpp_result_gen = Rcpp::wrap(impreciseProbabilityatTRCpp(C, i, t, s, error)); return rcpp_result_gen; END_RCPP } // seq2freqProb NumericVector seq2freqProb(CharacterVector sequence); RcppExport SEXP _markovchain_seq2freqProb(SEXP sequenceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type sequence(sequenceSEXP); rcpp_result_gen = Rcpp::wrap(seq2freqProb(sequence)); return rcpp_result_gen; END_RCPP } // seq2matHigh NumericMatrix seq2matHigh(CharacterVector sequence, int order); RcppExport SEXP _markovchain_seq2matHigh(SEXP sequenceSEXP, SEXP orderSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type sequence(sequenceSEXP); Rcpp::traits::input_parameter< int >::type order(orderSEXP); rcpp_result_gen = Rcpp::wrap(seq2matHigh(sequence, order)); return rcpp_result_gen; END_RCPP } // markovchainSequenceRcpp CharacterVector markovchainSequenceRcpp(int n, S4 markovchain, CharacterVector t0, bool include_t0); RcppExport SEXP _markovchain_markovchainSequenceRcpp(SEXP nSEXP, SEXP markovchainSEXP, SEXP t0SEXP, SEXP include_t0SEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< S4 >::type markovchain(markovchainSEXP); Rcpp::traits::input_parameter< CharacterVector >::type t0(t0SEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); rcpp_result_gen = Rcpp::wrap(markovchainSequenceRcpp(n, markovchain, t0, include_t0)); return rcpp_result_gen; END_RCPP } // markovchainListRcpp List markovchainListRcpp(int n, List object, bool include_t0, CharacterVector t0); RcppExport SEXP _markovchain_markovchainListRcpp(SEXP nSEXP, SEXP objectSEXP, SEXP include_t0SEXP, SEXP t0SEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< List >::type object(objectSEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); Rcpp::traits::input_parameter< CharacterVector >::type t0(t0SEXP); rcpp_result_gen = Rcpp::wrap(markovchainListRcpp(n, object, include_t0, t0)); return rcpp_result_gen; END_RCPP } // markovchainSequenceParallelRcpp List markovchainSequenceParallelRcpp(S4 listObject, int n, bool include_t0, 
CharacterVector init_state); RcppExport SEXP _markovchain_markovchainSequenceParallelRcpp(SEXP listObjectSEXP, SEXP nSEXP, SEXP include_t0SEXP, SEXP init_stateSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type listObject(listObjectSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); Rcpp::traits::input_parameter< CharacterVector >::type init_state(init_stateSEXP); rcpp_result_gen = Rcpp::wrap(markovchainSequenceParallelRcpp(listObject, n, include_t0, init_state)); return rcpp_result_gen; END_RCPP } // createSequenceMatrix NumericMatrix createSequenceMatrix(SEXP stringchar, bool toRowProbs, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain_createSequenceMatrix(SEXP stringcharSEXP, SEXP toRowProbsSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type stringchar(stringcharSEXP); Rcpp::traits::input_parameter< bool >::type toRowProbs(toRowProbsSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(createSequenceMatrix(stringchar, toRowProbs, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // mcListFitForList List mcListFitForList(List data); RcppExport SEXP _markovchain_mcListFitForList(SEXP dataSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); rcpp_result_gen = Rcpp::wrap(mcListFitForList(data)); return rcpp_result_gen; END_RCPP } // _matr2Mc S4 _matr2Mc(CharacterMatrix matrData, double laplacian, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain__matr2Mc(SEXP matrDataSEXP, SEXP laplacianSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterMatrix >::type matrData(matrDataSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(_matr2Mc(matrData, laplacian, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // _list2Mc S4 _list2Mc(List data, double laplacian, bool sanitize); RcppExport SEXP _markovchain__list2Mc(SEXP dataSEXP, SEXP laplacianSEXP, SEXP sanitizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); rcpp_result_gen = Rcpp::wrap(_list2Mc(data, laplacian, sanitize)); return rcpp_result_gen; END_RCPP } // inferHyperparam List inferHyperparam(NumericMatrix transMatr, NumericVector scale, CharacterVector data); RcppExport SEXP _markovchain_inferHyperparam(SEXP transMatrSEXP, SEXP scaleSEXP, SEXP dataSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMatr(transMatrSEXP); Rcpp::traits::input_parameter< NumericVector >::type scale(scaleSEXP); Rcpp::traits::input_parameter< 
CharacterVector >::type data(dataSEXP); rcpp_result_gen = Rcpp::wrap(inferHyperparam(transMatr, scale, data)); return rcpp_result_gen; END_RCPP } // markovchainFit List markovchainFit(SEXP data, String method, bool byrow, int nboot, double laplacian, String name, bool parallel, double confidencelevel, bool confint, NumericMatrix hyperparam, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain_markovchainFit(SEXP dataSEXP, SEXP methodSEXP, SEXP byrowSEXP, SEXP nbootSEXP, SEXP laplacianSEXP, SEXP nameSEXP, SEXP parallelSEXP, SEXP confidencelevelSEXP, SEXP confintSEXP, SEXP hyperparamSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type data(dataSEXP); Rcpp::traits::input_parameter< String >::type method(methodSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); Rcpp::traits::input_parameter< int >::type nboot(nbootSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< String >::type name(nameSEXP); Rcpp::traits::input_parameter< bool >::type parallel(parallelSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); Rcpp::traits::input_parameter< bool >::type confint(confintSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(markovchainFit(data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, confint, hyperparam, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // noofVisitsDistRCpp NumericVector noofVisitsDistRCpp(NumericMatrix matrix, int i, int N); RcppExport SEXP _markovchain_noofVisitsDistRCpp(SEXP matrixSEXP, SEXP iSEXP, SEXP NSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type N(NSEXP); rcpp_result_gen = Rcpp::wrap(noofVisitsDistRCpp(matrix, i, N)); return rcpp_result_gen; END_RCPP } // multinomialCIForRow NumericMatrix multinomialCIForRow(NumericVector x, double confidencelevel); RcppExport SEXP _markovchain_multinomialCIForRow(SEXP xSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(multinomialCIForRow(x, confidencelevel)); return rcpp_result_gen; END_RCPP } // multinomCI List multinomCI(NumericMatrix transMat, NumericMatrix seqMat, double confidencelevel); RcppExport SEXP _markovchain_multinomCI(SEXP transMatSEXP, SEXP seqMatSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMat(transMatSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type seqMat(seqMatSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(multinomCI(transMat, seqMat, confidencelevel)); return rcpp_result_gen; END_RCPP } // commClassesKernel List commClassesKernel(NumericMatrix P); RcppExport SEXP 
_markovchain_commClassesKernel(SEXP PSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); rcpp_result_gen = Rcpp::wrap(commClassesKernel(P)); return rcpp_result_gen; END_RCPP } // communicatingClasses List communicatingClasses(S4 object); RcppExport SEXP _markovchain_communicatingClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(communicatingClasses(object)); return rcpp_result_gen; END_RCPP } // transientStates CharacterVector transientStates(S4 object); RcppExport SEXP _markovchain_transientStates(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(transientStates(object)); return rcpp_result_gen; END_RCPP } // recurrentStates CharacterVector recurrentStates(S4 object); RcppExport SEXP _markovchain_recurrentStates(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(recurrentStates(object)); return rcpp_result_gen; END_RCPP } // recurrentClasses List recurrentClasses(S4 object); RcppExport SEXP _markovchain_recurrentClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(recurrentClasses(object)); return rcpp_result_gen; END_RCPP } // transientClasses List transientClasses(S4 object); RcppExport SEXP _markovchain_transientClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(transientClasses(object)); return rcpp_result_gen; END_RCPP } // reachabilityMatrix LogicalMatrix reachabilityMatrix(S4 obj); RcppExport SEXP _markovchain_reachabilityMatrix(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(reachabilityMatrix(obj)); return rcpp_result_gen; END_RCPP } // isAccessible bool isAccessible(S4 obj, String from, String to); RcppExport SEXP _markovchain_isAccessible(SEXP objSEXP, SEXP fromSEXP, SEXP toSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); Rcpp::traits::input_parameter< String >::type from(fromSEXP); Rcpp::traits::input_parameter< String >::type to(toSEXP); rcpp_result_gen = Rcpp::wrap(isAccessible(obj, from, to)); return rcpp_result_gen; END_RCPP } // summaryKernel List summaryKernel(S4 object); RcppExport SEXP _markovchain_summaryKernel(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(summaryKernel(object)); return rcpp_result_gen; END_RCPP } // firstpassageKernel NumericMatrix firstpassageKernel(NumericMatrix P, int i, int n); RcppExport SEXP _markovchain_firstpassageKernel(SEXP PSEXP, SEXP iSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); 
Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(firstpassageKernel(P, i, n)); return rcpp_result_gen; END_RCPP } // firstPassageMultipleRCpp NumericVector firstPassageMultipleRCpp(NumericMatrix P, int i, NumericVector setno, int n); RcppExport SEXP _markovchain_firstPassageMultipleRCpp(SEXP PSEXP, SEXP iSEXP, SEXP setnoSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< NumericVector >::type setno(setnoSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(firstPassageMultipleRCpp(P, i, setno, n)); return rcpp_result_gen; END_RCPP } // expectedRewardsRCpp NumericVector expectedRewardsRCpp(NumericMatrix matrix, int n, NumericVector rewards); RcppExport SEXP _markovchain_expectedRewardsRCpp(SEXP matrixSEXP, SEXP nSEXP, SEXP rewardsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< NumericVector >::type rewards(rewardsSEXP); rcpp_result_gen = Rcpp::wrap(expectedRewardsRCpp(matrix, n, rewards)); return rcpp_result_gen; END_RCPP } // expectedRewardsBeforeHittingARCpp double expectedRewardsBeforeHittingARCpp(NumericMatrix matrix, int s0, NumericVector rewards, int n); RcppExport SEXP _markovchain_expectedRewardsBeforeHittingARCpp(SEXP matrixSEXP, SEXP s0SEXP, SEXP rewardsSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type s0(s0SEXP); Rcpp::traits::input_parameter< NumericVector >::type rewards(rewardsSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(expectedRewardsBeforeHittingARCpp(matrix, s0, rewards, n)); return rcpp_result_gen; END_RCPP } // gcd int gcd(int a, int b); RcppExport SEXP _markovchain_gcd(SEXP aSEXP, SEXP bSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type a(aSEXP); Rcpp::traits::input_parameter< int >::type b(bSEXP); rcpp_result_gen = Rcpp::wrap(gcd(a, b)); return rcpp_result_gen; END_RCPP } // period int period(S4 object); RcppExport SEXP _markovchain_period(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(period(object)); return rcpp_result_gen; END_RCPP } // predictiveDistribution double predictiveDistribution(CharacterVector stringchar, CharacterVector newData, NumericMatrix hyperparam); RcppExport SEXP _markovchain_predictiveDistribution(SEXP stringcharSEXP, SEXP newDataSEXP, SEXP hyperparamSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type stringchar(stringcharSEXP); Rcpp::traits::input_parameter< CharacterVector >::type newData(newDataSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); rcpp_result_gen = Rcpp::wrap(predictiveDistribution(stringchar, newData, hyperparam)); return rcpp_result_gen; END_RCPP } // priorDistribution NumericVector 
priorDistribution(NumericMatrix transMatr, NumericMatrix hyperparam); RcppExport SEXP _markovchain_priorDistribution(SEXP transMatrSEXP, SEXP hyperparamSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMatr(transMatrSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); rcpp_result_gen = Rcpp::wrap(priorDistribution(transMatr, hyperparam)); return rcpp_result_gen; END_RCPP } // hittingProbabilities NumericMatrix hittingProbabilities(S4 object); RcppExport SEXP _markovchain_hittingProbabilities(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(hittingProbabilities(object)); return rcpp_result_gen; END_RCPP } // canonicForm S4 canonicForm(S4 obj); RcppExport SEXP _markovchain_canonicForm(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(canonicForm(obj)); return rcpp_result_gen; END_RCPP } // steadyStates NumericMatrix steadyStates(S4 obj); RcppExport SEXP _markovchain_steadyStates(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(steadyStates(obj)); return rcpp_result_gen; END_RCPP } // absorbingStates CharacterVector absorbingStates(S4 obj); RcppExport SEXP _markovchain_absorbingStates(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(absorbingStates(obj)); return rcpp_result_gen; END_RCPP } // isIrreducible bool isIrreducible(S4 obj); RcppExport SEXP _markovchain_isIrreducible(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(isIrreducible(obj)); return rcpp_result_gen; END_RCPP } // isRegular bool isRegular(S4 obj); RcppExport SEXP _markovchain_isRegular(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(isRegular(obj)); return rcpp_result_gen; END_RCPP } // meanAbsorptionTime NumericVector meanAbsorptionTime(S4 obj); RcppExport SEXP _markovchain_meanAbsorptionTime(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanAbsorptionTime(obj)); return rcpp_result_gen; END_RCPP } // absorptionProbabilities NumericMatrix absorptionProbabilities(S4 obj); RcppExport SEXP _markovchain_absorptionProbabilities(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(absorptionProbabilities(obj)); return rcpp_result_gen; END_RCPP } // meanFirstPassageTime NumericMatrix meanFirstPassageTime(S4 obj, CharacterVector destination); RcppExport SEXP _markovchain_meanFirstPassageTime(SEXP objSEXP, SEXP destinationSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); Rcpp::traits::input_parameter< 
CharacterVector >::type destination(destinationSEXP); rcpp_result_gen = Rcpp::wrap(meanFirstPassageTime(obj, destination)); return rcpp_result_gen; END_RCPP } // meanRecurrenceTime NumericVector meanRecurrenceTime(S4 obj); RcppExport SEXP _markovchain_meanRecurrenceTime(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanRecurrenceTime(obj)); return rcpp_result_gen; END_RCPP } // meanNumVisits NumericMatrix meanNumVisits(S4 obj); RcppExport SEXP _markovchain_meanNumVisits(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanNumVisits(obj)); return rcpp_result_gen; END_RCPP } // isProb bool isProb(double prob); RcppExport SEXP _markovchain_isProb(SEXP probSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type prob(probSEXP); rcpp_result_gen = Rcpp::wrap(isProb(prob)); return rcpp_result_gen; END_RCPP } // isStochasticMatrix bool isStochasticMatrix(NumericMatrix m, bool byrow); RcppExport SEXP _markovchain_isStochasticMatrix(SEXP mSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type m(mSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(isStochasticMatrix(m, byrow)); return rcpp_result_gen; END_RCPP } // isProbVector bool isProbVector(NumericVector prob); RcppExport SEXP _markovchain_isProbVector(SEXP probSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type prob(probSEXP); rcpp_result_gen = Rcpp::wrap(isProbVector(prob)); return rcpp_result_gen; END_RCPP } // checkIsAccesibleMethod bool checkIsAccesibleMethod(S4 obj); RcppExport SEXP _markovchain_checkIsAccesibleMethod(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(checkIsAccesibleMethod(obj)); return rcpp_result_gen; END_RCPP } // approxEqual bool approxEqual(NumericMatrix a, NumericMatrix b); RcppExport SEXP _markovchain_approxEqual(SEXP aSEXP, SEXP bSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type a(aSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type b(bSEXP); rcpp_result_gen = Rcpp::wrap(approxEqual(a, b)); return rcpp_result_gen; END_RCPP } // isPartition bool isPartition(List commClasses, CharacterVector states); RcppExport SEXP _markovchain_isPartition(SEXP commClassesSEXP, SEXP statesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type commClasses(commClassesSEXP); Rcpp::traits::input_parameter< CharacterVector >::type states(statesSEXP); rcpp_result_gen = Rcpp::wrap(isPartition(commClasses, states)); return rcpp_result_gen; END_RCPP } // areHittingProbabilities bool areHittingProbabilities(NumericMatrix probs, NumericMatrix hitting, bool byrow); RcppExport SEXP _markovchain_areHittingProbabilities(SEXP probsSEXP, SEXP hittingSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type 
probs(probsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(areHittingProbabilities(probs, hitting, byrow)); return rcpp_result_gen; END_RCPP } // areMeanNumVisits bool areMeanNumVisits(NumericMatrix probs, NumericMatrix numVisits, NumericMatrix hitting, bool byrow); RcppExport SEXP _markovchain_areMeanNumVisits(SEXP probsSEXP, SEXP numVisitsSEXP, SEXP hittingSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type probs(probsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type numVisits(numVisitsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(areMeanNumVisits(probs, numVisits, hitting, byrow)); return rcpp_result_gen; END_RCPP } // recurrentHitting bool recurrentHitting(List recurrentClasses, NumericMatrix hitting, CharacterVector states, bool byrow); RcppExport SEXP _markovchain_recurrentHitting(SEXP recurrentClassesSEXP, SEXP hittingSEXP, SEXP statesSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type recurrentClasses(recurrentClassesSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< CharacterVector >::type states(statesSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(recurrentHitting(recurrentClasses, hitting, states, byrow)); return rcpp_result_gen; END_RCPP } // hittingProbsAreOne bool hittingProbsAreOne(NumericMatrix matrix); RcppExport SEXP _markovchain_hittingProbsAreOne(SEXP matrixSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); rcpp_result_gen = Rcpp::wrap(hittingProbsAreOne(matrix)); return rcpp_result_gen; END_RCPP } // absorbingAreRecurrentClass bool absorbingAreRecurrentClass(CharacterVector absorbingStates, List recurrentClasses); RcppExport SEXP _markovchain_absorbingAreRecurrentClass(SEXP absorbingStatesSEXP, SEXP recurrentClassesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type absorbingStates(absorbingStatesSEXP); Rcpp::traits::input_parameter< List >::type recurrentClasses(recurrentClassesSEXP); rcpp_result_gen = Rcpp::wrap(absorbingAreRecurrentClass(absorbingStates, recurrentClasses)); return rcpp_result_gen; END_RCPP } static const R_CallMethodDef CallEntries[] = { {"_markovchain_isGen", (DL_FUNC) &_markovchain_isGen, 1}, {"_markovchain_generatorToTransitionMatrix", (DL_FUNC) &_markovchain_generatorToTransitionMatrix, 2}, {"_markovchain_ctmcFit", (DL_FUNC) &_markovchain_ctmcFit, 4}, {"_markovchain_ExpectedTimeRcpp", (DL_FUNC) &_markovchain_ExpectedTimeRcpp, 2}, {"_markovchain_probabilityatTRCpp", (DL_FUNC) &_markovchain_probabilityatTRCpp, 1}, {"_markovchain_impreciseProbabilityatTRCpp", (DL_FUNC) &_markovchain_impreciseProbabilityatTRCpp, 5}, {"_markovchain_seq2freqProb", (DL_FUNC) &_markovchain_seq2freqProb, 1}, {"_markovchain_seq2matHigh", (DL_FUNC) &_markovchain_seq2matHigh, 2}, {"_markovchain_markovchainSequenceRcpp", (DL_FUNC) &_markovchain_markovchainSequenceRcpp, 4}, {"_markovchain_markovchainListRcpp", (DL_FUNC) 
&_markovchain_markovchainListRcpp, 4}, {"_markovchain_markovchainSequenceParallelRcpp", (DL_FUNC) &_markovchain_markovchainSequenceParallelRcpp, 4}, {"_markovchain_createSequenceMatrix", (DL_FUNC) &_markovchain_createSequenceMatrix, 4}, {"_markovchain_mcListFitForList", (DL_FUNC) &_markovchain_mcListFitForList, 1}, {"_markovchain__matr2Mc", (DL_FUNC) &_markovchain__matr2Mc, 4}, {"_markovchain__list2Mc", (DL_FUNC) &_markovchain__list2Mc, 3}, {"_markovchain_inferHyperparam", (DL_FUNC) &_markovchain_inferHyperparam, 3}, {"_markovchain_markovchainFit", (DL_FUNC) &_markovchain_markovchainFit, 12}, {"_markovchain_noofVisitsDistRCpp", (DL_FUNC) &_markovchain_noofVisitsDistRCpp, 3}, {"_markovchain_multinomialCIForRow", (DL_FUNC) &_markovchain_multinomialCIForRow, 2}, {"_markovchain_multinomCI", (DL_FUNC) &_markovchain_multinomCI, 3}, {"_markovchain_commClassesKernel", (DL_FUNC) &_markovchain_commClassesKernel, 1}, {"_markovchain_communicatingClasses", (DL_FUNC) &_markovchain_communicatingClasses, 1}, {"_markovchain_transientStates", (DL_FUNC) &_markovchain_transientStates, 1}, {"_markovchain_recurrentStates", (DL_FUNC) &_markovchain_recurrentStates, 1}, {"_markovchain_recurrentClasses", (DL_FUNC) &_markovchain_recurrentClasses, 1}, {"_markovchain_transientClasses", (DL_FUNC) &_markovchain_transientClasses, 1}, {"_markovchain_reachabilityMatrix", (DL_FUNC) &_markovchain_reachabilityMatrix, 1}, {"_markovchain_isAccessible", (DL_FUNC) &_markovchain_isAccessible, 3}, {"_markovchain_summaryKernel", (DL_FUNC) &_markovchain_summaryKernel, 1}, {"_markovchain_firstpassageKernel", (DL_FUNC) &_markovchain_firstpassageKernel, 3}, {"_markovchain_firstPassageMultipleRCpp", (DL_FUNC) &_markovchain_firstPassageMultipleRCpp, 4}, {"_markovchain_expectedRewardsRCpp", (DL_FUNC) &_markovchain_expectedRewardsRCpp, 3}, {"_markovchain_expectedRewardsBeforeHittingARCpp", (DL_FUNC) &_markovchain_expectedRewardsBeforeHittingARCpp, 4}, {"_markovchain_gcd", (DL_FUNC) &_markovchain_gcd, 2}, {"_markovchain_period", (DL_FUNC) &_markovchain_period, 1}, {"_markovchain_predictiveDistribution", (DL_FUNC) &_markovchain_predictiveDistribution, 3}, {"_markovchain_priorDistribution", (DL_FUNC) &_markovchain_priorDistribution, 2}, {"_markovchain_hittingProbabilities", (DL_FUNC) &_markovchain_hittingProbabilities, 1}, {"_markovchain_canonicForm", (DL_FUNC) &_markovchain_canonicForm, 1}, {"_markovchain_steadyStates", (DL_FUNC) &_markovchain_steadyStates, 1}, {"_markovchain_absorbingStates", (DL_FUNC) &_markovchain_absorbingStates, 1}, {"_markovchain_isIrreducible", (DL_FUNC) &_markovchain_isIrreducible, 1}, {"_markovchain_isRegular", (DL_FUNC) &_markovchain_isRegular, 1}, {"_markovchain_meanAbsorptionTime", (DL_FUNC) &_markovchain_meanAbsorptionTime, 1}, {"_markovchain_absorptionProbabilities", (DL_FUNC) &_markovchain_absorptionProbabilities, 1}, {"_markovchain_meanFirstPassageTime", (DL_FUNC) &_markovchain_meanFirstPassageTime, 2}, {"_markovchain_meanRecurrenceTime", (DL_FUNC) &_markovchain_meanRecurrenceTime, 1}, {"_markovchain_meanNumVisits", (DL_FUNC) &_markovchain_meanNumVisits, 1}, {"_markovchain_isProb", (DL_FUNC) &_markovchain_isProb, 1}, {"_markovchain_isStochasticMatrix", (DL_FUNC) &_markovchain_isStochasticMatrix, 2}, {"_markovchain_isProbVector", (DL_FUNC) &_markovchain_isProbVector, 1}, {"_markovchain_checkIsAccesibleMethod", (DL_FUNC) &_markovchain_checkIsAccesibleMethod, 1}, {"_markovchain_approxEqual", (DL_FUNC) &_markovchain_approxEqual, 2}, {"_markovchain_isPartition", (DL_FUNC) &_markovchain_isPartition, 2}, 
{"_markovchain_areHittingProbabilities", (DL_FUNC) &_markovchain_areHittingProbabilities, 3}, {"_markovchain_areMeanNumVisits", (DL_FUNC) &_markovchain_areMeanNumVisits, 4}, {"_markovchain_recurrentHitting", (DL_FUNC) &_markovchain_recurrentHitting, 4}, {"_markovchain_hittingProbsAreOne", (DL_FUNC) &_markovchain_hittingProbsAreOne, 1}, {"_markovchain_absorbingAreRecurrentClass", (DL_FUNC) &_markovchain_absorbingAreRecurrentClass, 2}, {NULL, NULL, 0} }; RcppExport void R_init_markovchain(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } markovchain/vignettes/0000755000176200001440000000000014050513461014563 5ustar liggesusersmarkovchain/vignettes/higher_order_markov_chains.Rmd0000644000176200001440000003025313762012754022607 0ustar liggesusers--- title: plain: "Higher, possibly multivariate, Order Markov Chains in markovchain package" formatted: "Higher, possibly multivariate, Order Markov Chains in \\pkg{markovchain} package" short: "Higher order (multivariate) Markov chains" pagetitle: "Higher order (multivariate) Markov chains" author: - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat ACAS, UnipolSai R\&D address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedicato\_giorgio@yahoo.it} preamble: > \author{Deepak Yadav, Tae Seung Kang, Giorgio Alfredo Spedicato} \usepackage{graphicx} \usepackage{amsmath} \usepackage{tabularx} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} abstract: | The \pkg{markovchain} package contains functions to fit higher (possibly) multivariate order Markov chains. The functions are shown as well as simple exmaples output: if (rmarkdown::pandoc_version() < 2.2) function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{Higher order markov chains} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [Higher order Markov chains] formatted: [Higher order Markov chains] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ``` # Higher Order Markov Chains Continuous time Markov chains are discussed in the CTMC vignette which is a part of the package. An experimental `fitHigherOrder` function has been written in order to fit a higher order Markov chain ([@ching2013higher, @ching2008higher]). `fitHigherOrder` takes two inputs 1. sequence: a categorical data sequence. 2. order: order of Markov chain to fit with default value 2. The output will be a `list` which consists of 1. lambda: model parameter(s). 2. Q: a list of transition matrices. $Q_i$ is the $ith$ step transition matrix stored column-wise. 3. X: frequency probability vector of the given sequence. Its quadratic programming problem is solved using `solnp` function of the Rsolnp package [@pkg:Rsolnp]. 
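In the univariate case the fitted components combine as in the multivariate model presented later in this vignette (with a single sequence, $s = 1$), i.e. $\hat{x}_{r+1} = \sum_{h=1}^{n} \lambda_{h} Q_{h} x_{r-h+1}$. The sketch below (not run) shows one way to carry out this combination for a one-step-ahead distribution; the fitted list `hof` and the list of past state distributions `xPast` are hypothetical placeholders, not objects created elsewhere in this vignette.

```{r higherOrderPredictSketch, eval=FALSE}
# Minimal sketch (not run), assuming `hof` is the list returned by
# fitHigherOrder() (components lambda, Q and X) and `xPast` is a list of the
# n most recent state distributions, with xPast[[h]] the distribution at time
# r - h + 1 stored as a column probability vector (matching the column-wise Q).
oneStepAhead <- function(hof, xPast) {
  n <- length(hof$lambda)
  xNext <- numeric(length(hof$X))
  for (h in seq_len(n)) {
    xNext <- xNext + hof$lambda[h] * as.numeric(hof$Q[[h]] %*% xPast[[h]])
  }
  xNext
}
```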
```{r load, results='hide', warning=FALSE, message=FALSE}
require(markovchain)
library(Rsolnp)
```

```{r higherOrder}
data(rain)
fitHigherOrder(rain$rain, 2)
fitHigherOrder(rain$rain, 3)
```

# Higher Order Multivariate Markov Chains

## Introduction

The HOMMC model is used for modeling the behaviour of multiple categorical sequences generated by similar sources. The main reference is [@ching2008higher]. Assume that there are $s$ categorical sequences, each with possible states in a common set $M$ of size $m$. In an $n$th order MMC the state probability distribution of the $j$th sequence at time $t = r + 1$ depends on the state probability distribution of all the sequences (including itself) at times $t = r, r - 1, ..., r - n + 1$.

\[
x_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)}P_{h}^{(jk)}x_{r-h+1}^{(k)}, j = 1, 2, ..., s, r = n-1, n, ...
\]

with initial distribution $x_{0}^{(k)}, x_{1}^{(k)}, ... , x_{n-1}^{(k)} (k = 1, 2, ... , s)$. Here

\[
\lambda _{jk}^{(h)} \geq 0, 1\leq j, k\leq s, 1\leq h\leq n \enspace \textrm{and} \enspace \sum_{k=1}^{s}\sum_{h=1}^{n} \lambda_{jk}^{(h)} = 1, j = 1, 2, 3, ... , s.
\]

We now give a simpler representation of the model, which will help in understanding the output of the `fitHighOrderMultivarMC` method.

\vspace{5mm}

Let $X_{r}^{(j)} = ((x_{r}^{(j)})^{T}, (x_{r-1}^{(j)})^{T}, ..., (x_{r-n+1}^{(j)})^{T})^{T}$ for $j = 1, 2, 3, ... , s$. Then

\vspace{5mm}

\[
\begin{pmatrix}
X_{r+1}^{(1)}\\
X_{r+1}^{(2)}\\
.\\
.\\
.\\
X_{r+1}^{(s)}
\end{pmatrix} =
\begin{pmatrix}
B^{11}& B^{12}& .& .& B^{1s}& \\
B^{21}& B^{22}& .& .& B^{2s}& \\
.& .& .& .& .& \\
.& .& .& .& .& \\
.& .& .& .& .& \\
B^{s1}& B^{s2}& .& .& B^{ss}& \\
\end{pmatrix}
\begin{pmatrix}
X_{r}^{(1)}\\
X_{r}^{(2)}\\
.\\
.\\
.\\
X_{r}^{(s)}
\end{pmatrix} \textrm{where}
\]

\[
B^{ii} =
\begin{pmatrix}
\lambda _{ii}^{(1)}P_{1}^{(ii)}& \lambda _{ii}^{(2)}P_{2}^{(ii)}& .& .& \lambda _{ii}^{(n)}P_{n}^{(ii)}& \\
I& 0& .& .& 0& \\
0& I& .& .& 0& \\
.& .& .& .& .& \\
.& .& .& .& .& \\
0& .& .& I& 0&
\end{pmatrix}_{mn*mn} \textrm{and}
\]

\vspace{5mm}

\[
B^{ij} =
\begin{pmatrix}
\lambda _{ij}^{(1)}P_{1}^{(ij)}& \lambda _{ij}^{(2)}P_{2}^{(ij)}& .& .& \lambda _{ij}^{(n)}P_{n}^{(ij)}& \\
0& 0& .& .& 0& \\
0& 0& .& .& 0& \\
.& .& .& .& .& \\
.& .& .& .& .& \\
0& .& .& 0& 0&
\end{pmatrix}_{mn*mn} \textrm{when } i\neq j.
\]

\vspace{5mm}

## Representation of parameters in the code

$P_{h}^{(ij)}$ is represented as $Ph(i,j)$ and $\lambda _{ij}^{(h)}$ as Lambdah(i,j). For example: $P_{2}^{(13)}$ as $P2(1,3)$ and $\lambda _{45}^{(3)}$ as Lambda3(4,5).

## Definition of HOMMC class

```{r hommcObject}
showClass("hommc")
```

Any object of the `hommc` class is composed of the following slots:

1. states: a character vector, listing the states for which transition probabilities are defined.
2. byrow: a logical element, indicating whether transition probabilities are shown by row or by column.
3. order: the order of the multivariate Markov chain.
4. P: an array of all transition matrices.
5. Lambda: a vector storing the weight of each transition matrix.
6. name: an optional character element to name the HOMMC.
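The `P` array stacks all the $P_{h}^{(ij)}$ matrices along its third dimension, and `Lambda` stores the corresponding weights in the same order. The sketch below (not run) shows one possible way to retrieve a given $P_{h}^{(ij)}$ and $\lambda_{ij}^{(h)}$ from a fitted object; the index formula is an assumption about the internal layout, mirroring the extraction code used in the marketing example at the end of this vignette, and `fit`, `i`, `j`, `h`, `n` and `s` are placeholders.

```{r hommcSlotSketch, eval=FALSE}
# Minimal sketch (not run): pick out P_h(i,j) and Lambda_h(i,j) from a fitted
# hommc object `fit` of order n built on s sequences. The index formula mirrors
# the extraction code in the marketing example below and is an assumption about
# the internal layout rather than part of the formal class definition.
getHommcParameters <- function(fit, i, j, h, n, s) {
  idx <- n * s * (i - 1) + n * (j - 1) + h
  list(Lambda = fit@Lambda[idx], P = fit@P[, , idx])
}
```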
## How to create an object of class HOMMC

```{r hommcCreate}
states <- c('a', 'b')
P <- array(dim = c(2, 2, 4), dimnames = list(states, states))

P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2)
P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2)
P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2)
P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2)

Lambda <- c(.8, .2, .3, .7)

hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states,
           byrow = FALSE, name = "FOMMC")
hob
```

## Fit HOMMC

The `fitHighOrderMultivarMC` method is available to fit a HOMMC. It takes the following three parameters:

1. seqMat: a character matrix or a data frame; each column represents a categorical sequence.
2. order: the order of the multivariate Markov chain. Default is 2.
3. Norm: the norm to be used. Default is 2.

# A Marketing Example

We tried to replicate the example found in [@ching2008higher] for an application of HOMMC. A soft-drink company in Hong Kong is facing an in-house problem of production planning and inventory control. A pressing issue is the storage space of its central warehouse, which often finds itself in the state of overflow or near capacity. The company thus urgently needs to study the interplay between the storage space requirement and the overall growing sales demand. The products can be classified into six possible states (1, 2, 3, 4, 5, 6) according to their sales volumes. All products are labeled as 1 = no sales volume, 2 = very slow-moving (very low sales volume), 3 = slow-moving, 4 = standard, 5 = fast-moving or 6 = very fast-moving (very high sales volume). Such labels are useful from both marketing and production planning points of view. The data are contained in the `sales` object.

```{r hommsales}
data(sales)
head(sales)
```

The company would also like to predict sales demand for an important customer in order to minimize its inventory build-up. More importantly, the company can understand the sales pattern of this customer and then develop a marketing strategy to deal with this customer. The data consist of the customer's sales demand sequences for five important products of the company over one year. We expect sales demand sequences generated by the same customer to be correlated to each other. Therefore, by exploring these relationships, one can obtain a better higher-order multivariate Markov model for such demand sequences, and hence better prediction rules.

In the application of [@ching2008higher], the order is arbitrarily chosen to be eight, i.e., $n = 8$. We first estimate all the transition probability matrices $P_{h}^{(ij)}$ and also obtain estimates of the stationary probability distributions of the five products:
$\widehat{\boldsymbol{x}}^{(1)} = \begin{pmatrix} 0.0818& 0.4052& 0.0483& 0.0335& 0.0037& 0.4275 \end{pmatrix}^{\boldsymbol{T}}$

$\widehat{\boldsymbol{x}}^{(2)} = \begin{pmatrix} 0.3680& 0.1970& 0.0335& 0.0000& 0.0037& 0.3978 \end{pmatrix}^{\boldsymbol{T}}$

$\widehat{\boldsymbol{x}}^{(3)} = \begin{pmatrix} 0.1450& 0.2045& 0.0186& 0.0000& 0.0037& 0.6283 \end{pmatrix}^{\boldsymbol{T}}$

$\widehat{\boldsymbol{x}}^{(4)} = \begin{pmatrix} 0.0000& 0.3569& 0.1338& 0.1896& 0.0632& 0.2565 \end{pmatrix}^{\boldsymbol{T}}$

$\widehat{\boldsymbol{x}}^{(5)} = \begin{pmatrix} 0.0000& 0.3569& 0.1227& 0.2268& 0.0520& 0.2416 \end{pmatrix}^{\boldsymbol{T}}$

By solving the corresponding linear programming problems, we obtain the following higher-order multivariate Markov chain model:

\vspace{3mm}

$\boldsymbol{x}_{r+1}^{(1)} = \boldsymbol{P}_{1}^{(12)}\boldsymbol{x}_{r}^{(2)}$

$\boldsymbol{x}_{r+1}^{(2)} = 0.6364\boldsymbol{P}_{1}^{(22)}\boldsymbol{x}_{r}^{(2)} + 0.3636\boldsymbol{P}_{3}^{(22)}\boldsymbol{x}_{r-2}^{(2)}$

$\boldsymbol{x}_{r+1}^{(3)} = \boldsymbol{P}_{1}^{(35)}\boldsymbol{x}_{r}^{(5)}$

$\boldsymbol{x}_{r+1}^{(4)} = 0.2994\boldsymbol{P}_{8}^{(42)}\boldsymbol{x}_{r-7}^{(2)} + 0.4324\boldsymbol{P}_{1}^{(45)}\boldsymbol{x}_{r}^{(5)} + 0.2681\boldsymbol{P}_{2}^{(45)}\boldsymbol{x}_{r-1}^{(5)}$

$\boldsymbol{x}_{r+1}^{(5)} = 0.2718\boldsymbol{P}_{8}^{(52)}\boldsymbol{x}_{r-7}^{(2)} + 0.6738\boldsymbol{P}_{1}^{(54)}\boldsymbol{x}_{r}^{(4)} + 0.0544\boldsymbol{P}_{2}^{(55)}\boldsymbol{x}_{r-1}^{(5)}$

\vspace{3mm}

According to the constructed 8th order multivariate Markov model, Products A and B are closely related. In particular, the sales demand of Product A depends strongly on Product B. The main reason is that the chemical nature of Products A and B is the same, but they have different packaging for marketing purposes. Moreover, Products B, C, D and E are closely related. Similarly, Products C and E have the same product flavor, but different packaging. In this model, it is interesting to note that both Products D and E depend appreciably on Product B at order 8; such a relationship can hardly be obtained with a conventional Markov model owing to the huge number of parameters it would require. The results show that the higher-order multivariate Markov model is quite valuable for analyzing the relationships among sales demand sequences.

```{r hommcFit, warning = FALSE, message = FALSE}
# fit 8th order multivariate markov chain
object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2)
```

We choose to show only the results reported in the paper. We see that the $\lambda$ values are quite close, but not equal, to those shown in the original paper.
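As a quick sanity check on the fit, the constraint $\sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)} = 1$ stated in the introduction implies that the estimated weights attached to each target sequence should sum to one. The sketch below (not run) verifies this, under the assumption that the weights referring to the same target sequence are stored contiguously in the `Lambda` slot, in blocks of length $n \cdot s$.

```{r lambdaSumSketch, eval=FALSE}
# Sketch (not run): under the assumed layout, the weights for target sequence j
# occupy positions n*s*(j - 1) + 1, ..., n*s*j of object@Lambda, so each of the
# sums below should be (approximately) equal to one.
n <- 8
s <- 5
sapply(seq_len(s), function(j) sum(object@Lambda[(n * s * (j - 1) + 1):(n * s * j)]))
```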
```{r result, echo = FALSE} i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } ``` # References markovchain/vignettes/markovchainBiblio.bib0000644000176200001440000004703413762012754020704 0ustar liggesusers% Encoding: UTF-8 % Formatted with emacs using C-c C-q in BibTeX mode @book{dobrow2016introduction, title = {Introduction to Stochastic Processes with R}, author = {Dobrow, Robert P}, year = {2016}, publisher = {John Wiley \& Sons} } @manual{pkg:ctmcd, title = {ctmcd: Estimating the Parameters of a Continuous-Time Markov Chain from Discrete-Time Data}, author = {Marius Pfeuffer}, year = {2017}, note = {R package version 1.1}, url = {https://CRAN.R-project.org/package=ctmcd} } @article{Hu2002, author = {Hu, Yen Ting and Kiesel, Rudiger and Perraudin, William}, doi = {10.1016/S0378-4266(02)00268-6}, isbn = {0378-4266}, issn = {03784266}, journal = {Journal of Banking and Finance}, keywords = {Credit rating,Credit risk,Ordered probit,Sovereign default}, number = {7}, pages = {1383--1406}, title = {{The estimation of transition matrices for sovereign credit ratings}}, volume = {26}, year = {2002} } @book{de2016assicurazioni, title = {Assicurazioni sulla salute: caratteristiche, modelli attuariali e basi tecniche}, author = {{De Angelis, Paolo} and {Di Falco, L.}}, isbn = {9788815260840}, series = {Il Mulino}, url = {https://books.google.it/books?id=D56bjgEACAAJ}, year = {2016}, publisher = {Il Mulino} } @mastersthesis{skuriat2005statistical, author = "Skuriat-Olechnowska, Monika", title = "Statistical inference and hypothesis testing for Markov chains with Interval Censoring", school = "TUDelft", year = "2005" } @article{kullback1962tests, title = {Tests for Contingency Tables and Marltov Chains}, author = {Kullback, S and Kupperman, Michael and Ku, HH}, journal = {Technometrics}, volume = {4}, number = {4}, pages = {573--608}, year = {1962}, publisher = {Taylor \& Francis} } @article{israel2001finding, title = {Finding generators for Markov chains via empirical transition matrices, with applications to credit ratings}, author = {Israel, Robert B and Rosenthal, Jeffrey S and Wei, Jason Z}, journal = {Mathematical finance}, volume = {11}, number = {2}, pages = {245--265}, year = {2001}, publisher = {Wiley Online Library} } @manual{pkg:RcppParallel, title = {RcppParallel: Parallel Programming Tools for 'Rcpp'}, author = {JJ Allaire and Romain Francois and Kevin Ushey and Gregory Vandenbrouck and Marcus Geelnard and {Intel}}, year = {2016}, note = {R package version 4.3.19}, url = {http://rcppcore.github.io/RcppParallel/} } @manual{pkg:MultinomialCI, title = {MultinomialCI: Simultaneous confidence intervals for multinomial proportions according to the method by Sison and Glaz}, author = {Pablo J. 
Villacorta}, year = {2012}, note = {R package version 1.0}, url = {http://CRAN.R-project.org/package=MultinomialCI} } @article{sison1995simultaneous, title = {Simultaneous confidence intervals and sample size determination for multinomial proportions}, author = {Sison, Cristina P and Glaz, Joseph}, journal = {Journal of the American Statistical Association}, volume = {90}, number = {429}, pages = {366--369}, year = {1995}, publisher = {Taylor \& Francis} } @article{konstantopoulos2009markov, title = {Markov Chains and Random Walks}, author = {Konstantopoulos, Takis}, journal = {Lecture notes}, year = {2009} } @article{craigSendi, author = {{B. A. Craig} and {A. A. Sendi}}, title = {Estimation of the Transition Matrix of a Discrete-Time Markov Chain}, journal = {Health Economics}, year = {2002}, volume = {11}, pages = {33--42} } @misc{bardPpt, author = {J. F. Bard}, title = {Lecture 12.5 - Additional Issues Concerning Discrete-Time Markov Chains}, month = {April}, year = {2000}, url ={http://www.me.utexas.edu/~jensen%20/ORMM/instruction/powerpoint/or_models_09/12.5_dtmc2.ppt} } @incollection{bremaud1999discrete, author = {Br{\'e}maud, Pierre}, title = {Discrete-Time Markov Models}, booktitle = {Markov Chains}, publisher = {Springer}, year = {1999}, pages = {53--93} } @book{chambers, title = {Software for Data Analysis: Programming with \proglang{R}}, publisher = {Springer-Verlag}, year = {2008}, author = {Chambers, J.M.}, series = {Statistics and computing}, isbn = {9780387759357}, lccn = {2008922937} } @book{ching2006markov, title = {Markov Chains: Models, Algorithms and Applications}, publisher = {Springer-Verlag}, year = {2006}, author = {Ching, W.K. and Ng, M.K.}, series = {International Series in Operations Research \& Management Science}, isbn = {9780387293356}, lccn = {2005933263} } @article{pkg:igraph, author = {Gabor Csardi and Tamas Nepusz}, title = {The \pkg{igraph} Software Package for Complex Network Research}, journal = {InterJournal}, year = {2006}, volume = {Complex Systems}, pages = {1695}, url = {http://igraph.sf.net} } @book{denuit2007actuarial, title = {Actuarial modelling of claim counts: Risk classification, credibility and bonus-malus systems}, publisher = {Wiley}, year = {2007}, author = {Denuit, Michel and Mar{\'e}chal, Xavier and Pitrebois, Sandra and Walhin, Jean-Fran{\c{c}}ois} } @book{deshmukh2012multiple, title = {Multiple Decrement Models in Insurance: An Introduction Using \proglang{R}}, publisher = {Springer-Verlag}, year = {2012}, author = {Deshmukh, S.R.}, series = {SpringerLink : B{\"u}cher}, isbn = {9788132206590}, lccn = {2012942476} } @book{RcppR, title = {Seamless \proglang{R} and \proglang{C++} Integration with \pkg{Rcpp}}, publisher = {Springer-Verlag}, year = {2013}, author = {Dirk Eddelbuettel}, address = {New York}, note = {ISBN 978-1-4614-6867-7} } @misc{renaldoMatlab, author = {Renaldo Feres}, title = {Notes for Math 450 \proglang{MATLAB} Listings for Markov Chains}, year = {2007}, url = {http://www.math.wustl.edu/~feres/Math450Lect04.pdf} } @manual{mcmcR, title = {mcmc: Markov Chain Monte Carlo}, author = {Charles J. Geyer and Leif T. Johnson}, year = {2013}, note = {\proglang{R} package version 0.9-2}, url = {http://CRAN.R-project.org/package=mcmc} } @incollection{glassHall, author = {Glass, D.V. and Hall, J. 
R.}, title = {Social Mobility in Great Britain: A Study in Intergenerational Change in Status}, booktitle = {Social Mobility in Great Britain}, publisher = {Routledge and Kegan Paul}, year = {1954} } @manual{expmR, title = {\pkg{expm}: Matrix Exponential}, author = {Vincent Goulet and Christophe Dutang and Martin Maechler and David Firth and Marina Shapira and Michael Stadelmann and {expm-developers@lists.R-forge.R-project.org}}, year = {2013}, note = {R package version 0.99-1}, url = {http://CRAN.R-project.org/package=expm} } @manual{hmmR, title = {\pkg{HMM}: HMM - Hidden Markov Models}, author = {Scientific Software Development - Dr. Lin Himmelmann and {www.linhi.com}}, year = {2010}, note = {\proglang{R} package version 1.0}, url = {http://CRAN.R-project.org/package=HMM} } @book{landOfOz, title = {Introduction to Finite Mathematics}, publisher = {Prentice Hall}, year = {1974}, author = {{J. G. Kemeny} and {J. L.Snell} and {G. L. Thompson}} } @article{msmR, author = {Christopher H. Jackson}, title = {Multi-State Models for Panel Data: The \pkg{msm} Package for \proglang{R}}, journal = {Journal of Statistical Software}, year = {2011}, volume = {38}, pages = {1--29}, number = {8}, url = {http://www.jstatsoft.org/v38/i08/} } @misc{manchesterR, author = {Ian Jacob}, month = {May}, year = {2014}, title = {Is R Cost Effective?}, language = {En}, howpublished = {Electronic}, organization = {Manchester Centre for Health Economics}, note = {Presented on Manchester R Meeting}, url ={http://www.rmanchester.org/Presentations/Ian%20Jacob%20-%20Is%20R%20Cost%20Effective.pdf} } @TECHREPORT{blandenEtAlii, author = {Jo Blanden, Paul Gregg and Stephen Machin}, title = {Intergenerational Mobility in Europe and North America}, institution = {Center for Economic Performances}, year = {2005}, owner = {Giorgio Copia}, timestamp = {2014.01.01}, url ={http://cep.lse.ac.uk/about/news/IntergenerationalMobility.pdf} } @misc{Konstantopoulos2009, author = {Konstantopoulos, Takis}, title = {Markov Chains and Random Walks}, year = {2009}, url ={http://www2.math.uu.se/~takis/public_html/McRw/mcrw.pdf} } @misc{montgomery, author = {James Montgomery}, title = {Communication Classes}, year = {2009}, url = {http://www.ssc.wisc.edu/~jmontgom/commclasses.pdf} } @manual{DTMCPackR, title = {\pkg{DTMCPack}: Suite of Functions Related to Discrete-Time Discrete-State Markov Chains}, author = {William Nicholson}, year = {2013}, note = {R package version 0.1-2}, url = {http://CRAN.R-project.org/package=DTMCPack} } @article{averyHenderson, author = {{P. J. Avery} and {D. A. Henderson}}, title = {Fitting Markov Chain Models to Discrete State Series}, journal = {Applied Statistics}, year = {1999}, volume = {48}, pages = {53-61}, number = {1} } @manual{rSoftware, title = {\proglang{R}: A Language and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2013}, url = {http://www.R-project.org/} } @manual{pkg:matlab, title = {\pkg{matlab}: \proglang{MATLAB} emulation package}, author = {P. 
Roebuck}, year = {2011}, note = {R package version 0.8.9}, url = {http://CRAN.R-project.org/package=matlab} } @misc{probBook, author = {Laurie Snell}, title = {Probability Book: chapter 11}, year = {1999}, url ={http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf} } @article{pkg:markovchain, author = {Giorgio Alfredo Spedicato}, title = {{Discrete Time Markov Chains with R}}, year = {2017}, journal = {{The R Journal}}, url = {https://journal.r-project.org/archive/2017/RJ-2017-036/index.html} } @manual{MAPmcR, title = {Bayesian Inference of First Order Markov Chains}, author = {Sai Bhargav Yalamanchi and Giorgio Alfredo Spedicato}, month = {06}, year = {2015}, note = {R package version 0.2.5} } @misc{wiki:markov, author = {Wikipedia}, title = {Markov chain --- Wikipedia{,} The Free Encyclopedia}, year = {2013}, note = {[Online; accessed 23-August-2013]}, url = {http://en.wikipedia.org/w/index.php?title=Markov_chain&oldid=568910294} } @manual{CreditMetricsR, title = {CreditMetrics: Functions for Calculating the CreditMetrics Risk Model}, author = {Andreas Wittmann}, year = {2007}, note = {\proglang{R} package version 0.0-2} } @manual{mathematica9, title = {\proglang{Mathematica}}, author = {Wolfram Research, Inc.}, organization = {Wolfram Research, Inc.}, edition = {ninth}, year = {2013}, adddress = {Champaign, Illinois} } @misc{mathematica9MarkovChain, author = {Wolfram Research, Inc.}, year = {2013}, organization = {Wolfram Research, Inc.}, url ={http://www.wolfram.com/mathematica/new-in-9/markov-chains-and-queues/structural-properties-of-finite-markov-processes.html} } @article{sch, author = {Christopher C. Strelioff and James P. Crutchfield and Alfred W. Hubler}, title = {Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-class Modeling}, journal = {Sante Fe Working Papers}, year = {2007}, url = {http://arxiv.org/abs/math/0703715/} } @article{mstateR, author = {Liesbeth C. 
de Wreede and Marta Fiocco and Hein Putter}, title = {\pkg{mstate}: An \proglang{R} Package for the Analysis of Competing Risks and Multi-State Models}, journal = {Journal of Statistical Software}, year = {2011}, volume = {38}, pages = {1--30}, number = {7}, url = {http://www.jstatsoft.org/v38/i07/} } @mastersthesis{MSkuriat, author = {Monika Skuriat-Olechnowska}, title = {Statistical inference and hypothesis testing for Markov chains with Interval Censoring}, school = {Delft University of Technology}, type = {diploma thesis}, year = {2005} } @misc{wiki:CI, author = {Wikipedia}, title = {Confidence interval --- Wikipedia{,} The Free Encyclopedia}, year = {2015}, url = {https://en.wikipedia.org/w/index.php?title=Confidence_interval&oldid=667353161}, note = {[Online; accessed 21-June-2015]} } @book{pardo2005statistical, title = {Statistical inference based on divergence measures}, author = {Pardo, Leandro}, year = {2005}, publisher = {CRC Press} } @article{anderson1957statistical, title = {Statistical inference about Markov chains}, author = {Anderson, Theodore W and Goodman, Leo A}, journal = {The Annals of Mathematical Statistics}, pages = {89--110}, year = {1957}, publisher = {JSTOR} } @article{ching2008higher, title = {Higher-order multivariate Markov chains and their applications}, author = {Ching, Wai-Ki and Ng, Michael K and Fung, Eric S}, journal = {Linear Algebra and its Applications}, volume = {428}, number = {2}, pages = {492--507}, year = {2008}, publisher = {Elsevier} } @incollection{ching2013higher, title = {Higher-order markov chains}, author = {Ching, Wai-Ki and Huang, Ximin and Ng, Michael K and Siu, Tak-Kuen}, booktitle = {Markov Chains}, pages = {141--176}, year = {2013}, publisher = {Springer} } @manual{pkg:Rsolnp, title = {Rsolnp: General Non-linear Optimization Using Augmented Lagrange Multiplier Method}, author = {Alexios Ghalanos and Stefan Theussl}, year = {2014}, note = {R package version 1.15.} } @PhdThesis{YinyuYe, title = {Interior Algorithms for Linear, Quadratic, and Linearly Constrained Non-Linear Programming}, author = {Yinyu Ye}, year = {1987}, school = {Department of {ESS}, Stanford University} } @book{NorrisBook, title = {Markovchains}, author = {J. R. Norris}, year = {1998}, publisher = {Cambridge University Press} } @article{ictmcpaper, author = {Thomas Krak, Jasper De Bock, Arno Siebes}, title = {Imprecise Continuous Time Markov Chains}, journal = {International Journal of Approximate Reasoning}, year = {2017}, volume = {88}, pages = {452-528}, publisher = {arXiv} } @misc{committorlink, author = {StackOverflow}, url = {https://math.stackexchange.com/questions/1450399/probability-that-a-chain-will-enter-state-5-before-it-enters-state-3?newreg=82f90b66b949495a91661caad24db915}, year = {2015} } @book{GallagerBook, title = {Stochastic Processes: Theory for Applications}, author = {Robert G. Gallager}, year = {2013}, publisher = {Cambridge University Press} } @misc{Sigman, title = {Continuous Time markovchains}, author = {Karl Sigman}, year = {2009}, publisher = {Columbia University} } @article{freqArticle, author = {Alexander Kreinin, Marina Sidelnikova}, title = {Regularization Algorithms for Transition Matrices}, journal = {Algo Research Quarterly}, year = {2001}, volume = {4}, number = {1/2}, pages = {23--40}, month = mar, publisher = {ALGO RESEARCH QUARTERLY} } @book{GrinsteadSnell, author = {Grinstead, Charles M. 
and Snell, Laurie J.}, booktitle = {Introduction to Probability}, edition = {Version dated 4 July 2006}, publisher = {American Mathematical Society}, title = {{Grinstead and Snell's Introduction to Probability}}, url = {http://math.dartmouth.edu/\~{}prob/prob/prob.pdf}, year = {2006} } @article{noe_constructing_2009, title = {Constructing the equilibrium ensemble of folding pathways from short off-equilibrium simulations}, volume = {106}, copyright = {© 2009 . Freely available online through the PNAS open access option.}, issn = {0027-8424, 1091-6490}, url = {https://www.pnas.org/content/106/45/19011}, doi = {10.1073/pnas.0905466106}, language = {en}, number = {45}, urldate = {2019-01-13}, journal = {Proceedings of the National Academy of Sciences}, author = {Noé, Frank and Schütte, Christof and Vanden-Eijnden, Eric and Reich, Lothar and Weikl, Thomas R.}, month = nov, year = {2009}, pmid = {19887634}, keywords = {committor}, pages = {19011--19016} } @article{Tarjan, doi = {10.1137/0201010}, url = {https://doi.org/10.1137/0201010}, year = {1972}, month = jun, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})}, volume = {1}, number = {2}, pages = {146--160}, author = {Robert Tarjan}, title = {Depth-First Search and Linear Graph Algorithms}, journal = {{SIAM} Journal on Computing} }markovchain/vignettes/template.tex0000644000176200001440000000363013762012754017132 0ustar liggesusers\documentclass[ $for(classoption)$ $classoption$$sep$, $endfor$ ]{$documentclass$} \usepackage[utf8]{inputenc} \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \author{ $for(author)$$author.name$\\$author.affiliation$$sep$ \And $endfor$ } \title{$title.formatted$} \Plainauthor{$for(author)$$author.name$$sep$, $endfor$} $if(title.plain)$ \Plaintitle{$title.plain$} $endif$ $if(title.short)$ \Shorttitle{$title.short$} $endif$ \Abstract{ $abstract$ } $if(keywords.formatted)$ \Keywords{$for(keywords.formatted)$$keywords.formatted$$sep$, $endfor$} $endif$ $if(keywords.plain)$ \Plainkeywords{$for(keywords.plain)$$keywords.plain$$sep$, $endfor$} $endif$ $if(highlighting-macros)$ $highlighting-macros$ $endif$ %% publication information %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{$submitdate$} %% \Acceptdate{2012-06-04} \Address{ $for(author)$ $if(author.address)$ $author.name$\\ $author.affiliation$\\ $author.address$\\ $if(author.email)$E-mail: $author.email$\\$endif$ $if(author.url)$URL: $author.url$\\~\\$endif$ $endif$ $endfor$ } $if(csl-refs)$ \newlength{\cslhangindent} \setlength{\cslhangindent}{1.5em} \newenvironment{cslreferences}% {$if(csl-hanging-indent)$\setlength{\parindent}{0pt}% \everypar{\setlength{\hangindent}{\cslhangindent}}\ignorespaces$endif$}% {\par} $endif$ % Pandoc header $for(header-includes)$ $header-includes$ $endfor$ $preamble$ \begin{document} $body$ $if(natbib)$ $if(bibliography)$ $if(biblio-title)$ $if(book-class)$ \renewcommand\bibname{$biblio-title$} $else$ \renewcommand\refname{$biblio-title$} $endif$ $endif$ \bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$} $endif$ $endif$ $if(biblatex)$ \printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ $endif$ \end{document} markovchain/vignettes/an_introduction_to_markovchain_package.Rmd0000644000176200001440000032041513762012754025202 0ustar liggesusers--- title: plain: "The markovchain Package: A Package for Easily Handling Discrete Markov Chains in R" formatted: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov 
Chains in \\proglang{R}" short: "\\pkg{markovchain} package: discrete Markov chains in \\proglang{R}" pagetitle: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}" author: - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat FCAS, FSA, CSPA Unipol Group address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedygiorgio@gmail.com} url: www.statisticaladvisor.com - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Sai Bhargav Yalamanchi" affiliation: B-Tech student, Electrical Engineering address: > Indian Institute of Technology, Bombay Mumbai - 400 076, India email: \email{bhargavcoolboy@gmail.com} - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Ignacio Cordón" affiliation: Software Engineer address: > Madrid (Madrid), Spain email: \email{nacho.cordon.castillo@gmail.com} preamble: > \author{\small{Giorgio Alfredo Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi, Deepak Yadav, Ignacio Cordón}} \Plainauthor{G.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón} \usepackage{graphicx} \usepackage{amsmath} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} \usepackage{amsfonts} abstract: | The \pkg{markovchain} package aims to fill a gap within the \proglang{R} framework providing S4 classes and methods for easily handling discrete time Markov chains, homogeneous and simple inhomogeneous ones as well as continuous time Markov chains. The S4 classes for handling and analysing discrete and continuous time Markov chains are presented, as well as functions and method for performing probabilistic and statistical analysis. Finally, some examples in which the package's functions are applied to Economics, Finance and Natural Sciences topics are shown. output: if (rmarkdown::pandoc_version() < 2.2) function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{An introduction to markovchain package} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] formatted: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add .... 
x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) }) ``` # Introduction Markov chains represent a class of stochastic processes of great interest for the wide spectrum of practical applications. In particular, discrete time Markov chains (DTMC) permit to model the transition probabilities between discrete states by the aid of matrices.Various \proglang{R} packages deal with models that are based on Markov chains: * \pkg{msm} [@msmR] handles Multi-State Models for panel data. * \pkg{mcmcR} [@mcmcR] implements Monte Carlo Markov Chain approach. * \pkg{hmm} [@hmmR] fits hidden Markov models with covariates. * \pkg{mstate} fits `Multi-State Models based on Markov chains for survival analysis [@mstateR]. Nevertheless, the \proglang{R} statistical environment [@rSoftware] seems to lack a simple package that coherently defines S4 classes for discrete Markov chains and allows to perform probabilistic analysis, statistical inference and applications. For the sake of completeness, \pkg{markovchain} is the second package specifically dedicated to DTMC analysis, being \pkg{DTMCPack} [@DTMCPackR] the first one. Notwithstanding, \pkg{markovchain} package [@pkg:markovchain] aims to offer more flexibility in handling DTMC than other existing solutions, providing S4 classes for both homogeneous and non-homogeneous Markov chains as well as methods suited to perform statistical and probabilistic analysis. The \pkg{markovchain} package depends on the following \proglang{R} packages: \pkg{expm} [@expmR] to perform efficient matrices powers; \pkg{igraph} [@pkg:igraph] to perform pretty plotting of `markovchain` objects and \pkg{matlab} [@pkg:matlab], that contains functions for matrix management and calculations that emulate those within \proglang{MATLAB} environment. Moreover, other scientific softwares provide functions specifically designed to analyze DTMC, as \proglang{Mathematica} 9 [@mathematica9]. The paper is structured as follows: Section \@ref(sec:mathematics) briefly reviews mathematics and definitions regarding DTMC, Section \@ref(sec:structure) discusses how to handle and manage Markov chain objects within the package, Section \@ref(sec:probability) and Section \@ref(sec:statistics) show how to perform probabilistic and statistical modelling, while Section \@ref(sec:applications) presents some applied examples from various fields analyzed by means of the \pkg{markovchain} package. # Review of core mathematical concepts {#sec:mathematics} ## General Definitions A DTMC is a sequence of random variables $X_{1},\: X_{2}\: ,\ldots,\:X_{n},\ldots$ characterized by the Markov property (also known as memoryless property, see Equation \ref{eq:markovProp}). The Markov property states that the distribution of the forthcoming state $X_{n+1}$ depends only on the current state $X_{n}$ and doesn't depend on the previous ones $X_{n-1},\: X_{n-2},\ldots,\: X_{1}$. \begin{equation} Pr\left(X_{n+1}=x_{n+1}\left|X_{1}=x_{1},X_{2}=x_{2,}...,X_{n}=x_{n}\right.\right)=Pr\left(X_{n+1}=x_{n+1}\left|X_{n}=x_{n}\right.\right). \label{eq:markovProp} \end{equation} The set of possible states $S=\left\{ s_{1},s_{2},...,s_{r}\right\}$ of $X_{n}$ can be finite or countable and it is named the state space of the chain. 
The chain moves from one state to another (this change is named either 'transition' or 'step') and the probability $p_{ij}$ to move from state $s_{i}$ to state $s_{j}$ in one step is named transition probability: \begin{equation} p_{ij}=Pr\left(X_{1}=s_{j}\left|X_{0}=s_{i}\right.\right). \label{eq:trProp} \end{equation} The probability of moving from state $i$ to $j$ in $n$ steps is denoted by $p_{ij}^{(n)}=Pr\left(X_{n}=s_{j}\left|X_{0}=s_{i}\right.\right)$. A DTMC is called time-homogeneous if the property shown in Equation \ref{eq:mcHom} holds. Time homogeneity implies no change in the underlying transition probabilities as time goes on. \begin{equation} Pr\left(X_{n+1}=s_{j}\left|X_{n}=s_{i}\right.\right)=Pr\left(X_{n}=s_{j}\left|X_{n-1}=s_{i}\right.\right). \label{eq:mcHom} \end{equation} If the Markov chain is time-homogeneous, then $p_{ij}=Pr\left(X_{k+1}=s_{j}\left|X_{k}=s_{i}\right.\right)$ and \newline $p_{ij}^{(n)}=Pr\left(X_{n+k}=s_{j}\left|X_{k}=s_{i}\right.\right)$, where $k>0$. The probability distribution of transitions from one state to another can be represented into a transition matrix $P=(p_{ij})_{i,j}$, where each element of position $(i,j)$ represents the transition probability $p_{ij}$. E.g., if $r=3$ the transition matrix $P$ is shown in Equation \ref{eq:trPropEx} \begin{equation} P=\left[\begin{array}{ccc} p_{11} & p_{12} & p_{13}\\ p_{21} & p_{22} & p_{23}\\ p_{31} & p_{32} & p_{33} \end{array}\right]. \label{eq:trPropEx} \end{equation} The distribution over the states can be written in the form of a stochastic row vector $x$ (the term stochastic means that $\sum_{i}x_{i}=1, x_{i} \geq 0$): e.g., if the current state of $x$ is $s_{2}$, $x=\left(0\:1\:0\right)$. As a consequence, the relation between $x^{(1)}$ and $x^{(0)}$ is $x^{(1)}=x^{(0)}P$ and, recursively, we get $x^{(2)}=x^{(0)}P^{2}$ and $x^{(n)}=x^{(0)}P^{n},\, n>0$. DTMC are explained in most theory books on stochastic processes, see \cite{bremaud1999discrete} and \cite{dobrow2016introduction} for example. Valuable references online available are: \cite{konstantopoulos2009markov}, \cite{probBook} and \cite{bardPpt}. ## Properties and classification of states {#sec:properties} A state $s_{j}$ is said accessible from state $s_{i}$ (written $s_{i}\rightarrow s_{j}$) if a system starting in state $s_{i}$ has a positive probability to reach the state $s_{j}$ at a certain point, i.e., $\exists n>0:\: p_{ij}^{n}>0$. If both $s_{i}\rightarrow s_{j}$ and $s_{j}\rightarrow s_{i}$, then $s_{i}$ and $s_{j}$ are said to communicate. A communicating class is defined to be a set of states that communicate. A DTMC can be composed by one or more communicating classes. If the DTMC is composed by only one communicating class (i.e., if all states in the chain communicate), then it is said irreducible. A communicating class is said to be closed if no states outside of the class can be reached from any state inside it. If $p_{ii}=1$, $s_{i}$ is defined as absorbing state: an absorbing state corresponds to a closed communicating class composed by one state only. The canonical form of a DTMC transition matrix is a matrix having a block form, where the closed communicating classes are shown at the beginning of the diagonal matrix. A state $s_{i}$ has period $k_{i}$ if any return to state $s_{i}$ must occur in multiplies of $k_{i}$ steps, that is $k_{i}=gcd\left\{ n:Pr\left(X_{n}=s_{i}\left|X_{0}=s_{i}\right.\right)>0\right\}$, where $gcd$ is the greatest common divisor. 
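As a simple worked illustration of this definition, consider the deterministic two-state chain with transition matrix

\[
P=\left[\begin{array}{cc}
0 & 1\\
1 & 0
\end{array}\right].
\]

A return to either state can only occur after $2, 4, 6, \ldots$ steps, so $k_{1}=k_{2}=gcd\left\{ 2,4,6,\ldots\right\} =2$ and both states have period 2.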
If $k_{i}=1$ the state $s_{i}$ is said to be aperiodic, else if $k_{i}>1$ the state $s_{i}$ is periodic with period $k_{i}$. Loosely speaking, $s_{i}$ is periodic if it can only return to itself after a fixed number of transitions $k_{i}>1$ (or a multiple of $k_{i}$), else it is aperiodic.

If states $s_{i}$ and $s_{j}$ belong to the same communicating class, then they have the same period $k_{i}$. As a consequence, each of the states of an irreducible DTMC shares the same periodicity. This periodicity is also considered the DTMC periodicity.

It is also possible to classify states according to their recurrence. Let $T^{x\rightarrow x}$ be the number of steps needed to return to state $x$, given that the chain starts in $x$.

* A state $x$ is recurrent if $P(T^{x\rightarrow x}<+\infty)=1$ (equivalently $P(T^{x\rightarrow x}=+\infty)=0$). In addition:
    1. A recurrent state $x$ is null recurrent if $E(T^{x\rightarrow x})=+\infty$.
    2. A recurrent state $x$ is positive recurrent if $E(T^{x\rightarrow x})<+\infty$.
    3. A recurrent state $x$ is absorbing if $P(T^{x\rightarrow x}=1)=1$.
* A state $x$ is transient if $P(T^{x\rightarrow x}<+\infty)<1$ (equivalently $P(T^{x\rightarrow x}=+\infty)>0$).

It is possible to analyze the timing to reach a certain state. The first passage time (or hitting time) from state $s_{i}$ to state $s_{j}$ is the number $T_{ij}$ of steps taken by the chain until it arrives for the first time at state $s_{j}$, given that $X_{0} = s_{i}$. The probability distribution of $T_{ij}$ is defined by Equation \ref{eq:fpt1}

\begin{equation}
{h_{ij}}^{\left( n \right)} = Pr\left( {T_{ij} = n} \right) = Pr\left( X_n = s_j,X_{n - 1} \ne s_{j}, \ldots ,X_1 \ne s_j |X_0 = s_i \right)
\label{eq:fpt1}
\end{equation}

and can be found recursively using Equation \ref{eq:ftp2}, given that ${h_{ij}}^{\left( 1 \right)} = p_{ij}$.

\begin{equation}
{h_{ij}}^{\left( n \right)} = \sum\limits_{k \in S - \left\{ s_{j} \right\}}^{} {{p_{ik}}{h_{kj}}^{\left( {n - 1} \right)}}.
\label{eq:ftp2}
\end{equation}

A commonly used quantity related to $h$ is its average value, i.e. the \emph{mean first passage time} (also expected hitting time), namely $\bar h_{ij}= \sum_{n=1}^{\infty} n \,h_{ij}^{(n)}$.

If in the definition of the first passage time we let $s_{i}=s_{j}$, we obtain the first recurrence time $T_{i}=\inf \{ n\geq1:X_{n}=s_{i}|X_{0}=s_{i} \}$. We can also define the *mean recurrence time* as the expected value of the first recurrence time:

\[ r_i = \sum_{k = 1}^{\infty} k \cdot P(T_i = k) \]

Revisiting the definition of recurrence and transience: a state $s_{i}$ is said to be recurrent if it is visited infinitely often, i.e., $Pr(T_{i}<+\infty|X_{0}=s_{i})=1$. Conversely, $s_{i}$ is called transient if there is a positive probability that the chain will never return to $s_{i}$, i.e., $Pr(T_{i}=+\infty|X_{0}=s_{i})>0$.

Given a time-homogeneous Markov chain with transition matrix \emph{P}, a stationary distribution \emph{z} is a stochastic row vector such that $z=z\cdot P$, where $0\leq z_{j}\leq 1 \: \forall j$ and $\sum_{j}z_{j}=1$.

If a DTMC $\{X_{n}\}$ is irreducible and aperiodic, then it has a limit distribution and this distribution is stationary.
As a consequence, if $P$ is the $k\times k$ transition matrix of the chain and $z=\left(z_{1},...,z_{k}\right)$ is the unique eigenvector of $P$ such that $\sum_{i=1}^{k}z_{i}=1$, then we get

\begin{equation}
\underset{n\rightarrow\infty}{lim}P^{n}=Z,
\label{eq:limMc}
\end{equation}

where $Z$ is the matrix having all rows equal to $z$. The stationary distribution of $\{X_{n}\}$ is represented by $z$.

A matrix $A$ is called primitive if all of its entries are strictly positive, and we write it $A > 0$. If the transition matrix $P$ for a DTMC has some primitive power, i.e. there exists $m > 0$ such that $P^m > 0$, then the DTMC is said to be regular. In fact, being regular is equivalent to being irreducible and aperiodic. All regular DTMCs are irreducible; the converse is not true.

Given two absorbing states $s_A$ (source) and $s_B$ (sink), the \emph{committor probability} $q_j^{(AB)}$ is the probability that a process starting in state $s_j$ is absorbed in state $s_B$ (rather than $s_A$) [@noe_constructing_2009]. It can be computed via

\begin{equation}
q_j^{(AB)} = \sum_{k \notin \{A, B\}} P_{jk}q_k^{(AB)} \quad \mbox{with} \quad q_A^{(AB)} = 0 \quad \mbox{and} \quad q_B^{(AB)} = 1
\end{equation}

Note that we can also define the hitting probability from $i$ to $j$ as the probability of ever reaching the state $j$ if our initial state is $i$:

\begin{equation}
h_{i,j} = Pr(T_{ij} < \infty) = \sum_{n = 1}^{\infty} h_{ij}^{(n)}
\label{eq:hitting-probs}
\end{equation}

In a DTMC with a finite set of states, at least one recurrent state is accessible from every transient state. If the chain starts in a transient state, once it hits a recurrent state it is caught in the corresponding recurrent class and cannot be expected to return to the initial state. Given a transient state $i$ we can define the *absorption probability* to the recurrent state $j$ as the probability that the first recurrent state that the Markov chain visits (and therefore the recurrent class by which it gets absorbed) is $j$, $f^{*}_{ij}$. We can also define the *mean absorption time* as the mean number of steps the transient state $i$ would take until it hits any recurrent state, $b_i$.

## A short example

Consider the following numerical example. Suppose we have a DTMC with a set of 3 possible states $S=\{s_{1}, s_{2}, s_{3}\}$. Let the transition matrix be:

\begin{equation}
P=\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right].
\label{eq:trPropExEx1}
\end{equation}

In $P$, $p_{11}=0.5$ is the probability that $X_{1}=s_{1}$ given that we observed $X_{0}=s_{1}$, and so on. It is easy to see that the chain is irreducible since all the states communicate (it is made of one communicating class only).

Suppose that the current state of the chain is $X_{0}=s_{2}$, i.e., $x^{(0)}=(0\:1\:0)$, then the probability distribution of states after 1 and 2 steps can be computed as shown in Equations \@ref(eq:trPropExEx2) and \@ref(eq:trPropExEx3).

\begin{equation}
x^{(1)}=\left(0\:1\:0\right)\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right]=\left(0.15\:0.45\:0.4\right).
\label{eq:trPropExEx2}
\end{equation}

\begin{equation}
x^{(n)}=x^{(n-1)}P \to \left(0.15\:0.45\:0.4\right)\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right]=\left(0.2425\:0.3725\:0.385\right).
\label{eq:trPropExEx3} \end{equation} If we were interested in the probability of being in the state $s_{3}$ in the second step, then $Pr\left(X_{2}=s_{3}\left|X_{0}=s_{2}\right.\right)=0.385$. \newpage # The structure of the package {#sec:structure} ## Creating markovchain objects The package is loaded within the \proglang{R} command line as follows: ```{r, load, results='hide', message=FALSE} library("markovchain") ``` ```{r, load-aux, echo=FALSE, results='hide'} require("matlab") ``` The `markovchain` and `markovchainList` S4 classes [@chambers] are defined within the \pkg{markovchain} package as displayed: ```{r, showClass, echo=FALSE} showClass("markovchain") showClass("markovchainList") ``` The first class has been designed to handle homogeneous Markov chain processes, while the latter (which is itself a list of `markovchain` objects) has been designed to handle non-homogeneous Markov chains processes. Any element of `markovchain` class is comprised by following slots: 1. `states`: a character vector, listing the states for which transition probabilities are defined. 2. `byrow`: a logical element, indicating whether transition probabilities are shown by row or by column. 3. `transitionMatrix`: the probabilities of the transition matrix. 4. `name`: optional character element to name the DTMC. The `markovchainList` objects are defined by following slots: 1. `markovchains`: a list of `markovchain` objects. 2. `name`: optional character element to name the DTMC. The `markovchain` objects can be created either in a long way, as the following code shows ```{r mcInitLong} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") ``` or in a shorter way, displayed below ```{r mcInitShort} mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather") ``` When `new("markovchain")` is called alone, a default Markov chain is created. ```{r defaultMc} defaultMc <- new("markovchain") ``` The quicker way to create `markovchain` objects is made possible thanks to the implemented `initialize` S4 method that checks that: * the `transitionMatrix` to be a transition matrix, i.e., all entries to be probabilities and either all rows or all columns to sum up to one. * the columns and rows names of `transitionMatrix` to be defined and to coincide with `states` vector slot. The `markovchain` objects can be collected in a list within `markovchainList` S4 objects as following example shows. ```{r intromcList} mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains") ``` ## Handling markovchain objects Table \@ref(tab:methodsToHandleMc) lists which methods handle and manipulate `markovchain` objects. 
\begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Purpose \\ \hline \hline \code{*} & Direct multiplication for transition matrices.\\ \code{\textasciicircum{}} & Compute the power \code{markovchain} of a given one.\\ \code{[} & Direct access to the elements of the transition matrix.\\ \code{==} & Equality operator between two transition matrices.\\ \code{!=} & Inequality operator between two transition matrices.\\ \code{as} & Operator to convert \code{markovchain} objects into \code{data.frame} and\\ & \code{table} object.\\ \code{dim} & Dimension of the transition matrix.\\ \code{names} & Equal to \code{states}.\\ \code{names<-} & Change the \code{states} name.\\ \code{name} & Get the name of \code{markovchain object}.\\ \code{name<-} & Change the name of \code{markovchain object}.\\ \code{plot} & \code{plot} method for \code{markovchain} objects.\\ \code{print} & \code{print} method for \code{markovchain} objects.\\ \code{show} & \code{show} method for \code{markovchain} objects.\\ \code{sort} & \code{sort} method for \code{markovchain} objects, in terms of their states.\\ \code{states} & Name of the transition states.\\ \code{t} & Transposition operator (which switches \code{byrow} `slot value and modifies \\ & the transition matrix coherently).\\ \hline \end{tabular} \caption{\pkg{markovchain} methods for handling \code{markovchain} objects.} \label{tab:methodsToHandleMc} \end{table} The examples that follow shows how operations on `markovchain` objects can be easily performed. For example, using the previously defined matrix we can find what is the probability distribution of expected weather states in two and seven days, given the actual state to be cloudy. ```{r operations} initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ``` A similar answer could have been obtained defining the vector of probabilities as a column vector. A column - defined probability matrix could be set up either creating a new matrix or transposing an existing `markovchain` object thanks to the `t` method. ```{r operations2} initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ``` The initial state vector previously shown can not necessarily be a probability vector, as the code that follows shows: ```{r fval} fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ``` Basic methods have been defined for `markovchain` objects to quickly get states and transition matrix dimension. ```{r otherMethods} states(mcWeather) names(mcWeather) dim(mcWeather) ``` Methods are available to set and get the name of `markovchain` object. ```{r otherMethods2} name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ``` Also it is possible to alphabetically sort the transition matrix: ```{r sortMethod} markovchain:::sort(mcWeather) ``` A direct access to transition probabilities is provided both by `transitionProbability` method and `"["` method. ```{r transProb} transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ``` The transition matrix of a `markovchain` object can be displayed using `print` or `show` methods (the latter being less verbose). 
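The comparison operators listed in Table \@ref(tab:methodsToHandleMc) act on the transition matrices of two `markovchain` objects; as a minimal illustrative check (not taken from the package manual), comparing a chain with itself should return `TRUE`:

```{r equalityOperator}
# `==` compares the transition matrices of two markovchain objects
mcWeather == mcWeather
```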
Similarly, the underlying transition probability diagram can be plotted by the use of `plot` method (as shown in Figure \@ref(fig:mcPlot)) which is based on \pkg{igraph} package [@pkg:igraph]. `plot` method for `markovchain` objects is a wrapper of `plot.igraph` for `igraph` S4 objects defined within the \pkg{igraph} package. Additional parameters can be passed to `plot` function to control the network graph layout. There are also \pkg{diagram} and \pkg{DiagrammeR} ways available for plotting as shown in Figure \@ref(fig:mcPlotdiagram). The `plot` function also uses `communicatingClasses` function to separate out states of different communicating classes. All states that belong to one class have same color. ```{r printAndShow} print(mcWeather) show(mcWeather) ``` ```{r mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"} if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ``` ```{r mcPlotdiagram, echo=FALSE, fig.cap="Weather example. Markov chain plot with diagram"} if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ``` Import and export from some specific classes is possible, as shown in Figure \@ref(fig:fromAndTo) and in the following code. ```{r exportImport1} mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ``` ```{r exportImport2} if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ``` from etm (now archived as of September 2020): ```{r exporImport3} if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, "markovchain") etm2mc } else { message("etm unavailable") } ``` ```{r fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"} library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ``` Coerce from `matrix` method, as the code below shows, represents another approach to create a `markovchain` method starting from a given squared probability matrix. ```{r exportImport4} myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ``` Non-homogeneous Markov chains can be created with the aid of `markovchainList` object. The example that follows arises from health insurance, where the costs associated to patients in a Continuous Care Health Community (CCHC) are modeled by a non-homogeneous Markov Chain, since the transition probabilities change by year. Methods explicitly written for `markovchainList` objects are: `print`, `show`, `dim` and `[`. 
```{r cchcMcList} stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ``` It is possible to perform direct access to `markovchainList` elements, as well as to determine the number of `markovchain` objects by which a `markovchainList` object is composed. ```{r cchcMcList2} mcCCRC[[1]] dim(mcCCRC) ``` The `markovchain` package contains some data found in the literature related to DTMC models (see Section \@ref(sec:applications). Table \@ref(tab:datasets) lists datasets and tables included within the current release of the package. \begin{table}[h] \centering \begin{tabular}{p{0.2\textwidth}p{0.75\textwidth}} \hline Dataset & Description \\ \hline \hline \code{blanden} & Mobility across income quartiles, \cite{blandenEtAlii}.\\ \code{craigsendi} & CD4 cells, \cite{craigSendi}.\\ \code{kullback} & raw transition matrices for testing homogeneity, \cite{kullback1962tests}.\\ \code{preproglucacon} & Preproglucacon DNA basis, \cite{averyHenderson}.\\ \code{rain} & Alofi Island rains, \cite{averyHenderson}.\\ \code{holson} & Individual states trajectories.\\ \code{sales} & Sales of six beverages in Hong Kong \cite{ching2008higher}. \\ \hline \end{tabular} \caption{The \pkg{markovchain} \code{data.frame} and \code{table}.} \label{tab:datasets} \end{table} Finally, Table \@ref(tab:demos) lists the demos included in the demo directory of the package. \begin{table}[h] \centering \begin{tabular}{lll} \hline R Code File & Description \\ \hline \hline \code{bard.R} & Structural analysis of Markov chains from Bard PPT.\\ \code{examples.R} & Notable Markov chains, e.g., The Gambler Ruin chain.\\ \code{quickStart.R} & Generic examples.\\ \code{extractMatrices.R} & Generic examples.\\ \hline \end{tabular} \caption{The \pkg{markovchain} demos.} \label{tab:demos} \end{table} # Probability with markovchain objects {#sec:probability} The \pkg{markovchain} package contains functions to analyse DTMC from a probabilistic perspective. For example, the package provides methods to find stationary distributions and identifying absorbing and transient states. Many of these methods come from \proglang{MATLAB} listings that have been ported into \proglang{R}. For a full description of the underlying theory and algorithm the interested reader can overview the original \proglang{MATLAB} listings, \cite{renaldoMatlab} and \cite{montgomery}. Table \@ref(tab:methodsToStats) shows methods that can be applied on `markovchain` objects to perform probabilistic analysis. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Returns \\ \hline \hline \code{absorbingStates} & the absorbing states of the transition matrix, if any.\\ \code{steadyStates} & the vector(s) of steady state(s) in matrix form. \\ \code{meanFirstPassageTime} & matrix or vector of mean first passage times. 
\\ \code{meanRecurrenceTime} & vector of mean number of steps to return to each recurrent state \\ \code{hittingProbabilities} & matrix of hitting probabilities for a Markov chain. \\ \code{meanAbsorptionTime} & expected number of steps for a transient state to be \\ & absorbed by any recurrent class \\ \code{absorptionProbabilities} & probabilities of transient states of being \\ & absorbed by each recurrent state \\ \code{committorAB} & committor probabilities \\ \code{communicatingClasses} & list of communicating classes. \\ & $s_{j}$, given actual state $s_{i}$. \\ \code{canonicForm} & the transition matrix into canonic form. \\ \code{is.accessible} & checks whether a state j is reachable from state i. \\ \code{is.irreducible} & checks whether a DTMC is irreducible. \\ \code{is.regular} & checks whether a DTMC is regular. \\ \code{period} & the period of an irreducible DTMC. \\ \code{recurrentClasses} & list of recurrent communicating classes. \\ \code{transientClasses} & list of transient communicating classes. \\ \code{recurrentStates} & the recurrent states of the transition matrix. \\ \code{transientStates} & the transient states of the transition matrix, if any. \\ \code{summary} & DTMC summary. \\ \hline \end{tabular} \caption{\pkg{markovchain} methods: statistical operations.} \label{tab:methodsToStats} \end{table} ## Conditional distributions The conditional distribution of weather states, given that current day's weather is sunny, is given by following code. ```{r conditionalDistr} conditionalDistribution(mcWeather, "sunny") ``` ## Stationary states A stationary (steady state, or equilibrium) vector is a probability vector such that Equation \ref{eq:steadystat2} holds \begin{equation} \begin{matrix} 0\leq \pi_j \leq 1\\ \sum_{j \in S} \pi_j = 1\\ \pi \cdot P = \pi \end{matrix} \label{eq:steadystat2} \end{equation} Steady states are associated to $P$ eigenvalues equal to one. We could be tempted to compute them solving the eigen values / vectors of the matrix and taking real parts (since if $u + iv$ is a eigen vector, for the matrix $P$, then $Re(u + iv) = u$ and $Im(u + iv) = v$ are eigen vectors) and normalizing by the vector sum, this carries some concerns: 1. If $u, v \in \mathbb{R}^n$ are linearly independent eigen vectors associated to $1$ eigen value, $u + iv$, $u + iu$ are also linearly independent eigen vectors, and their real parts coincide. Clearly if we took real parts, we would be loosing an eigen vector, because we cannot know in advance if the underlying algorithm to compute the eigen vectors is going to output something similar to what we described. We should be agnostic to the underlying eigen vector computation algorithm. 2. Imagine the identity $P$ of dimensions $2 \times 2$. Its eigen vectors associated to the $1$ eigen value are $u = (1, 0)$ and $v = (0, 1)$. However, the underlying algorithm to compute eigen vectors could return $(1, -2)$ and $(-2, 1)$ instead, that are linear combinations of the aforementioned ones, and therefore eigen vectors. Normalizing by their sum, we would get: $(-1, 2)$ and $(2, -1)$, which obviously are not probability measures. Again, we should be agnostic to the underlying eigen computation algorithm. 3. Algorithms to compute eigen values / vectors are computationally expensive: they are iterative, and we cannot predict a fixed number of iterations for them. Moreover, each iteration takes $\mathcal{O}(m^2)$ or $\mathcal{O}(m^3)$ algorithmic complexity, with $m$ the number of states. 
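These concerns suggest computing stationary distributions by solving a linear system rather than an eigenproblem. As a minimal sketch of this idea (our own illustration, not the package's internal implementation; the helper `steadyStateBySolve` is defined only here), the stationary vector of an irreducible chain can be obtained by replacing one redundant equation of the singular system $v \cdot (P - I) = 0$ with the normalization constraint $\sum_j v_j = 1$:

```{r steadyStateLinearSketch}
# Sketch only: solve v %*% P = v together with sum(v) = 1 by replacing the
# last (redundant) row of t(P) - I with the normalization constraint.
steadyStateBySolve <- function(P) {
  m <- nrow(P)
  A <- t(P) - diag(m)
  A[m, ] <- rep(1, m)          # impose sum(v) = 1
  b <- c(rep(0, m - 1), 1)
  solve(A, b)
}
steadyStateBySolve(mcWeather@transitionMatrix)
```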
We are going to use the fact that every irreducible DTMC has a unique steady state, that is, if $M$ is the matrix for an irreducible DTMC (all states communicate with each other), then there exists a unique $v \in \mathbb{R}^m$ such that:

\[ v \cdot M = v, \qquad \sum_{i = 1}^m v_i = 1 \]

We will also use the fact that a steady state for a DTMC assigns $0$ to the transient states. The canonical form of a (by row) stochastic matrix looks like:

\[
\left(\begin{array}{c|c|c|c|c}
M_1 & 0 & 0 & \ldots & 0 \\ \hline
0 & M_2 & 0 & \ldots & 0 \\ \hline
0 & 0 & M_3 & \ldots & 0 \\ \hline
\vdots & \vdots & \vdots & \ddots & \vdots \\ \hline
A_1 & A_2 & A_3 & \ldots & R
\end{array}\right)
\]

where $M_i$ corresponds to irreducible sub-chains, the blocks $A_i$ correspond to the transitions from transient states to each of the recurrent classes and $R$ contains the transitions from the transient states to themselves.

Also, we should note that a Markov chain has exactly the same number of steady states as recurrent classes. Therefore, we have coded the following algorithm ^[We would like to thank Prof. Christophe Dutang for his contributions to the development of this method. He coded a first improvement of the original `steadyStates` method and we could not have reached the current correctness without his previous work]:

1. Identify the recurrent classes $[C_1, \ldots, C_l]$ with the \texttt{recurrentClasses} function.
2. Take each class $C_i$ and compute the sub-matrix $M_i$ corresponding to it.
3. Solve the system $v \cdot M_i = v, \, \sum_{j = 1}^{|C_i|} v_j = 1$, which has a unique solution, for each $i = 1, \ldots, l$.
4. Map each solution back to the original state order in $P$ and assign a $0$ to the slots corresponding to transient states in the matrix.

The result is returned in matrix form.

```{r steadyStates}
steadyStates(mcWeather)
```

It is possible for a Markov chain to have more than one stationary distribution, as the gambler ruin example shows.

```{r gamblerRuin}
gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) {
  m <- matlab::zeros(moneyMax + 1)
  m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1
  states <- as.character(0:moneyMax)
  rownames(m) <- colnames(m) <- states

  for(i in 2:moneyMax){
    m[i,i-1] <- 1 - prob
    m[i, i + 1] <- prob
  }

  new("markovchain", transitionMatrix = m,
      name = paste("Gambler ruin", moneyMax, "dim", sep = " "))
}

mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5)
steadyStates(mcGR4)
```

## Classification of states

Absorbing states are determined by means of the `absorbingStates` method.

```{r absorbingStates}
absorbingStates(mcGR4)
absorbingStates(mcWeather)
```

The key function in methods which need knowledge about communicating classes, recurrent states and transient states is `.commclassKernel`, which is a modification of Tarjan's algorithm from \cite{Tarjan}. This `.commclassKernel` method gets a transition matrix of dimension $n$ and returns a list of two items:

1. `classes`, a matrix whose $(i, j)$ entry is `true` if $s_i$ and $s_j$ are in the same communicating class.
2. `closed`, a vector whose $i$-th entry indicates whether the communicating class to which $i$ belongs is closed.

These functions are used by two other internal functions on which the `summary` method for `markovchain` objects works.

The example matrix used in \cite{renaldoMatlab} well exemplifies the purpose of the function.
```{r renaldoMatrix1} P <- matlab::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ``` All states that pertain to a transient class are named "transient" and a specific method has been written to elicit them. ```{r transientStates} transientStates(probMc) ``` `canonicForm` method that turns a Markov chain into its canonic form, reordering the states to have first the recurrent classes and then the transient states. ```{r probMc2Canonic} probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ``` The function `is.accessible` permits to investigate whether a state $s_{j}$ is accessible from state $s_i$, that is whether the probability to eventually reach $s_j$ starting from $s_{i}$ is greater than zero. ```{r isAccessible} is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ``` In Section \@ref(sec:properties) we observed that, if a DTMC is irreducible, all its states share the same periodicity. Then, the `period` function returns the periodicity of the DTMC, provided that it is irreducible. The example that follows shows how to find if a DTMC is reducible or irreducible by means of the function `is.irreducible` and, in the latter case, the method `period` is used to compute the periodicity of the chain. ```{r periodicity} E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ``` The example Markov chain found in \proglang{Mathematica} web site \citep{mathematica9MarkovChain} has been used, and is plotted in Figure \@ref(fig:mcMathematics). ```{r mathematica9Mc} require(matlab) mathematicaMatr <- zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ``` ```{r mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. Markov chain plot."} plot(mathematicaMc, layout = layout.fruchterman.reingold) ``` ```{r mathematica9MC, echo=FALSE} summary(mathematicaMc) ``` ## First passage time distributions and means \cite{renaldoMatlab} provides code to compute first passage time (within $1,2,\ldots, n$ steps) given the initial state to be $i$. The \proglang{MATLAB} listings translated into \proglang{R} on which the `firstPassage` function is based are: ```{r fpTime1, eval=FALSE} .firstpassageKernel <- function(P, i, n){ G <- P H <- P[i,] E <- 1 - diag(size(P)[2]) for (m in 2:n) { G <- P %*% (G * E) H <- rbind(H, G[i,]) } return(H) } ``` We conclude that the probability for the *first* rainy day to be the third one, given that the current state is sunny, is given by: ```{r fpTime2} firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ``` To compute the *mean* first passage times, i.e. 
the expected number of days before it rains given that today is sunny, we can use the `meanFirstPassageTime` function: ```{r mfpt1} meanFirstPassageTime(mcWeather) ``` indicating e.g. that the average number of days of sun or cloud before rain is 6.67 if we start counting from a sunny day, and 5 if we start from a cloudy day. Note that we can also specify one or more destination states: ```{r mfpt2} meanFirstPassageTime(mcWeather,"rain") ``` The implementation follows the matrix solutions by [@GrinsteadSnell]. We can check the result by averaging the first passage probability density function: ```{r mfpt3} firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ``` ## Mean recurrence time The `meanRecurrenceTime` method gives the first mean recurrence time (expected number of steps to go back to a state if it was the initial one) for each recurrent state in the transition probabilities matrix for a DTMC. Let's see an example: ```{r mrt-weather} meanRecurrenceTime(mcWeather) ``` Another example, with not all of its states being recurrent: ```{r mrt-probMc} recurrentStates(probMc) meanRecurrenceTime(probMc) ``` ## Absorption probabilities and mean absorption time We are going to use the Drunkard’s random walk from [@GrinsteadSnell]. We have a drunk person walking through the street. Each move the person does, if they have not arrived to either home (corner 1) or to the bar (corner 5) could be to the left corner or to the right one, with equal probability. In case of arrival to the bar or to home, the person stays there. ```{r data-drunkard} drunkProbs <- matlab::zeros(5, 5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ``` Recurrent (in fact absorbing states) are: ```{r rs-drunkard} recurrentStates(drunkMc) ``` Transient states are the rest: ```{r ts-drunkard} transientStates(drunkMc) ``` The probability of either being absorbed by the bar or by the sofa at home are: ```{r ap-drunkard} absorptionProbabilities(drunkMc) ``` which means that the probability of arriving home / bar is inversely proportional to the distance to each one. But we also would like to know how much time does the person take to arrive there, which can be done with `meanAbsorptionTime`: ```{r at-drunkard} meanAbsorptionTime(drunkMc) ``` So it would take `3` steps to arrive to the destiny if the person is either in the second or fourth corner, and `4` steps in case of being at the same distance from home than to the bar. ## Committor probability The committor probability tells us the probability to reach a given state before another given. 
Suppose that we start in a cloudy day, the probabilities of experiencing a rainy day before a sunny one is 0.5: ```{r} committorAB(mcWeather,3,1) ``` ## Hitting probabilities Rewriting the system \eqref{eq:hitting-probs} as: \begin{equation*} A = \left(\begin{array}{c|c|c|c} A_1 & 0 & \ldots & 0 \\ \hline 0 & A_2 & \ldots & 0 \\ \hline \vdots & \vdots & \ddots & 0 \\ \hline 0 & 0 & \ldots & A_n \end{array}\right) \end{equation*} \begin{eqnarray*} A_1 &= \left(\begin{matrix} -1 & p_{1,2} & p_{1,3} & \ldots & p_{1,n} \\ 0 & (p_{2,2} - 1) & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & p_{n, 2} & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ A_2 &= \left(\begin{matrix} (p_{1,1} - 1) & 0 & p_{1,3} & \ldots & p_{1,n} \\ p_{2,1} & -1 & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & 0 & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ \vdots & \vdots\\ A_n &= \left(\begin{matrix} (p_{1,1} - 1) & p_{1,2} & p_{1,3} & \ldots & 0 \\ p_{2,1} & (p_{2,2} -1) & p_{2,3} & \ldots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & p_{n,2} & p_{n,3} & \ldots & -1 \end{matrix}\right)\\ \end{eqnarray*} \begin{equation*} \begin{array}{lr} X_j = \left(\begin{array}{c} h_{1,j} \\ h_{2,j} \\ \vdots \\ h_{n,j} \end{array}\right) & C_j = - \left(\begin{array}{c} p_{1,j} \\ p_{2,j} \\ \vdots \\ p_{n,j} \end{array}\right) \end{array} \end{equation*} we end up having to solve the block systems: \begin{equation} A_j \cdot X_j = C_j \end{equation} Let us imagine the $i$ -th state has transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then that same row would turn into $(0,0, \ldots, 0)$ for some block, thus obtaining a singular matrix. Another case which may give us problems could be: state $i$ has the following transition probabilities: $(0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0)$ and the state $j$ has the following transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then when building some blocks we will end up with rows: \begin{eqnarray*} (0, \ldots, 0, \underset{i)}{-1}, 0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0) \\ (0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0, \underset{j)}{-1}, 0, \ldots, 0) \end{eqnarray*} which are linearly dependent. Our hypothesis is that if we treat the closed communicating classes differently, we *might* delete the linearity in the system. If we have a closed communicating class $C_u$, then $h_{i,j} = 1$ for all $i,j \in C_u$ and $h_{k,j} = 0$ for all $k\not\in C_u$. Then we can set $X_u$ appropriately and solve the other $X_v$ using those values. The method in charge of that in `markovchain` package is `hittingProbabilities`, which receives a Markov chain and computes the matrix $(h_{ij})_{i,j = 1,\ldots, n}$ where $S = \{s_1, \ldots, s_n\}$ is the set of all states of the chain. For the following chain: ```{r hitting-data} M <- matlab::zeros(5, 5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ``` we want to compute the hitting probabilities. That can be done with: ```{r hitting-probabilities} hittingProbabilities(hittingTest) ``` In the case of the `mcWeather` Markov chain we would obtain a matrix with all its elements set to $1$. 
That makes sense (and is desirable) since if today is sunny, we expect it would be sunny again at certain point in the time, and the same with rainy weather (that way we assure good harvests): ```{r hitting-weather} hittingProbabilities(mcWeather) ``` # Statistical analysis {#sec:statistics} Table \@ref(tab:funs4Stats) lists the functions and methods implemented within the package which help to fit, simulate and predict DTMC. \begin{table}[h] \centering \begin{tabular}{lll} \hline Function & Purpose \\ \hline \hline \code{markovchainFit} & Function to return fitted Markov chain for a given sequence.\\ \code{predict} & Method to calculate predictions from \code{markovchain} or \\ & \code{markovchainList} objects.\\ \code{rmarkovchain} & Function to sample from \code{markovchain} or \code{markovchainList} objects.\\ \hline \end{tabular} \caption{The \pkg{markovchain} statistical functions.} \label{tab:funs4Stats} \end{table} ## Simulation Simulating a random sequence from an underlying DTMC is quite easy thanks to the function `rmarkovchain`. The following code generates a year of weather states according to `mcWeather` underlying stochastic process. ```{r simulatingAMarkovChain} weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ``` Similarly, it is possible to simulate one or more sequences from a non-homogeneous Markov chain, as the following code (applied on CCHC example) exemplifies. ```{r simulatingAListOfMarkovChain} patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ``` Two advance parameters are available to the `rmarkovchain` method which helps you decide which implementation to use. There are four options available : \proglang{R}, \proglang{R} in parallel, \proglang{C++} and \proglang{C++} in parallel. Two boolean parameters `useRcpp` and `parallel` will decide which implementation will be used. Default is \code{useRcpp = TRUE} and \code{parallel = FALSE} i.e. \proglang{C++} implementation. The \proglang{C++} implementation is generally faster than the `R` implementation. If you have multicore processors then you can take advantage of `parallel` parameter by setting it to `TRUE`. When both `Rcpp=TRUE` and `parallel=TRUE` the parallelization has been carried out using \pkg{RcppParallel} package \citep{pkg:RcppParallel}. ## Estimation A time homogeneous Markov chain can be fit from given data. Four methods have been implemented within current version of \pkg{markovchain} package: maximum likelihood, maximum likelihood with Laplace smoothing, Bootstrap approach, maximum a posteriori. Equation \ref{eq:MLE} shows the maximum likelihood estimator (MLE) of the $p_{ij}$ entry, where the $n_{ij}$ element consists in the number sequences $\left( X_{t}=s_{i}, X_{t+1}=s_{j}\right)$ found in the sample, that is \begin{equation} {\hat p^{MLE}}_{ij} = \frac{n_{ij}}{\sum\limits_{u = 1}^k {n_{iu}}}. \label{eq:MLE} \end{equation} Equation \@ref(eq:SE) shows the `standardError` of the MLE \citep{MSkuriat}. \begin{equation} SE_{ij} = \frac{ {\hat p^{MLE}}_{ij} }{\sqrt{n_{ij}}} \label{eq:SE} \end{equation} ```{r fitMcbyMLE2} weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ``` The Laplace smoothing approach is a variation of the MLE, where the $n_{ij}$ is substituted by $n_{ij}+\alpha$ (see Equation \ref{eq:LAPLACE}), being $\alpha$ an arbitrary positive stabilizing parameter. 
\begin{equation}
{\hat p^{LS}}_{ij} = \frac{{{n_{ij}} + \alpha }}{{\sum\limits_{u = 1}^k {\left( {{n_{iu}} + \alpha } \right)} }}
\label{eq:LAPLACE}
\end{equation}

```{r fitMcbyLAPLACE}
weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace",
                                       laplacian = 0.01, name = "Weather LAPLACE")
weatherFittedLAPLACE$estimate
```

(NOTE: the confidence interval option is enabled by default. Disable it to speed up computations.)

Both the MLE and the Laplace approach are based on the `createSequenceMatrix` function, which returns the raw counts transition matrix.

```{r fitSequenceMatrix}
createSequenceMatrix(stringchar = weathersOfDays)
```

`stringchar` could contain `NA` values, and the transitions containing `NA` would be ignored.

An issue occurs when the sample contains only one realization of a state (say $X_{\beta}$) which is located at the end of the data sequence, since it yields a row of zeros (no sample to estimate the conditional distribution of the transitions). In this case the estimated transition matrix is corrected assuming $p_{\beta,j}=1/k$, where $k$ is the number of possible states.

`createSequenceMatrix` can also be used to obtain raw count transition matrices from a given $n \times 2$ matrix, as the following example shows:

```{r fitSequenceMatrix2}
myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2)
createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE)
```

A bootstrap estimation approach has been developed within the package in order to provide an indication of the variability of the ${\hat p}_{ij}$ estimates. The bootstrap approach implemented within the \pkg{markovchain} package follows these steps:

1. bootstrap the data sequences following the conditional distributions of states estimated from the original one. The default number of bootstrap samples is 10, as specified in the `nboot` parameter of the `markovchainFit` function.
2. apply MLE estimation on the bootstrapped data sequences, which are saved in the `bootStrapSamples` slot of the returned list.
3. the ${p^{BOOTSTRAP}}_{ij}$ is the average of all ${p^{MLE}}_{ij}$ across the `bootStrapSamples` list, normalized by row. A `standardError` of the $\hat{{p^{MLE}}_{ij}}$ estimate is provided as well.

```{r fitMcbyBootStrap1}
weatherFittedBOOT <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 20)
weatherFittedBOOT$estimate
weatherFittedBOOT$standardError
```

The bootstrapping process can be done in parallel thanks to the \pkg{RcppParallel} package \citep{pkg:RcppParallel}. The parallelized implementation is definitely suggested when the data sample size or the required number of bootstrap runs is high.

```{r fitMcbyBootStrap2, eval=FALSE}
weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays, method = "bootstrap",
                                            nboot = 200, parallel = TRUE)
weatherFittedBOOTParallel$estimate
weatherFittedBOOTParallel$standardError
```

The parallel bootstrapping uses all the available cores on a machine by default. However, it is also possible to tune the number of threads used. Note that this should be done in R before calling the `markovchainFit` function. For example, the following code will set the number of threads to 2.

```{r fitMcbyBootStrap3, eval=FALSE}
RcppParallel::setNumThreads(2)
```

For more details, please refer to the \pkg{RcppParallel} web site.

For all the fitting methods, the `logLikelihood` \citep{MSkuriat} denoted in Equation \ref{eq:LLH} is provided.
\begin{equation}
LLH = \sum_{i,j} n_{ij} \cdot \log (p_{ij})
\label{eq:LLH}
\end{equation}

where $n_{ij}$ is the entry of the frequency matrix and $p_{ij}$ is the entry of the transition probability matrix.

```{r fitMcbyMLE1}
weatherFittedMLE$logLikelihood
weatherFittedBOOT$logLikelihood
```

Confidence matrices of estimated parameters (parametric for MLE, non-parametric for bootstrap) are available as well. The `confidenceInterval` is provided with the two matrices: `lowerEndpointMatrix` and `upperEndpointMatrix`. The confidence level (CL) is 0.95 by default and can be given as an argument of the function `markovchainFit`. This is used to obtain the standard score (z-score). From classical inference theory, if $ci$ is the required level of confidence and a normal distribution is assumed, the z-score is $zscore(ci) = \Phi^{-1} \left ( 1-\frac{1-ci}{2} \right )$. Equations \ref{eq:CIL} and \ref{eq:CIU} \citep{MSkuriat} show the `confidenceInterval` of a fitting. Note that each entry of the matrices is bounded between 0 and 1.

\begin{align}
LowerEndpoint_{ij} = p_{ij} - zscore (CL) * SE_{ij} \label{eq:CIL} \\
UpperEndpoint_{ij} = p_{ij} + zscore (CL) * SE_{ij} \label{eq:CIU}
\end{align}

```{r confint}
weatherFittedMLE$confidenceInterval
weatherFittedBOOT$confidenceInterval
```

A special function, `multinomialConfidenceIntervals`, has been written in order to obtain multinomial-wise confidence intervals. The code is based on an Rcpp translation of the functions of the \pkg{MultinomialCI} package \cite{pkg:MultinomialCI}, which were themselves based on the \cite{sison1995simultaneous} paper.

```{r multinomial}
multinomialConfidenceIntervals(transitionMatrix = weatherFittedMLE$estimate@transitionMatrix,
                               countsTransitionMatrix = createSequenceMatrix(weathersOfDays))
```

The functions for fitting DTMC have mostly been rewritten in \proglang{C++} using \pkg{Rcpp} \cite{RcppR} since version 0.2.

It is also possible to fit a DTMC object from `matrix` or `data.frame` objects as shown in the following code.

```{r fitMclists}
data(holson)
singleMc<-markovchainFit(data=holson[,2:12],name="holson")
```

The same applies for `markovchainList` (output length has been limited).

```{r fitMclistsFit1, output.lines=20}
mcListFit<-markovchainListFit(data=holson[,2:6],name="holson")
mcListFit$estimate
```

Finally, given a `list` object, it is possible to fit a `markovchain` object or to obtain the raw transition matrix.

```{r fitMclistsFit2}
c1<-c("a","b","a","a","c","c","a")
c2<-c("b")
c3<-c("c","a","a","c")
c4<-c("b","a","b","a","a","c","b")
c5<-c("a","a","c",NA)
c6<-c("b","c","b","c","a")
mylist<-list(c1,c2,c3,c4,c5,c6)
mylistMc<-markovchainFit(data=mylist)
mylistMc
```

The same works for `markovchainListFit`.

```{r fitAMarkovChainListfromAlist, output.lines=15}
markovchainListFit(data=mylist)
```

If any transition contains `NA`, it will be ignored in the results, as the above example showed.

## Prediction

The $n$-step forward predictions can be obtained using the `predict` methods explicitly written for `markovchain` and `markovchainList` objects. The prediction is the mode of the conditional distribution of $X_{t+1}$ given $X_{t}=s_{j}$, where $s_{j}$ is the last realization of the DTMC (homogeneous or non-homogeneous).

### Predicting from a markovchain object

The 3-day forward predictions from a `markovchain` object can be generated as follows, assuming that the last two days were respectively "cloudy" and "sunny".
```{r markovchainPredict}
predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"), n.ahead = 3)
```

### Predicting from a markovchainList object

Given an initial two-year health status, the five-year ahead prediction of any CCRC guest is

```{r markovchainListPredict}
predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5)
```

The prediction stops before the fifth year, since the underlying non-homogeneous Markov chain has a length of four. In order to continue five years ahead, the `continue=TRUE` parameter setting makes the `predict` method keep using the last `markovchain` in the sequence list.

```{r markovchainListPredict2}
predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE)
```

## Statistical Tests

In this section, we describe the statistical tests: assessing the Markov property (`verifyMarkovProperty`), the order (`assessOrder`) and the stationarity (`assessStationarity`) of a Markov chain sequence, and the divergence tests for empirically estimated transition matrices (`verifyEmpiricalToTheoretical`, `verifyHomogeneity`). Most of these tests are based on $\chi^2$ statistics. Relevant references are \cite{kullback1962tests} and \cite{anderson1957statistical}.

All these tests have been designed for small samples, since it becomes easy to detect departures from the Markov property as the sample size increases. In addition, the accuracy of the statistical inference functions has been questioned and will be thoroughly investigated in future versions of the package.

### Assessing the Markov property of a Markov chain sequence

The `verifyMarkovProperty` function verifies whether the Markov property holds for the given chain. The test implemented in the package looks at triplets of successive observations. If $x_1, x_2, \ldots, x_N$ is a set of observations and $n_{ijk}$ is the number of times $t$ $\left(1 \le t \le N-2 \right)$ such that $x_t=i, x_{t+1}=j, x_{t+2}=k$, then if the Markov property holds $n_{ijk}$ follows a Binomial distribution with parameters $n_{ij}$ and $p_{jk}$. A classical $\chi^2$ test can check this distributional assumption, since $\sum_{i}\sum_{j}\sum_{k}\frac{(n_{ijk}-n_{ij}\hat{p_{jk}})^2}{n_{ij}\hat{p_{jk}}}\sim \chi^2\left(q \right )$, where $q$ is the number of degrees of freedom. The number of degrees of freedom $q$ of the $\chi^2$ distribution is given by the formula $r-q+s-1$, where:

* $s$ denotes the number of states $i$ in the state space such that $n_{i} > 0$,
* $q$ denotes the number of pairs $(i, j)$ for which $n_{ij} > 0$, and
* $r$ denotes the number of triplets $(i, j, k)$ for which $n_{ij}n_{jk} > 0$.

```{r test1}
sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a",
                   "b", "a", "a", "b", "b", "b", "a")
verifyMarkovProperty(sample_sequence)
```

### Assessing the order of a Markov chain sequence

The `assessOrder` function checks whether the given chain is of first order or of second order. For each possible present state, we construct a contingency table of the frequency of the future state for each past to present state transition, as shown in Table \ref{tab:order}.

\begin{table}[h]
\centering
\begin{tabular}{l | l | l | l}
\hline
past & present & future & future \\
 & & a & b \\
\hline
\hline
a & a & 2 & 2\\
b & a & 2 & 2\\
\hline
\end{tabular}
\caption{Contingency table to assess the order for the present state a.}
\label{tab:order}
\end{table}

Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value.
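For intuition, the kind of contingency table shown in Table \ref{tab:order} can be reproduced with a few lines of base R. The sketch below is illustrative only: it uses a short made-up two-state sequence and plain `table`/`chisq.test` calls, not the package internals.

```{r orderTableSketch}
# Illustrative sketch (hypothetical sequence): contingency table of past vs
# future states, restricted to observations whose present state is "a".
x <- c("a", "b", "a", "a", "b", "a", "b", "b", "a", "a", "b", "a", "a", "b")
past    <- x[1:(length(x) - 2)]
present <- x[2:(length(x) - 1)]
future  <- x[3:length(x)]
tableForA <- table(past = past[present == "a"], future = future[present == "a"])
tableForA
# chisq.test may warn about small expected counts on such a tiny sample
chisq.test(tableForA)
```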
If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is of first order. ```{r test2} data(rain) assessOrder(rain$rain) ``` ### Assessing the stationarity of a Markov chain sequence The `assessStationarity` function assesses if the transition probabilities of the given chain change over time. To be more specific, the chain is stationary if the following condition meets. \begin{equation} p_{ij}(t) = p_{ij} ~\textrm{ for all }~t \label{eq:stationarity} \end{equation} For each possible state, we construct a contingency table of the estimated transition probabilities over time as shown in Table \ref{tab:stationarity}. \begin{table}[h] \centering \begin{tabular}{l | l | l} \hline time (t) & probability of transition to a & probability of transition to b \\ \hline \hline 1 & 0 & 1\\ 2 & 0 & 1\\ . & . & . \\ . & . & . \\ . & . & . \\ 16 & 0.44 & 0.56\\ \hline \end{tabular} \caption{Contingency table to assess the stationarity of the state a.} \label{tab:stationarity} \end{table} Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is stationary. ```{r test3} assessStationarity(rain$rain, 10) ``` ### Divergence tests for empirically estimated transition matrices This section discusses tests developed to verify whether: 1. An empirical transition matrix is consistent with a theoretical one. 2. Two or more empirical transition matrices belongs to the same DTMC. The first test is implemented by the `verifyEmpiricalToTheoretical` function. Being $f_{ij}$ the raw transition count, \cite{kullback1962tests} shows that $2*\sum_{i=1}^{r}\sum_{j=1}^{r}f_{ij}\ln\frac{f_{ij}}{f_{i.}P\left( E_j | E_i\right)} \sim \chi^2\left ( r*(r-1) \right )$. The following example is taken from \cite{kullback1962tests}: ```{r divergence1} sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ``` The second one is implemented by the `verifyHomogeneity` function, inspired by \cite[section~9]{kullback1962tests}. Assuming that $i=1,2, \ldots, s$ DTMC samples are available and that the cardinality of the state space is $r$ it verifies whether the $s$ chains belongs to the same unknown one. \cite{kullback1962tests} shows that its test statistics follows a chi-square law, $2*\sum_{i=1}^{s}\sum_{j=1}^{r}\sum_{k=1}^{r}f_{ijk}\ln\frac{n*f_{ijk}}{f_{i..}f_{.jk}} \sim \chi^2\left ( r*(r-1) \right )$. Also the following example is taken from \cite{kullback1962tests}: ```{r divergence2} data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ``` ## Continuous Times Markov Chains ### Intro The \pkg{markovchain} package provides functionality for continuous time Markov chains (CTMCs). CTMCs are a generalization of discrete time Markov chains (DTMCs) in that we allow time to be continuous. We assume a finite state space $S$ (for an infinite state space wouldn't fit in memory). 
We can think of CTMCs as Markov chains in which state transitions can happen at any time. More formally, we would like our CTMCs to satisfy the following two properties:

* The Markov property - let $F_{X(s)}$ denote the information about $X$ up to time $s$. Let $j \in S$ and $s \leq t$. Then, $P(X(t) = j|F_{X(s)}) = P(X(t) = j|X(s))$.
* Time homogeneity - $P(X(t) = j|X(s) = k) = P(X(t-s) = j|X(0) = k)$.

If both the above properties are satisfied, it is referred to as a time-homogeneous CTMC. If a transition occurs at time $t$, then $X(t)$ denotes the new state and $X(t)\neq X(t-)$.

Now, let $X(0)=x$ and let $T_x$ be the time a transition occurs from this state. We are interested in the distribution of $T_x$. For $s,t \geq 0$, it can be shown that $P(T_x > s+t | T_x > s) = P(T_x > t)$. This is the memoryless property that only the exponential random variable exhibits. Therefore, this is the sought distribution, and each state $s \in S$ has an exponential holding parameter $\lambda(s)$. Since $\mathrm{E}T_x = \frac{1}{\lambda(x)}$, the higher the rate $\lambda(x)$, the smaller the expected time of transitioning out of the state $x$.

However, specifying this parameter alone for each state would only paint an incomplete picture of our CTMC. To see why, consider a state $x$ that may transition to either state $y$ or $z$. The holding parameter enables us to predict when a transition may occur if we start off in state $x$, but tells us nothing about which state will be next. To this end, we also need transition probabilities associated with the process, defined as follows (for $y \neq x$): $p_{xy} = P(X(T_x) = y | X(0) = x)$. Note that $\sum_{y \neq x} p_{xy} = 1$. Let $Q$ denote this transition matrix ($Q_{ij} = p_{ij}$). What is key here is that $T_x$ and the state $y$ are independent random variables. Let's define $\lambda(x, y) = \lambda(x) p_{xy}$.

We now look at Kolmogorov's backward equation. Let's define $P_{ij}(t) = P(X(t) = j | X(0) = i)$ for $i, j \in S$. The backward equation is given by (it can be proven that) $P_{ij}(t) = \delta_{ij}e^{-\lambda(i)t} + \int_{0}^{t}\lambda(i)e^{-\lambda(i)s} \sum_{k \neq i} Q_{ik} P_{kj}(t-s) ds$. Basically, the first term is non-zero if and only if $i=j$ and represents the probability that the first transition from state $i$ occurs after time $t$. This would mean that at $t$, the state is still $i$. The second term accounts for any transitions that may occur before time $t$ and denotes the probability that at time $t$, when the smoke clears, we are in state $j$.

This equation can be represented compactly as follows: $P'(t) = AP(t)$, where $A$ is the *generator* matrix.

\[
A(i, j) =
\begin{cases}
\lambda(i, j) & \mbox{if } i \neq j \\
-\lambda(i) & \mbox{else.}
\end{cases}
\]

Observe that the sum of each row is 0. A CTMC can be completely specified by the generator matrix.

### Stationary Distributions

The following theorem guarantees the existence of a unique stationary distribution for CTMCs. Note that $X(t)$ being irreducible and recurrent is the same as its embedded discrete time Markov chain $X_n$ being irreducible and recurrent.

Suppose that $X(t)$ is irreducible and recurrent. Then $X(t)$ has an invariant measure $\eta$, which is unique up to multiplicative factors. Moreover, for each $k \in S$, we have

\[\eta_k = \frac{\pi_k}{\lambda(k)}\]

where $\pi$ is the unique invariant measure of the embedded discrete time Markov chain $X_n$. Finally, $\eta$ satisfies

\[0 < \eta_j < \infty, \forall j \in S\]

and if $\sum_i \eta_i < \infty$ then $\eta$ can be normalized to get a stationary distribution.
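To make the relation $\eta_k = \frac{\pi_k}{\lambda(k)}$ concrete, the following minimal sketch recovers the stationary distribution of a CTMC from its embedded DTMC. The two-state generator used here is hypothetical (it is not one of the package examples); for an irreducible generator like this one, the normalized result should agree with calling `steadyStates` directly on the corresponding `ctmc` object, as shown later in this section.

```{r embeddedStationarySketch}
library(markovchain)
ctmcStates <- c("up", "down")
# hypothetical generator matrix
Q <- matrix(c(-2, 2,
               1, -1), nrow = 2, byrow = TRUE,
            dimnames = list(ctmcStates, ctmcStates))
lambda <- -diag(Q)            # holding rates lambda(k)
embeddedP <- Q / lambda       # divide each row k by lambda(k)
diag(embeddedP) <- 0          # the embedded jump chain has no self-transitions
embeddedMc <- new("markovchain", states = ctmcStates,
                  transitionMatrix = embeddedP, name = "embedded chain")
piEmbedded <- steadyStates(embeddedMc)  # invariant measure of the embedded DTMC
eta <- piEmbedded / lambda              # eta_k = pi_k / lambda(k)
eta / sum(eta)                          # normalized stationary distribution of the CTMC
```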
### Estimation Let the data set be $D = \{(s_0, t_0), (s_1, t_1), ..., (s_{N-1}, t_{N-1})\}$ where $N=|D|$. Each $s_i$ is a state from the state space $S$ and during the time $[t_i,t_{i+1}]$ the chain is in state $s_i$. Let the parameters be represented by $\theta = \{\lambda, P\}$ where $\lambda$ is the vector of holding parameters for each state and $P$ the transition matrix of the embedded discrete time Markov chain. Then the probability is given by \[ {Pr(D | \theta) \propto \lambda(s_0)e^{-\lambda(s_0)(t_1-t_0)}Pr(s_1|s_0) \cdot\ldots\cdot \lambda(s_{N-2})e^{-\lambda(s_{N-2})(t_{N-1}-t_{N-2})}Pr(s_{N-1}|s_{N-2})} \] Let $n(j|i)$ denote the number of $i$->$j$ transitions in $D$, and $n(i)$ the number of times $s_i$ occurs in $D$. Let $t(s_i)$ denote the total time the chain spends in state $s_i$. Then the MLEs are given by \[ \hat{\lambda(s)} = \frac{n(s)}{t(s)},\hat{Pr(j|i)}=\frac{n(j|i)}{n(i)} \] ### Expected Hitting Time The package provides a function `ExpectedTime` to calculate average hitting time from one state to another. Let the final state be j, then for every state $i \in S$, where $S$ is the set of all states and holding time $q_{i} > 0$ for every $i \neq j$. Assuming the conditions to be true, expected hitting time is equal to minimal non-negative solution vector $p$ to the system of linear equations: \begin{equation} \begin{cases} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{k} = 1 & k \neq j \end{cases} \label{eq:EHT} \end{equation} ### Probability at time t The package provides a function `probabilityatT` to calculate probability of every state according to given `ctmc` object. Here we use Kolmogorov's backward equation $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. Here $P(t)$ is the transition function at time t. The value $P(t)[i][j]$ at time $P(t)$ describes the probability of the state at time $t$ to be equal to j if it was equal to i at time $t=0$. It takes care of the case when `ctmc` object has a generator represented by columns. If initial state is not provided, the function returns the whole transition matrix $P(t)$. ### Examples To create a CTMC object, you need to provide a valid generator matrix, say $Q$. The CTMC object has the following slots - states, generator, by row, name (look at the documentation object for further details). Consider the following example in which we aim to model the transition of a molecule from the $\sigma$ state to the $\sigma^*$ state. When in the former state, if it absorbs sufficient energy, it can make the jump to the latter state and remains there for some time before transitioning back to the original state. Let us model this by a CTMC: ```{r rCtmcInit} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ``` To generate random CTMC transitions, we provide an initial distribution of the states. This must be in the same order as the dimnames of the generator. The output can be returned either as a list or a data frame. ```{r rctmcRandom0} statesDist <- c(0.8, 0.2) rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df", include.T0 = FALSE) ``` $n$ represents the number of samples to generate. There is an optional argument $T$ for `rctmc`. It represents the time of termination of the simulation. 
To use this feature, set $n$ to a very high value, say `Inf` (since we do not know the number of transitions beforehand) and set $T$ accordingly.

```{r ctmcRandom1}
statesDist <- c(0.8, 0.2)
rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2)
```

To obtain the stationary distribution, simply invoke the `steadyStates` function.

```{r rctmcSteadyStates}
steadyStates(molecularCTMC)
```

For fitting, use the `ctmcFit` function. It returns the MLE values for the parameters along with the confidence intervals.

```{r rctmcFitting}
data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"),
             c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9))
ctmcFit(data)
```

One approach to obtain the generator matrix is to apply the `logm` function from the \pkg{expm} package on a transition matrix. Numeric issues arise, see \cite{israel2001finding}. For example, applying the standard `method` ('Higham08') on `mcWeather` raises an error, whilst the alternative method (eigenvalue decomposition) is OK. The following code estimates the generator matrix of the `mcWeather` transition matrix.

```{r mcWeatherQ}
mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen')
mcWeatherQ
```

Therefore, the "half-day" transition probability for the mcWeather DTMC is

```{r mcWeatherHalfDay}
mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5)
mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,name="Half Day Weather Transition Matrix")
mcWeatherHalfDay
```

The \pkg{ctmcd} package \citep{pkg:ctmcd} provides various functions to estimate the generator matrix (GM) of a CTMC process using different methods. The following code provides a way to join \pkg{markovchain} and \pkg{ctmcd} computations.

```{r ctmcd1}
require(ctmcd)
require(expm)
# defines a function to transform a GM into a TM
gm_to_markovchain<-function(object, t=1) {
  if(!(class(object) %in% c("gm","matrix","Matrix")))
    stop("Error! Expecting either a matrix or a gm object")
  if ( class(object) %in% c("matrix","Matrix")) generator_matrix<-object else generator_matrix<-as.matrix(object[["par"]])
  # must add importClassesFrom("markovchain",markovchain) in the NAMESPACE
  # must add importFrom(expm, "expm")
  transitionMatrix<-expm(generator_matrix*t)
  out<-as(transitionMatrix,"markovchain")
  return(out)
}
# loading ctmcd dataset
data(tm_abs)
gm0=matrix(1,8,8) # initializing
diag(gm0)=0
diag(gm0)=-rowSums(gm0)
gm0[8,]=0
gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) # estimating GM
mc_at_2=gm_to_markovchain(object=gmem, t=2) # converting to TM at time 2
```

## Pseudo-Bayesian Estimation

\cite{Hu2002} shows an empirical quasi-Bayesian method to estimate transition matrices, given an empirical $\hat{P}$ transition matrix (estimated using the classical approach) and an a priori estimate $Q$. In particular, each row of the matrix is estimated using the linear combination $\alpha \cdot Q+\left(1-\alpha\right) \cdot P$, where $\alpha$ is defined for each row as Equation \ref{eq:pseudobayes} shows

\begin{equation}
\left\{\begin{matrix} \hat{\alpha_i}=\frac{\hat{K_i}}{v\left(i \right )+\hat{K_i}}\\ \hat{K_i}=\frac{v\left(i \right)^2 - \sum_{j}Y_{ij}^2}{\sum_{j}(Y_{ij}-v\left(i \right)*q_{ij})^2} \end{matrix}\right.
\label{eq:pseudobayes} \end{equation} The following code returns the pseudo Bayesian estimate of the transition matrix: ```{r pseudobayes} pseudoBayesEstimator <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) } ``` We then apply it to the weather example: ```{r pseudobayes2} trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain") aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain") smallSample<-rmarkovchain(n=20,object = trueMc) smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample) pseudoBayesEstimator( raw = smallSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix biggerSample<-rmarkovchain(n=100,object = trueMc) biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample) pseudoBayesEstimator( raw = biggerSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix bigSample<-rmarkovchain(n=1000,object = trueMc) bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample) pseudoBayesEstimator( raw = bigSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix ``` ## Bayesian Estimation The \pkg{markovchain} package provides functionality for maximum a posteriori (MAP) estimation of the chain parameters (at the time of writing this document, only first order models are supported) by Bayesian inference. It also computes the probability of observing a new data set, given a (different) data set. This vignette provides the mathematical description for the methods employed by the package. ### Notation and set-up The data is denoted by $D$, the model parameters (transition matrix) by $\theta$. The object of interest is $P(\theta | D)$ (posterior density). $\mathcal{A}$ represents an alphabet class, each of whose members represent a state of the chain. Therefore \[D = s_0 s_1 ... s_{N-1}, s_t \in \mathcal{A}\] where $N$ is the length of the data set. Also, \[\theta = \{p(s|u), s \in \mathcal{A}, u \in \mathcal{A} \}\] where $\sum_{s \in \mathcal{A}} p(s|u) = 1$ for each $u \in \mathcal{A}$. Our objective is to find $\theta$ which maximizes the posterior. That is, if our solution is denoted by $\hat{\theta}$, then \[\hat{\theta} = \underset{\theta}{argmax}P(\theta | D)\] where the search space is the set of right stochastic matrices of dimension $|\mathcal{A}|x|\mathcal{A}|$. $n(u, s)$ denotes the number of times the word $us$ occurs in $D$ and $n(u)=\sum_{s \in \mathcal{A}}n(u, s)$. The hyper-parameters are similarly denoted by $\alpha(u, s)$ and $\alpha(u)$ respectively. ### Methods Given $D$, its likelihood conditioned on the observed initial state in D is given by \[P(D|\theta) = \prod_{s \in \mathcal{A}} \prod_{u \in \mathcal{A}} p(s|u)^{n(u, s)}\] Conjugate priors are used to model the prior $P(\theta)$. The reasons are two fold: 1. Exact expressions can be derived for the MAP estimates, expectations and even variances 2. 
Model order selection/comparison can be implemented easily (available in a future release of the package).

The hyper-parameters determine the form of the prior distribution, which is a product of Dirichlet distributions

\[P(\theta) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{\alpha(u, s) - 1} \Big\}\]

where $\Gamma(.)$ is the Gamma function. The hyper-parameters are specified using the `hyperparam` argument in the `markovchainFit` function. If this argument is not specified, then a default value of 1 is assigned to each hyper-parameter, resulting in the prior distribution of each chain parameter being uniform over $[0,1]$.

Given the likelihood and the prior as described above, the evidence $P(D)$ is simply given by

\[P(D) = \int P(D|\theta) P(\theta) d\theta\]

which simplifies to

\[ P(D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u))} \Big\} \]

Using Bayes' theorem, the posterior now becomes (thanks to the choice of conjugate priors)

\[ P(\theta | D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(n(u) + \alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{n(u, s) + \alpha(u, s) - 1} \Big\} \]

Since this is again a product of Dirichlet distributions, the marginal distribution of a particular parameter $P(s|u)$ of our chain is given by

\[ P(s|u) \sim Beta(n(u, s) + \alpha(u, s), n(u) + \alpha(u) - n(u, s) - \alpha(u, s)) \]

Thus, the MAP estimate $\hat{\theta}$ is given by

\[ \hat{\theta} = \Big\{ \frac{n(u, s) + \alpha(u, s) - 1}{n(u) + \alpha(u) - |\mathcal{A}|}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \]

The function also returns the expected value, given by

\[ \text{E}_{\text{post}} p(s|u) = \Big\{ \frac{n(u, s) + \alpha(u, s)}{n(u) + \alpha(u)}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \]

The variance is given by

\[ \text{Var}_{\text{post}} p(s|u) = \frac{n(u, s) + \alpha(u, s)}{(n(u) + \alpha(u))^2} \frac{n(u) + \alpha(u) - n(u, s) - \alpha(u, s)}{n(u) + \alpha(u) + 1} \]

The square root of this quantity is the standard error, which is returned by the function. The confidence intervals are constructed by computing the inverse of the beta integral.

### Predictive distribution

Given the old data set, the probability of observing new data is $P(D'|D)$ where $D'$ is the new data set. Let $m(u, s), m(u)$ denote the corresponding counts for the new data. Then,

\[ P(D'|D) = \int P(D' | \theta) P(\theta | D) d\theta \]

We already know the expressions for both quantities in the integral, and it turns out to be similar to evaluating the evidence

\[ P(D'|D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + m(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u) + m(u))} \Big\} \]

### Choosing the hyper-parameters

The hyper-parameters model the shape of the parameters' prior distribution. These must be provided by the user. The package offers functionality to translate a given prior belief transition matrix into the hyper-parameter matrix. It is assumed that this belief matrix corresponds to the mean value of the parameters.
Since the relation \[ \text{E}_{\text{prior}} p(s | u) = \frac{\alpha(u, s)}{\alpha(u)} \] holds, the function accepts as input the belief matrix as well as a scaling vector (serves as a proxy for $\alpha(.)$) and proceeds to compute $\alpha(., .)$. Alternatively, the function accepts a data sample and infers the hyper-parameters from it. Since the mode of a parameter (with respect to the prior distribution) is proportional to one less than the corresponding hyper-parameter, we set \[ \alpha(u, s) - 1 = m(u, s) \] where $m(u, s)$ is the $u\rightarrow s$ transition count in the data sample. This is regarded as a 'fake count' which helps $\alpha(u, s)$ to reflect knowledge of the data sample. ### Usage and examples ```{r loadAndDoExample} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") ``` For the purpose of this section, we shall continue to use the weather of days example introduced in the main vignette of the package (reproduced above for convenience). Let us invoke the fit function to estimate the MAP parameters with 92\% confidence bounds and hyper-parameters as shown below, based on the first 200 days of the weather data. Additionally, let us find out what the probability is of observing the weather data for the next 165 days. The usage would be as follows ```{r MAPFit} hyperMatrix<-matrix(c(1, 1, 2, 3, 2, 1, 2, 2, 3), nrow = 3, byrow = TRUE, dimnames = list(weatherStates,weatherStates)) markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix) ``` The results should not change after permuting the dimensions of the matrix. ```{r MAPFit2} hyperMatrix2<- hyperMatrix[c(2,3,1), c(2,3,1)] markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix2) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix2) ``` Note that the predictive probability is very small. However, this can be useful when comparing model orders. Suppose we have an idea of the (prior) transition matrix corresponding to the expected value of the parameters, and have a data set from which we want to deduce the MAP estimates. We can infer the hyper-parameters from this known transition matrix itself, and use this to obtain our MAP estimates. ```{r inferHyperparam} inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) ``` Alternatively, we can use a data sample to infer the hyper-parameters. ```{r inferHyperparam2} inferHyperparam(data = weathersOfDays[1:15]) ``` In order to use the inferred hyper-parameter matrices, we do ```{r inferHyperparam3} hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) hyperMatrix3 <- hyperMatrix3$scaledInference hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15]) hyperMatrix4 <- hyperMatrix4$dataInference ``` Now we can safely use `hyperMatrix3` and `hyperMatrix4` with `markovchainFit` (in the `hyperparam` argument). Supposing we don't provide any hyper-parameters, then the prior is uniform. This is the same as maximum likelihood. 
```{r MAPandMLE} data(preproglucacon) preproglucacon <- preproglucacon[[2]] MLEest <- markovchainFit(preproglucacon, method = "mle") MAPest <- markovchainFit(preproglucacon, method = "map") MLEest$estimate MAPest$estimate ``` # Applications {#sec:applications} This section shows applications of DTMC in various fields. ## Weather forecasting {#app:weather} Markov chains provide a simple model to predict the next day's weather given the current meteorological condition. The first application herewith shown is the "Land of Oz example" from \cite{landOfOz}, the second is the "Alofi Island Rainfall" from \cite{averyHenderson}. ### Land of Oz {#sec:wfLandOfOz} The Land of Oz is acknowledged not to have ideal weather conditions at all: the weather is snowy or rainy very often and, once more, there are never two nice days in a row. Consider three weather states: rainy, nice and snowy. Let the transition matrix be as in the following: ```{r weatPred1} mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"), transitionMatrix = matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25,0.25,0.5), byrow = T, nrow = 3)) ``` Given that today it is a nice day, the corresponding stochastic row vector is $w_{0}=(0\:,1\:,0)$ and the forecast after 1, 2 and 3 days are given by ```{r weatPred2} W0 <- t(as.matrix(c(0, 1, 0))) W1 <- W0 * mcWP; W1 W2 <- W0 * (mcWP ^ 2); W2 W3 <- W0 * (mcWP ^ 3); W3 ``` As can be seen from $w_{1}$, if in the Land of Oz today is a nice day, tomorrow it will rain or snow with probability 1. One week later, the prediction can be computed as ```{r weatPred3} W7 <- W0 * (mcWP ^ 7) W7 ``` The steady state of the chain can be computed by means of the `steadyStates` method. ```{r weatPred4} q <- steadyStates(mcWP) q ``` Note that, from the seventh day on, the predicted probabilities are substantially equal to the steady state of the chain and they don't depend from the starting point, as the following code shows. ```{r weatPred5} R0 <- t(as.matrix(c(1, 0, 0))) R7 <- R0 * (mcWP ^ 7); R7 S0 <- t(as.matrix(c(0, 0, 1))) S7 <- S0 * (mcWP ^ 7); S7 ``` ### Alofi Island Rainfall {#sec:wfAlofi} Alofi Island daily rainfall data were recorded from January 1st, 1987 until December 31st, 1989 and classified into three states: "0" (no rain), "1-5" (from non zero until 5 mm) and "6+" (more than 5mm). The corresponding dataset is provided within the \pkg{markovchain} package. ```{r Alofi1} data("rain", package = "markovchain") table(rain$rain) ``` The underlying transition matrix is estimated as follows. ```{r Alofi2} mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate mcAlofi ``` The long term daily rainfall distribution is obtained by means of the `steadyStates` method. ```{r Alofi3} steadyStates(mcAlofi) ``` ## Finance and Economics {#app:fin} Other relevant applications of DTMC can be found in Finance and Economics. ### Finance {#fin:fin} Credit ratings transitions have been successfully modeled with discrete time Markov chains. Some rating agencies publish transition matrices that show the empirical transition probabilities across credit ratings. The example that follows comes from \pkg{CreditMetrics} \proglang{R} package \citep{CreditMetricsR}, carrying Standard \& Poor's published data. 
```{r ratings1} rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D") creditMatrix <- matrix( c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01, 0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01, 0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06, 0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18, 0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06, 0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20, 0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79, 0, 0, 0, 0, 0, 0, 0, 100 )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE) ``` It is easy to convert such matrices into `markovchain` objects and to perform some analyses ```{r ratings2} creditMc <- new("markovchain", transitionMatrix = creditMatrix, name = "S&P Matrix") absorbingStates(creditMc) ``` ### Economics {#fin:ec} For a recent application of \pkg{markovchain} in Economic, see \cite{manchesterR}. A dynamic system generates two kinds of economic effects \citep{bardPpt}: 1. those incurred when the system is in a specified state, and 2. those incurred when the system makes a transition from one state to another. Let the monetary amount of being in a particular state be represented as a m-dimensional column vector $c^{\rm{S}}$, while let the monetary amount of a transition be embodied in a $C^{R}$ matrix in which each component specifies the monetary amount of going from state i to state j in a single step. Henceforth, Equation \@ref(eq:cost) represents the monetary of being in state $i$. \begin{equation} {c_i} = c_i^{\rm{S}} + \sum\limits_{j = 1}^m {C_{ij}^{\rm{R}}} {p_{ij}}. \label{eq:cost} \end{equation} Let $\bar c = \left[ c_i \right]$ and let $e_i$ be the vector valued 1 in the initial state and 0 in all other, then, if $f_n$ is the random variable representing the economic return associated with the stochastic process at time $n$, Equation \@ref(eq:return) holds: \begin{equation} E\left[ {{f_n}\left( {{X_n}} \right)|{X_0} = i} \right] = {e_i}{P^n}\bar c. \label{eq:return} \end{equation} The following example assumes that a telephone company models the transition probabilities between customer/non-customer status by matrix $P$ and the cost associated to states by matrix $M$. ```{r economicAnalysis1} statesNames <- c("customer", "non customer") P <- zeros(2); P[1, 1] <- .9; P[1, 2] <- .1; P[2, 2] <- .95; P[2, 1] <- .05; rownames(P) <- statesNames; colnames(P) <- statesNames mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company") M <- zeros(2); M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0 ``` If the average revenue for existing customer is +100, the cost per state is computed as follows. ```{r economicAnalysis2} c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,] c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,] ``` For an existing customer, the expected gain (loss) at the fifth year is given by the following code. ```{r economicAnalysis3} as.numeric((c(1, 0)* mcP ^ 5) %*% (as.vector(c(c1, c2)))) ``` ## Actuarial science {#app:act} Markov chains are widely applied in the field of actuarial science. Two classical applications are policyholders' distribution across Bonus Malus classes in Motor Third Party Liability (MTPL) insurance (Section \@ref(sec:bm)) and health insurance pricing and reserving (Section \@ref(sec:hi)). ### MPTL Bonus Malus {#sec:bm} Bonus Malus (BM) contracts grant the policyholder a discount (enworsen) as a function of the number of claims in the experience period. 
The discount (enworsen) is applied on a premium that already allows for known (a priori) policyholder characteristics \citep{denuit2007actuarial} and it usually depends on vehicle, territory, the demographic profile of the policyholder, and policy coverage deep (deductible and policy limits).\\ Since the proposed BM level depends on the claim on the previous period, it can be modeled by a discrete Markov chain. A very simplified example follows. Assume a BM scale from 1 to 5, where 4 is the starting level. The evolution rules are shown in Equation \ref{eq:BM}: \begin{equation} bm_{t + 1} = \max \left( {1,bm_{t} - 1} \right)*\left( {\tilde N = 0} \right) + \min \left( {5,bm_{t} + 2*\tilde N} \right)*\left( {\tilde N \ge 1} \right). \label{eq:BM} \end{equation} The number of claim $\tilde N$ is a random variable that is assumed to be Poisson distributed. ```{r bonusMalus1} getBonusMalusMarkovChain <- function(lambda) { bmMatr <- zeros(5) bmMatr[1, 1] <- dpois(x = 0, lambda) bmMatr[1, 3] <- dpois(x = 1, lambda) bmMatr[1, 5] <- 1 - ppois(q = 1, lambda) bmMatr[2, 1] <- dpois(x = 0, lambda) bmMatr[2, 4] <- dpois(x = 1, lambda) bmMatr[2, 5] <- 1 - ppois(q = 1, lambda) bmMatr[3, 2] <- dpois(x = 0, lambda) bmMatr[3, 5] <- 1 - dpois(x=0, lambda) bmMatr[4, 3] <- dpois(x = 0, lambda) bmMatr[4, 5] <- 1 - dpois(x = 0, lambda) bmMatr[5, 4] <- dpois(x = 0, lambda) bmMatr[5, 5] <- 1 - dpois(x = 0, lambda) stateNames <- as.character(1:5) out <- new("markovchain", transitionMatrix = bmMatr, states = stateNames, name = "BM Matrix") return(out) } ``` Assuming that the a-priori claim frequency per car-year is 0.05 in the class (being the class the group of policyholders that share the same common characteristics), the underlying BM transition matrix and its underlying steady state are as follows. ```{r bonusMalus2} bmMc <- getBonusMalusMarkovChain(0.05) as.numeric(steadyStates(bmMc)) ``` If the underlying BM coefficients of the class are 0.5, 0.7, 0.9, 1.0, 1.25, this means that the average BM coefficient applied on the long run to the class is given by ```{r bonusMalus3} sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25)) ``` This means that the average premium paid by policyholders in the portfolio almost halves in the long run. ### Health insurance example {#sec:hi} Actuaries quantify the risk inherent in insurance contracts evaluating the premium of insurance contract to be sold (therefore covering future risk) and evaluating the actuarial reserves of existing portfolios (the liabilities in terms of benefits or claims payments due to policyholder arising from previously sold contracts), see \cite{deshmukh2012multiple} for details. An applied example can be performed using the data from \cite{de2016assicurazioni} that has been saved in the `exdata` folder. ```{r healthIns6} ltcDemoPath<-system.file("extdata", "ltdItaData.txt", package = "markovchain") ltcDemo<-read.table(file = ltcDemoPath, header=TRUE, sep = ";", dec = ".") head(ltcDemo) ``` The data shows the probability of transition between the state of (A)ctive, to (I)ll and Dead. It is easy to complete the transition matrix. ```{r healthIns7} ltcDemo<-transform(ltcDemo, pIA=0, pII=1-pID, pDD=1, pDA=0, pDI=0) ``` Now we build a function that returns the transition during the $t+1$ th year, assuming that the subject has attained year $t$. 
```{r healthIns8} possibleStates<-c("A","I","D") getMc4Age<-function(age) { transitionsAtAge<-ltcDemo[ltcDemo$age==age,] myTransMatr<-matrix(0, nrow=3,ncol = 3, dimnames = list(possibleStates, possibleStates)) myTransMatr[1,1]<-transitionsAtAge$pAA[1] myTransMatr[1,2]<-transitionsAtAge$pAI[1] myTransMatr[1,3]<-transitionsAtAge$pAD[1] myTransMatr[2,2]<-transitionsAtAge$pII[1] myTransMatr[2,3]<-transitionsAtAge$pID[1] myTransMatr[3,3]<-1 myMc<-new("markovchain", transitionMatrix = myTransMatr, states = possibleStates, name = paste("Age",age,"transition matrix")) return(myMc) } ``` Cause transitions are not homogeneous across ages, we use a `markovchainList` object to describe the transition probabilities for a guy starting at age 100. ```{r healthIns8-prob} getFullTransitionTable<-function(age){ ageSequence<-seq(from=age, to=120) k=1 myList=list() for ( i in ageSequence) { mc_age_i<-getMc4Age(age = i) myList[[k]]<-mc_age_i k=k+1 } myMarkovChainList<-new("markovchainList", markovchains = myList, name = paste("TransitionsSinceAge", age, sep = "")) return(myMarkovChainList) } transitionsSince100<-getFullTransitionTable(age=100) ``` We can use such transition for simulating ten life trajectories for a guy that begins "active" (A) aged 100: ```{r healthIns9} rmarkovchain(n = 10, object = transitionsSince100, what = "matrix", t0 = "A", include.t0 = TRUE) ``` Lets consider 1000 simulated live trajectories, for a healthy guy aged 80. We can compute the expected time a guy will be disabled starting active at age 80. ```{r healthIns10} transitionsSince80<-getFullTransitionTable(age=80) lifeTrajectories<-rmarkovchain(n=1e3, object=transitionsSince80, what="matrix",t0="A",include.t0=TRUE) temp<-matrix(0,nrow=nrow(lifeTrajectories),ncol = ncol(lifeTrajectories)) temp[lifeTrajectories=="I"]<-1 expected_period_disabled<-mean(rowSums((temp))) expected_period_disabled ``` Assuming that the health insurance will pay a benefit of 12000 per year disabled and that the real interest rate is 0.02, we can compute the lump sum premium at 80. ```{r healthIns11} mean(rowMeans(12000*temp%*%( matrix((1+0.02)^-seq(from=0, to=ncol(temp)-1))))) ``` ## Sociology {#app:sociology} Markov chains have been actively used to model progressions and regressions between social classes. The first study was performed by \cite{glassHall}, while a more recent application can be found in \cite{blandenEtAlii}. The table that follows shows the income quartile of the father when the son was 16 (in 1984) and the income quartile of the son when aged 30 (in 2000) for the 1970 cohort. ```{r blandenEtAlii} data("blanden") mobilityMc <- as(blanden, "markovchain") mobilityMc ``` The underlying transition graph is plotted in Figure \@ref(fig:mobility). ```{r mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."} plot(mobilityMc, main = '1970 mobility',vertex.label.cex = 2, layout = layout.fruchterman.reingold) ``` The steady state distribution is computed as follows. Since transition across quartiles are shown, the probability function is evenly 0.25. ```{r blandenEtAlii3} round(steadyStates(mobilityMc), 2) ``` ## Genetics and Medicine {#sec:gen} This section contains two examples: the first shows the use of Markov chain models in genetics, the second shows an application of Markov chains in modelling diseases' dynamics. ### Genetics {#sec:genetics} \cite{averyHenderson} discusses the use of Markov chains in model Preprogucacon gene protein bases sequence. 
The `preproglucacon` dataset in \pkg{markovchain} contains the dataset shown in the package. ```{r preproglucacon1} data("preproglucacon", package = "markovchain") ``` It is possible to model the transition probabilities between bases as shown in the following code. ```{r preproglucacon2} mcProtein <- markovchainFit(preproglucacon$preproglucacon, name = "Preproglucacon MC")$estimate mcProtein ``` ### Medicine {#sec:medicine} Discrete-time Markov chains are also employed to study the progression of chronic diseases. The following example is taken from \cite{craigSendi}. Starting from six month follow-up data, the maximum likelihood estimation of the monthly transition matrix is obtained. This transition matrix aims to describe the monthly progression of CD4-cell counts of HIV infected subjects. ```{r epid1} craigSendiMatr <- matrix(c(682, 33, 25, 154, 64, 47, 19, 19, 43), byrow = T, nrow = 3) hivStates <- c("0-49", "50-74", "75-UP") rownames(craigSendiMatr) <- hivStates colnames(craigSendiMatr) <- hivStates craigSendiTable <- as.table(craigSendiMatr) mcM6 <- as(craigSendiTable, "markovchain") mcM6@name <- "Zero-Six month CD4 cells transition" mcM6 ``` As shown in the paper, the second passage consists in the decomposition of $M_{6}=V \cdot D \cdot V^{-1}$ in order to obtain $M_{1}$ as $M_{1}=V \cdot D^{1/6} \cdot V^{-1}$ . ```{r epid2} eig <- eigen(mcM6@transitionMatrix) D <- diag(eig$values) ``` ```{r epid3} V <- eig$vectors V %*% D %*% solve(V) d <- D ^ (1/6) M <- V %*% d %*% solve(V) mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates) ``` # Discussion, issues and future plans The \pkg{markovchain} package has been designed in order to provide easily handling of DTMC and communication with alternative packages. The package has known several improvements in the recent years: many functions added, porting the software in Rcpp \pkg{Rcpp} package \citep{RcppR} and many methodological improvements that have improved the software reliability. # Acknowledgments {#sec:aknowledgements} The package was selected for Google Summer of Code 2015 support. The authors wish to thank Michael Cole, Tobi Gutman and Mildenberger Thoralf for their suggestions and bug checks. A final thanks also to Dr. Simona C. Minotti and Dr. Mirko Signorelli for their support in drafting this version of the vignettes. \clearpage # References # markovchain/vignettes/gsoc_2017_additions.Rmd0000644000176200001440000005142213762012754020705 0ustar liggesusers--- title: "Google Summer of Code 2017 Additions" author: "Vandit Jain" date: "August 2017" output: rmarkdown::pdf_document bibliography: markovchainBiblio.bib vignette: > %\VignetteIndexEntry{Google Summer of Code 2017 Additions} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} pkgdown: as_is: true extension: pdf --- # Expected Hitting Time using CTMC The package provides `ExpectedTime` function to calculate average hitting time from one state to another. Let the final state be j, then for every state $i \in I$, where $I$ is the set of all possible states and holding time $q_{i} > 0$ for every $i \neq j$. 
Assuming the conditions to be true, the expected hitting time is equal to the minimal non-negative solution vector $p$ of the system of linear equations [@NorrisBook]:

\begin{equation}
\begin{array}{ll}
p_{k} = 0 & k = j \\
-\sum_{l \in I} q_{kl}p_{l} = 1 & k \neq j
\end{array}
\label{eq:EHT}
\end{equation}

For example, consider the following continuous-time Markov chain:

``` {r,message = FALSE}
library(markovchain)
states <- c("a","b","c","d")
byRow <- TRUE
gen <- matrix(data = c(-1, 1/2, 1/2, 0,
                       1/4, -1/2, 0, 1/4,
                       1/6, 0, -1/3, 1/6,
                       0, 0, 0, 0),
              nrow = 4, byrow = byRow, dimnames = list(states,states))
ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc")
```

The generator matrix of the ctmc is:

\[
M = \left(\begin{array}{cccc}
-1 & 1/2 & 1/2 & 0\\
1/4 & -1/2 & 0 & 1/4\\
1/6 & 0 & -1/3 & 1/6\\
0 & 0 & 0 & 0
\end{array}\right)
\]

Now, to calculate the expected hitting time the process will take to hit state $d$ if we start from $a$, we apply the `ExpectedTime` function. The `ExpectedTime` function takes four inputs, namely a `ctmc` class object, the initial state $i$, the final state $j$ for which we want the expected hitting time, and a logical parameter indicating whether to use the Rcpp implementation. By default, the function uses the Rcpp implementation, as it is faster.

``` {r}
ExpectedTime(ctmc,1,4)
```

We find that the expected hitting time for the process to hit state $d$ is 7 time units in this case.

# Calculating Probability at time T using ctmc

The package provides a function `probabilityatT` to calculate the probability of every state according to a given `ctmc` object. Kolmogorov's backward equation gives us a relation between the transition matrix at any time $t$ and the generator matrix [@dobrow2016introduction]:

\begin{equation}
P'(t) = QP(t)
\end{equation}

Here we use the solution of this differential equation, $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. In this equation, $P(t)$ is the transition function at time $t$. The value $P(t)[i][j]$ describes the conditional probability of the state at time $t$ being equal to $j$ if it was equal to $i$ at time $t=0$. It takes care of the case when the `ctmc` object has a generator represented by columns. If the initial state is not provided, the function returns the whole transition matrix $P(t)$.

The function is also implemented using Rcpp, which can be used to reduce computation time; it is used by default.

Next, we consider both the case where the initial state is not given and the case where it is given. In the first case, the function takes two inputs: the first is an object of the S4 class `ctmc` and the second is the final time $t$.

``` {r}
probabilityatT(ctmc,1)
```

Here we get an output in the form of a transition matrix. If we take the second case, i.e. providing an initial state:

``` {r}
probabilityatT(ctmc,1,1)
```

In this case we get the probabilities corresponding to every state; this also includes the probability that the process is in the same state $a$ after time $t=1$.

# Plotting generator matrix of continuous-time markovchains

The package provides a `plot` function for plotting a generator matrix $Q$ in the form of a directed graph where every possible state is assigned a node. Edges connecting these nodes are weighted. The weight of the edge going from state $i$ to state $j$ is equal to the value $Q_{ij}$. This gives a picture of the generator matrix.

For example, we build a ctmc-class object to plot it.
``` {r}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3,
                        1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates,
                     byrow = byRow, generator = gen,
                     name = "Molecular Transition Model")
```

Now if we plot this object we get the following graph:

``` {r}
plot(molecularCTMC)
```

The figure shown is built using the $igraph$ package. The package also provides the option of plotting the graph using the $diagram$ and $DiagrammeR$ packages. A plot using these packages can be built using the following command:

``` {r}
plot(molecularCTMC,package = "diagram")
```

Similarly, one can easily replace the $diagram$ package with $DiagrammeR$.

# Imprecise Continuous-Time Markov chains

Continuous-time Markov chains are mathematical models that are used to describe the state-evolution of dynamical systems under stochastic uncertainty. However, building models using continuous-time Markov chains takes into consideration a number of assumptions which may not be realistic for the domain of application; in particular, the ability to provide exact numerical parameter assessments, and the applicability of time-homogeneity and the eponymous Markov property. Hence we take ICTMCs into consideration.

More technically, an ICTMC is a set of “precise” continuous-time finite-state stochastic processes, and rather than computing expected values of functions, we seek to compute lower expectations, which are tight lower bounds on the expectations that correspond to such a set of “precise” models.

## Types of ICTMCs

For any non-empty bounded set of rate matrices $L$, and any non-empty set $M$ of probability mass functions on $X$, we define the following three sets of stochastic processes that are jointly consistent with $L$ and $M$:

* $P^{W}_{L,M}$ is the consistent set of all well-behaved stochastic processes;
* $P^{WM}_{L,M}$ is the consistent set of all well-behaved Markov chains;
* $P^{WHM}_{L,M}$ is the consistent set of all well-behaved homogeneous Markov chains [@ictmcpaper].

From a practical point of view, after having specified a (precise) stochastic process, one is typically interested in the expected value of some function of interest, or the probability of some event. Similarly, in this work, our main objects of consideration will be the lower probabilities that correspond to the ICTMCs.

## Lower Transition Rate Operators for ICTMCs

A map $Q_{l}$ from $L(X)$ to $L(X)$ is called a lower transition rate operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$ [@ictmcpaper]:

1. $[Q_{l}\mu](x) = 0$
2. $[Q_{l}I_{y}](x) \geq 0$ for all $y \in X$ such that $x \neq y$
3. $[Q_{l}(f + g)](x)\geq [Q_{l}f](x) + [Q_{l}g](x)$
4. $[Q_{l}(\lambda f)](x) = \lambda [Q_{l}f](x)$

## Lower Transition Operators

A map $T_{l}$ from $L(X)$ to $L(X)$ is called a lower transition operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$ [@ictmcpaper]:

1. $[T_{l} f](x) \geq \min \{ f(y) : y \in X \}$
2. $[T_{l}(f +g)](x) \geq [T_{l} f](x)+[T_{l}g](x)$
3. $[T_{l}(\lambda f)](x) = \lambda [T_{l} f](x)$

## impreciseProbabilityatT function

Now I would like to come to the practical purpose of using ICTMC classes. ICTMC classes in this package are defined to represent a generator in which every row, corresponding to a state of the process, is governed by a separate variable.
As defined earlier, an imprecise continuous-time markovchain is a set of many precise CTMCs. Hence this representation of a set of precise CTMCs can be used to calculate the transition probability at some time in the future. This can be seen as an analogy with the `probabilityatT` function, which is used to calculate the transition function at some later time $t$ from the generator matrix.

For every generator matrix we have a corresponding transition function. Similarly, for every lower transition rate operator of an ICTMC, we have a corresponding lower transition operator denoted by $L_{t}^{s}$. Here $t$ is the initial time and $s$ is the final time. Now we mention a proposition [@ictmcpaper] which states that:

Let $Q_{l}$ be a lower transition rate operator, choose any two time points $t$ and $s$ such that $0 \leq t \leq s$, and let $L_{t}^{s}$ be the lower transition operator corresponding to $Q_{l}$. Then for any $f \in L(X)$ and $\epsilon \in R_{>0}$, if we choose any $n \in N$ such that:
\[n \geq max((s-t)*||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\]
with $||f||_{v}$ := max $f$ - min $f$, we are guaranteed that [@ictmcpaper]
\[ ||L_{t}^{s} - \prod_{i=1}^{n}(I + \Delta Q_{l}) || \leq \epsilon \]
with $\Delta := \frac{s-t}{n}$.

Simply put, this proposition tells us that if we can evaluate $Q_{l}g$ for all $g \in L(X)$, then we can also approximate the quantity $L_{t}^{s}f$ to arbitrary precision, for any given $f \in L(X)$.

To explain this approximate calculation, we take a detailed example of a process containing two states, healthy and sick, hence $X = (healthy, sick)$. If we represent it in the form of an ICTMC, we get:
\[
Q = \left(\begin{array}{cc}
-a & a \\
b & -b
\end{array}\right)
\]
for some $a,b \in R_{\geq 0}$. The parameter $a$ here is the rate at which a healthy person becomes sick. Technically, this means that if a person is healthy at time $t$, the probability that he or she will be sick at time $t +\Delta$, for small $\Delta$, is very close to $\Delta a$. More intuitively, if we take the time unit to be one week, it means that he or she will, on average, become sick after $\frac{1}{a}$ weeks. The parameter $b$ is the rate at which a sick person becomes healthy again, and has a similar interpretation. Now, to completely represent the ICTMC, we take an example and write the generator as:
\[
Q = \left(\begin{array}{cc}
-a & a \\
b & -b
\end{array}\right) : a \in [\frac{1}{52},\frac{3}{52}],b \in [\frac{1}{2},2]
\]
Now suppose we know the initial state of the patient to be sick; this is represented in the form of a function by:
\[
I_{s} = \left(\begin{matrix}
0 \\
1
\end{matrix}\right)
\]
We observe that $||I_{s}|| = 1$. To use the proposition mentioned above, we use the definition to calculate the lower transition rate operator $Q_{l}$. Next we calculate the norm of the lower transition rate operator and use it in the proposition. We also take the value of $\epsilon$ to be 0.001.

Using the proposition we can come up with an algorithm for calculating the probability at any time $s$, given the state at the initial time $t$ and an ICTMC generator [@ictmcpaper]. The algorithm is as follows:

**Input**: A lower transition rate operator $Q$, two time points $t,s$ such that $t \leq s$, a function $f \in L(X)$ and a maximum numerical error $\epsilon \in R_{>0}$.

**Algorithm**:

1. $n = max((s-t)||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)$
2. $\Delta = \frac{s-t}{n}$
3. $g_{0} = I_{s}$
4. for $i \in (1,.....,n)$ do $g_{i} = g_{i-1} + \Delta Q_{l}g_{i-1}$
5. end for
6. return $g_{n}$

**Output**: The conditional probability vector at time $s$ with error $\epsilon$.
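The scheme above can be prototyped in a few lines of plain R. The sketch below is purely illustrative and is not part of the package: `lowerProbSketch` is a hypothetical helper that reuses the `lowerRateOperator` sketch from the previous section and uses a fixed number of steps `n` instead of the error-controlled choice of step 1.

``` {r}
# illustrative prototype of the iteration g_i = g_{i-1} + delta * [Q_l g_{i-1}]
# (assumption: lowerRateOperator() is the illustrative helper defined earlier)
lowerProbSketch <- function(Q, range, g0, s, t = 0, n = 1000) {
  delta <- (s - t) / n
  g <- g0
  for (i in seq_len(n)) {
    g <- g + delta * lowerRateOperator(Q, range, g)
  }
  g
}

# healthy/sick example from the text, starting from the indicator of "sick"
Qhs <- matrix(c(-1, 1, 1, -1), nrow = 2, byrow = TRUE)
rangehs <- matrix(c(1/52, 3/52, 1/2, 2), nrow = 2, byrow = TRUE)
lowerProbSketch(Qhs, rangehs, g0 = c(0, 1), s = 1)
```

Up to the discretization error implied by the fixed `n`, this should be close to the output of `impreciseProbabilityatT` shown below.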
Hence, after applying the algorithm to the above example we get the following result: $g_{n} = 0.0083$ if the final state is $healthy$ and $g_{n} = 0.141$ if the final state is $sick$. The probabilities are calculated with an error of at most $\epsilon$, i.e. $0.001$. Now we run the algorithm on the example through R code.

``` {r}
states <- c("n","y")
Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states))
range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = TRUE)
name <- "testictmc"
ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name)
impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE)
```

The probabilities we get are within an error of $10^{-3}$.

# Continuous time markovchain generator using frequency Matrix

The package provides the `freq2Generator` function. It takes a matrix of relative frequency values, along with the time taken, and returns a continuous-time markovchain generator matrix. Here the frequency matrix is a square matrix, with dimension equal to the number of possible states, describing the number of transitions from a state $i$ to $j$ in time $t$, which is another parameter to be provided to the function. The function also allows the user to choose among three methods for the calculation of the generator matrix [@freqArticle]. The three methods are as follows:

1. Quasi Optimization - "QO"
2. Diagonal Adjustment - "DA"
3. Weighted Adjustment - "WA"

See the reference for details about the methods.

Here is an example matrix on which the `freq2Generator` function is run:

``` {r}
sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE)
sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1))
freq2Generator(sample_rel,1)
```

# Committor of a markovchain

Consider two sets of states $A$ and $B$ comprising states from a markovchain with transition matrix $P$. The committor vector of a markovchain with respect to sets $A$ and $B$ gives the probability that the process will hit a state from set $A$ before any state from set $B$. The committor vector $u$ can be calculated by solving the following system of linear equations [@committorlink]:
$$
\begin{array}{l}
Lu(x) = 0, x \notin A \cup B \\
u(x) = 1, x \in A \\
u(x) = 0, x \in B
\end{array}
$$
where $L = P - I$.

Now we apply the method to an example:

``` {r}
transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5)
object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc")
committorAB(object,c(5),c(3))
```

Here we get the probability that the process will hit state "e" before state "c", for each possible initial state.
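Since the committor is defined by the linear system above, the output can be verified by hand, purely as an illustration: on the states outside $A \cup B$ the residual $(P - I)u$ should vanish. The check below assumes that `committorAB` returns the committor in the chain's state order (a, b, c, d, e).

``` {r}
# illustrative check: (P - I) u must be (numerically) zero outside A and B
u <- committorAB(object, c(5), c(3))
P <- object@transitionMatrix
residual <- (P - diag(nrow(P))) %*% u
round(residual[c(1, 2, 4)], 10)  # states "a", "b", "d"
```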
# First Passage probability for set of states

The computation of the first passage time for individual states is already implemented in the package. The `firstPassageMultiple` function provides a method to get the first passage probability for a given set of states.

Consider this example markovchain object:

``` {r}
statesNames <- c("a", "b", "c")
testmarkov <- new("markovchain", states = statesNames, transitionMatrix =
                    matrix(c(0.2, 0.5, 0.3,
                             0.5, 0.1, 0.4,
                             0.1, 0.8, 0.1), nrow = 3, byrow = TRUE,
                           dimnames = list(statesNames, statesNames)))
```

Now we apply the `firstPassageMultiple` function to calculate the first passage probabilities for the set of states $"b", "c"$ when the initial state is $"a"$.

``` {r}
firstPassageMultiple(testmarkov,"a",c("b","c"),4)
```

This shows us the probability that the process will hit any state from the set after $n$ steps; for instance, as shown, the probability of the process hitting any of the states among $"b", "c"$ after $2$ steps is $0.6000$.

# Joint PDF of number of visits to the various states of a markovchain

The package provides a function `noofVisitsDist` that returns the PDF of the number of visits to the various states of the discrete time markovchain during the first $N$ steps, given the initial state of the process. We will take an example to see how to use the function on a `markovchain-class` object:

``` {r}
transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"),
              transitionMatrix=transMatr,
              name="simpleMc")
noofVisitsDist(simpleMc,5,"a")
```

The output clearly shows the probabilities related to the various states of the process.

# Expected Rewards for a markovchain

The package provides a function `expectedRewards` that returns a vector of expected rewards for different initial states. The user provides reward values as a vector $r$, of size equal to the number of states, with a value corresponding to every state. Given a transition matrix $[P]$, the vector of expected rewards $v$ after $n$ transitions satisfies the following equation [@GallagerBook]:

$v[n] = r + [P]*v[n-1]$

Applying this equation on a markovchain-class object:

``` {r}
transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"),
              transitionMatrix=transMatr)
expectedRewards(simpleMc,1,c(0,1))
```

## Expected Rewards for a set of states in a markovchain process

The package provides a function `expectedRewardsBeforeHittingA` that returns the value of the expected first passage rewards $E$, given rewards corresponding to every state and an initial state. That is, the function returns the expected reward for a given initial state $s_{0}$, number of transitions $n$, and set of states $A$, under the constraint that the process does not hit any of the states that belong to the set $A$. $S$ is the set of all possible states. The function uses the following equation:
$$E = \sum_{i=1}^{n}{1_{s_{0}}P_{S-A}^{i}R_{S-A}}$$
Here $1_{s_{0}} = [0,0,...0,1,0,...,0,0,0]$, with the 1 in position $s_{0}$, and $R_{S-A}$ is the rewards vector restricted to the states in $S-A$.

# Checking Irreducibility of a CTMC

The package provides a function `is.CTMCirreducible` that returns a Boolean value stating whether the `ctmc` object is irreducible. We know that a continuous-time markovchain is irreducible if and only if its embedded chain is irreducible [@Sigman].

We demonstrate an example running the function:

``` {r}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates,
                     byrow = byRow, generator = gen,
                     name = "Molecular Transition Model")
is.CTMCirreducible(molecularCTMC)
```

# Simulation of Higher Order Multivariate Markovchains

The package provides the `predictHommc` function, which provides a simulation system for higher order multivariate markovchains. The function assumes that the state probability distribution of the $j$th sequence at time $r+1$ depends on the state probability distributions of all the sequences at the $n$ previous moments of time, i.e. $t = r$ to $t = r-n+1$.
Hence the proposed model takes the form mathematically as:[@ching2008higher] $$ X_{r+1}^{j} = \sum_{k=1}^{s}\sum_{h=1}^n{\lambda_{jk}^{(h)}P_{h}^{(jk)}X_{r-h+1}^{(k)}}, \ \ \ j = 1,2,....s, \ \ r = n-1,n,... $$ with initals $X_{0}^{(k)},X_{1}^{(k)},......,X_{n-1}^{(k)} \ (k = 1,2,...s)$. Here, $\lambda_{jk}^{(k)}, \ 1 \leq j,k \leq s, \ 1 \leq h \leq n \ \ \ and \ \ \ \sum_{k=1}^{s}\sum_{h=1}^{n}{\lambda_{jk}^{(h)} = 1}, \ \ \ j = 1,2,....s.$ Now we run an example on sample hommc object for simulating next 3 steps using `predictHommc` function. The function provides a choice of entering initial states according to the hommc object. In case the user does not enter initial states, the function takes all initial states to be the first state from the set of states. ``` {r} statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") predictHommc(ob,3) ``` # Check Time Reversibility of Continuous-time markovchains A Continuous-time markovchain with generator $Q$ and stationary distribution $\pi$ is said to be time reversible if:[@dobrow2016introduction] $$ \pi_{i}q_{ij} = \pi_{j}q_{ji} $$ Intuitively, a continuous-time Markov chain is time reversible if the process in forward time is indistinguishable from the process in reversed time. A consequence is that for all states i and j, the long-term forward transition rate from i to j is equal to the long-term backward rate from j to i. The package provides `is.TimeReversible` function to check if a `ctmc` object is time-reversible. We follow with an example run on a `ctmc` object. ``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) ``` # References markovchain/NEWS0000644000176200001440000000014214050510603013242 0ustar liggesusers2021-05-7 0.8.6 Fix a bug in markovchainListFit that made confusion between lists and data.framesmarkovchain/R/0000755000176200001440000000000014050513011012743 5ustar liggesusersmarkovchain/R/markovchain.R0000644000176200001440000000440413762012754015413 0ustar liggesusers#' @docType package #' @name markovchain-package #' @rdname markovchain #' @title Easy Handling Discrete Time Markov Chains #' #' @description The package contains classes and method to create and manage #' (plot, print, export for example) discrete time Markov chains (DTMC). 
In #' addition it provide functions to perform statistical (fitting and drawing #' random variates) and probabilistic (analysis of DTMC proprieties) analysis #' #' @details #' \tabular{ll}{ #' Package: \tab markovchain\cr #' Type: \tab Package\cr #' Version: \tab 0.8.2\cr #' Date: \tab 2020-01-5\cr #' License: \tab GPL-2\cr #' Depends: \tab R (>= 3.6.0), methods, expm, matlab, igraph, Matrix\cr #' } #' @author #' Giorgio Alfredo Spedicato #' Maintainer: Giorgio Alfredo Spedicato #' @references Discrete-Time Markov Models, Bremaud, Springer 1999 #' @keywords package #' #' @examples #' # create some markov chains #' statesNames=c("a","b") #' mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, #' nrow=2, dimnames=list(statesNames,statesNames))) #' #' statesNames=c("a","b","c") #' mcB<-new("markovchain", states=statesNames, transitionMatrix= #' matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, #' byrow=TRUE, dimnames=list(statesNames, statesNames))) #' #' statesNames=c("a","b","c","d") #' matrice<-matrix(c(0.25,0.75,0,0,0.4,0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) #' mcC<-new("markovchain", states=statesNames, transitionMatrix=matrice) #' mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #' #' #' #operations with S4 methods #' mcA^2 #' steadyStates(mcB) #' absorbingStates(mcB) #' markovchainSequence(n=20, markovchain=mcC, include=TRUE) NULL #' @useDynLib markovchain, .registration = TRUE #' @import igraph #' @import Matrix #' @import methods #' @import parallel #' @importFrom utils packageDescription #' @importFrom Rcpp evalCpp #' @importFrom RcppParallel RcppParallelLibs #' @importFrom stats4 plot summary #' @importFrom matlab zeros find eye size ones #' @importFrom expm %^% logm #' @importFrom stats sd rexp chisq.test pchisq predict aggregate #' @importFrom grDevices colors NULLmarkovchain/R/ctmcProbabilistic.R0000644000176200001440000005114513762012754016552 0ustar liggesusers#' @title rctmc #' #' @description The function generates random CTMC transitions as per the #' provided generator matrix. #' @usage rctmc(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, #' out.type = "list") #' #' @param n The number of samples to generate. #' @param ctmc The CTMC S4 object. #' @param initDist The initial distribution of states. #' @param T The time up to which the simulation runs (all transitions after time #' T are not returned). #' @param include.T0 Flag to determine if start state is to be included. #' @param out.type "list" or "df" #' #' @details In order to use the T0 argument, set n to Inf. #' @return Based on out.type, a list or a data frame is returned. The returned #' list has two elements - a character vector (states) and a numeric vector #' (indicating time of transitions). The data frame is similarly structured. #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences #' (2013), David F. 
Anderson, University of Wisconsin at Madison #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{generatorToTransitionMatrix}},\code{\link{ctmc-class}} #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' #' statesDist <- c(0.8, 0.2) #' rctmc(n = Inf, ctmc = molecularCTMC, T = 1) #' rctmc(n = 5, ctmc = molecularCTMC, initDist = statesDist, include.T0 = FALSE) #' @export rctmc <- function(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, out.type = "list") { if (identical(initDist, numeric())) state <- sample(ctmc@states, 1) # sample state randomly else if (length(initDist) != dim(ctmc) | round(sum(initDist), 5) != 1) stop("Error! Provide a valid initial state probability distribution") else state <- sample(ctmc@states, 1, prob = initDist) # if valid probability distribution, # sample accordingly # obtain transition probability matrix from the generator matrix trans <- generatorToTransitionMatrix(ctmc@generator) states <- c() time <- c() if (include.T0 == TRUE){ states <- c(states, state) time <- c(time, 0) } t <- 0 i <- 1 while (i <= n){ idx <- which(ctmc@states == state) t <- t + rexp(1, -ctmc@generator[idx, idx]) state <- ctmc@states[sample(1:dim(ctmc), 1, prob = trans[idx, ])] if(T > 0 & t > T) break states <- c(states, state) time <- c(time, t) i <- i + 1 } out <- list(states, time) if (out.type == "list") return(out) else if(out.type == "df"){ df <- data.frame(matrix(unlist(out), nrow = length(states))) names(df) <- c("states", "time") return(df) } else stop("Not a valid output type") } #' @title Return the generator matrix for a corresponding transition matrix #' #' @description Calculate the generator matrix for a #' corresponding transition matrix #' #' @param P transition matrix between time 0 and t #' @param t time of observation #' @param method "logarithm" returns the Matrix logarithm of the transition matrix #' #' @return A matrix that represent the generator of P #' @export #' #' @examples #' mymatr <- matrix(c(.4, .6, .1, .9), nrow = 2, byrow = TRUE) #' Q <- transition2Generator(P = mymatr) #' expm::expm(Q) #' #' @seealso \code{\link{rctmc}} transition2Generator<-function(P, t = 1,method = "logarithm") { if (method == "logarithm") { Q = logm(P)/t } #else return(Q) } #' Returns a generator matrix corresponding to frequency matrix #' #' @description The function provides interface to calculate generator matrix corresponding to #' a frequency matrix and time taken #' #' @param P relative frequency matrix #' @param t (default value = 1) #' @param method one among "QO"(Quasi optimaisation), "WA"(weighted adjustment), "DA"(diagonal adjustment) #' @param logmethod method for computation of matrx algorithm (by default : Eigen) #' #' @return returns a generator matix with same dimnames #' #' @references E. Kreinin and M. Sidelnikova: Regularization Algorithms for #' Transition Matrices. 
Algo Research Quarterly 4(1):23-40, 2001 #' #' @examples #' sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) #' sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) #' freq2Generator(sample_rel,1) #' #' data(tm_abs) #' tm_rel=rbind((tm_abs/rowSums(tm_abs))[1:7,],c(rep(0,7),1)) #' ## Derive quasi optimization generator matrix estimate #' freq2Generator(tm_rel,1) #' #' @export #' freq2Generator <- function(P,t = 1,method = "QO",logmethod = "Eigen"){ if(method == "QO"){ out <- ctmcd::gmQO(P, t, logmethod) } else if(method == "WA") { out <- ctmcd::gmWA(P, t, logmethod) } else if(method == "DA") { out <- ctmcd::gmDA(P, t, logmethod) } return(out) } #' @title Returns expected hitting time from state i to state j #' #' @description Returns expected hitting time from state i to state j #' #' @usage ExpectedTime(C,i,j,useRCpp) #' #' @param C A CTMC S4 object #' @param i Initial state i #' @param j Final state j #' @param useRCpp logical whether to use Rcpp #' #' @details According to the theorem, holding times for all states except j should be greater than 0. #' #' @return A numerical value that returns expected hitting times from i to j #' #' @references Markovchains, J. R. Norris, Cambridge University Press #' #' @author Vandit Jain #' #' @examples #' states <- c("a","b","c","d") #' byRow <- TRUE #' gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), #' nrow = 4,byrow = byRow, dimnames = list(states,states)) #' ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") #' ExpectedTime(ctmc,1,4,TRUE) #' #' @export ExpectedTime <- function(C,i,j,useRCpp = TRUE){ # take generator from ctmc-class object Q <- C@generator # in case where generator is written column wise if(C@byrow==FALSE){ Q <- t(Q) } NoofStates <- dim(C) Exceptj <- c(1:NoofStates) # create vector with all values from 1:NoofStates except j Exceptj <- which(Exceptj!=j) # build matrix with vlaues from Q such that row!=j or column!=j Q_Exceptj <- Q[Exceptj,Exceptj] # check for positivity of holding times except for state j if(!all(diag(Q_Exceptj)!=0)){ stop("Holding times for all states except j should be greater than 0") } # get b for solving the system of linear equation Ax = b where A is Q_Exceptj b <- rep(-1,dim(Q_Exceptj)[1]) # use solve function from base packge to solve Ax = b if(useRCpp == TRUE){ out <- .ExpectedTimeRCpp(Q_Exceptj,b) } else { out <- solve(Q_Exceptj,b) } # out will be of size NoofStates-1, hence the adjustment for different cases of i>= NoofStates || x0 < 1){ stop("Initial state provided is not correct") } return(P[x0,]) } } #' Calculating full conditional probability using lower rate transition matrix #' #' This function calculates full conditional probability at given #' time s using lower rate transition matrix #' #' @usage impreciseProbabilityatT(C,i,t,s,error,useRCpp) #' #' @param C a ictmc class object #' @param i initial state at time t #' @param t initial time t. Default value = 0 #' @param s final time #' @param error error rate. 
Default value = 0.001 #' @param useRCpp logical whether to use RCpp implementation; by default TRUE #' #' @references Imprecise Continuous-Time Markov Chains, Thomas Krak et al., 2016 #' #' @author Vandit Jain #' #' @examples #' states <- c("n","y") #' Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) #' range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) #' name <- "testictmc" #' ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) #' impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) #' @export impreciseProbabilityatT <- function(C, i, t=0, s, error = 10^-3, useRCpp = TRUE){ ## input validity checking if(s <= t){ stop("Please provide time points such that initial time is greater than or equal to end point") } if(!class(C) == 'ictmc'){ stop("Please provide a valid ictmc-class object") } noOfstates <-length(C@states) if(i <= 0 || i > noOfstates){ stop("Please provide a valid initial state") } ### validity checking ends if(useRCpp == TRUE) { Qgx <- .impreciseProbabilityatTRCpp(C,i,t,s,error) } else { ## extract values from ictmc object Q <- C@Q range <- C@range ### calculate ||QI_i|| #initialise Q norm value QNorm <- -1 for(i in 1:noOfstates){ sum <- 0 for(j in 1:noOfstates){ sum <- sum + abs(Q[i,j]) } QNorm <- max(sum*range[i,2],QNorm) } ### calculate no of iterations # The 1 is for norm of I_s i.e. ||I_s|| which equals 1 n <- max((s-t)*QNorm, (s-t)*(s-t)*QNorm*QNorm*1/(2*error)) ### calculate delta delta <- (s-t)/n ### build I_i vector Ii <- rep(0, noOfstates) Ii[i] <- 1 ### calculate value of lower operator _QI_i(x) for all x belongs to no ofStates values <- Q%*%Ii Qgx <- rep(0, noOfstates) for(i in 1:noOfstates){ Qgx[i] <- min(values[i]*range[i,1], values[i]*range[i,2]) } Qgx <- delta*Qgx Qgx <- Ii + Qgx for(iter in 1:n-1){ temp <- Qgx values <- Q%*%Qgx for(i in 1:noOfstates){ Qgx[i] <- min(values[i]*range[i,1], values[i]*range[i,2]) } Qgx <- delta*Qgx Qgx <- temp + Qgx } } return(Qgx) } #' Check if CTMC is irreducible #' #' @description #' This function verifies whether a CTMC object is irreducible #' #' @usage is.CTMCirreducible(ctmc) #' #' @param ctmc a ctmc-class object #' #' @references #' Continuous-Time Markov Chains, Karl Sigman, Columbia University #' #' @author Vandit Jain #' #' @return a boolean value as described above. 
#' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' is.CTMCirreducible(molecularCTMC) #' #' @export is.CTMCirreducible <- function(ctmc) { if(!class(ctmc) == 'ctmc') { stop("please provide a valid ctmc class object") } ## gets the embeded chain matrix embeddedChainMatrix <- generatorToTransitionMatrix(ctmc@generator) ## forms a markovchain object related to embedded transition matrix markovchainObject <- new("markovchain",states = ctmc@states, transitionMatrix = embeddedChainMatrix) ## returns result using is.irreducible function on embedded chain transition matrix return(is.irreducible(markovchainObject)) } #' checks if ctmc object is time reversible #' #' @description #' The function returns checks if provided function is time reversible #' #' @usage is.TimeReversible(ctmc) #' #' @param ctmc a ctmc-class object #' #' @return Returns a boolean value stating whether ctmc object is time reversible #' #' @author Vandit Jain #' #' @return a boolean value as described above #' #' @references #' INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley #' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' is.TimeReversible(molecularCTMC) #' #' @export is.TimeReversible <- function(ctmc) { if(!class(ctmc) == "ctmc") { stop("please provide a valid ctmc-class object") } ## get steady state probabilities Pi <- steadyStates(ctmc) ## initialise boolean result check <- TRUE ## no of states m <- length(ctmc@states) ## checks for byrow if(ctmc@byrow == FALSE) gen <- t(ctmc@generator) else gen <- ctmc@generator ## iterates for every state for( i in 1:m) { for(j in 1:m) { if(Pi[i]*gen[i,j] != Pi[j]*gen[j,i]) { check <- FALSE break } } } return(check) } # `generator/nextki` <- function(k) { # if(k >= 0) return(-1-k) # return(-k) # } # # `generator/nextk` <- function(k, Kmin, Kmax) { # if(is.null(k)) { # k <- rep(0, length(Kmin)) # return(list(ans = TRUE, k = k)) # } # # if(length(Kmin) == 0) { # return(list(ans = FALSE, k = k)) # } # # i <- 1 # kl <- k # kl[i] <- `generator/nextki`(kl[i]) # while (kl[i] > Kmax[i] || kl[i] < Kmin[i]) { # kl[i] <- 0 # i <- i+1 # if(i > length(kl)) { # k <- kl # return(list(ans = FALSE, k = k)) # } # kl[i] <- `generator/nextki`(kl[i]) # } # k <- kl # return(list(ans = TRUE, k = k)) # } # # `generator/generator` <- function(Po, Di, odi) { # P <- Po # N <- nrow(P) # # if(Di > 22) return(NULL) # bad idea # options(digits = Di) # odigs <- odi # # rSum <- rowSums(P) # if(! 
all(abs(1-rSum) < 0.001)) { # stop("Sum of each rows of Po should be equal to 1") # } # # P <- P/rSum # d <- det(P) # # if(d <= 0) { # cat("Matrix has non-positive determinant") # return(NULL) # } # # diagP <- 1 # for(i in 1:nrow(P)) diagP <- diagP * P[i, i] # # if(d >= diagP) { # cat("Determinant exceeds product of diagonal elements\n") # return(NULL) # } # # E <- eigen(P)[[1]] # B <- eigen(P)[[2]] # # print("Eigenvalues") # print(E) # # # risky # if(length(unique(E)) != length(E)) { # warning("Matrix does not have distinct eigenvalues") # } # # L <- abs(log(d)) # addigs <- 2 + round(log10(1/Matrix::rcond(B))) + round(L/log(10)) # problem # # if(options()$digits < odigs + addigs) { # if(odigs + addigs > 100) { # print("Eigenvector matrix is singular") # return(NULL) # } # # cat('Going to', odigs + addigs, "digits") # return(`generator/generator`(Po, odigs + addigs, odigs)) # } # # Bi <- solve(B) # # posevs <- NULL # negevs <- NULL # bestj <- NULL # bestQ <- NULL # marks <- rep(TRUE, length(E)) # # for(i in 1:length(E)) { # if(marks[i] && !(Re(E[i]) > 0 && Im(E[i]) == 0)) { # invalid comparison of complex number # cj <- Conj(E[i]) # best <- Inf # if(i+1 <= length(E)) { # for(j in (i+1):length(E)) { # if(marks[j]) { # score <- abs(cj-E[j]) # if(score < best) { # best <- score # bestj <- j # } # } # } # } # # if(best > 10^(3-options()$digits)) { # cat("Unpaired non-positive eigenvalue", E[i]) # return(NULL) # } # marks[bestj] <- FALSE # if(Im(E[i]) >= 0) { # posevs <- c(posevs, i) # negevs <- c(negevs, bestj) # if(Im(E[bestj]) == 0) { # E[bestj] <- complex(real = E[bestj], imaginary = 0) # } # } else { # posevs <- c(posevs, bestj) # negevs <- c(negevs, i) # if(Im(E[i]) == 0) { # E[i] <- complex(real = E[i], imaginary = 0) # } # } # } # } # # npairs <- length(posevs) # # display conjugate pairs # # Kmax <- rep(0, npairs) # Kmin <- Kmax # # for(i in 1:npairs) { # a <- Arg(E[posevs[i]]) # Kmax[i] <- trunc((L-a)/2*pi) # Kmin[i] <- trunc((-L-a)/2*pi) # } # # # display K-max # # display K-min # # best <- -0.001 # DD <- diag(log(E)) # DK <- matlab::zeros(N) # res <- list(); p <- 1 # k <- NULL # while(TRUE) { # # dlist <- `generator/nextk`(k, Kmin, Kmax) # k <- dlist$k # # if(dlist$ans == FALSE) {break} # # # display value of k # for(i in 1:npairs) { # ke <- complex(real = 0, imaginary = 2*pi*k[i]) # DK[posevs[i], posevs[i]] <- ke # DK[negevs[i], negevs[i]] <- -ke # } # # Q <- B %*% (DD + DK) %*% Bi # # Q <- fnormal(Re(Q), options()$digits, 5*(10^(-1-odigs))) # define fnormal of maple # qmin <- Q[1,2] # for(i in 1:N) { # for(j in 1:N) { # if(i != j) { # if(Q[i, j] < qmin) qmin <- Q[i, j] # } # } # } # # if(EnvAllGenerators == TRUE) { # if(qmin > -.001) { # cat("Possible generator with qmin =", qmin) # res[[p]] <- round(Q, odigs) # p <- p + 1 # } else { # cat("qmin =", qmin) # } # # } else { # if(qmin >= 0) { # cat("Found a generator") # return(round(Q, odigs)) # } else { # if(qmin > best) { # best <- qmin # bestQ <- Q # } # if(qmin > -.001) { # cat("Approximate generator with qmin = ", qmin) # } else { # cat("qmin =", qmin) # } # } # } # } # # if(EnvAllGenerators == TRUE) { # return(res) # } # # warning("No completely valid generator found") # # if(! is.null(bestQ)) { # return(round(bestQ, odigs)) # } else return(NULL) # # } # # generator <- function(Po, digits = 10) { # odigs <- digits # options(digits = 15) # if(is.matrix(Po)) { # P <- Po # } else { # stop("Po must be matrix") # } # # if(nrow(P) != ncol(P)) { # print(P) # stop('Po must be square matrix') # } # # if(! 
all(P >= 0)) { # print(P) # stop('Po must be non negative square matrix') # } # # `generator/generator`(P, options()$digits, odigs) # } markovchain/R/sysdata.rda0000644000176200001440000000353214050513461015117 0ustar liggesusers[ PWG¥xTU^9Di%LmȬ [ awmKX{EhʈL Z,APhm֩twoeAM]޾%D͘|τϡpdK.n0 asfM.8$c8_ yD8M ۗEfKG;BF}| +/ r+ ןNj1op0l}MaP cvĕcn⋫0XC1!p4--4[ژ#Fz\ØoI^*k-C:\`! x)[}֤гdLT}ý9p$ƃ.y>9=տqp⚭0Xn k^9[ -l=R׳=,κʠ<-n(&+ Ƶé7]S4o]  M_\yAP-pc>0XÞ7-uzסøip^6\oYٲ|r:Fu{J`޲HClEL#'$ [ }Pv<3 56]iHd CG"= tU;N/Y?`6MM:=FQAզr\.MBa&_{]H,N߈^ Q3n{rH@YB}n1 .mgCD;=q0DAϴR`y/F#^( w,v";״ \C uj6-dBQep)n"s\&'4*oG^ԉ응1qUbdmb7\ɺ;+Nnmc5q|^2Tg.so{\x,9N.qX=pPz$db @oh^~E9'\$p Vs ?\=q{+'Df4ߢp8uOnc9h?%"I'$ ǒ<X9bGy2L>`& 恘 b&y1L.N9I5CItjS7vllSMTDDžR۷t3838n唤 #H=^e5X}fߎ$V2UCDRr"Ta~1[#s42 셉~BYSpyỹ~ F noofstates) stop("please provide a valid set A") } for(i in length(B)) { if(B[i] <= 0 || B[i] > noofstates) stop("please provide a valid set B") } for(i in 1:noofstates) { if(i %in% A && i %in% B) stop("intersection of set A and B in not null") } if(p <=0 || p > noofstates) stop("please provide a valid initial state") I <- diag(noofstates) matrix <- matrix - I A_size = length(A) B_size = length(B) # sets the matrix according to the provided states for(i in 1:A_size) { for(j in 1:noofstates) { if(A[i]==j) matrix[A[i],j] = 1 else matrix[A[i],j] = 0 } } # sets the matrix according to the provided states for(i in 1:B_size) { for(j in 1:noofstates) { if(B[i]==j) matrix[B[i],j] = 1 else matrix[B[i],j] = 0 } } # initialises b in the equation the system of equation AX =b b <- rep(0,noofstates) for(i in 1:A_size) { b[A[i]] = 1 } # solve AX = b according using solve function from base package out <- solve(matrix,b) if(missing(p)) return(out) else return(out[p]) } #' Expected Rewards for a markovchain #' #' @description Given a markovchain object and reward values for every state, #' function calculates expected reward value after n steps. #' #' @usage expectedRewards(markovchain,n,rewards) #' #' @param markovchain the markovchain-class object #' @param n no of steps of the process #' @param rewards vector depicting rewards coressponding to states #' #' @details the function uses a dynamic programming approach to solve a #' recursive equation described in reference. #' #' @return #' returns a vector of expected rewards for different initial states #' #' @author Vandit Jain #' #' @references Stochastic Processes: Theory for Applications, Robert G. Gallager, #' Cambridge University Press #' #' @examples #' transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) #' simpleMc<-new("markovchain", states=c("a","b"), #' transitionMatrix=transMatr) #' expectedRewards(simpleMc,1,c(0,1)) #' @export expectedRewards <- function(markovchain, n, rewards) { # gets the transition matrix matrix <- markovchain@transitionMatrix # Rcpp implementation of the function out <- .expectedRewardsRCpp(matrix,n, rewards) noofStates <- length(states(markovchain)) result <- rep(0,noofStates) for(i in 1:noofStates) result[i] = out[i] #names(result) <- states(markovchain) return(result) } #' Expected first passage Rewards for a set of states in a markovchain #' #' @description Given a markovchain object and reward values for every state, #' function calculates expected reward value for a set A of states after n #' steps. 
#' #' @usage expectedRewardsBeforeHittingA(markovchain, A, state, rewards, n) #' #' @param markovchain the markovchain-class object #' @param A set of states for first passage expected reward #' @param state initial state #' @param rewards vector depicting rewards coressponding to states #' @param n no of steps of the process #' #' @details The function returns the value of expected first passage #' rewards given rewards coressponding to every state, an initial state #' and number of steps. #' #' @return returns a expected reward (numerical value) as described above #' #' @author Sai Bhargav Yalamanchi, Vandit Jain #' #' @export expectedRewardsBeforeHittingA <- function(markovchain, A, state, rewards, n) { ## gets the markovchain matrix matrix <- markovchain@transitionMatrix # gets the names of states stateNames <- states(markovchain) # no of states S <- length(stateNames) # vectors for states in S-A SAno <- rep(0,S-length(A)) rewardsSA <- rep(0,S-length(A)) # for initialisation for set S-A i=1 ini = -1 for(j in 1:length(stateNames)) { if(!(stateNames[j] %in% A)){ SAno[i] = j rewardsSA[i] = rewards[j] if(stateNames[j] == state) ini = i i = i+1 } } ## get the matrix coressponding to S-A matrix <- matrix[SAno,SAno] ## cals the cpp implementation out <- .expectedRewardsBeforeHittingARCpp(matrix, ini, rewardsSA, n) return(out) } #' Mean First Passage Time for irreducible Markov chains #' #' @description Given an irreducible (ergodic) markovchain object, this function #' calculates the expected number of steps to reach other states #' #' @param object the markovchain object #' @param destination a character vector representing the states respect to #' which we want to compute the mean first passage time. Empty by default #' #' @details For an ergodic Markov chain it computes: #' \itemize{ #' \item If destination is empty, the average first time (in steps) that takes #' the Markov chain to go from initial state i to j. (i, j) represents that #' value in case the Markov chain is given row-wise, (j, i) in case it is given #' col-wise. #' \item If destination is not empty, the average time it takes us from the #' remaining states to reach the states in \code{destination} #' } #' #' @return a Matrix of the same size with the average first passage times if #' destination is empty, a vector if destination is not #' #' @author Toni Giorgino, Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. 
#' #' @examples #' m <- matrix(1 / 10 * c(6,3,1, #' 2,3,5, #' 4,1,5), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) #' meanFirstPassageTime(mc, "r") #' #' #' # Grinstead and Snell's "Oz weather" worked out example #' mOz <- matrix(c(2,1,1, #' 2,0,2, #' 1,1,2)/4, ncol = 3, byrow = TRUE) #' #' mcOz <- new("markovchain", states = c("s", "c", "r"), transitionMatrix = mOz) #' meanFirstPassageTime(mcOz) #' #' @export meanFirstPassageTime setGeneric("meanFirstPassageTime", function(object, destination) { standardGeneric("meanFirstPassageTime") }) setMethod("meanFirstPassageTime", signature("markovchain", "missing"), function(object, destination) { destination = character() .meanFirstPassageTimeRcpp(object, destination) } ) setMethod("meanFirstPassageTime", signature("markovchain", "character"), function(object, destination) { states <- object@states incorrectStates <- setdiff(destination, states) if (length(incorrectStates) > 0) stop("Some of the states you provided in destination do not match states from the markovchain") result <- .meanFirstPassageTimeRcpp(object, destination) asVector <- as.vector(result) names(asVector) <- colnames(result) asVector } ) #' Mean recurrence time #' #' @description Computes the expected time to return to a recurrent state #' in case the Markov chain starts there #' #' @usage meanRecurrenceTime(object) #' #' @param object the markovchain object #' #' @return For a Markov chain it outputs is a named vector with the expected #' time to first return to a state when the chain starts there. #' States present in the vector are only the recurrent ones. If the matrix #' is ergodic (i.e. irreducible), then all states are present in the output #' and order is the same as states order for the Markov chain #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(1 / 10 * c(6,3,1, #' 2,3,5, #' 4,1,5), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) #' meanRecurrenceTime(mc) #' #' @export meanRecurrenceTime setGeneric("meanRecurrenceTime", function(object) { standardGeneric("meanRecurrenceTime") }) setMethod("meanRecurrenceTime", "markovchain", function(object) { .meanRecurrenceTimeRcpp(object) }) #' Mean absorption time #' #' @description Computes the expected number of steps to go from any of the #' transient states to any of the recurrent states. The Markov chain should #' have at least one transient state for this method to work #' #' @usage meanAbsorptionTime(object) #' #' @param object the markovchain object #' #' @return A named vector with the expected number of steps to go from a #' transient state to any of the recurrent ones #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(c(1/2, 1/2, 0, #' 1/2, 1/2, 0, #' 0, 1/2, 1/2), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) #' times <- meanAbsorptionTime(mc) #' #' @export meanAbsorptionTime setGeneric("meanAbsorptionTime", function(object) { standardGeneric("meanAbsorptionTime") }) setMethod("meanAbsorptionTime", "markovchain", function(object) { .meanAbsorptionTimeRcpp(object) }) #' Absorption probabilities #' #' @description Computes the absorption probability from each transient #' state to each recurrent one (i.e. 
the (i, j) entry or (j, i), in a #' stochastic matrix by columns, represents the probability that the #' first not transient state we can go from the transient state i is j #' (and therefore we are going to be absorbed in the communicating #' recurrent class of j) #' #' @usage absorptionProbabilities(object) #' #' @param object the markovchain object #' #' @return A named vector with the expected number of steps to go from a #' transient state to any of the recurrent ones #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(c(1/2, 1/2, 0, #' 1/2, 1/2, 0, #' 0, 1/2, 1/2), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) #' absorptionProbabilities(mc) #' #' @export absorptionProbabilities setGeneric("absorptionProbabilities", function(object) { standardGeneric("absorptionProbabilities") }) setMethod("absorptionProbabilities", "markovchain", function(object) { .absorptionProbabilitiesRcpp(object) }) #' @title Check if a DTMC is regular #' #' @description Function to check wether a DTCM is regular # #' @details A Markov chain is regular if some of the powers of its matrix has all elements #' strictly positive #' #' @param object a markovchain object #' #' @return A boolean value #' #' @author Ignacio Cordón #' @references Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. #' Corollary 8.5.8, Theorem 8.5.9 #' #' #' @examples #' P <- matrix(c(0.5, 0.25, 0.25, #' 0.5, 0, 0.5, #' 0.25, 0.25, 0.5), nrow = 3) #' colnames(P) <- rownames(P) <- c("R","N","S") #' ciao <- as(P, "markovchain") #' is.regular(ciao) #' #' @seealso \code{\link{is.irreducible}} #' #' @exportMethod is.regular setGeneric("is.regular", function(object) standardGeneric("is.regular")) setMethod("is.regular", "markovchain", function(object) { .isRegularRcpp(object) }) #' Hitting probabilities for markovchain #' #' @description Given a markovchain object, #' this function calculates the probability of ever arriving from state i to j #' #' @usage hittingProbabilities(object) #' #' @param object the markovchain-class object #' #' @return a matrix of hitting probabilities #' #' @author Ignacio Cordón #' #' @references R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 #' #' @examples #' M <- matlab::zeros(5, 5) #' M[1,1] <- M[5,5] <- 1 #' M[2,1] <- M[2,3] <- 1/2 #' M[3,2] <- M[3,4] <- 1/2 #' M[4,2] <- M[4,5] <- 1/2 #' #' mc <- new("markovchain", transitionMatrix = M) #' hittingProbabilities(mc) #' #' @exportMethod hittingProbabilities setGeneric("hittingProbabilities", function(object) standardGeneric("hittingProbabilities")) setMethod("hittingProbabilities", "markovchain", function(object) { .hittingProbabilitiesRcpp(object) }) #' Mean num of visits for markovchain, starting at each state #' #' @description Given a markovchain object, this function calculates #' a matrix where the element (i, j) represents the expect number of visits #' to the state j if the chain starts at i (in a Markov chain by columns it #' would be the element (j, i) instead) #' #' @usage meanNumVisits(object) #' #' @param object the markovchain-class object #' #' @return a matrix with the expect number of visits to each state #' #' @author Ignacio Cordón #' #' @references R. Vélez, T. 
Prieto, Procesos Estocásticos, Librería UNED, 2013 #' #' @examples #' M <- matlab::zeros(5, 5) #' M[1,1] <- M[5,5] <- 1 #' M[2,1] <- M[2,3] <- 1/2 #' M[3,2] <- M[3,4] <- 1/2 #' M[4,2] <- M[4,5] <- 1/2 #' #' mc <- new("markovchain", transitionMatrix = M) #' meanNumVisits(mc) #' #' @exportMethod meanNumVisits setGeneric("meanNumVisits", function(object) standardGeneric("meanNumVisits")) setMethod("meanNumVisits", "markovchain", function(object) { .minNumVisitsRcpp(object) }) setMethod( "steadyStates", "markovchain", function(object) { .steadyStatesRcpp(object) } ) #' @exportMethod summary setGeneric("summary") # summary method for markovchain class # lists: closed, transient classes, irreducibility, absorbint, transient states setMethod("summary", signature(object = "markovchain"), function(object){ # list of closed, recurrent and transient classes outs <- .summaryKernelRcpp(object) # display name of the markovchain object cat(object@name," Markov chain that is composed by:", "\n") # number of closed classes check <- length(outs$closedClasses) cat("Closed classes:","\n") # display closed classes if(check == 0) cat("NONE", "\n") else { for(i in 1:check) cat(outs$closedClasses[[i]], "\n") } # number of recurrent classes check <- length(outs$recurrentClasses) cat("Recurrent classes:", "\n") # display recurrent classes if(check == 0) cat("NONE", "\n") else { cat("{") cat(outs$recurrentClasses[[1]], sep = ",") cat("}") if(check > 1) { for(i in 2:check) { cat(",{") cat(outs$recurrentClasses[[i]], sep = ",") cat("}") } } cat("\n") } # number of transient classes check <- length(outs$transientClasses) cat("Transient classes:","\n") # display transient classes if(check == 0) cat("NONE", "\n") else { cat("{") cat(outs$transientClasses[[1]], sep = ",") cat("}") if(check > 1) { for(i in 2:check) { cat(",{") cat(outs$transientClasses[[i]], sep = ",") cat("}") } } cat("\n") } # bool to say about irreducibility of markovchain irreducibility <- is.irreducible(object) if(irreducibility) cat("The Markov chain is irreducible", "\n") else cat("The Markov chain is not irreducible", "\n") # display absorbing states check <- absorbingStates(object) if(length(check) == 0) check <- "NONE" cat("The absorbing states are:", check ) cat("\n") # return outs # useful when user will assign the value returned invisible(outs) } ) markovchain/R/utils.R0000644000176200001440000000156213762012754014253 0ustar liggesusersprecomputeData <- function(mc) { list( object = mc, transitionMatrix = mc@transitionMatrix, states = mc@states, byrow = mc@byrow, irreducible = is.irreducible(mc), regular = is.regular(mc), canonicForm = canonicForm(mc), recurrentClasses = recurrentClasses(mc), transientClasses = transientClasses(mc), recurrentStates = recurrentStates(mc), transientStates = transientStates(mc), absorbingStates = absorbingStates(mc), hittingProbabilities = hittingProbabilities(mc), meanNumVisits = meanNumVisits(mc), meanRecurrenceTime = meanRecurrenceTime(mc), communicatingClasses = communicatingClasses(mc), steadyStates = steadyStates(mc), reachabilityMatrix = is.accessible(mc) ) } precomputeSteadyStates <- function(mc) { list( object = mc, expected = steadyStates(mc) ) }markovchain/R/zzz.R0000644000176200001440000000112313762012754013741 0ustar liggesusers# Author: Giorgio ############################################################################### # loading the markovchain package .onAttach <- function(libname, pkgname) { desc <- packageDescription(pkgname, libname) packageStartupMessage('Package: ', desc$Package, '\n', 
'Version: ', desc$Version, '\n', 'Date: ', desc$Date, '\n', 'BugReport: ', desc$BugReports, '\n') } # for unloading dynamic libraries .onUnload <- function (libpath) { library.dynam.unload("markovchain", libpath) }markovchain/R/fittingFunctions.R0000644000176200001440000006154014050510631016436 0ustar liggesusers#' Function to generate a sequence of states from homogeneous Markov chains. #' #' Provided any \code{markovchain} object, it returns a sequence of #' states coming from the underlying stationary distribution. #' #' @param n Sample size #' @param markovchain \code{markovchain} object #' @param t0 The initial state #' @param include.t0 Specify if the initial state shall be used #' @param useRCpp Boolean. Should RCpp fast implementation being used? Default is yes. #' #' @details A sequence of size n is sampled. #' #' @return A Character Vector #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @seealso \code{\link{markovchainFit}} #' #' @examples #' # define the markovchain object #' statesNames <- c("a", "b", "c") #' mcB <- new("markovchain", states = statesNames, #' transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), #' nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' # show the sequence #' outs <- markovchainSequence(n = 100, markovchain = mcB, t0 = "a") #' #' @export markovchainSequence <-function (n, markovchain, t0 = sample(markovchain@states, 1), include.t0 = FALSE, useRCpp = TRUE) { # check whether given initial state is possible state or not if (!(t0 %in% markovchain@states)) stop("Error! Initial state not defined") # call to cpp implmentation of markovchainSequence if (useRCpp) { return(.markovchainSequenceRcpp(n, markovchain, t0, include.t0)) } # R implementation of the function # create a sequence of size n initially not initialized chain <- rep(NA,n) # initial state state <- t0 # populate the sequence for (i in 1:n) { # row probabilty corresponding to the current state rowProbs <- markovchain@transitionMatrix[state, ] # select the next state outstate <- sample(size = 1, x = markovchain@states, prob = rowProbs) # store the new state chain[i] <- outstate # update the current state state <- outstate } # output out <- chain # whether to include initial state or not if (include.t0) { out <- c(t0, out) } return(out) } ################## # random sampler # ################## # check if the subsequent states are included in the previous ones # check the validity of non homogeneous markovchain list # object is a list of markovchain object .checkSequence <- function(object) { # assume non homogeneous markovchain list is valid out <- TRUE # list of one transition matrix implies valid if (length(object) == 1) { return(out) } # if number of transition matrices are more than one for (i in 2:length(object)) { # select the states which are reachable in one step if(object[[i - 1]]@byrow) { reachable <- (colSums(object[[i - 1]]@transitionMatrix) != 0) } else { reachable <- (rowSums(object[[i - 1]]@transitionMatrix) != 0) } # possible states in the previous markovchain object statesNm1 <- states(object[[i - 1]])[reachable] # possible states in the current markovchain object statesN <- states(object[[i]]) # common states intersection <- intersect(statesNm1, statesN) # condition to check whether statesNm1 is a subset of statesN or not if (setequal(intersection, statesNm1) == FALSE) { out <- FALSE break } } return(out) } #' Function to generate a sequence of states 
from homogeneous or non-homogeneous Markov chains. #' #' Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of #' states coming from the underlying stationary distribution. #' #' @param n Sample size #' @param object Either a \code{markovchain} or a \code{markovchainList} object #' @param what It specifies whether either a \code{data.frame} or a \code{matrix} #' (each rows represent a simulation) or a \code{list} is returned. #' @param useRCpp Boolean. Should RCpp fast implementation being used? Default is yes. #' @param parallel Boolean. Should parallel implementation being used? Default is yes. #' @param num.cores Number of Cores to be used #' @param ... additional parameters passed to the internal sampler #' #' @details When a homogeneous process is assumed (\code{markovchain} object) a sequence is #' sampled of size n. When a non - homogeneous process is assumed, #' n samples are taken but the process is assumed to last from the begin to the end of the #' non-homogeneous markov process. #' #' @return Character Vector, data.frame, list or matrix #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @note Check the type of input #' #' @seealso \code{\link{markovchainFit}}, \code{\link{markovchainSequence}} #' #' @examples #' # define the markovchain object #' statesNames <- c("a", "b", "c") #' mcB <- new("markovchain", states = statesNames, #' transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), #' nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' # show the sequence #' outs <- rmarkovchain(n = 100, object = mcB, what = "list") #' #' #' #define markovchainList object #' statesNames <- c("a", "b", "c") #' mcA <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mcB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mcC <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) #' #' # show the list of sequence #' rmarkovchain(100, mclist, "list") #' #' @export rmarkovchain <- function(n, object, what = "data.frame", useRCpp = TRUE, parallel = FALSE, num.cores = NULL, ...) { # check the class of the object if (class(object) == "markovchain") { out <- markovchainSequence(n = n, markovchain = object, useRCpp = useRCpp, ...) 
return(out) } if (class(object) == "markovchainList") { ####################################################### if(useRCpp && !parallel) { # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() # call fast cpp function dataList <- .markovchainListRcpp(n, object@markovchains, include.t0, t0) # format in which results to be returned if (what == "data.frame") { out <- data.frame(iteration = dataList[[1]], values = dataList[[2]]) } else { # output in matrix format # each row is an independent sequence out <- matrix(data = dataList[[2]], nrow = n, byrow = TRUE) # output in list format if (what == "list") { # outlist <- list() # for (i in 1:nrow(out)) # outlist[[i]] <- out[i, ] # out <- outlist out <- as.list(data.frame(t(out), stringsAsFactors = FALSE)) out <- unname(out) } } return(out) } ########################################################## if(useRCpp && parallel) { # Calculate the number of cores # It's not good to use all cores no_cores <- max(1,parallel::detectCores() - 1) # number of cores specified should be less than or equal to maximum cores available if((! is.null(num.cores)) && num.cores <= no_cores + 1 && num.cores >= 1) { no_cores <- num.cores } RcppParallel::setThreadOptions(no_cores) # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() dataList <- .markovchainSequenceParallelRcpp(object, n, include.t0, t0) if(what == "list") return(dataList) # dimension of matrix to be returned nrow <- length(dataList) ncol <- length(dataList[[1]]) if(what == "matrix") { out <- matrix(unlist(dataList), nrow = nrow, ncol = ncol, byrow = TRUE) # for(i in 1:nrow) out[i, ] <- dataList[[i]] return(out) } iteration <- unlist(lapply(1:nrow, rep, times = ncol)) values <- unlist(dataList) # if what id data frame # for(i in 1:nrow) { # iteration <- c(iteration, rep(i, ncol)) # values <- append(values, dataList[[i]]) # } return(data.frame(iteration = iteration, values = values)) } ########################################################## if(!useRCpp && parallel) { # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() dataList <- .markovchainSequenceParallel(n, object, t0, num.cores, include.t0) if(what == "list") return(dataList) # dimension of matrix to be returned nrow <- length(dataList) ncol <- length(dataList[[1]]) if(what == "matrix") { out <- matrix(nrow = nrow, ncol = ncol) for(i in 1:nrow) out[i, ] <- dataList[[i]] return(out) } iteration <- numeric() values <- character() # if what id data frame for(i in 1:nrow) { iteration <- append(iteration, rep(i, ncol)) values <- append(values, dataList[[i]]) } return(data.frame(iteration = iteration, values = values)) } ########################################################## # store list of markovchain object in object object <- object@markovchains # check the validity of markovchainList object verify <- .checkSequence(object = object) # show warning if sequence is invalid if (!verify) { 
warning("Warning: some states in the markovchain sequences are not contained in the following states!") } # helper vector iteration <- numeric() values <- character() # create one sequence in each iteration for (i in 1:n) { # the first iteration may include initial state sampledValues <- markovchainSequence(n = 1, markovchain = object[[1]], ...) outIter <- rep(i, length(sampledValues)) # number of markovchain objects are more than one if (length(object) > 1) { for (j in 2:length(object)) { pos2take <- length(sampledValues) # select new state of the sequence from the old state # t0 refers to the old state newVals <-markovchainSequence(n = 1, markovchain = object[[j]], t0 = sampledValues[pos2take]) # update in every iteration outIter <- c(outIter, i) sampledValues <- c(sampledValues, newVals) } } # populate the helper vectors iteration <- c(iteration, outIter) values <- c(values, sampledValues) } # defining the output if (what == "data.frame") { out <- data.frame(iteration = iteration, values = values) } else { # ouput in matrix format out <- matrix(data = values, nrow = n, byrow = TRUE) # store each row of the matrix in the list if (what == 'list') { outlist <- list() for (i in 1:nrow(out)) outlist[[i]] <- out[i, ] out <- outlist } } } return(out) } ###################################################################### # helper function to calculate one sequence .markovchainSPHelper <- function(x, t0, mclist, include.t0) { # number of transition matrices n <- length(mclist@markovchains) # take care of initial state vin <- 0 if(include.t0) vin <- 1 # a character vector to store a single sequence seq <- character(length = n + vin) if(length(t0) == 0) { stateNames <- mclist@markovchains[[1]]@states t0 <- sample(x = stateNames, size = 1, prob = rep(1 / length(stateNames), length(stateNames))) } if(include.t0) seq[1] <- t0 invisible(lapply(seq_len(n), function(i) { stateNames <<- mclist@markovchains[[i]]@states byRow <- mclist@markovchains[[i]]@byrow # check whether transition matrix follows row-wise or column-wise fashion if(byRow) prob <- mclist@markovchains[[i]]@transitionMatrix[which(stateNames == t0), ] else prob <- mclist@markovchains[[i]]@transitionMatrix[, which(stateNames == t0)] # initial state for the next transition matrix t0 <<- sample(x = stateNames, size = 1, prob = prob) # populate the sequence vector seq[i+vin] <<- t0 } )) return(seq) } # Function to generate a list of sequence of states in parallel from non-homogeneous Markov chains. # # Provided any markovchainList object, it returns a list of sequence of states coming # from the underlying stationary distribution. # # @param n Sample size # @param object markovchainList object # @param t0 Initial state # @param num.cores Number of cores # .markovchainSequenceParallel <- function(n, object, t0 = character(), num.cores = NULL, include.t0 = FALSE) { # check for the validity of non-uniform markov chain verify <- .checkSequence(object@markovchains) if (!verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!") } # Calculate the number of cores # It's not good to use all cores no_cores <- max(1,parallel::detectCores() - 1) # number of cores specified should be less than or equal to maximum cores available if((! 
is.null(num.cores)) && num.cores <= no_cores + 1 && num.cores >= 1) { no_cores <- num.cores } # Initiate cluster cl <- parallel::makeCluster(no_cores) # export the variables to be used in the helper function # parallel::clusterExport(cl, "t0") # export the variables to be used in the helper function mclist <- object # parallel::clusterExport(cl, "mclist") # list of n sequence listSeq <- tryCatch(parallel::parLapply(cl, 1:n, .markovchainSPHelper, t0, mclist, include.t0), error=function(e) e, warning=function(w) w) # release the resources parallel::stopCluster(cl) return(listSeq) } ###################################################################### # function to fit a DTMC with Laplacian Smoother .mcFitLaplacianSmooth <- function(stringchar, byrow, laplacian = 0.01) { # every element of the matrix store the number of times jth state appears just # after the ith state origNum <- createSequenceMatrix(stringchar = stringchar, toRowProbs = FALSE) # add laplacian to the sequence matrix # why? to avoid the cases where sum of row is zero newNum <- origNum + laplacian # store sum of each row in the vector newSumOfRow <- rowSums(newNum) # helper matrix to convert frequency matrix to transition matrix newDen <- matrix(rep(newSumOfRow, length(newSumOfRow)), byrow = FALSE, ncol = length(newSumOfRow)) # transition matrix transMatr <- newNum / newDen # create a markovchain object outMc <- new("markovchain", transitionMatrix = transMatr, name = "Laplacian Smooth Fit") # transpose the transition matrix if (!byrow) { outMc@transitionMatrix <- t(outMc@transitionMatrix) outMc@byrow <- FALSE } # wrap markovchain object in a list out <- list(estimate = outMc) return(out) } # function that return a Markov Chain from a given matrix of observations # .matr2Mc <- function(matrData, laplacian = 0) { # # # number of columns in the input matrix # nCols <- ncol(matrData) # # # an empty character vector to store names of possible states # uniqueVals <- character() # # # populate uniqueVals with names of states # for(i in 1:nCols) { # uniqueVals <- union(uniqueVals, unique(as.character(matrData[,i]))) # } # # # possible states in lexicographical order # uniqueVals <- sort(uniqueVals) # # # create a contingency matrix which store the number of times # # jth state appear just after the ith state # contingencyMatrix <- matrix(rep(0, length(uniqueVals)^2), ncol = length(uniqueVals)) # # # set the names of rows and columns # rownames(contingencyMatrix) <- colnames(contingencyMatrix) <- uniqueVals # # # fill the contingency matrix # for (i in 1:nrow(matrData)) { # for (j in 2:nCols) { # # state in the ith row and (j-1)th column # stateBegin <- as.character(matrData[i, j-1]) # # # index of beginning state # whichRow <- which(uniqueVals == stateBegin) # # # state in the ith row and jth column # stateEnd <- as.character(matrData[i, j]) # # # index of ending state # whichCols <- which(uniqueVals == stateEnd) # # # update the contingency matrix # contingencyMatrix[whichRow, whichCols] <- contingencyMatrix[whichRow, whichCols] + 1 # } # } # # # add laplacian correction if needed # contingencyMatrix <- contingencyMatrix + laplacian # # # take care of rows with all entries 0 # sumOfRows <- rowSums(contingencyMatrix) # for(i in 1:length(sumOfRows)) { # if(sumOfRows[i] == 0) { # contingencyMatrix[i, ] <- 1 # sumOfRows[i] <- length(sumOfRows) # } # } # # # get a transition matrix and a DTMC # transitionMatrix <- contingencyMatrix / sumOfRows # # # markov chain object to be returned # outMc <- new("markovchain", transitionMatrix = 
transitionMatrix) # # return(outMc) # }

#' @title markovchainListFit
#'
#' @description Given a data frame or a matrix (rows are observations, columns
#'   the temporal sequence), it fits a non-homogeneous discrete time Markov chain
#'   process (stored by row). In particular, a markovchainList of length ncol - 1
#'   is obtained, estimating each transition matrix from the n samples given by a
#'   pair of consecutive columns.
#'
#' @param data Either a matrix or a data.frame or a list object.
#' @param laplacian Laplacian correction (default 0).
#' @param byrow Indicates whether distinct stochastic process trajectories are stored in distinct rows.
#' @param name Optional name.
#'
#' @details If \code{data} contains \code{NAs} then the transitions containing \code{NA} will be ignored.
#' @return A list containing two slots:
#' estimate (the fitted markovchainList object)
#' name (the optional name)
#'
#' @examples
#'
#' # using holson dataset
#' data(holson)
#' # fitting a single markovchain
#' singleMc <- markovchainFit(data = holson[,2:12])
#' # fitting a markovchainList
#' mclistFit <- markovchainListFit(data = holson[, 2:12], name = "holsonMcList")
#' @export
markovchainListFit <- function(data, byrow = TRUE, laplacian = 0, name) {

  # check the format of input data
  if (!any(is.list(data),is.data.frame(data),is.matrix(data))) {
    stop("Error: data must be either a matrix or a data.frame or a list")
  }

  freqMatrixes <- list()

  # a pure list, i.e. a list and not a data frame
  if((is.list(data) == TRUE) & (is.data.frame(data)==FALSE)) {
    markovchains <- list()
    # list of frequency matrices
    freqMatrixes <- .mcListFitForList(data)
  } else {
    # if input is a data frame convert it to matrix
    if(is.data.frame(data)) {
      data <- unname(as.matrix(data))
    }

    # transpose so that trajectories are stored row-wise
    if(!byrow) {
      data <- t(data)
    }

    # number of columns in the matrix
    nCols <- ncol(data)

    # fit by columns
    freqMatrixes <- lapply(seq_len(nCols-1), function(i){
      # i-th frequency matrix, estimated from transitions between columns i and i+1
      matrData <- data[, c(i, i+1)]
      matrData[1, ] <- as.character(matrData[1, ])

      # keep the pair of columns only if at least one row is free of NA values
      validTransition <- any(apply(matrData, 1, function(x){ !any(is.na(x)) }))

      if(validTransition)
        createSequenceMatrix(matrData, toRowProbs = FALSE, sanitize = TRUE)
    })

    freqMatrixes <- freqMatrixes[ !sapply(freqMatrixes, is.null) ]
  }

  if(length(freqMatrixes) == 0) {
    return(list())
  }

  markovchains <- lapply(freqMatrixes, function(freqMatrix){
    # add laplacian correction
    freqMatrix <- freqMatrix + laplacian
    rSums <- rowSums(freqMatrix)

    # transition matrix
    tMatrix <- freqMatrix / rSums

    estMc <- new("markovchain", transitionMatrix = tMatrix)
    estMc
  })

  # create markovchainList object
  outMcList <- new("markovchainList", markovchains = markovchains)

  # wrap the object in a list
  out <- list(estimate = outMcList)

  # set the name of the markovchainList object as given in the argument
  if(!missing(name)) {
    out$estimate@name <- name
  }

  return(out)
}

#' A function to compute multinomial confidence intervals of DTMC
#'
#' @description Return confidence intervals on the estimated transition matrix, assuming a Multinomial Distribution
#'
#' @param transitionMatrix An estimated transition matrix.
#' @param countsTransitionMatrix Empirical (counts) transition matrix from which \code{transitionMatrix} was estimated.
#' @param confidencelevel confidence interval level.
#'
#' @return Two matrices containing the confidence intervals.
#'
#' @seealso \code{markovchainFit}
#'
#' @references Constructing two-sided simultaneous confidence intervals
#' for multinomial proportions for small counts in a large number of cells.
#' Journal of Statistical Software 5(6) (2000)
#'
#' @examples
#' seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a")
#' mcfit<-markovchainFit(data=seq,byrow=TRUE)
#' seqmat<-createSequenceMatrix(seq)
#' multinomialConfidenceIntervals(mcfit$estimate@transitionMatrix, seqmat, 0.95)
#' @export
multinomialConfidenceIntervals <- function(transitionMatrix, countsTransitionMatrix, confidencelevel = 0.95) {
  out <- .multinomialCIRcpp(transMat = transitionMatrix, seqMat = countsTransitionMatrix, confidencelevel = confidencelevel)
  return(out)
}

#' Return the joint pdf of the number of visits to the various states of the DTMC
#'
#' @description This function returns the joint probability distribution of the number of visits to
#' the various states of the DTMC during the first N steps.
#'
#' @usage noofVisitsDist(markovchain,N,state)
#'
#' @param markovchain a markovchain-class object
#' @param N number of steps
#' @param state the initial state
#'
#' @details
#' This function returns the joint probability distribution of the number of visits to
#' the various states of the DTMC during the first N steps.
#'
#' @return a numeric vector containing the above described probability distribution.
#'
#' @author Vandit Jain
#'
#' @examples
#' transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
#' simpleMc<-new("markovchain", states=c("a","b"),
#'               transitionMatrix=transMatr,
#'               name="simpleMc")
#' noofVisitsDist(simpleMc,5,"a")
#'
#' @export
noofVisitsDist <- function(markovchain, N = 5, state) {

  if(class(markovchain)!="markovchain")
    stop("please provide a valid markovchain-class object")

  if(N <= 0)
    stop("please enter a positive number of steps")

  # the transition matrix
  Tmatrix <- markovchain@transitionMatrix

  # character vector of states of the markovchain
  stateNames <- states(markovchain)

  # index of the initial state
  i <- which(stateNames == state)
  if(length(i) == 0)
    stop("please provide a valid initial state")

  # call to the Rcpp implementation of the function
  out <- .noofVisitsDistRCpp(Tmatrix,i,N)

  # adds state names to the output vector
  names(out) <- stateNames
  out <- c(out)

  return(out)
}
markovchain/R/data.R0000644000176200001440000001301713762012754014022 0ustar liggesusers#' @name sales
#'
#' @title Sales Demand Sequences
#'
#' @description Sales demand sequences of five products (A, B, C, D, E).
#' Each row corresponds to a sequence. First row corresponds to Sequence A,
#' Second row to Sequence B and so on.
#'
#' @usage data("sales")
#'
#' @details The example can be used to fit a high order multivariate
#' Markov chain.
#'
#' @examples
#' data("sales")
#' # fitHighOrderMultivarMC(seqMat = sales, order = 2, Norm = 2)
#'
"sales"

#' @name blanden
#'
#' @title Mobility between income quartiles
#'
#' @description This table shows mobility between income quartiles for fathers and sons born in the 1970 cohort
#'
#' @usage data(blanden)
#'
#' @details The rows represent fathers' income quartile when the son is aged 16, whilst the columns represent sons' income quartiles when he is aged 30 (in 2000).
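#' Row-normalising these counts (for instance through the coercion to a
#' \code{markovchain} object shown in the examples) yields the estimated
#' father-to-son quartile transition probabilities.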
#' #' @source Personal reworking #' #' @references Jo Blanden, Paul Gregg and Stephen Machin, Intergenerational Mobility in Europe and North America, Center for Economic Performances (2005) #' #' @examples #' data(blanden) #' mobilityMc<-as(blanden, "markovchain") "blanden" #' @name craigsendi #' #' @title CD4 cells counts on HIV Infects between zero and six month #' #' @description This is the table shown in Craig and Sendi paper showing zero and six month CD4 cells count in six brakets #' #' @usage data(craigsendi) #' #' @format #' The format is: #' table [1:3, 1:3] 682 154 19 33 64 19 25 47 43 #' - attr(*, "dimnames")=List of 2 #' ..$ : chr [1:3] "0-49" "50-74" "75-UP" #' ..$ : chr [1:3] "0-49" "50-74" "75-UP" #' #' @details Rows represent counts at the beginning, cols represent counts after six months. #' #' @source Estimation of the transition matrix of a discrete time Markov chain, Bruce A. Craig and Peter P. Sendi, Health Economics 11, 2002. #' #' @references see source #' #' @examples #' data(craigsendi) #' csMc<-as(craigsendi, "markovchain") #' steadyStates(csMc) "craigsendi" #' @name holson #' #' @title Holson data set #' #' @description A data set containing 1000 life histories trajectories and a categorical status (1,2,3) observed on eleven evenly spaced steps. #' #' @usage data(holson) #' #' @format #' A data frame with 1000 observations on the following 12 variables. #' \describe{ #' \item{\code{id}}{unique id} #' \item{\code{time1}}{observed status at i-th time} #' \item{\code{time2}}{observed status at i-th time} #' \item{\code{time3}}{observed status at i-th time} #' \item{\code{time4}}{observed status at i-th time} #' \item{\code{time5}}{observed status at i-th time} #' \item{\code{time6}}{observed status at i-th time} #' \item{\code{time7}}{observed status at i-th time} #' \item{\code{time8}}{observed status at i-th time} #' \item{\code{time9}}{observed status at i-th time} #' \item{\code{time10}}{observed status at i-th time} #' \item{\code{time11}}{observed status at i-th time} #' } #' #' @details The example can be used to fit a \code{markovchain} or a \code{markovchainList} object. #' #' @source Private communications #' #' @references Private communications #' #' @examples #' data(holson) #' head(holson) "holson" #' @name kullback #' #' @title Example from Kullback and Kupperman Tests for Contingency Tables #' #' @format A list containing two 6x6 non - negative integer matrices #' #' @usage data(kullback) #' #' @description A list of two matrices representing raw transitions between two states "kullback" #' @name rain #' #' @title Alofi island daily rainfall #' #' @description Rainfall measured in Alofi Island #' #' @usage data(rain) #' #' @format #' A data frame with 1096 observations on the following 2 variables. #' \describe{ #' \item{\code{V1}}{a numeric vector, showing original coding} #' \item{\code{rain}}{a character vector, showing daily rainfall millilitres brackets} #' } #' #' @source Avery Henderson #' #' @references Avery Henderson, Fitting markov chain models on discrete time series such as DNA sequences #' #' @examples #' data(rain) #' rainMc<-markovchainFit(data=rain$rain) "rain" #' @name preproglucacon #' #' @title Preprogluccacon DNA protein bases sequences #' #' @description Sequence of bases for preproglucacon DNA protein #' #' @usage data(preproglucacon) #' #' @format #' A data frame with 1572 observations on the following 2 variables. 
#' \describe{ #' \item{\code{V1}}{a numeric vector, showing original coding} #' \item{\code{preproglucacon}}{a character vector, showing initial of DNA bases (Adenine, Cytosine, Guanine, Thymine)} #' } #' #' @source Avery Henderson #' #' @references Averuy Henderson, Fitting markov chain models on discrete time series such as DNA sequences #' #' @examples #' data(preproglucacon) #' preproglucaconMc<-markovchainFit(data=preproglucacon$preproglucacon) "preproglucacon" #' @name tm_abs #' #' @title Single Year Corporate Credit Rating Transititions #' #' @description Matrix of Standard and Poor's Global Corporate Rating Transition Frequencies 2000 (NR Removed) #' #' @usage data(tm_abs) #' #' @format #' The format is: #' num [1:8, 1:8] 17 2 0 0 0 0 0 0 1 455 ... #' - attr(*, "dimnames")=List of 2 #' ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... #' ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... #' #' @references #' European Securities and Markets Authority, 2016 #' https://cerep.esma.europa.eu/cerep-web/statistics/transitionMatrice.xhtml #' #' @examples #' data(tm_abs) "tm_abs" markovchain/R/RcppExports.R0000644000176200001440000005013714050513011015365 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 .isGenRcpp <- function(gen) { .Call(`_markovchain_isGen`, gen) } #' @name generatorToTransitionMatrix #' @title Function to obtain the transition matrix from the generator #' @description The transition matrix of the embedded DTMC is inferred from the CTMC's generator #' #' @usage generatorToTransitionMatrix(gen, byrow = TRUE) #' #' @param gen The generator matrix #' @param byrow Flag to determine if rows (columns) sum to 0 #' @return Returns the transition matrix. #' #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. #' Anderson, University of Wisconsin at Madison #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{rctmc}},\code{\link{ctmc-class}} #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' generatorToTransitionMatrix(gen) #' #' @export generatorToTransitionMatrix <- function(gen, byrow = TRUE) { .Call(`_markovchain_generatorToTransitionMatrix`, gen, byrow) } #' @name ctmcFit #' @title Function to fit a CTMC #' @description This function fits the underlying CTMC give the state #' transition data and the transition times using the maximum likelihood #' method (MLE) #' @usage ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) #' @param data It is a list of two elements. The first element is a character #' vector denoting the states. The second is a numeric vector denoting the #' corresponding transition times. #' @param byrow Determines if the output transition probabilities of the #' underlying embedded DTMC are by row. #' @param name Optional name for the CTMC. #' @param confidencelevel Confidence level for the confidence interval #' construnction. #' @return It returns a list containing the CTMC object and the confidence intervals. #' #' @details Note that in data, there must exist an element wise corresponding #' between the two elements of the list and that data[[2]][1] is always 0. 
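#' In other words, \code{data[[1]]} holds the sequence of visited states and
#' \code{data[[2]]} the (cumulative) time points at which each of those states
#' is entered, as illustrated in the example below.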
#' @references Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{rctmc}} #' #' @examples #' data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) #' ctmcFit(data) #' #' @export #' ctmcFit <- function(data, byrow = TRUE, name = "", confidencelevel = 0.95) { .Call(`_markovchain_ctmcFit`, data, byrow, name, confidencelevel) } .ExpectedTimeRCpp <- function(x, y) { .Call(`_markovchain_ExpectedTimeRcpp`, x, y) } .probabilityatTRCpp <- function(y) { .Call(`_markovchain_probabilityatTRCpp`, y) } .impreciseProbabilityatTRCpp <- function(C, i, t, s, error) { .Call(`_markovchain_impreciseProbabilityatTRCpp`, C, i, t, s, error) } #' @export seq2freqProb <- function(sequence) { .Call(`_markovchain_seq2freqProb`, sequence) } #' @export seq2matHigh <- function(sequence, order) { .Call(`_markovchain_seq2matHigh`, sequence, order) } .markovchainSequenceRcpp <- function(n, markovchain, t0, include_t0 = FALSE) { .Call(`_markovchain_markovchainSequenceRcpp`, n, markovchain, t0, include_t0) } .markovchainListRcpp <- function(n, object, include_t0 = FALSE, t0 = character()) { .Call(`_markovchain_markovchainListRcpp`, n, object, include_t0, t0) } .markovchainSequenceParallelRcpp <- function(listObject, n, include_t0 = FALSE, init_state = character()) { .Call(`_markovchain_markovchainSequenceParallelRcpp`, listObject, n, include_t0, init_state) } #' @rdname markovchainFit #' #' @export createSequenceMatrix <- function(stringchar, toRowProbs = FALSE, sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain_createSequenceMatrix`, stringchar, toRowProbs, sanitize, possibleStates) } .mcListFitForList <- function(data) { .Call(`_markovchain_mcListFitForList`, data) } .matr2Mc <- function(matrData, laplacian = 0, sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain__matr2Mc`, matrData, laplacian, sanitize, possibleStates) } .list2Mc <- function(data, laplacian = 0, sanitize = FALSE) { .Call(`_markovchain__list2Mc`, data, laplacian, sanitize) } #' @name inferHyperparam #' @title Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set #' @description Since the Bayesian inference approach implemented in the package is based on conjugate priors, #' hyperparameters must be provided to model the prior probability distribution of the chain #' parameters. The hyperparameters are inferred from a given a priori matrix under the assumption #' that the matrix provided corresponds to the mean (expected) values of the chain parameters. A #' scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred #' from a data set. #' #' @param transMatr A valid transition matrix, with dimension names. #' @param scale A vector of scaling factors, each element corresponds to the row names of the provided transition #' matrix transMatr, in the same order. #' @param data A data set from which the hyperparameters are inferred. #' #' @details transMatr and scale need not be provided if data is provided. #' @return Returns the hyperparameter matrix in a list. #' #' @note The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, #' and the elements in the matrix are correspondingly permuted. #' #' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R #' package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi, Giorgio Spedicato #' @seealso \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} #' #' @examples #' data(rain, package = "markovchain") #' inferHyperparam(data = rain$rain) #' #' weatherStates <- c("sunny", "cloudy", "rain") #' weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, #' 0.3, 0.4, 0.3, #' 0.2, 0.4, 0.4), #' byrow = TRUE, nrow = 3, #' dimnames = list(weatherStates, weatherStates)) #' inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) #' #' @export #' inferHyperparam <- function(transMatr = matrix(), scale = numeric(), data = character()) { .Call(`_markovchain_inferHyperparam`, transMatr, scale, data) } #' @name markovchainFit #' @title Function to fit a discrete Markov chain #' @description Given a sequence of states arising from a stationary state, #' it fits the underlying Markov chain distribution using either MLE (also using a #' Laplacian smoother), bootstrap or by MAP (Bayesian) inference. #' #' @param data It can be a character vector or a {n x n} matrix or a {n x n} data frame or a list #' @param method Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace" #' @param byrow it tells whether the output Markov chain should show the transition probabilities by row. #' @param nboot Number of bootstrap replicates in case "bootstrap" is used. #' @param laplacian Laplacian smoothing parameter, default zero. It is only used when "laplace" method #' is chosen. #' @param name Optional character for name slot. #' @param parallel Use parallel processing when performing Boostrap estimates. #' @param confidencelevel \deqn{\alpha} level for conficence intervals width. #' Used only when \code{method} equal to "mle". #' @param confint a boolean to decide whether to compute Confidence Interval or not. #' @param hyperparam Hyperparameter matrix for the a priori distribution. If none is provided, #' default value of 1 is assigned to each parameter. This must be of size #' {k x k} where k is the number of states in the chain and the values #' should typically be non-negative integers. #' @param stringchar It can be a {n x n} matrix or a character vector or a list #' @param toRowProbs converts a sequence matrix into a probability matrix #' @param sanitize put 1 in all rows having rowSum equal to zero #' @param possibleStates Possible states which are not present in the given sequence #' #' @details Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} #' contain \code{NAs}, the related \code{NA} containing transitions will be ignored. #' #' @return A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix #' of standards deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method #' is used, the lower and upper confidence bounds are returned along with the standard error. #' The "map" method also returns the expected value of the parameters with respect to the #' posterior distribution. #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, #' and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, #' Alfred Hubler, Santa Fe Institute #' #' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R #' package version 0.2.5 #' #' @author Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi #' @note This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". #' In addition, parallel facility is not complete, involving only a part of the bootstrap process. #' When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is #' currently available. #' #' @seealso \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} #' @examples #' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", #' "b", "b", "b", "a") #' sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) #' mcFitMLE <- markovchainFit(data = sequence) #' mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") #' #' na.sequence <- c("a", NA, "a", "b") #' # There will be only a (a,b) transition #' na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) #' mcFitMLE <- markovchainFit(data = na.sequence) #' #' # data can be a list of character vectors #' sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) #' mcFitMap <- markovchainFit(sequences, method = "map") #' mcFitMle <- markovchainFit(sequences, method = "mle") #' @rdname markovchainFit #' #' @export #' markovchainFit <- function(data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, confint = TRUE, hyperparam = matrix(), sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain_markovchainFit`, data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, confint, hyperparam, sanitize, possibleStates) } .noofVisitsDistRCpp <- function(matrix, i, N) { .Call(`_markovchain_noofVisitsDistRCpp`, matrix, i, N) } .multinomialCIForRowRcpp <- function(x, confidencelevel) { .Call(`_markovchain_multinomialCIForRow`, x, confidencelevel) } .multinomialCIRcpp <- function(transMat, seqMat, confidencelevel) { .Call(`_markovchain_multinomCI`, transMat, seqMat, confidencelevel) } .commClassesKernelRcpp <- function(P) { .Call(`_markovchain_commClassesKernel`, P) } .communicatingClassesRcpp <- function(object) { .Call(`_markovchain_communicatingClasses`, object) } .transientStatesRcpp <- function(object) { .Call(`_markovchain_transientStates`, object) } .recurrentStatesRcpp <- function(object) { .Call(`_markovchain_recurrentStates`, object) } .recurrentClassesRcpp <- function(object) { .Call(`_markovchain_recurrentClasses`, object) } .transientClassesRcpp <- function(object) { .Call(`_markovchain_transientClasses`, object) } .reachabilityMatrixRcpp <- function(obj) { .Call(`_markovchain_reachabilityMatrix`, obj) } .isAccessibleRcpp <- function(obj, from, to) { .Call(`_markovchain_isAccessible`, obj, from, to) } .summaryKernelRcpp <- function(object) { .Call(`_markovchain_summaryKernel`, object) } .firstpassageKernelRcpp <- function(P, i, n) { .Call(`_markovchain_firstpassageKernel`, P, i, n) } .firstPassageMultipleRCpp <- function(P, i, setno, n) { .Call(`_markovchain_firstPassageMultipleRCpp`, P, i, setno, n) } .expectedRewardsRCpp <- function(matrix, n, rewards) { .Call(`_markovchain_expectedRewardsRCpp`, matrix, n, rewards) } .expectedRewardsBeforeHittingARCpp <- function(matrix, s0, rewards, n) { .Call(`_markovchain_expectedRewardsBeforeHittingARCpp`, matrix, s0, rewards, n) } .gcdRcpp <- function(a, b) { .Call(`_markovchain_gcd`, a, b) } #' @rdname structuralAnalysis #' #' @export period <- function(object) { 
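    # the period (greatest common divisor of the lengths of all return paths to a
    # state) is computed by the compiled C++ routine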
.Call(`_markovchain_period`, object) } #' @title predictiveDistribution #' #' @description The function computes the probability of observing a new data #' set, given a data set #' @usage predictiveDistribution(stringchar, newData, hyperparam = matrix()) #' #' @param stringchar This is the data using which the Bayesian inference is #' performed. #' @param newData This is the data whose predictive probability is computed. #' @param hyperparam This determines the shape of the prior distribution of the #' parameters. If none is provided, default value of 1 is assigned to each #' parameter. This must be of size kxk where k is the number of states in the #' chain and the values should typically be non-negative integers. #' @return The log of the probability is returned. #' #' @details The underlying method is Bayesian inference. The probability is #' computed by averaging the likelihood of the new data with respect to the #' posterior. Since the method assumes conjugate priors, the result can be #' represented in a closed form (see the vignette for more details), which is #' what is returned. #' @references #' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, #' and Out-of-Class Modeling, Christopher C. Strelioff, James P. #' Crutchfield, Alfred Hubler, Santa Fe Institute #' #' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov #' Chains. R package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{markovchainFit}} #' @examples #' sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", #' "b", "b", "b", "a") #' hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) #' predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) #' hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] #' predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) #' predProb2==predProb #' @export #' predictiveDistribution <- function(stringchar, newData, hyperparam = matrix()) { .Call(`_markovchain_predictiveDistribution`, stringchar, newData, hyperparam) } #' @title priorDistribution #' #' @description Function to evaluate the prior probability of a transition #' matrix. It is based on conjugate priors and therefore a Dirichlet #' distribution is used to model the transitions of each state. #' @usage priorDistribution(transMatr, hyperparam = matrix()) #' #' @param transMatr The transition matrix whose probability is the parameter of #' interest. #' @param hyperparam The hyperparam matrix (optional). If not provided, a #' default value of 1 is assumed for each and therefore the resulting #' probability distribution is uniform. #' @return The log of the probabilities for each state is returned in a numeric #' vector. Each number in the vector represents the probability (log) of #' having a probability transition vector as specified in corresponding the #' row of the transition matrix. #' #' @details The states (dimnames) of the transition matrix and the hyperparam #' may be in any order. #' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First #' Order Markov Chains. R package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi, Giorgio Spedicato #' #' @note This function can be used in conjunction with inferHyperparam. 
For #' example, if the user has a prior data set and a prior transition matrix, #' he can infer the hyperparameters using inferHyperparam and then compute #' the probability of their prior matrix using the inferred hyperparameters #' with priorDistribution. #' @seealso \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} #' #' @examples #' priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), #' nrow = 2, #' dimnames = list(c("a", "b"), c("a", "b"))), #' matrix(c(2, 2, 2, 2), #' nrow = 2, #' dimnames = list(c("a", "b"), c("a", "b")))) #' @export priorDistribution <- function(transMatr, hyperparam = matrix()) { .Call(`_markovchain_priorDistribution`, transMatr, hyperparam) } .hittingProbabilitiesRcpp <- function(object) { .Call(`_markovchain_hittingProbabilities`, object) } .canonicFormRcpp <- function(obj) { .Call(`_markovchain_canonicForm`, obj) } .steadyStatesRcpp <- function(obj) { .Call(`_markovchain_steadyStates`, obj) } .absorbingStatesRcpp <- function(obj) { .Call(`_markovchain_absorbingStates`, obj) } .isIrreducibleRcpp <- function(obj) { .Call(`_markovchain_isIrreducible`, obj) } .isRegularRcpp <- function(obj) { .Call(`_markovchain_isRegular`, obj) } .meanAbsorptionTimeRcpp <- function(obj) { .Call(`_markovchain_meanAbsorptionTime`, obj) } .absorptionProbabilitiesRcpp <- function(obj) { .Call(`_markovchain_absorptionProbabilities`, obj) } .meanFirstPassageTimeRcpp <- function(obj, destination) { .Call(`_markovchain_meanFirstPassageTime`, obj, destination) } .meanRecurrenceTimeRcpp <- function(obj) { .Call(`_markovchain_meanRecurrenceTime`, obj) } .minNumVisitsRcpp <- function(obj) { .Call(`_markovchain_meanNumVisits`, obj) } .isProbability <- function(prob) { .Call(`_markovchain_isProb`, prob) } .isStochasticMatrix <- function(m, byrow) { .Call(`_markovchain_isStochasticMatrix`, m, byrow) } .isProbabilityVector <- function(prob) { .Call(`_markovchain_isProbVector`, prob) } .testthatIsAccesibleRcpp <- function(obj) { .Call(`_markovchain_checkIsAccesibleMethod`, obj) } .approxEqualMatricesRcpp <- function(a, b) { .Call(`_markovchain_approxEqual`, a, b) } .testthatIsPartitionRcpp <- function(commClasses, states) { .Call(`_markovchain_isPartition`, commClasses, states) } .testthatAreHittingRcpp <- function(probs, hitting, byrow) { .Call(`_markovchain_areHittingProbabilities`, probs, hitting, byrow) } .testthatAreMeanNumVisitsRcpp <- function(probs, numVisits, hitting, byrow) { .Call(`_markovchain_areMeanNumVisits`, probs, numVisits, hitting, byrow) } .testthatRecurrentHittingRcpp <- function(recurrentClasses, hitting, states, byrow) { .Call(`_markovchain_recurrentHitting`, recurrentClasses, hitting, states, byrow) } .testthatHittingAreOneRcpp <- function(matrix) { .Call(`_markovchain_hittingProbsAreOne`, matrix) } .testthatAbsorbingAreRecurrentClassRcpp <- function(absorbingStates, recurrentClasses) { .Call(`_markovchain_absorbingAreRecurrentClass`, absorbingStates, recurrentClasses) } markovchain/R/hommc.R0000644000176200001440000002554613762012754014226 0ustar liggesusers#' An S4 class for representing High Order Multivariate Markovchain (HOMMC) #' #' @slot order an integer equal to order of Multivariate Markovchain #' @slot states a vector of states present in the HOMMC model #' @slot P array of transition matrices #' @slot Lambda a vector which stores the weightage of each transition matrices in P #' @slot byrow if FALSE each column sum of transition matrix is 1 else row sum = 1 #' @slot name a name given to hommc #' #' @author Giorgio Spedicato, Deepak Yadav #' #' 
@examples #' statesName <- c("a", "b") #' #' P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) #' P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) #' P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) #' P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) #' P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) #' #' Lambda <- c(0.8, 0.2, 0.3, 0.7) #' #' ob <- new("hommc", order = 1, states = statesName, P = P, #' Lambda = Lambda, byrow = FALSE, name = "FOMMC") #' #'@export hommc <- setClass("hommc", slots = list(order = "numeric", states = "character", P = "array", Lambda = "numeric", byrow = "logical", name = "character") ) # internal method to show hommc object in informative way .showHommc <- function(object) { # whether data in transition matrices are stored in column-wise or row-wise fashion if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } # display order and unique states cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") # display transition matrices and the corresponding lambdas n <- object@order s <- sqrt((dim(object@P))[3]/n) for(i in 1:s) { for(j in 1:s) { # t is the index of transition matrix for transition from i sequence to j sequence # order of transition matrices in P is P1{1,1},P2{1,1}..Pn{1,1},P1{1,2}....Pn{s,s} t <- n * s * (i-1) + (j-1) * n for(k in 1:n) { cat("Lambda", k, "(", i, ",", j, ") : ", object@Lambda[t+k],"\n", sep = "") cat("P", k, "(", i, ",", j, ") : \n", sep = "") print(object@P[, , t+k]) cat("\n") } } } } #' @title Function to display the details of hommc object #' @description This is a convenience function to display the slots of hommc object #' in proper format #' #' @param object An object of class hommc #' #' @rdname hommc-show #' @export setMethod("show", "hommc", function(object){ .showHommc(object) } ) # all transition matrices # n*s*s n = order s = number of categorical sequences # verified using two examples from research paper .allTransMat <- function(data, order = 2) { n <- order # order uelement <- sort(unique(as.character(data))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(data) # number of categorical sequence lseq <- ncol(data) # length of each categorical sequence # store all transition matrices allTmat <- array(dim = c(length(uelement), length(uelement), n*s*s), dimnames = list(uelement, uelement)) t <- 1 # help for(i in 1:s) { for(j in 1:s) { x <- data[j, ] # jth sequence y <- data[i, ] # ith sequence # jumps for(h in 1:n) { # column wise allTmat[ , , t] <- t(createSequenceMatrix(matrix(c(x[1:(lseq-h)], y[-(1:h)]), ncol = 2, byrow = FALSE), toRowProbs = TRUE, possibleStates = uelement, sanitize = TRUE)) t <- t + 1 } } } return(allTmat) } # distribution of each categorical sequence based on the frequency # verified using two examples from research paper .allFreqProbMat <- function(data) { uelement <- sort(unique(as.character(data))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(data) # number of categorical sequence # frequency based probability for all sequences freqMat <- array(0, dim = c(m, 1, s), dimnames = list(uelement)) for(i in 1:s) { idata <- data[i, ] # ith categorical sequence # populate frequency matrix for(j in idata) { freqMat[j, 1, i] <- freqMat[j, 1, i] + 1 } # normalization freqMat[, , i] <- freqMat[, , i] / sum(freqMat[, , i]) } 
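  # at this point freqMat[, , i] holds the empirical state distribution of the
  # i-th categorical sequence (an m x 1 column vector summing to one)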
return(freqMat) } # objective function to pass to solnp .fn3 <- function(params, ...) { hdata <- list(...) # calculate error error <- 0 # number of categorical sequence s <- hdata$s # order n <- hdata$n # number of uniq states || dimension of t-matrix m <- hdata$m # array of transition matrices allTmat <- hdata$allTmat # all frequency matrix freqMat <- hdata$freqMat # norm Norm <- hdata$Norm for(i in 1:s) { helper <- matrix(0, nrow = m*n, ncol = 1) for(j in 1:s) { helper2 <- matrix(0, nrow = m, ncol = 1) y <- n * (j - 1 + s * (i - 1)) for(k in 1:n) { helper2 <- helper2 + params[y + k] * (allTmat[ , , y + k] %*% matrix(freqMat[ , , j])) } helper[1:m, ] <- helper[1:m, ] + helper2 if(i == j && n>= 2) { for(k in 2:n) { p <- (k - 1) * m helper[(p + 1):(p + m)] <- freqMat[ , , j] } } } error <- error + sum(abs((helper - freqMat[ , , i]) ^ Norm)) } return(error ^ (1 / Norm)) } # equality constraint function to pass to solnp .eqn3 <- function(params, ...) { hdata <- list(...) # number of categorical sequence s <- hdata$s # order n <- hdata$n toReturn <- numeric() for(i in 1:s) { toReturn[i] <- sum(params[((i - 1) * n * s + 1):(i * n * s)]) } return(toReturn) } #' Function to fit Higher Order Multivariate Markov chain #' #' @description Given a matrix of categorical sequences it fits #' Higher Order Multivariate Markov chain. #' #' @param seqMat a matrix or a data frame where each column #' is a categorical sequence #' @param order Multivariate Markov chain order. Default is 2. #' @param Norm Norm to be used. Default is 2. #' #' @return an hommc object #' #' @examples #' data <- matrix(c('2', '1', '3', '3', '4', '3', '2', '1', '3', '3', '2', '1', #' c('2', '4', '4', '4', '4', '2', '3', '3', '1', '4', '3', '3')), #' ncol = 2, byrow = FALSE) #' #' fitHighOrderMultivarMC(data, order = 2, Norm = 2) #' #' @references W.-K. Ching et al. / Linear Algebra and its Applications #' #' @author Giorgio Spedicato, Deepak Yadav #' #' @export fitHighOrderMultivarMC <- function(seqMat, order = 2, Norm = 2) { message("This function is experimental") if(is.data.frame(seqMat) == TRUE) { seqMat <- as.matrix(seqMat) } seqMat <- t(seqMat) # array of transition matrices allTmat <- .allTransMat(seqMat, order = order) # array of freq probability freqMat <- .allFreqProbMat(seqMat) n <- order # order uelement <- sort(unique(as.character(seqMat))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(seqMat) # number of categorical sequence lmbda <- rep(1 / (n * s), n * s * s) fit <- Rsolnp::solnp(pars = lmbda, fun = .fn3, eqfun = .eqn3, eqB = rep(1, s), LB = rep(0, n * s * s), control = list(trace = 0), allTmat = allTmat, freqMat = freqMat, n = n, m = m, s = s, Norm = Norm) return(new("hommc", order = order, Lambda = fit$pars, P = allTmat, states = uelement, byrow = FALSE)) } #' Simulate a higher order multivariate markovchain #' #' @description #' This function provides a prediction of states for a higher order #' multivariate markovchain object #' #' @usage predictHommc(hommc,t,init) #' #' @param hommc a hommc-class object #' @param t no of iterations to predict #' @param init matrix of previous states size of which depends on hommc #' #' @details #' The user is required to provide a matrix of giving n previous coressponding #' every categorical sequence. Dimensions of the init are s X n, where s is #' number of categorical sequences and n is order of the homc. 
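#'
#' @examples
#' # A minimal usage sketch: the toy data and the fitted object below simply
#' # reuse the fitHighOrderMultivarMC() example values and are illustrative
#' # assumptions, not part of the original documentation.
#' data <- matrix(c('2', '1', '3', '3', '4', '3', '2', '1', '3', '3', '2', '1',
#'                  '2', '4', '4', '4', '4', '2', '3', '3', '1', '4', '3', '3'),
#'                ncol = 2, byrow = FALSE)
#' hommcFit <- fitHighOrderMultivarMC(data, order = 2, Norm = 2)
#' # init has one row per categorical sequence (s = 2) and one column per lag (n = 2)
#' predictHommc(hommcFit, t = 3, init = matrix(c('2', '1', '3', '3'), nrow = 2))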
#' #' @return #' The function returns a matrix of size s X t displaying t predicted states #' in each row coressponding to every categorical sequence. #' #' @author Vandit Jain #' #' #' @export predictHommc <- function(hommc, t, init) { ## order of markovchain n <- hommc@order ## number of categorical sequences s <- sqrt((dim(hommc@P))[3]/n) ## list of states states <- hommc@states ## size of set of all possible states m <- length(states) ## if initial states not provided take statndard example if(missing(init)) { init <- matrix(rep(states[1],s*n),nrow = s,byrow = TRUE) } if(!all(dim(init) == c(s,n))){ stop("Please provide sufficient number of previous states") } if(class(hommc)!= "hommc") { stop("Please provide a valid hommc-class object") } if(t <=0) stop("T should be a positive integer") for(i in 1:s) { for(j in 1:n) { if(!(init[i,j] %in% states)) stop("invalid states in provided state matrix init") } } ## initialize result matrix result <- matrix(NA,nrow = s,ncol = t) ## runs loop according to hommc class structure for(i in 1:t) { for(j in 1:s) { ## initialises probability according rowProbs <- rep(0,m) ## probability for current sequence depends all sequence for(k in 1:s) { ## gets index of coressponding in the 3-D array P # index is the index of transition matrix for transition from i sequence to j sequence # order of transition matrices in P is P1{1,1},P2{1,1}..Pn{1,1},P1{1,2}....Pn{s,s} index <- n * s * (j-1) + n * (k-1) ## iterates for all order 1 to n for(h in 1:n) { prev <- init[j,n-h+1] label <- which(prev == states) rowProbs <- rowProbs + hommc@Lambda[h + index] * hommc@P[label, ,h + index] } } ## uses sample function from base package curr <- sample(size = 1, x = states, prob = rowProbs) ## changes init for next t iteration for(temp in 2:n) { if(temp <= n) init[j,temp-1] = init[j,temp] } init[j,n] = curr; result[j,i] = curr; } } ## returns result return(result) } markovchain/R/fitHigherOrder.R0000644000176200001440000000527713762012754016027 0ustar liggesusers#' @title Higher order Markov Chains class #' @name HigherOrderMarkovChain-class #' @description The S4 class that describes \code{HigherOrderMarkovChain} objects. #' #' @export setClass("HigherOrderMarkovChain", #class name representation( states = "character", order = "numeric", transitions = "list", name = "character" ) # , prototype(states = c("a","b"), byrow = TRUE, # prototypizing # transitionMatrix=matrix(data = c(0,1,1,0), # nrow=2, byrow=TRUE, dimnames=list(c("a","b"), c("a","b"))), # name="Unnamed Markov chain") ) # objective function to pass to solnp .fn1=function(params) { QX <- get("QX") X <- get("X") error <- 0 for (i in 1:length(QX)) { error <- error+(params[i] * QX[[i]]-X) } return(sum(error^2)) } # equality constraint function to pass to solnp .eqn1=function(params){ return(sum(params)) } #' @name fitHigherOrder #' @aliases seq2freqProb seq2matHigh #' @title Functions to fit a higher order Markov chain #' #' @description Given a sequence of states arising from a stationary state, it #' fits the underlying Markov chain distribution with higher order. #' @usage #' fitHigherOrder(sequence, order = 2) #' seq2freqProb(sequence) #' seq2matHigh(sequence, order) #' #' @param sequence A character list. #' @param order Markov chain order #' @return A list containing lambda, Q, and X. #' #' @references #' Ching, W. K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov #' chains. In Markov Chains (pp. 141-176). Springer US. #' #' Ching, W. K., Ng, M. K., & Fung, E. S. (2008). 
Higher-order multivariate #' Markov chains and their applications. Linear Algebra and its Applications, #' 428(2), 492-507. #' #' @author Giorgio Spedicato, Tae Seung Kang #' @note This function is written in Rcpp. #' #' @examples #' sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b", #' "c", "a", "b", "c", "a", "b", "a", "b") #' fitHigherOrder(sequence) #' #' @export fitHigherOrder<-function(sequence, order = 2) { # prbability of each states of sequence X <- seq2freqProb(sequence) # store h step transition matrix Q <- list() QX <- list() for(o in 1:order) { Q[[o]] <- seq2matHigh(sequence, o) QX[[o]] <- Q[[o]]%*%X } environment(.fn1) <- environment() params <- rep(1/order, order) model <- Rsolnp::solnp(params, fun=.fn1, eqfun=.eqn1, eqB=1, LB=rep(0, order), control=list(trace=0)) lambda <- model$pars out <- list(lambda=lambda, Q=Q, X=X) return(out) } markovchain/R/ctmcClassesAndMethods.R0000644000176200001440000003314713762012754017332 0ustar liggesusers#' @title Continuous time Markov Chains class #' @name ctmc-class #' @aliases dim,ctmc-method initialize,ctmc_method states,ctmc-method #' steadyStates,ctmc-method plot,ctmc,missing-method #' @description The S4 class that describes \code{ctmc} (continuous #' time Markov chain) objects. #' #' @param states Name of the states. Must be the same of #' \code{colnames} and \code{rownames} of the generator matrix #' @param byrow TRUE or FALSE. Indicates whether the given matrix is #' stochastic by rows or by columns #' @param generator Square generator matrix #' @param name Optional character name of the Markov chain #' #' @section Methods: #' #' \describe{ #' \item{dim}{\code{signature(x = "ctmc")}: method to get the size} #' \item{initialize}{\code{signature(.Object = "ctmc")}: initialize #' method } #' \item{states}{\code{signature(object = "ctmc")}: states method. } #' \item{steadyStates}{\code{signature(object = "ctmc")}: method to get the #' steady state vector. } #' \item{plot}{\code{signature(x = "ctmc", y = "missing")}: plot method #' for \code{ctmc} objects } #' } #' #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences #' (2013), David F. Anderson, University of Wisconsin at Madison. Sai Bhargav #' Yalamanchi, Giorgio Spedicato #' #' @note #' \enumerate{ #' \item \code{ctmc} classes are written using S4 classes #' \item Validation method is used to assess whether either columns or rows totals to zero. #' Rounding is used up to 5th decimal. If state names are not properly defined #' for a generator \code{matrix}, coercing to \code{ctmc} object leads to overriding #' states name with artificial "s1", "s2", ... 
sequence #' } #' @seealso \code{\link{generatorToTransitionMatrix}},\code{\link{rctmc}} #' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' steadyStates(molecularCTMC) #' \dontrun{plot(molecularCTMC)} #' #' @keywords classes #' #' @export setClass("ctmc", representation(states = "character", byrow = "logical", generator = "matrix", name = "character"), prototype(states = c("a", "b"), byrow = TRUE, generator = matrix(data = c(-1, 1, 1, -1), byrow = TRUE, nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), name = "Unnamed CTMC") ) setMethod("initialize", signature(.Object = "ctmc"), function (.Object, states, byrow, generator,name,...) { # put the standard markovchain if(missing(generator)) generator=matrix(data=c(-1, 1, 1, -1), #create a naive matrix nrow=2, byrow=TRUE, dimnames=list(c("a", "b"), c("a", "b")) ) # check names of transition matrix if(all(is.null(rownames(generator)), is.null(colnames(generator)))==TRUE) { #if all names are missing it initializes them to "1", "2",... if(missing(states)) { nr=nrow(generator) stateNames<-as.character(seq(1:nr)) } else {stateNames=states} rownames(generator)=stateNames colnames(generator)=stateNames } else if(is.null(rownames(generator))) { #fix when rownames null rownames(generator)=colnames(generator) } else if(is.null(colnames(generator))) { #fix when colnames null colnames(generator)=rownames(generator) } else if(!setequal(rownames(generator),colnames(generator))) colnames(generator)=rownames(generator) #fix when different if(missing(states)) states=rownames(generator) #assign if(missing(byrow)) byrow=TRUE #set byrow as true by default if(missing(name)) name="Unnamed Markov chain" #generic name to the object callNextMethod(.Object, states = states, byrow = byrow, generator=generator,name=name,...) } ) #returns states of the ctmc setMethod("states", "ctmc", function(object) { out <- object@states return(out) } ) #returns states of the ctmc setMethod("dim", "ctmc", function(x) { out <- nrow(x@generator) return(out) } ) setValidity("ctmc", function(object) { check<-NULL # performs a set of check whose results are saved in check if (.isGenRcpp(object@generator)==FALSE) check <- "Error! Not a generator matrix" if (object@byrow==TRUE) { if(any(round(rowSums(object@generator),5)!=0)) check <- "Error! Row sums not equal to zero" } else { if(any(round(colSums(object@generator),5)!=0)) check <- "Error! Col sums not equal to zero" } if (nrow(object@generator)!=ncol(object@generator)) check <- "Error! Not squared matrix" #check if squalre matrix if (!setequal(colnames(object@generator),object@states)) check <- "Error! Colnames <> states" #checks if if (!setequal(rownames(object@generator),object@states)) check <- "Error! 
Rownames <> states" if ( is.null(check) ) return(TRUE) else return(check) } ) .ctmcEigen<-function(matr, transpose=TRUE) { # Function to extract eigenvalues, core of get steady states # # Args: # matr: the matrix to extract # transpose: boolean indicating whether the matrx shall be transpose # # Results: # a matrix / vector if (transpose) tMatr <- t(matr) else tMatr <- matr #trasposing eigenResults <- eigen(x=tMatr,symmetric=FALSE) #perform the eigenvalue extraction onesIndex <- which(round(eigenResults$values,3)==1) #takes the one eigenvalue #do the following: 1:get eigenvectors whose eigenvalues==1 #2: normalize if (length(onesIndex)==0) { warning("No eigenvalue = 1 found - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } if(length(onesIndex) > 1){ warning("Eigenvalue = 1 multiplicity > 1! - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } if (transpose==TRUE) { eigenTake <- as.matrix(t(eigenResults$vectors[,onesIndex])) if(rowSums(Im(eigenTake)) != 0){ warning("Eigenvector corresponding to largest eigenvalue has a non-zero imaginary part - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } out <- eigenTake } else { eigenTake <- as.matrix(eigenResults$vectors[,onesIndex]) if(colSums(Im(eigenTake)) != 0){ warning("Eigenvector corresponding to largest eigenvalue has a non-zero imaginary part - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } out <- eigenTake } return(out) } setMethod("steadyStates", "ctmc", function(object) { transposeYN <- FALSE if(object@byrow==TRUE) transposeYN <- TRUE transMatr <- generatorToTransitionMatrix(object@generator, byrow = object@byrow) out<-.ctmcEigen(matr=transMatr, transpose=transposeYN) if(is.null(out)) { warning("Warning! No steady state") return(NULL) } if(transposeYN==TRUE) { colnames(out) <- object@states } else { rownames(out) <- object@states } out <- - out / diag(object@generator) if(transposeYN==TRUE){ out <- out / rowSums(out) } else{ out <- out / colSums(out) } return(out) } ) # internal function for plotting ctmc object using igraph .getNetctmc <- function(object, round = FALSE) { # function to get the graph adjacency object to plot and export to igraph # # Args: # object: a ctmc object # round: boolean to round # # Returns: # # a graph adjacency if (object@byrow == FALSE) { object <- t(object) } #gets the generator matrix matr <- object@generator * 100 if(round == TRUE) { matr <- round(matr, 2) } net <- graph.adjacency(adjmatrix = matr, weighted = TRUE, mode = "directed") return(net) } setMethod("plot",signature(x="ctmc",y="missing"), function(x,y,package = "igraph",...){ switch(package, diagram = { if (requireNamespace("diagram", quietly = TRUE)) { .plotdiagram(object = x, ...) } else { netMc <- .getNetctmc(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, DiagrammeR = { if (requireNamespace("DiagrammeR", quietly = TRUE)) { .plotDiagrammeR(object = x, ...) } else { netMc <- .getNetctmc(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, { netMc <- .getNetctmc(object = x,round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) 
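        # (this unnamed branch is the switch default: when neither "diagram" nor
        # "DiagrammeR" is requested, the ctmc is plotted through igraph)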
}) } ) #' An S4 class for representing Imprecise Continuous Time Markov Chains #' #' @slot states a vector of states present in the ICTMC model #' @slot Q a matrix representing the generator, expressed in terms of variables whose bounds are stored in \code{range} #' @slot range a matrix that stores the lower and upper bounds of the variables #' @slot name name given to the ICTMC #' ictmc <- setClass("ictmc", slots = list(states = "character", Q = "matrix", range = "matrix", name = "character") ) setMethod("initialize", signature(.Object = "ictmc"), function (.Object, states, Q, range, name, ...) { if(missing(Q)) Q=matrix(data=c(-1, 1, 1, -1), #create a naive matrix nrow=2, byrow=TRUE, dimnames=list(c("n", "y"), c("n", "y")) ) if(missing(range)) range = matrix(c(1/52, 3/52, 1/2, 2), nrow = 2, byrow = TRUE) #if all names are missing it initializes them to "1", "2",... if(all(is.null(rownames(Q)), is.null(colnames(Q)))==TRUE) { if(missing(states)) { nr=nrow(Q) stateNames<-as.character(seq(1:nr)) } else {stateNames=states} rownames(Q)=stateNames colnames(Q)=stateNames } else if(is.null(rownames(Q))) { #fix when rownames null rownames(Q)=colnames(Q) } else if(is.null(colnames(Q))) { #fix when colnames null colnames(Q)=rownames(Q) } else if(!setequal(rownames(Q),colnames(Q))) colnames(Q)=rownames(Q) #fix when different if(missing(states)) states=rownames(Q) #assign if(missing(name)) name="Unnamed imprecise CTMC" #generic name to the object callNextMethod(.Object, states = states, Q = Q, range=range,name=name,...) } ) setValidity("ictmc", function(object) { check<-NULL # performs a set of checks whose results are saved in check if (.isGenRcpp(object@Q)==FALSE) check <- "Error! Not a generator matrix" if(any(round(rowSums(object@Q),5)!=0)) check <- "Error! Row sums not equal to zero" if ( nrow(object@Q) != ncol(object@Q )) check <- "Error! Not a square matrix" #check if square matrix if ( !setequal(colnames(object@Q),object@states )) check <- "Error! Colnames <> states" if ( !setequal(rownames(object@Q),object@states )) check <- "Error! Rownames <> states" if(nrow(object@range) != nrow(object@Q) || ncol(object@range) != 2) check <- "Error! Dimension of range matrix not correct." for(i in 1:nrow(object@Q)){ if( object@range[i,1] > object@range[i,2] ){ check <- "Error, improper values set in range matrix." } } if(min(object@range) < 0){ check <- "Error, values in the range matrix should be greater than zero." 
} if ( is.null(check) ) return(TRUE) else return(check) } ) markovchain/R/random.R0000644000176200001440000000235113762012754014370 0ustar liggesusers# Methods to generate random markov chains normalizeMatrix <- function(matrix, byrow = TRUE) { margin <- ifelse (byrow, 1, 2) n <- nrow(matrix) result <- sapply(1:n, function(i) { row <- matrix[i, ] rowSum <- sum(row) if (rowSum == 0) { values <- c(rep(0, i - 1), 1, rep(0, n - i)) values } else { row / rowSum } }) # If we want the result by rows, we have to transpose the matrix, # since the apply method with margin = 1 (over rows) returns the result # by columns if (byrow) t(result) else result } # Returns a random stochastic matrix randomStochasticMatrix <- function(n, zeroProb, byrow = TRUE) { numRandom <- n * n randomNums <- stats::runif(numRandom) remainProb <- (1 - zeroProb) / numRandom probs <- c(zeroProb, rep(remainProb, numRandom)) entries <- sample(c(0, randomNums), numRandom, prob = probs, replace = TRUE) result <- matrix(entries, n, n, byrow) result <- normalizeMatrix(result, byrow) result } randomMarkovChain <- function(n, zeroProb = 0.95, byrow = TRUE) { matrix <- randomStochasticMatrix(n, zeroProb, byrow) new("markovchain", transitionMatrix = matrix, byrow = byrow) }markovchain/R/statisticalTests.R0000644000176200001440000004772613762012754016476 0ustar liggesusers#helper functions #helper function for checkMP .findNijPjk<-function(Nijk = Nijk, Nij = Nij, trans, row = 1){ i <- Nijk[row,1] j <- Nijk[row,2] k <- Nijk[row,3] fromCh <- as.character(j) toCh <- as.character(k) Pjk <- trans[fromCh,toCh] m1 <- which(Nij[, 1] == i) m2 <- which(Nij[, 2] == j) m <- c(m1, m2) return(Nij[m[anyDuplicated(m)], 3] * Pjk) } #' @name verifyMarkovProperty #' #' @rdname statisticalTests #' @family statisticalTests #' #' @title Various functions to perform statistical inference of DTMC #' @description These functions verify the Markov property, assess #' the order and stationarity of the Markov chain. #' #' @param sequence An empirical sequence. #' @param verbose Should test results be printed out? #' @param nblocks Number of blocks. #' #' @return Verification result #' #' @references Anderson and Goodman. #' #' @author Tae Seung Kang, Giorgio Alfredo Spedicato #' #' @seealso \code{markovchain} #' #' @examples #' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", #' "a", "b", "a", "a", "b", "b", "b", "a") #' mcFit <- markovchainFit(data = sequence, byrow = FALSE) #' verifyMarkovProperty(sequence) #' assessOrder(sequence) #' assessStationarity(sequence, 1) #' #' #' @export # check if the sequence holds the Markov property verifyMarkovProperty <- function(sequence, verbose = TRUE) { #warning("The accuracy of the statistical inference functions has been questioned. 
It will be thoroughly investigated in future versions of the package.") #fitting the markovchain transMatrix <- markovchainFit(data = sequence)$estimate@transitionMatrix #make the (n-2)x3 matrix for observations subSample<-sequence[1:(length(sequence) - (length(sequence)%%3))] seqSet1<-matrix(c(subSample[1:(length(subSample) - 2)], subSample[2:(length(subSample) - 1)], subSample[3:(length(subSample))] ),ncol = 3) #fill the matrix in reverse order so position 11 is the first obersvation,12 second and 13 third #compute row frequencies temp<-as.data.frame(seqSet1) Nijk<-aggregate(temp, by = temp, length)[1:(ncol(temp) + 1)] seqSet2 <- seqSet1[, -3] #make matrix of couples temp2 <- as.data.frame(seqSet2) Nij <- aggregate(temp2, by = temp2, length)[1:(ncol(temp2) + 1)] #rowfrequencies included test<-c(length = dim(Nijk)[1]) #compute the test statistic invisible(lapply(seq_len(dim(Nijk)[1]),function(i) { foundNijPjk <- .findNijPjk(Nijk = Nijk, Nij = Nij, trans = transMatrix, row = i) test[i] <<- ((Nijk[i,4]-foundNijPjk)^2)/foundNijPjk }) ) statistic <- sum(test) #return value of the test statistic and test at confience level 95% and 99% #dof #Steps : No. of df = No. of triplets - No. of doubles + No. of observations - 1 #Creating vector of doubles/pairs doubles = numeric(length(sequence)-1) for(i in 1:(length(doubles))) {doubles[i] = paste(sequence[i], sequence[i+1], sep="", collapse = NULL)} #Creating vector of triplets triples = numeric(length(sequence)-2) for(i in 1:(length(triples))) {triples[i] = paste(sequence[i], sequence[i+1], sequence[i+2], sep="", collapse = NULL)} #Hence no. of df is--- dof = length(unique(triples)) - length(unique(doubles)) + length(unique(sequence)) - 1 pvalue <- 1-pchisq(q = statistic,df = dof) out <- list(statistic = statistic,dof = dof,p.value = pvalue) # previous version # n <- length(sequence) # u <- unique(sequence) # stateNames <- u # nelements <- length(stateNames) # mat <- matlab::zeros(nrow = nelements, ncol = 3) # # # SSO: state sequence occurrences # # TSO: two state occurences # dimnames(mat) <- list(stateNames, c("SSO", "TSO", "TSO-SSO")) # # # numeric vector initialized with zero for all states # SSO <- numeric() # for(i in 1:nelements) { # sname <- stateNames[i] # SSO[sname] <- 0 # } # # # numeric vector initialized with zero for all states # TSO <- SSO # # # store the output to be returned # out <- list() # # for(present in stateNames) { # for(future in stateNames) { # # for(i in 1:nelements) { # TSO[i] <- SSO[i] <- 0 # } # # # populate TSO and SSO vector # for(i in 1:(n-1)) { # # let the ith state as past state # past <- sequence[i] # # # if next state in the sequence is present state # if(sequence[i+1] == present) { # TSO[past] <- TSO[past] + 1 # # # if next to next state in the sequence is future state # if((i < n - 1) && (sequence[i+2] == future)) { # SSO[past] <- SSO[past] + 1 # } # } # } # # # populate the matrix # # first column corresponds to SSO, second to TSO and # # third to their difference # # for(i in 1:(length(SSO))) { # mat[i, 1] <- SSO[i] # mat[i, 2] <- TSO[i] # mat[i, 3] <- TSO[i] - SSO[i] # } # # } # } # # # chi-squared test # # # between SSO and TSO-SSO # table <- as.data.frame(mat[, c(1, 3)]) # # # an object of class htest # res <- chisq.test(table) # # # extract all information from htest object # # and stored the result in the form of list # res <- c(res) # # # SSO and TSO # table <- as.data.frame(mat[ , c(1, 2)]) # # # stored the table in the list # res[["table"]] <- table # # # store the result corresponding to present state 
and future state # out[[paste0(present, future)]] <- res if (verbose == TRUE) { cat("Testing markovianity property on given data sequence\n") cat("Chi - square statistic is:", statistic, "\n") cat("Degrees of freedom are:", dof, "\n") cat("And corresponding p-value is:", pvalue, "\n") } invisible(out) } #' @rdname statisticalTests #' @export # check if sequence is of first order or of second order assessOrder <- function(sequence, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") # length of sequence n <- length(sequence) # unique states states <- unique(sequence) # number of unique states nelements <- length(states) TStat <- 0 for(present in states) { # going to be a transition matrix mat <- matlab::zeros(nelements) dimnames(mat) <- list(states, states) # populate transition matrix for(i in 1:(n - 2)) { if(present == sequence[i + 1]) { past <- sequence[i] future <- sequence[i + 2] mat[past, future] <- mat[past, future] + 1 } } # chi-squared test res <- chisq.test(mat) TStat <- TStat + res$statistic } k <- nelements df <- k * (k - 1)^2 pvalue <- 1-pchisq(q = TStat, df) out <- list(statistic = TStat[[1]], p.value = pvalue[[1]]) # returning the output if (verbose == TRUE) { cat("The assessOrder test statistic is: ", TStat, "\n") cat("The Chi-Square d.f. are: ", df, "\n") cat("The p-value is: ", pvalue, "\n") } invisible(out) } #' @rdname statisticalTests #' @export # check if sequence is stationary assessStationarity <- function(sequence, nblocks, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") # length of sequence n <- length(sequence) # size of each block blocksize <- n / nblocks # vector of unique states states <- unique(sequence) # number of states nstates <- length(states) # sum of the statistics TStat <- 0 # chi-squared test for each state for(i in states) { # init matrix mat <- matlab :: zeros(nblocks, nstates) dimnames(mat) <- list(1:nblocks, states) # compute the transition matrix from sequence for(j in 1:(n - 1)) { if(sequence[j] == i) { # row index b <- ceiling(j / blocksize) # next state future <- sequence[j+1] # update transition matrix mat[b, future] <- mat[b, future] + 1 } } # vector to store row sum of matrix rowsums <- rowSums(mat) # store the indices with zero row sum indices <- which(rowsums == 0) # update rows with zero sum for(k in indices) mat[k, ] <- 1/nstates # update row sum after checking zero sum row rowsums <- rowSums(mat) # row-wise normalize. mat <- mat/rowsums # Some columns may still be all zeros. This causes NaN for chi-squared test. # chi-squared test res <- chisq.test(mat) TStat <- TStat + res$statistic } k <- nstates # degree of freedom df <- k * (nblocks - 1) * (k - 1) pvalue <- 1 - pchisq(TStat, df) # returning the output if (verbose==TRUE) { cat("The assessStationarity test statistic is: ", TStat, "\n") cat("The Chi-Square d.f. 
are: ", df, "\n") cat("The p-value is: ", pvalue, "\n") } out <- list(statistic = TStat[[1]], p.value = pvalue[[1]]) invisible(out) } # sequence to transition frequencey matrix .seq2mat <- function(sequence) { # basic requirement to create transition matrix n <- length(sequence) states <- unique(sequence) nstates <- length(states) # create transition matrix mat <- matlab::zeros(nstates) dimnames(mat) <- list(states, states) # populate transition matrix for(i in 1:(n - 1)) { from <- sequence[i] to <- sequence[i+1] mat[from, to] <- mat[from, to] + 1 } return (mat) } #' @title test whether an empirical transition matrix is compatible to a theoretical one #' #' @description This function tests whether an empirical transition matrix is statistically compatible #' with a theoretical one. It is a chi-square based test. In case a cell in the empirical transition matrix is >0 #' that is 0 in the theoretical transition matrix the null hypothesis is rejected. #' #' @rdname statisticalTests #' @family statisticalTests #' #' @param data matrix, character or list to be converted in a raw transition matrix #' @param object a markovchain object #' #' @return a list with following slots: statistic (the chi - square statistic), dof (degrees of freedom), and corresponding p-value. In case a cell in the empirical transition matrix is >0 #' that is 0 in the theoretical transition matrix the null hypothesis is rejected. In that case a p-value of 0 and statistic and dof of NA are returned. #' @export #' #' @examples #' #' #Example taken from Kullback Kupperman Tests for Contingency Tables and Markov Chains #' #' sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, #' 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, #' 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, #' 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, #' 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) #' #' mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) #' rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") #' #' verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) #' verifyEmpiricalToTheoretical <- function(data, object, verbose = TRUE) { #warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") if (!class(object) == 'markovchain') stop("Error! Object should belong to the markovchain class") if (missing(data) | missing(object)) stop("Error! Required inputs missing") if (!(class(data) == "numeric" || class(data) == "character" || is.matrix(data))) stop("Error! Data should be either a raw transition matrix or either a character or a numeric element") if ((class(data) == "numeric" || class(data) == "character")) data<-createSequenceMatrix(stringchar = data, possibleStates = states(object)) if (length(setdiff(names(data),names(object))) > 0) stop("Error! 
Empirical and theoretical transition matrices have different support") # (possibly rearrange columns and rownames) data <- data[match(rownames(data),names(object)),] #matching rows data <- data[,match(colnames(data),names(object))] #matching cols if (sum((data == 0) == (object@transitionMatrix == 0)) == (nrow(data) * ncol(data))) { f_i_dot <-colSums(data) statistic <- 0 for (i in 1:dim(object)) { for (j in 1:dim(object)) { if (data[i, j]>0&object[i, j]>0) statistic <- statistic + data[i, j]*log(data[i, j]/(f_i_dot[i]*object[i, j])) } } statistic <- statistic * 2 null_elements <- sum(object@transitionMatrix == 0) dof <- dim(object) * (dim(object) - 1) - null_elements # r(r-1) - c, where c is the number of null elements of the object p.value <- 1 - pchisq(q = statistic,df = dof) if (verbose == TRUE) { cat("Testing whether the\n");print(data);cat("transition matrix is compatible with\n");print(object@transitionMatrix);print("theoretical transition matrix") cat("ChiSq statistic is",statistic,"d.o.f are",dof,"corresponding p-value is",p.value,"\n") } out <- list(statistic = statistic, dof = dof,pvalue = p.value) } else { statistic <- NA dof <- NA p.value <- 0 if (verbose == TRUE) { cat("Testing whether the\n");print(data);cat("transition matrix is compatible with\n");print(object@transitionMatrix);print("theoretical transition matrix") cat("ChiSq statistic is",statistic,"d.o.f are",dof,"corresponding p-value is",p.value,"\n") cat("At least one transition is >0 in the data that is 0 in the object. Therefore the null hypothesis is rejected. \n") } out <- list(statistic = statistic, dof = dof,pvalue = p.value) } #return output return(out) } .checkMatrix4Homogeneity<-function(matr) { out<-TRUE if (length(colnames(matr)) == 0) {message("Error! No colnames in input matrix"); out = FALSE} if (length(rownames(matr)) == 0) {message("Error! No rownames in input matrix"); out = FALSE} if (!isTRUE(all.equal(rownames(matr),colnames(matr)))) {message("Error! Colnames <> Rownames"); out = FALSE} if (any(matr<0)) {message("Error! Negative elements"); out = FALSE} return(out) } .addNamedColumns <- function(matr, fullnames) { if ( length( setdiff(names(matr),fullnames) )>0) stop("Error! Names in matr not in fullnames") fullnames<-sort(fullnames) newMatr<-matrix(0,nrow = length(fullnames),ncol = length(fullnames),dimnames = list(fullnames,fullnames)) current_support = colnames(matr) current_dim = dim(matr) for (i in 1:current_dim[1]) { #cycle on row for (j in 1:current_dim[2]) { #cycle on cols item<-matr[i,j] #take the element which_row_trans<-current_support[i] #define current row and cols which_col_trans<-current_support[j] # lookup element in the pooled table row_to_write <-match(x=which_row_trans,table = fullnames) col_to_write <-match(x=which_col_trans,table = fullnames) # write element into the pooled table newMatr[row_to_write,col_to_write] <- newMatr[row_to_write,col_to_write] + item } } return(newMatr) } #' @title Verify Homogeneity across transition matrices #' #' @description Verifies that the s elements in the input list belong to the same DTMC #' #' @rdname statisticalTests #' @family statisticalTests #' #' @param inputList A list of items that can be coerced to transition matrices #' #' @return a list with the chi - square statistic, degrees of freedom and the corresponding p-value #' @export #' #' @examples #' #' data(kullback) #' verifyHomogeneity(inputList=kullback,verbose=TRUE) #' verifyHomogeneity<-function(inputList, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. 
It will be thoroughly investigated in future versions of the package.") if (class(inputList) != "list") stop("Error! inputList should be a list") if (length(inputList) < 2) stop("Error! inputList length lower than 2") #checks whether all inputs can be coerced to transition matrices for (i in 1:length(inputList)) { if (is.matrix(inputList[[i]]) == TRUE) { checks<-.checkMatrix4Homogeneity(inputList[[i]]) if (!checks) stop("Error! Element ", i, " failed the checks") } else { inputList[[i]]<-createSequenceMatrix(stringchar = inputList[[i]]) #convert all elements into transition matrices } } # create the pooled raw transition matrix and the matrix of rowsums all.names<-character() for (i in 1:length(inputList)) { all.names<-c(all.names, rownames(inputList[[i]])) } all.names<-sort(unique(all.names)) ##initialize PooledRawTransitionMatrix <- matrix(0,nrow = length(all.names),ncol = length(all.names),dimnames = list(all.names, all.names)) RowSumsMatrix <- matrix(0, nrow = length(inputList),ncol=length(all.names),dimnames = list(1:length(inputList),all.names)) ##sum for each element in the list for (k in 1:length(inputList)) { current_support = rownames(inputList[[k]]) current_dim = dim(inputList[[k]]) for (i in 1:current_dim[1]) { #cycle on row for (j in 1:current_dim[2]) { #cycle on cols num_trans<-inputList[[k]][i, j] #take the element which_row_trans <- current_support[i] #define current row and cols which_col_trans <- current_support[j] # lookup element in the pooled table row_to_write <-match(x = which_row_trans,table = all.names) col_to_write <-match(x = which_col_trans,table = all.names) # write element into the pooled table PooledRawTransitionMatrix[row_to_write,col_to_write]=PooledRawTransitionMatrix[row_to_write,col_to_write]+num_trans } } } #create the matrix of rowsums fij. 
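# the loop below fills RowSumsMatrix: entry (k, s) accumulates the total number of observed transitions leaving state s in the k-th element of inputList; together with PooledRawTransitionMatrix these marginal counts enter the log-likelihood ratio statistic computed further down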
for (k in 1:length(inputList)) { my_row_sums <- rowSums(inputList[[k]]) current_support = names(my_row_sums) for (i in 1:length(current_support)) { my_element<-my_row_sums[i] col_to_write<-match(x=current_support[i],table = all.names) RowSumsMatrix[k, col_to_write]<-RowSumsMatrix[k, col_to_write] + my_element } } # compute the chi - square statistic statistic <- 0 # degreesOfFreedomLess <- 0 newInputList <- lapply(inputList, .addNamedColumns,fullnames = all.names) number_of_transitions <- sapply(newInputList,sum) total_transitions <- sum(number_of_transitions) for (s in 1:length(inputList)) { #cycle across inputs for (j in 1:length(all.names)) { #cycle across rows for (k in 1:length(all.names)) { #cycle across cols if (any(newInputList[[s]][j,k] == 0, number_of_transitions[s] == 0, PooledRawTransitionMatrix[j,k] == 0)) { statistic <- statistic + 0 # zero element in log expr does not contribute to statistics # degreesOfFreedomLess <- degreesOfFreedomLess +1 } else { statistic <- statistic + newInputList[[s]][j, k] * log((total_transitions*newInputList[[s]][j, k])/(number_of_transitions[s]*PooledRawTransitionMatrix[j,k])) } } } } statistic <- statistic * 2 #dof (s-1)*(r^2-1)-#zeros degrees_of_freedom <- (length(inputList) - 1)*(length(all.names)^2 - 1)#-degreesOfFreedomLess p.value <- 1 - pchisq(q = statistic,df = degrees_of_freedom) if (verbose == TRUE) { cat("Testing homogeneity of DTMC underlying input list \n") cat("ChiSq statistic is",statistic,"d.o.f are",degrees_of_freedom,"corresponding p-value is",p.value,"\n") } #return output out <- list(statistic = statistic, dof = degrees_of_freedom,pvalue = p.value) return(out) } markovchain/R/classesAndMethods.R0000644000176200001440000012325013762013334016511 0ustar liggesusers#' @title Markov Chain class #' @name markovchain-class #' @aliases markovchain-class *,markovchain,markovchain-method #' *,markovchain,matrix-method *,markovchain,numeric-method #' *,matrix,markovchain-method *,numeric,markovchain-method #' ==,markovchain,markovchain-method !=,markovchain,markovchain-method #' absorbingStates,markovchain-method transientStates,markovchain-method #' recurrentStates,markovchain-method transientClasses,markovchain-method #' recurrentClasses,markovchain-method communicatingClasses,markovchain-method #' steadyStates,markovchain-method meanNumVisits,markovchain-method #' is.regular,markovchain-method is.irreducible,markovchain-method #' is.accessible,markovchain,character,character-method #' is.accessible,markovchain,missing,missing-method #' absorptionProbabilities,markovchain-method #' meanFirstPassageTime,markovchain,character-method #' meanFirstPassageTime,markovchain,missing-method #' meanAbsorptionTime,markovchain-method #' meanRecurrenceTime,markovchain-method #' conditionalDistribution,markovchain-method hittingProbabilities,markovchain-method #' canonicForm,markovchain-method coerce,data.frame,markovchain-method #' coerce,markovchain,data.frame-method coerce,table,markovchain-method #' coerce,markovchain,igraph-method coerce,markovchain,matrix-method #' coerce,markovchain,sparseMatrix-method coerce,sparseMatrix,markovchain-method #' coerce,matrix,markovchain-method coerce,msm,markovchain-method #' coerce,msm.est,markovchain-method coerce,etm,markovchain-method #' dim,markovchain-method initialize,markovchain-method #' names<-,markovchain-method plot,markovchain,missing-method #' predict,markovchain-method print,markovchain-method #' show,markovchain-method summary,markovchain-method #' sort,markovchain-method t,markovchain-method #' 
[,markovchain,ANY,ANY,ANY-method ^,markovchain,numeric-method #' @description The S4 class that describes \code{markovchain} objects. #' #' @param states Name of the states. Must be the same of \code{colnames} and \code{rownames} of the transition matrix #' @param byrow TRUE or FALSE indicating whether the supplied matrix #' is either stochastic by rows or by columns #' @param transitionMatrix Square transition matrix #' @param name Optional character name of the Markov chain #' #' @section Creation of objects: #' #' Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}. #' #' @section Methods: #' #' \describe{ #' \item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects} #' \item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication} #' \item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication } #' \item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain} #' \item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication } #' \item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... } #' \item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object} #' \item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object} #' \item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object} #' \item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states } #' \item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form } #' \item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}} #' \item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state} #' \item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}} #' \item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} } #' \item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects } #' \item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects } #' \item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects } #' \item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one } #' \item{dim}{\code{signature(x = "markovchain")}: method to get the size} #' \item{names}{\code{signature(x = "markovchain")}: 
method to get the names of states} #' \item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states} #' \item{initialize}{\code{signature(.Object = "markovchain")}: initialize method } #' \item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects } #' \item{predict}{\code{signature(object = "markovchain")}: predict method } #' \item{print}{\code{signature(x = "markovchain")}: print method. } #' \item{show}{\code{signature(object = "markovchain")}: show method. } #' \item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. } #' \item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. } #' \item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. } #' \item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain } #' \item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. } #' \item{t}{\code{signature(x = "markovchain")}: transpose matrix } #' \item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability } #' } #' #' @references #' A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' @note #' \enumerate{ #' \item \code{markovchain} object are backed by S4 Classes. #' \item Validation method is used to assess whether either columns or rows totals to one. #' Rounding is used up to \code{.Machine$double.eps * 100}. If state names are not properly #' defined for a probability \code{matrix}, coercing to \code{markovhcain} object leads #' to overriding states name with artificial "s1", "s2", ... sequence. In addition, operator #' overloading has been applied for \eqn{+,*,^,==,!=} operators. #' } #' #' @seealso \code{\link{markovchainSequence}},\code{\link{markovchainFit}} #' #' @examples #' #show markovchain definition #' showClass("markovchain") #' #create a simple Markov chain #' transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) #' simpleMc<-new("markovchain", states=c("a","b"), #' transitionMatrix=transMatr, #' name="simpleMc") #' #power #' simpleMc^4 #' #some methods #' steadyStates(simpleMc) #' absorbingStates(simpleMc) #' simpleMc[2,1] #' t(simpleMc) #' is.irreducible(simpleMc) #' #conditional distributions #' conditionalDistribution(simpleMc, "b") #' #example for predict method #' sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") #' mcFit<-markovchainFit(data=sequence) #' predict(mcFit$estimate, newdata="b",n.ahead=3) #' #direct conversion #' myMc<-as(transMatr, "markovchain") #' #' #example of summary #' summary(simpleMc) #' \dontrun{plot(simpleMc)} #' #' @keywords classes #' #' @export setClass( # Class name "markovchain", # Define the slots slots = list(states = "character", byrow = "logical", transitionMatrix = "matrix", name = "character"), # Set the default values for the slots prototype = list( states = c("a", "b"), byrow = TRUE, transitionMatrix = matrix( data = c(0, 1, 1, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b"))), name = "Unnamed Markov chain") ) # Initializing method for markovchain objects setMethod( "initialize", signature(.Object = "markovchain"), function (.Object, states, byrow, transitionMatrix, name, ...) 
{ # Put a standard transition matrix if missing if (missing(transitionMatrix)) { transitionMatrix <- matrix( data = c(0, 1, 1, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b"))) } rowNames <- rownames(transitionMatrix) colNames <- colnames(transitionMatrix) # Check names of transition matrix # if all names are missing it initializes them to "1", "2", .... if (all(is.null(rowNames), is.null(colNames)) == TRUE) { if (missing(states)) { numRows <- nrow(transitionMatrix) stateNames <- as.character(seq(1:numRows)) } else { stateNames <- states } rownames(transitionMatrix) <- stateNames colnames(transitionMatrix) <- stateNames # Fix when rownames null } else if (is.null(rowNames)) { rownames(transitionMatrix) <- colNames # Fix when colnames null } else if (is.null(colNames)) { colnames(transitionMatrix) <- rowNames # Fix when different } else if (! setequal(rowNames, colNames)) { colnames(transitionMatrix) <- rowNames } if (missing(states)) states <- rownames(transitionMatrix) if (missing(byrow)) byrow <- TRUE if (missing(name)) name <- "Unnamed Markov chain" callNextMethod( .Object, states = states, byrow = byrow, transitionMatrix = transitionMatrix, name = name, ... ) } ) #' @title Non homogeneous discrete time Markov Chains class #' @name markovchainList-class #' @aliases [[,markovchainList-method dim,markovchainList-method #' predict,markovchainList-method print,markovchainList-method #' show,markovchainList-method #' @description A class to handle non homogeneous discrete Markov chains #' #' @param markovchains Object of class \code{"list"}: a list of markovchains #' @param name Object of class \code{"character"}: optional name of the class #' #' @section Objects from the Class: #' #' A \code{markovchainList} is a list of \code{markovchain} objects. They can #' be used to model non homogeneous discrete time Markov Chains, when #' transition probabilities (and possible states) change over time. #' @section Methods: #' \describe{ #' \item{[[}{\code{signature(x = "markovchainList")}: extract the #' i-th \code{markovchain} } #' \item{dim}{\code{signature(x = "markovchainList")}: number #' of \code{markovchain} objects underlying the list } #' \item{predict}{\code{signature(object = "markovchainList")}: predict #' from a \code{markovchainList} } #' \item{print}{\code{signature(x = "markovchainList")}: prints the list #' of markovchains } #' \item{show}{\code{signature(object = "markovchainList")}: same as \code{print} } #' } #' #' @references #' A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @note #' The class consists of a list of \code{markovchain} objects. #' It is aimed at working with non homogeneous Markov chains. 
#' #' @seealso \code{\linkS4class{markovchain}} #' @examples #' showClass("markovchainList") #' #define a markovchainList #' statesNames=c("a","b") #' #' mcA<-new("markovchain",name="MCA", #' transitionMatrix=matrix(c(0.7,0.3,0.1,0.9), #' byrow=TRUE, nrow=2, #' dimnames=list(statesNames,statesNames)) #' ) #' #' mcB<-new("markovchain", states=c("a","b","c"), name="MCB", #' transitionMatrix=matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), #' nrow=3, byrow=TRUE)) #' #' mcC<-new("markovchain", states=c("a","b","c","d"), name="MCC", #' transitionMatrix=matrix(c(0.25,0.75,0,0,0.4,0.6, #' 0,0,0,0,0.1,0.9,0,0,0.7,0.3), #' nrow=4, byrow=TRUE) #' ) #' mcList<-new("markovchainList",markovchains=list(mcA, mcB, mcC), #' name="Non - homogeneous Markov Chain") #' #' @keywords classes #' #' @export setClass( "markovchainList", slots = list( markovchains = "list", name = "character") ) # Verifies whether a markovchainList object is valid or not # A markovchainList is valid iff all the slots are markovchain objects # Returns true if the markovchainList is valid, the indexes of the # wrong slots otherwise setValidity( "markovchainList", function(object) { check <- FALSE markovchains <- object@markovchains classes <- sapply(markovchains, class) nonMarkovchain <- which(classes != "markovchain") errors <- sapply(nonMarkovchain, function(i) { paste(i, "-th element class is not 'markovchain'") }) if (length(errors) == 0) TRUE else errors } ) # generic method to print out states #' @name states #' #' @title Defined states of a transition matrix #' #' @description This method returns the states of a transition matrix. #' #' @param object A discrete \code{markovchain} object #' @return The character vector corresponding to states slot. #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' states(markovB) #' names(markovB) #' #' @rdname states #' #' @export setGeneric("states", function(object) standardGeneric("states")) #' @rdname states #' @title states setMethod( "states", "markovchain", function(object) { object@states } ) #' @title Returns the states for a Markov chain object #' #' @param x object we want to return states for #' #' @rdname names setMethod( "names", "markovchain", function(x) { x@states } ) #' @title Method to retrieve name of markovchain object #' #' @name name #' #' @description This method returns the name of a markovchain object #' #' @param object A markovchain object #' @rdname getName #' @author Giorgio Spedicato, Deepak Yadav #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' name(markovB) #' #' @export setGeneric("name", function(object) standardGeneric("name")) #' @rdname getName setMethod( "name", "markovchain", function(object) { object@name }) #' @title Method to set name of markovchain object #' #' @name name<- #' #' @description This method modifies the existing name of markovchain object #' #' @param object A markovchain object #' @param value New name of 
markovchain object #' @rdname setName #' @author Giorgio Spedicato, Deepak Yadav #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' name(markovB) <- "dangerous mc" #' #' @export setGeneric("name<-", function(object, value) standardGeneric("name<-")) #' @rdname setName setMethod( "name<-", "markovchain", function(object, value) { object@name <- value object } ) setMethod( "names<-", "markovchain", function(x, value) { rownames(x@transitionMatrix) <- value colnames(x@transitionMatrix) <- value x@states <- value x } ) #' @exportMethod dim setGeneric("dim") # Generic methods to get the dim of a markovchain and markovchainList setMethod( "dim", "markovchain", function(x) { nrow(x@transitionMatrix) } ) setMethod( "dim", "markovchainList", function(x) { length(x@markovchains) } ) # method to set the validity of a markovchain object setValidity( "markovchain", function(object) { errors <- character() transitionMatrix <- object@transitionMatrix states <- object@states if (length(setdiff(states, unique(states))) > 0) { msg <- "Error! States must be unique!" errors <- c(errors, msg) } # Performs a set of checks. If any error arises, it ends up concatenated to errors # Check all values of transition matrix belongs to [0, 1] maybeProbabilities <- sapply(as.numeric(transitionMatrix), .isProbability) if (any(maybeProbabilities) == FALSE) { msg <- "Error! Some elements of transitionMatrix are not probabilities" errors <- c(errors, msg) } # Check whether matrix is square matrix or not if (nrow(transitionMatrix) != ncol(transitionMatrix)) { msg <- "Error! transitionMatrix is not a square matrix" errors <- c(errors, msg) } if (!.checkMatrix(transitionMatrix, object@byrow)) { msg <- paste( paste("Error!", ifelse(object@byrow, "Rows", "Cols")), "of transition matrix do not sum to one" ) errors <- c(errors, msg) } # Check whether column names or rows names equal to state names or not if (! setequal(colnames(transitionMatrix), states)) { msg <- "Error! Colnames of transitionMatrix do not match states" errors <- c(errors, msg) } if (! setequal(rownames(transitionMatrix), states)) { msg <- "Error! Rownames of transitionMatrix do not match states" errors <- c(errors, msg) } if (length(errors) > 0) errors else TRUE } ) # generic method to extract transition probability # from state t0 to state t1 #' @name transitionProbability #' @title Function to get the transition probabilities from initial #' to subsequent states. #' @description This is a convenience function to get transition probabilities. #' #' @param object A \code{markovchain} object. #' @param t0 Initial state. #' @param t1 Subsequent state. 
#' #' @references A First Course in Probability (8th Edition), #' Sheldon Ross, Prentice Hall 2010 #' #' @return Numeric Vector #' #' @author Giorgio Spedicato #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' transitionProbability(markovB,"b", "c") #' @rdname transitionProbability #' #' @exportMethod transitionProbability setGeneric("transitionProbability", function(object, t0, t1) standardGeneric("transitionProbability")) #' @rdname transitionProbability setMethod("transitionProbability", "markovchain", function(object, t0, t1) { fromState <- which(object@states == t0) toState <- which(object@states == t1) out <- ifelse(object@byrow == TRUE, object@transitionMatrix[fromState, toState] , object@transitionMatrix[toState, fromState]) return(out) } ) # print, plot and show methods .showInt <- function(object, verbose = TRUE) { # find the direction if (object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } if (verbose == TRUE) { cat(object@name, "\n A ", dim(object), "- dimensional discrete Markov Chain defined by the following states: \n", paste(states(object), collapse=", "), "\n The transition matrix ", direction, " is defined as follows: \n") } print(object@transitionMatrix) cat("\n") } #' @exportMethod show setGeneric("show") # show methods for markovchain and markovchain list objects setMethod("show", "markovchain", function(object){ .showInt(object) } ) setMethod("show", "markovchainList", function(object) { cat(object@name, " list of Markov chain(s)", "\n") for(i in 1:length(object@markovchains)) { cat("Markovchain ",i,"\n") show(object@markovchains[[i]]) } } ) #' @exportMethod print setGeneric("print") # print methods setMethod("print", "markovchainList", function(x) show(x)) setMethod("print", "markovchain", function(x){ object <- x .showInt(object, verbose = FALSE) } ) .getNet <- function(object, round = FALSE) { # function to get the absorbency matrix to plot and export to igraph # # Args: # object: a markovchain object # round: boolean to round # # Returns: # # a graph adjacency if (object@byrow == FALSE) { object <- t(object) } matr <- object@transitionMatrix*100 if(round == TRUE) { matr <- round(matr, 2) } net <- graph.adjacency(adjmatrix = matr, weighted = TRUE, mode = "directed") return(net) } getColorVector <- function(object){ list <- .communicatingClassesRcpp(object) sections <- length(list) colorList <- grDevices::colors() colorList <- sample(colorList,sections) colorvector <- rep("white",length(object@states)) for(i in 1:length(list)){ part <- list[[i]] for(j in 1:length(part)){ colorvector[match(part[j],object@states)] <- colorList[i] } } return(colorvector) } #' @exportMethod plot setGeneric("plot") # Plot methods for markovchain objects # plot method from stat5 setMethod("plot", signature(x = "markovchain", y = "missing"), function(x, y, package = "igraph", ...) { switch(package, diagram = { if (requireNamespace("diagram", quietly = TRUE)) { .plotdiagram(object = x, ...) } else { netMc <- .getNet(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, DiagrammeR = { if (requireNamespace("DiagrammeR", quietly = TRUE)) { .plotDiagrammeR(object = x, ...) 
} else { netMc <- .getNet(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, { netMc <- .getNet(object = x,round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) }) } ) ##################################################AS METHODS######################### .checkMatrix <- function(matr, byrow = TRUE, verbose = FALSE) { # firstly, check size if (ncol(matr) != nrow(matr)) { if(verbose) stop("Error! Not a square matrix") return(FALSE) } # secondly, check is stochastic isStochastic <- .isStochasticMatrix(matr, byrow) if (!isStochastic) { if (verbose) stop("Error! Either rows or cols should sum to 1") return(FALSE) } # if all tests are passed return(TRUE) } # Internal function to return a markovchain object given a matrix .matrix2Mc <- function(from) { # whether given matrix is a transition matrix or not # if it is then how probabilities are stored # row-wise or columnwise byrow <- TRUE checkByRows <- .checkMatrix(from, byrow = byrow) if(!checkByRows) { byrow <- FALSE checkByCols <- .checkMatrix(from, byrow = byrow) if(!checkByCols) { #error could be either in rows or in cols if (any(colSums(from) != 1)) cat("column sums not equal to one are:", which(colSums(from) != 1),"\n") if (any(rowSums(from) != 1)) cat("row sums not equal to one are:", which(rowSums(from) != 1),"\n") stop("Error! Not a transition matrix") } } # extract state names if(byrow) { namesCandidate <- rownames(from) } else { namesCandidate <- colnames(from) } # if state names are not there, create them as s1, s2, s3, .... if(is.null(namesCandidate)) { namesCandidate <- paste("s", 1:nrow(from), sep = "") } # create markovchain object out <- new("markovchain", transitionMatrix = from, states = namesCandidate, byrow = byrow) invisible(out) } #' @exportMethod coerce NULL # coerce matrix to markovchain object using internal method # example: as("some matrix", "markovchain") setAs(from = "matrix", to = "markovchain", def = .matrix2Mc) # Function to transform a markovchain into a data.frame # Args: # from: a markovchain object # # returns: # a data.frame .mc2Df <- function(from) { # number of rows or columns nr <- nrow(from@transitionMatrix) for(i in 1:nr){ for(j in 1:nr){ t0 <- from@states[i] t1 <- from@states[j] prob <- transitionProbability(object = from, t0 = t0, t1 = t1) #cope with the new default of R 4.0 (5-3-2020) rowDf <- data.frame(t0 = t0, t1 = t1, prob = prob,stringsAsFactors = TRUE ) # append to outDf if it already exists, otherwise initialize it if(exists("outDf")) { outDf <- rbind(outDf, rowDf) } else { outDf <- rowDf } } } return(outDf) } # method to convert(coerce) from markovchain to data.frame setAs(from = "markovchain", to = "data.frame", def = .mc2Df) # method to find the column which stores transition probability .whichColProb <- function(df) { # column number which stores transition probability out <- 0 # check for validity of data frame if(ncol(df) > 3) { warning("Warning! More than three columns. Only the first three will be used") } if(ncol(df) < 3) { stop("Error! 
Three columns needed") } for(i in 1:ncol(df)) { # when found the first numeric and probability col if((class(df[, i]) == "numeric") & (all(sapply(df[, i], .isProbability) == TRUE))) { out <- i break } } return(out) } # Function to convert from a data.frame containing initial, ending # and probability columns to a proper markovchain object # # Args: # from: a data.frame # # Returns: # A markovchain object .df2Mc <- function(from) { statesNames <- unique(from[, 1]) colProb <- .whichColProb(from) # what is the use # transition matrix prMatr <- zeros(length(statesNames)) rownames(prMatr) <- statesNames colnames(prMatr) <- statesNames for(i in 1:nrow(from)) { idRow <- which(statesNames == from[i, 1]) # assume first col from idCol <- which(statesNames == from[i, 2]) # assume second col to prMatr[idRow, idCol] <- from[i, 3] # assume third col t-probability } out <- new("markovchain", transitionMatrix = prMatr) return(out) } # method to convert(coerce) data frame to markovchain object setAs(from = "data.frame", to = "markovchain", def = .df2Mc) # example # data <- data.frame(from = c("a", "a", "b", "b", "b", "b"), # to = c("a", "b", "b", "b", "b", "a")) # # from <- table(data) # .table2Mc(from) .table2Mc <- function(from) { # check whether table has square dimension or not if(dim(from)[1] != dim(from)[2]) { stop("Error! Table is not squared") } # rows ond columns name should be same if(!setequal(rownames(from),colnames(from))) { stop("Error! Rows not equal to coulumns") } temp <- unclass(as.matrix(from)) # make same sequence of col / row fromMatr <- temp[, order(rownames(temp))] # obtain transition matrix outMatr <- fromMatr / rowSums(fromMatr) out <- new("markovchain", states = rownames(temp), transitionMatrix = outMatr, byrow=TRUE) return(out) } # coerce table to markovchain object setAs(from = "table", to = "markovchain", def = .table2Mc) # function from msm to markovchain # msm is a package. Use this package to create msm object. # see how to create msm object using ?msm .msm2Mc <- function(from) { temp <- msm::pmatrix.msm(from) prMatr <- unclass(as.matrix(temp)) out <- new("markovchain", transitionMatrix = prMatr) return(out) } # coerce msm object to markovchain object setClass("msm") setAs(from = "msm", to = "markovchain", def = .msm2Mc) # function for msm.est to mc. 
Assume a probability matrix given .msmest2Mc <- function(from) { if (is.matrix(from)) { # central estimate pMatr <- from } if (is.list(from)) { # central estimate pMatr <- from[[1]] } out <- new("markovchain", transitionMatrix = as(pMatr, "matrix")) return(out) } # coerce ms.est to markovchain object setClass("msm.est") setAs(from = "msm.est", to = "markovchain", def = .msmest2Mc) # function from etm to markovchain .etm2Mc<-function(from) { # data frame consists of 'from' and 'to' column df <- from$trans # name of states elements <- from$state.names # number of unique states nelements <- length(elements) # temporary t-matrix prMatr <- matlab::zeros(nelements) dimnames(prMatr) <- list(elements, elements) # populate t-matrix for(i in 1:dim(df)[1]) { r <- df[i, ] # each row one by one stateFrom <- r$from stateTo <- r$to prMatr[stateFrom, stateTo] <- prMatr[stateFrom, stateTo] + 1 } # convert freq-matrix to trans-matrix rsums <- rowSums(prMatr) prMatr <- prMatr / rsums # take care of rows with all entries 0 if(any(rsums == 0)) { indicesToBeSanitized <- which(rsums == 0) for(i in indicesToBeSanitized) { for(j in 1:nelements) { prMatr[i, j] <- 1 / nelements } } } # create markovchain object out <- new("markovchain", transitionMatrix = prMatr) return(out) } # coerce etm object to markovchain object setClass("etm") setAs(from = "etm", to = "markovchain", def = .etm2Mc) #sparse matrix from Matrix package .sparseMatrix2markovchain<-function(from){ temp<-as(from,"matrix") out <- as(temp, "markovchain") return(out) } .markovchain2sparseMatrix<-function(from){ temp<-as(from,"matrix") out <- as(temp, "sparseMatrix") return(out) } setAs(from = "sparseMatrix", to = "markovchain", def = .sparseMatrix2markovchain) setAs(from = "markovchain", to = "sparseMatrix", def = .markovchain2sparseMatrix) # functions and methods to return a matrix .mc2matrix <- function(from) { out <- from@transitionMatrix return(out) } # coerce markovchain object to matrix(transition) setAs(from = "markovchain", to = "matrix", def = .mc2matrix) # functions and methods to return a matrix .mc2igraph <- function(from) { # convert the markovchain to data.frame temp <- .mc2Df(from=from) # convert the data frame to igraph graph # need to set only non zero weights out <- graph.data.frame(d=temp[temp$prob>0,]) return(out) } # coerce markovchain object to igraph setClass("igraph") setAs(from = "markovchain", to = "igraph", def = .mc2igraph) #' @exportMethod t setGeneric("t") # transposing method for markovchain objects setMethod("t", "markovchain", function(x) { out <- new("markovchain", byrow = !x@byrow, transitionMatrix = t(x@transitionMatrix)) return(out) } ) #' @exportMethod * setGeneric("*") # function to multiplicate two markov chains # # Args: # e1: first markovchain # e2: second markov chain # # Returns: # if feasible, a markovchain where the transition matrix is e1*e2 setMethod("*", c("markovchain", "markovchain"), function(e1, e2) { # compare states of markovchains if(!setequal(e1@states, e2@states)) { warning("Warning! Different states") } # dimension must be equal if(!setequal(dim(e1@transitionMatrix), dim(e2@transitionMatrix))) { stop("Error! Different size") } # both must be either row wise or col wise if(!(e1@byrow == e2@byrow)) { stop("Error! 
Both transition matrices should be defined either by row or by column") } newStates <- e1@states newTransMatr <- e1@transitionMatrix %*% e2@transitionMatrix byRow <- e1@byrow # the product matrix takes the first matrix's name mcName <- e1@name out<-new("markovchain", states = newStates, transitionMatrix = newTransMatr, byrow = byRow, name = mcName) return(out) } ) # methods implemented for multiplication of markovchain object with # matrix, 1-D vector, and vice-versa setMethod("*", c("matrix", "markovchain"), function(e1, e2) { out <- e1 %*% e2@transitionMatrix return(out) } ) setMethod("*", c("markovchain", "matrix"), function(e1, e2) { out <- e1@transitionMatrix %*% e2 return(out) } ) setMethod("*", c("numeric", "markovchain"), function(e1, e2) { if(length(e1) != dim(e2)) { stop("Error! Incompatible dimensions") } else { out <- e1 %*% e2@transitionMatrix } return(out) } ) setMethod("*", c("markovchain", "numeric"), function(e1, e2) { if(length(e2) != dim(e1)) { stop("Error! Incompatible dimensions") } else { out <- e1@transitionMatrix %*% e2 } return(out) } ) #' @exportMethod == setGeneric("==") # compare two markovchain objects setMethod("==", c("markovchain", "markovchain"), function(e1, e2) { out <- .approxEqualMatricesRcpp(e1@transitionMatrix, e2@transitionMatrix) return(out) } ) #' @exportMethod != setGeneric("!=") setMethod("!=", c("markovchain", "markovchain"), function(e1, e2) { out <- FALSE out <- !(e1 == e2) return(out) } ) #'@exportMethod ^ setGeneric("^") # markovchain raised to some power # this method is O(n³ log(m)) where n = {num cols (= rows) of e1} and m = e2 setMethod("^", c("markovchain", "numeric"), function(e1, e2) { out <- new("markovchain", states = e1@states, byrow = e1@byrow, transitionMatrix = e1@transitionMatrix %^% e2, name = paste(e1@name, "^", e2, sep = "") ) return(out) } ) #' @exportMethod [ setGeneric("[") # methods to directly access transition matrix elements setMethod("[", signature(x = "markovchain", i = "ANY", j = "ANY"), function(x, i, j) { out <- x@transitionMatrix[i, j] return(out) } ) #' @exportMethod [[ setGeneric("[[") # methods to directly access markovchain objects composing a markovchainList object setMethod("[[", signature(x = "markovchainList", i = "ANY"), function(x, i) { out <- x@markovchains[[i]] return(out) } ) # transition probability vector from a given state #' @title \code{conditionalDistribution} of a Markov Chain #' #' @name conditionalDistribution #' #' @description It extracts the conditional distribution of the subsequent state, #' given current state. #' #' @param object A \code{markovchain} object. #' @param state The current state. 
#' #' @author Giorgio Spedicato, Deepak Yadav #' #' @return A named probability vector #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' # define a markov chain #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1),nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' conditionalDistribution(markovB, "b") #' #' @exportMethod conditionalDistribution setGeneric("conditionalDistribution", function(object, state) standardGeneric("conditionalDistribution")) setMethod("conditionalDistribution", "markovchain", function(object, state) { # get the states names stateNames <- states(object) # number of unique states out <- numeric(length(stateNames)) # states are assumed to be sorted index2Take <- which(stateNames == state) if(object@byrow == TRUE) { out <- object@transitionMatrix[index2Take, ] } else { out <- object@transitionMatrix[, index2Take] } # names the output and returs it names(out) <- stateNames return(out) } ) # Function to get the mode of a probability vector # # Args: # probVector: the probability vector # ties: specifies if ties are to be sampled, otherwise more than one element is returned # # Returns: # the name of the model element .getMode <- function(probVector, ties = "random") { maxIndex <- which(probVector == max(probVector)) temp <- probVector[maxIndex] # index of maximum probabilty if((ties == "random") & (length(temp) > 1)) { out <- sample(temp, 1) } else { out <- temp } return(names(out)) } #' @exportMethod predict setGeneric("predict") # predict method for markovchain objects # given initial state return a vector of next n.ahead states setMethod("predict", "markovchain", function(object, newdata, n.ahead = 1) { # identify the last state lastState <- newdata[length(newdata)] out <- character() for(i in 1:n.ahead) { # cyclically determine the most probable subsequent state from the conditional distribution newState <- .getMode(probVector = conditionalDistribution(object, lastState), ties = "random") out <- c(out, newState) lastState <- newState } return(out) } ) # predict method for markovchainList objects setMethod("predict", "markovchainList", function(object, newdata, n.ahead = 1, continue = FALSE) { # object a markovchainList # newdata = the actual data # n.ahead = how much ahead # continue = veryfy if that lasts # allocate output out <- character() actualPos <- length(newdata) lastState <- newdata[actualPos] # take last position for(i in 1:n.ahead) { newPos <- actualPos + i - 1 if(newPos <= dim(object)) { newState <- predict(object = object[[newPos]], newdata = lastState, n.ahead = 1) out <- c(out, newState) lastState <- newState } else { if(continue == TRUE) { newState <- predict(object = object[[dim(object)]], newdata = lastState, n.ahead = 1) out <- c(out, newState) lastState <- newState } else break; } } return(out) } ) #sort method for markovchain objects setGeneric("sort", function(x, decreasing=FALSE, ...) 
standardGeneric("sort")) setMethod("sort", signature(x="markovchain"), function(x, decreasing = FALSE) { #get matrix and state names 2 be sorted matr2besorted<-x@transitionMatrix if (x@byrow) states2besorted <- rownames(matr2besorted) else states2besorted <- colnames(matr2besorted) #sorting sort_index<-order(states2besorted,decreasing = decreasing) #reallocating matr_sorted<-matr2besorted[sort_index,sort_index] states_sorted<-states2besorted[sort_index] out<-x out@transitionMatrix<-matr_sorted out@states<-states_sorted return(out) } ) # method to get stationary states #' @name steadyStates #' @title Stationary states of a \code{markovchain} object #' #' @description This method returns the stationary vector in matricial form of a markovchain object. #' @param object A discrete \code{markovchain} object #' #' @return A matrix corresponding to the stationary states #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' @author Giorgio Spedicato #' @seealso \code{\linkS4class{markovchain}} #' #' @note The steady states are identified starting from which eigenvectors correspond #' to identity eigenvalues and then normalizing them to sum up to unity. When negative values are found #' in the matrix, the eigenvalues extraction is performed on the recurrent classes submatrix. #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' steadyStates(markovB) #' #' @rdname steadyStates #' @exportMethod steadyStates setGeneric("steadyStates", function(object) standardGeneric("steadyStates")) markovchain/R/supplementaryPlot.R0000644000176200001440000000513413762012754016661 0ustar liggesusers# plot a diagram using diagram for a markovchain object .plotdiagram <- function(object, ...) { if(class(object) == "markovchain"){ mat <- object@transitionMatrix list <- .communicatingClassesRcpp(object) sections <- length(list) colorList <- grDevices::colors() colorList <- sample(colorList,sections) colorvector <- rep("white",length(object@states)) for(i in 1:length(list)){ part <- list[[i]] for(j in 1:length(part)){ colorvector[match(part[j],object@states)] <- colorList[i] } } } else if(class(object) == "ctmc"){ mat <- object@generator colorvector <- rep("white",length(object@states)) } if(object@byrow == FALSE) { mat <- t(mat) } # pass the matrix as columnwise fashion diagram::plotmat(t(mat),relsize = 0.75,box.col = colorvector, ...) } # plot a diagram using DiagrammeR for a markovchain object .plotDiagrammeR <- function(object, ...) { if(class(object) == "markovchain"){ mat <- object@transitionMatrix } else if(class(object) == "ctmc"){ mat <- object@generator } names <- rownames(mat) # names of nodes nodes <- '' for(i in 1:nrow(mat)) { nodes <- paste0(nodes, names[i], "; ") } # store edges edges <- '' for(i in 1:nrow(mat)) { for(j in 1:ncol(mat)) { edges <- paste0(edges, names[i], "->", names[j], " [label = ", mat[i,j], "] ") } } # extract extra parameter dots <- list(...) 
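  # build a string of graph-level attributes (name = "value") from the extra
  # arguments captured above; it is appended to the DOT specification below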
args <- "" for(name in names(dots)) { args <- paste0(args, name, "=\"", dots[[name]], "\" ") } # print(args) res <- DiagrammeR::grViz(paste0(" digraph circles { graph [overlap = true, fontsize = 10] node [shape = circle, fixedsize = true, width = 0.9] // sets as circles ", nodes, " ", edges, args," // labelfontsize = 20 labelloc='t' label ='Weather transition matrix' } ")) return (res) } # How to do plotting? # mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), # transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, # 0.3, 0.4, 0.3, # 0.2, 0.45, 0.35), byrow = T, nrow = 3), # name = "Weather") # mcWeather # .plotdiagram(mcWeather, box.size = 0.06) # .plotDiagrammeR(mcWeather, label ="Weather transition matrix", labelloc = "t") # plot(mcWeather, package = "DiagrammeR", label = "Weather transition matrix")markovchain/MD50000644000176200001440000002060014050525163013063 0ustar liggesusersd78281ec1e7a63fae250f10707532adc *ChangeLog e1d925ea3da6c848e6856a717fb29147 *DESCRIPTION bbe9b1c32931766aa31d250e79e2801a *NAMESPACE c4ddd398479446a4f2b8f366c457cc2a *NEWS 49cefb5f5f768b88c6093cb5e1c2f65d *R/RcppExports.R 5e263108c366a6cd04845a3ae513ca2e *R/classesAndMethods.R de2bd558f0b4120acaaf8872ca2299f2 *R/ctmcClassesAndMethods.R 5683c387a5be6daafb6d1741ae3984b9 *R/ctmcProbabilistic.R 8c7ce890d00d92ce802b378a2bcf1dfd *R/data.R 3116a18839fae9568c057505eb25ccb5 *R/fitHigherOrder.R a386d9117e131e157b77943b5e448711 *R/fittingFunctions.R 2f6ce3bddae9bd6e66377020052f8e70 *R/hommc.R 279cd2b8ad81d99b8a4e5b46e94a0cd7 *R/markovchain.R e5862947a4f397c501f11a3fd04ced09 *R/probabilistic.R 8c90c159a9489b0f5dfb8ce08e0495e8 *R/random.R d1b7f19454b2c4e3ee35e0a96436a083 *R/statisticalTests.R d63c96e3a20af5a1096a32ec9952f85d *R/supplementaryPlot.R a0bf234df2af8a3f0d5dc6c7e951b7cc *R/sysdata.rda 39b735feac8bc2ead976b8f9d61db649 *R/utils.R db74c50b2544eddf346897e1a7a1e11d *R/zzz.R 67f2907492ff767274d15244f36dd1db *README.md 99298298f40c05f11148a0026e5dc297 *build/vignette.rds f18b14f78c84c70dbec0ec65ee8950eb *data/blanden.rda 70ac1ce9c2ddb9481ab65b7a31aca4f5 *data/craigsendi.rda 82e35ddd01782b3f91b338cce0790fd1 *data/holson.rda e3efeea63bddc81467f67fce8a2df55b *data/kullback.rda a9e23ed3b0e4dfb5ea85fc60255f5089 *data/preproglucacon.rda 1ce81677704b0c695146cea160aa8107 *data/rain.rda 9857654f2bef257c35ad958365aca3dc *data/sales.rda edb9ca572cc22a15350ebf4a3b69d0ed *data/tm_abs.rda 7062e44a453478115862d4383d21b1aa *demo/00Index 341e2275587b8537b7a3817aaf3588a4 *demo/bard.R 1700f0b961dc53993a9686ce375b2f06 *demo/computationTime.R dbf49ba2e9ce80d4d1f034650764a231 *demo/examples.R fb3046e24980a921d79cba3c3ef6f3b0 *demo/extractMatrices.R 89142f6fd07792388e74fd69002dffeb *demo/mathematica.R 837a7db4def223b7ba17ba93f52039f5 *demo/quasiBayesian.R 1dc619850b52023f072249b66f191b92 *demo/quickStart.R 9b61f7557c1e6c863affd1bc5f07480e *demo/reliability.R 4412e3c1ae45e06dc3dc41779424a939 *inst/CITATION 7996566b5427ce5888d4c2fc0770c2cd *inst/doc/an_introduction_to_markovchain_package.R 93595962cbd87e8c8a06077e60f219cb *inst/doc/an_introduction_to_markovchain_package.Rmd 76e51ff89971b81857b66b2356709d97 *inst/doc/an_introduction_to_markovchain_package.pdf 4b3b3806c112ae0147a4662161165b24 *inst/doc/gsoc_2017_additions.R 263029ac4cc9bc8b12ab1d8c7dffd3df *inst/doc/gsoc_2017_additions.Rmd cec8a50a40510b9506f22f4fb640705e *inst/doc/gsoc_2017_additions.pdf e70605cb8e702fa464278f845ad14bc8 *inst/doc/higher_order_markov_chains.R 6039ddc13e6d4e61f10f3eddb0fbbe9f *inst/doc/higher_order_markov_chains.Rmd 
2adfa1f1c6dfd788e781f9f9205c1214 *inst/doc/higher_order_markov_chains.pdf 730fbb92cced04056c80f883f3566252 *inst/extdata/ltdItaData.txt 75bf84bd1d2c1ad2c18b97199443d27f *man/ExpectedTime.Rd 3a7587d2c12ba2a1652ad7e38cc69443 *man/HigherOrderMarkovChain-class.Rd 2207974462e269879736e2aa4f45f3bf *man/absorptionProbabilities.Rd 984657ac88a51f09cd8013b2ad5ccf81 *man/blanden.Rd 515e8fd8bfffcfe1cf5349200a7682a6 *man/committorAB.Rd 6a4eb256b4f993212a37f877712edf59 *man/conditionalDistribution.Rd cd4830df7e3f9ab722d4edb724aa9c1e *man/craigsendi.Rd 84d44f1e1dae8840ee345b2368ebede6 *man/ctmc-class.Rd 7b1edcd40aab0fa00c37a2b8b0a32588 *man/ctmcFit.Rd 1a601c7098ade1f736a926f7e33bb90b *man/expectedRewards.Rd 13ef966850f4dae14aaa7221b1181720 *man/expectedRewardsBeforeHittingA.Rd aff4c6974caafc84c7d410d6d706ad62 *man/firstPassage.Rd bbaac4bed94a47e88deacbf976b86e9e *man/firstPassageMultiple.Rd d2c9c7016cb65119ae51207965beac19 *man/fitHighOrderMultivarMC.Rd 5a78cb9d6722f1b978c563cfe7a7587f *man/fitHigherOrder.Rd 11f3ca6a0117afde424469092c385e59 *man/freq2Generator.Rd dad43a04be77e9a9b7ae1d3a9fb7a495 *man/generatorToTransitionMatrix.Rd c9840a546f9357451f76ca09b23a0bd8 *man/getName.Rd a4603eda5578e9f4fbd25b3029643401 *man/hittingProbabilities.Rd add6fde771cc5bf6c02827ea3536fc96 *man/holson.Rd a46092594d426a3bedb4bd1782da2da2 *man/hommc-class.Rd 297096be86b88ebeaabd6ee7ed0a6486 *man/hommc-show.Rd 10dbf12cf0346f0054d567cd09fb65ff *man/ictmc-class.Rd 592eb7fba6781dfdcee2d2dff82df000 *man/impreciseProbabilityatT.Rd eb6b353a91450ff5674cb25090d46d70 *man/inferHyperparam.Rd db14356077967b114596ac92021a8a20 *man/is.CTMCirreducible.Rd 61cd70542034adea9588515a291dc19c *man/is.TimeReversible.Rd 4dceef72c917d3e66e97adfd880435c9 *man/is.accessible.Rd 93a3d28c78d94b800997dc44077bd480 *man/is.irreducible.Rd df3496a2d6e96dd76409386c6d3f40f0 *man/is.regular.Rd 44c9b0250b8fd6a2e46bcd0a8545f59b *man/kullback.Rd f1ac77afa05488dc787ef87ce5a9074e *man/markovchain-class.Rd e74fd618150c586248fc8d12a3509ce3 *man/markovchain.Rd 550130443105af8219ecc8fabbb62811 *man/markovchainFit.Rd c3c1420c4aec6d3ee30ff46f68756329 *man/markovchainList-class.Rd 16c1cbb7705b17ebb518f1106583dfa2 *man/markovchainListFit.Rd 9be58105d48ce7cbefa8ad420a47f2c0 *man/markovchainSequence.Rd abbaca484a9abb47564201cf4dd69834 *man/meanAbsorptionTime.Rd a26bc397e9698f97f458b3af618a88b3 *man/meanFirstPassageTime.Rd 73e3f9e86f0f26a4ba33040457e2a3ab *man/meanNumVisits.Rd a51431e1948ef165709ff5329b469948 *man/meanRecurrenceTime.Rd 2f53a77e485cefec6a31883806563a7e *man/multinomialConfidenceIntervals.Rd 5fa42ddb7efca956e8e6c354bced7302 *man/names.Rd c7d4ada0e1a238d53ec005b32d93515f *man/noofVisitsDist.Rd f371b228dbd222f85535f1d4c9219b43 *man/predictHommc.Rd fc9bb6a67bee60dda7b4dad14bf7f3ca *man/predictiveDistribution.Rd 29a6c0777531665ad64fc774c6e87e25 *man/preproglucacon.Rd d1755ab27b558f63b9e740af441f7777 *man/priorDistribution.Rd 80fbd5430cc4b8d68d031b1424b62a75 *man/probabilityatT.Rd 4817c2004dc1ac01c19cc8f9a3212b45 *man/rain.Rd 9d4ee477a6139b177ed5d218b12e7fb3 *man/rctmc.Rd 954bc6c4ee8923a7ecec4928f121e9c1 *man/rmarkovchain.Rd 0c8479de816d083d212b802a03c6e802 *man/sales.Rd 4ba99b5946a10b828a943aeedf40e50a *man/setName.Rd be0b6000cb92dc3757c227737ebcef4c *man/states.Rd a28a8b97f9354cb3bf08733359978209 *man/statisticalTests.Rd 3825bbb52da090aae367cac4de6f13f0 *man/steadyStates.Rd 84702a1a7cdd752c4d65bdd9b2abb26c *man/structuralAnalysis.Rd cc0565a34593f573e482e92783e6cd27 *man/tm_abs.Rd b60ba3fe4b6b18d4e99878b7b259562a *man/transition2Generator.Rd 
19d5e66645633e19a90cd83e3049b0c9 *man/transitionProbability.Rd a1e86627020c43852446a8e2c2141b3f *src/Makevars a1e86627020c43852446a8e2c2141b3f *src/Makevars.win 10fbda077c0247e8c9b5bc2517b41e18 *src/RcppExports.cpp 4b77a04f31755a0fc89baed5d50c8aca *src/classesAndMethods.cpp e28005d65e2b51dac4973cb40de229b5 *src/ctmcClassesAndMethods.cpp 100a776c9e36949f4bc77fa3467ba1b6 *src/ctmcFittingFunctions.cpp 02ce3f3715895ea5a19302312ea0eabf *src/ctmcProbabilistic.cpp d99e875a963f6fbaf1734b3470ca67fb *src/fitHigherOrder.cpp 54d343f821d5ba9f3ec7dd600d4cc830 *src/fittingFunctions.cpp 8c9f0905b63553383414ccbbf6dfbea5 *src/helpers.h 723761dbfb6fbf1cad9db40ef8dcfcf7 *src/mapFitFunctions.h 671ba94f3c19dc66524665fd56d2b82c *src/multinomCI.cpp 4a38fa670e4b93231f64daacb833f761 *src/probabilistic.cpp 773eb441814453a5543ba24b0330fe69 *src/utils.cpp 9ca86bdb9770b1d4f09faaccd4b207c0 *tests/testthat.R 0943379b6c3cc4843f3254625135c8e7 *tests/testthat/setupData.R f42016766cd79e46169152f54e8ce181 *tests/testthat/testBasic.R 08ec2d61a98f5f86cdd462e67abb5b96 *tests/testthat/testCommClasses.R 95866d62c44f628264aac082f16f8562 *tests/testthat/testFits.R 8cf73c40b5fad4c821665d3a8855a460 *tests/testthat/testHigherOrder.R 20c850c2cd92cdb1f65e28d980d7487c *tests/testthat/testHittingProbabilities.R 9b3034b373172fb7f0cdfa4168de48f6 *tests/testthat/testMarkovChainsClassification.R 01484ce41cdbb428acd213b478096d00 *tests/testthat/testMeanMeasures.R c6b1cc046d56e93de12430fa159cca00 *tests/testthat/testMultinomCI.R c18dc83674455528131959bb3be4f1c6 *tests/testthat/testOptimization.R 76426c64e1f40feec3af8d9efe2b77fc *tests/testthat/testPeriod.R addba15f1b859a975faace21a0f59995 *tests/testthat/testPlot.R 2d8ff4c180c1b9b9828b85986775b24d *tests/testthat/testStatesClassification.R de99aed42aad6ce6930c180d7107351e *tests/testthat/testStatisticalTests.R 1335e4cc5e0a3229a97dcc2b1c13f3d9 *tests/testthat/testSteadyStates.R 201a49d2650e0f94a431772ac391da40 *tests/testthat/testctmc.R 6ccccf1234176016aec331a8b5d4fab2 *tests/testthat/testetm.R 6375e9a45d7975e9de0cf04b2a6a68fd *tests/testthat/testmsm.R 93595962cbd87e8c8a06077e60f219cb *vignettes/an_introduction_to_markovchain_package.Rmd 263029ac4cc9bc8b12ab1d8c7dffd3df *vignettes/gsoc_2017_additions.Rmd 6039ddc13e6d4e61f10f3eddb0fbbe9f *vignettes/higher_order_markov_chains.Rmd c19f024e4ed601bd7796030986ef807b *vignettes/markovchainBiblio.bib 057d08248e65d85ee90839f9c2e6ba4f *vignettes/template.tex markovchain/inst/0000755000176200001440000000000014050513453013531 5ustar liggesusersmarkovchain/inst/doc/0000755000176200001440000000000014050513453014276 5ustar liggesusersmarkovchain/inst/doc/higher_order_markov_chains.Rmd0000644000176200001440000003025313762012754022321 0ustar liggesusers--- title: plain: "Higher, possibly multivariate, Order Markov Chains in markovchain package" formatted: "Higher, possibly multivariate, Order Markov Chains in \\pkg{markovchain} package" short: "Higher order (multivariate) Markov chains" pagetitle: "Higher order (multivariate) Markov chains" author: - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat ACAS, UnipolSai R\&D address: > Via Firenze 11 Paderno Dugnano 
20037 Italy email: \email{spedicato\_giorgio@yahoo.it} preamble: > \author{Deepak Yadav, Tae Seung Kang, Giorgio Alfredo Spedicato} \usepackage{graphicx} \usepackage{amsmath} \usepackage{tabularx} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} abstract: | The \pkg{markovchain} package contains functions to fit higher (possibly) multivariate order Markov chains. The functions are shown as well as simple exmaples output: if (rmarkdown::pandoc_version() < 2.2) function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{Higher order markov chains} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [Higher order Markov chains] formatted: [Higher order Markov chains] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ``` # Higher Order Markov Chains Continuous time Markov chains are discussed in the CTMC vignette which is a part of the package. An experimental `fitHigherOrder` function has been written in order to fit a higher order Markov chain ([@ching2013higher, @ching2008higher]). `fitHigherOrder` takes two inputs 1. sequence: a categorical data sequence. 2. order: order of Markov chain to fit with default value 2. The output will be a `list` which consists of 1. lambda: model parameter(s). 2. Q: a list of transition matrices. $Q_i$ is the $ith$ step transition matrix stored column-wise. 3. X: frequency probability vector of the given sequence. Its quadratic programming problem is solved using `solnp` function of the Rsolnp package [@pkg:Rsolnp]. ```{r load, results='hide', warning=FALSE, message=FALSE} require(markovchain) library(Rsolnp) ``` ```{r higherOrder} data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) ``` # Higher Order Multivariate Markov Chains ## Introduction HOMMC model is used for modeling behaviour of multiple categorical sequences generated by similar sources. The main reference is [@ching2008higher]. Assume that there are s categorical sequences and each has possible states in M. In nth order MMC the state probability distribution of the jth sequence at time $t = r + 1$ depend on the state probability distribution of all the sequences (including itself) at times $t = r, r - 1, ..., r - n + 1$. \[ x_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)}P_{h}^{(jk)}x_{r-h+1}^{(k)}, j = 1, 2, ..., s, r = n-1, n, ... \] with initial distribution $x_{0}^{(k)}, x_{1}^{(k)}, ... , x_{n-1}^{(k)} (k = 1, 2, ... , s)$. Here \[ \lambda _{jk}^{(h)} \geq 0, 1\leq j, k\leq s, 1\leq h\leq n \enspace and \enspace \sum_{k=1}^{s}\sum_{h=1}^{n} \lambda_{jk}^{(h)} = 1, j = 1, 2, 3, ... , s. \] Now we will see the simpler representation of the model which will help us understand the result of `fitHighOrderMultivarMC` method. \vspace{5mm} Let $X_{r}^{(j)} = ((x_{r}^{(j)})^{T}, (x_{r-1}^{(j)})^{T}, ..., (x_{r-n+1}^{(j)})^{T})^{T} for \enspace j = 1, 2, 3, ... 
, s.$ Then \vspace{5mm} \[ \begin{pmatrix} X_{r+1}^{(1)}\\ X_{r+1}^{(2)}\\ .\\ .\\ .\\ X_{r+1}^{(s)} \end{pmatrix} = \begin{pmatrix} B^{11}& B^{12}& .& .& B^{1s}& \\ B^{21}& B^{22}& .& .& B^{2s}& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ B^{s1}& B^{s2}& .& .& B^{ss}& \\ \end{pmatrix} \begin{pmatrix} X_{r}^{(1)}\\ X_{r}^{(2)}\\ .\\ .\\ .\\ X_{r}^{(s)} \end{pmatrix} \textrm{where} \] \[B^{ii} = \begin{pmatrix} \lambda _{ii}^{(1)}P_{1}^{(ii)}& \lambda _{ii}^{(2)}P_{2}^{(ii)}& .& .& \lambda _{ii}^{(n)}P_{n}^{(ii)}& \\ I& 0& .& .& 0& \\ 0& I& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& I& 0& \end{pmatrix}_{mn*mn} \textrm{and} \] \vspace{5mm} \[ B^{ij} = \begin{pmatrix} \lambda _{ij}^{(1)}P_{1}^{(ij)}& \lambda _{ij}^{(2)}P_{2}^{(ij)}& .& .& \lambda _{ij}^{(n)}P_{n}^{(ij)}& \\ 0& 0& .& .& 0& \\ 0& 0& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& 0& 0& \end{pmatrix}_{mn*mn} \textrm{when } i\neq j. \] \vspace{5mm} ## Representation of parameters in the code $P_{h}^{(ij)}$ is represented as $Ph(i,j)$ and $\lambda _{ij}^{(h)}$ as Lambdah(i,j). For example: $P_{2}^{(13)}$ as $P2(1,3)$ and $\lambda _{45}^{(3)}$ as Lambda3(4,5). ## Definition of HOMMC class ```{r hommcObject} showClass("hommc") ``` Any element of `hommc` class is comprised by following slots: 1. states: a character vector, listing the states for which transition probabilities are defined. 2. byrow: a logical element, indicating whether transition probabilities are shown by row or by column. 3. order: order of Multivariate Markov chain. 4. P: an array of all transition matrices. 5. Lambda: a vector to store the weightage of each transition matrix. 6. name: optional character element to name the HOMMC ## How to create an object of class HOMMC ```{r hommcCreate} states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ``` ## Fit HOMMC `fitHighOrderMultivarMC` method is available to fit HOMMC. Below are the 3 parameters of this method. 1. seqMat: a character matrix or a data frame, each column represents a categorical sequence. 2. order: order of Multivariate Markov chain. Default is 2. 3. Norm: Norm to be used. Default is 2. # A Marketing Example We tried to replicate the example found in [@ching2008higher] for an application of HOMMC. A soft-drink company in Hong Kong is facing an in-house problem of production planning and inventory control. A pressing issue is the storage space of its central warehouse, which often finds itself in the state of overflow or near capacity. The company is thus in urgent needs to study the interplay between the storage space requirement and the overall growing sales demand. The product can be classified into six possible states (1, 2, 3, 4, 5, 6) according to their sales volumes. All products are labeled as 1 = no sales volume, 2 = very slow-moving (very low sales volume), 3 = slow-moving, 4 = standard, 5 = fast-moving or 6 = very fast-moving (very high sales volume). Such labels are useful from both marketing and production planning points of view. The data is cointaind in `sales` object. 
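As an aside, before moving to the data, the short sketch below (not taken from the reference) checks the first-order update equation numerically on the small `hob` example constructed earlier; the initial distributions `x1` and `x2` are arbitrary and chosen purely for illustration.

```{r hommcByHand}
# one-step update x_{r+1}^{(j)} = sum_k Lambda1(j,k) * P1(j,k) x_r^{(k)},
# written out by hand for the order 1, two-sequence example above
x1 <- c(1, 0)  # current distribution of the first sequence (all mass on "a")
x2 <- c(0, 1)  # current distribution of the second sequence (all mass on "b")
x1new <- Lambda[1] * P[, , 1] %*% x1 + Lambda[2] * P[, , 2] %*% x2
x2new <- Lambda[3] * P[, , 3] %*% x1 + Lambda[4] * P[, , 4] %*% x2
# both are again probability vectors (non-negative, summing to one)
x1new
x2new
```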
```{r hommsales} data(sales) head(sales) ``` The company would also like to predict sales demand for an important customer in order to minimize its inventory build-up. More importantly, the company can understand the sales pattern of this customer and then develop a marketing strategy to deal with this customer. Customer's sales demand sequences of five important products of the company for a year. We expect sales demand sequences generated by the same customer to be correlated to each other. Therefore by exploring these relationships, one can obtain a better higher-order multivariate Markov model for such demand sequences, hence obtain better prediction rules. In [@ching2008higher] application, they choose the order arbitrarily to be eight, i.e., n = 8. We first estimate all the transition probability matrices $P_{h}^{ij}$ and we also have the estimates of the stationary probability distributions of the five products:. $\widehat{\boldsymbol{x}}^{(1)} = \begin{pmatrix} 0.0818& 0.4052& 0.0483& 0.0335& 0.0037& 0.4275 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(2)} = \begin{pmatrix} 0.3680& 0.1970& 0.0335& 0.0000& 0.0037& 0.3978 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(3)} = \begin{pmatrix} 0.1450& 0.2045& 0.0186& 0.0000& 0.0037& 0.6283 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(4)} = \begin{pmatrix} 0.0000& 0.3569& 0.1338& 0.1896& 0.0632& 0.2565 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(5)} = \begin{pmatrix} 0.0000& 0.3569& 0.1227& 0.2268& 0.0520& 0.2416 \end{pmatrix}^{\boldsymbol{T}}$ By solving the corresponding linear programming problems, we obtain the following higher-order multivariate Markov chain model: \vspace{3mm} $\boldsymbol{x}_{r+1}^{(1)} = \boldsymbol{P}_{1}^{(12)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(2)} = 0.6364\boldsymbol{P}_{1}^{(22)}\boldsymbol{x}_{r}^{(2)} + 0.3636\boldsymbol{P}_{3}^{(22)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(3)} = \boldsymbol{P}_{1}^{(35)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(4)} = 0.2994\boldsymbol{P}_{8}^{(42)}\boldsymbol{x}_{r}^{(2)} + 0.4324\boldsymbol{P}_{1}^{(45)}\boldsymbol{x}_{r}^{(5)} + 0.2681\boldsymbol{P}_{2}^{(45)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(5)} = 0.2718\boldsymbol{P}_{8}^{(52)}\boldsymbol{x}_{r}^{(2)} + 0.6738\boldsymbol{P}_{1}^{(54)}\boldsymbol{x}_{r}^{(4)} + 0.0544\boldsymbol{P}_{2}^{(55)}\boldsymbol{x}_{r}^{(5)}$ \vspace{3mm} According to the constructed 8th order multivariate Markov model, Products A and B are closely related. In particular, the sales demand of Product A depends strongly on Product B. The main reason is that the chemical nature of Products A and B is the same, but they have different packaging for marketing purposes. Moreover, Products B, C, D and E are closely related. Similarly, products C and E have the same product flavor, but different packaging. In this model, it is interesting to note that both Product D and E quite depend on Product B at order of 8, this relationship is hardly to be obtained in conventional Markov model owing to huge amount of parameters. The results show that higher-order multivariate Markov model is quite significant to analyze the relationship of sales demand. ```{r hommcFit, warning = FALSE, message = FALSE} # fit 8th order multivariate markov chain object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2) ``` We choose to show only results shown in the paper. We see that $\lambda$ values are quite close, but not equal, to those shown in the original paper. 
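The selected $\lambda$'s and the corresponding transition matrices are printed below. The fitted object can also be used for forecasting: the call in the following sketch (not evaluated here; it simply mirrors the `predictHommc` usage found in the package's GSoC vignette code) would produce a prediction of the sales-volume states for the next three periods for each of the five demand sequences.

```{r hommcPredictSketch, eval = FALSE}
# three-step ahead prediction from the 8th order multivariate model fitted above
predictHommc(object, 3)
```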
```{r result, echo = FALSE} i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } ``` # References markovchain/inst/doc/higher_order_markov_chains.R0000644000176200001440000000427614050513420021771 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ## ----load, results='hide', warning=FALSE, message=FALSE----------------------- require(markovchain) library(Rsolnp) ## ----higherOrder-------------------------------------------------------------- data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) ## ----hommcObject-------------------------------------------------------------- showClass("hommc") ## ----hommcCreate-------------------------------------------------------------- states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ## ----hommsales---------------------------------------------------------------- data(sales) head(sales) ## ----hommcFit, warning = FALSE, message = FALSE------------------------------- # fit 8th order multivariate markov chain object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2) ## ----result, echo = FALSE----------------------------------------------------- i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } markovchain/inst/doc/gsoc_2017_additions.R0000644000176200001440000001161514050513312020061 0ustar liggesusers## ----message = FALSE---------------------------------------------------------- library(markovchain) states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ## ----------------------------------------------------------------------------- ExpectedTime(ctmc,1,4) ## 
----------------------------------------------------------------------------- probabilityatT(ctmc,1) ## ----------------------------------------------------------------------------- probabilityatT(ctmc,1,1) ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ## ----------------------------------------------------------------------------- plot(molecularCTMC) ## ----------------------------------------------------------------------------- plot(molecularCTMC,package = "diagram") ## ----------------------------------------------------------------------------- states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) ## ----------------------------------------------------------------------------- sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) ## ----------------------------------------------------------------------------- transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") committorAB(object,c(5),c(3)) ## ----------------------------------------------------------------------------- statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) ## ----------------------------------------------------------------------------- firstPassageMultiple(testmarkov,"a",c("b","c"),4) ## ----------------------------------------------------------------------------- transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") ## ----------------------------------------------------------------------------- transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) ## ----------------------------------------------------------------------------- statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda 
<- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") predictHommc(ob,3) ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) markovchain/inst/doc/higher_order_markov_chains.pdf0000644000176200001440000023636414050513461022353 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4333 /Filter /FlateDecode /N 98 /First 817 >> stream x[[s۸~[.INg$7qⵝMc6YJt.@Jɑm3,s?@&L2SL ̤fcYcqv /9¤b?ع{~e9;Wy6=+UQ,(m~QgU*~VX7SߒIצM";߲OEuͮl_yS9Gu>cQ6P~dY1Qy񵉎g9}{v]ΫN'?#ϊj7snx?n&U1 ˞Svxmv~ vVV/4D~?I&BE?7T mh8Mix5 aF@{ʧx*!0햟1R|Igxa|^s"ȚԷ49?+?*+PRwwA#M&K5PGcT~< H;txTGeQ(#De@T7O1qXcY!(B[+FN.!yu&>V^I!DqA!M 6pzZ&LX[~C ',W̬ve7yϼުsv^^}ZF f \ Gځc?,E=v>&Eb@yzbJvN&'gP,C~-F[nuTu7N_ "8U ̍!KvzX56w n끮8xyϫēds4&پC-aP"2rsI,Ak"m(~K8IIxk7GbA' m-,m v-(,|`RFz !(_$S)}aG%LJELD@Y"VI "Ri}`Bd 5L)NlT9&mI|U龔l{?yw]ŸLaHF?dFVK!)i47o@).k+ Q!J4ɟ~^MJQs@ԫL8mzs*osv ջc?P/)eBݧ7Nl2ၞ7.<-*/>.5xQA$&;^?0~ů4>Y>>I9,_ƗE>%tt|_EClZi88zN 0&/cFrBH(>{_ kD? #rJw9B0E JyqA)?縞hhh%868x0=bO/uqM .Ep/#Hr&eJ3?t-yG]Y'WS }-IG5/>Kw9ajTeZyړVP+JJ_1aG!ӝX>X Tq| EWvH2M7ƐădʬӾMQ۔R)M-ǥRoC[rЖ+_혬g-Y: JGgAH5 D3 `cMgyGĐpg;5JL4ڬ*UŀURb?Kg}ߤ5:}:T4>˓|l=Z93,r 3#RAϲyuw]oV.$-KcV:>@6%XM;Y}EUK=ZU*=&{ٓ>ЕRz χ/^c꣮=qXf:tfIk6N] שk ø86UL]iie$IJM9K5ZRM{6|$R@Hj P#x&yҶ WXWEhg}^gIjҵc_޽5-(5Q!?[eeKtCr{ҡD-HK R.Mڣ P4y%%^;_-|H;a M\I )7c{1nnmǔmOd5ŏ!$σyK]\MM} +BMffGm)!yw'fi0݋'NA lx) R?uYeC'?y·Y+;n!;nK=糽W= ֏nLH }^&~xsYeH#JeG>;E&{:]YN=&zEoIehuNyxks/mgֵ^:|{-my\]|"i֖7R+#M9R#I_j"F.RE &m# F FhQ]7*j`j@%=5(_O :}yb0k+@ul-tdC馘N+f֎ cIrsZ|#ԺbHzhuˠKuW:=-R+:ѾvRmXwͤb DTD-)Xt.ºC Z t߰AeGXj`tʌ$(ugcXW C aam{S[IAm Z)=nUNߺp=oX1Tl8iKw>Ztxs*nAZwCspaUMVL. EW7=\M)o[H8dGyDa=,R kYȭV"ȦWVDۈ.ri6T v2%`u;o.*\/,z98I" vjM4T90Kp[7aɥ{N~nf,^Т-\vubēlh -{zr߀uIgaK˂nNҭF݀vlK pLnXI9c2;at[&8Heendstream endobj 100 0 obj << /Subtype /XML /Type /Metadata /Length 1703 >> stream GPL Ghostscript 9.20 Higher order Markov chains 2021-05-17T18:17:52+02:00 2021-05-17T18:17:52+02:00 LaTeX with hyperref Higher, possibly multivariate, Order Markov Chains in markovchain packageDeepak Yadav, Tae Seung Kang, Giorgio Alfredo Spedicato endstream endobj 101 0 obj << /Filter /FlateDecode /Length 3696 >> stream xZr#sСǡi׾E[pCaH@P{uC-TB:++/v%f/O/[]ܝ~Zv֬N_rYy%giz#fcqzR(tqF7cacv|ZAfQnvBN;ڬcTr*(h^bip*L$-EX$+Z=#{ުXc酰C,vxazX[ iO[z*qDoilhvNwNCz9;=MG[<:x6␴C7WGӞVC,lt d &L^Wޑ==^q<&HfjG jsNCEgcty*I0]6uzI]8]Nh$͆@J~Z% t'qZE1vV&6F8w+&!yN*c\O[G7g3,_x C0Dw! 8mS]): +ٌ;\0RA𰻨ۓ"A`)k&<>}IܵݯICLzYA'oJ!8!v@ʯ Nt%wӛ5NO9{W' ,\]Xڏo?q@'+AȔ)ݮ!},>ޤV5y_ K9 ec7$] F-(R`FƔቴDs;Ὣ`lT(dOO3"hFU צesyPNȅHq~6G2i ԦUp-HSL)9;0ZT KugJ46N<+"sZ-kbXs&!Jh=Fb'hr?{ Jk;`LKVux(s? di\vN%LO|kRlC2FiE'90 8"1&:c~v;{b;TFjD~87!4:I;N #HIy* urGXҠqԟCy*"<\re93"ն.A10x0!N '/}[>]pіimgOώKV^g kd8Uf@!JBύP|a^x$8,I)=. ԋt'͢%qSQ.0ݫzy7ež]E=<$rD.{a.{ձ--φtARP3)X ӌ ,[ܒ£iˍPS#!8+ޮ$j&OXqQ}I4@NOrh'MzwSg~ݼDĥOߵԢڋkB+{fjnTW6elV Yt0< } ufbY3<^쏀EX7kmPuŘ=x\-S RZ>;|Y[rB]HT&<{jt3}+Xv ܕvO6+콡? tDAodm t鳓teBXQ6{4PG疧_mB? )i{n$`2;2wl?Ko#H#,b 4O?@ZT'v eS K#0shqWcz2lt=~8@)GDԽ;wԕ?D#"?rJq$@TT8aLud խ0J셿"ZuO!kO+z.X8ݰew(ƺb 4Ƥ~3>ca-㗋ao}|HM;py-wmyݖ7Ta{ DEjRBǼU7Ehz+}HPa]Cێfyx M̧tD&;'+DڦźSXgG+ȬGc|tSn곶쾙mb$LQ>J%[ٴ5rۖ]\4zߵe[\'s[ڲP֒endstream endobj 102 0 obj << /Filter /FlateDecode /Length 4941 >> stream x\K7r5^wavXZ/-=up}!{ M)91( /Yn. 
[compressed binary stream and embedded font data of inst/doc/higher_order_markov_chains.pdf (the built PDF of the vignette above) omitted]
markovchain/inst/doc/an_introduction_to_markovchain_package.R0000644000176200001440000010155014050513235024357 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add .... x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) })
## ---- load, results='hide', message=FALSE------------------------------------- library("markovchain")
## ---- load-aux, echo=FALSE, results='hide'------------------------------------ require("matlab")
## ---- showClass, echo=FALSE--------------------------------------------------- showClass("markovchain") showClass("markovchainList")
## ----mcInitLong--------------------------------------------------------------- weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather")
## ----mcInitShort-------------------------------------------------------------- mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather")
## ----defaultMc---------------------------------------------------------------- defaultMc <- new("markovchain")
## ----intromcList-------------------------------------------------------------- mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains")
## ----operations--------------------------------------------------------------- initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3)
## ----operations2-------------------------------------------------------------- initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3)
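## Illustrative aside (a minimal sketch, not generated from the vignette source): for a
## chain such as mcWeather the n-step distribution obtained with the "^" operator settles
## down quickly, so a moderately large power should already sit close to the stationary
## distribution that steadyStates() computes later in this script; the 30-step horizon
## below is an arbitrary illustrative choice.
round(initialState * (mcWeather ^ 30), 5)
round(steadyStates(mcWeather), 5)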
## ----fval--------------------------------------------------------------------- fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4)
## ----otherMethods------------------------------------------------------------- states(mcWeather) names(mcWeather) dim(mcWeather)
## ----otherMethods2------------------------------------------------------------ name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather)
## ----sortMethod--------------------------------------------------------------- markovchain:::sort(mcWeather)
## ----transProb---------------------------------------------------------------- transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3]
## ----printAndShow------------------------------------------------------------- print(mcWeather) show(mcWeather)
## ----mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"--------- if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") }
## ----mcPlotdiagram, echo=FALSE, fig.cap="Weather example. Markov chain plot with diagram"---- if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") }
## ----exportImport1------------------------------------------------------------ mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph")
## ----exportImport2------------------------------------------------------------ if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") }
## ----exporImport3------------------------------------------------------------- if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, "markovchain") etm2mc } else { message("etm unavailable") }
## ----fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"---- library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects")
## ----exportImport4------------------------------------------------------------ myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc
## ----cchcMcList--------------------------------------------------------------- stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = 
"state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ## ----cchcMcList2-------------------------------------------------------------- mcCCRC[[1]] dim(mcCCRC) ## ----conditionalDistr--------------------------------------------------------- conditionalDistribution(mcWeather, "sunny") ## ----steadyStates------------------------------------------------------------- steadyStates(mcWeather) ## ----gamblerRuin-------------------------------------------------------------- gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) { m <- matlab::zeros(moneyMax + 1) m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1 states <- as.character(0:moneyMax) rownames(m) <- colnames(m) <- states for(i in 2:moneyMax){ m[i,i-1] <- 1 - prob m[i, i + 1] <- prob } new("markovchain", transitionMatrix = m, name = paste("Gambler ruin", moneyMax, "dim", sep = " ")) } mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5) steadyStates(mcGR4) ## ----absorbingStates---------------------------------------------------------- absorbingStates(mcGR4) absorbingStates(mcWeather) ## ----renaldoMatrix1----------------------------------------------------------- P <- matlab::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ## ----transientStates---------------------------------------------------------- transientStates(probMc) ## ----probMc2Canonic----------------------------------------------------------- probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ## ----isAccessible------------------------------------------------------------- is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ## ----periodicity-------------------------------------------------------------- E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ## ----mathematica9Mc----------------------------------------------------------- require(matlab) mathematicaMatr <- zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ## ----mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. 
Markov chain plot."---- plot(mathematicaMc, layout = layout.fruchterman.reingold) ## ----mathematica9MC, echo=FALSE----------------------------------------------- summary(mathematicaMc) ## ----fpTime1, eval=FALSE------------------------------------------------------ # .firstpassageKernel <- function(P, i, n){ # G <- P # H <- P[i,] # E <- 1 - diag(size(P)[2]) # for (m in 2:n) { # G <- P %*% (G * E) # H <- rbind(H, G[i,]) # } # return(H) # } ## ----fpTime2------------------------------------------------------------------ firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ## ----mfpt1-------------------------------------------------------------------- meanFirstPassageTime(mcWeather) ## ----mfpt2-------------------------------------------------------------------- meanFirstPassageTime(mcWeather,"rain") ## ----mfpt3-------------------------------------------------------------------- firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ## ----mrt-weather-------------------------------------------------------------- meanRecurrenceTime(mcWeather) ## ----mrt-probMc--------------------------------------------------------------- recurrentStates(probMc) meanRecurrenceTime(probMc) ## ----data-drunkard------------------------------------------------------------ drunkProbs <- matlab::zeros(5, 5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ## ----rs-drunkard-------------------------------------------------------------- recurrentStates(drunkMc) ## ----ts-drunkard-------------------------------------------------------------- transientStates(drunkMc) ## ----ap-drunkard-------------------------------------------------------------- absorptionProbabilities(drunkMc) ## ----at-drunkard-------------------------------------------------------------- meanAbsorptionTime(drunkMc) ## ----------------------------------------------------------------------------- committorAB(mcWeather,3,1) ## ----hitting-data------------------------------------------------------------- M <- matlab::zeros(5, 5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ## ----hitting-probabilities---------------------------------------------------- hittingProbabilities(hittingTest) ## ----hitting-weather---------------------------------------------------------- hittingProbabilities(mcWeather) ## ----simulatingAMarkovChain--------------------------------------------------- weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ## ----simulatingAListOfMarkovChain--------------------------------------------- patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ## ----fitMcbyMLE2-------------------------------------------------------------- weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ## ----fitMcbyLAPLACE----------------------------------------------------------- weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace", laplacian = 0.01, name = "Weather LAPLACE") weatherFittedLAPLACE$estimate ## 
----fitSequenceMatrix-------------------------------------------------------- createSequenceMatrix(stringchar = weathersOfDays) ## ----fitSequenceMatrix2------------------------------------------------------- myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2) createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE) ## ----fitMcbyBootStrap1-------------------------------------------------------- weatherFittedBOOT <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 20) weatherFittedBOOT$estimate weatherFittedBOOT$standardError ## ----fitMcbyBootStrap2, eval=FALSE-------------------------------------------- # weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays, # method = "bootstrap", nboot = 200, # parallel = TRUE) # weatherFittedBOOTParallel$estimate # weatherFittedBOOTParallel$standardError ## ----fitMcbyBootStrap3, eval=FALSE-------------------------------------------- # RcppParallel::setNumThreads(2) ## ----fitMcbyMLE1-------------------------------------------------------------- weatherFittedMLE$logLikelihood weatherFittedBOOT$logLikelihood ## ----confint------------------------------------------------------------------ weatherFittedMLE$confidenceInterval weatherFittedBOOT$confidenceInterval ## ----multinomial-------------------------------------------------------------- multinomialConfidenceIntervals(transitionMatrix = weatherFittedMLE$estimate@transitionMatrix, countsTransitionMatrix = createSequenceMatrix(weathersOfDays)) ## ----fitMclists--------------------------------------------------------------- data(holson) singleMc<-markovchainFit(data=holson[,2:12],name="holson") ## ----fitMclistsFit1, output.lines=20------------------------------------------ mcListFit<-markovchainListFit(data=holson[,2:6],name="holson") mcListFit$estimate ## ----fitMclistsFit2----------------------------------------------------------- c1<-c("a","b","a","a","c","c","a") c2<-c("b") c3<-c("c","a","a","c") c4<-c("b","a","b","a","a","c","b") c5<-c("a","a","c",NA) c6<-c("b","c","b","c","a") mylist<-list(c1,c2,c3,c4,c5,c6) mylistMc<-markovchainFit(data=mylist) mylistMc ## ----fitAMarkovChainListfromAlist, output.lines=15---------------------------- markovchainListFit(data=mylist) ## ----markovchainPredict------------------------------------------------------- predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"), n.ahead = 3) ## ----markovchainListPredict--------------------------------------------------- predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5) ## ----markovchainListPredict2-------------------------------------------------- predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE) ## ----test1-------------------------------------------------------------------- sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") verifyMarkovProperty(sample_sequence) ## ----test2-------------------------------------------------------------------- data(rain) assessOrder(rain$rain) ## ----test3-------------------------------------------------------------------- assessStationarity(rain$rain, 10) ## ----divergence1-------------------------------------------------------------- sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 
0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ## ----divergence2-------------------------------------------------------------- data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ## ----rCtmcInit---------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ## ----rctmcRandom0------------------------------------------------------------- statesDist <- c(0.8, 0.2) rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df", include.T0 = FALSE) ## ----ctmcRandom1-------------------------------------------------------------- statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2) ## ----rctmcSteadyStates-------------------------------------------------------- steadyStates(molecularCTMC) ## ----rctmcFitting------------------------------------------------------------- data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) ## ----mcWeatherQ--------------------------------------------------------------- mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen') mcWeatherQ ## ----mcWeatherHalfDay--------------------------------------------------------- mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5) mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,name="Half Day Weather Transition Matrix") mcWeatherHalfDay ## ----ctmcd1------------------------------------------------------------------- require(ctmcd) require(expm) #defines a function to transform a GM into a TM gm_to_markovchain<-function(object, t=1) { if(!(class(object) %in% c("gm","matrix","Matrix"))) stop("Error! 
Expecting either a matrix or a gm object") if ( class(object) %in% c("matrix","Matrix")) generator_matrix<-object else generator_matrix<-as.matrix(object[["par"]]) #must add importClassesFrom("markovchain",markovchain) in the NAMESPACE #must add importFrom(expm, "expm") transitionMatrix<-expm(generator_matrix*t) out<-as(transitionMatrix,"markovchain") return(out) } #loading ctmcd dataset data(tm_abs) gm0=matrix(1,8,8) #initializing diag(gm0)=0 diag(gm0)=-rowSums(gm0) gm0[8,]=0 gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) #estimating GM mc_at_2=gm_to_markovchain(object=gmem, t=2) #converting to TM at time 2 ## ----pseudobayes-------------------------------------------------------------- pseudoBayesEstimator <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) } ## ----pseudobayes2------------------------------------------------------------- trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain") aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain") smallSample<-rmarkovchain(n=20,object = trueMc) smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample) pseudoBayesEstimator( raw = smallSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix biggerSample<-rmarkovchain(n=100,object = trueMc) biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample) pseudoBayesEstimator( raw = biggerSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix bigSample<-rmarkovchain(n=1000,object = trueMc) bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample) pseudoBayesEstimator( raw = bigSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix ## ----loadAndDoExample--------------------------------------------------------- weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") ## ----MAPFit------------------------------------------------------------------- hyperMatrix<-matrix(c(1, 1, 2, 3, 2, 1, 2, 2, 3), nrow = 3, byrow = TRUE, dimnames = list(weatherStates,weatherStates)) markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix) ## ----MAPFit2------------------------------------------------------------------ hyperMatrix2<- hyperMatrix[c(2,3,1), c(2,3,1)] markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix2) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix2) ## ----inferHyperparam---------------------------------------------------------- 
inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) ## ----inferHyperparam2--------------------------------------------------------- inferHyperparam(data = weathersOfDays[1:15]) ## ----inferHyperparam3--------------------------------------------------------- hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) hyperMatrix3 <- hyperMatrix3$scaledInference hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15]) hyperMatrix4 <- hyperMatrix4$dataInference ## ----MAPandMLE---------------------------------------------------------------- data(preproglucacon) preproglucacon <- preproglucacon[[2]] MLEest <- markovchainFit(preproglucacon, method = "mle") MAPest <- markovchainFit(preproglucacon, method = "map") MLEest$estimate MAPest$estimate ## ----weatPred1---------------------------------------------------------------- mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"), transitionMatrix = matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25,0.25,0.5), byrow = T, nrow = 3)) ## ----weatPred2---------------------------------------------------------------- W0 <- t(as.matrix(c(0, 1, 0))) W1 <- W0 * mcWP; W1 W2 <- W0 * (mcWP ^ 2); W2 W3 <- W0 * (mcWP ^ 3); W3 ## ----weatPred3---------------------------------------------------------------- W7 <- W0 * (mcWP ^ 7) W7 ## ----weatPred4---------------------------------------------------------------- q <- steadyStates(mcWP) q ## ----weatPred5---------------------------------------------------------------- R0 <- t(as.matrix(c(1, 0, 0))) R7 <- R0 * (mcWP ^ 7); R7 S0 <- t(as.matrix(c(0, 0, 1))) S7 <- S0 * (mcWP ^ 7); S7 ## ----Alofi1------------------------------------------------------------------- data("rain", package = "markovchain") table(rain$rain) ## ----Alofi2------------------------------------------------------------------- mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate mcAlofi ## ----Alofi3------------------------------------------------------------------- steadyStates(mcAlofi) ## ----ratings1----------------------------------------------------------------- rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D") creditMatrix <- matrix( c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01, 0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01, 0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06, 0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18, 0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06, 0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20, 0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79, 0, 0, 0, 0, 0, 0, 0, 100 )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE) ## ----ratings2----------------------------------------------------------------- creditMc <- new("markovchain", transitionMatrix = creditMatrix, name = "S&P Matrix") absorbingStates(creditMc) ## ----economicAnalysis1-------------------------------------------------------- statesNames <- c("customer", "non customer") P <- zeros(2); P[1, 1] <- .9; P[1, 2] <- .1; P[2, 2] <- .95; P[2, 1] <- .05; rownames(P) <- statesNames; colnames(P) <- statesNames mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company") M <- zeros(2); M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0 ## ----economicAnalysis2-------------------------------------------------------- c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,] c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,] ## ----economicAnalysis3-------------------------------------------------------- as.numeric((c(1, 0)* mcP ^ 5) %*% 
(as.vector(c(c1, c2))))
## ----bonusMalus1-------------------------------------------------------------- getBonusMalusMarkovChain <- function(lambda) { bmMatr <- zeros(5) bmMatr[1, 1] <- dpois(x = 0, lambda) bmMatr[1, 3] <- dpois(x = 1, lambda) bmMatr[1, 5] <- 1 - ppois(q = 1, lambda) bmMatr[2, 1] <- dpois(x = 0, lambda) bmMatr[2, 4] <- dpois(x = 1, lambda) bmMatr[2, 5] <- 1 - ppois(q = 1, lambda) bmMatr[3, 2] <- dpois(x = 0, lambda) bmMatr[3, 5] <- 1 - dpois(x=0, lambda) bmMatr[4, 3] <- dpois(x = 0, lambda) bmMatr[4, 5] <- 1 - dpois(x = 0, lambda) bmMatr[5, 4] <- dpois(x = 0, lambda) bmMatr[5, 5] <- 1 - dpois(x = 0, lambda) stateNames <- as.character(1:5) out <- new("markovchain", transitionMatrix = bmMatr, states = stateNames, name = "BM Matrix") return(out) }
## ----bonusMalus2-------------------------------------------------------------- bmMc <- getBonusMalusMarkovChain(0.05) as.numeric(steadyStates(bmMc))
## ----bonusMalus3-------------------------------------------------------------- sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25))
## ----healthIns6--------------------------------------------------------------- ltcDemoPath<-system.file("extdata", "ltdItaData.txt", package = "markovchain") ltcDemo<-read.table(file = ltcDemoPath, header=TRUE, sep = ";", dec = ".") head(ltcDemo)
## ----healthIns7--------------------------------------------------------------- ltcDemo<-transform(ltcDemo, pIA=0, pII=1-pID, pDD=1, pDA=0, pDI=0)
## ----healthIns8--------------------------------------------------------------- possibleStates<-c("A","I","D") getMc4Age<-function(age) { transitionsAtAge<-ltcDemo[ltcDemo$age==age,] myTransMatr<-matrix(0, nrow=3,ncol = 3, dimnames = list(possibleStates, possibleStates)) myTransMatr[1,1]<-transitionsAtAge$pAA[1] myTransMatr[1,2]<-transitionsAtAge$pAI[1] myTransMatr[1,3]<-transitionsAtAge$pAD[1] myTransMatr[2,2]<-transitionsAtAge$pII[1] myTransMatr[2,3]<-transitionsAtAge$pID[1] myTransMatr[3,3]<-1 myMc<-new("markovchain", transitionMatrix = myTransMatr, states = possibleStates, name = paste("Age",age,"transition matrix")) return(myMc) }
## ----healthIns8-prob---------------------------------------------------------- getFullTransitionTable<-function(age){ ageSequence<-seq(from=age, to=120) k=1 myList=list() for ( i in ageSequence) { mc_age_i<-getMc4Age(age = i) myList[[k]]<-mc_age_i k=k+1 } myMarkovChainList<-new("markovchainList", markovchains = myList, name = paste("TransitionsSinceAge", age, sep = "")) return(myMarkovChainList) } transitionsSince100<-getFullTransitionTable(age=100)
## ----healthIns9--------------------------------------------------------------- rmarkovchain(n = 10, object = transitionsSince100, what = "matrix", t0 = "A", include.t0 = TRUE)
## ----healthIns10-------------------------------------------------------------- transitionsSince80<-getFullTransitionTable(age=80) lifeTrajectories<-rmarkovchain(n=1e3, object=transitionsSince80, what="matrix",t0="A",include.t0=TRUE) temp<-matrix(0,nrow=nrow(lifeTrajectories),ncol = ncol(lifeTrajectories)) temp[lifeTrajectories=="I"]<-1 expected_period_disabled<-mean(rowSums((temp))) expected_period_disabled
## ----healthIns11-------------------------------------------------------------- mean(rowMeans(12000*temp%*%( matrix((1+0.02)^-seq(from=0, to=ncol(temp)-1)))))
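## Illustrative aside (a sketch reusing the objects above, not generated from the
## vignette source): wrapping the discounting step of the previous line in a small
## helper makes it easy to inspect how sensitive the expected present value is to the
## assumed interest rate; the helper name and the alternative rates are arbitrary choices.
discountedDisabilityCost <- function(indicatorMatrix, annualCost, rate) {
  # column vector of discount factors, one per simulated period
  v <- matrix((1 + rate) ^ -seq(from = 0, to = ncol(indicatorMatrix) - 1))
  mean(rowMeans(annualCost * indicatorMatrix %*% v))
}
sapply(c(0.01, 0.02, 0.03), function(r) discountedDisabilityCost(temp, 12000, r))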
mobility data."---- plot(mobilityMc, main = '1970 mobility',vertex.label.cex = 2, layout = layout.fruchterman.reingold) ## ----blandenEtAlii3----------------------------------------------------------- round(steadyStates(mobilityMc), 2) ## ----preproglucacon1---------------------------------------------------------- data("preproglucacon", package = "markovchain") ## ----preproglucacon2---------------------------------------------------------- mcProtein <- markovchainFit(preproglucacon$preproglucacon, name = "Preproglucacon MC")$estimate mcProtein ## ----epid1-------------------------------------------------------------------- craigSendiMatr <- matrix(c(682, 33, 25, 154, 64, 47, 19, 19, 43), byrow = T, nrow = 3) hivStates <- c("0-49", "50-74", "75-UP") rownames(craigSendiMatr) <- hivStates colnames(craigSendiMatr) <- hivStates craigSendiTable <- as.table(craigSendiMatr) mcM6 <- as(craigSendiTable, "markovchain") mcM6@name <- "Zero-Six month CD4 cells transition" mcM6 ## ----epid2-------------------------------------------------------------------- eig <- eigen(mcM6@transitionMatrix) D <- diag(eig$values) ## ----epid3-------------------------------------------------------------------- V <- eig$vectors V %*% D %*% solve(V) d <- D ^ (1/6) M <- V %*% d %*% solve(V) mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates) markovchain/inst/doc/an_introduction_to_markovchain_package.pdf0000644000176200001440000157023414050513457024747 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 5600 /Filter /FlateDecode /N 99 /First 840 >> stream x\rFO?[X}o4ɖ,؊ɸ\ I!@Kޭݗ4J2)SqfJ"t9߹tU$"Id"FR:ő4IHZ-$4IS"Ra/#<"eSOF*Q x{]yKid.*YAT)F6F-(v_YI8rwW8|#vsNKRD QbGIJ(UxH'QjN4FʚEQ2 1.i(Tj70a=4joЄR::H\dʡ B%)@,(kwYP[ Ջt*ȂhKnecPC &?1([ bPF(VtAZ4"BH E8:FA9cz!)tļeG-@-{@٥@p+#I͠ 2*F]"% 4KĆ.M  @5qR o@4 $*%M)DV eTͅc%22ޡ!8P yie-yu:i :I/yN?~y;{8zdwNVSv¢qtžmٰ+GTfG,+~Uim{5"8r2I #[O72I5l]q8↚qاaaM1\ٰ/W-9wl`JS.] \ׁ =L؇nצaK>tC)pϋ\`4 |^@&cCZzaF6b\rXǰ|нAT RZPma> Jz. "a]=Qε\8!iC0-'5&6 =ɀ暴c<Тq^1$ ?Ԅ6*I^6?F˯k[4< 5H=NhÇl A!xBMK哼{ ɯow?vým mǤGhw9sL- ~)u{^Ơ6S g8XIHbcJA$L:/Pm}Sľ}2A1!׳Z:uPאO~ZQuH<[yqk#G|?{|?sc~O/-x֙9?)SH.Ϲ'/E_@%qė| }>C>DK^b;MǼU wV5y9J>;oo|vL%.)Q\k?+%?eaqz^KC*(Vo(2/sV/}~| NjjTĪV+ZԪX- ׮$~7i\-=\#ة=~N=\W [Ml-eAF#) r@saΫݲSpȇݬIH;bux2l11OMbʨ? TX8({I_oN64(*ЩS]U+HU]7bU)D{u ‘dXut旳WrV*f'fe ]%q Zn5گp?JNWPx^NkThB'-\hpbC3T>/Fvy Jt+@RKKzђE-"DwPKX7'gom $۸NRu*c-5Yٱnw{.8FNZр7VNk:!5DV&i 's728i WTRdix 4 4<, ڊHER~ٞn /,+3q y]t m)N,b-_)$˚$#,a%hR%M+*UXL6D3vZ2)\+etbÌ'dVx.e trb1a.kbY`7aJiϘALIvi3 Ec趄cm< LY$S-O>o`4"P +IgfG0AEWC߷QđӚ t\ N( m!,LTUzQ`+^i$h 2$hjMsf]e==;+"edfӐ4䩛 gQнSs6Ƶ=EK cSZZr{F6r?ۿn?1R-s!)ܲ:Xcf^ӷG-]vpOLW{/3ո:\>.09>݄<%ۄ<%/s> y qGq>'!n=BfyU2_8s]64WSof2$S]2>2?D:Y͢uUb{ *\ pT#I=.'j~f"(nz.4OVhqXo>M`l^6XS-OF ԲrC{_J]&6as;|<am6gK,qP^K^\zEWÅ5Q#%3& >LL xYPg[15= eMNĹZ&iԥYačpe"c wN(LG}2-wm im =Ѵi 4iFfջEO"tԣ^]|HeQ&(1Ne;[.9gJ5)M R,Eʼnd4ɍ߀TTR2l}"QǬ]3+.WWWlI_g˻a] ê7SǣHdz/R.hToRW%iCYPdm”ZiglXR0(k ǭֵg.0߈I-,Ff%4۠Ʀ[debe \gEݔ}.; G>04~T'mӃ$F MpG- )*/jV/'BJX i n!8/7!/4OH|(}-Nk- .9BDhx$?BNQOBÅgFؘxFaІ \)"LpSbF6o0TIjԕX;8دƐS|? ɯTPNy~6UӋ 0^|7Ukr(<0 sa6.}ӚE$ -iA$[;e6ȳs0ẆYX=cM9?꟬:m,9, a0ݶORXUvU`N.ˁtD6"E0Qx9߶ ꩉWpYheOh]Z!\ho"_m[潍x)QZZ؈iR j˲lߝaV1 k*rHhM޵3T$U|):X.S!F̹k2;S:MQ-Ttn Bx=Hh!|E7j`+O?ҘJ:Fu,OYcg4ږ>ёLm;>6^ /;=u9+ﳢ[5YʇYKrccKЧEFiV LmT3| CksaVd ?2rtGbͯY7 i.šΏm@h=2Ԩ"C3r}- >lAjF_0Z3MH)xR>lk)F5[n`>bXwp*MIytvFuU/ـ!QضS wR,H, ;O p7>5(NysQ : Z'QƦ]BY윯}EPkn{%9iV?Y## i ({ev:j77w"55kpЊGe1e.a mqi gZja! 
Z0jn=&6[͂V(ͬ4?0ӱΪ%z7}aR[%Բ79Jendstream endobj 101 0 obj << /Subtype /XML /Type /Metadata /Length 1856 >> stream GPL Ghostscript 9.20 discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions 2021-05-17T18:17:48+02:00 2021-05-17T18:17:48+02:00 LaTeX with hyperref The markovchain Package: A Package for Easily Handling Discrete Markov Chains in RG.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón endstream endobj 102 0 obj << /Type /ObjStm /Length 4180 /Filter /FlateDecode /N 99 /First 936 >> stream x\]s8}_ݚj $T'錓̤{%$w'HJvd[ޔ&AšB 0 g Art8"sZCv&w}:(Vya<Az56xI'aSąQ !1pD?O {>eɳʋ3- PgNNQqDZ-Ő9<:VĠ0D['_q>x,'jS:(EJ27uUgj)\,95gHxsٴg۞]{9BgjރnzdcзPufMZί'Rg_V?[MVUV9^5&.'o>}eN7\~yhܾ;.J7}}zaM}zzuPzˇl䘪ۜ[(ӎq胎#88բCej9Z#_#*-`:z:e KIH m|("4?XSD5|wixU>w$evޕ8PM0MAP.4Q|Y0QdC*+]2-wCC"MDUбr/f/;8ZXGdta-T\QP vʞ]t#Q4YVY?O_>~ׯ_%gi=;NQz\]L 'j5_2z5!<W뎂OW!sׅ'_nn?@Gy}1^!Tp|{ 2%¯P&2D Dt\5t9 ٠n~wDgLP9SPRa )#SQ@1951|!\j}m'<\󋮈5o|"SL>/K|%_7<{AC~ɉL.&<'<_^NnnHPjw"TǻdXcp|9Y]OTf竦n\cYY*/:3W  c[|yc}׌W {o8mz[oy$7 DKݪ?[Y+OE/qA}֕~B}>7#[o]ZA8.]SQ_GX4Tz<^js}Yn Ru|VRvS9^^M'_UeYa§A kꪷ*Y?1,/]uUG\[̴-Rӻ V\dfJmK:[=%Jnܪ:omnq!0[0]8wv˅/q]gWh֥.u)ҮغR\}wUN2sc&g)n &HX`Sp92lb7'wGDPqWh'1.$Virg(DmR(ݞGT/j`>}Ɗm5K`0u3&d 4.{Rtp`8ۏtƋzQKgݽ辱!a/6lypN17~C40W-VJŠ^pJTQY[0x<3=,R8@ sЇ64aHx.i. %[rQ㉙Agݛb@0ơaQe{nh5Zvqv݋P&0~__V痓٭vD]MVd%ن߷D.7e{d UaVZx`/,'Rls{Cd̚( |vdJmY0>TnY]ij#Q/V%wLʟSR\ vbaO76q?]0Ye0 iG:9xDYa&<娫8s]wKaz9mns(۷}ßݻ~A#@`sp}u0!elr0 d'U5=~7GwmզlզLdo6Ya.0Dk6 6z0 <7SY3yVsy!/^]T5>7UoJkQPڍ1]t P=0V1Bo5 j2$,;9$ E$XG~x7ac6y˘0PVA*,#^4UMZ18~\U_pُe>;3XbeѬ۵Z3Q47>pCi3*OX1ҷlb P4Vob#]lNujP˟3b2|`_`?h0M- G=<r:o?$38BP5r46L1m"rK2u,l DZ6E0|Fl ZŃ{nZ0;1D 0ybZ n0ybCaV(FH5)bVѭDG(@Zb}(-p!  Lwb<Чl{ٓ;Yendstream endobj 202 0 obj << /Type /ObjStm /Length 2860 /Filter /FlateDecode /N 99 /First 909 >> stream xZYoG~_яW$qv-+y y`LD& rgFi>(+0 <$lzA5d}ZE'VBkpod-n>&`И1 4I7XF|7xpb FwIwaȣAOmH"b "0hr$⹬ afඁY'l- c8cp6F'Y12L"K1qVa9'1}0Q "S8|7QP= JgΐM€dhXቩLp"#DPxs?E2')xxc9^&#")cxVĘIH1c{GdXIʄ ZH)98" $RbơVQ˄;+ ;PwȘ5V %O!|*ab9Xl"Ok^85@xs@lPkϳmbo awSp;avð s_@5oL_}%lZT(q.~ţlUc*~WW#)(G% Z\ 7ɪҕ<Y\a㧢xU~\aЯK^_ac驮ҎuQlÇ{#Vd`v⎶;qבH^Hn^?*DUq6?qt~IQZ?enhH|A!L0>)-/n]eϧHlG,`190c`C (y ~Z6H GXAj8iqiGD1M U)Ɏx $#]* 01Go' P=Of:{jbNOuhvy/՗j1kJW\ܡIk5eZ-E-bWք VaT,!/;ρE]}QC ҽ.&Kr;!9$DPv 0Ne%rʛjz9t~sR{GuG`:s c>=.:9$3QKP&  zG2TU,KzR^, >G4usgg&A&>;-b~5_= ~s;M@u G&#F2rK`~z387ڥ &oQw]x:)y#U`y1yWjr3<]ߔ+r1oiޗǟy>־mۗToC|W' w5 [gFq JW gDX}hm~5cN |Ny_A"I][5_ў'>V>Uk{T<./IkqY\ex3-Ŵ)b^/ޗXbU;r|#'\LPɷĴU+Ĵ#HC]}E?v0=w<{^JCݲ!n@5w ^󕯟on=7Y|hg?ngҚd\lϭ|.=Be;؎s\s?fI!MIp;HlKǁ:(\ H߬AzIV^둜  dǭ H&i0$8S?yMzT|+J N\~=@!d_S"{:V֔;wNJ=;4͢NvvuqX W:p,XmPٱn0nlևC6ﱷl4l5=ŵK Z3K6zK[.YwŽPMN6:jQ[8Y(yp/Lރߠ2G K,JlH+DGm)H>j! KAƱOݍ#M.q+sLB LZso@@O;|'][D0S{f`6ceƭO6\|HLA^͛|V靈%U2%x\}3`|tR~endstream endobj 302 0 obj << /Type /ObjStm /Length 2536 /Filter /FlateDecode /N 99 /First 908 >> stream xZ[[~c\pxM5MZȃVBZc%7<Nz3މ(QFBDL=Rbr< !EJAN;.*QI T 3t&Ya]Xa p s%탆?{]vcd$~Y{^t'#TV<]Nj2H ')N+X&^tXB؄B&b^*LV$Q VbbgYG2DN+bG*bAO :Ť_SH&|ћK0YVk%=SJ1`O>9S\yº`%h]DMb&0`J!}d%8HC)29Ra|9QBdpzޙKc؁F߂OqYh8| ֠|S̠DGVQkX zGk kp\[ր?|x6.UԕR_ŋr:G.^O&o`ßPg!Maěntat,v9YN@zsw{vo^2~YboE:͊-Ml`ñi i42VR+yè鏮OO#Ӱ qGSH#>>4ܡm3ˣ/2?N˸d-< [ѰrK16%#8}هUCC߈jbZjҪƃdBFU / U?o6177rz=XVU9Yo?gYlߞ@UXN*;&Y6N¥L7&6@e ,;A`T E`002lvoq<vcrQEdU>ƶ)!%!h;" EcA2귙%H}&E MGJ?Bn7EEUCb cˆ:ajނJ. Dᠲ{t9o1occ7nם3.ZQ0Yr‹!AE9+Ip>J6B̚mF 9(loنc5\}h,I[N쭾'b6AmEGRJK:su P է6jgnhP7(QtfPXYOLNF<ƂkL>YJ0PK! 
jXc{W %Էs6ddxxoLGkPRue@,a|4Isi5$JB.bK@5Aptْ&U!ΜzN.ޜ;Ij=ίnjKeo Gwm0U}=Yjkuٺ>?N6gZGq|էp?f4k"`5}M:alR-}✇bmo~8 Oxru7okI=gJ+=[\i*.^L>}?]蛘T 8^ghn& #nP٧:ë 6v|f- DElӍ-6pȏ؇}^'}YrL tޤײnyܗeu+'ʺx<ښ v8v9Wr@, +]Ֆ.NA< m:uĕ'vK‡۠( .psC9.g+336_^~uScR5ѩV=tO>Vendstream endobj 402 0 obj << /Type /ObjStm /Length 2333 /Filter /FlateDecode /N 99 /First 900 >> stream xZ[o[~cE) X f8>YG#PHF>/\8x+XxUPD_Dk;A\0`ʆ()tA)`2ot:[/|Mb^Og_O{I ρ,&@7[t0a>^us3xSicήv =zsʌ}Y`C?:Cƃbca°ظ-< MzðAalawzN:I▞{oᖆ;gjܞK%iޗ+*W쭱xM\eXz~gYLM$7<)qz#Oŝxhk&4 &BWz~X懥٧?K/֖2YR[~NtнUN%m*Ze墛@Ԑ S %qF[^vW|",V7![mWf0U⎑$<*dz]såB;4xTgXά/n'U-^Pk 񘽛N0{@MKU&a sIMi YrRU*)A(eœ2V n z%1C-`iV@0=fZUVx!Xe kP-o Lt-$X:l9 xbXʑOwv}YOH>w˟wxyK uHhvi16;ЀvϳRt~1nraY$?qh@z3zu3#oٵ<JC_0*/S '9RW.?][mfEvLy PTۯ{EenQ_уU Ij5aXVRY-ava3bˈm 3Q_(iS]eTy-AVG)j09Zòz*![KBS(#i{gS({#Xȧ%Y hc!'l+H|@tO#"iWGsmϿUK1)=V㍀!yGg&w*d^6P&g{$E7bzIb+9nXi0> S%s>{.P=,z >:+X=?;+&O賷K)#zr~H͒Ѭhá`I=a6CQBiox z-m @)k3)HhP,f@<~柲u0($ET _GcRXcb_YlTXkCq1{9pc#GteG"}JsO! XwaПaAEPpؽ@ !s|WOZvb2MlsR\Bz~lH-A HX\^;Ko7꼄[xޭG?MϦ_;iR{j]ڸxyeendstream endobj 502 0 obj << /Type /ObjStm /Length 3416 /Filter /FlateDecode /N 95 /First 863 >> stream x[k_w+Aou  5->h<r$esn1nۘ,:vcb(0Yj|Z}OlY%qw L/pPq S͖ J&DŽ'3K\I,J)H&{bx:w .|eeɔWSHTx\q `t rAjNO)ôt S26 *t ͙vL{%ъTB/mk:_BwH<Ùk`VH"8P*GW4 Mt`,˔:ϓd.99rҁ`&jV%4.&seK椠s͞*Ye n! D i}̛iyd+ _8$O"ݚi-u.nhUk6V}@_@vjw`ѩ =M$wm7S5[L뭐uȭ8+*80 (x_xsWCn9ACu[HY I\Qwհ}h?NW=ɣMll ҩ tts"~p#l ԝs#G9pDUuL w'*`#(G6m<ίC"%Csܙ2OJRfny+7ZKBUy 'RA2ܗy S4psPxsY ͬiVW<`3*Ru5 {NLĺzpdQ Wf˃JnFIV=ܱUy,|wMa6kF?/YY82fJdI"SJ*GzA>)'I>$q$⤄Bn=J1!g bHǢr12֑,X3k ZbqY!V9pYP+Z[-2%U̅xLv!ŢC2!A)w @tU;Ec*Yo=M/,ɸa#D!Ȃ?ZÌeB2!¾Q`΃][Tm.^Uxcn7pmw]w_)]: )"QyQi /UʾK[>m>nRzh:=WLS_CEZ>z;ZRnyT(j{hvٳ7ƌgd`eJXYtmtҐ§cR߽RO3jrUk^& dG+N! J{je٧!/hrTWi&lJFS 6(;_Qa#$ZI:%eQ%U, Hu_/[=Ao0#(Yٶp2k!gNY̆텡*6Y'QTsJОh%ZޤѬnIknfReU@,yݥ|GV'Mgԑ옡v[Q-C=11FPrhfS"EIi0k3fc%IOOz@PChFfRG].(ۚF;I nCds+r'wx*@r~E'ܴi~J.PoХ췮ʼrĩ#3|Qǹo>7EF]pyUd^}мʧaVM?iv9$P}ڱLLf߼bO2fpĠ%)11pР.6VL >MOc24?T.PIY"ulvNaaya2~J%SVd"v21;G60Bfkz{&KLOzf~YLwju7ɸFHIümZ_p냌'W,ɦ̌hTQb2֘J@hN +8F/-yf  ǧy;|!(U t* _jSЏJ,.$͖T -@sK)ZGbK T7Mkq3 $~c`S;uA`,TZz*YEzA%v% K}"DIqe~ޢI=yۗ<;|Sɳt7”rvWu 1rBk;9m/ۛP`Y~\w=)9i? 
3Zjnn6xe=N.~ V wiu3khax'-mW~<27 \AZc0S^RQ~A <|t~ /kOv?B"Z%Pڢ81asnOh=a{+GwB-QV-EY-ǃΝ-(CEp HOE81l NR$7w12ijͺev,4%;aE}}۴.vcȷ2ݴ 𣘤j(J >LjЖէ\b %VՐ H_:P%6kEݷnR٧gv!o /D^/Nb_q9P+^/_hPoY$a; x[}JW8Z,iدr$^Tc4 kZT$DAKrZ9bWv^MӯLJL.Rї)tދƕom :C=~j[SUtendstream endobj 598 0 obj << /Filter /FlateDecode /Length 6510 >> stream x\YsGr~?0on88=yW+iKv8VA@ Hg;:z "(4lTWeeQN/{mLqϻ򿋗?!vlwgeKfV]4z.<|fnzq?]Jv:[kG+s^p5vY4Msfz[]࣠/`8 #lxq?;g;=N`Zܪ3DZ /9HúׅI9L_ҲJNݛ*N +'t+vgI*O ID,m‘jZ 8g)Ig%p&m$ggTLt3hCӛ"F|L ry,02MA#94r(g9 Ȼ>ruK0*w΀: 8zo  {R44~[3]cRf4qDwH; 0٘-Hxcvk\)–\iz*^{udZq:l(Bgy_̓"ڃp#rOYH Zhesj99myBD&T=}9x]> cAĝϓU|^גl:~$zP@wILPK1@n?_Fa2U*]q&P2[""W`fra# XTt~!9!|w6dԗzfv}} 3oL:îf`z^F0y^.T{s&]mJ6EGÎ.J,.@S^*,0pnoM)y_NMP R +0D؉o qi`p}Bo)n¬m.bE=-nqX?q8ʔ~6.BbR'̢hF1BhxgM*R8,rРO-mÖ〜m|mA(NSEDeKNƅ쟌$aD 2J?H$ \b'TDXq>n/u4 Ô%6EWW,x܂•AO_ ֝N'klyc|W)u̴q]%/vtC,"7 YKa B{UіgL[TcEXb:`P_M\P#(%KV :MXW#(<*8q`hSgsT{{%)I{3m&4ۇ=Fa p  L*ݰmk+i\߬OBl""6b1{ze">F#`N 5O~LJE$,$&W=@ LCդ|$%}84"T=0E裠d۴l[ !O &JW+-]m7K.X7*Ẕzê2.%B0kS-ְ=?Lc|b>(5C4-",O tc]C0zhS2KY:o/-> b%7^j :BU}em 3qa7H߀q>ɵL =m")\uùcRÁuTo"8(d{oL1r+6a3q߽P(%B6a2!ۘA-2x7@ci@ XAoYozfK& rwԘ0D*;a1vpd6RXYs,[O^v(zOybԻr,{M(%R" d[k AO<+@Aѯ0=>Eq^;!?\ѣ|XעzxM\ǃ%X4 ۯa8"s,-RQv],3=Te}\C^ 8cu&vTf呖ZPEtb1 1X}Su˅ǰBy itbo9l ;c!ֳ05Е~cjoT=xa?`d"^.N[NӜov zUugݦv2d]g8.OK\UXdX\eJ&xqyYY%[$c|5<6ws97*Q')Lh]:1]F{6TaM>~k6XB)p2_ܫdǤH/'٘`0#PPC6WBc0e60wDj+Bmsl~zee:ì9@FkxUIX|+|[H{S8)x+sEH(Ֆ7l D1-ϋԫ,(T@Jj+3V^Y*naRA e",]vXL V-zT)Kvi `Wl@ށ߳.ID)J[0c'hH#e O6HVHM,km,ӡd0BG_tQ)ێK Ð4YLe4đ)P"ԛHiK{R*eQz_V$KZf`b2e#`)&2S} V.jO6\kfլv׳:/̐hLR6hs*H vl6ﺲyG%x@|V/,etJ {fNHdD׋H=i pc6_ _ H@GT)+-ܦ86Y̻.ܠUim%ZpIEeo1o$4'HG6{^t~UNze(31oV#o U>1d B#BX#j#m-e}nMc/noQD.a!ahm#+ uD@n=FlRTHٴI;};/&!C%]9TkK QGP[>ؑD[z=Qo;Ä1{aF̼6H%oǏ #?U{DŽW)7M_h3Tzfh0 0m_*n>BΉ[g9(TDU=la.7Otf, ^+/227[LTn``Z_&ӄ#hMaO]x`# ;jw.5 VOI vd48yMc o#:'/y(815B!!JFfC"m]QJ1A<^%f8Pk@(RO;ATd$tjz(FӃ{d~ ~HWrt<4$.)\tQX;F</4v#Hʁ<#_l=&/JՔo躺+*7D 34o{[38 HXZ+.,}vi=|Zo,]29M_'ڬɱET}'B}]tKJFm)h8@N/@#IeP ZqP1[R)zpg  y?Ξk֛D?8U\g O&+&4)[!3^vu).Sml+B݀`"cy0R{d%n:2+ݟLFъQ'e,SJQ0Y8>?Ye'B٬w|.zs`{oFA3}5rnDQ؍f6Nh4o2}yW;V+m2[;,fX^vtwCލ V@sƀbx)z-k@Rzߵ猪C4>"=TtpK D9K>G7FS> stream x]qgpߴs'~Hpf`K % Y L+|x9KeEak4n9] [{!eQy4Hm4PC-d7y _)F&ꭊy,[_ ;i]،IU!{rV0(wi HH N^_^A l֭o?2#VCE=RpMǓpLv{ƚyk^d om@ f_iw:o |H2)E %۲Y== qQb~\ > ' Nh@# $(ŨrF/Â!^n>eVO V0AIAG; dwDƀo=%VTffsȂ Ë4ZA"k?]..trc?LojFO; MHz&H*-1DVC+a*E:$3i1aVM1is#mg0 z Sdb|jBsl uhf-$y`.D\٧{  $g{jEu9xWy\ FuB-U<I2ү~ C$5 q /B Czᖅω$ SZ a+8UemЃ n ?z ~_<1Cb8򒃚}L"1 (.rlv0x-™cNkdEO` s _rZ}{X[QR\UVLֆ +Y.IV%p A.V4{okLu.fD몝5|]Y|TIHnמOXי`8 Q9(Wkl#,ZW? ds7O 3FـQii*y.[ 1q&0n4ZQMv!,Z2Gÿ$6A(Z0e,Yu^'-s샋ebG$CvqS*?'b"@o+I]907^ 0HDR~bMS s eM=Iɞx lZ9|\@(F\Jp'*)!2$l:d*dMEj.ۣdc⮲^)ɬ]- O" +гHQ`%C8 4!ci5"ZlVe#IXr@$x,AM'9}hc8*;ƘO~pxJm­!/fILJ0I_Q0ba4(3hR-f2h_`jnR,#Z3O V' 7KP3b>gKI dGVả)}D`Y1|I->~3^ \}')8HEBl)fSZtQn'qN ,i]|E䃫h!*d,,EDFXY'0,l<bl, P7d^ =S fFf)Q)&g###k?VDZgR^fmbQ0n 2(`C,[Z<'KS3[x"K3Ĩy9;PYb;c%|X8\sl;hP#-v3$l۶ѕ7F }㘾z-Cm~ly)bvKcwN(xB[-B⟊.)yR=_%z0P2_ u|_sVD8yHݴ.7)jbhBQhg bޱBLz[SsYMNOhy"&ѿ͖p ,wⲒJ[)AiT鳧:\݂]YOuvB& Zd5(ilչGY}uül[97pcڨq7tG 2W"J:qog8f XV# s^fns# a.j*CljۚmͶY4۬qݞϗAlI?d^5,d>,d!)%-%Yʔ.auK ]]]Zk7'dt_&Ҁ*95c(i( |ԹIpwEඈcҧ0{*FȋR--5Wԩ6kw&!mz:Qmu3`%/<lYtc] ޼;[LX SQXGD!./Ƞq׃, zŶ߰DauuX B/W67yZʗ5:qfKz3G_m]` T+ުElsqBV3VQElWYo0 g]:,.mt_aF3mqIFzĜf/@Oԩ[T[D>jI,d5,#-˭Q *7wӏSߴ7ơnP~i-Ail5ad( Waog!LD#mo#蔨i- !da/vpv]t\u]_cUEUb+;5eFBk3&'qIq@D e %2 M'9H ~  80`xWyXgB8%l9WN1'52<(X7=0.p_koǯ% n뗽%SP+b{} + 'є;=!S0jդl@iu4qU4Lnj5b3 qRc~T@dڟ Q 75J3x.a=GyhS n;8%`ef.Kݩ`Y ԄLU# u TbC8< (Eg Gm755f8;QK!o:S.|`WxMc/%Cfy20,r=X@`9ּzHL Tys=6v"[ZDD%aJJ ]XM;DŽڷfo@v-{C<9 KU&^ |[@-8_;V]}`#XHyf4s NN=xY?3Ԍ,cla'. 
?k!?mˆ >6⧻w| xxDPlZ3nj\hu:nq@cvF "qD/=ϯ{R>gO[pA֧%7#Vz[CҚtrG?H H^ť1IP-RNvE݌MAׁ)Յx.zu=C:RdnU,nbou܃jis~BU5i[~\qfG4+Ϗhbu0 GNMϕyjr%cn H)sʟPf7]fn e!e1bpRrR^q/n%&mZDhIqgƜs&^LWUS%`؅%0#3-7ߎ 잛ړEj1LXz8VT[Ppc"SSK|0! ݄gL-as6x \ȩndXp@t'Rt쥳Uڸb> stream xZK7 /i^sX`%p$A#i$#vߧX$E k'9L"U_=5ߍY,?q!Èףw#?ӿvN0{xv3նRgQg&ad`춓)Ιn~7atDs%}< vDy+ `^zx7Im]F_gߡaEYFhaz-z6TiT)[/eYy`T{ɴ]Rh^#aaXJqcE`.N‚N" zz S]TÃ7ĤW so@uE{oCy)4W2CSY p)FS!dϝO1qY{fրde(V1ӽGsagSW˴)c7A ƥo Ø 3,'.:fR εq[f3֘'9v4IEnjh`:C@v y(0l; zH w1Խ) 9GHЖa vN蜎 zPV[ʞA_O`kМZ 3MŌ@ i_*Buz1}2&M.V|+SYҺ?( 8]+lw[j~DH2D<@@R{դd\ ZVURn2.iU*q& yMK~umߡD4XH6 rHPv:"uC|*^ yJrg5B,[Y& ;@Bm/*ڑ"q//]q{i7ECE47CnWgEe2*~"Eh_yMvE\gh 5pUwE|ܬlY}D`фs^XqUīKnQXkV^@ vlm7O@(Z^Mr&\8xj}OU@T؂>xTP>ʕ9IYzSUf6E2`gj.tYpVa3;BsgQϤ,RhG&Gf19$=zs)^_em# ~9/jןR@@GH& It6Ruر0'oh@ɳ'$xoZhLo AbmZ _[NCfxmqnx^4'Tg`?O[AtϸeWۡ5%!% CC=bEp~;8\=RR$NMm`w'S4&g qV\tnB4} e"tfIB(xABR(aL ,`4G ]-aDS(Te5+CiCsj)<hH+"5?>?!%xP&[%N 2$eo%Ga64YzpA$%-i_tXؙfMx>^p  | ֑B3bhk)v0KrcCξpղ=mk|sp ?ߗ&'^?AG]?+7I/ QQ5PG#h*ÁGK;6ʨi6aWZɛXpu^f0^Z"c8\hӌ "qWOav8ݏ4JtFp$߄- ^ҘO?uk6)⚭7rv n) P$S<E1 ECjR,4 v\5$bPWJw/ej Y᎔n'ʡ_?=qY {nWֻpuV;Z>e$^",)ϸxYn]GAr볾 UR߾Վur붼X vU ?~zzendstream endobj 601 0 obj << /Filter /FlateDecode /Length 4709 >> stream x\Ks#2'x?|U);oN+$s2)O74f1];C ׏b 盓}'Z\ݝO'2y;,). QD8{qB7E o]mN2^l xfu*cn]AǕF/8pqsvbѫKCD+\KǠ[8?6,߆W57Nٸ\lPva]]Fթ1zQ//>hwpF-lM %aKvm:SZe<',N aF5IyؓB>c ( ,ietDՇ!e|Hvn 龫|{a__@,QVqoHʌLo43RJKp^[ЈȸSFE ߕM:d+} k4Lƚyuer7aa~B ܒ Uʮf?=2Fy0=[~Yz;ީygec)= M&]`|Ң,LnihΑ6 6Sn&G'N!lW>kQ l2:t^_VQe\P n(gNz >o[w~ݝ_ԇ ɨ; k+@OJ :Cl53mE>7H=o|ignPmm xo:dRm[9@Z1ty%-MSNy ̴SdTٺ!!KNQF]n':?`@@4Ʌ badn}UoN. 'z`~(_qr) f 9hOV 2GmI:,4Ѿ@$QܶȣVی:y5W7{@H"Zf<eúuF`$'ʢ~8:/1|=_,'z[GfQgې10^FisD%,xO IV]#QÔ_4Ԁhl9P:<`QfwfեDIP2܀G`dw١aOgY&1ȸ;уcհ ׻!nל.eWiH+H+8h-`6Mu,۽峩;&59i`Z4|2y ocd-fp} 㛿>HNmZKh Q(J7ϫ=A+m u>e !1㛾Zѽ͓ {6k*4]icɟI {w ۘ W&ehXusKw]aay0܉[x^03ƅ卶Ww;`*'Cc:'e*a%e,`JDrR YyB_[xPE~N u/#[Ho1 vH_?ryW 7m,.}GVP_XG~Dx.4U[sWxT`W~`JyC^V>k`b\~d)˳/ow$*cdj—`73-ȲWJ:,/[J}xK: 2}/aZO)Yj3IY,#ҳ6.y NV?bD$9^~CQq"!s&ؾYu6T/j^edccYB|fF#~+D I ֈ/<曣J|5 8=.2 .#^D!)\. @IP5x;K:I'pb_hoX>Hɉwqخ6"k_!w#~mny0A2 ?R1~_B_a7 fLf5#h6΄X>w9VABNgPtFH,}$Ã<]M4{IiNɼb6qs~{_6endstream endobj 602 0 obj << /Filter /FlateDecode /Length 6957 >> stream x\[v~'yY:Joib( 809Vbl6r׳.]zW3` O%ϢwuUuպ|Rˁ^P7~/G$£1A1[ptqcjs3i\7iR/6jɄ<wU'uJɴt~G~}4oк|KLwCXonv)z2ö yvTZ &}hoJg/i% Nۗ7%OLOidf`@/c3*Wcho-[8Ex +[BN B1&'<=i&OD`p}p߽r DbB;)Y@CmAsdCE뫔eWEm֢(mL峉Gu8%b>cD+hYk7`g6&7biNmW)1.mҰb(fWL9 Dd4Fo4h=y6y X6 (t:\XPY9;;;$ o3tLp(qy#:?ڄ*Ūn|^/*y &e%XIVáXb>O_TrWɟ+yRJ쎶{Z+);?wBlGsowB dV! Ɯ'˙47dL,h2AJPxqJMnfHy1ʊq:_=B V-~ufghtCh4 jX>-U-,|/)\J ).5Cn fiQ+c(@Ʋo3"0*-t?* jux!,t&p~kp:6.Z2vqj\ O+7ˁyvѬ {#ۀ Z~]H6%,bV@:&a$Yz@78:B9z'G:UKC# }R%Q "&ɹ8npY"A@|M(& 97St>Df+wv([eQ%߄r`F̡鞡G܀IFN{YQJ~QϻE%Urk/=!v|uh,0ʍ ߍcF48Dx"WJw漨䮒~hu+䃰4B1 MnS1">yct qG4u`@2vkU_q4EYgdiE]SPqN ܪ捴 ]RBkհ.{ 4,-ҬU6pGx-aҊ)O⎥fnŵ|QM0Ae՟_J׻࣎ycWĀ K̸֌q(ֹ{.jF nr3]# nDDJ&@ჺ9r O~dE`Tr+~Zj/*y=xdoU$? 
w]-ف l 3 H ȵ5.ԩd+(oBSb(>"fk2Tb"bG8qZ4,E+w F><NaUnj3 1D08C9 DL!PĔlZhL@Ļ{=)/x5*Ί%\x{u!"tSғbW2`k@dی8a60ΝZ5d&v},̔%V]:0+`?2 ٦k#c^*"h@9cc?QBH@R"'[iw9("~ i%` h27o*ԕTݧ8=f++*9]Wh-m#Sl*I\h%EPԂ >ȓC%"b6'w`EPl!VߠTИdTQeg-Ƀv2^,m5W$e Ǚ`- )-*_(F2:4<bu8KΠs oIǜr_&׀%8Y־5dL R0`()bXp{9 L@OYMLZ(v'Y*ŀN>ӍEoE YEgX0aǍvC9[Vre8f6geb0;mQiO V]/Mm"E28+pgD/Cf{[ "r.O^s |rPבĄ*s!0:XC"!*0/3XÁS7=T9MfP,s;gI.h1wW^Eg8*o0"qh2pa@T#Ox Qaq?Q.U=@?LI _/VP@Ka"]ˮn^^gwΖw+/T}M<\='pS9pbtXNB!3ԝ3t,}Wo Xl2>rMJ||:u.NG_s㍌U{"z3ÜzeNzHW+-1E_iPF Tt򯳖s4~.wZŬ -jFfs8K$XZHh6K3܋(%3΀annzTqe|s^\\9HF>Ova~K=(.9v4|+wci6ꔩKmpUVKj瞮x=[Mm3nƕ\Yw%{ã`h$xTC"Ǫt/t zd\N r41[6z7e T-W&XW!XJɹ4vЬCh2$=\+i \7C,X(xT* `I_d -n sHZMu^=n" 9QdC1cC[pJZT.:yP9c^H#eWV"jq:覓lp♬OԯJx櫽}[qz9J}x-zx 0/_ɩ J".0-u tOmE_ d?6t/?㝥E1pgR#@dzYajm$5֣#Xᵋ&$EO"gq KMʳڢk‚A 'qң#cs?\ǩ jg,l &Ebg%VMՇTi5G@Dsk&TXJ;ϗSh: ׽K0P\b:/80ha4 z['ə`\#?0!>AYh7]\ ~wB,zQK1Gٞ]`58 |6! ./+󻶎M$<X>6Xx!&܋7^Q`k42I9+g>ՠǿl0#؆chd }YV-zU"sZ s;;TweOd yh kUA?WJUn%ER^wfbv5`~ `[]eIktCË Aͩ|C+|:niOWF*?hxN d !uv⧟$E١VNx n;rffV.1ͬuowbaho: <]xS*W);G3LYsL\ 98Mb0u&䩣b"u^q4R^A%yMJLDp+kC%CzfqZvs<da[O.AuqH]pR>F븈-Q) l\'Jy;*]_mq6kWM.C'0;{ȧ-)QݡtVjYbdο !HfCDE^ Wb{kgNE`_R _u) b)NKPRѳ-Y wW:UJ8.MV6b˨Epq?|xbmSqj:1ݢg"|-adzr/Y@Hk:)n#Iѕ|;̬\VT﬩ʛFCTOucמKY^" !)9KJ*G.:mכJ9> stream x\]oǑ}-086X(+{"\IQD^,+vVULۗlz`knOVW:U=;Fu0O{G\9z4)g@Y=z)0&yfua䢚ãqshC4÷ت`B^m j8NI7I &=\br)=3> 'wTbБlrzJ G\]ܽ.K)=vv W'\OzRXv gkS@{1TnN7:Խ.HCFHaL792~z ՆO6):~C;}UmX sR4a{Y|M2>Dʈ i4֑=|U*kt ƍ{ un&ot O2aix\T$}~ud\#z3YZ,WIEyQinUFFk(ha^&8MO2$3ir<$; 7|;Y=O#hx]y"R<<pns XŔ=Ѳp9d8?-Px`lYZ4x^N 7Zp9:fxZ{[}i Ëy3`Ǎ"׵ߓ,F/H&^4nGI`X<2upf>D4:9OyҠTe˨lOl;ni"W5I}|qY'|HC.(dbw>w>&j+'x?uphlyErʛ)wS"qxVɚ]Sܚ4lݤl.dȞ'RήDcz"E=JѣFC}5<;Q8*yD>nm_B-TtY+U _.UFNz1r +d2@OYm(1 '繲A=i}SǭX.wR _ku->űm-~ZOj_uœZ>}.SY)OACPd@kzx@e(̨jRdދ,Vc [HSR"kO*:x3 ,C62P:w O-<~#}L1CI2zikaBLW xQBm8`c ˖%0fh'>9Xy'O/ pWFB[uEh_S)*4iV4ROI-Z0)h[N\7k#:RֹǬ yIF[0ۨɐiA_,cU.dHnT˶EԲle^#z69<}Laֳ|ř].Y;P>Y pxAS(7M %Y\hTwH"W (7Y-EP_4ǚѪɷc-WtPՏTû}V \9bD_!.t V *zX&F|"t=$$3=\JmBgC~E2rByߧHJR(Z36bdyVkIZ%9r7JZ4ڀH!EiӴRSQ^y%ܚVI|ǂ&oɖv_o,XymPz9EMxtni} Ɔ@u@AYQ Rܮ z:k.CᰅBR8{yؤm,Q}E -v%F=*8ІHIᨭY C2Iv;/*H%7oYyzpT" i" P|ԲI%LGuj3z7*xuƠpxR൳n0ſv[֯ϵ7="QOjn^ZE`x]ݺVďgwIE,2YGM-"k#\"v_vRWݺgݑZj1uJEa-.+8>jUx3QU.סu*{-͆*_llP,KYANjJioRhΘSd73\$ߘ~Ҡi=cK̅Yb vKT&%669r7Cf<ޒϋ mP/p\jK@H3!DްJk0bjc`CEW<#yo:$ ,+aGhYFH:0{h^a*:#!\ݛ:=,Ś.Pg!E-X("D8Acg)rqԴ@̋- q Yft~ވ\:k*XMrL$*FYqC!p]jjoe!5?fsJ8;Y[Zd jH"ApT%twJ;̱_֖OxHr:oY cƤƯ6TZjl!CZ Q§JKc EB,xBqDz:_J"o7KHh9* ؋=+\M?z*h$ԗxQ7xmϺ ǾYMOY "ALf I-x!/+Yl*0&S*maǹ.s:%.QIt&X KkE^Hf*S7 S2@}c.Ct*Zl`d%99w'hbcAY:̱Xm99>%IӜKP\k Ӓv$)͌ FiɒJ^]yL\qe7LN;C񣕍=$?[+Bt^MtW9H8y'""0. 
Avz A͒IQ.|\vAS]\@t3{ie+akEe \m4G8{ 1 Ԅ4OT.pj:0b3+,y;(Zߋknw"Cz{5|ZM-BZ2/jQ-~} ů~f̎; iӀ f]$'T3}z@|RyIcsvB۵ Dnf׭@0o9#5C[FZaÏ7]v]SSc,ё;xZ] Nx`q@ȼw=\k1>tV`Ժ2* [2NV|a^c^O*D5 Kp[o=kP'K[I$\yn]),4:ޓiUՠ"m~vjv*Ku1Q )&9H]L =wSEI%I7h<F({!S@;nґT٧9]4i@ !;u_b>hM1!'ugіohW$v/NdUGv?e`┚|ЈK-1!{9-, ;| ~Q_d÷Kz<}-%U I|P9%֏, W]^25⭭"'+~_ކ]M^_0\> ĉo5@i`h .5~\3 (),uIu-,hLxndDo2F3@EC`]I ݑA\ӟYv۳%-ʡendstream endobj 604 0 obj << /Filter /FlateDecode /Length 5482 >> stream xf8E-qJlXOS6|U!a9D˲̷9)Gw;g]W:n2f3i @GD0/5N1?"/mNۭng# YJݭ%1\F6dxr;׶w7*mC#F⹸T RZGM>N TKnGU?F ܸ50师]؇x`BUC& A@@ht"i¼ 7/㺀#A&^ EΆ}a▮&tDx񄬇o؍8dzb:<`;fz`PM&TuK Q!p;#Lu #[ơ@68m ܄j"NU%2!PSzU!AQXa  ('VuDU2*gw ΢ %В_9^*;&72F#@'qkER̬Bh.3F O-dYUYgbBF-aoԹ1C:zbcƃ*wx+j>ס!kP;-aEt42߸jg'{BThhą `׃A6pѻӉ0Xq {3]Lkr~m)(TU/Ԅ%&1J5#?gɦN &㶯gs+p{@l^l"|Hg#8B;m-kɑg'i$G 2Yhք<4 8;ُvVz4VBP;فȣV@]ZMzlvu_MA0]5 W?oΝ#)@ܔ CO xTng|Et o3HX9cbѣ{T g<)+9 ƶpr 9 jJQy+Au ly19$US +ÝHpVST\0/H.BnfFdf:.z CTpߡn$#cg6S*9z팓Bm?X#.&!:vkkT"e+:8G4JU) `K&h`U8"w,[n(ǐ*1 0ؚr9]02`YШAҹI*,/~R,[d=bF)<;#X)ĦJԍ Wg[=rļr6#\Y%d FnWEe vQ3CZb؋/Y0`K[lå9YkRXi DG*|ͮ- cKmHZ6B]006*z$+3%BZna`\?`S4~҃qgZ-5vrCգj3{*7֦gɬ]vK7XM=~G.XRBחӅk4L[ XP|Mk++R0R<' Gq,l0?2:둂CiJ+.(o R6FNS`(F{5S?%~Jz()cQ meM_U|S+ 7jmW5u_Uo xL/ ȶlN`j~]6'HkYIܠ:8YbVM(~zXkѕ " f2Ւ <91h;bJuYL,uE%8$U]9>tD)kϘǔ>2ZΆ>sxR$\)4dSF4 Ǥ䕱Bm,T҉FR-X6qiDSmq?UJei]C}BJk>YVZ9cGAo(P_" ]+0ѥYd"&y S5bʷpRӞ)Ztd(Ңr\_"6 !&1T~)l!$Tݍ*͈:bsD.q@d0|FeVqRfq"B%Uzha `IТmf9a{_Y~Qkk5(>+Ov]L2uڹ5BmubYKfFe )y!uNE9@G23e[V1( : Jy 0It"𽭲%-#?)f8@O]2=`,Rxc!ĪĿKJ^bKfNR듏17zFUjh*D|RR [[{w?Or-9,7LU4D ; 9/I€*Xz6čwuEkg(Kb /ģv#qF(y^5qIGuUFc-1,x+6$$T&x eX nlļ.NG8oSOdj95{Ԧ ojj?M0U!ugҭe@ φpM%1KM*;CS`iiD@Yꃃo&S:$Oɡ MDлxάhuID kT%i*)<ͤTa~:5X}>ɛL[ѿ<4p\@*K^%as r,{\UvGun=d]/|Kʴ+(uYb% jҩX]X.eXYpb[a&(co.jM ǁ|({}_^;l/+[؃!vB@6Ҵ6Rb $BU0a8K`jДSr8ܖ͘5!I%^C3w5\nn@这^`-wClrEygHd !˸W]1.]fσy2uԙ~dw#P{nvoٲU-hs=ʚTPJ6c揉E C8UMyņ{ont&Śϖ_ i=MŪBCb蝓>{R5^ J˘Z r:{xEk+}r79 ni7wK"p\8,}LCvxAvp9X_#fOIJgM-~l7-F<,]CK&N6{.yh.\t-%s.#01y.fg0&UuEs]*e.> iK'E9oڰ+U `Rߝnu אߴYO%hrT )=0޺NW I5s2Y[SNl٪EWݦ yK4.lɚ:3w-F5VɞZ!x_ؤ?P$ՍG&cm{47"4bTAV1F9͛t.jFWn"|Ժ \wʖ*j~r7J ;(3r̆6 8wWu^5]{%-u'T}5us rѻh~6TB@oEEj@n4 zUAVbkU`cӃk7\(X^ O0i:eWk#(bmYAce';2nxA}bV6c[61;͕.0kh~Om0fRܾ>,xY@`|lJV FwM=HuΡX_exY1ObzXO}'Q/{U`g%׮r6j{X*VCeHΚ?yjTZzsgFpZKJ/)% Τ_O-pg4:tfFgZ%^>('2eCyUk!O)1|Tۤ%aWS}3^||BF3xT?US_v؊pendstream endobj 605 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5546 >> stream xXTT׺>cCc`I0FESԀ`G,&P*"gR@5Xh|IјXK=&yI[֬Ξ}_o c1H$.66G|^"N" u!~j3j/01=`FXtN|~ssX6 =G3R$$"y246mc=6 RN#kUwe !Vfo^ER-f!._ ܊yvԇuKJ;.܆W/ 1E@JN$l $ꡰL:t0;Kyioڹ{ZspXW~;{M^3*jZD#aF˔+gb,(SoCp7#_bJٹN+ ]NݸQ _˿f bM(jā#*=#\AQ1E.UF|BrewR&Nq v^/z0_F}Yi+/ d7|X.Xrv8XtSxJ3'$ =1!]3I}J3I=a)i%&{|O\?=WvE5kU%k~#壺r[)CK# rNz14 a<6Lk^0)=3Bwp~{k2*FRd3\ pי36Q ڌ$| fZ1Bk4n@ ,NV)Z*)PܿJF5AG _Po$bsFYU`[374U'k5d"LļY ˉJ1#?A"g+̼j*Pqito&sMO%91-GZ"K:2+O+,&wdnk}{*(zO8"($}NȄ"=~/6u.|ʫ2q Х>3:`M6|+ȖB&3@NI ?t.%9) &z@`Zw@7ByC47&<;z>/\A/m,vrtTZ%INmD-k|uXnyqݶ I\GSg4)DO-lnQ&E}4QMyʧk@™25VEt[KӮN@y6P+)зpl63َx=~Aa؉ 5JʑNP>T{إO^whA++Y)ն!8fLJX} V?%@TCQ Qt>ى+2J@Ө.I+ah-wE>9%qgyiYRb`6#-&6E=!FGY*3Sߕh%O)BT\l陡}iCA$h#ԑ(H.Z!{Ggf J TqbE섬`[YEم'`nV:7 uܯKmĔL)n`w{eFG/!ӈopgQC܋5D6W)w&t$9-Ta{qWkahC|/}Ա>15f̣y~ }ceY i#m>'oНD6pLQɭLj'EZ(áuYRASRyH@ϊYSLxi,yO 5=w?3wjLOh#Q?|Qk򲜐Ar^;mu )G͉2ëTQuP*970}X-~u^NWj=G٫ mO*dҪ5ąՕTǷ{}YJ^(nTzL/m 6q;N8磾+8L ‰w l=ĝ;nϞmS};J*S~zk6۲_ƉDC l|lq1b~?=!\LI^?gpgjnfZVMP[k!#7I4/HݧgG+)INfrc| [9"TK o9y6_mNْH1P;Կ# ã=@֒_~vͺ#H'BE7q0~-yIŽ6R%*HIiP^G޵Ӑr;:3)GsXxW+yYE!Կ֜doMų& Z_5EN#hxyw{{MZʀ^)K7?OjJ}C7\ O1ğ+qY 6\gtI4ܯp,p ̿) N /z:BDjUL"GO|[=.**u0m_XL}j3RiiXKq۾-U; kh-KCm b1Rh9H:h28swNo4nj#=k2>W3 qد>nB%Ay.:%TR/3J~/M5xkUTB %6^t_BlG)?~/;lA>KmS^Bj2uUs0t]؍O h])9Bj9ʥ,Z }ehEA 
5ű1KNC}>>rQ*3.''.R*s?2to@Ej:8/\gIsкBM T|,=p$M]X{&O])d${v[vgQIvpJ,u&KپurKٗS,\y/ F,~I3#h~E2rb5 ;׻lX[, Wȼ'|bnp9tpמu=12mfjkۋ|z`~o5:n2=D)~6 4aCc;!u*hT >*=gp x- v82[KzkxihP'PBm9YǨ- שsh`3L0hC gK1Xluwy8 JE [0 Da!iciVGJ0"_j'L6[x-@Mz,[ʅte xs>aiDFxo*,ﰏOP@\+, liu7#5H lܵ7ûdmy~^׸ˤqHtv0uOfAgtR^l?$e+@??| =8 B+'y2q9Y ;SomF %4QZ 7.ŏ|K: Lk9K Jb`;2)5cP8ִZӡk(9SX*HEW 2uDS^Y}k*=ZQ~DNr8dub5q 5-]U'M٥*xksYr2H:@GXт5Cge'(7P1b:0_Z+8ǧ"\ޔZ F'3 tugĩ׿˹Ȩdŵ%GLvYh0ԶS|dKڙEE%0{cwGh4T-q`|hQbhLMόoe\tOՖ L'94Q ix:&{ T]M )\d*"-quPk =E+fWea^W 0 YBAI+],O߷ٳ<ۨjqʣ8"-yҾ4_ h ve7x4cRc)=Er,"k6t$Y(͆|A !Mqb*@pwWȮ\8 0#vLۈV Εendstream endobj 606 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 9239 >> stream xzt׶aL22K(PCcl+nEVْ{dM6 7@B5% qdp˽]_^K i=P]P@`庉, ݰWykͻTsߥߵBu;xet7yCCp9p 4aŒ6j&N%ަX*L]Ѓr![Nu4~]|**뮮mFԊhlbV~/PѳvmBo޲ڧKmn]x۽`ײ︿s}oqp_vaa;8XɈoLA2_Z|󌤭kVZfAi:!?EՏ4f] 7n:ru_AmB(īTI2,³16t9ƒL{+JrV11'l0LӨzoDCdRҳ`i&DBI#wXVw$uI{Rd/~?+Q0߅EjF~CoվqiX(y>+r 3pm]|KE72/I[)-K% 7pƗX#YoI^#Aeh C% 7*2?(hAU-Bf_^(,}0h {$pvbw!H!`?*=T~䨡Aiَ 6H].[yc! \J5ԁ$kAn(cr x):ZHu0Rsfڋl;vu CV⧧ѷ,vCh= & ce WsxX*y/jDhOφ!p࠱!_'"@Uz|rv㔍f.>oF5{h#ՄFt&!peq\"C?y_bBE2QD$jo}VB,aL$i;|NX~uՃtE]h,ԑ-Ň h`=2[ kypEv"<bKA#SdrAמiF>Ʋo&>9bk v~'уrף>ɻ#9H v+rȉ&oW-ۀi>H_\]#)=Mt3$ʏ+ .g9rp(1 v9oY{4#bE姱h qE )}Hx:թB!el41,ņXss'"{-u^zI;dl u9FP[o2s݁R&oƶAK\OT|l|l?&Xdagg`8i7sШcqmkרX{֬)JhT.)XY_ ϐFKH*iȆ"\HM1"NMd\/}ޡ㬀Dk4yG-w?`Aٰ)*4OQ .ԣlnOj=73x"96<׎-`"k|j"/rIwNE"r5 ɤm.6i 5EVھ?fuA$qjUr0BdTdRHC1׾Ao1k7&O-XY /2d) / 97zx̙RΖ_CIɢ6=1YR7ϢiwOf^T{r;r?e*U!3S-N3~\UYY$*Wqy#7N>JlW+;B$o&dq Z(@pj5lj-r(-uJ!t)H^vUUϬ^oPfW񥝝gI8=F@vm<&7"8|(D(G]pu[ !&lj!}5Q ̐:Z::cv),qK(`G Ke_{M 2tŮ4),otBP!^{hsv$Ygq@?B[' 9jM8ч|Ы%J™؉B:5YNcڞgiAz?NїB[Ua7lzKp ͡kπONl-(MC#2 w=,u`a3}\G`{  *y:a UPG.N+RꁞhM2SN" hZFșhkrаZ! q&.N/Eg`0 PO ~c!)SDB0ZV^/q{ +%v ]GK! CWX4[z"G|ߕWo,ZE%&MA[4d,A+APbW%emپ_d(erMuISRC$ƅBbYHBb`^r>>DR.2zZHAWU^RW"ʹָ[Ci$6:b7x3V |s=a#}vufmL ncZXEr$i|RP|}ts+LݰpHIږv-+D]bަH|m[CF'lGL=R#9/++W,ʡr,m%uU#$cg7?wXtԦS_!V@T}^ K`8 w+ ;V Ij|wD0lĦ#.mY_Lz-^|#~Z4SNؙj)m)4-UL_i`P1\a2TFj['T؝. >$z۳m|SpSDt4N#yvDۭ-IKU<07Ƀ3Ph<?-OJ4ebnY0NSK:uV"(;ͯkht]v{E"ϗޝ')9Kk>Ě'?/lLOK^Xr#Ӣa S'Bnσr,Bt//\zLxu\ʐRҐJ"O=IgмzA e+bS`ўX *{68Y28a[{>;)TLvj('݄q|h<1\6B }Ժ}SүuRk7PŲrC [ []63M("_rՆ "r/ocy8RЅΠBCP ~^ '6^viӂuV{shpy2̄3獋:7:qq&/OuoЋ%?#o[~h QY,="r7wӌ .t(g]Iݓ{%PK=A$Btqk#7hr2* !(%v'vӖ>2bIF8v:\z$ʉ4e[Q|/F 8ਡľZ5 s|RVvfQ9 B^ zޖ_$o 1JGɌ'HEUt_ɯ]bQQK-&7ѓSb/3)W) N:9I8?g^{J(Ź)!WF(c. JLPXD)ڠ;9l55Mrw;K&XT(rĦXIWܻLpngX`@s-=xn+? 
o f/HIJUfq1# RplmF:c%ΟJ%/͂є4h|>',z0㉯ :5#F#`(NB#G v[$bOE8\,tF 0VGģ-a#(8}3WaV'6-S4&;!S$,^8>{}p{}1kgyptOSu4R4j MLdP9Ȳ)dž-E3N6M 6Im YDyR|:Z#@;*!ߣ' 9LQDQDL&Wp'[ac?h)ME̼!`XS[$ q}fW߲"oB++P/V9kng|9ّ[/jY(SA-'ziZ)<$Zx|"uA}EHT•rM[P;0y{P lprw箬>u`o+}d5Tמ!3q;>`N~Wm\neR0CTś; ,"P~94q ӭc m6p}' bȗl^7ђۿɳ^b؃C=F%?`χS4-(|X) [BݭSv-A_}~J 6*FDG nv,;z_ȕG%Zll:m {55ny!pL#[Y/sjZFy)_\ ɱЖIONIM.u>X[T^ơ'XgQ/I,QBGQ܎(tx wB"hfg^ceYҭ{,~ӬK]yëeg-}!- ڲrn7HL\lWILe~b8ɳ}MTDDdxX8 Ʌ"N[`QX)C#ըO  TG{ⷰ]l{5SiY|&ԅ`,ܳcdF͈./Z%BQWPD}"JԚP.Rj<}My 3aoǽ+FEBTVgqZJa R*5?`eG eD;Z˪bJɱLKYkky"3kcQe o{_݌WX vY<>)98ZEe8mԛp^kL8rƩbLYDD ~lyu~V+C|Xu!:6[ Pn@d/'j.c򉋋X$Ch ;f>Devv'~F.ۧ쫗hΟH466Or<ڜeeމ*P+OO՟3>2kJ ^w,B?ͷ %%G'%.iJ~h OP_yIhB򳟓 ݝ|z96WN͙_έ҉qA.mDu* Ijgm2Y:rDw[x[Rl6>.P2R$@ȭC F̼+$h;"q{,cfh$';Lm$iQ Jˏ,]#i)n6hm;63 d1fvSvDy{qh$U4ejPO$,akpNDd( >N/1/]KK O݁f0mψ(ԺF+xwϠ0֝<#?BSiiiwwWF/D~}VN5t]?LvnTz˯Qh3kF|{E2_lԿ {/i{"OWSӑ0ˤO .OPB1;SVzR$-wVE"lS8Q.mM^ֺ3w#o`s圻*!@UJB䟝f9\=3{ًiendstream endobj 607 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1326 >> stream x]{LSW嶷KE]Ӷ URP+b-dM9)qX)Gl :`j n:wC]99ߓG!(JNNKFrS4n:xz҅Ѥ騴Kh,4LfLP2 rMQƒjc!/߬ ͙Z0fRPPo2芔:sPge1Ǡ7P.7EDX,p]p)o٬JLoӛs+EfeP|P Kz2٘7!ǩͺU9T9a(8Q8ZQ4ҠQ )R_P>>}sUn` /ZHjT<6!vFxp#Y§ IK*HͣFYsZiKWi p+짟e8.ã󺃎+>S},'ᤕ SD@?^:>DLDoFSF!H!up۩a 5lb!=(nޙߢϪ]fK2LI$LT0ܧ5u(-kU j=H$<$f}ů]Wm]n/Seg+!!utnٗq]M35^]Ʋr+Dd,Am^Rdq)6Jv˰xæ]0| ٞ ѐjY_X#'ݺG\k$\ rP|'I2̧{w_$Q*0($BA&B}c_gtȾ<9TWQEn0qB,m).|/4#{s9*KGAͯN}ױ*#33-IGŇZc}ׯp!r6\MpAF;i-rhYဨAHE|\r?ۗ]`kF^?=ՙݸ_Pdj T hIpK, DNm||103xb> stream xY TSgھ1KQS{jU]lUV]QBK[ d#!ۛ쁰 nhťUmL;әim%_og?|{y}/:`XÖٴr_'?1Q?!By(bèӧǢ (v4fR3KR9"~|B1&_ٓ7oq"~LT5Q qQI72r&|)!#C“OfggύJN*eDYq_OMɘ6*9n;mIj03#N4yMjl( S^K]"\Lz+VeGG&7vmܞu 7 6'%بsT"F#GF1b1L"['mkl"XB!Kb$:O VOgg7b-1 d"xx L %CVaK };d%'1{xFQ<ۡx{}c  36y&&!$Τ=~d#|ȏ~]h@>_Q:x6n䫑`3滨h0ׂtpl4KT2xvNDby!$;]X3 6 t-V7@Ugӑk0K'@A&1lns=(9^Ekz(-ZJ8K;? !Ӯ5ZC>W-hũ yZJ%ZdS\㬵W':P!?ݾpGXF#!3$:ГJ'XF*R~d-($:VB/bN  wF6b8LÄ\ױ@I ^yDsQh‡.0,(0pv(AZ)Lk _UZ>PBO8h*dELfqќ$0۬fJ_A-!h+i8B탈W~+q^wf4e23_ Q{NҭGy~p:eӶ]ہ X؁しB Ik@O81ьd?NGڊX(S 5t6ef|Sh4:B_ˮaϤieqŇfsfW|@Ec0} Zt~b VV[XBC3l5ßg$f~4|{daEqU|\Z YZ6">D^{x^Ϝb&KaHh%5Ѓlu41ث7է;ߛ=TfR |qzzq uDˊ2Rswz v}rY !o?PuuX%Ъ$Z+Ol~W.J p/)!`;Y*E.(k-tq1#a92x .vN :*5)"952v< FjGJ/0޿jaޞ~^WkHƻ&E|hI&g٨?{SDEN+YX &\\R<{9KvV+)/9oL#f PSb[==|:koY2ڤ Z 1m W uCYC:YT0pwG&kεAy?Cn-hj^vGp;&qV2l\m'{^5Y̤KNVkL&285%y756^sWZ<苜,Pҥ+X:mYbJ>ӓrDKcc5U`A9BeW*词Y@I7uŃsi:XqaNc J i_- ]=4p_*1_xYm8k(hk*^6EB4WGq}) MmUSifL;Bffű枦ޥ,%V} ;v.A/uŮ^/n}d;`8 *\vNzu?ݜR:{ jX+eȾ=ŋ6,ۄ!JQu &,6RS][,?s֗|J=xhn/kF/qթ,,N־[ÐBMR)5ipwp#U&c}d:q 6E-Ĵj*ޱ^! ׵5k}| <(&1lf3kt؇PeQ$qw2U5/lNՙc>4]uG횭QzF_Qv p\7МK߶FJI֢@ˀ34` n n9y=5zXM2z/h&k豕 䂪~s+4Y'ܵx&_Gi׋7бlqnqN8ޝ)qXIenffBF?Xb>dCyFL|{Zm*ޔz1tōvG x,L$J=upTQ wC՗hhQ@f ծus\]a0C H; 9BN#;lt*0;Ƞ0uJAGT y '1: >ڠSFCCCp8z,Y> +?8ۀ8L{A ϰVG@:R#n71ǀXűZz &ЅC.N:A'VS"̦wc9q^͔^ϩ a?@-7Yxw;m#FgqA Y)qc 6nuRC}㧌V t8$O)uS |EEDm:lr܈$Uu޶ w3Ϭ_=K5*j񜧴@nuⱽw=dO O/@g\tU0_ߣ1yy ^c>tc|Rr:-f_fXnQNo0kyk-,A<D#;S.=+)}Z7]? \^.``:=s*8kJ^@%ni$JV^[%rq|6mi (Syy8B^nB"agqӎ8*xN2lTqe]gx(d{xI49MΉlزElЀ 43AYOX tWwh;}VM@IءP8S@ :VL;fi#nɅS#ivK#}" ۳sz3oD&_ZPb0чwNzf&I7푽[I'"A-KQ_ <76+p3' VAGƱn&(70@;wf:9%"c938(\ES] #zC1M^x?ET΁n._VJAL.ݦd\GnL$k5WkisT©<>GFƢq0?qB)EFI%55Ы\r[q5{Y-\UEk2)j1,Qx8-[}=^HQdۥYTQ'3zq Tc0(]cYjƽA #:oG{T:;Ϯ+qI]O{-wRgvQ`20\nejj3vmrn $V5=6;ҚhOn]/$94&' :ȏ?[L k >? 
(Zx 2nU5xS ,\frc!XƉ g|"/+`.,2Fga4qlTVいy%?!_4 80n8;>p)!H ɤT` 6+ 37#7&8ݪyN@âCrm+]~+&^HVoЖJ0B\ZV}f1JhRk@?|xjsZvmSVJ$ Adc]&aM(`Sr릖/&o۞@&(y̨o0LhHp pnQC y-i)J_US.G %^]p`e `[PϧW1E=3p}c}ew'S[wA8lO_xQ((D^Ie 191,H~~ϭTI%-&_~2 uO)B!-qKWna*ԙնr4^9#JHT)+KNg ǞӮ7ΞѧG5h5>>sRC.Ũ8e_U< ^=ՈvC1XfʙU9L@]]Nm#+UnyHChz>  6=ܼW"V Mz=F6Kkw(qZ#(Sde([{mZ![%J=he8neeʾg1C3%.n*r(0߈K#C55 =endstream endobj 609 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1250 >> stream xklSeY3݀T6d. +W-ntUֵu 'c]/<.:sҹ *N."bCF%A!{LL$$O%<$C$i8gƈ(p̙xY6H~~i%((PΔ?;SPY$I(e[kJedf.Igj V3k1K\l5:C1fܬ"N3h-Ͱ۞KMgG\jfw 3Z=696af\[-!bRi4Y[ b @lbXE&td4K@!#U? {B)'BT"X(JP 9qfnhS82Ȏ5YpU̮ʖ˛.#Pwo| t> j=#8E (F J 5x 64i_fM(Ib*niEqS':>dWx:CJ& &)(Bh*GO!yUSQŴgOkUlWY%߇A>*{RRP7C>rV;+ {y. ^CvR./ ,O(ȿ ɇ P,99펠.4/B@ȇdhlWAhmCT`3_` σNjz" 7\ Ёw.JOסNqsKJ7n^ռw_vTy{Q-f. (:)̈́Ar%-h%.Uv'_ nwG/>ݜ T~F$C?@qu͕P˫vx:}UqM}k+ a>.Zwf[.{WHa6A 4-\o8nI3h\S? -^B )D*bj0un:182o\pW>Kwqw/sdcRQ&#*4ZkS8xRXpJ?{ WL*i\ !DѴ#4uw'QKWD/ItUBr iwĨ [TQ{>18ti^7sʷc@9-U~P>HY G rNBIK*# Plendstream endobj 610 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3831 >> stream xW tS!mB);FU(,*,P,4tK&$Mi}I.I7Z,;{{Z@H^RE*H͠J)TBHHMX<ẴPcH^H _g';w;i/'LJ.-S7M64#?[dz]~qm7Z*kl o+0 [Ĕ엀+JMxrZm?6 oqi8M#u+U%o`Ky==`+ Qp^Z/8bY,p~ Pg/*<qe)*B_˛&u,}2V4 [XsL-:_8kN8GCwik2Ǫ9jN<jz5O W¤=Uby?x4ܴu:I .Q^&A~x 7ym@FN0S 28ܽssE5̿|IeE{)P07'/Yc<ڬ`60#d6=y *& 0"[=KWRUˑp`E!H( Rb763Pء:49y k%8DkTZFuz'У HȔv#9?B@[,9XRP0|{tt (O@`®!N8hCpRh>4+)Rp嬮3!- 0 (?DRn6hevxIݑdߛ4~Gy~ ݁sҍ`tvin (]%?׀ttB<,[:zxG$KPptpy0LnRM+A<,XzAE6;ެwU L䳳(T'.[j;V fJz2@UZ>(#yp+"K:?TԜU܄T)FC%,[EpzLmj ):3urP;{boa˶ow%Ļ-g7-AtWR_[݂J\ƫR8k\ћQ1`c.'C&d ɐg!\e4u[U2,>ReEJ,O)>ׯ7#ޢ(TTCZGPG|yrChP$*ޅ$вOdܺnrXDăѳNL(>&G3e2#+~bXV,w2żkqY]sd1\iYD:r-؄j&8nq]{-!#>IzRD09.ۘ7@Ok7UU.EnPaGC$ZYVmDmC~g^* y{i ^ ٰR*MuK)3 k,, QhV#oᴐIѻiv&+>Jc=(\e+< -HqhܜmLdMAY :UsR8zt9xuE..WPι}@۳Ҹ2ρK尀~7_w*Wl9nZ'8h.7iLjܔ_M~M448*gH^xy]PBX5d}n#@$ *ϜCkFa$&{wFk'#ASJIP@*o/jd&gu{zJo5r Hp`hMȇ;6̛"J"3[+TtyVdveּ޽CdGd%qp?Yu-j^ğmk^RًN!YjnrME2' VQe59T\h9<-&rU#f(ݹAȷ-i~8MpHzGZ(VVj,vqI4-m]Mpj,L}Egql6㕵 ]=$ۊu^QFqvqXul0e"y-G^cQ=S`J& y}_v^>~Lh fU ΢Z}Y~AiF^s# ytwߐ3cUdr:ԐMB"Kdi#FQ$>T'E#n~?3:7?%xo0^-߾c_|ck,+Ky!~̻[dkDSܡNhtnO (ͩKhMZ_}VnCz%Iq+p2+^@S#ju.hJK z^I^=(D7+ܽi~rtdIh¿*mzFAm@KcZ9:x{/ :ۄ2 ﯝd夐ŭayj(?DLendstream endobj 611 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1402 >> stream xuiPWgjư.!"r4* ,.jG%cy&b<@du('T,TLbAAMUӻ4%hGD8O4?ӂM!EM[>JoU-l)9>i [(SkR2 :ռ*OOG7IMIVI1:CMT2TtezT'6*}.ANUkwTd*$&Iz+IJIө`MZLQT_?]`uBxڤeO9PZA9QT FQTeIY( M}PZZƿbF!sEcAmoI}Hr4K0&.9@G^P` L˻.5ʗGF~Ϥ?kѐ.`M!QIBSXF[cK*eM^/Lû!NpB~$DJ捐i4\6ӜXNC_ a0V{_kVe =9CU,CM`b.Fk؇(' ӹ q6Fh<1+ۋo=xXQMkK+G2>LL3}\T,RDL&nD .uל,H OL0z򶵉q`g3;Mw" ?b:7'Q^ø-aȸgPCDRa#ؗ,?e"ch8>`mCC'ǀ++k'p=5p|'j2ǡ/[%gkw'b. Vʒje]'wzz{' q?#{$ Wv Z)õSv*z;+Xq*+P AgB G Ŗb{!FA݀[,DnC\9GEhSH{Eys0w] b9]'> stream xZ\TK{Ulln55bK@1FQ (,e靥(bˢ&Yl/hLILD߹f{YyWpggΜ|g"ĈDevӦ~& #wSpﵭ;KޡŽ̌IȱeŽp|0BD~ånAVvg5m'[M:ucssx[n5a{P> 8OVAV\]!VYqq:Empoԗ־KoYB2𣠏mBlCWik'׺Xi{G=w޻c,%N})NL6]6#z,M}BRRR:jOS {Fj"@M6QKfj>ZNMR+T#j1eC͠l*jMQRk90ʂ2,)cj852hQVTj5Z@QE j15ZB R(sJB9RoQNKMٓPR&T8J+:`jҸdIi=PYgr_5`?-5|A% ~C>b?$#%a5b˶`,vXl9WF iDH~d;މy g}ys\;̇[Y:5jɨ珮3N$T o ƠdJo"Q+ة $MJCьO *^ХUaO(r+3tSPSJtI j(H΍҃Qb2!Uװ[]B^cT a`ۈvrKH)YpSfЅd=R; Zr٭}]yy΋o:|1r c{hQc[N0f". ثGϠ6{ؘ!,xWbSFK^rkxb G۰qˮ3yq҈.hZk,7q/Lbɏ~ 9gv|WmN[b["V#dkDec!LjdNRϺ囈YplWo]ǣ(6> fW0{ 4!un3,;P*⩻0JK^uqI+lW,txL9oD Y{qQ( YzT2^{r2TBBeNܻ=-j/ vQWtʂ0!X,9 &bK;O'1bK~OSIXK:a"}yRc-()ATA̧+u[εm]cl8=t5acm$$?հ>} }G_ŒNabٴUqOGpx8|#ћ=I\g?:Jxݪ I kaac 5~2$1ŵ1lZڞ|O6{OzA!] 
q./ -$g4PHOHCmSj H&\J(i^ޏQwdGDQzN0Qҙ͸_FB~D)G(843y#  &JQs\fzwwEK}ԑUEu9Je+i^KNxIW^}IBQ[։=L,DQ wpD sYw06GG1,L{:' \/s.3 qQONt3wN0XOXt^5s[(^R1=dOD1) 4L0=H_iMyxg-ӣ0yDAK"poo6)LHr$o58쾌i f=EdRttzi}ё#CaIޖ :,^X6-ߜ{h_g!9RX"tnM@lTTJ#lyv ,2^yS DXEIqD.):l5bf/kda3+dURO쇱`K|4V&="1nI$GǸ|*JdY?52WHaW}FtvPkf*!?1tշ?(;7~X&nTwcp= )> hPW }ͪ˕騤[O>}zn U*T}W{.X7$;A1{EᣉN[ںzL;Y?_kڲ޾OZ!|p $^g8r/X;P,3x zmsM\ts4ĬP4JH5`1r2 3r9ZXW砛S4iɎ ҕ=.e,,㬮gXׯs 4//K v s=mr0P ѵ[8~:_Fysb[|%i܀*BP0rFȧ̫ħ.rbT}~4D(?t_eyн\$=6a8oFx:هѴ 7 zR5(zMx ԲuI߄Î dkS+J%R[t/&*i_;)]g-,e$:6[N^E{༤vBB%G$p.MI;bd쏏|HZ ӠTGa]x)b:ݛv4@Df4i( * ]3:cUNDSl,`ѣ-jxÕeSH#U6 Aldh ƓM~C>Vom1hl Z-wo@/}{ӽbu=k@VapCNri/4g<ܿ9_9V]6oX=N~[gaZ'J(}]:XXj/—7-NܡNM)%KOYZe-*C9ͽ f1? t_+.07px}Bޓ!ȍĒC'UFrX&?$t]~\>ROHz!Y`0 8UQ0;zFw k~!IW*2œ*őjޖlGT%s eLC9Y^Ez%"~rӳAtAIe8JyeU(Ƣ wd]WC{Fp!ڇc3ɟ$T4#'yd/ >=q9D80m9ڢ]-JQ tl"K^BҼ=,u˺N-4kDGak#lȐgSII^= >,\*/MKCN*h(8wN2=A<F1\س..)=2iJsQgL;<  >m{Z-V_Xf9 w_Te{?/JʦȎ&molSvPFԡjY,a`3ZC" 5lp$I7eHqFfihWr"2,!-9!@ :Sr`mփ75 +(%gDx~ú9]" PlpG0a}j\޻TK&%ͭ`Uj~0j8G&q۪Xf D8Jr:FL]`Ih[9= ; AeC<,M9%+RjU_s殲鍤EZQ Si7Yq?:N<ؖ œ6m0M3fet)* L Ip+R(28*oݗ6o޽ںyS͗nq31aG;9xpuO,lɖog=TPPP\Zp* SB$UʘʰTEfcs;jQALqY~emE{c{QSnv$ۍ|^la3y≈1I3a\@ֺ[ ,*JCoK&ayY N.;Ok̝]z{=ik2˳nj4mǟMI*Ģ/nGgfbޫ*J,OMVqu'>C̋ϼcxv2P\>YrRM}侄K,xsIk bI Qy<"<1GL"HCQL*v֐4[aͻB8U¦x@[-?k"r<#- u~(."^DQ[SɯmΗ?~𱣟ΛnΑ!2 rK~>LO8*V㱻ݹ62 e#$&G% (E~VAT!jzRzz"x_MZ+9JDQayI՚4BUY%1Y{M&L1dcuP0Xp|=*V"ŁQI21RPexQDyEVQ[g$Ծ5ca6&޴y<@T;fB㴿ܺ' )W:.aI<]zaZbT*X=#W$ȏ&Fn(--j"1II>U\FԴ 'F>'SZK<NfQu!E{ݯ"i^&;vPK!uCl+:~byj[> աc[kVg#&쀲Vpr7O,:K 0xtnx˕ƒ?-*t 1iETVc(Mڭo.+юO:_S'cx1~l֕ղry rSӹ={|y'? Re$2;NrO׬_=u碼|ff XS"} DK6 :0f~&VGG&%'%r>~2$DG=?  &<(8Oj Q.ʎ/).هjQ]qeIyQ}*n(Я\TVG#9Ϝ=dbj>uoP>1ۃQ":;w9>p1?ӱQ r X)%X'E./qxq85fdfsJ/~{ǖ_ɇ~fgDs_1'E?ܠ> stream xZxǶ^!,P`'k`z5@wݖ-YlQlɒܻ)C ]$0@ yI9{ߵ-sGtDxs]nݸ_q $c` sάD{|нs̀a{ó=aL/%sȨԠÂM:ydc͎HKZ Z+ H 8m̘a)ɑ3 ʈN Z'h0!5hEX|DSSG?\a|bZjDrrឈ 6N#<7q˼ R.J[4s2Q=+"V]:jMژu6o4yД!^}}35|눑Gsq%r^HJ"XEL#^%V5D(B#ab8D!F(b 1Ml%cXb!1XD'%kRb"D,'&+)D"D|e3 ! $1B0Kt%^ ݈DwEb&уxI"zmD/xID?b.BRy;Tw-"4wѥEJti7~7Ew_8ů{qžսwY}>r? gw x"_W:?pG ;UmAZ-}`S'( zuI~;8saW,*zp{Ǎ>55j[dH91hRbu>&kAS} ~%DNAlB-(Ɨ5.ıe ]yŒ tR;@{SxF_sNbjpƷHv wXԙAYmM 7Le^o&ʳ㹷ϴ&tIN;T 'jN>ԑ̄|mVlIw؜p"gVӞH6U'gx`˕ bWа7yǺ2c󶹡`r+zp 4Elݒ)w 6^sha7Fި;XD}[`h;hw|uF 糙`t N6䩡vG,O Ԓ֚mU=[)Il?R  q|, Ry_}ݦuF)GnqF2O(,U~tӮmB mDmD {m g<*DING ;|/-DJuiͮ"swDn8Ƈ84sNɍ _aW8vusIIl5(W$[*Qgy`Jp0#zJ_l L8^#T1HNZӸ6>%^D&v@}r-FY{ Zx! &Hݼ8\:`gsq^B=PWF78uQFH}TD37o9uoU6kRH!aZPKsÝni؏*"]*{Ɏ=Vb4] G&i{_خa xt(86`<(׈l%ld7= =9 |dUc WUs~XL"aʧ:o1u4|Te`(:`i;?닼 W2?uGt*o'9n>DGTlba36MC6{$r.Ei(t2F8A<7hr @>e= g8q 6f6[XAUYwwuqbU>cu ` KSS|58 yw0B{JZ b}ZyL@-z p$m1GǦ LP' {iI!S2Z$Ek܉8 @N4ߎ*khe]Vj5j u د4vھga&ϑ4@l#$4p{~5Os*F2k*P@ 􆒿~G]NƠ驕]{&-R^kzI))wkNGx"[m@-ѸJ,Ǎ5Xg?OF:d~u-GCBpfGc v 3S#A\8٫]T."l.HYկx*I4}~$h[[۳<@%,H=sNω rz[%Zkܸ% _"s}ٿkG5pTj]~XQ8=9Ye dn٠=Z(_T'r>aM1)af$$SLUb&Cb竑5k}V&Sج< Z2)])VgGf$΋Ȫ}aV௬k?4_. 'NV ,*@eGaP 8e0v]! ##{I3qΧGԀ2vl誟 ω>F~@D>=1s-$CD/]عcfD='KѽJJM=ַ'$X=հݤDeg"^#I|Ȅ}87~ uCyU@ \_xs 2m P,^W5YLJ9 YѲ녇>:Y{| sGˋTl9+mmx [P:NT:l,zh-~ z}q.@&;#g.npO-Pz$j:~}SㅠZ&ޚ3/Ummo"m>{~pNb:/f"B,B UK+xz7?jgu%?%1hA.܆qB7}:ѥ+4[]-(Gm kpŊ]= ;o?ڿvQ 8` "aO6~:(zCs]_=*B*LNrWW3h=t)F !߅7}5(  {x/3LTfh$Y`|tUt.Fq' Zr Лb cTd4az9yE6';Xi 4Y8[J5O]|6^K-Ֆ '͵R 4v?13J+91iI1RlP֧EYq9 7أ=das} S0 OE}x!ta_n6p.C~~Dh9]p/^g5;j6CEamX>q ts?8;HWF)@ޮiY_ Æ5U.5EY65?=7ZU6 !.cl'%VjY>䔿@Ț1T^> bm\%= [_t~vra29#}3($TUmgP>uƦ|{{)y M\Pt'p QW =[y[}Q[/dОMX ̿?1~:ti%. B5{4;ovVk9p[(.OD$6E 1XfH *97Q_a4 PO|c]V4p,=R\N轩]7 ޡ ŘLTef0#>'\lAT@)tM+)>%1)<}ئwn6xyCߪy|D}4 ^6ib,fz%`5|w3k 5gϷ;W]? 
c=zڣ{Vo qC^ܴΠbKrwdZ&mHXmp: L*a^5XV z0lkؑ1 j5p-vIʅiIm_C48:`pBA3ώ[?Š|#@=*Tb *sO;`p m弫\cd3 Z[]Ewođz K2iR$ SIij6"Bo4A|T[\E| O-Bu Ű7Ͻ@7FEGGW$kii8lBM).EN~эp\s0Q4#$l DK۶-dLR`JDuVԙZ hP`囋!I^+1:A_l7̨IsorG((MVZc,cP;= <0ZYLFԃ#c('Rrjy'aJp/P'Ha,27H{fi$o7D wjι/A%rܾ?zPLK'.̓`ErgQ x3'oOAttQg1V>vqCaL% LʪϵvM E;vb¨Bsq!(*D,E&?AOPX,_$@dTpzIP%她|lO›n;uJ0LKifW rc`kr9a֤WЎloÌc@~r3'41GUv s^*Ca3c@R겪ߺvEHRHBánkQQHd$0%4ƠQ=n3&%>,. P1Oƶx>)QԂs3#!~$=Qӓq+E+?sa%]Vd)Š4Pi@`DP')M,J s-UHrțV?Rp"%` yfuL 703PLiȎ@o&\ԅ_XYjsyjםG_:JPɊ8,jA-Հy~򡋦o힋{73M$h *rID(d 궤xja=XRk9V%ViY"9ߞ u[-Ud@zܔYYh\W` ̲:*-u&FgYW_s,)/ur;'!%/plhqsi:cBlEDqugC `΄]}`4=^a/pJ\4Y"W+<> stream x31ӳP0P0T06P0P05WH1230!U`aS027i`9'O.}O_T.p .}*.}gC.}h\nn@n.P9?47XΎEAmGTa?ߠ pS! B)endstream endobj 615 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 @+ thU@p' aKB,N>z ":ր#ME݀ gnʿ?`3]$WBi )(HtUwXI%0lھ?M%k17MR{;RA|ASlendstream endobj 616 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2436 >> stream x PSgo \JjSC{ӕtحV,U,B w$! H'  A(FTeKѝNwkwnuS ٮL;w9IDdtrJ*`~Ğv`G.l5 ۀm±}~lJyxsGx D 195 p(< -O.X/, |А_ dz,&hn[>>9T5y@g$_gױOd8~&lEџ!ֽ[o;۞O5Cq@cI{ȋ(#CACө@hr!-^\IActÅFh5j`ʣK]C oV4Z?̢ݱu p dsUZQ3Z(f w3]Ehco2q {L޼J V:("!`zE6 crAK6؈\gzX>>WΞ|vܦkhU3z2.*]iQ'z˛;M4D c<1prWBOnk}UT4FBcK:1|}}jw#Z37^&/7BaS.4ܾY9ۆg.Rwj;ghLXO5ĵb,l&f*6gp|]O84de@%>9~9_7fgϢro¿SoVk_D&%.Jށ!Sf9Cq>:҄Q@(vn,`Y9L=W[n#`hjs]ǹvKFr S6Ιlo!ΐwe|&]58ڂr,DOs;6P@ =]3@.PCSD₉os=J/N"Xdfyޅ.qd?a\ơGw }ނmjHF.Ձ`yh샳Us P꣙ii@$vV'ePA,@_N.m㨖=4G`[ٓTú=\s.a|`Dt_SJ B0M(`WنjPsgW6DTȗBOFJzѲ$\@N3"v\#Q J͏:@IaP"jfÆL}h@)˜3MgO;JpJXPK8p)nA7 ͚}fPȍ} @r ޠذx( N9T[>\M^G}/ طq6PKdBQç.s-m,<X Dj7U3(IDG؋R2Z>{a+Uɏr=7[:_~jR[Dؐtgcms. y Geendstream endobj 617 0 obj << /Filter /FlateDecode /Length 8048 >> stream x]Iq ?ݺmv9E-0dˀd 5$CΌHlGddUEdeUw`aYFDE7j7 6#usGM߳77|My}o3C>'o}L Χ݀jeC}9!&WhcڽLzbPE{^Zs~ ~ woI6Czu7A~9hviCv[lQzw;Rk0>S cw8'3dz k̇y7k6Wa]Fqug64'p0cxgŦ*'kUҰ~s6Oݝ?,d9ؘ<ɓvvO雃C N(e`V_Ð6ly֕ ~D @pVisg嫜}ʼ-w'E82Ve|A-Z8(|`>ORGzZY'yvfO[#j?q9]ണn7|k_yždBZvohrg`H YXU+1\E U(4l(9i:sű<[LY,/'FnsCY?I>d֌2qZ=o8]75pBr#7FPxExV:|U!,P9(_O}|N%c󈌆NF6 BtÆ}0rL^tA9c 7J ZQ`kHp͍ <B;9p\;/b ,Y*h,>rR@;)uV=ST3=t Kv,70apS'DJ  6]/FʴEѨ=cr)&g|ټE G@֥Z^9UƑO/)O!N{"c9凥|LS!Sh)Gd"'3R>&U@ןAh,rrpa u>Mī"'J$S\|SwlwUY솆:Xt`BUaTT&#zN1C\pQ 5&Rl2첍E +H]Z5$ tGGMЗOH_LkYM_H }]-f_0c07̓~/:VrmZBQ#7\LQHk%e. k.cOd4 ]LmE/ frdԘ=t9@kg>PaYTkgA # Ö+Ql|T5DUbPu^ j恺{r/*9z/o:0F3'Kpt ]nmZ'C w0O Yst2 ^¯Q2*Z%5 a5"?Vh>.`]e-ObY(UYzdj%aasv60W I>|;*,ړ4ˤPzO& V~<ӭGM25C:|!_^C[>zSM>I1kZKzbɣ?<"/ynnlp܄@hrjsI`]!'Bpְ\wZ,^ClA 6(pZ㻙5z]r 9Ш\Q;w}NM[#AA|Q:Դk1S3]:a M5ꊹqB1vkrYpU8[/¹Hn~8Hꭊ4%0> YJM6qL:FU t5)zC{C,3%x3^M׳CH ++>Rj"FoZ!dhXI!s\hчO,Y0z%SJpkwy$j5Mu/7\7'VK{1jBba\~LyX}/A"hLZf TǸAoepκj2qr_M5۠!Q5 ~FP>gdXQ{D>X) mP\ zjtS"Dy h"KRz`|z­)FkȩR.4o4\&^$/Fc,slv-ߍfv?)=;`yk#ޜM[kDؔ0x/J ѓj%F4(~#d&YlF@8 3XFH="H34j`:MlD諔x ƦVt<]^j}ȶ!kOg!nh ɍAU^ү|`P>?m4HU/<سANztK#oH' MxkĆ#L9aA" bOKඦ-]kN41?fO1*L[#1V/76wi.aH4f t(E</#>B$@Gj@3]#o{7N]q֖U.vLX06xܪ|RF\s 6qz"aLE_Ș ©0M)Q&?xd0QU&2Z sQ2U/,G$YΑ'.4>s<60R#X'ZNF%)eZig:oM7\ < . A#z(* NIv5x>L ZHϩF1:bZ{%M87`t;:ƕ_C_\1v]_Q܎utKY)UPƼ#2R[:O1c2W| e2Έ 0lQFO"-b s ecm~6%-_\ cq[|u&As)%(@ȤUb|9]\-=yE~W̠g\Yg_;6ueoÍQ(Vk&a܋ . htpZ#-6bI*]S  n*PaM ݇0[6س5FZ߽y\b/j/W܃q~9G֌!_Wueb0Ng G--*^nVAVekc a #F;gQhlMh)t9泶 #=`2 GԛE(h'9zFϦ)yt/N\(g-l^3@aK^)drq;W=a9E>[N ^1tfdA53kqHk:׹$c"@ %,&-YL\j dn g44'=hdEj%Em\8оO*2]:;jQ1U^U'!VzaݑP+RgnS^yОѳi1nnIL"Vi$Fڊ@4Z8%7eFXau(9;xXqVp bŘZ}#k^.Nmwݶœ/҄6uJa8)d,`Zv!+j&m>ԍܣNt >!2}6b7$r.MJm/6$ɋݠD*wEn 4t9(7A g8J]kjxBpZ]Z U5ApqWµGEo _9D]>QctũY-O26⻫?US.~WAS1)]E|%gCڟqQLtGiB ЩpߪD}ᓵz H[X_ :,pdVZL猉>&uXO<V TčS6~9 `r`8˽Y` ӂb< c#iɌf/a,ކQ*({"!\zC+N!4Tꇣנ˥2aIXUMk(<2t&jtD 30=t &LA IZsdsGϥAt~>!918? 
%lҜκtٶIK{*W^ /Tx+ K a-~R*|7t0uRhթI ?Zdl\I+4|a'Kk *dqdhho"H D=y~V8giЉ-nրݬɏj X^8./HK ^̜Ι_L Ԋ]SIV)vsp8/~JE=iӥ)3o:?YdsJf>ƥе9LKMu J`ӱ;}У} ۍw$e-j%3,aopyigPE er <s$`CCC0C׵8^=OJ%d?,r-;qzFMD@& ˠkyіMA۩V kxkX՘?60o6-E}ڌeJߗzXQv0X5[pn4‡|>$w0I^;YRnHA}i _Ms?pusu69,ab؋2 1==irnR*R>iu~c,F{g$xm3 G ʅYb*F7k W;lJYAנ+qO1=vDwM ˆ弢p֫y@×ԯ ^e@ {m&GV|6'O+PqT$''Om6%ѦDi{<[l:];֘,qi) 0 4*.;]bZj1')R771Am_ g]N%3wE5|QcQ÷\a _2> DgzPt*caZΐեRѯWi8p% "Ա<ô&aVkL7.a|S U7 3cOribn,4@RHkQ~Ep^:Ghhj{Q`Xfiƌ%QoD >^,Föaw!'0 <5,Kg/N=7փExFf)@G4^Ӽ=P{tiyk(l5R' s?tL1٥$QʯEw8<Θ {˩dPA 6Ɋ a=,.^TJzde݉aHo,)`+oãLkendstream endobj 618 0 obj << /Filter /FlateDecode /Length 5305 >> stream x\Kq6@7Z``5d>p\r3j;"#+3 بg㋨~#z/S*yt%6﮾ϛߛoq"+,7Ҩ}vs}׭UpƆǟRh_۝ֺAwFzCqSwnxݦ* ս>h7 ^rxk>H]p [ci-vt{7'B>S6CprxǠuPWg6o՝a{\8t>w 1.Mc:G[1_/mtL!q[|0UP%1\F6ovN>8O_wq@>Ja(ZI!NdnI 4BLܡ I =~آ9)1 Y6D`Q2@tНO6Ѯ{VE(+!p |U9)l`St?F-u,!LL83lTM\|Mhb2݃RpmN#C`$Vဈ{b ߄MWj0a^eoA[i b5I>݀@4ϋ`,]OF.eJѵp#Li'ugvKj ;CinTot$/ jJ ^yXvdAvwJO%M{Mi̽+D rD|@RxGj|uxm3#mH U۬&QÉ*CxI$*WDz0!It=XΎ0oAhZeBpvȺ܂џ! r6ܷQW|'t'xϊHHjKb.C/Ibeꇇ8p9/BIKYW^/ /9 ߘQ"G'‚/h(_hzOc,9ʅ4)0 A=dB!ķ\S\'L]89 ;[r,߉1dypZ?t d/tsQ𪂰7[ @)bbp+?֧`>y0#ȕy[ ?<7y.C pMޅذ rKZ#DށT:x$^<~x=gSc'I$%1VσyL\YŶ?U ODY}znx+)K:4|HP㫯lO b*f x-CE8#\EEnd#s$z[+E<I.1 iqU4|6X1$C|LNO2δHH1aف)V6"g+(]T$UIeok\Brؐw|4\5ц'͒Wl`rOĤ!U!$b!# Kf G I0lvuۃe] o'Τ<]SF(JY~F؅ nvkp n`-- kEXBd_*0%`y)W̵`ח7QvKh&^L}5!w[T|GR3,2GB,3*#<4[&YO#GKߤHQ%{ECs@RVB0*i| ΊP0f5mJip1EDFppO,GAP˕f>cuO4O/w4H6 pVzq^ m7an_2V֚A'$$ y{% nYzVH,y)`Y%fR?1z7;q):'4I!aAYޓUQD녲{>h[jib;gH!DZצD[h3^u KE5m{G"a㔌J {q7VpD\B9yP+Z$?2^H2,T$ YBĿL ڤ9H_M`ݢ~q\ ÊOEJRzFq/çw3W۝D:{FөOWk Y={ @cQ?+,Ey=\6W!jNkpڛ [G:MqUO48YTBE|U-Hn @u˘[f홱p0興_QkI'U岓ςb%KNጵE8=rrqml ] Jg"AY:J'fgLIEiKJ,z V#e"3EwQԷMDU]?K, I |IC`P{Nl0(eC !(bN~7\ǡW,56e'1E yi Ƨc+`.gS{R5 NpJ'>˥ϨJocYcNɘm}+6TK# xN*yJp^}: arhsj1nO){*멆VHȵ8M-ŋq6rଞl gSo Lv/ȅ:_XFIO(Bh5CV56ft0xjx]PHoR=pgw4B'!FV<lCG*XMo, ܄Oܵ] ڷqV,5VmiJ~}[L!tö|٥/됎V׌XK 947葷4ƤoΰDj(hDh;+N N cڽJ]9?nsa# BR[2zIU T^]bH7 qu3RM߃6ZIͷ-,Y?#ZTZzQgEKjr0u`P+*2:[x4+0 J#0TK:s/Lp`îY 2\ O*e5H+IRq}P!0W$4FG|hrZWVyς`!7v0B]#V R >O/o/+R8V*x;:xHQY%xv8DF494ia>ascWBm_i.\.RF> 0S׀$"35 &5C04B՟vxc#0Z vǏQt2`>/R8sI'}qyih?5KGק|;h &u+QSnx1UR"JUm5}Nϗ]',)'61 4K<*U[RPTQp0Z[5 j^[5x3S5s/pk 0z¥Z]Np/!s%ztm=a-$,/%A usMt|q yj1 vkɽZ@eO_S7M?uˎ"8uGF^=!~_5?G"GN\61CIYh"˪m;ֿ1'CA2Cn`;g"O^ݟ=q2DkC^j&:r1^Ez9F{KekNC2~]gbYqQ}|&dXۮAg"01_:Sz0v3NBH|fh ockf18r;TErֺÁǟ3N,^x i:8B;brv?5 _xORbӛK ;@q%+uoT auz#ԯ`bz(*amMIЉGI?5|oG> stream x\Ys~ʏiNp8UI|$)+mVA++޶ȕh`,K[]qM~Y~_b~?ӏ_3Gg1;>^լsBg- h/ٓjd9ӭnN\n(vwފ`^zx cI6uFtVZ)?,9w0vB*0bL{}X*%{ewR>w^wh^2`eQ?&8 }JoqFH0p)#`2IXXuzprLuq& [{KGh-0oS&õ;ƹs-\Ɂu_OcwsԽ3f( t^,=cZa/ yYM!oLҺ,ץ mW Iֳ*M!DR;42Dž(䪐.BB>dzm-}Sq!tH_-VI+so yZU!o y{S>b9ŋB>kNd.aaK.A;at{ dH& DrЙ"g}AG7Z&%Z @rP<_̎>|>(Sf1&52c*{y`n`NAzuqgq+k<"dil(dwiz$u A LP T&C]⮔kz]{ޭ5w?A#7l4kBlJXv`¡=Ev)&pHp0n0w^Yh,zz~sCuseJ=|@3.v-]4,icdn#}O)N GmS_Xo1*~to"u{j}M5hq Sg>4 P[Į*yڠ̇@{5x-/yMGvq#xP#E:.@#s0Ď42tSgC#&6\dH+:I(_!6ͽFM`BC$W$G ^ ytzN-w0 k<9<;$mq}c4řih'`,A|&{_g4wtl#UU!O'x3O&ޑ=hλ-& ZgX@Sp-JjTȫn8^2:mMI]'jK$/*X0AՁ֠#HiD񰒙kRTo TFoYK W\/|LVWeLXK$烏TG[kKnY^ZM3QgR}Xy.g24EechY|nsW6K c&9 վ'"{Q~wR\nbTvapT.mݧcs7oa,xC&,oG9$h4p2]^@uL}d@rn'!5H_ =j2I^Uְ1FL'Vtd14 ᮬnEGBD&灈tB <%~4ۨ~e7Nj4o.Y /7)@6lΡo :NIP"-eI0QWU~B@5V 8UU 肮J#JVfklUTʮ&qGYq3]y> stream x[[[ ~#>/FӇ&M"FD@g/k.j?cٻ67 CgW/gc>w|nllgkffTwbFsfX.(0~x|\a},$bx_0/W|XF^;ͰEyoa bX1j%1ō~XoŒ Rh= &AKd^ Kh.ayFL; ֗d.l)pXJy`ǘrQ1D( @̀i _9(_-{h-(o_ ;ƹstJV]ko izK()RΘu˞ H*  eTl-hP u'i=52/`հ-q:*JWаq,kH Җ3\e5uj fVu Ұ8".i5V#:5a@ic$ljh]Tnr6jcUG,&6`%:6K* 3T~*dzgtY2lUW_V@G?yXj!G`vK]$*y[]%_Tr%K\^R;p}%*+y%Ʉm%*de" O# g38)D%%zP%>uYp9B Scr9/~"@}Gٟ1Gi3/+/e)2^&b̤&EkJ~;MTO7$Uըy 
Z?2qPXp`¤vA8(=WQgj2#Pxe&FB4%_DFZ#Dޟva)"5gwwh2MCE3LI#YG0UY0kT8n4>GxJ8`*t p@ ң9)Be6ZPj,p.ѥ˅R\9PR0th˖*It+&BZ%3iX$J ('eL#@zo fȺчdh.CMa$2'YFOkr:65\@w$6^Sm͘K5 `"\}S! JX`#&= ӜB{;|<>2t`/YThkfkpqA0Q\.Jd8SXwx‚XSӍE)ULq| Mj*n'Z]pAlmrVI7M,IO$Ҵ1"'!δ޴eMPPjGx Q-@FmX/MI!H?<+t妒J~E{J1dX;SQ TU^ PMWA..۩ K؍Wr;%+9XQR0G&, _:뺒麝ԩpk]tp9'8r(dr42n|7"9N","E''^;~j+ m.gS٦C/7PD*uzO #&o`X؎fk2տI(JFNĸРҜ"IY'_rqO9 o4^t%Og(8)-`<ʥW#ڧMv3[.$k9 ;wYpͤ;vIVoa pР t{8̖ \GYݤrpDp^?UWɻH"Eɶ_E#9UgCwE#To[#E`4]P[ps=\T`)Xs Bcg0"j@QZF?P[ڪ3T<-2jx!D efK 47~T\ϾyjHʊILI!X7B"80CI79DZ6a ~1v1ڃ\_QtOuFPuFSi䤦Z5B$MLs颥$Ȋb¬8S0T>叵ĸ䦍zkMda٣_9=>4O>1D%lw4SuWȘ{S"w}?J POɁg/_qmuiǤ}u0@n+&2}Q]3TSE|뫭c3)da=}ҵ(w[a{iqIj7U}%I&f|]mPЀ9LԄ"^)%DQGXNEڧeS/ };?e/V`T?6`?`i7 m=pE⯖S>%m)4&G<']1=!3 GI$ !}%7|D$) J/$C7OEOBؒ#l摟9v0-AkA"FGpzJ]48Դ6N y4UC_NXpCl?Zs4|ovP 8 9|!o9%1%J, >(xj6m`I%}3*ar Ȧ XA]`SfQjfX[&U}¶Yz-kjrQb0 7sz֨..Odqxvġ.N*ً`&i3ƛ,\MOآOj~Ĕ>22N"hM 9w R{]hs) Y>]|B4OdfX8}M ~c*ZY /xsI#$]P&8x%`)ImҎq51'!Q;4ÈSiG_ Kc 5g6ηqҼ}pjd4s(65B,*0t@#q)8EV ﺈ=> stream x[[o\~?KI_iSTQ)JDR,+3CW+IQ!cgfHrZA.toR=8iOҟGOq04Dt/},ҨCvqx HJ] Rk=^/;b1ocP9` V@U)}䭆 eweҏD°{ iG]4j3PBvg#?NW)NV1FvȿV 69ꕇ;ӽbڝ3-7Z? 3Dž= [ÙVm.Icz+Sͺ {6}`'{dHOZ#Km{; Θ˘B酈U! *$h_s{^-RD4y/0VۙgY&ˋBpMvhieky}`ȗȣB^|4 ?Bn pj2cgM%O|֚{\}WFouS3$-I 7dAh_duBl>+.9zRHS' a4]G>~1ϛ~򇃽Ï^wrCph/b ma,u/¹WMM_7ɝ+JFf`OI$Gd7K g;f9٢(3х QUH_HYCсOHls8A[5Rҽ7Ѷ&)O3C[&sM2hΎbI]8 ]sL#H4w=m&)ANEsO͖-cp2Mm`t@斱#(4Gn 8 f"X˯~٦Ɨ8?1 )±^Azba&8JS@1U,DU&`UħZDHrħo%P2aU)$-/Q.| ɒ^4Bp* BȠMdԆ]sfrP<Jt"שb@@=!U ' SjF5E"3I i(OX$a&a?~gJ!C*r`4jc4o pb`/0:V k_%:ȴc:0.m{[CR XZ2`X;P灋F wϱK$\EdR(:IB{&*L#K \4}lj%$+*z; bMԴ}a\q:< N> Ш)JMy&?ɝk45I"<_!;=MWŚ-$'f)& ~)"(Qrh#ɜ37OPK .N; <=/pY#J߱ L&֯:bBvGaWX|ON:r$6UjttJẄҹXu.\&%\tyqL8w6qOʭp|̭l9W^CCɁT֧@^iVTHl7IL@gt #!/)8[7 s!#|P} 9:cXy8cIMmzH#HdK0yDD6}7q{i%"nH2)~2(lQ 7 DFy0jD?˃:7at㑷MWA~l~%kmt*WX9H87 ?^IpWc`%`r^{<3ypvSkҝNt5EFˣ`.΁{6\W;avqxV~Դo >Re!󛏔_2r!*LȊUWIۖwh^gT6ʪ7垄٣Kkԇ{F ^xjw_N7R'mb,)̪iUb@5nLjO(+ KɷY7wq6ޕ[@2~r̊ƥB0+Qc_Jh@՗@R)-;hbKsZQVOw$` J:c8t.MvGơsc6YŗTO~ $q R'I dfhm'0G؂\FvG'_uU7ж`a[wM3èCv U5r> S&+ reWCի3J2 ʨ Hgm-RGnVl3 2v|2؁*11h ָFb}DzՀ>"@Q2Y-ʂkPg58f 44$@X>A& oPoz'=?׉3P0N_+,6D|'h|,)0-aX7Vn'i͗_ w4h2:{ꔙ>3˺L?]N?h;-ݬvO@g\ls w?Gp;kΐ„R[x6V{)nwrl<'Y=5 MMrOfi]}C`em7ej2c /ƒ 9(;/ _o^oگH-ވE;;kv­{5n)$[\9/rwr)=z̞ʦL=b6={3%NTendstream endobj 622 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7558 >> stream xyxTU,;. ,JiK{$3$g;'d2限!DZA.vum߹dI뺸[tzkjV"jZ+OKILIxcF[ܢ -F$Ų @Jzak3 CWm2K *CF6?**k=[\b,ncM0t^/g ˼:0[ Kթ(,\*Bѕ6|et2M WOXNzJVUNlw4>v!{-j1*}^wrMWqZbRґzzQVTSwtu/i [Ac6tN"VY8~ɸ'EfeY @Mc!@!|Xn͏TF ߡ*t3h n)\iݖ |'&PVa Cb;oԣ29PUYxџ~5ؔfŠ$& J(lt3'mÝqƧ lt'^[fH>74~g}ovPfYfB2$YSV_Z;41nS9P,רؙE kDDf)y!nfЀ=Ơ1_^Xu /].KKOq4D(K Lmr0XVAE&2;,= F"ڕOv*b|UTFЙUf .^}EU¿,x IAj(PEoIxҔ!1zQq(.6Fk{9fn^X<)e&uki[l{[ax`Zg2T&RI/.Bx6i6M}}̵?>+QQ7j:[K^ZJzfNVR3!(DDRP `"s_m2*F` WMwۘ9)ŭWp"*K72ȵc<?dԩV֘rd0ma%NvZ]iqX fЃZ?Ä/ [jus,[vthl@e"Qڨ%x>Yݣۨ1?AQ,4%tn^i]Kn`2ɓ@M#lKx>QZgѐˀAOZtBkx",f18zqM媄u*RZt?q\XlA[kGyG߼>r4FT!K_XyTWJ_ّѺM9RZӛҶgP憏Gʼ?Q Ữ5#4VB#( X[2K t\fO#E фy04uF|`eZHȕa\t03(r;K%p*IShccn(T\T[ v힏{Iޢ+5*O2 +}zR_Q^VѶ>BB{; ۜmuKԟpHXIG )=,. z cvI SRt"~zH= …P2jΤ790MROM^WTʿH/>FDSlܷ"wsFr+X&9 /Z#(ovF|fq pwZ=@ oA?=h2pBk~ L-LBZ\irT;,vy5FDžYT\!5-JKŝ s~Ȼ#ot@]]U[P0#ek wu0͈gUuA)^FyIkKsXvdס6"ޮ&&34|:17Ed"iB@slVWdblSCB>[)8;d$ dY~~}]qI%nEc C󷠸/;!k%Sq?GT4pȗ~qVITLc&wx ݌!JP3nyʐߖctDh<ءjsP0䯡++V(3k=@M^Eivj:ԫ2K$tyj|,;Z!M.yV~^C{twXi_rgb?PCFȤ,. 
+̛ Q܂!~ @Ȅ$yAc̣5&8tVUCFC)-DF hs[ggfDXރoV6ٔl$./ٚBtjN͹SDt{a30MN)~3ܜ̸Hl&(3Il֗j'c/d[rJD r琎Jur5mk2 2"$m$qAk_Lndzwk EJհzT։]f|bSº쭄 ۬%Hy?QpM%I/ݴqo~LEe_ NQ)Tn,-UPii>ExsCwC$QL|PJ/dǎ9BR\*mÝX.Pu RB–0LFNuF#SCe :U %ϒXWn_ ZfY:pV/z6 ?Ϣ0ڝܔxfx̎a{\" HRu/:Kr[:`~BpfQ6@(*i]I0N/[un05]P 5:MKvJܬҙ/I]M2y:Z<@Qu)1G4 v *,#yy4A(r"RP؆jc aN7L+jUwW*~sRY]:|J}R S ՚%s-"5M,5*q5jYBVX=,SD#z0 $Nij#GmV2,G5Dt`ehg&CDBυ.ZJEd=j?ޤ/ax#'|c<8B]hߔ6#T}?p?K kV['v*kFpU#5RANI5-mO{E6%|7 0g2'JfN}Vw'TRu24/0iނGO쬦Nաf%#Ft4p5Ή诤hH'/2eľ7whAc^gLųz=>}nZ-{U'OKR?3q..eY4QpCͻ6,'D/2`4ZujQ3/O%㹼IWDsipMz.~s}۾8\LcdXibL̛JQYSSdR&s2):"$755[ Ap)`2Dr1VH¶? +ĞǯbW}E Ja)h cíMU]!aȿ$H˽[zWZsdHbŸU'bh"#vUc[Vs 鴤Ą$9yo/ ,[XH`I/e&NPmd 4LI V'nƫ Rrb8ECw;hB+CSNH)f&҉YViBx;zRt%,h75 k& @qgD?vY|fsLZ.MCA6 $Y3F^Ini2֨B0Aybqtο;<'ı9ZJ"_$OT5̇X?fw%E1јZDYL.%)lȖoYI-y^yJYGyFM77VYT䵻Ʈ*˝F`RJ]We62^jҭZ-1x U]yjPC3:˰+ݣiujU-fs@+Ķiy  .hr蝇Ρ#=pl4!Od_b?+7ۍa%/X^ lUW(-ỲO+h&?K8K|R`}Gk*@5TWJ).fLܔk[ք_e|ka!4 ?7=B+DZZhiHb͢3Ӌsry;|~C[-fbl*["=1;[kqf-I_$xVꇦ!RY>7 U¥R!I*8VAi˽@F6 wzORP]P]_R_B6bkk}nGmViW\')LC[ry S tOTz lAxg5ZXʫqw&Z  Ci/pC4nM N EUqU֢=Gj"*h0OQb64O,9P, &EC a #*$eUa!$AļD#`wYl.}~Lǯgh䤌ZJZ񷺚|t>؆.=gPcEgdy3x:i-Xn$q?~H8NDG:ޤP0Qg[IZ8iSE-zd CB׏BB=> stream xE Pg{n1@z=ODAwbk|ATHlxpP|EJ/r_epxj}B=eJbb䠨ذpB&$.$6!dG 9yH Pn (EZ;=kwI^SQ*j5KQkiRp*!=H9S.+)*V©U~m#jm[oǮL!k5I [$5}'t΢82b3q"W cBPe|]ltqR\Ydj3kVTB0A1;6$<&8"JPڍ}R,&biYtۆ#+ڼ'!CN[FT9A:P˸xY8vPΚ:;1HhlE@Q6^s P"0uRRQ~9&p32Ӵb"զmMK7&pYed̀<)M$H+$u0a&[ y){tvA!cor~t~ 0D9룍:!ʜ٥iݣS&D?edhImO>!`Pr/iҦ;kMxmo^&D~[ LMY ͙ \].0Ő["-6}g$hӍnR\)aJB1. "s<z]dwI {ydf$!5=߳\Ѻ]הiQ,dPi/700OpDU @GJ jUUDv6EuBe]iu%G + Cp@`̜@" /j$;UՂHyѷ*u1.0!9\v! %Z.5Sn<Fi5$¬Zb{tVE^jE83>~Il,\G%)o{{7H?<^7\P#<|P)9>"?(α5;r̊@}f82Q凫_ 1k!^bd{ )<l8؈rܴvk92ȕd G d Mb~:54"e+dG9y,2Hq nRR`ǘWq~G܂K1ᱥيMaTTX, A͓6lBJjvZ/Z<7f(+:n鷷H,y5~‡7cvR [wqBW &v-x]n0DEnS>"<7- 8 v|qbW7a:bKy Ů~gJo&spG4"Bh<SО+{TbnfŅ.^>dabj9"tUYc#d(O\4F)j\l)%j7j2 Y\>Q > stream xX xSeN(Ԣ`4q9""le)KKKk4m};|'޴%]X MdT\E6zŒ:iӜ{{CBP47-mĻGw wߓ$?gv`Ԝg({?I$Hv]ţ5B 2\E%;rS:!ӟ:uʔ驳Kvl(HMːoϐ;SWnݑ-/O}ryьG)++/\X̄Ie;Sg˲KYSggu﹅EԴ¬쒂E32;33ϒg Y7%2y|Bil[l+vL9cx@0ND0]p`>2rx J*jC5Iu & L xT0U`qBE4b!+͂Q[&x#,*O&~ؘa=oX8Mٜ<1N3QSGKɭSFG7m!qr+" KJ"]-S sj( oϡ \s4N8|3 P Nz{ַ j񳆩++5d9Mo󒯉@5QT+YoM=]Ϻ+@ZfYO9b}<,OƩ0>m$an{82p N2)S\0X~pPjgIOY@O > 5]sgY]@o޵rD)Kׯ0 hv5*)-(c\jUoX+%`%c&I#R~Xǘ0:ơ񍑞]M5. 58-,xVar- dEB L~~=fpRg\}6znLDA:+1>^$}sVfGov讍WrSp:K] 'N5ԣQ_EE.C4l@mz]tBw;D_/|r6W 6 :$hZ&Nᓩ8?ؤQbx;bM'^.A56MlryET0;iJFiABqZZu.ViG@hq:BlW3THQ`@``SQv>[1_=ټ>:F{ & X%}Tpa2Mf$|2F( Jw/F4~ڪ- #hs8Ȕx;:&^oV ԸD0Lw1hwA\^w64 s^Əns7 \!3R*ߤ[ Ϧ5x> r*cY p ]@ߴ45)Ujt ^P}3AR(>%@tY=mfV){\}mKG[+*% fcf@V(NG ӊPY٨b祃-Zy7g99:Cm]>=-v66ֱt\ȏO_?Ϸ@s%ߋ kXP )š1pxrCACٜgt6UXȲYEO42z8[bȦPnc K () qjYaNiH d`k|X3kIiwO БAIh"9ަ}MZ;56b0oQZ]YVkg6n*Re^m`NkW} gc&'VE9M}ꂦ4ΌIkS Ɇj%}@2PMU ^p=jsV֪2+} FzLlsYn!jj]qb v[byqE:PSD]h8iWTIGSM)5a"Z›%.9g؀`3Xk,۰@(X*9 IH((b}EW+WOP&j6>ɏ":{rANe5lb»ůiJ-а \vYZ,>fR|y&rC$Zp5QiGME%pTSK.5L-WN#ñAUNUQ<)Rs96u{ @k`2`I[ 8LJl\B(v1!,X1fUP6#„ ]hmR?VYaqcɆP!ce{k.5& ˛ wv%30tj=֖\INoɔn^uG|Y|xl)$ VwtWw]Å8<'n˄m6UT%`LeMDdK$[ŀTe k4aZIa%te*e &BcpΥh"G* 0 6^M'ypԥe6VW{"9eڍFi2tu93i ?gOtv4SmЫw4mOOB9.W'6)?87bܠ`.xhДdju`eK1R-O$ p 8H_FDp.=;W‰Gv 4nO$X~lk` |~ q3t1v✘t\Ʊyqp};.GO@Iφi.g5AKa`I)?7#{Fx󴁹aGpըKc9 ξڅGw^K꫏er/BCy5/Ug2#H*Pґ?to\;{@Jt{ j@㗁OfcW ^ J{ ]`3؍+ 9l 9{ρE8):Ecgc*.(#+fl~c.arw~iTb4S gyʁ5U<߁UIFD7dMe,oK3oپ#ڧ+K>EQ6k4sp(G\]G _^~;v. *WUɋ dU6ߟ$^œJ n9Ǿ奋|ff&^ZgK&NZ|-$ކ@;^l\PDx Rg| xj|RcZjF#u<6Z 8hYmo5^4ؠjz]ӈǟ﮼ dVXIq8GUb!#X+@rŸF*#beXm&W.^`1paG x|$74d7Jn#,vJdždG.Uc߰W_qQ@Y2: .-hULl c`&nosVȓcRސMi>qny-ԧ(@x9ME2#؊<App;lq`CٽBt Ms6{K_Rźf k)g\A 40r^J+BHgWAz9n~S &i Xz`˩Hl:C VR^UpJ_7 忼W/ID?\X6bʹXK;dkgn*] +/, 3YxY4 4UQ zu%"X,c|{_Ͻ%S NƎFZu m)UƂS (x6W§Jag|{}pڳ/ga_Wס'軨/!݂sI? 
Y O{x~H!Oq$cX\ lPa xI4k%\\M8y:q9](Ax9"Z`>7Z&uKoXJ3[,uc(^d?imo[6Y8Ҳ[N,=v_=T :j6J%$$,iiVK7a݁?\B#^ؼ0Rq$Ou%%EyҖ]ͭXNWS}kŮ&b'zFe$i-*G៴u} h{)yuI-4ꁀ[6d"|i-)P)\ߏD]7}0iS#dCendstream endobj 625 0 obj << /Filter /FlateDecode /Length 228 >> stream x]An EƩJdE0 `DEnߙSU]<ñUwm _Klx]n-񜋲cfrۿ})'sܽ'X Kk/gT1nJ),nse,8 #)XӞ ^& `^Y^23鎂1ti`atidN 򋸛g:ZòJR v\[P?#s0endstream endobj 626 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1493 >> stream xUlSU[G{LH& #ȯ TP~ZXѾэڮk׳vm~081#SADHCu%"&{Ϲ|KSM [>Jhiz4CDufP֬AU4ӕ[&㑧po5-voؖ<>qf*5]sܼ%9 ffe 3xyQZw8w8 fz\ {n9RyF;pSuvrN3:-Y$]mٽqA>s, 5w~~V߻DCM=Z]s3)" I+/bH;:| z!vLj_I@5qA߇#Sb(@^!HϾ}gNNi?9bHV nhcׇpãF6~#uCOOT36)O#C)r%) !9u ^=OIr?]A!ߞH{ct3( QMR!}uo}}R"%,QWYט Ծ!|a5SD7d-ezO/侰zբEo5suy¾yܸqa eRIi{TO+ t_З<{9(QNBlYQΖ! \Cb\;FJN.RQHĽkdF R.ғ<@A#Bha'xEz y*/PUCEOtKۓ#擆\cnS;Ē&ԁ9x #cb(薷)PAx VzWN~rBSwԨTp z&9)a9502.Q$C蒞k?WHx. D㐧& [?* "mA1!<3'g77Z 5U 2dh:۲õd橾`𢪄Ml^=VRCs{Hg4p,N)9˧NuI:Pɢ;oC7w[w= ?  vI1INF3A׻I" A3e Y. p~x 'a{$BvN9[SĸLЌZ1sXJ>\vڈ;kca~8KI1Є!PF# endstream endobj 627 0 obj << /Filter /FlateDecode /Length 282 >> stream x];n0 wB7L .Zm/ t!8CoߟTS>$>@%ͻ+_%9ܖ 9"7q1;5Ey~*dRmgWU(5DB= I'9b|U*bh??ljհA5-VQ3T/ TTQS!U-Zـq #8ujTv 6 ޷=cl:X]c#.޷Mn{=z$^U(Αendstream endobj 628 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2369 >> stream xu te3M 0(dB *BE<< BI4 IgBu~4%" ,R(.g=q9ߤ_ٯ93}s。x9bOxZTWFxTMExwEx^jՕ3>G͟'}%X! KteOlMէ(|KTW@~ԛ=zZNph=bX4o[H@>SFMj*ƋK]-篇JP\gtsu魯h.LJ=RԿ+M ;E+qJ|?- ]^ɽzMr 6͘7*YT\[NKq7/wT`=o,S6|?.ǫ}QB3?e#CIA 8q/bx2Z6h#* 4 ?052dUࢍ1u=Id7坉BtowbGmb3 6U]}j44$+& MQ78>>b@gQ<%h'Fd ٢60`0,&6a(.c:JC3;xL\?b]!! Gu$%A,lFvXC m4hT q%x*?E䎖h x:`m#Ȗ4usNR]%~C_vwhN:җh Guzft芫-]\\+q.7Iב\ͬIW^}m?`'+h+[O~έv㋋7Fn6;80YeAcÏH܏DTjXl.ۧ= `ͧ7p) ,);MyY&ַ|م Qus|:֦|NGifvk 1Kp1[c/=k>/o8O$,A|"6}ҌW,WZ~^1|}YLL槯o}^Y}'_OyCh;|yn]ZT;0W) )1ń?!*w| 4`cKw aT@ u.n6-٪fv7 A5g'j2 $CnvosOyGw6%;#f4% &&X4-LM}E$Kq|"40h>GSh\eҽ hh|R)H& Af#;9HT`g0q83(8A{tu'cعSQ) ᯿-A 9=* |ڷ嘩%H@-/W^Qm6iDM{_M|<]jH_UNa9>z[¸ Mm[ <O'= ---M{Ƣ’MD{KP'͸8>i ڃ{%ͨMyA"D$B>h[-V'Jp> stream x]n0 yA6NB&iHP{{l8UjvӪS]/:*ޖ{g̠C鯮nͿ+ӆVծa#(_Ɍmb֑Z8 8 UuҞRau ҁRbV`["óRble6/_u(b^r>Ze)< Nendstream endobj 630 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2607 >> stream xUyXSW@"]l*$:֥ZDj0EZw?Y" $@X¾䝗UZ,Lk|.#Nu3ujk;/ ${sKSnML귷,u~Kȹ$y>1z:h4*/𔀧{' =+i34A%Q:>:N].SPftz@7QΤKi FwGIzM|lV9'zrD&>:R\S%Fjܘsވj^}5==}~db$Ms}8UJQ$5*hGuZF:)FQGi"w#,ɑ:A?2U(+|4d3z>b&<]#Glx:。cۣHZU&2!&>59!R?Y;8]օPDgԮGs-[ Zm3Գ[Q4O-&Lt%]EWеt]O7Ѝt Jۈ>R/9NͤDH5̗t[Ɵ9Nf5l>˼9NzF{$h/g)M!&0!R/9 . I$DlР0_bNL`bk +F OƳf썛^'w:48aJh`;'ǐge:x!Y&M,"NAP0> , #@ƃ].b ` Qr=±آ7Ũq^6N>R%R+ĕ@MDbH=[?S" 2nCQzFʶF%Ż1&^x6+AON `dCФ-h"i}( Dml , + xڂM<1g{Y YyԬHh |~nbГA 9z3xy &^2`+KlkiE(BمORW; ClN%H[qppŐlTW 93y\MpGq>ST_ {6Lr3f"7jN)sW+>!Mqʄa'Iwp AC w1}bd3Y71p{Df^P/ߙ-O޴_ b_®ͫss ! Ťِ%-bx~٦kM} l 47Xw$d9yZ̄%fN. -AuЄ:VuVbmuňm!+Yx8v=!`Q;9Ne:uW]wlxYGjY8@/L@.MlNٙ;:+(Lт;[zqA2 JgLrr!zF+3[liWlڗl0@Tq( Qۭ3MyZ?d`%$ut!36.5ꡄu6ZJdvloww?)\*%[=aO޷s_+reMM6hg?ѦjB_yu? ?F>u=t1SvW{V–m4&/ÜnC3R<5܅.b!'UL68 46h`3uƝ{6ygHҌL hUwa.-C]ZVm34?z`O+2a@DNqBI8[nZl4+b??a E#T:j@ṃOJr%V[ oɧ-ٝ1mȪ."o簾s/! 
H.ȉ}+E%;.=g*^B(_m'Cю#&2+Px"$EC EVٳPjkQԘ/r2qa_a7D =F | ԛ"ww@i6OGi)r Sw[Rd,,5)z7^k aV)sUX.pƂѱWXeh?<Ŝy9Π]+çwwopQ7ȹ7p8jlpvϠOKΖ03C]9ӓ|&Q?endstream endobj 631 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 *XЪj8(Nߗ,N>~l#GpƲ5 HeQV-ƃ剳B7OfRUiZB 'mQt1 l.kSIT @s$Lg)FS}endstream endobj 633 0 obj << /Filter /FlateDecode /Length 7023 >> stream x]Y7r7n]}H# exax($Ա/P@& 3)6D_@̓J*uqu@\\=x@?_ë)(/x?A^x맨7vf[ll5k,9*_bR1vbA{r"}W=k1BD˗ؗA[[jyo#qwtd20 k'%4L9vh>ѓz}1D6ja̵M*O( ]?'?E8Z8aeN2aL]㜔9) 32 .O>ߐ%gL=x7m4 6IǷ̍IU4qSm\ f'w?$p_ .o#%J ᚔ6aw~ 3xk>-2sDN;#]HABřC"}&,=i`LO.ݷ9)49.$ؕQ 0ykh`Qڇ*C}s)(`&+/2WqPNVڪ/~?H@hru~xNɺ(n8Q.qhov?D XҀ&9A&K7H)L>Fv nJr3M$KDW4N D~A09Hh^ R(y-y_:,Fh>ρ΃![ĐW$Bga# G;eKDdFu pg ;".9 Y݆͏m~5l%(X:&x] NצIŞZ],91/ .WVH@M#r:y99h-z`|b&C dtgH̯⤔+Q0VVMP )DKY k',Ga"w/,#Ec=L9Rl2l}wBKgUC7]gmlgKm*nj|2N_u\=sY~LmltApu@'ɀ2w6taq 1STa^5|=hOm"dxb?  쥯" x1փU%GQGw}!eB^"M$U qa^*gBgId "ϽlWɫnŞQOJ;#)]8]WSg&XG-g&gLtՃ`I0)㥖a`A5\yhÅ֠<F lCC0>zӃؚ|s\),+pAI:y vԃ=e3Dxߍ < ֆ$rF_`7R@t ah$pA>e}+?|c(s Te ?{[ B#fcaw/@; k9Mr[cc9)# $W>i/QHȰu>ƒ~%``:XF$ %`Ff/N&O %  Ʀnw-b-J@@8sJSDղ?SW 1hNF 0"T!y0U5f>FP/837\yI%0!h9vFRS)H+ze>PQޯh^ +&qDJnCmofAh ^K!{Z|5yro,FzO-ۯ Xh*VBTuӔw즠"wNdQ\[C@$C9@@&Տ~R<"k= +ibkIGf]zI놿:>\` {UzFf * APշ3֝`'k3cŹ'i~iz2+뉪1ɬ̉^Wơ2h0}Р*8Irn$+lS`NKHl&1l>~}͗hpژs6 rѴɱS֠ӡܭ5wku6DSfVj36/Jk>M|{XE`j׳[ޮ \[8, d3vK@/e W2z]|sY>NpI1Z䆻 V[VkդPZ=\Rqv۞Lf{,vaL1o E4p:z+b4~q'Mx9Ów[A9}w_7f[-hJY*Y.UuQ2KNxˆ wVs5pVj@Q8p{еuy+7xp D9q6*:l8su!X*<_-={Ac# qF0<8l1Zf@%?YS˅z)&8%$)\Cz9D'zG=́Q! ,w|cJ#ꮷf9}f m8_IWȒ3.u3"l`RǀC҃OJ>%blëy{s,hz^qhɻw^1l$gHMBmz"i3 4<(qm2E` l#0pW@Q[Tc,|fȭ;d3Y$fP̭ҞRS'Qy<0]e;jqW,$IfI,26&iQLb K.wQ5u'`54d%m+igst%\zt/~UO}iּUK_:SZic/ˇUAl<zk':lyE4}X۝@8K){.jHeӰ~ǼXhO?&9SQJ8qwA0TiJjb\dT)3I"ZD 5cmʃTIbqi7T*18q>%wPUB>൧ZN"2+6(j *YTŃIf2[-Y0CmFVkVF nDGAzx=EGٌVu^`XF|'Z)"@LQM}}U>p.֓(-Ssn1rnA}#*jƒGԉQ2-/WTSIG7+0Bj+rYsuQK0K׭DGk2n4@LjULU)z|.CA;E'@ꄋ֬r:T6IUOL\ǦeBLa'Cj+W_\b`/~RHyY-ix5 \Xˮ/`&Gsi3?i^Ad-DiYm1mR*n CӨ 3$xGerZc.+\X^O.7 ¸ņYV6YX+\8.􍫿%|X"%`z~FBaF0%00`a哴9¤nrn\ήpxJHc7gm\&T)Wu~8 (ŭ@xI'X:.̋$Dו=h Ury.S>waӿ-`9W*b WEl κ Ls " ~H8#\=rH!UZ+PcL2N#LH#:2[EcJNA`T+~oCfPsuVIk b8._˳`twɰ@@ oz<~Pt_~fU"cH}?{\OIۜeu 鴳clx;G]'Vf =01VY}mus [V@f!_k5'x s>{6ӼKЎpΉ|>o̠ae8)=^Y@DZ8ÂYK,>;MXK{VeT|_h.a{70ňMVdT:%;Ԣ5KrfJDd.o+3i{wG;FTPzܫD~S!Tj\!bӹ\-F5l%7 X&: MFv?VO<E" =J.o^Ҧ$i,VߧV VxEo{_&3Ї9P_~J\*SII &O#Ǿ53..K5R5M(-&ZݔG c6X~Wת)*oSw[j՘̧jkecS.-(N.Ud=..X:Z4XrU+v5sLbpJ͊ܛG0 ]bge"ܝ9 J.^ É)\jϗ*4gC*^V ro~0݄F]7eSWS^)=0ՉG>RƤLeS]ݜ1-jߑmPbVuL2sNWkQ@dj-3IGpl+h}wՑ2Tq4hQ{ʆ9`(r[/<:]y֪IM(QUõ38t%Kb|ULS}7ܑK^6F? 
@t U5H VcMKX3<l×mlҵ?nlѭ@RFxR 2@gr,.*{}^2  <3X ??>Ƶe[u^Dz+!3 |L;SCyǬ eT+E`Td} 0hB!7S+,uz22͌QAn\akWX]zݹh1ysg a 8\N.5`H“ Or׺"JȻTكT3!Y4zVЯK)qJN1 O.f[%[Q|yz`\i‹(vk8{V//x׬B‹}#DV'{|2JGx{LzX gF2lތtt8Yӽ\IK\ӏ(Sw?^p|&m O\*L:ޘ+*V[:x`algudDݞ*}[akmeuT()nHn"uI?; =yڏ==%H=`熾Z,Y>Zaendstream endobj 634 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 967 >> stream x-oLuhs6E 6p%1NSd0)RB)hii2]P%8ɲ_lĘ1&l~7ϓypLY8^~'St^j w*kwNboՠRJYx1!C5ySkX]0`RǚS655Sohk{^^-}Fۭ-dk0G)>@}@hf޳TG0bijӌð-ֈaj|nvXqO .--|O):W%bYn1ymA?\0X3Œ6{d_oKc9䗫n`,n@/6y|SN=(|6X$xa.r4,A㣳FѢ9LZ:ET1#탫ѮRSpeV󰙙w/R<#?DOD|Gڑh,%w~X\"~wxJN4CcyL]6#BM&؀Ow}v /U2z4#P]!J 3d:Kb8Ovv"dx.NIų!4r㻓_m!՗HU"D{d4n{M|W>8y Jq'ؐ^[ OǑn#1deO;zX4dC4Y68LΗIWd> stream x\Ks|i6N~Tʼn˕D:DbJsX>DZr%Rt1%)٦ h|oq!f'W{lvvg78Dk=|j/~g\OVK=?{}7gpFiL/u{݊[i]f7x`^Y3ѝaz nY`1-zyù , {kE֩osl80xw2\n{0ϹwRvU ΅՝Cv]"z9lkxɂGex= kE' e)S qmgqkr٭/J`d)FTp{ nw//;L1f|I m|ew]i.ǥy5o`nZ %O Ae./0SJrVgk+D_,Cr6k L h;HyDL*Zq`i5T 3T.W:ˍNi:8?NZ %WK ~U\_nmwC4 ƧK,sS{NM)f%tSJ"yJ2A%83X4Cbaj#%MӬBT4ص֊Sl iU/9-@".tTIUDOâK`|}HXiзsq%qG5=(4/Ks]7m,n*tRs_zm* 36-闔uZF$TRMHT(Q JT&&93'xlye8Yk `vQW[)S^9ϻ> **fV<` Y%*"莮ʊZfr=g.Sd+ˡˉ1kVލt apF"0B%W@&:0cBA뭊jL)]m/ͳ;& Ow9d$L %{W;j$lѷ"JYAxǩqcW -$I|"N> [pn uADc2 닑;QwE4kP)`Tđ܉q0Bv[GZIK<6hW4>(ݸK$(1 t:vL&# p J`IͳKbӯl4Yl!|Q"b2mh`(FC&<@AMtciyޗai6'#`Is@4a 9o QFvhݫxb9듼HM0F+}MACKKVt"A#T]٫UWkk<`¢bw064Mơ [\SSp^zj"$r!wQఁм,ץ.yXJb`r MY_, *-pS}TSmѦic07)dQ4?R܏OYK"vA:uI@OX+ȬNvӬxd[jCBO j箐~; ?YaGW<-tG CbMi$$'Dȥv>X4, D"@(KSj`p)d< qn;=k›]o66Mgܽ4vrÓkA*F{eK=+;4ms\3q <n#0XyZWBY3PMAE M!MТ!Z1LZ84ץea<ΆHfq'ZBbܖȷXj.AQ3E75yK|B[M*HnIY`ii7%4gf/4x1ā h~FNAh?*͟:vІ-0Ғb!}\lAY &fY@Y}BI, +]׌KDPtkB]lm =*{Ѵ`m^y#eUl*f.Gq=桒 ,v?{q u꣚Fv!:ݪɧp_b@zI*sŚf^0`Pk"8BxWuIO"nMLE@aasW|]*կizf|S ,2Ћ۟pDtTqӴB{">PY> stream x]O10 0 0~ 8ʀ0%!tpw'0^GGpȍ%pu[ΖXpm!',31ܔ<݀wxʋ̫ӸzȺ;czLqmH *)J79l! 47I,|J KSendstream endobj 637 0 obj << /Filter /FlateDecode /Length 2688 >> stream xZKoG 鈓~?ddqY@pdDƤdock5I8Z=U_}U]3ވϞ )g>в'g\-O^o缑jYEfPJ5+py|!mcb A; d8arcwÔi-/["as5yc'۠jgX.`>V:N!xo[uѐϽlWk4/K[ 6C{Vpo/:a-x^ih2Xl4V8]g0/L.[;ϑq v:jD#9\h6 ' lL㭍/Wv4\AP\jToj,V4BVFVυlWC,v,1!ӱxpPJ.U\3 v^˶y Қ+"ɟMG#Ygg@ Ph'hx;+֦5_&u؄7=i1CΣur`]v[$[ॖ-NE.y$,HX<3(!إT 6zbتcF[Wۻ9#qSqdtXf.j2[;@=/P:\J0J@ɲ)e,$=q(t[+Nm5YTܖR&V;:LHU*R8 qӦeƽJ`PzE4$r:-nX()Engè:]Xnp~p̭}VxUh-ωveOZ..[Du0JX Dcv*gU byEh%'C?'ٍT =*΃Ѷ"!‰dz ,N'Z_e!BQQ`oF?T($ gM蝔 tcTzVUW**!m; [ MFpq`඗F7M`E,&Sރ66fD"8 0GvqGǃH[.% L]vT`=\( lKYSpցPKȀ!Ѷh]U/VEke_E]Yyi;3A!"ϓ1LGl}e!PÔ*dސY{u`>"RQ J20ASUnQdU_v(c|_yH6sؓrs*1.pܡ4\|e2{GCۡX,\amQjqmpo]WWU MH_mI=b;Ɖq+m)n/2R"cK'/u 5>Jà/lZОߣ򶘗 ۻn]: m{k3tm4l6{;F 0k dž}16z|Šz<"tj_lx W vG0v[ؐ/>@Nhqޘzs{_֨ KZ.Una:aU.}oǎn"/$Kf8!Pw݌ϻ~6w&.heݍ 풾< 'NZ}74ốhGh:XΗų4d҃Bt6ȥjrӡ;`OiؑXL|$f돂_E𽥉k6#459]PqCE+9v4w`43Jf\Wۓ5a7847Fs)eߑm (%QLbЉ=pCiQ9P뷜қrr._0!y0 25b@*BGEw3? J&bډ 9zkW\lHC<-A]<0 $n OEqJ%4j:N  ٨IܴkNn 1#qBИ!N]Q$8T#(pד")p 'E!k1EF -Gh,~M@jdT A7+w87 :6m#'BqW6,n%v=^]~+mHm *waϣ nfFy%;``xCP+o9\Cv)Ǣo <ꃯI KEψ4x"~{b.RϾ pq^x9fB>_H X58z{ۜ_m$^#Llm~u</gXendstream endobj 638 0 obj << /Filter /FlateDecode /Length 1840 >> stream xnG쮯|ZAQ -z)  ۲IJˉ/gvwR6F >8$𵟦SoRӋDL/&&2̴l4$(Rp3-.ᄈ:F#*1hf5,UëЬS#1)Le@ k%4vL 66F7> f>h-`mJ%AK8UBk@N^fvBF:<褒e@SAU>Kr2:P=x[uB(Io=hnіGkmb!(jm\ (3&V?jt#vBl\%kO|˲e\7Dl(K;̿;wx#2f{f7BRfWnXgJ 7AFhE ׬[֬/=CMY.F<ߴ e"Be  K՗iԄ6{8VlqjDz0D.@(%ԫ+ jq X / d:UI:zwqK8D>AaCY XŋA%+zm": ڜ?uhi؊p1N" xF-ei6'Oe<Ԡ%P. 
Hu)V !)yUZ$-\"xJ@ƚ=ZQjhcɻE.'XW;ȫq@*V<)vKԦM EIb]j~BRfK @;8IHatJ%I=&-Zp\D/Ij*vMdDi;@M%:$F+&O~R hI83RʾOx-@ u" 6-k`io TA;1l>s]II},3J|e86-He[BK*+4;#2Bbn/c96/pИdH/Yolch}l-[K[h Yj˱3V83dv{BJ Pe ] v_#x 2]"H=r^p]$_L0H؛iȱFQKЋx>[;xEy?1)yj}ݱo p00 9@77$O,K;nnH%"X,yw#dwT-vr0ZA6?07=r=m7JigdX W{;KGZWOb5+О:"q#33lXi|3GDE Oͻj'YJgTJe>cGXendstream endobj 639 0 obj << /Filter /FlateDecode /Length 54209 >> stream xI6q_, }}硗 t0M^ Jd%ْng4Z(B;N-K?}>.??~'7Nʟ]7?~}.¯{46?ҷZҷ_-V}?Wïj,y"w?]u}߿lF7co弧Q?o}vZi ^R}Z ??:ҾkiJ>q|g}o6yT02{rFOԾW߽{s\ ?=cWyOb<-ow(~fնO?5-O~t ?}?}_|w툿#x$}[HG G (%Agh(9?`l|qyjRYWG#gV4JQoGOTmrfѿI 9o䷎dgʿͮ%-om͕?OI= ~&[GPmԼߢgN+[b: L}o][BZΜ;p> 9Suv"/O/oN-}}mw ڞE=cQ껧uq믱g ֑1a8{fT߳W{qck1^^)xjgEk Ɗ{r]Ϙ/뿮&IbYV9ɯys [~2+~}νosi.=+!s52$aΓfyy4<茧Jyd^֜ӝPo% ش[j^l [rS>aMh7eu2RΎ:>mVFғ<IC5k:4:e~$Ia>]A\;:fх0 SʓQ<$fyi]Pq Ɋ#8ŋz͹t$,/V5\z>}{_Ǖ gv3FGO;Z\kG}G#zh恵gz~ O϶Z/=/ӱq^,a-S99y3@{~N95 ׏?XJ?9`z{.m`iҵ6հ/uDP|?ㅝ?Iiƙnv{ gPp 9*U,3~Ӌ#CJ7F4$G5ƷkYxzrə Q/I~ 'a>QkNqʚۅuFdw7יMw(0gpc|uF{^1:[w~ОqHr{<{-yv,u^ {b:;}_s3=(yv~N#;+yn\Wq9#38NU#RǶ*hûg%GjVF,"h֛vڮ2~~+{Pw-ϣmăW%ʛ$獙o֟6<@_$g{=≝Ge;_O ϿR9blSj1ac1q%0߹>.N&>˽/Lxv:nsgnVǢWw3F}༫}zοc%G&^Bm"_j|Q ~`}1(ӆP?o8>e@ٶgT:P$GO>s('l (MK)Ph2l/L/x #_ U\PR{+v{:IG20_O IClUXw'Π6J_/T1mXe>$9/ v=8(xAI <(U-t@o{SgTR4|[].CǐO_F珎VO)Զ0V#jWrcrgӮy(ũ]Pkw{+\PI@žJʆjjjgZmA̳S~HjO~ms*cvGZ8vrcc؅_FOοԈ `<6'+(F bȑoYvy6_e8QGu83ף֟Rs`2'hG)u3U)kir: g iKn#⇳_/;*oW$7$8Ϣ/`H9tGR䨧SRGxI4#J%S/J*%KG#PtvRIF2vU!Y4;)t 6ZcRR/C;=h'(m8u4BR)iAr6tdFS2 ]>F&gɀBUu7L&:J:pT$a8p$#;$6ҳ7¹IkJ:ڷc O^1l9$ eBb9p* ~+%,v`;%XV?EwّTJ!94ˏYq A7pSH:&~gc[5sN͒Cwb0']0TH|G//v蚒3 X ?ݫv+bު-QS CS#W@Y\N+.O @x"g緞[\Y8#SX BGr,MDy3 _sPGg3H>=Ê0YTvDv7|#xZ"븃d9:v4W/o=61'Bl%Jܩ 88VԩX2!^g3-~{>BOy>͝=UmD<;2pPb9Pl߶H)[7MU=LU# $o9) X^'=@N8ݘU¾G>ޖw#ŠgLV?!MO1"a8J_gItoX d>@5\7޸ PS$G$&,KX)*R{Fy;(!pzC(*'EtPԾSi1;p!&wŖ;$Qaa< =wϞw$K:~֘XH7[ĎF%:88>m]j\B$i%W9$#hH&%%ҾHg{L_%tj%fdN{IƷU ]|攪s%:RnO sĝe3|&\ٓwEwy/+ͻ]SՊNTI%IZ욺ym,x?F{o>zN$ՙDC4jk+ȶ$B[yClq= u^Fx^L:I7fd:8F,0:%jJ$KY"d`ur]$rqG$8uҫbqt8*P$[EH6L:@zuXná1ڍpkXwB]3$$Pp>Qi]^2hYP'iɴo"-D0TzZlNgN9&p0{k)ͷc)αRJau Hd,Ԁ)'\eOF'ňfotخ٣n+@Y>r-%w1]s}/k)q (#eh%CC;qt/0k֨ãJߚ>DY+r!@D HN>=@ޤ0MY]n*fbɑԈ)o7ȑп4rKDk&c yn9|'V<$^ P"k!/sW BWZ!ED,rpI䞑XUTB!MU0CI\HŁs$a$X$cս%m ɊJ_W^оt]Pn!l).4weLL;t2 םcz&ky-7 rhjiʡ]mV "$kPNmg 9z`K,c(VIq֦"ⳃ+,48}+Бs.e9sEkD)CݑM;ë󹼺=WWKW͔#[KN&[}ssz2Ԓ [AjȡtO7J eɘ#^ 9=B䜇 at."-<oV܊_u$X(Sq !Vt svϒET=cr.SjĎjh} [!YadЯ ٯ|M(cp F܇@pP V8;4cc[ܡ6#vg ^ RL3Ljpca`#&&Rчg<%Xz93@'? 
%n`q HI d?k.,;BG=` + dXneFcpT &p&UB)KaGKU7m<QdJZXORtKr4{ؑӳZ-hݦ$}T+=/BB%q-TiH6Fe.fIݾ>JJ{,#TqmbUHG9?c0kfBa>`D:7aDFp%.޲uϵ+cee7KdlixB$N+F0uqXk2"SVM~ԇm}qԒ6# x~IicHS:n;Z޲]G2(s*E-QeV\O\a?֭ '6TeQVdc߰1+5VgWA@ت`:Zg';iN;Ӻm]-K QH,(KkHjxׁko&7;{TkH[s2`JeVض#j~:H wVbVe)hF݊Z!e*̲ ij^e<푬cgbƔ|ׁ^w]qXܼh]ؕ?(\RLbWʒ,n*$jT*S^] ,[6}55; V8,܊n.e[Uj'O9U#5pVhT_@o+ U[" 8_J5ITZcm!ODwnIH 0%iw,?D5XT ߼%IBjYӱ8OܔhՖw FP 4ύע 82]l%] S*;c5.%*D#a*03qߣ/ Xjҿ%SJyʒo/$%h4uJ-)^ `_%cKx$VsTWs Pw `sP`ׯ:=e9 NA9ܾH5qe:Nw_KL)P ke(_,hZU9/#vg50"KQD &jc$UҧA/Ikߖ+)}Y"}F:G ^kB8*L,#_=: %>/]QY Z:myzVVA qfpW-ofHz5kJۉ{Bw +j}y{R{ P}t+FX|tWNkGйoH\4aJTy.R7{ ?#ڛ(#W*XY }YoGJʠ8Rc |дdcHBC@כm E@y=Ŋ@u# bԧ;3C5:7C*oWjLRj!TfUjuN '$*o ;(G$S hE$~K.JۙII0K>I`r9/upPgބFsD<0#.oDUӥUbeXU]jLcE5XqcaB27зR죙!&#.;yq}Ȣ\e>ek]-gMY3$աj# ZCMɀXXICͷ7KPWYf5 ׉Dl.BWvHM A$zhT:Ve?g!֐yMK:_ց$p=+yi쯡:Y<(Z쯡wO?=,MRWuKJE:UwX/+p׻Fr:WjYǛo~NY* YIUjI B!GZcI~&$fq*Us6mDoCkpukܳ_>n{X˝?#CX&Uu` #}Fj[Y&mk']E<QJZFvBS۰ܿJ.>HVyb-芾{|c+_ Ub,2p^u< q7/ll?xs.ҽ#=7'EdwǍ#eӸ!8==pϮ~9f9#p$BNM rpRT~)9Cm:x{ ^2s,*kT)Yk8?>v$UTF"QM4Nqc!r8.0+k*N>)wK)+ ٹp9>,Ĭ#VyǍo3T ]ix$1DE̐,}[/-X^3C+w[IDYlkelHJ MS%b^:Jv ׮(kU*r8N#ʈ #Y$W |g,dӥ*v'M+ғ%˱{E&# l‚hnʂI}7UO!0G#hřI!xˌEُIŎɺ/Xc+II\zAFȓ*caBT 2>HT&yoVS8#ڇЊ#v7ȴ)`qvP_\US@5 ~ kIW-a!"+tM}M*A 2Gнk{$T<Zd]T$2g(~*~gTӮ}F㑴(FWmJBդ t$z "~̇zhIIVIGTFmؤ ӥMnB0 A2: FjJ4Qqp]L |&ҲÑToD|lވ)Hj#VH@9 skXȜ#")0Tں S27PDX3MჍ9Бu Fq'UGǞ|M3<̀{NI%%B"gdZ$sEP̓hס/-DmЀ5$gpSKEܪcXߒu o}[6{Zٟ1pqAOo@Ĺo- yӅFV>%‰o,#vKoxUF"ɒߋ85 4_H}V=F,?dQ'$/ޥ|${D)jp2ĐmK,!G/%N~H*7t@G )T|ei6< n6lܲ]%u%Dg["/#}7ЅJ_xڒL|KV<19%GYԭuz%txVB+yz,%v즡C݋"zg[6= ""$.^0-&sx`ޤ=Y"CVkW!u T22d㤒Ncڒ J yUBVCi>i@q8"+-?G(c=Z4F2Bڑי@Vnw1{W3_oRB\jJPK W"cHKWn@i91p;:z|X( PFL_rCw|^|k*⢟6tYP'*M :nMgŒ˴\ԂyΈi4Z4rүJѓu>6?[WT>:WYXDZwHrLӺGA/ Ԃ5! Զz (tJ)֠Ώ)iۭ5̋ʶ,^{JVJ]) X4fI`$HB<Ÿ*oN}Of:=y- S_Rvx%Xaȅa(0bLJ\l?Kˆǒo_=,#O䕵@hP k?"AQ<|?TEw0F1n.?'&)j˫Wo;gƄiw ǔueb'I :tp <`?"?h_)J{%=R'K"[4">h)!UШSB2QQFFi(!_KШSѨCy8Ey()`KSS P58BDQꢎjB2w:%izP%v$ƐMD T*P uTSAN<8B/:%l_psPp^uJ~0C]áN CfHОPc/s[\uJH4$L'QI)!`su+9Ho%QbVDsݑe0X)!Q'o3XԵxaQ'!HrDbҨs|(b8Lؗ뢃4܁3%ڻSܧv,.r7R.:?XF]G<4tl?\')wEBxxtZ[JK;(ZK.U<{qC]y8b+p )!Xz-(NǶ.ݸ4 /ENuF-Q:]Q_U&*^uQg\"j'>^F]Aâ8J:䱘q^uom3]u E.:hQv0X<{C9';Cˢn<, /=x Q,D:w"FAκPFJQϣt(*}z u3+P 67zbШSQ/]E+HO QvШgԯx²^uJ-ZVyШ}4zi0 7Oq\B X3X6Q%6C-Ӛ{jڼED9N[+=n$t$7H &oSBwؾ"DD]ˠ~K uJX# m{))!q0S-]v&WIgGmv&YbePS@y˒]uJz<ꔐ5')!}{_hePEDhA~D^:%bo[\풨SBQ^`v9%-즥uI)!/I_Pk/K.v`ˡ‹u9jY`ŭ dܢsnn$5Αu?#Eq pӟYg?Mrv?z% %,,%PUydγF@&"\g:%bj7ѠXOc*GunrtxO?6)!T,K,)wS'WbSOGOS] 8F9&P~tGK%j66S<0=G>+vlk[t6 t\ ZƯy?5ŠxE3{k֯e\^v b-Xo~q|,s*:-dr!O5boS}?(Z`vV51Uxӵ(G"i4f9+"yruQVp \guq.a"L-` w|trdC+G)JJW_"c{7]Itʼ럇8]8u?7*9F3F&r tzuu]ޤj-KyHUu,yc6gEO$<1Ub2%^QA":9ou9W\l\F ќA0ԼՀǗ8 pģU#_3$^F/c/1q/t/6}#Yj{ITi5^ëU}'㪪ܨMW eҧ OU1=Ŏ)).6"v3MªR\X5/=T?)Jct*+_EYAy%[NR>з./e#hsf{惢FQmN[JFs(DdKBy UgǍhE 4Z;kճT 5a <-fPa$KɆ-MխcW h tZNbb=jفc,yhU9 [ͽT7Gdp-yiVr1̓8dyb׸nKn04\tʢ}lz94q纹xQ \(f&`L/:ܢo3 _x.Vxt=TSj Tq,'\6;4.ȆiW]/H]%l'At!. 
L'օWe /tch8f!X~±#()٦ Stc^1C RX6.HLĖhE_>qglQAPL?=^yp Ċ$X맼^yດVMӟknd^#}fze]6'3[LBP=Dw ›ߔ7U̺a8 R1差K-=8zg^ 8g>BL^0 -2F0IRL9gd+bK%ڎ: |^$l.מ[ wqEVO36]( I)J tA6`Ըoͼ$Ztw9 uW¼H 0_0SZ #T)@X";0CP-@m~A$Qh#rql""!@qS".L-@;t2ݿlݸsŀEcw44R*R) (\)gʨCMHoqqJ9xإ*T.IpߋUxU'*UrQ{j៽CUXKUS/:E4+03 UY(*:l=K8BH2]7)C AdkD#ajn7y_\QZ$kUQl #ߪHZ6=eu hL I3i/ݕP/)yUY߫Hji\&8,7&-ZӅJSB^z T;+v%#K*=_UЊ g$(Ki(uzY=AAA GIr0CŅUӿ`N’O!+x.+GO䞫(nRR|$>V:mm+ՃjW~lmJt~[] KQ~TU(4-urO,z$"Pz(詖\%a %Hyw7o븵H\N3 U:p_$[3הQZRY`*f\%ۭq.$HV$FDё^ bI@u{"y  JϮRc̓*\(JF 5;rcBoA=^MFο>; l` Qoj^l~rV8\*F*H hHș.ο84q$ D_4s00"WgF5\Lx'PU@=E\E8,cRH)XZ}7xӃWgX$J^Oӎ 8w9#xH1!Z%[0~Pu$d; @?Gc7'&^4bZa9w#>}plsh*v\(X)[H3ɳ,͸M|?Fu -i)XdYA|.U-p$Ph`t1sYb8'ȎESfSLeۜ.u ]>֎ ]V* $3dBfoBPZ7m.eHEՈ6%1CTFVjjr~.O%u~U ڊEоk9_nZ]l58*=BMH +ULJ5GhtH5 4IٺH6*%o/SG`Ȇ\C5%&.Hd,P$\p(R C\Z)*ȷ/)f_}鄅3ıOs2uw *JqaPofuF.ySQVW_=z+$cG[XqZoLC+X$TSApʭj|$pM_Ci+uXM%tC=w& S dXwx۠"o")2SsEIk#X#BN2J,ieN RVX [Un)q ˞Q}} {9@;xjze$fi4r.ٺ$p2XUELryU,0UK&ռw`RȖJFcBh|sBRoVT/ƁF\Ƽ:5E : ]MW8^Ox8#alZ#eaQK0~0#L5#Krߵn~{V2z:^Ai (o8ozd"A߼ Wz3x5U5EPk%Jx m8Z9SY\8]\(z*Tϖ0EikXU H?~+>='m .7Ӷ9< )x3S;)HQyc:`B%U\y4^u$-FuM>_0c:9gk0|j^*E鮎 I]V?5]3IM 7$fCe5jg$9s.dK+ɔ;CҤ2V?.Kb8W%i4q -$\*B54*oA9fp)8DH>!H#-/"ld `lDrs %"$Ǚ ]헗[I c|@ f!A >A }ѯ 3_.GgV맔IbR ғ^ r>A$Ttx*e-rLPNB.S%j3M4\Y{n hqۯE'/uIwbL/Z EVSEVjר)2siѾN sQl1|w;i]Sn < Pp@KxIH=EN;.؟9X(J@0 *+[bpD#MI ilO#1(a.~ PfetRٌEmك(bɌB PpI6~ -#v7#Z$@OC&*FK!&UPP[RBPB@ASJ_fBIR$EhѺq{ 49 5y(0!`6^#ÔؓG':dx.<ؓ]=iy@ !%š'Ӑ5k ) pWbNFYIӲk3j0.|.*&@b.0ZQSqbG J3Ӗ|{zqcWԺdcHaˏJ OwnMaܖ7E3΁QCR4#xP*[ v2TՆC@rN9*p5ƟQZ[&>1ɢuxJ!q#$7xsJ~ Vs=^,WWD,3V)5@JÍJBdLM9v-N*P]zm A"<:(V-f =*UFӘF w | N9PBZa G9,A^!/H#@'_lK 28f*U>>U\{5>].ʮep,7/'~frT fo%DUh twZӝ#&1d\=ҌKaR@#H3+E"czQ]L|Z"0?i)*),UiuV \s2,{]&euU3,)1/v#ۦ$gM^ TLC~$],gU,,WٲDA)7!ÔCU,(p95N~,2P L ĴH4 %knKeP;9A:#(֌]̡V LP%%X1m$H͏ ǂfLTU2%b~J) rK` +-=@n3VC_/ۈ3cO%gϴ2-f*xP,Ed*ݘ<$l)߫F2MdTr0 3[Ot<8M%`3gI5S^vlڎE%g ;d eWu,xU,:KyXuNEePT ;)%ȊD V©Ɣj({&κJn-#FCpqߴȪ߃DS$Jt!Y\N9R/,Ԛ*xOf"V˾/&jX+(_YY]9oCU|V#FNˇ3oݔѾЦAQ]r\Z8TMW@թ,Ϸ\1]%!9X=}fT\ێbwԜg4$ |Exh%FIUkE}(VEqyl44K]Wc*-.+.LRS`b=_Vsb9Mr]6OS(&Kj~:l0GX+?U}T\1?-|E[j;[VunAΡwUݺݥn-E g> vo`3ZDj@mXޥ;T\ YjU\X`mnQ\քeq4QP]gV ?5cb^NI/X?~G 5j~0eoghSo.8Te%KDl $AFZ3uw)*0Bx d)Mݓ5)`2Wlx"Pa2 ܆\zO@8*B J~iX[fX2_J@kðYeݟS )Lmڢ[CZij\a3g e:#h$H&Y4H2%|$2N)H|V 3[JC@`FI5zdҌ׃$Q2t)H)e>:3obFvR ԀB2)]O 2V!A2 N7 lETn5> F}jQ,*TˍUXjK",)bx"!x'US«@}G Ƶ!?'0ݰDa')pdIhLC=Tɫ#H"**4+ 6U-YE,B@][GRVbI!h: + r Ai Tg=Rl I~ăJ!)ČՆ2 h+BPe6o^HUϮ7T\u<r5*:C5bL`9[KMrPIBmrב &1HetΑ@,!hn ((NQ/ /ŦP_nEeSс/E0|`|})<[b۬.EM# ${.@Ჷ0K[y:a"=-A}x/ 縺*|=*b|^MԛeWg*~;z͎"ͥ t Qi9=~ ġdv\>I`pl)>-^cO<"K)~>yfA؜ //<'M7.oU~%fKa*Ngҧ_*گyRxuld%KgkӪf^mzWp9/nq$ >q^>f?刯2l4EGUJx$ hS+kak?;KK \% LdOv\zi5ZޟNX.Mߣ UEf\~@墎P@j.PQaiC# 6<~=farn?\g4(qTQql4m!žʀ[wko5bV>&}KLmkG']ms P6 cv˜nj*=օYl]q.y3E{iҚ6ӿ֙%lش]GqbɒU(h@> .z9pm* ^BhKN]Ƅ\[!|O'u2Vvw?uB{֞ aM[x{uWeV6d ޮTx/'BT [RV"1 hs6CEP "*ϒoT)Ņ#\'vEk7kŖY}oq`ƘӲ!&rV(P~++PަiK?m+AEyЕ(޲b/xKb֘;0x[cx(Q\'`?͢ǣ{n;.<ȝ:t-E01Es(xU4wDL"QcNWRPE.qJ'˩xK=NtA `';W t,fX:q6kf !qd)Psldq7O祍=FgS]cs2sӭ 峌3c_ BiVSNgF.ݲ A3eXp=.s֖PC6&s۵UFx?c298ո}ҵyԒ{Ʒ\@T?k_RΕgJC)*P M>qe'Alڗ⁗/PG\4W밶h*V[tX]̒Qq\\3iPƂ!j+GZg !@*ܔi /€TV@uqA)kSx 7PV{8dѭ_T$Jk]q3<]Ŏ{ 뫊pBqٸMsSY UQqNX:h|jU/Nܘ[(&/{0@,} GEiřnɣĿՕɄ/q(+ ~<5e7x@/#H><륑"A9+N6 GT7s8 =W"if*..g/FXc0zfM3e~w9}4`?3ኅ[Bqd+fO\Jϖ ;-AaK ,~9&.15Kp*kjPvXrqm^Ɣ ._Km1ҝJߖ2(ܐ+VתoVǾM@v9CH%P*k'ڝOJob(0 ,qf(" '3>fl:B٣F ^[5WdGa 01lcfBzlUN8pRO6codKD l"dZ!u I4WaCQ؄'˞A'6&0JA &i=c۾ g Iwx=2MzڲN U +v&[I807nX[֟ULw@Њ=wB8'Y]bS} +8ħꎈC|󝦎_vn'2~fN#V7btr,2 })4abX7:_Wl gGqqR;sN#'b;j^x|8߯qVA\=/8d/*1oŀ욾~B֡S[(ޠr$ߩ8OePQî.lCټG*+[7CX'!wA]ૅs5q$M4E_P P N^CXBA1AF(8,7ȒY qϡ59.Q ùd 
8w9guŢ"3NPAIE\͹Y_ng=Pô@g>xR3qq6?HR!<%‹$VMVU2qn֥-PFtUŊ%Oؽ\hGbҫn-qe5D"UݟR?XVg-tpg-tvzf+83]4lقN 'g{3X^۫h1\Gҝ8ky,:H88#!ޑ/-f6pb0ۖJ>pWOMq)P%F?ݾd w޽Qpi!k+>%AKе-T]J /Yd/PgTp7_WНTTm[%uSq|xFaoo,bKeZ4٨^q"/V_\Me`W7yIGQuaaay= 6,ՐQaqҙL8=\TqrN=1;uh)1 - (*k!XE.Q;v,EUs,gV؈n(3 {WUA.Mòe~K[0%~= nb 7OWՄPeYZ6΁pvN >օDwZa _RT;xX(/q?e\7E,I [:+۵&VˇI\>Ձ)EQ(ӟ~V7+@uTT^mRDv%\x+Aqx'.ܤoeeQ*SP#ҭ fTȨHqw)o%N\VCadϰ MOޤvV2|~y/!ClYSWagf 3F݇ﮘpriڎL;^~*qY}رrfo]oT4ߠ5,w{.Ae"s|;Qfw ky>1' b H8q! % o$,aB?rzrʥ= gJg 'wP,MxV;q=vb,Qyv~.:X}X^~xv! 25wI(Ž%Qܮ /,]Х~WqϒK[YrOpD\u%_]h7T׈E}~<`4W[vqU劰ாJ/0eUټKk vC(B-N}p!@p~ ,T=ҝ1TK~_u8IA]Fd~P"^D&̌i gYfċf;xac]+CedUgˊ 6 /+9BqHih4@qhMQMwh%MDju oJBjwܑ?CU](R2]GD4i}r?xcRݏq%ئ>K0Os5T;p+?*x:szZ< VXm6Rz3%\6\"Due Wl)PSvV\&n|'C-Sn VL]ADZ].E›UOzO__(L26uZב+)!gGL#QiA)C:os3dkF!k5`i&^ M;\oMrS]ѕL3?ԝGLD2< hNۧ #'5ݸa9; FIc z˷?l&˾{3=\@ULޠgic4 Zoޒ "\Gn9U~m,Oڑ(wr5tƊ[Ҳ[,OkG^435yf0:y,\y֝5f˪1b\sM T7?q .7޵P>F/ ƞkVr $}#S$El Mn;/Ax[u&ys3%8A7h6?1ٲN aF<[0> @A6WZ* \COuRbТ;!Dt!X'#`ۻ(CA ؆NP82na#@& ϶M  CWnàU=db *獙!%'BPl$G1`=&EM(Tw P̔^A9z)T3WηbKz({+=qˏ* ,?ESQ$~OOWقƨ Drky}=;48#%YaoԕvaN#Eg /iu$aFl>] 836ܦ$cVΙ 24#J| +W_( ?dܮ+pons(RHȽp|&hsXRى=d%`+}#W$ϟ;hΜoxTtSsmS){B*kj&s'L%1Į8Ls+x5f.Nu-%81(3!][VuI1dW@hjqlalbcLece~v&\[|ld]k58ҕҰ6peuT"{<cL;ZaUlE=±|Hݝߓb #O2mnTueb_+Ow"LC؝^FplW)4 Np#vPO;. 95mJ4P}~bch6\bMוF$5w&ZğLC1<#>$9,ڕlb+]X.3 Cff  ߊ "O'3gX1XD'=Ў -v]5 }'\;\j7l,6i>EQO;n4%Zk?\U1t]w{~"u(;?i5x%lj6ty֥u@ ՠ3örᢒi s>0R"Ehk07Z0W[L%j:ʹo6P9M%)T9 E/\Bs~6䛷emnvv^򤜝#/u}tUa8l\}?̍lbO6(Ž3|HR2@q#{mGl5HHQ$ xS5ԪRȿ0j7WLAa:ds3aY ے4 cwRwiA3~iM\\Š2M[3=f QMUBC?=/)7}w3 b:<>hK\ۼfv"E2n4I/ݩy R^n{ٵd\u6I84'ر~yS @J\Yv @bEX*Hx`7hW.@HLc`'1%{Pg &bƲA8ZvCAĦ˛:~2bm-D4Ԗ'鼀1onkжl[,P4]󝶑Ú6W:k jkbWPf\k8',͚FowO܂ik>`ŶܖhTѾbgv$S0zP>RlߊK7G|k=[iYg#~M]Vi[\_=a}le8b',0¨b S&g?E);Bpk)!YS} .s=$r1LqPJV/b~EQ=@/Zv=2&Ӷ{VQ~ M_P Ng hyG4W,Pv ^{gr ev:eҲ;3(v[WK+' dI- H ?3Y [MU?WBu{$]9 sSu&l ~8]Xh (YMiy% ;7=qh37.,wnMZDǫ;>^B% 1ܹ{ adkRˆسQ7$GO;>n>\xއn::7J לGPTwTjB*_Zy ( _#5M^}cbHZ H+9oin2=vҼ.C7~oux,ЏՄL&' xY''0 mߦP]h9DlgVyS"n KrPT:,t;8cX G:>Yj( OX^k]qʰOla?(cN$"6rzABi\wN_!,etψ#?#elkJ 6q(P6,(v \qIx(+)0(;Vީ'^UB~ӞMyAWgiϠ353Yڒ!ܒOmJ5'X֓miDfj!}a׼뾼뾼뾼뾼뾼뾼뾼뾼}yݗ}yݗT^^u_^u_^u_^u_^u///?.\u///KEu_TEu_TEu_TEuϋEu_TEu_TEu_TEu_TEu꾨꾨zQ/+Eu_TEu_TEu_TEuQu\???o}Ż}AUo~byٿvS*(mka?JCIcdVI? &8D3i6*̖o3yT%PxUCaFz;KY@FFQa.HTYB!ܣ2+Qr9sP-r8Y6NN?JUk:mDƙn#oЄMT1L%A(0:(]/qb^z%sa &"NFV̞RqBJfmͽkslnoܺr摩'f*9ؑUۍN*4ȇwDns~@M3?E4eBh~ajkT>.aXHCB2C T4UyP{UIjL\R͉ݘqʥLr-#mVǗ(GʠzLD/1€K&%*_GqoQʹ徽L~+_a|F J61fxZ=|ݶ (UDe4C[Q>JdgɞAdzHÂB1Q6_}O' G-; š?=z<|o>qyh@z>)|?)rht z=1F#/~j.ț*?e,Oܑ[[/tF0qV)8Oa#bOybk}PzcH`NȚ0Or鷪=i ^~?@P2o&7P4M0>t=\-3" #._`s$}(eS)]wG2X9-)_rڠ\z|0T?t*ڮ*iq%?S>R)eV3bA KjzNeIiVbx+گқP(y_֍¼x>9{Mg#h=^0ɥp x<9䟗NKpGt)ϔ`J~dƔ);S)X&t 87'ZFZc,ӗ4u.NOG_.YrK1$(rFGOhD[t<Ŗ%(AMlSê6h뀭$CPjK.3  (xgw+ME"#\[y[z\N[ѮAY>hmW=T+DL  B;ŽP;-ksp+G6rX kG p t'⎑bJʤCiH%iGfe'X\[)1"X[abJ: 텅NVNh;{K]NJ= ] GNbsܤ&oksS6 UN[3F8edfa/&i{5!b M(0v[g,aAyKWl5|R ;%&Ttb%-!:\ ܋NY%X 7pC-eSXN3H͌8=Us!m>¦{dp>,5FLH\`1r+ɦ'9U_#p!Gg"}*fZ3wRó*lz4GXT!`+DTw@sNFmCkj rK)X?[/ӳlmܓN8*@ZcE:1m -8oǵ3kט׆{wGu'|wZ;_tJeDH;oD( y̓]% l? Ů (eL'!.=*pӅm8t+G [GFh23pq%G;xmDo*U9!XXNilcƚsd44#~pˎhRX7p=49KT&Y2.Ŏ}CQ'(ۊoPGn|+*t~+'nApKe(v<nE1#J'y(~ginS/Fv)B0=8Iwkg~hS~헴6VTñȔ}zfTF 2lVɓX6ePi2 Z1s(A[M+AmOlkk@(tLr;]0Ms3'$7yeӏ<̀M/R~Әpg4W ~HK0F`@!Kv(l-ǁU*F8wnTNʌذ]k`h=b5P"ssTύ^>S 378UlG1"L HvKס&Ay*q]!|DP(rt?ՈAO2BG:MQIۋP6@3Cw-I bo P2T"03tEXas%츚L0V@gr7ݎ(v;`|ҫ7ʴnVZ؟Vnv⮜s0s$s;JhV`  >&rKD}ܗ?/}ܗ՟/^"WKDKDKRx\)/////////DKD}%ڻf??XR,ϿR/QTZ@Jh1tH* V.vPկX@h֚½j6]nQn(lŒv3J6zycx6O#oŎF٩'hFlYmcjؼRtI+Yj+yO =abE;V݉(f~"<KS2fGz`SݔӦ[V! 
NoZ(-hNPLHbrwef XnaB _#buEo@k`i^U XWI$ԉ*V3v]:Off$GKl:qI[֩LR3)ٽafoiF PSLf` N6#۳ٓњ%Ld%oOez|rOiNxSVf7=;T y`OYўNf947y+|v -9݀[r+ 3m<:LX )oVUOۮ\D[=Ӳg-O[G%WQoDݼtXz3&kSUhR O\N;xX?@ix"qHlC 1z&Hf/z r$HTSio ~H}S 1In]@־!t`cPŎK2;0j>˾|m+Y%z^+pI%!W{l!1nDplWe;{$6(!zMy/+awG0nWb2Bd)-|~E0ѐCbV uSF4=Pf2_<"PLVE?J9*EZP̫VɖBuJ d~TS,UGn"Пj y?ZՀئozS>:tGcKvWar1gf:SSmW_׀"'Xap@,q'Hd)p."P(yX`/EJv]X0!ˋݏs˞)YD.NÓrpbpP 2nJz؍{Ƛ?Uww;==9 7WME&|tMBn8A+p$=<}̪,lc؂5zI5U?83rL]l/D}#D|t(y3 [zÛ7츑kpkr48f %LEsM9)N`{B'^>@kfܭك g+u:u?)D;} $q춘(e-Ѽ;[WP"a%(P3&kq ?r4FtNhwUO1 ';c~V JfE"y?Ts&9t9+< 34D8K(=&ʹ `f+kbA3~jn1KstL ׺R\ġ%r-:6}NX1Вa'Hwyבx`a'F8M )kX8Lf RN)8h&O?dV#DŽCP%\>ڛhthoO[Iָ6y*-P"ZbFgɭV {Ԗj %[X41eCLYI Rz'efu+. N e^\tj3@PoҷMt$/ eNMۛ`Kט>A(b=Yš5K e(;dU;zLd^\>‹˹K"yszKJBo@\߱60ْoP̫mDKmkRmg NǡwKr.mImmFwOqz6Rױ{_tݷ=3@~R9{*^󬸁^ -נ;aDڶ&z"P. +X5,O>3TWOv{+BٓP`+]=AaV;/`m7I C=9O0p 쉅vSx=L7pjC+x E <˰eٓ .`{b/%,0b'}-\KI1oہKU{0@V27yy9K{xI^ȗB[OS"Wdu[89>O;$^WΩ79/'շߠ%/NK5sbI{0ȒM{פ3]rĔ̂6mE~e!+KT-îdqtIdM.ũ%|3<#q8mf-)?a][IBV;x Oӭi[k1 KgҲ&ab NrB[ *VgΘx}ͦ1& %Jbh7hvִ?#PLIv)I?qmt|Q|>:~MV"{QO}D)] pqq0 ϻ{{3ܮKm\>cH 57 cH:fܖ䫛@Jl;6VUƄ#Jlk1D`3f14Rg5^~ UMDxg@dbY h[;è{W",K"D%evRƱOE<oT$:AsW =MCWMN[I|+آZ: ;}V,چ=ibwohƨEK>M> ?6MfۇմDLŃSCDf1U3#ψ@~%:^JĎGTxU1<"{j܍eBqw("Lq>J4E?9B*Q6kAT*[k<,9&ԁ}e%~o< BX-@[o5aC9$y2d2 9ǏRtf;梏riB=uxM)2D% |0\X_?Ba*=)e=[((iZ(+3i)Wa'||C* ϟ5l-(Ez*7!P~rxDGYB2c#CNC>J2p|xGq{(_[P|OpqR\i=+Z?X>9GYڲ}G=M裄%ATkAeVSa(U>J`?X2 8lXN {IQ@T.}O!Qlw?~ӫ~KF| yB C9. RD(OrBOSe+]O%Q/1)` {S | "7|EGZ|B(.e ~ӿhd.wfH>GZ|4ғTGl0*V"NA;~_RZ>mXQ!;[}hlI߲ (eE9[El/,ܺr;cxtI۞alN6[T>$Cf^B4=LH-lf\4RSu?\Z14TH`oYkG kZsPizekUP5>M!(T-޲ב3k9.oE9:닏s)L^}&uD~so}wy4)q^Zs[)1䷥ؽ DCYF*ӊ)N)hPx<2W$c=^ݿ1w[p9Tb;?J<=uq+Gs7+H\R8s9O3TW(DcnkVxcdqYkEG(=̹څqa[ءppc>DɾD-E{*\ 78{H9zɮDsmoœ.y6o6r6x[@OGrhbH%K>fNPӿ'سqpgkvliгA ͩ&_>4 mo?J1Xώ/9hQ,-cOmn]}o!/ X!ΥmN(Ӵ]@<'Um5˗¦bX`pgSB9]5f0B/]+qa: a:CJh-e)T3Bw(-0ޫ2)+Ŏ(DZ/G#?0W!qf! 2x̉}xG~PjU,NTfQLoP(,X(LqZ-{V(e)а?ۡX0ԅPmK9pCi^PJv+8<R2~LRka;v SԯLjf_]Xb(=TÎN]mp:?8o/nH{ܤ4*%ë38[oRlm#{j]mOu"cC$a njd㣬Li>ʎM{LwQSqc`Jnmvx,)E|a-!sf *;Q0W:: ./ÝV ]&>d[nNࣜU0ǡt^o}q׮=:RQJD#HTldm(5vdW}ċ84PAм;˕òVT2OV-';Zu*⡰r8 V bdžw .V TC} ,7xה-ZlgL+zV-ewoq/_oʓ%=~mayv`WdGsۣ #9]lk#2"ֆ[ ?9?Vښ3{kzdMT Ͷ#L9PIQ"ayDYɅ9ݜUlC=K$Q&ϫRN+r&\E{|UNJ,Y@a4w<蚉q91jzǰʔإLCVRͦtbtreokpwOIM;8Efiơ9z<^ l#yiƒ*8YU?;)jcGg&G-j"/.ZhB'T+)c}(ۦ Ĺ@fQ{mզ2]+3~^P W;];*BA[@OR(a;9q,CH-].g{^ÐLg ׶NK>[ E_GHU Tv~@K1\>Pe*vCo]wh~P7+&&rl:#1vY`pfրz|RAk0(I{vKq{@NѾa=v(['=.G"pYyenEдoS6%jpu{hrKeᅠԩ@DM,m/+: 2DtzX*tZbUOS*VX:%lb\hvP4C2;>vpʵjOČBd`syJ(A "e[HʶZ[At#I K!jC+`%ٕ۫Aj{+?p86unGu'9„Wr͑:_~EB79?Q_&..'<+(nJrHey+ ܝ)TGCu8ٝbʶ%9k( .܌R՝=>ru)+ӈkwߥqal/Xi[=&S\ܡ^ڑəj*]QtVѸnHfE ÑN*{KH/``3Lvm($؍jí}4۹ -96= e6}C86{ vpsT}B ]ӴW1;>Ek>̅V `5!Yx/K*|* I2,Wˌ3яt㙸j%JnV9O)Wή nz;3 uO6|z.L0!O[XߐN8*"tr$ㅖoS\M3~SNnBӤ\T7n/8V0 ^T2t6i̍q?XgBg<gBܩnbL}]eֿn+]G@Lx@LSU7g`'YfGoN `ºȂ*4D AUPz6=竑%`n]|F8)#> mnl/[mRJ"fwn0zak%G=ΰ(8>(< ǐ&&H0,?=g C8q {+|ь f#e%|}ls=`Bd:o/\o{yuSgwIPbf2″ ^'m`TyscFsOv9A b*2;x _> ='v)I`CsSwjovzwwS#{<J\>xg`F B+)4 o L[q5 ΄t:FK>'NP6b0`Q\/3ވm^S15љrCԠ# èYMXIMӌ``k3v& F4ePh4DoӺJƸ.eP 7>ع=W#a_x*# p߁oP%EuQ FKfa[xG6-`ij]G0Sϒ b45(1;ltkd eSYØtB'콏t#t&GEIW)8r 0;Y.M!(4F a=FR8׋m cq$m {8& s=Dc7ϻ=8N~E4-&gn gԝ(fjv6^[wۄa|sہTK>^Kbç@9h2#.G2ԥ]آOC9#1WNG~Ӈ`jL*k8K qZz6j  -g^ҍ5y26-nb4`! 
sǓ|@ OQ3xg};H|r&Gqu$=n;|&: T,0w{,R6&7nT/` ZvKvCSkkWLG&~w X .^v™석mk;;G8s'7ؖ xʮk!w0X1Ϳ43BEfo0ԣ\i8(),ZSsGgH| *?x)x&48Zpeg+#\5oV">]nMA%E–QY*.EBqK[i|eOz(YJOȦP3؄֫%Iq,­Ekč[H GkՎAC!:WAZ蚌OQŦs;I쪶SaHK|'-'pxr+sÙƨOz" dGy5U E?\bֺkXpt_BΕŭFγ%dq* L(LE5ފ>*&YwgwIRsgIr3-(74> stream x[[o\~W?K/AQq"mj pJWZE+q}gH!Gn g8ù}30c=1?<.l?bGJdԳˣW_(I|!쭓ݷ0u0wgnVDw;S5LKc-3-zyw˹ l {kE֩s\lW0x1x5B{*gXx!#;{'efʗd1t.,nT!]v]bz};>dU&Ùz# kE}Kwʎsݽl2v0f)*Oo,i}/;9^uϏ?Z(g {g N `k{Jph ^1E{)L8E v{$p]EV2t5v c4`o0җ)ٜDZ -Iv F_`f7y:wycI"M}[v'Vx(l).sIm8AWqހѝ$Ov"kO$GC'4pW$֢{^"CKWlwG ?JĈv )0n"}@& feJLLr}]i g9Ș&kMyvA@R] 8pc.{ 7Cc=\´rpW(Z n6ؘKqFiMWEac .ڸă%h8 y Qx0,4 of@,y)[S 뎅~ԜFGQ`ԂH]*jcU? 4Gmo3 ũ2O2'u11 y8J,#~y8< B8YD-Oqz q6l&rM|:d`k":P. y8HFUB &^w,&٬QBA50<@D݅Iɛ0_3@Z4m3MQPͱ,9KU2Ltf SzJӆ3R@C#1W3|Qd.YIGs_>]n1 x\BT|Ďp +@FQ0ā"shF(bi8 %*>j[TR0+?UM2T"lg+(~DHvyGu50{yP9q;r=DT 7"1C<_D,0G@*1ChI]H'BhfN|xB)hJ)QM`UlH#,IHRXb@myFq]"8uY]`:BCau0.)Fњ]R.ʒq˛te )u>zc' wyxzb /p7ylC^An)U@4E"2U2Ævaк2O.2(yp/d gk*<fs +JC\5ԸFzig.ixVp {DPN[iy=VA;ikiwKD.zШ fsY~.UZb6)\ £AE=+W.b\V&@-=MHPiN{mNCAvkTGሓbД,TT+eoWF#kl@IeĆ f(5EcŶh֔M@ ͯuͲ2nvu+QG>?5Q,khҬw3LWYh3ĭ լxjlBQz3eA>zs2AY-tyHS1])chMby2ބ6: b!/axwyxDͧMLz\K(Tu }sxFg* ;*leehpVS$ڌHKqDɂv>T&C&PCMcw Cn>TUa/>o@a#tJ1%:6u2CjAh$4SB*;:=U_P{P1HI?=Oyt_/r=pu==NڗD]*$Ͱ88n9+\rlҫB+a6,KV`!I\o4+,͘㯎 @ %= URdY$DfxjMs-yhQxATk=1 Ga(풇[5)4%-vH<æ]>,ika"Y`m`! 94ɢϣ|hz9<.9p$&<'?ۧ"W(,YrlnIDC'հCb[dl\K޳q>I&G}Z}">>~j2y>Yk*;ɳom3"|ܭ.pbtf=l鋘<\6g|iL ƏO l>{Ez/ C#Gg7G$wxڕVn-<b?("g0W#xs=P.es&AIKж.~[M1هd[9癱b*MFH&4vtue$<O^yаi}/B1s3{MQ T$o?QI7L i? 5ϛ[~VV) a,YniS&[)U?^) rEt^e!㺹%lߤ𠕑wW+{!DwLMH ~AGbPC;Cp8|`=ĪV _C/ }m`#e~ vXջ{U9Z#QZ^> _2݆%h:\cJ Xq7k&cBڪi>^i_M#V7+B:껪K=_'JĦ{xC>"}ӁzzWW`DYqӗ×>'a>N&wTgZD *탏|񸼸Wt*M 2}1vRo- Ws0|oY|k.v^Yt$2xaJt>Sv.[a%]H)xL/(4v1Q2K*6 k N!쐍S[=u+~/gCPw ~ܔ%͟mcp:]Zendstream endobj 641 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5285 >> stream xY xSe>!4TEs:*: #.,NtKBi%{r4kI)-R**BTp3O<̟]ˌy#Y6sVxAa ~7Iԃ=tC׌>_ԍ%yD>"x|a ~,r4\#U H 8paF8 uNkrJG#$ En~6d+,"-(j+ڸVLKhjz(h72 i} g ,規x=@d!bQ} =' C\m|dFZ+7i!Ԝyv bujTb1(ml: uv_?j o*[9eNܓѶK]\ RW8,QU'NY%JS^&I窭RQE=/|q~hv۽T030pDjЭPMDhDK.?Ĝo/ Îѓ5}q/eoYmZI#|fO%0qCΡs€~ݗ(~ K~2OCƠ$+Okp ~xS;7ݞG;S hUa-8O̖HZ= 'c^ OME!+v}fׁAghV~*?i@Θ{=׍=B$BFa.#(sql5<`707~?,e I@5ٙ2h- -]ݩ[[[οFZ{4śvO5oͧo.yú4x:ui+ r:* `_|#"AH&KR^xji:v 0OBppΎ;qFPfY+k+Yfui*Jn$/GzLջB[g+Z7j WJ,M5C ksVefrsX UE79~Ӷif\|Y?jogb KffatyFu Ȏ|Qh>j%zϡ륌9YrZQJJBĒPdg9:ZP{J-i020j'Cim)Հ J:( 4f`0~\@,k"34GS. Q?2 ޢBdnvOE%v閤7)U[B!>*TtU(Udfu1#dhnz#`e/<_2fPvP]\LWMN0.h(1W+ZΈ['+Mk _Ώ-:nmip$BS HɉUwQ(d~qJquD,eoZ ƞڽGgpՊ V͹ᝲ[j`PT}4=@biq8򾵀wV\Y!ޗ?'xI^hn.}9 ϧڤ$2hcN?ѩ뷂r 7@9YW*ؕ#K8ueOrd7nE8x7L]9xzGKQu>UEB&!Y!:|FR SϪ,OjY'vT-Fjm WGbΠ(@RW܎bo-z]\.W70wLUl;/i˚tt3 XR+Ġc (sNm6\pTXZ*kv>~:svLGt ցv&ou5V5%7d зؐU+ ̚Νǿ/K 0 0R,(!{NW7}aU7Ri9 fE6,|Y dМzhAPʷ# ?R fШ[8x-l6ѢLFbX ͜4gdvntcE|)9s9Hp()sTFҭx6ݏ=,HSUT:%@ d%[_Õr[n`wwAxk@-atjYk͓Bd%E͠=yn"jIn-zJ(TV<@.1Ma:?xF[isW2 5 gHdmzSg1QymQܿ`E :^b!$t9B&eǭ)`-5Xé>3-Xeas]s84.0%|m-aCfZkºn!YdlT(ޏVӓܱq/Է =7˴W[:iG܅U dO?81is}4F 0XX+Fx-⟸GڈQ!*X-:C]G#x\X2: %R@G^z~aAwApyVsy}e7T)IeיryAo=}Rh@XѪVJ%SrnmǠ);1hv3pL.D'E1*BqA>Yv6;&r@}A yKts|uɼm E(S mwQn̔~}T*rrBR|ۓ="gl78'GJiS:v`euP%9,^?%&bslj8uK9%Bjl5D Ўc} 6X|o05B 1Qiu)Մq݁€Q_>C2X㠎)("PƤmN3$%a;A\/cLo. 
U8hXa\CqEXO]Ї*X*EC -5T]kP={/Z\w8~o7\Szuuĩf4NCWҨlڶzۆ-Ժy|m n_V0f\?'6b/Lt頄B ?a3ҏ[5 nc!j1,S^dCkVoo%U` =ópao)q?Gyc8Wzʶk/OCwKe| F D($;DH=Q`+4wWP?:k*0iŗsY40jdPL;U>L}ydE x``d⍆.dӽ_hm3Bԇ݃R7gՁ-Tv|SkEyR5c0(bM5[XB\Mu>j_{"GzLendstream endobj 642 0 obj << /Filter /FlateDecode /Length 5801 >> stream x\s$qO[7V^܎!S%ˮI29{EȍHQ6׻RK7ZbF?~RLROJ]>]ۋ/$2s}+b-4EՇFM䭟Wo7 ذQ iZO>og#a۝r^nowb1o774BE6w!5o^rxs)H2Fmޫ͔F`6.G8iuF`cn mPS Zol0_z 7@f* Eq*In^Et9H"#~"d:%\cdd9i.v y5ic/y6nNɂb͎ VhCJmI>immal pU>?jɂW4Q,ib0' ?e`*jƍHv1Y܂M?/ %FgLvg@>؁C}|7>~ >tu%WǾhC_ǻxw|_o_]ձS}|^=G6BങցGI@r\yǗb?^f`iAD/ېr2ZNiE6GJ"PșuJ_ZI?I Nd" Ah$=#+WG/l)>#3kthb p˲gNsٹ'oDۓKX%cšFoa)h\gfc#sXQkܣ"-Qc}:8@ =gn;.$g9gT/ўҮ\ pX &(gik&Mh cF~vu#!]BsH䥵hn#5<'e |Ʒ8p2Xc!VR@!.X5#n>ſOCpSNc q(E@NNlHL47tx{~/-鸆ԀS7 ;; i_-Bf 4ca@}gFb tyo\^JK ḃ061K9"#17/(,eTMGtfR;{v2VGWy᣿Ӓ:f p,\Jd` ׍~tM^c~r rMjWF賕;%$V6kD/j}aU)<[0Y6n2ԛ ky&lCC+=~A0BA-'ݧ*!XUxbd& 2*L#,!h %8h Ӡ vʄ$uR.([eo7rO!t.NBCS^N0KfG)F*5[a.gZZ LFE쎊<ZqClu<(7:A^toG݁՗ٜ)Ȁ6k]~'e0kbnoT4,@4K@-*A=@#}EDp.̛ߜ8#{J~B~6*T ygи Oǝw:4(y=޻#HxU1Q"w' -IQȒ__n0rm)C:FrNEcB;l3D^RaѺ7-jvFft0YtV= gBAcuyHH1O\[,ThA$Qs"d+1@)%e:9' кձT * ξ3SD vyM-Z/TZgxPq[OۇB4zlݮykm6TLnrLf؋/i:BLEo){nmPŮ2 P'+;iN䝖$Yva%aђPLC.tLLaPO9#p:shBz|#'ikF5H)T3`gf`v%8PMP)ΡwENbrl#{(^`s@6ÊA| Hc}Z@.?-30 #RXgm,.bx߁j>f4|L'>Afl:fЄ穠$'PZPZ%f|JmYI.2`(:\wqxmeCசp=|!o$qks9 V=U\sDK;j`y(9\JKts8N %Fw;Jt r· [em3ÖYko@|JMz1~-{{p@fƖSԒv2k[=ZZ)M]-T/́!890>v4' #Ά0-H;H ag:vx$:t͒y{p[~;HX*˓DgZhE`1<ȒpE[TH>H[0$2L-uOS:*yiSgr)Sw򒆵a6jKwKF?4`1PNT3 q%6UA^mGn!%c؅"<g77 uQʇ*41A,,!'p7ΠTw}>VȜ)3De1n6΄(,C uw1* ]U$(n*<.pШ t+9ɑxtaQ)I5-Fh(jGֵKΙPJ=Xĉ<:%pYy yP;okKm~~ ;%2i>DJuc4Y1s/-<'onIb?c~>=v"c.S4]7NR,{`ti\8G-0xy%6 qI S[8@ߵQvТNPY>Kco=QuaK(jv51b^6$@bu!v#rxG7+ nM즂D`M5oqACc. Ȣh Cpc_zWs:y!е`$a׉Q<͓.2Ԗ1j֜1ɩ$K59m"kt42D?W9q2MhV{kzS^ jT 軕zό?>w^.?ܤF7'Nt>fĦR:V;&شtoExh.n/>IyVͤ؂;3JN̤dA`oӺ+0rd9WΡQz1דKeG#,~w (q5ˋ\K#[GbS5O^Fc:Nho}9"A:[YS/KBZN&Mx}xn-(Xfc}$ÛG5ⅎ|e]f[ި_mUU`f&ojx"u:'?,Zrଢڲ0xIm 幕\pR!m/u}i\[pt˴o:F$㑛jb\t5_Yk^ύ%s ӉUV×4L'IJ!9:BtONMSȶf6z11H2./]) [:;i,syjSϷ&Wh)^q!2q芕am^{góR2c3vLޣ8q)TtY9~I;gbQ4jE"ew{L&sfo=qr0aRGH,^: d 2{$S{Yf1\`#^Xfq(@^vzt\ Jax=e|ha){@X7Qăvj(g]0ۯ )ls>$=%GI\Z|d?2endstream endobj 643 0 obj << /Filter /FlateDecode /Length 3423 >> stream xZYo~-K9X'Hd`&^?P,ɖD/)rTuLU$X,TQ]WDr"st{$&G|;!§6('GyHZ?yۨd~yy6 дHJ]hi3un^g#y7):es2VhmViB5mNX|Z Y?ƵUl8\(}rj<0.fq#PB6z"t:e#?ͷ8XCF3 AfqA+_|Ty83'8ph:6| 3pE / @eqyiBif:bnjLY_W$3F6d:j P43FNfڶ9t&Gs=4̑% ߰:8N|A$uyyR=^=d^S}ܮ4K%eyqEl]47,haS[/Gs"Yz%q䄱}YEbe Y6UUǁ*u_:!JqIR<#rAzne'JG]T'tL³65a N(]f&J$P)֑ȭ"1vĕupR d TbZ\6/!}d|n 8,,&e̍U1'4-Q>Zb$2N*q&آs:5tٚ:27 .qw/j'aeB- Oq dl ׌:]$qTز) B ḬMf3K{&@rs+k# Hko6_<%UY$Z;x9?1r{| 1D klzN|klfr_2v1DaljHi).PJĢJp(kz5N Ga%`X@Qbmy:` XE*,PBHpTͱ5Mbu ʨ `{p ?F? @{<_hr+Ua<:{1>iΏ~:ʷ&v咤TpI"| ꜠3U$6K&D,+MᡌoŬ7 B`v_G#Wؐy0@h=XZ d9d݋To -:$8-_%$0 91z4[@[zl"sM!yAhN{39 7yh:3g˂fj'EVET,N$y=JW:h"}%ʍ$ld̕T,ua8B򈖲RXN:kLLG κ_֎#vg$'9@*4}]ND3jkʮfF-u/RV&EWUA70<+/Urˍ9;ǥuE `͡IH eXgY :Y;O3|CV a? %W5jݚI70d0ork%^Tl@:eFo9vRLiuElb,G,9"bLn,5-a6t~Z=- ~)O#cS%TE%Z}vuFJ;bQVžv$-^\\VKE[[8,,GC)Q6› Cot'IOc54pPڼ/M}; u w6`Oy ,guG<2EKQwu8; ( @=Z=w8"xmL`,\qM̸5nupq;pٵS#$gcn.h(쀋;ϲmۡGźhh0ަ`ovBè(p+,g_u{2[ƎU ]yøͦ-G3Hk x n .ڈ;H}q,%ޥna֞G7WIUS="xU<>endstream endobj 644 0 obj << /Filter /FlateDecode /Length 3471 >> stream x[o P ^H8iIz@P~8βNINU{ə!goNJi#.?r澟^?s`V[pB ٝDtuFYš"ǠuzI/3l<,ݙ? 
j?F+,0pMWsi `W2hl d1_SRSHXmḼOv[~kr#}DkcONH3-}p.mMޚfO=]btDs!ψM"vMrCț* p3}I}4G 릺\Wo|Ddo KuMmSŋ_6vMN O(wa6iMa܊ n2fC`giIn\yKL)/3 |ʗֲuS!8LMlt4!=&Kc3)hJ+ ?:I_84{=ɩ)9l1N)5.z_ʡZhL)_ Pܤ($n*( Mb@1Q.1MF69"3:B@ϺڠT"zu:rSqp& &nwXHfAEtVxHzX4%MKa,ſdKxyI8vv}is:̓yИm'0CyjxOuR"P8{ S zC"p6{;kM\`6߆": bNz'viM@?yp]oEmA<>^|Ps|42w(ܛpJ>8mLžJWcvIj exIaA Lܹ3;1>:7\9׃gl{hDB XKV&Q:c$6l4Ö l(Wgq3/V[ԐӦ@>`WeL62~A!=P^[?[ ڦ-oE,;+MP,F[jU@FXL CdncQ<,A>@6$T'hd(̙5rCźCX}*fV S8n3ũ0a ԫ7w,Lh{t ŏ^KaxAwAj4gx /Sb  VT΅2UQ̌E;ݼiK) T6<` |<£Kf n)m~9(/Q u&rO _j D̬^cE3(D\0Հ9aҊ-TAQ,O7NO0[厀Q<PYk,S^+@ׇ ?o hWZ]2f1Ysm264YmeM5?KI$Qd\Ĝ׭"weOV/KEɾӦȳ ?4$d=O]VJ"Y|;ag:\(}{D@#1Qp8L!%| &="?>D].mmǧ}[?7vדEvt*ߴS\ ITxNUe#l4xBX"ܾw@2\(`@@Sؤ8RV)4ijiB%ޤ@@=uu鼒uwQ+6Xf0K2ʀÚ9ssiġ7[ 'ՄWjF*w%D9äKe}Mf5A',y4xMr(ba7.CFoe NǁكuRw'AΡ^v}me6`/,t+[x*Q_]ˤ]\tJSYjۧRژET7qᕟ90P<l_웈AUwd8'a]6|&7Ma:_4yy,*;VKj["MH&<7u(8@hRhPs4ʼnXEҪti\dS2$Ez+IIך%djlI@w"j^%);.$2&ݼb[@(]KOn3 'Dʦ n,y \cm W, 6󊫆K?܀XpRXO՗ɲ++YC۳8/) uSqls{ŢmڌMn*l[ͅ2pYԮEDndQ[ Gx[`4Ӕ`T i ŽF{"ۑZ`{Inj)} ldiӟWAMmӦ>{UX;m /?]LUۀW:>YZ4}]Pѥ9,쨁;3Ix`l-S+!cY!„] P/1@#gj4)o\p[L^=my"YHoDy6-3Гn7OgR|AVy^[QK7TJ޺OQFȿz뷒%ͯxŗ,WQ4ngz04H&RA.JT:#endstream endobj 645 0 obj << /Filter /FlateDecode /Length 3367 >> stream x[[\ ~_G 3THۤh,'{g$#Qg4h yDE~7 1ʅ?}/Zl'b9ys"ϋsjSEӫ4Y.\xǨɠgmu3Z3N/N+1Cpv)F˥F;"|~)ΗZ!z5"KǠH?İ\ Fk@,kcS6r ʀ֎Jhms 6re}EHF-lYPPX_wSZgo&$(]LʃL -r`Xfa$|_0сdb${RLkۃ䦘:5 6q+m4 FDgL\YG!_2g皣g=U%Rg@]N/Xo UM^!\ /wY +MUbC5TQu$  "ΰu9ϻ3DAY+ F'?Wꚛ 1p[]EdgA Ea/o-m-c NftM0jǎܳ{!U>pc27Cp,s?%I0~]կW]dàϾ b}\sqC{JUs.ZJ~ H 8 DumVUrzY}^'uc.Oɤ`]MֱKfL Y#[Vm'S?xc%oB>G-4~Y%eǪ~^ZV`q1#Mz!U(q$prS75]zLKJJ S* 8ҫϙAy"jmyF 6a '!5pTT,u0"tK%Ge-/nR0RY~/XV~%/|ϻ>5cV֙5ΨfK)*&uvN-^"Ym J xb6`4 H94 K,k=*! $7LaJf/;}9Zi6c`U66In,?*RپD{hu?B^ݯWƌ!ްy߽Xt5xR5pv߀\7u{V \H-ɏGTWx,nu] &1dNIRd(1/KV)YcǥKFz$M>#ۜt9ESiۆ[Jf3aQEOv۔圴|to؊1 ka91BؠM.։<Ҿ*"VY˕.ڞXT CZ*;4` }^30*צڬgܘʼnFOHmY}ϗdiQQϛinp pz8Raװ-LݛK^.S;ٳKkԝВpmqK45x~%AwlUX;vp6x2 + "?C|l8k$M(3^(3z8^K?ۼd/u.$ΗAЂHryc)y r|i̧#/;&$9q.ezBZŦj DSdmen^ԅHO~Rvu(FCN|J{$> XX}{®c ^=>M=V XM-ֶMzp{ZXOcà $89J5Ik$~kNm&^+{;Ns7޽'JIށI7|C}^Wܱ e#uoP"MEڵ EG:$YjCY)'oovbd IU:al$IEH;ǽ. $ЈɲTji꥾eI-Fzpԭ!a":9{-S/c%N,&6߀Wv75s0. -{p3;r]k+;1_*AcfpQ`)]`y hf_ -cv1!|sxnR'Z#;qo˒43y9'K;|?We'֞n悂Ҿ; {endstream endobj 646 0 obj << /Filter /FlateDecode /Length 3461 >> stream x[KΙ)sébRIA΁ڕVKyw%G>4,it8ĠkӜu|¿q!淏364ynUb՛Yz[m;/nny0[f63Ro\^.+ 3Òu ?.%8Wr #ORy+o{wa-fq{T +:YVQ0vBA ;$T}Ti_.WJz/F;xdAתT _O#S.9F@ut:L|1H{~KLz1Gh-XE9:w]i:WRN+;gLfx_eBvY!<(uWupSukDq}XıvG|C~::awS#4kv Q*Pw6ߓ6Уn~0+V;hwzOᒟ,>L/7ũ 0cx;[+G]ఔ ?eV}GdGQ 7R-6XʸsJ)MF2S 'Kqe*,)+B֠͂†K,lY%DԸ 紜c~&3Ga5tZԧO:*-{U:d"8XP7!a[+3:EzuP? 
markovchain/inst/doc/an_introduction_to_markovchain_package.Rmd0000644000176200001440000032041513762012754024714 0ustar liggesusers---
title:
  plain: "The markovchain Package: A Package for Easily Handling Discrete Markov Chains in R"
  formatted: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}"
  short: "\\pkg{markovchain} package: discrete Markov chains in \\proglang{R}"
  pagetitle: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}"
author:
  - name: "Giorgio Alfredo Spedicato"
    affiliation: Ph.D C.Stat FCAS, FSA, CSPA Unipol Group
    address: >
      Via Firenze 11
      Paderno Dugnano 20037 Italy
    email: \email{spedygiorgio@gmail.com}
    url: www.statisticaladvisor.com
  - name: "Tae Seung Kang"
    affiliation: Ph.D student, Computer \& Information Science \& Engineering
    address: >
      University of Florida
      Gainesville, FL, USA
    email: \email{tskang3@gmail.com}
  - name: "Sai Bhargav Yalamanchi"
    affiliation: B-Tech student, Electrical Engineering
    address: >
      Indian Institute of Technology, Bombay
      Mumbai - 400 076, India
    email: \email{bhargavcoolboy@gmail.com}
  - name: "Deepak Yadav"
    affiliation: B-Tech student, Computer Science and Engineering
    address: >
      Indian Institute of Technology, Varanasi
      Uttar Pradesh - 221 005, India
    email: \email{deepakyadav.iitbhu@gmail.com}
  - name: "Ignacio Cordón"
    affiliation: Software Engineer
    address: >
      Madrid (Madrid), Spain
    email: \email{nacho.cordon.castillo@gmail.com}
preamble: >
  \author{\small{Giorgio Alfredo Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi, Deepak Yadav, Ignacio Cordón}}
  \Plainauthor{G.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón}
Cordón} \usepackage{graphicx} \usepackage{amsmath} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} \usepackage{amsfonts} abstract: | The \pkg{markovchain} package aims to fill a gap within the \proglang{R} framework providing S4 classes and methods for easily handling discrete time Markov chains, homogeneous and simple inhomogeneous ones as well as continuous time Markov chains. The S4 classes for handling and analysing discrete and continuous time Markov chains are presented, as well as functions and method for performing probabilistic and statistical analysis. Finally, some examples in which the package's functions are applied to Economics, Finance and Natural Sciences topics are shown. output: if (rmarkdown::pandoc_version() < 2.2) function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{An introduction to markovchain package} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] formatted: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add .... x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) }) ``` # Introduction Markov chains represent a class of stochastic processes of great interest for the wide spectrum of practical applications. In particular, discrete time Markov chains (DTMC) permit to model the transition probabilities between discrete states by the aid of matrices.Various \proglang{R} packages deal with models that are based on Markov chains: * \pkg{msm} [@msmR] handles Multi-State Models for panel data. * \pkg{mcmcR} [@mcmcR] implements Monte Carlo Markov Chain approach. * \pkg{hmm} [@hmmR] fits hidden Markov models with covariates. * \pkg{mstate} fits `Multi-State Models based on Markov chains for survival analysis [@mstateR]. Nevertheless, the \proglang{R} statistical environment [@rSoftware] seems to lack a simple package that coherently defines S4 classes for discrete Markov chains and allows to perform probabilistic analysis, statistical inference and applications. For the sake of completeness, \pkg{markovchain} is the second package specifically dedicated to DTMC analysis, being \pkg{DTMCPack} [@DTMCPackR] the first one. Notwithstanding, \pkg{markovchain} package [@pkg:markovchain] aims to offer more flexibility in handling DTMC than other existing solutions, providing S4 classes for both homogeneous and non-homogeneous Markov chains as well as methods suited to perform statistical and probabilistic analysis. 
The \pkg{markovchain} package depends on the following \proglang{R} packages: \pkg{expm} [@expmR] to perform efficient matrices powers; \pkg{igraph} [@pkg:igraph] to perform pretty plotting of `markovchain` objects and \pkg{matlab} [@pkg:matlab], that contains functions for matrix management and calculations that emulate those within \proglang{MATLAB} environment. Moreover, other scientific softwares provide functions specifically designed to analyze DTMC, as \proglang{Mathematica} 9 [@mathematica9]. The paper is structured as follows: Section \@ref(sec:mathematics) briefly reviews mathematics and definitions regarding DTMC, Section \@ref(sec:structure) discusses how to handle and manage Markov chain objects within the package, Section \@ref(sec:probability) and Section \@ref(sec:statistics) show how to perform probabilistic and statistical modelling, while Section \@ref(sec:applications) presents some applied examples from various fields analyzed by means of the \pkg{markovchain} package. # Review of core mathematical concepts {#sec:mathematics} ## General Definitions A DTMC is a sequence of random variables $X_{1},\: X_{2}\: ,\ldots,\:X_{n},\ldots$ characterized by the Markov property (also known as memoryless property, see Equation \ref{eq:markovProp}). The Markov property states that the distribution of the forthcoming state $X_{n+1}$ depends only on the current state $X_{n}$ and doesn't depend on the previous ones $X_{n-1},\: X_{n-2},\ldots,\: X_{1}$. \begin{equation} Pr\left(X_{n+1}=x_{n+1}\left|X_{1}=x_{1},X_{2}=x_{2,}...,X_{n}=x_{n}\right.\right)=Pr\left(X_{n+1}=x_{n+1}\left|X_{n}=x_{n}\right.\right). \label{eq:markovProp} \end{equation} The set of possible states $S=\left\{ s_{1},s_{2},...,s_{r}\right\}$ of $X_{n}$ can be finite or countable and it is named the state space of the chain. The chain moves from one state to another (this change is named either 'transition' or 'step') and the probability $p_{ij}$ to move from state $s_{i}$ to state $s_{j}$ in one step is named transition probability: \begin{equation} p_{ij}=Pr\left(X_{1}=s_{j}\left|X_{0}=s_{i}\right.\right). \label{eq:trProp} \end{equation} The probability of moving from state $i$ to $j$ in $n$ steps is denoted by $p_{ij}^{(n)}=Pr\left(X_{n}=s_{j}\left|X_{0}=s_{i}\right.\right)$. A DTMC is called time-homogeneous if the property shown in Equation \ref{eq:mcHom} holds. Time homogeneity implies no change in the underlying transition probabilities as time goes on. \begin{equation} Pr\left(X_{n+1}=s_{j}\left|X_{n}=s_{i}\right.\right)=Pr\left(X_{n}=s_{j}\left|X_{n-1}=s_{i}\right.\right). \label{eq:mcHom} \end{equation} If the Markov chain is time-homogeneous, then $p_{ij}=Pr\left(X_{k+1}=s_{j}\left|X_{k}=s_{i}\right.\right)$ and \newline $p_{ij}^{(n)}=Pr\left(X_{n+k}=s_{j}\left|X_{k}=s_{i}\right.\right)$, where $k>0$. The probability distribution of transitions from one state to another can be represented into a transition matrix $P=(p_{ij})_{i,j}$, where each element of position $(i,j)$ represents the transition probability $p_{ij}$. E.g., if $r=3$ the transition matrix $P$ is shown in Equation \ref{eq:trPropEx} \begin{equation} P=\left[\begin{array}{ccc} p_{11} & p_{12} & p_{13}\\ p_{21} & p_{22} & p_{23}\\ p_{31} & p_{32} & p_{33} \end{array}\right]. \label{eq:trPropEx} \end{equation} The distribution over the states can be written in the form of a stochastic row vector $x$ (the term stochastic means that $\sum_{i}x_{i}=1, x_{i} \geq 0$): e.g., if the current state of $x$ is $s_{2}$, $x=\left(0\:1\:0\right)$. 
As a consequence, the relation between $x^{(1)}$ and $x^{(0)}$ is $x^{(1)}=x^{(0)}P$ and, recursively, we get $x^{(2)}=x^{(0)}P^{2}$ and $x^{(n)}=x^{(0)}P^{n},\, n>0$. DTMC are explained in most theory books on stochastic processes, see \cite{bremaud1999discrete} and \cite{dobrow2016introduction} for example. Valuable references online available are: \cite{konstantopoulos2009markov}, \cite{probBook} and \cite{bardPpt}. ## Properties and classification of states {#sec:properties} A state $s_{j}$ is said accessible from state $s_{i}$ (written $s_{i}\rightarrow s_{j}$) if a system starting in state $s_{i}$ has a positive probability to reach the state $s_{j}$ at a certain point, i.e., $\exists n>0:\: p_{ij}^{n}>0$. If both $s_{i}\rightarrow s_{j}$ and $s_{j}\rightarrow s_{i}$, then $s_{i}$ and $s_{j}$ are said to communicate. A communicating class is defined to be a set of states that communicate. A DTMC can be composed by one or more communicating classes. If the DTMC is composed by only one communicating class (i.e., if all states in the chain communicate), then it is said irreducible. A communicating class is said to be closed if no states outside of the class can be reached from any state inside it. If $p_{ii}=1$, $s_{i}$ is defined as absorbing state: an absorbing state corresponds to a closed communicating class composed by one state only. The canonical form of a DTMC transition matrix is a matrix having a block form, where the closed communicating classes are shown at the beginning of the diagonal matrix. A state $s_{i}$ has period $k_{i}$ if any return to state $s_{i}$ must occur in multiplies of $k_{i}$ steps, that is $k_{i}=gcd\left\{ n:Pr\left(X_{n}=s_{i}\left|X_{0}=s_{i}\right.\right)>0\right\}$, where $gcd$ is the greatest common divisor. If $k_{i}=1$ the state $s_{i}$ is said to be aperiodic, else if $k_{i}>1$ the state $s_{i}$ is periodic with period $k_{i}$. Loosely speaking, $s_{i}$ is periodic if it can only return to itself after a fixed number of transitions $k_{i}>1$ (or multiple of $k_{i}$), else it is aperiodic. If states $s_{i}$ and $s_{j}$ belong to the same communicating class, then they have the same period $k_{i}$. As a consequence, each of the states of an irreducible DTMC share the same periodicity. This periodicity is also considered the DTMC periodicity. It is possible to classify states according to their periodicity. Let $T^{x\rightarrow x}$ is the number of periods to go back to state $x$ knowing that the chain starts in $x$. * A state $x$ is recurrent if $P(T^{x\rightarrow x}<+\infty)=1$ (equivalently $P(T^{x\rightarrow x}=+\infty)=0$). In addition: 1. A state $x$ is null recurrent if in addition $E(T^{x\rightarrow x})=+\infty$. 2. A state $x$ is positive recurrent if in addition $E(T^{x\rightarrow x})<+\infty$. 3. A state $x$ is absorbing if in addition $P(T^{x\rightarrow x}=1)=1$. * A state $x$ is transient if $P(T^{x\rightarrow x}<+\infty)<1$ (equivalently $P(T^{x\rightarrow x}=+\infty)>0$). It is possible to analyze the timing to reach a certain state. The first passage time (or hitting time) from state $s_{i}$ to state $s_{j}$ is the number $T_{ij}$ of steps taken by the chain until it arrives for the first time to state $s_{j}$, given that $X_{0} = s_{i}$. 
The probability distribution of $T_{ij}$ is defined by Equation \ref{eq:fpt1} \begin{equation} {h_{ij}}^{\left( n \right)} = Pr\left( {T_{ij} = n} \right) = Pr\left( X_n = s_j,X_{n - 1} \ne s_{j}, \ldots ,X_1 \ne s_j |X_0 = s_i \right) \label{eq:fpt1} \end{equation} and can be found recursively using Equation \ref{eq:ftp2}, given that ${h_{ij}}^{\left( n \right)} = p_{ij}$. \begin{equation} {h_{ij}}^{\left( n \right)} = \sum\limits_{k \in S - \left\{ s_{j} \right\}}^{} {{p_{ik}}{h_{kj}}^{\left( {n - 1} \right)}}. \label{eq:ftp2} \end{equation} A commonly used quantity related to $h$ is its average value, i.e. the \emph{mean first passage time} (also expected hitting time), namely $\bar h_{ij}= \sum_{n=1\dots\infty} n \,h_{ij}^{(n)}$. If in the definition of the first passage time we let $s_{i}=s_{j}$, we obtain the first recurrence time $T_{i}=\inf \{ n\geq1:X_{n}=s_{i}|X_{0}=s_{i} \}$. We could also ask ourselves which is the *mean recurrence time*, an average of the mean first recurrence times: \[ r_i = \sum_{k = 1}^{\infty} k \cdot P(T_i = k) \] Revisiting the definition of recurrence and transience: a state $s_{i}$ is said to be recurrent if it is visited infinitely often, i.e., $Pr(T_{i}<+\infty|X_{0}=s_{i})=1$. On the opposite, $s_{i}$ is called transient if there is a positive probability that the chain will never return to $s_{i}$, i.e., $Pr(T_{i}=+\infty|X_{0}=s_{i})>0$. Given a time homogeneous Markov chain with transition matrix \emph{P}, a stationary distribution \emph{z} is a stochastic row vector such that $z=z\cdot P$, where $0\leq z_{j}\leq 1 \: \forall j$ and $\sum_{j}z_{j}=1$. If a DTMC $\{X_{n}\}$ is irreducible and aperiodic, then it has a limit distribution and this distribution is stationary. As a consequence, if $P$ is the $k\times k$ transition matrix of the chain and $z=\left(z_{1},...,z_{k}\right)$ is the unique eigenvector of $P$ such that $\sum_{i=1}^{k}z_{i}=1$, then we get \begin{equation} \underset{n\rightarrow\infty}{lim}P^{n}=Z, \label{eq:limMc} \end{equation} where $Z$ is the matrix having all rows equal to $z$. The stationary distribution of $\{X_{n}\}$ is represented by $z$. A matrix $A$ is called primitive if all of its entries are strictly positive, and we write it $A > 0$. If the transition matrix $P$ for a DTMC has some primitive power, i.e. it exists $m > 0: P^m > 0$, then the DTMC is said to be regular. In fact being regular is equivalent to being irreducible and aperiodic. All regular DTMCs are irreducible. The counterpart is not true. Given two absorbing states $s_A$ (source) and $s_B$ (sink), the \emph{committor probability} $q_j^{(AB)}$ is the probability that a process starting in state $s_i$ is absorbed in state $s_B$ (rather than $s_A$) [@noe_constructing_2009]. It can be computed via \begin{equation} q_j^{(AB)} = \sum_{k \ni {A, B}} P_{jk}q_k^{(AB)} \quad \mbox{with} \quad q_A^{(AB)} = 0 \quad \mbox{and} \quad q_B^{(AB)} = 1 \end{equation} Note we can also define the hitting probability from $i$ to $j$ as the probability of ever reaching the state $j$ if our initial state is $i$: \begin{equation} h_{i,j} = Pr(T_{ij} < \infty) = \sum_{n = 0}^{\infty} h_{ij}^{(n)} \label{eq:hitting-probs} \end{equation} In a DTMC with finite set of states, we know that a transient state communicates at least with one recurrent state. If the chain starts in a transient element, once it hits a recurrent state, it is going to be caught in its recurrent state, and we cannot expect it would go back to the initial state. 
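As a small numerical illustration of this behaviour, the base \proglang{R} sketch below builds a toy three-state chain (the matrix `Ptoy` is purely illustrative and is not one of the package datasets) in which state `t` is transient and states `a` and `b` are absorbing; raising the matrix to a high power shows the probability mass leaving the transient state and settling on the recurrent ones.

```{r transientToyExample}
# toy chain: "t" is transient, "a" and "b" are absorbing (hence recurrent)
Ptoy <- matrix(c(1.0, 0.0, 0.0,
                 0.3, 0.4, 0.3,
                 0.0, 0.0, 1.0),
               nrow = 3, byrow = TRUE,
               dimnames = list(c("a", "t", "b"), c("a", "t", "b")))
Pn <- Ptoy
for (step in 1:49) Pn <- Pn %*% Ptoy   # Ptoy raised to the 50th power
round(Pn["t", ], 3)   # starting from "t" the chain ends up in "a" or "b"
```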
Given a transient state $i$ we can define the *absorption probability* to the recurrent state $j$ as the probability that the first recurrent state that the Markov chain visits (and therefore gets absorbed by its recurrent class) is $j$, $f^{*}_ij$. We can also define the *mean absorption time* as the mean number of steps the transient state $i$ would take until it hits any recurrent state, $b_i$. ## A short example Consider the following numerical example. Suppose we have a DTMC with a set of 3 possible states $S=\{s_{1}, s_{2}, s_{3}\}$. Let the transition matrix be: \begin{equation} P=\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]. \label{eq:trPropExEx1} \end{equation} In $P$, $p_{11}=0.5$ is the probability that $X_{1}=s_{1}$ given that we observed $X_{0}=s_{1}$ is 0.5, and so on.It is easy to see that the chain is irreducible since all the states communicate (it is made by one communicating class only). Suppose that the current state of the chain is $X_{0}=s_{2}$, i.e., $x^{(0)}=(0\:1\:0)$, then the probability distribution of states after 1 and 2 steps can be computed as shown in Equations \@ref(eq:trPropExEx2) and \@ref(eq:trPropExEx3). \begin{equation} x^{(1)}=\left(0\:1\:0\right)\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]=\left(0.15\:0.45\:0.4\right). \label{eq:trPropExEx2} \end{equation} \begin{equation} x^{(n)}=x^{(n-1)}P \to \left(0.15\:0.45\:0.4\right)\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]=\left(0.2425\:0.3725\:0.385\right). \label{eq:trPropExEx3} \end{equation} If we were interested in the probability of being in the state $s_{3}$ in the second step, then $Pr\left(X_{2}=s_{3}\left|X_{0}=s_{2}\right.\right)=0.385$. \newpage # The structure of the package {#sec:structure} ## Creating markovchain objects The package is loaded within the \proglang{R} command line as follows: ```{r, load, results='hide', message=FALSE} library("markovchain") ``` ```{r, load-aux, echo=FALSE, results='hide'} require("matlab") ``` The `markovchain` and `markovchainList` S4 classes [@chambers] are defined within the \pkg{markovchain} package as displayed: ```{r, showClass, echo=FALSE} showClass("markovchain") showClass("markovchainList") ``` The first class has been designed to handle homogeneous Markov chain processes, while the latter (which is itself a list of `markovchain` objects) has been designed to handle non-homogeneous Markov chains processes. Any element of `markovchain` class is comprised by following slots: 1. `states`: a character vector, listing the states for which transition probabilities are defined. 2. `byrow`: a logical element, indicating whether transition probabilities are shown by row or by column. 3. `transitionMatrix`: the probabilities of the transition matrix. 4. `name`: optional character element to name the DTMC. The `markovchainList` objects are defined by following slots: 1. `markovchains`: a list of `markovchain` objects. 2. `name`: optional character element to name the DTMC. 
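Both classes are ordinary S4 classes, so their slots can also be read directly with the `@` accessor. The sketch below is not evaluated here; it simply refers to the `mcWeather` and `mcList` objects that are created in the following code chunks.

```{r slotAccessSketch, eval=FALSE}
# direct S4 slot access (objects are defined in the chunks below)
mcWeather@states             # character vector of state names
mcWeather@transitionMatrix   # transition probability matrix
mcWeather@byrow              # TRUE if probabilities are stored by row
mcList@markovchains[[1]]     # first markovchain of a markovchainList
```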
The `markovchain` objects can be created either in a long way, as the following code shows ```{r mcInitLong} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") ``` or in a shorter way, displayed below ```{r mcInitShort} mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather") ``` When `new("markovchain")` is called alone, a default Markov chain is created. ```{r defaultMc} defaultMc <- new("markovchain") ``` The quicker way to create `markovchain` objects is made possible thanks to the implemented `initialize` S4 method that checks that: * the `transitionMatrix` to be a transition matrix, i.e., all entries to be probabilities and either all rows or all columns to sum up to one. * the columns and rows names of `transitionMatrix` to be defined and to coincide with `states` vector slot. The `markovchain` objects can be collected in a list within `markovchainList` S4 objects as following example shows. ```{r intromcList} mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains") ``` ## Handling markovchain objects Table \@ref(tab:methodsToHandleMc) lists which methods handle and manipulate `markovchain` objects. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Purpose \\ \hline \hline \code{*} & Direct multiplication for transition matrices.\\ \code{\textasciicircum{}} & Compute the power \code{markovchain} of a given one.\\ \code{[} & Direct access to the elements of the transition matrix.\\ \code{==} & Equality operator between two transition matrices.\\ \code{!=} & Inequality operator between two transition matrices.\\ \code{as} & Operator to convert \code{markovchain} objects into \code{data.frame} and\\ & \code{table} object.\\ \code{dim} & Dimension of the transition matrix.\\ \code{names} & Equal to \code{states}.\\ \code{names<-} & Change the \code{states} name.\\ \code{name} & Get the name of \code{markovchain object}.\\ \code{name<-} & Change the name of \code{markovchain object}.\\ \code{plot} & \code{plot} method for \code{markovchain} objects.\\ \code{print} & \code{print} method for \code{markovchain} objects.\\ \code{show} & \code{show} method for \code{markovchain} objects.\\ \code{sort} & \code{sort} method for \code{markovchain} objects, in terms of their states.\\ \code{states} & Name of the transition states.\\ \code{t} & Transposition operator (which switches \code{byrow} `slot value and modifies \\ & the transition matrix coherently).\\ \hline \end{tabular} \caption{\pkg{markovchain} methods for handling \code{markovchain} objects.} \label{tab:methodsToHandleMc} \end{table} The examples that follow shows how operations on `markovchain` objects can be easily performed. For example, using the previously defined matrix we can find what is the probability distribution of expected weather states in two and seven days, given the actual state to be cloudy. 
```{r operations} initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ``` A similar answer could have been obtained defining the vector of probabilities as a column vector. A column - defined probability matrix could be set up either creating a new matrix or transposing an existing `markovchain` object thanks to the `t` method. ```{r operations2} initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ``` The initial state vector previously shown can not necessarily be a probability vector, as the code that follows shows: ```{r fval} fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ``` Basic methods have been defined for `markovchain` objects to quickly get states and transition matrix dimension. ```{r otherMethods} states(mcWeather) names(mcWeather) dim(mcWeather) ``` Methods are available to set and get the name of `markovchain` object. ```{r otherMethods2} name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ``` Also it is possible to alphabetically sort the transition matrix: ```{r sortMethod} markovchain:::sort(mcWeather) ``` A direct access to transition probabilities is provided both by `transitionProbability` method and `"["` method. ```{r transProb} transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ``` The transition matrix of a `markovchain` object can be displayed using `print` or `show` methods (the latter being less verbose). Similarly, the underlying transition probability diagram can be plotted by the use of `plot` method (as shown in Figure \@ref(fig:mcPlot)) which is based on \pkg{igraph} package [@pkg:igraph]. `plot` method for `markovchain` objects is a wrapper of `plot.igraph` for `igraph` S4 objects defined within the \pkg{igraph} package. Additional parameters can be passed to `plot` function to control the network graph layout. There are also \pkg{diagram} and \pkg{DiagrammeR} ways available for plotting as shown in Figure \@ref(fig:mcPlotdiagram). The `plot` function also uses `communicatingClasses` function to separate out states of different communicating classes. All states that belong to one class have same color. ```{r printAndShow} print(mcWeather) show(mcWeather) ``` ```{r mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"} if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ``` ```{r mcPlotdiagram, echo=FALSE, fig.cap="Weather example. Markov chain plot with diagram"} if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ``` Import and export from some specific classes is possible, as shown in Figure \@ref(fig:fromAndTo) and in the following code. 
```{r exportImport1} mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ``` ```{r exportImport2} if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ``` from etm (now archived as of September 2020): ```{r exporImport3} if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, "markovchain") etm2mc } else { message("etm unavailable") } ``` ```{r fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"} library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ``` Coerce from `matrix` method, as the code below shows, represents another approach to create a `markovchain` method starting from a given squared probability matrix. ```{r exportImport4} myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ``` Non-homogeneous Markov chains can be created with the aid of `markovchainList` object. The example that follows arises from health insurance, where the costs associated to patients in a Continuous Care Health Community (CCHC) are modeled by a non-homogeneous Markov Chain, since the transition probabilities change by year. Methods explicitly written for `markovchainList` objects are: `print`, `show`, `dim` and `[`. ```{r cchcMcList} stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ``` It is possible to perform direct access to `markovchainList` elements, as well as to determine the number of `markovchain` objects by which a `markovchainList` object is composed. ```{r cchcMcList2} mcCCRC[[1]] dim(mcCCRC) ``` The `markovchain` package contains some data found in the literature related to DTMC models (see Section \@ref(sec:applications). Table \@ref(tab:datasets) lists datasets and tables included within the current release of the package. 
\begin{table}[h] \centering \begin{tabular}{p{0.2\textwidth}p{0.75\textwidth}} \hline Dataset & Description \\ \hline \hline \code{blanden} & Mobility across income quartiles, \cite{blandenEtAlii}.\\ \code{craigsendi} & CD4 cells, \cite{craigSendi}.\\ \code{kullback} & raw transition matrices for testing homogeneity, \cite{kullback1962tests}.\\ \code{preproglucacon} & Preproglucacon DNA basis, \cite{averyHenderson}.\\ \code{rain} & Alofi Island rains, \cite{averyHenderson}.\\ \code{holson} & Individual states trajectories.\\ \code{sales} & Sales of six beverages in Hong Kong \cite{ching2008higher}. \\ \hline \end{tabular} \caption{The \pkg{markovchain} \code{data.frame} and \code{table}.} \label{tab:datasets} \end{table} Finally, Table \@ref(tab:demos) lists the demos included in the demo directory of the package. \begin{table}[h] \centering \begin{tabular}{lll} \hline R Code File & Description \\ \hline \hline \code{bard.R} & Structural analysis of Markov chains from Bard PPT.\\ \code{examples.R} & Notable Markov chains, e.g., The Gambler Ruin chain.\\ \code{quickStart.R} & Generic examples.\\ \code{extractMatrices.R} & Generic examples.\\ \hline \end{tabular} \caption{The \pkg{markovchain} demos.} \label{tab:demos} \end{table} # Probability with markovchain objects {#sec:probability} The \pkg{markovchain} package contains functions to analyse DTMC from a probabilistic perspective. For example, the package provides methods to find stationary distributions and identifying absorbing and transient states. Many of these methods come from \proglang{MATLAB} listings that have been ported into \proglang{R}. For a full description of the underlying theory and algorithm the interested reader can overview the original \proglang{MATLAB} listings, \cite{renaldoMatlab} and \cite{montgomery}. Table \@ref(tab:methodsToStats) shows methods that can be applied on `markovchain` objects to perform probabilistic analysis. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Returns \\ \hline \hline \code{absorbingStates} & the absorbing states of the transition matrix, if any.\\ \code{steadyStates} & the vector(s) of steady state(s) in matrix form. \\ \code{meanFirstPassageTime} & matrix or vector of mean first passage times. \\ \code{meanRecurrenceTime} & vector of mean number of steps to return to each recurrent state \\ \code{hittingProbabilities} & matrix of hitting probabilities for a Markov chain. \\ \code{meanAbsorptionTime} & expected number of steps for a transient state to be \\ & absorbed by any recurrent class \\ \code{absorptionProbabilities} & probabilities of transient states of being \\ & absorbed by each recurrent state \\ \code{committorAB} & committor probabilities \\ \code{communicatingClasses} & list of communicating classes. \\ & $s_{j}$, given actual state $s_{i}$. \\ \code{canonicForm} & the transition matrix into canonic form. \\ \code{is.accessible} & checks whether a state j is reachable from state i. \\ \code{is.irreducible} & checks whether a DTMC is irreducible. \\ \code{is.regular} & checks whether a DTMC is regular. \\ \code{period} & the period of an irreducible DTMC. \\ \code{recurrentClasses} & list of recurrent communicating classes. \\ \code{transientClasses} & list of transient communicating classes. \\ \code{recurrentStates} & the recurrent states of the transition matrix. \\ \code{transientStates} & the transient states of the transition matrix, if any. \\ \code{summary} & DTMC summary. 
\\ \hline \end{tabular} \caption{\pkg{markovchain} methods: statistical operations.} \label{tab:methodsToStats} \end{table} ## Conditional distributions The conditional distribution of weather states, given that current day's weather is sunny, is given by following code. ```{r conditionalDistr} conditionalDistribution(mcWeather, "sunny") ``` ## Stationary states A stationary (steady state, or equilibrium) vector is a probability vector such that Equation \ref{eq:steadystat2} holds \begin{equation} \begin{matrix} 0\leq \pi_j \leq 1\\ \sum_{j \in S} \pi_j = 1\\ \pi \cdot P = \pi \end{matrix} \label{eq:steadystat2} \end{equation} Steady states are associated to $P$ eigenvalues equal to one. We could be tempted to compute them solving the eigen values / vectors of the matrix and taking real parts (since if $u + iv$ is a eigen vector, for the matrix $P$, then $Re(u + iv) = u$ and $Im(u + iv) = v$ are eigen vectors) and normalizing by the vector sum, this carries some concerns: 1. If $u, v \in \mathbb{R}^n$ are linearly independent eigen vectors associated to $1$ eigen value, $u + iv$, $u + iu$ are also linearly independent eigen vectors, and their real parts coincide. Clearly if we took real parts, we would be loosing an eigen vector, because we cannot know in advance if the underlying algorithm to compute the eigen vectors is going to output something similar to what we described. We should be agnostic to the underlying eigen vector computation algorithm. 2. Imagine the identity $P$ of dimensions $2 \times 2$. Its eigen vectors associated to the $1$ eigen value are $u = (1, 0)$ and $v = (0, 1)$. However, the underlying algorithm to compute eigen vectors could return $(1, -2)$ and $(-2, 1)$ instead, that are linear combinations of the aforementioned ones, and therefore eigen vectors. Normalizing by their sum, we would get: $(-1, 2)$ and $(2, -1)$, which obviously are not probability measures. Again, we should be agnostic to the underlying eigen computation algorithm. 3. Algorithms to compute eigen values / vectors are computationally expensive: they are iterative, and we cannot predict a fixed number of iterations for them. Moreover, each iteration takes $\mathcal{O}(m^2)$ or $\mathcal{O}(m^3)$ algorithmic complexity, with $m$ the number of states. We are going to use that every irreducible DTMC has a unique steady state, that is, if $M$ is the matrix for an irreducible DTMC (all states communicate with each other), then it exists a unique $v \in \mathbb{R}^m$ such that: \[ v \cdot M = v, \qquad \sum_{i = 1}^m v_i = 1 \] Also, we'll use that a steady state for a DTMC assigns $0$ to the transient states. The canonical form of a (by row) stochastic matrix looks alike: \[ \left(\begin{array}{c|c|c|c|c} M_1 & 0 & 0 & \ldots & 0 \\ \hline 0 & M_2 & 0 & \ldots & 0 \\ \hline 0 & 0 & M_3 & \ldots & 0 \\ \hline \vdots & \vdots & \vdots & \ddots & \vdots \\ \hline A_1 & A_2 & A_3 & \ldots & R \end{array}\right) \] where $M_i$ corresponds to irreducible sub-chains, the blocks $A_i$ correspond to the transitions from transient states to each of the recurrent classes and $R$ are the transitions from the transient states to themselves. Also, we should note that a Markov chain has exactly the same name of steady states as recurrent classes. Therefore, we have coded the following algorithm ^[We would like to thank Prof. Christophe Dutang for his contributions to the development of this method. 
He coded a first improvement of the original `steadyStates` method and we could not have reached the current correctness without his previous work]: 1. Identify the recurrent classes $[C_1, \ldots, C_l]$ with \texttt{recurrentClasses} function. 2. Take each class $C_i$, compute the sub-matrix corresponding to it $M_i$. 3. Solve the system $v \cdot C_i = v, \, \sum_{j = 1}^{|C_i|} v_j = 1$ which has a unique solution, for each $i = 1, \ldots, l$. 3. Map each state $v_i$ to the original order in $P$ and assign a $0$ to the slots corresponding to transient states in the matrix. The result is returned in matrix form. ```{r steadyStates} steadyStates(mcWeather) ``` It is possible for a Markov chain to have more than one stationary distribution, as the gambler ruin example shows. ```{r gamblerRuin} gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) { m <- matlab::zeros(moneyMax + 1) m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1 states <- as.character(0:moneyMax) rownames(m) <- colnames(m) <- states for(i in 2:moneyMax){ m[i,i-1] <- 1 - prob m[i, i + 1] <- prob } new("markovchain", transitionMatrix = m, name = paste("Gambler ruin", moneyMax, "dim", sep = " ")) } mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5) steadyStates(mcGR4) ``` ## Classification of states Absorbing states are determined by means of `absorbingStates` method. ```{r absorbingStates} absorbingStates(mcGR4) absorbingStates(mcWeather) ``` The key function in methods which need knowledge about communicating classes, recurrent states, transient states, is `.commclassKernel`, which is a modification of Tarjan's algorithm from \cite{Tarjan}. This `.commclassKernel` method gets a transition matrix of dimension $n$ and returns a list of two items: 1. `classes`, an matrix whose $(i, j)$ entry is `true` if $s_i$ and $s_j$ are in the same communicating class. 2. `closed`, a vector whose $i$ -th entry indicates whether the communicating class to which $i$ belongs is closed. These functions are used by two other internal functions on which the `summary` method for `markovchain` objects works. The example matrix used in \cite{renaldoMatlab} well exemplifies the purpose of the function. ```{r renaldoMatrix1} P <- matlab::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ``` All states that pertain to a transient class are named "transient" and a specific method has been written to elicit them. ```{r transientStates} transientStates(probMc) ``` `canonicForm` method that turns a Markov chain into its canonic form, reordering the states to have first the recurrent classes and then the transient states. ```{r probMc2Canonic} probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ``` The function `is.accessible` permits to investigate whether a state $s_{j}$ is accessible from state $s_i$, that is whether the probability to eventually reach $s_j$ starting from $s_{i}$ is greater than zero. ```{r isAccessible} is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ``` In Section \@ref(sec:properties) we observed that, if a DTMC is irreducible, all its states share the same periodicity. 
Then, the `period` function returns the periodicity of the DTMC, provided that it is irreducible. The example that follows shows how to find if a DTMC is reducible or irreducible by means of the function `is.irreducible` and, in the latter case, the method `period` is used to compute the periodicity of the chain. ```{r periodicity} E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ``` The example Markov chain found in \proglang{Mathematica} web site \citep{mathematica9MarkovChain} has been used, and is plotted in Figure \@ref(fig:mcMathematics). ```{r mathematica9Mc} require(matlab) mathematicaMatr <- zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ``` ```{r mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. Markov chain plot."} plot(mathematicaMc, layout = layout.fruchterman.reingold) ``` ```{r mathematica9MC, echo=FALSE} summary(mathematicaMc) ``` ## First passage time distributions and means \cite{renaldoMatlab} provides code to compute first passage time (within $1,2,\ldots, n$ steps) given the initial state to be $i$. The \proglang{MATLAB} listings translated into \proglang{R} on which the `firstPassage` function is based are: ```{r fpTime1, eval=FALSE} .firstpassageKernel <- function(P, i, n){ G <- P H <- P[i,] E <- 1 - diag(size(P)[2]) for (m in 2:n) { G <- P %*% (G * E) H <- rbind(H, G[i,]) } return(H) } ``` We conclude that the probability for the *first* rainy day to be the third one, given that the current state is sunny, is given by: ```{r fpTime2} firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ``` To compute the *mean* first passage times, i.e. the expected number of days before it rains given that today is sunny, we can use the `meanFirstPassageTime` function: ```{r mfpt1} meanFirstPassageTime(mcWeather) ``` indicating e.g. that the average number of days of sun or cloud before rain is 6.67 if we start counting from a sunny day, and 5 if we start from a cloudy day. Note that we can also specify one or more destination states: ```{r mfpt2} meanFirstPassageTime(mcWeather,"rain") ``` The implementation follows the matrix solutions by [@GrinsteadSnell]. We can check the result by averaging the first passage probability density function: ```{r mfpt3} firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ``` ## Mean recurrence time The `meanRecurrenceTime` method gives the first mean recurrence time (expected number of steps to go back to a state if it was the initial one) for each recurrent state in the transition probabilities matrix for a DTMC. Let's see an example: ```{r mrt-weather} meanRecurrenceTime(mcWeather) ``` Another example, with not all of its states being recurrent: ```{r mrt-probMc} recurrentStates(probMc) meanRecurrenceTime(probMc) ``` ## Absorption probabilities and mean absorption time We are going to use the Drunkard’s random walk from [@GrinsteadSnell]. 
We have a drunk person walking through the street. Each move the person does, if they have not arrived to either home (corner 1) or to the bar (corner 5) could be to the left corner or to the right one, with equal probability. In case of arrival to the bar or to home, the person stays there. ```{r data-drunkard} drunkProbs <- matlab::zeros(5, 5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ``` Recurrent (in fact absorbing states) are: ```{r rs-drunkard} recurrentStates(drunkMc) ``` Transient states are the rest: ```{r ts-drunkard} transientStates(drunkMc) ``` The probability of either being absorbed by the bar or by the sofa at home are: ```{r ap-drunkard} absorptionProbabilities(drunkMc) ``` which means that the probability of arriving home / bar is inversely proportional to the distance to each one. But we also would like to know how much time does the person take to arrive there, which can be done with `meanAbsorptionTime`: ```{r at-drunkard} meanAbsorptionTime(drunkMc) ``` So it would take `3` steps to arrive to the destiny if the person is either in the second or fourth corner, and `4` steps in case of being at the same distance from home than to the bar. ## Committor probability The committor probability tells us the probability to reach a given state before another given. Suppose that we start in a cloudy day, the probabilities of experiencing a rainy day before a sunny one is 0.5: ```{r} committorAB(mcWeather,3,1) ``` ## Hitting probabilities Rewriting the system \eqref{eq:hitting-probs} as: \begin{equation*} A = \left(\begin{array}{c|c|c|c} A_1 & 0 & \ldots & 0 \\ \hline 0 & A_2 & \ldots & 0 \\ \hline \vdots & \vdots & \ddots & 0 \\ \hline 0 & 0 & \ldots & A_n \end{array}\right) \end{equation*} \begin{eqnarray*} A_1 &= \left(\begin{matrix} -1 & p_{1,2} & p_{1,3} & \ldots & p_{1,n} \\ 0 & (p_{2,2} - 1) & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & p_{n, 2} & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ A_2 &= \left(\begin{matrix} (p_{1,1} - 1) & 0 & p_{1,3} & \ldots & p_{1,n} \\ p_{2,1} & -1 & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & 0 & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ \vdots & \vdots\\ A_n &= \left(\begin{matrix} (p_{1,1} - 1) & p_{1,2} & p_{1,3} & \ldots & 0 \\ p_{2,1} & (p_{2,2} -1) & p_{2,3} & \ldots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & p_{n,2} & p_{n,3} & \ldots & -1 \end{matrix}\right)\\ \end{eqnarray*} \begin{equation*} \begin{array}{lr} X_j = \left(\begin{array}{c} h_{1,j} \\ h_{2,j} \\ \vdots \\ h_{n,j} \end{array}\right) & C_j = - \left(\begin{array}{c} p_{1,j} \\ p_{2,j} \\ \vdots \\ p_{n,j} \end{array}\right) \end{array} \end{equation*} we end up having to solve the block systems: \begin{equation} A_j \cdot X_j = C_j \end{equation} Let us imagine the $i$ -th state has transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then that same row would turn into $(0,0, \ldots, 0)$ for some block, thus obtaining a singular matrix. Another case which may give us problems could be: state $i$ has the following transition probabilities: $(0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0)$ and the state $j$ has the following transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. 
Then when building some blocks we will end up with rows: \begin{eqnarray*} (0, \ldots, 0, \underset{i)}{-1}, 0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0) \\ (0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0, \underset{j)}{-1}, 0, \ldots, 0) \end{eqnarray*} which are linearly dependent. Our hypothesis is that if we treat the closed communicating classes differently, we *might* delete the linearity in the system. If we have a closed communicating class $C_u$, then $h_{i,j} = 1$ for all $i,j \in C_u$ and $h_{k,j} = 0$ for all $k\not\in C_u$. Then we can set $X_u$ appropriately and solve the other $X_v$ using those values. The method in charge of that in `markovchain` package is `hittingProbabilities`, which receives a Markov chain and computes the matrix $(h_{ij})_{i,j = 1,\ldots, n}$ where $S = \{s_1, \ldots, s_n\}$ is the set of all states of the chain. For the following chain: ```{r hitting-data} M <- matlab::zeros(5, 5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ``` we want to compute the hitting probabilities. That can be done with: ```{r hitting-probabilities} hittingProbabilities(hittingTest) ``` In the case of the `mcWeather` Markov chain we would obtain a matrix with all its elements set to $1$. That makes sense (and is desirable) since if today is sunny, we expect it would be sunny again at certain point in the time, and the same with rainy weather (that way we assure good harvests): ```{r hitting-weather} hittingProbabilities(mcWeather) ``` # Statistical analysis {#sec:statistics} Table \@ref(tab:funs4Stats) lists the functions and methods implemented within the package which help to fit, simulate and predict DTMC. \begin{table}[h] \centering \begin{tabular}{lll} \hline Function & Purpose \\ \hline \hline \code{markovchainFit} & Function to return fitted Markov chain for a given sequence.\\ \code{predict} & Method to calculate predictions from \code{markovchain} or \\ & \code{markovchainList} objects.\\ \code{rmarkovchain} & Function to sample from \code{markovchain} or \code{markovchainList} objects.\\ \hline \end{tabular} \caption{The \pkg{markovchain} statistical functions.} \label{tab:funs4Stats} \end{table} ## Simulation Simulating a random sequence from an underlying DTMC is quite easy thanks to the function `rmarkovchain`. The following code generates a year of weather states according to `mcWeather` underlying stochastic process. ```{r simulatingAMarkovChain} weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ``` Similarly, it is possible to simulate one or more sequences from a non-homogeneous Markov chain, as the following code (applied on CCHC example) exemplifies. ```{r simulatingAListOfMarkovChain} patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ``` Two advance parameters are available to the `rmarkovchain` method which helps you decide which implementation to use. There are four options available : \proglang{R}, \proglang{R} in parallel, \proglang{C++} and \proglang{C++} in parallel. Two boolean parameters `useRcpp` and `parallel` will decide which implementation will be used. Default is \code{useRcpp = TRUE} and \code{parallel = FALSE} i.e. \proglang{C++} implementation. The \proglang{C++} implementation is generally faster than the `R` implementation. 
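A sketch of how these arguments can be passed explicitly is shown below (not evaluated; `n`, `object` and `t0` are the same arguments used in the simulation chunks above).

```{r rmarkovchainEngines, eval=FALSE}
# default engine: C++ (Rcpp), single core
rmarkovchain(n = 365, object = mcWeather, t0 = "sunny",
             useRcpp = TRUE, parallel = FALSE)
# pure R implementation (generally slower, as noted above)
rmarkovchain(n = 365, object = mcWeather, t0 = "sunny",
             useRcpp = FALSE)
```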
If you have multicore processors then you can take advantage of `parallel` parameter by setting it to `TRUE`. When both `Rcpp=TRUE` and `parallel=TRUE` the parallelization has been carried out using \pkg{RcppParallel} package \citep{pkg:RcppParallel}. ## Estimation A time homogeneous Markov chain can be fit from given data. Four methods have been implemented within current version of \pkg{markovchain} package: maximum likelihood, maximum likelihood with Laplace smoothing, Bootstrap approach, maximum a posteriori. Equation \ref{eq:MLE} shows the maximum likelihood estimator (MLE) of the $p_{ij}$ entry, where the $n_{ij}$ element consists in the number sequences $\left( X_{t}=s_{i}, X_{t+1}=s_{j}\right)$ found in the sample, that is \begin{equation} {\hat p^{MLE}}_{ij} = \frac{n_{ij}}{\sum\limits_{u = 1}^k {n_{iu}}}. \label{eq:MLE} \end{equation} Equation \@ref(eq:SE) shows the `standardError` of the MLE \citep{MSkuriat}. \begin{equation} SE_{ij} = \frac{ {\hat p^{MLE}}_{ij} }{\sqrt{n_{ij}}} \label{eq:SE} \end{equation} ```{r fitMcbyMLE2} weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ``` The Laplace smoothing approach is a variation of the MLE, where the $n_{ij}$ is substituted by $n_{ij}+\alpha$ (see Equation \ref{eq:LAPLACE}), being $\alpha$ an arbitrary positive stabilizing parameter. \begin{equation} {\hat p^{LS}}_{ij} = \frac{{{n_{ij}} + \alpha }}{{\sum\limits_{u = 1}^k {\left( {{n_{iu}} + \alpha } \right)} }} \label{eq:LAPLACE} \end{equation} ```{r fitMcbyLAPLACE} weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace", laplacian = 0.01, name = "Weather LAPLACE") weatherFittedLAPLACE$estimate ``` (NOTE: The Confidence Interval option is enabled by default. Remove this option to fasten computations.) Both MLE and Laplace approach are based on the `createSequenceMatrix` functions that returns the raw counts transition matrix. ```{r fitSequenceMatrix} createSequenceMatrix(stringchar = weathersOfDays) ``` `stringchar` could contain `NA` values, and the transitions containing `NA` would be ignored. An issue occurs when the sample contains only one realization of a state (say $X_{\beta}$) which is located at the end of the data sequence, since it yields to a row of zero (no sample to estimate the conditional distribution of the transition). In this case the estimated transition matrix is corrected assuming $p_{\beta,j}=1/k$, being $k$ the possible states. Create sequence matrix can also be used to obtain raw count transition matrices from a given $n*2$ matrix as the following example shows: ```{r fitSequenceMatrix2} myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2) createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE) ``` A bootstrap estimation approach has been developed within the package in order to provide an indication of the variability of ${\hat p}_{ij}$ estimates. The bootstrap approach implemented within the \pkg{markovchain} package follows these steps: 1. bootstrap the data sequences following the conditional distributions of states estimated from the original one. The default bootstrap samples is 10, as specified in `nboot` parameter of `markovchainFit` function. 2. apply MLE estimation on bootstrapped data sequences that are saved in `bootStrapSamples` slot of the returned list. 3. the ${p^{BOOTSTRAP}}_{ij}$ is the average of all ${p^{MLE}}_{ij}$ across the `bootStrapSamples` list, normalized by row. 
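In formula form (a restatement of step 3), writing ${\hat p^{MLE, (b)}}_{ij}$ for the MLE computed on the $b$-th of the $B$ bootstrap samples (the `nboot` argument) and $k$ for the number of states, the bootstrap estimate is

\begin{equation*}
{\hat p^{BOOTSTRAP}}_{ij} = \frac{\frac{1}{B}\sum\limits_{b = 1}^{B} {\hat p^{MLE, (b)}}_{ij}}{\sum\limits_{u = 1}^{k} \frac{1}{B}\sum\limits_{b = 1}^{B} {\hat p^{MLE, (b)}}_{iu}}.
\end{equation*}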
A `standardError` of $\hat{{p^{MLE}}_{ij}}$ estimate is provided as well. ```{r fitMcbyBootStrap1} weatherFittedBOOT <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 20) weatherFittedBOOT$estimate weatherFittedBOOT$standardError ``` The bootstrapping process can be done in parallel thanks to \pkg{RcppParallel} package \citep{pkg:RcppParallel}. Parallelized implementation is definitively suggested when the data sample size or the required number of bootstrap runs is high. ```{r fitMcbyBootStrap2, eval=FALSE} weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 200, parallel = TRUE) weatherFittedBOOTParallel$estimate weatherFittedBOOTParallel$standardError ``` The parallel bootstrapping uses all the available cores on a machine by default. However, it is also possible to tune the number of threads used. Note that this should be done in R before calling the `markovchainFit` function. For example, the following code will set the number of threads to 4. ```{r fitMcbyBootStrap3, eval=FALSE} RcppParallel::setNumThreads(2) ``` For more details, please refer to \pkg{RcppParallel} web site. For all the fitting methods, the `logLikelihood` \citep{MSkuriat} denoted in Equation \ref{eq:LLH} is provided. \begin{equation} LLH = \sum_{i,j} n_{ij} * log (p_{ij}) \label{eq:LLH} \end{equation} where $n_{ij}$ is the entry of the frequency matrix and $p_{ij}$ is the entry of the transition probability matrix. ```{r fitMcbyMLE1} weatherFittedMLE$logLikelihood weatherFittedBOOT$logLikelihood ``` Confidence matrices of estimated parameters (parametric for MLE, non - parametric for BootStrap) are available as well. The `confidenceInterval` is provided with the two matrices: `lowerEndpointMatrix` and `upperEndpointMatrix`. The confidence level (CL) is 0.95 by default and can be given as an argument of the function `markovchainFit`. This is used to obtain the standard score (z-score). From classical inference theory, if $ci$ is the level of confidence required assuming normal distribution the $zscore(ci)$ solves $\Phi \left ( 1-\left(\frac{1-ci}{2}\right) \right )$ Equations \ref{eq:CIL} and \ref{eq:CIU} \citep{MSkuriat} show the `confidenceInterval` of a fitting. Note that each entry of the matrices is bounded between 0 and 1. \begin{align} LowerEndpoint_{ij} = p_{ij} - zscore (CL) * SE_{ij} \label{eq:CIL} \\ UpperEndpoint_{ij} = p_{ij} + zscore (CL) * SE_{ij} \label{eq:CIU} \end{align} ```{r confint} weatherFittedMLE$confidenceInterval weatherFittedBOOT$confidenceInterval ``` A special function, `multinomialConfidenceIntervals`, has been written in order to obtain multinomial wise confidence intervals. The code has been based on and Rcpp translation of package's \pkg{MultinomialCI} functions \cite{pkg:MultinomialCI} that were themselves based on the \cite{sison1995simultaneous} paper. ```{r multinomial} multinomialConfidenceIntervals(transitionMatrix = weatherFittedMLE$estimate@transitionMatrix, countsTransitionMatrix = createSequenceMatrix(weathersOfDays)) ``` The functions for fitting DTMC have mostly been rewritten in \proglang{C++} using \pkg{Rcpp} \cite{RcppR} since version 0.2. It is also possible to fit a DTMC object from `matrix` or `data.frame` objects as shown in following code. ```{r fitMclists} data(holson) singleMc<-markovchainFit(data=holson[,2:12],name="holson") ``` The same applies for `markovchainList` (output length has been limited). 
```{r fitMclistsFit1, output.lines=20} mcListFit<-markovchainListFit(data=holson[,2:6],name="holson") mcListFit$estimate ``` Finally, given a `list` object, it is possible to fit a `markovchain` object or to obtain the raw transition matrix. ```{r fitMclistsFit2} c1<-c("a","b","a","a","c","c","a") c2<-c("b") c3<-c("c","a","a","c") c4<-c("b","a","b","a","a","c","b") c5<-c("a","a","c",NA) c6<-c("b","c","b","c","a") mylist<-list(c1,c2,c3,c4,c5,c6) mylistMc<-markovchainFit(data=mylist) mylistMc ``` The same works for `markovchainFitList`. ```{r fitAMarkovChainListfromAlist, output.lines=15} markovchainListFit(data=mylist) ``` If any transition contains `NA`, it will be ignored in the results as the above example showed. ## Prediction The $n$-step forward predictions can be obtained using the `predict` methods explicitly written for `markovchain` and `markovchainList` objects. The prediction is the mode of the conditional distribution of $X_{t+1}$ given $X_{t}=s_{j}$, being $s_{j}$ the last realization of the DTMC (homogeneous or non-homogeneous). ### Predicting from a markovchain object The 3-days forward predictions from `markovchain` object can be generated as follows, assuming that the last two days were respectively "cloudy" and "sunny". ```{r markovchainPredict} predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"), n.ahead = 3) ``` ### Predicting from a markovchainList object Given an initial two years health status, the 5-year ahead prediction of any CCRC guest is ```{r markovchainListPredict} predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5) ``` The prediction has stopped at time sequence since the underlying non-homogeneous Markov chain has a length of four. In order to continue five years ahead, the `continue=TRUE` parameter setting makes the `predict` method keeping to use the last `markovchain` in the sequence list. ```{r markovchainListPredict2} predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE) ``` ## Statistical Tests In this section, we describe the statistical tests: assessing the Markov property (`verifyMarkovProperty`), the order (`assessOrder`), the stationary (`assessStationarity`) of a Markov chain sequence, and the divergence test for empirically estimated transition matrices (`divergenceTest`). Most of such tests are based on the $\chi ^2$ statistics. Relevant references are \cite{kullback1962tests} and \cite{anderson1957statistical}. All such tests have been designed for small samples, since it is easy to detect departures from Markov property as long as the sample size increases. In addition, the accuracy of the statistical inference functions has been questioned and will be thoroughly investigated in future versions of the package. ### Assessing the Markov property of a Markov chain sequence The `verifyMarkovProperty` function verifies whether the Markov property holds for the given chain. The test implemented in the package looks at triplets of successive observations. If $x_1, x_2, \ldots, x_N$ is a set of observations and $n_{ijk}$ is the number of times $t$ $\left(1 \le t \le N-2 \right)$ such that $x_t=i, x_{t+1}=j, x_{x+2}=k$, then if the Markov property holds $n_{ijk}$ follows a Binomial distribution with parameters $n_{ij}$ and $p_{jk}$. A classical $\chi^2$ test can check this distributional assumption, since $\sum_{i}\sum_{j}\sum_{k}\frac{(n_{ijk}-n_{ij}\hat{p_{jk}})^2}{n_{ij}\hat{p_{jk}}}\sim \chi^2\left(q \right )$ where q is the number of degrees of freedom. 
The number of degrees of freedom $q$ of the $\chi^2$ distribution is given by the formula $r-q+s-1$, where:

* $s$ denotes the number of states $i$ in the state space such that $n_{i} > 0$,
* $q$ denotes the number of pairs $(i, j)$ for which $n_{ij} > 0$, and
* $r$ denotes the number of triplets $(i, j, k)$ for which $n_{ij}n_{jk} > 0$.

```{r test1}
sample_sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a",
                     "b", "a", "a", "b", "b", "b", "a")
verifyMarkovProperty(sample_sequence)
```

### Assessing the order of a Markov chain sequence

The `assessOrder` function checks whether the given chain is of first order or of second order. For each possible present state, we construct a contingency table of the frequency of the future state for each past-to-present state transition, as shown in Table \ref{tab:order}.

\begin{table}[h]
\centering
\begin{tabular}{l | l | l | l}
\hline
past & present & future & future \\
 & & a & b \\
\hline \hline
a & a & 2 & 2\\
b & a & 2 & 2\\
\hline
\end{tabular}
\caption{Contingency table to assess the order for the present state a.}
\label{tab:order}
\end{table}

Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is of first order.

```{r test2}
data(rain)
assessOrder(rain$rain)
```

### Assessing the stationarity of a Markov chain sequence

The `assessStationarity` function assesses whether the transition probabilities of the given chain change over time. To be more specific, the chain is stationary if the following condition is met.

\begin{equation}
p_{ij}(t) = p_{ij} ~\textrm{ for all }~t
\label{eq:stationarity}
\end{equation}

For each possible state, we construct a contingency table of the estimated transition probabilities over time, as shown in Table \ref{tab:stationarity}.

\begin{table}[h]
\centering
\begin{tabular}{l | l | l}
\hline
time (t) & probability of transition to a & probability of transition to b \\
\hline \hline
1 & 0 & 1\\
2 & 0 & 1\\
. & . & . \\
. & . & . \\
. & . & . \\
16 & 0.44 & 0.56\\
\hline
\end{tabular}
\caption{Contingency table to assess the stationarity of the state a.}
\label{tab:stationarity}
\end{table}

Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is stationary.

```{r test3}
assessStationarity(rain$rain, 10)
```

### Divergence tests for empirically estimated transition matrices

This section discusses tests developed to verify whether:

1. An empirical transition matrix is consistent with a theoretical one.
2. Two or more empirical transition matrices belong to the same DTMC.

The first test is implemented by the `verifyEmpiricalToTheoretical` function. Let $f_{ij}$ be the raw transition counts; \cite{kullback1962tests} shows that $2\sum_{i=1}^{r}\sum_{j=1}^{r}f_{ij}\ln\frac{f_{ij}}{f_{i.}P\left( E_j | E_i\right)} \sim \chi^2\left ( r(r-1) \right )$.
The following example is taken from \cite{kullback1962tests}:

```{r divergence1}
sequence <- c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1,
              2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0,
              0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2,
              0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1,
              0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1)
mc <- matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8), byrow = TRUE, nrow = 3)
rownames(mc) <- colnames(mc) <- 0:2
theoreticalMc <- as(mc, "markovchain")
verifyEmpiricalToTheoretical(data = sequence, object = theoreticalMc)
```

The second one is implemented by the `verifyHomogeneity` function, inspired by \cite[section~9]{kullback1962tests}. Assuming that $i=1,2, \ldots, s$ DTMC samples are available and that the cardinality of the state space is $r$, it verifies whether the $s$ chains belong to the same unknown one. \cite{kullback1962tests} shows that its test statistic follows a chi-square law, $2\sum_{i=1}^{s}\sum_{j=1}^{r}\sum_{k=1}^{r}f_{ijk}\ln\frac{n f_{ijk}}{f_{i..}f_{.jk}} \sim \chi^2\left ( r(r-1) \right )$. The following example is also taken from \cite{kullback1962tests}:

```{r divergence2}
data(kullback)
verifyHomogeneity(inputList = kullback, verbose = TRUE)
```

## Continuous Time Markov Chains

### Intro

The \pkg{markovchain} package provides functionality for continuous time Markov chains (CTMCs). CTMCs are a generalization of discrete time Markov chains (DTMCs) in that we allow time to be continuous. We assume a finite state space $S$ (an infinite state space wouldn't fit in memory). We can think of CTMCs as Markov chains in which state transitions can happen at any time.

More formally, we would like our CTMCs to satisfy the following two properties:

* The Markov property - let $F_{X(s)}$ denote the information about $X$ up to time $s$. Let $j \in S$ and $s \leq t$. Then, $P(X(t) = j|F_{X(s)}) = P(X(t) = j|X(s))$.
* Time homogeneity - $P(X(t) = j|X(s) = k) = P(X(t-s) = j|X(0) = k)$.

If both the above properties are satisfied, the process is referred to as a time-homogeneous CTMC. If a transition occurs at time $t$, then $X(t)$ denotes the new state and $X(t)\neq X(t-)$.

Now, let $X(0)=x$ and let $T_x$ be the time a transition occurs from this state. We are interested in the distribution of $T_x$. For $s,t \geq 0$, it can be shown that $P(T_x > s+t | T_x > s) = P(T_x > t)$. This is the memoryless property that only the exponential random variable exhibits. Therefore, this is the sought distribution, and each state $s \in S$ has an exponential holding parameter $\lambda(s)$. Since $\mathrm{E}T_x = \frac{1}{\lambda(x)}$, the higher the rate $\lambda(x)$, the smaller the expected time of transitioning out of the state $x$.

However, specifying this parameter alone for each state would only paint an incomplete picture of our CTMC. To see why, consider a state $x$ that may transition to either state $y$ or $z$. The holding parameter enables us to predict when a transition may occur if we start off in state $x$, but tells us nothing about which state will be next.

To this end, we also need transition probabilities associated with the process, defined as follows (for $y \neq x$): $p_{xy} = P(X(T_x) = y | X(0) = x)$. Note that $\sum_{y \neq x} p_{xy} = 1$. Let $Q$ denote this transition matrix ($Q_{ij} = p_{ij}$). What is key here is that $T_x$ and the state $y$ are independent random variables. Let's define $\lambda(x, y) = \lambda(x) p_{xy}$. We now look at Kolmogorov's backward equation.
Let's define $P_{ij}(t) = P(X(t) = j | X(0) = i)$ for $i, j \in S$. The backward equation is given by (it can be proved) $P_{ij}(t) = \delta_{ij}e^{-\lambda(i)t} + \int_{0}^{t}\lambda(i)e^{-\lambda(i)s} \sum_{k \neq i} Q_{ik} P_{kj}(t-s) ds$. Basically, the first term is non-zero if and only if $i=j$ and represents the probability that the first transition from state $i$ occurs after time $t$. This would mean that at $t$, the state is still $i$. The second term accounts for any transitions that may occur before time $t$ and denotes the probability that at time $t$, when the smoke clears, we are in state $j$.

This equation can be represented compactly as follows: $P'(t) = AP(t)$, where $A$ is the *generator* matrix.

\[
A(i, j) = \begin{cases} \lambda(i, j) & \mbox{if } i \neq j \\ -\lambda(i) & \mbox{else.} \end{cases}
\]

Observe that the sum of each row is 0. A CTMC can be completely specified by the generator matrix.

### Stationary Distributions

The following theorem guarantees the existence of a unique stationary distribution for CTMCs. Note that $X(t)$ being irreducible and recurrent is the same as $X_n(t)$ being irreducible and recurrent.

Suppose that $X(t)$ is irreducible and recurrent. Then $X(t)$ has an invariant measure $\eta$, which is unique up to multiplicative factors. Moreover, for each $k \in S$, we have

\[\eta_k = \frac{\pi_k}{\lambda(k)}\]

where $\pi$ is the unique invariant measure of the embedded discrete time Markov chain $X_n$. Finally, $\eta$ satisfies

\[0 < \eta_j < \infty, \forall j \in S\]

and if $\sum_i \eta_i < \infty$ then $\eta$ can be normalized to get a stationary distribution.

### Estimation

Let the data set be $D = \{(s_0, t_0), (s_1, t_1), ..., (s_{N-1}, t_{N-1})\}$ where $N=|D|$. Each $s_i$ is a state from the state space $S$ and during the time $[t_i,t_{i+1}]$ the chain is in state $s_i$. Let the parameters be represented by $\theta = \{\lambda, P\}$ where $\lambda$ is the vector of holding parameters for each state and $P$ the transition matrix of the embedded discrete time Markov chain.

Then the likelihood is given by

\[
Pr(D | \theta) \propto \lambda(s_0)e^{-\lambda(s_0)(t_1-t_0)}Pr(s_1|s_0) \cdot\ldots\cdot \lambda(s_{N-2})e^{-\lambda(s_{N-2})(t_{N-1}-t_{N-2})}Pr(s_{N-1}|s_{N-2})
\]

Let $n(j|i)$ denote the number of $i$->$j$ transitions in $D$, and $n(i)$ the number of times $s_i$ occurs in $D$. Let $t(s_i)$ denote the total time the chain spends in state $s_i$. Then the MLEs are given by

\[
\hat{\lambda}(s) = \frac{n(s)}{t(s)}, \quad \hat{Pr}(j|i)=\frac{n(j|i)}{n(i)}
\]

### Expected Hitting Time

The package provides a function `ExpectedTime` to calculate the average hitting time from one state to another. Let the final state be $j$ and assume that the holding rate $q_{i} > 0$ for every state $i \neq j$ in the state space $S$. Under these conditions, the expected hitting time is equal to the minimal non-negative solution vector $p$ of the system of linear equations:

\begin{equation}
\begin{cases}
p_{k} = 0 & k = j \\
-\sum_{l \in S} q_{kl}p_{l} = 1 & k \neq j
\end{cases}
\label{eq:EHT}
\end{equation}

### Probability at time t

The package provides a function `probabilityatT` to calculate the probability of every state according to a given `ctmc` object. Here we use Kolmogorov's backward equation $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$, where $P(t)$ is the transition function at time $t$. The entry $P(t)[i][j]$ describes the probability that the state at time $t$ equals $j$ given that it was $i$ at time $t=0$.
The function takes care of the case when the `ctmc` object has a generator represented by columns. If the initial state is not provided, the function returns the whole transition matrix $P(t)$.

### Examples

To create a CTMC object, you need to provide a valid generator matrix, say $Q$. The CTMC object has the following slots: states, generator, byrow, name (look at the documentation object for further details). Consider the following example in which we aim to model the transition of a molecule from the $\sigma$ state to the $\sigma^*$ state. When in the former state, if it absorbs sufficient energy, it can make the jump to the latter state and remain there for some time before transitioning back to the original state. Let us model this by a CTMC:

```{r rCtmcInit}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates, 
                     byrow = byRow, generator = gen, 
                     name = "Molecular Transition Model")
```

To generate random CTMC transitions, we provide an initial distribution of the states. This must be in the same order as the dimnames of the generator. The output can be returned either as a list or a data frame.

```{r rctmcRandom0}
statesDist <- c(0.8, 0.2)
rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df",
      include.T0 = FALSE)
```

$n$ represents the number of samples to generate. There is an optional argument $T$ for `rctmc`. It represents the time of termination of the simulation. To use this feature, set $n$ to a very high value, say `Inf` (since we do not know the number of transitions beforehand) and set $T$ accordingly.

```{r ctmcRandom1}
statesDist <- c(0.8, 0.2)
rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2)
```

To obtain the stationary distribution, simply invoke the `steadyStates` function.

```{r rctmcSteadyStates}
steadyStates(molecularCTMC)
```

For fitting, use the `ctmcFit` function. It returns the MLE values for the parameters along with the confidence intervals.

```{r rctmcFitting}
data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), 
             c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9))
ctmcFit(data)
```

One approach to obtain the generator matrix is to apply the `logm` function from the \pkg{expm} package on a transition matrix. Numerical issues may arise, see \cite{israel2001finding}. For example, applying the standard `method` ('Higham08') on `mcWeather` raises an error, whilst the alternative method (eigenvalue decomposition) is OK. The following code estimates the generator matrix of the `mcWeather` transition matrix.

```{r mcWeatherQ}
mcWeatherQ <- expm::logm(mcWeather@transitionMatrix, method = 'Eigen')
mcWeatherQ
```

Therefore, the "half-day" transition probability for the mcWeather DTMC is

```{r mcWeatherHalfDay}
mcWeatherHalfDayTM <- expm::expm(mcWeatherQ * .5)
mcWeatherHalfDay <- new("markovchain", transitionMatrix = mcWeatherHalfDayTM,
                        name = "Half Day Weather Transition Matrix")
mcWeatherHalfDay
```

The \pkg{ctmcd} package \citep{pkg:ctmcd} provides various functions to estimate the generator matrix (GM) of a CTMC process using different methods. The following code provides a way to join \pkg{markovchain} and \pkg{ctmcd} computations.

```{r ctmcd1}
require(ctmcd)
require(expm)
# defines a function to transform a GM into a TM
gm_to_markovchain <- function(object, t = 1) {
  if(!(class(object) %in% c("gm", "matrix", "Matrix")))
    stop("Error! Expecting either a matrix or a gm object")
  if(class(object) %in% c("matrix", "Matrix")) generator_matrix <- object else
    generator_matrix <- as.matrix(object[["par"]])
  # must add importClassesFrom("markovchain", markovchain) in the NAMESPACE
  # must add importFrom(expm, "expm")
  transitionMatrix <- expm(generator_matrix * t)
  out <- as(transitionMatrix, "markovchain")
  return(out)
}
# loading ctmcd dataset
data(tm_abs)
gm0 = matrix(1, 8, 8) # initializing
diag(gm0) = 0
diag(gm0) = -rowSums(gm0)
gm0[8,] = 0
gmem = gm(tm_abs, te = 1, method = "EM", gmguess = gm0) # estimating GM
mc_at_2 = gm_to_markovchain(object = gmem, t = 2) # converting to TM at time 2
```

## Pseudo-Bayesian Estimation

\cite{Hu2002} shows an empirical quasi-Bayesian method to estimate transition matrices, given an empirical $\hat{P}$ transition matrix (estimated using the classical approach) and an a-priori estimate $Q$. In particular, each row of the matrix is estimated using the linear combination $\alpha \cdot Q+\left(1-\alpha\right) \cdot \hat{P}$, where $\alpha$ is defined for each row as Equation \ref{eq:pseudobayes} shows:

\begin{equation}
\left\{\begin{matrix}
\hat{\alpha_i}=\frac{\hat{K_i}}{v\left(i \right )+\hat{K_i}}\\ 
\hat{K_i}=\frac{v\left(i \right)^2 - \sum_{j}Y_{ij}^2}{\sum_{j}(Y_{ij}-v\left(i \right)*q_{ij})^2}
\end{matrix}\right.
\label{eq:pseudobayes}
\end{equation}

The following code returns the pseudo-Bayesian estimate of the transition matrix:

```{r pseudobayes}
pseudoBayesEstimator <- function(raw, apriori){
  v_i <- rowSums(raw)
  K_i <- numeric(nrow(raw))
  sumSquaredY <- rowSums(raw^2)
  # get numerator
  K_i_num <- v_i^2 - sumSquaredY
  # get denominator
  VQ <- matrix(0, nrow = nrow(apriori), ncol = ncol(apriori))
  for (i in 1:nrow(VQ)) {
    VQ[i,] <- v_i[i] * apriori[i,]
  }
  K_i_den <- rowSums((raw - VQ)^2)
  K_i <- K_i_num / K_i_den
  # get the alpha vector
  alpha <- K_i / (v_i + K_i)
  # empirical transition matrix
  Emp <- raw / rowSums(raw)
  # get the estimate
  out <- matrix(0, nrow = nrow(raw), ncol = ncol(raw))
  for (i in 1:nrow(out)) {
    out[i,] <- alpha[i] * apriori[i,] + (1 - alpha[i]) * Emp[i,]
  }
  return(out)
}
```

We then apply it to a simulated two-state example:

```{r pseudobayes2}
trueMc <- as(matrix(c(0.1, .9, .7, .3), nrow = 2, byrow = TRUE), "markovchain")
aprioriMc <- as(matrix(c(0.5, .5, .5, .5), nrow = 2, byrow = TRUE), "markovchain")

smallSample <- rmarkovchain(n = 20, object = trueMc)
smallSampleRawTransitions <- createSequenceMatrix(stringchar = smallSample)
pseudoBayesEstimator(
  raw = smallSampleRawTransitions,
  apriori = aprioriMc@transitionMatrix
) - trueMc@transitionMatrix

biggerSample <- rmarkovchain(n = 100, object = trueMc)
biggerSampleRawTransitions <- createSequenceMatrix(stringchar = biggerSample)
pseudoBayesEstimator(
  raw = biggerSampleRawTransitions,
  apriori = aprioriMc@transitionMatrix
) - trueMc@transitionMatrix

bigSample <- rmarkovchain(n = 1000, object = trueMc)
bigSampleRawTransitions <- createSequenceMatrix(stringchar = bigSample)
pseudoBayesEstimator(
  raw = bigSampleRawTransitions,
  apriori = aprioriMc@transitionMatrix
) - trueMc@transitionMatrix
```

## Bayesian Estimation

The \pkg{markovchain} package provides functionality for maximum a posteriori (MAP) estimation of the chain parameters (at the time of writing this document, only first order models are supported) by Bayesian inference. It also computes the probability of observing a new data set, given a (different) data set. This vignette provides the mathematical description of the methods employed by the package.

### Notation and set-up

The data is denoted by $D$, the model parameters (transition matrix) by $\theta$.
The object of interest is $P(\theta | D)$ (posterior density). $\mathcal{A}$ represents an alphabet class, each of whose members represents a state of the chain. Therefore

\[D = s_0 s_1 ... s_{N-1}, s_t \in \mathcal{A}\]

where $N$ is the length of the data set. Also,

\[\theta = \{p(s|u), s \in \mathcal{A}, u \in \mathcal{A} \}\]

where $\sum_{s \in \mathcal{A}} p(s|u) = 1$ for each $u \in \mathcal{A}$.

Our objective is to find $\theta$ which maximizes the posterior. That is, if our solution is denoted by $\hat{\theta}$, then

\[\hat{\theta} = \underset{\theta}{argmax}P(\theta | D)\]

where the search space is the set of right stochastic matrices of dimension $|\mathcal{A}| \times |\mathcal{A}|$.

$n(u, s)$ denotes the number of times the word $us$ occurs in $D$ and $n(u)=\sum_{s \in \mathcal{A}}n(u, s)$. The hyper-parameters are similarly denoted by $\alpha(u, s)$ and $\alpha(u)$ respectively.

### Methods

Given $D$, its likelihood conditioned on the observed initial state in $D$ is given by

\[P(D|\theta) = \prod_{s \in \mathcal{A}} \prod_{u \in \mathcal{A}} p(s|u)^{n(u, s)}\]

Conjugate priors are used to model the prior $P(\theta)$. The reasons are twofold:

1. Exact expressions can be derived for the MAP estimates, expectations and even variances.
2. Model order selection/comparison can be implemented easily (available in a future release of the package).

The hyper-parameters determine the form of the prior distribution, which is a product of Dirichlet distributions

\[P(\theta) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{\alpha(u, s) - 1} \Big\}\]

where $\Gamma(.)$ is the Gamma function. The hyper-parameters are specified using the `hyperparam` argument in the `markovchainFit` function. If this argument is not specified, then a default value of 1 is assigned to each hyper-parameter, resulting in the prior distribution of each chain parameter being uniform over $[0,1]$.
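As an aside, the following minimal sketch (plain base R, not a package function; the two-state hyper-parameter matrix and the chunk name are illustrative assumptions) draws one transition matrix from the product-of-Dirichlet prior above, using the standard normalized-Gamma construction:

```{r priorSamplingSketch, eval=FALSE}
# Minimal sketch: sample one transition matrix from the Dirichlet prior
# implied by a hyper-parameter matrix (each row drawn independently).
rDirichletRow <- function(alpha) {
  g <- rgamma(length(alpha), shape = alpha)  # independent Gamma(alpha_s, 1) draws
  g / sum(g)                                 # normalizing yields a Dirichlet(alpha) draw
}
hyper <- matrix(1, nrow = 2, ncol = 2,
                dimnames = list(c("a", "b"), c("a", "b")))  # default: all ones
set.seed(1)
t(apply(hyper, 1, rDirichletRow))  # each row sums to one
```

Each row of the sampled matrix sums to one and, with all hyper-parameters equal to 1, is uniformly distributed over the probability simplex.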
Given the likelihood and the prior as described above, the evidence $P(D)$ is simply given by

\[P(D) = \int P(D|\theta) P(\theta) d\theta\]

which simplifies to

\[
P(D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u))} \Big\}
\]

Using Bayes' theorem, the posterior now becomes (thanks to the choice of conjugate priors)

\[
P(\theta | D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(n(u) + \alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{n(u, s) + \alpha(u, s) - 1} \Big\}
\]

Since this is again a product of Dirichlet distributions, the marginal distribution of a particular parameter $P(s|u)$ of our chain is given by

\[
P(s|u) \sim Beta(n(u, s) + \alpha(u, s), n(u) + \alpha(u) - n(u, s) - \alpha(u, s))
\]

Thus, the MAP estimate $\hat{\theta}$ is given by

\[
\hat{\theta} = \Big\{ \frac{n(u, s) + \alpha(u, s) - 1}{n(u) + \alpha(u) - |\mathcal{A}|}, s \in \mathcal{A}, u \in \mathcal{A} \Big\}
\]

The function also returns the expected value, given by

\[
\text{E}_{\text{post}} p(s|u) = \Big\{ \frac{n(u, s) + \alpha(u, s)}{n(u) + \alpha(u)}, s \in \mathcal{A}, u \in \mathcal{A} \Big\}
\]

The variance is given by

\[
\text{Var}_{\text{post}} p(s|u) = \frac{n(u, s) + \alpha(u, s)}{(n(u) + \alpha(u))^2} \frac{n(u) + \alpha(u) - n(u, s) - \alpha(u, s)}{n(u) + \alpha(u) + 1}
\]

The square root of this quantity is the standard error, which is returned by the function. The confidence intervals are constructed by computing the inverse of the beta integral.

### Predictive distribution

Given the old data set, the probability of observing new data is $P(D'|D)$ where $D'$ is the new data set. Let $m(u, s), m(u)$ denote the corresponding counts for the new data. Then,

\[
P(D'|D) = \int P(D' | \theta) P(\theta | D) d\theta
\]

We already know the expressions for both quantities in the integral, and it turns out to be similar to evaluating the evidence

\[
P(D'|D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + m(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u) + m(u))} \Big\}
\]

### Choosing the hyper-parameters

The hyper-parameters model the shape of the parameters' prior distribution. These must be provided by the user. The package offers functionality to translate a given prior belief transition matrix into the hyper-parameter matrix. It is assumed that this belief matrix corresponds to the mean value of the parameters. Since the relation

\[
\text{E}_{\text{prior}} p(s | u) = \frac{\alpha(u, s)}{\alpha(u)}
\]

holds, the function accepts as input the belief matrix as well as a scaling vector (which serves as a proxy for $\alpha(.)$) and proceeds to compute $\alpha(., .)$.

Alternatively, the function accepts a data sample and infers the hyper-parameters from it. Since the mode of a parameter (with respect to the prior distribution) is proportional to one less than the corresponding hyper-parameter, we set

\[
\alpha(u, s) - 1 = m(u, s)
\]

where $m(u, s)$ is the $u\rightarrow s$ transition count in the data sample. This is regarded as a 'fake count' which helps $\alpha(u, s)$ reflect knowledge of the data sample.
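To make these formulas concrete, the following minimal sketch (plain base R with illustrative counts and hyper-parameters, not package code) computes the MAP estimate, posterior expectation and standard error for the transitions out of a single state $u$ with three possible successor states:

```{r mapFormulaSketch, eval=FALSE}
# Minimal sketch: posterior summaries for the transitions out of one state u,
# following the MAP, expectation and variance formulas above.
n_us     <- c(10, 6, 4)  # assumed transition counts n(u, s)
alpha_us <- c(1, 1, 1)   # assumed hyper-parameters alpha(u, s)
n_u <- sum(n_us); alpha_u <- sum(alpha_us)

mapEstimate  <- (n_us + alpha_us - 1) / (n_u + alpha_u - length(n_us))
postExpected <- (n_us + alpha_us) / (n_u + alpha_u)
postVariance <- (n_us + alpha_us) / (n_u + alpha_u)^2 *
  (n_u + alpha_u - n_us - alpha_us) / (n_u + alpha_u + 1)
rbind(mapEstimate, postExpected, standardError = sqrt(postVariance))
```

Note that with all hyper-parameters equal to 1 the MAP row coincides with the maximum likelihood estimate, consistent with the remark in the usage section below.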
### Usage and examples

```{r loadAndDoExample}
weatherStates <- c("sunny", "cloudy", "rain")
byRow <- TRUE
weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1,
                                 0.3, 0.4, 0.3,
                                 0.2, 0.4, 0.4),
                        byrow = byRow, nrow = 3,
                        dimnames = list(weatherStates, weatherStates))
mcWeather <- new("markovchain", states = weatherStates,
                 byrow = byRow, transitionMatrix = weatherMatrix,
                 name = "Weather")
weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny")
```

For the purpose of this section, we shall continue to use the weather of days example introduced in the main vignette of the package (reproduced above for convenience).

Let us invoke the fit function to estimate the MAP parameters with 92\% confidence bounds and hyper-parameters as shown below, based on the first 200 days of the weather data. Additionally, let us find out what the probability is of observing the weather data for the next 165 days. The usage would be as follows.

```{r MAPFit}
hyperMatrix <- matrix(c(1, 1, 2,
                        3, 2, 1,
                        2, 2, 3),
                      nrow = 3, byrow = TRUE,
                      dimnames = list(weatherStates, weatherStates))
markovchainFit(weathersOfDays[1:200], method = "map",
               confidencelevel = 0.92, hyperparam = hyperMatrix)

predictiveDistribution(weathersOfDays[1:200],
                       weathersOfDays[201:365], hyperparam = hyperMatrix)
```

The results should not change after permuting the dimensions of the matrix.

```{r MAPFit2}
hyperMatrix2 <- hyperMatrix[c(2, 3, 1), c(2, 3, 1)]
markovchainFit(weathersOfDays[1:200], method = "map",
               confidencelevel = 0.92, hyperparam = hyperMatrix2)

predictiveDistribution(weathersOfDays[1:200],
                       weathersOfDays[201:365], hyperparam = hyperMatrix2)
```

Note that the predictive probability is very small. However, this can be useful when comparing model orders.

Suppose we have an idea of the (prior) transition matrix corresponding to the expected value of the parameters, and have a data set from which we want to deduce the MAP estimates. We can infer the hyper-parameters from this known transition matrix itself, and use this to obtain our MAP estimates.

```{r inferHyperparam}
inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10))
```

Alternatively, we can use a data sample to infer the hyper-parameters.

```{r inferHyperparam2}
inferHyperparam(data = weathersOfDays[1:15])
```

In order to use the inferred hyper-parameter matrices, we do

```{r inferHyperparam3}
hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10))
hyperMatrix3 <- hyperMatrix3$scaledInference

hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15])
hyperMatrix4 <- hyperMatrix4$dataInference
```

Now we can safely use `hyperMatrix3` and `hyperMatrix4` with `markovchainFit` (in the `hyperparam` argument).

If we don't provide any hyper-parameters, the prior is uniform and the MAP estimate coincides with the maximum likelihood estimate.

```{r MAPandMLE}
data(preproglucacon)
preproglucacon <- preproglucacon[[2]]

MLEest <- markovchainFit(preproglucacon, method = "mle")
MAPest <- markovchainFit(preproglucacon, method = "map")

MLEest$estimate
MAPest$estimate
```

# Applications {#sec:applications}

This section shows applications of DTMCs in various fields.

## Weather forecasting {#app:weather}

Markov chains provide a simple model to predict the next day's weather given the current meteorological condition. The first application herewith shown is the "Land of Oz example" from \cite{landOfOz}, the second is the "Alofi Island Rainfall" from \cite{averyHenderson}.
### Land of Oz {#sec:wfLandOfOz}

The Land of Oz is acknowledged not to have ideal weather conditions at all: the weather is snowy or rainy very often and, moreover, there are never two nice days in a row. Consider three weather states: rainy, nice and snowy. Let the transition matrix be as in the following:

```{r weatPred1}
mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"),
            transitionMatrix = matrix(c(0.5, 0.25, 0.25,
                                        0.5, 0, 0.5,
                                        0.25, 0.25, 0.5),
                                      byrow = T, nrow = 3))
```

Given that today it is a nice day, the corresponding stochastic row vector is $w_{0}=(0\:,1\:,0)$ and the forecasts after 1, 2 and 3 days are given by

```{r weatPred2}
W0 <- t(as.matrix(c(0, 1, 0)))
W1 <- W0 * mcWP; W1

W2 <- W0 * (mcWP ^ 2); W2

W3 <- W0 * (mcWP ^ 3); W3
```

As can be seen from $w_{1}$, if today is a nice day in the Land of Oz, tomorrow it will rain or snow with probability 1. One week later, the prediction can be computed as

```{r weatPred3}
W7 <- W0 * (mcWP ^ 7)
W7
```

The steady state of the chain can be computed by means of the `steadyStates` method.

```{r weatPred4}
q <- steadyStates(mcWP)
q
```

Note that, from the seventh day on, the predicted probabilities are substantially equal to the steady state of the chain and they don't depend on the starting point, as the following code shows.

```{r weatPred5}
R0 <- t(as.matrix(c(1, 0, 0)))
R7 <- R0 * (mcWP ^ 7); R7

S0 <- t(as.matrix(c(0, 0, 1)))
S7 <- S0 * (mcWP ^ 7); S7
```

### Alofi Island Rainfall {#sec:wfAlofi}

Alofi Island daily rainfall data were recorded from January 1st, 1987 until December 31st, 1989 and classified into three states: "0" (no rain), "1-5" (above zero and up to 5 mm) and "6+" (more than 5 mm). The corresponding dataset is provided within the \pkg{markovchain} package.

```{r Alofi1}
data("rain", package = "markovchain")
table(rain$rain)
```

The underlying transition matrix is estimated as follows.

```{r Alofi2}
mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate
mcAlofi
```

The long-term daily rainfall distribution is obtained by means of the `steadyStates` method.

```{r Alofi3}
steadyStates(mcAlofi)
```

## Finance and Economics {#app:fin}

Other relevant applications of DTMCs can be found in Finance and Economics.

### Finance {#fin:fin}

Credit rating transitions have been successfully modeled with discrete time Markov chains. Some rating agencies publish transition matrices that show the empirical transition probabilities across credit ratings. The example that follows comes from the \pkg{CreditMetrics} \proglang{R} package \citep{CreditMetricsR}, carrying Standard \& Poor's published data.

```{r ratings1}
rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D")
creditMatrix <- matrix(
  c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01,
    0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01,
    0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06,
    0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18,
    0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06,
    0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20,
    0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79,
    0, 0, 0, 0, 0, 0, 0, 100
  )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE)
```

It is easy to convert such matrices into `markovchain` objects and to perform some analyses.

```{r ratings2}
creditMc <- new("markovchain", transitionMatrix = creditMatrix,
                name = "S&P Matrix")
absorbingStates(creditMc)
```

### Economics {#fin:ec}

For a recent application of \pkg{markovchain} in Economics, see \cite{manchesterR}.

A dynamic system generates two kinds of economic effects \citep{bardPpt}:
1. those incurred when the system is in a specified state, and
2. those incurred when the system makes a transition from one state to another.

Let the monetary amount of being in a particular state be represented as an m-dimensional column vector $c^{\rm{S}}$, and let the monetary amount of a transition be embodied in a $C^{\rm{R}}$ matrix in which each component specifies the monetary amount of going from state $i$ to state $j$ in a single step. Hence, Equation \@ref(eq:cost) represents the monetary amount of being in state $i$.

\begin{equation}
{c_i} = c_i^{\rm{S}} + \sum\limits_{j = 1}^m {C_{ij}^{\rm{R}}} {p_{ij}}.
\label{eq:cost}
\end{equation}

Let $\bar c = \left[ c_i \right]$ and let $e_i$ be the vector valued 1 in the initial state and 0 in all others. Then, if $f_n$ is the random variable representing the economic return associated with the stochastic process at time $n$, Equation \@ref(eq:return) holds:

\begin{equation}
E\left[ {{f_n}\left( {{X_n}} \right)|{X_0} = i} \right] = {e_i}{P^n}\bar c.
\label{eq:return}
\end{equation}

The following example assumes that a telephone company models the transition probabilities between customer/non-customer status by matrix $P$ and the cost associated to states by matrix $M$.

```{r economicAnalysis1}
statesNames <- c("customer", "non customer")
P <- zeros(2); P[1, 1] <- .9; P[1, 2] <- .1; P[2, 2] <- .95; P[2, 1] <- .05;
rownames(P) <- statesNames; colnames(P) <- statesNames
mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company")
M <- zeros(2); M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0
```

If the average revenue for an existing customer is +100, the cost per state is computed as follows.

```{r economicAnalysis2}
c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,]
c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,]
```

For an existing customer, the expected gain (loss) at the fifth year is given by the following code.

```{r economicAnalysis3}
as.numeric((c(1, 0) * mcP ^ 5) %*% (as.vector(c(c1, c2))))
```

## Actuarial science {#app:act}

Markov chains are widely applied in the field of actuarial science. Two classical applications are policyholders' distribution across Bonus Malus classes in Motor Third Party Liability (MTPL) insurance (Section \@ref(sec:bm)) and health insurance pricing and reserving (Section \@ref(sec:hi)).

### MTPL Bonus Malus {#sec:bm}

Bonus Malus (BM) contracts grant the policyholder a discount (or a penalty) as a function of the number of claims in the experience period. The discount (or penalty) is applied on a premium that already allows for known (a priori) policyholder characteristics \citep{denuit2007actuarial} and it usually depends on vehicle, territory, the demographic profile of the policyholder, and policy coverage depth (deductible and policy limits).

Since the proposed BM level depends on the claims in the previous period, it can be modeled by a discrete Markov chain. A very simplified example follows. Assume a BM scale from 1 to 5, where 4 is the starting level. The evolution rules are shown in Equation \ref{eq:BM}:

\begin{equation}
bm_{t + 1} = \max \left( {1,bm_{t} - 1} \right)*\left( {\tilde N = 0} \right) + \min \left( {5,bm_{t} + 2*\tilde N} \right)*\left( {\tilde N \ge 1} \right).
\label{eq:BM}
\end{equation}

The number of claims $\tilde N$ is a random variable that is assumed to be Poisson distributed.
```{r bonusMalus1}
getBonusMalusMarkovChain <- function(lambda) {
  bmMatr <- zeros(5)
  bmMatr[1, 1] <- dpois(x = 0, lambda)
  bmMatr[1, 3] <- dpois(x = 1, lambda)
  bmMatr[1, 5] <- 1 - ppois(q = 1, lambda)

  bmMatr[2, 1] <- dpois(x = 0, lambda)
  bmMatr[2, 4] <- dpois(x = 1, lambda)
  bmMatr[2, 5] <- 1 - ppois(q = 1, lambda)

  bmMatr[3, 2] <- dpois(x = 0, lambda)
  bmMatr[3, 5] <- 1 - dpois(x = 0, lambda)

  bmMatr[4, 3] <- dpois(x = 0, lambda)
  bmMatr[4, 5] <- 1 - dpois(x = 0, lambda)

  bmMatr[5, 4] <- dpois(x = 0, lambda)
  bmMatr[5, 5] <- 1 - dpois(x = 0, lambda)

  stateNames <- as.character(1:5)
  out <- new("markovchain", transitionMatrix = bmMatr,
             states = stateNames, name = "BM Matrix")
  return(out)
}
```

Assuming that the a-priori claim frequency per car-year is 0.05 in the class (the class being the group of policyholders that share the same common characteristics), the underlying BM transition matrix and its underlying steady state are as follows.

```{r bonusMalus2}
bmMc <- getBonusMalusMarkovChain(0.05)
as.numeric(steadyStates(bmMc))
```

If the underlying BM coefficients of the class are 0.5, 0.7, 0.9, 1.0, 1.25, this means that the average BM coefficient applied in the long run to the class is given by

```{r bonusMalus3}
sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25))
```

This means that the average premium paid by policyholders in the portfolio almost halves in the long run.

### Health insurance example {#sec:hi}

Actuaries quantify the risk inherent in insurance contracts by evaluating the premium of insurance contracts to be sold (therefore covering future risk) and by evaluating the actuarial reserves of existing portfolios (the liabilities in terms of benefits or claims payments due to policyholders arising from previously sold contracts); see \cite{deshmukh2012multiple} for details.

An applied example can be performed using the data from \cite{de2016assicurazioni} that has been saved in the `extdata` folder.

```{r healthIns6}
ltcDemoPath <- system.file("extdata", "ltdItaData.txt",
                           package = "markovchain")
ltcDemo <- read.table(file = ltcDemoPath, header = TRUE,
                      sep = ";", dec = ".")
head(ltcDemo)
```

The data shows the probabilities of transition among the states (A)ctive, (I)ll and (D)ead. It is easy to complete the transition matrix.

```{r healthIns7}
ltcDemo <- transform(ltcDemo,
                     pIA = 0,
                     pII = 1 - pID,
                     pDD = 1,
                     pDA = 0,
                     pDI = 0)
```

Now we build a function that returns the transition matrix during the $(t+1)$-th year, assuming that the subject has attained age $t$.

```{r healthIns8}
possibleStates <- c("A", "I", "D")
getMc4Age <- function(age) {
  transitionsAtAge <- ltcDemo[ltcDemo$age == age, ]

  myTransMatr <- matrix(0, nrow = 3, ncol = 3,
                        dimnames = list(possibleStates, possibleStates))
  myTransMatr[1, 1] <- transitionsAtAge$pAA[1]
  myTransMatr[1, 2] <- transitionsAtAge$pAI[1]
  myTransMatr[1, 3] <- transitionsAtAge$pAD[1]
  myTransMatr[2, 2] <- transitionsAtAge$pII[1]
  myTransMatr[2, 3] <- transitionsAtAge$pID[1]
  myTransMatr[3, 3] <- 1

  myMc <- new("markovchain", transitionMatrix = myTransMatr,
              states = possibleStates,
              name = paste("Age", age, "transition matrix"))
  return(myMc)
}
```

Since transitions are not homogeneous across ages, we use a `markovchainList` object to describe the transition probabilities for a subject starting at age 100.
```{r healthIns8-prob}
getFullTransitionTable <- function(age){
  ageSequence <- seq(from = age, to = 120)
  k <- 1
  myList <- list()
  for (i in ageSequence) {
    mc_age_i <- getMc4Age(age = i)
    myList[[k]] <- mc_age_i
    k <- k + 1
  }
  myMarkovChainList <- new("markovchainList", markovchains = myList,
                           name = paste("TransitionsSinceAge", age, sep = ""))
  return(myMarkovChainList)
}
transitionsSince100 <- getFullTransitionTable(age = 100)
```

We can use such transitions to simulate ten life trajectories for a subject that begins "active" (A) aged 100:

```{r healthIns9}
rmarkovchain(n = 10, object = transitionsSince100,
             what = "matrix", t0 = "A", include.t0 = TRUE)
```

Let's consider 1000 simulated life trajectories for a healthy subject aged 80. We can compute the expected time the subject will spend disabled, starting active at age 80.

```{r healthIns10}
transitionsSince80 <- getFullTransitionTable(age = 80)
lifeTrajectories <- rmarkovchain(n = 1e3, object = transitionsSince80,
                                 what = "matrix", t0 = "A", include.t0 = TRUE)
temp <- matrix(0, nrow = nrow(lifeTrajectories), ncol = ncol(lifeTrajectories))
temp[lifeTrajectories == "I"] <- 1
expected_period_disabled <- mean(rowSums((temp)))
expected_period_disabled
```

Assuming that the health insurance will pay a benefit of 12000 per year of disability and that the real interest rate is 0.02, we can compute the lump-sum premium at age 80.

```{r healthIns11}
mean(rowMeans(12000 * temp %*% (matrix((1 + 0.02)^-seq(from = 0, to = ncol(temp) - 1)))))
```

## Sociology {#app:sociology}

Markov chains have been actively used to model progressions and regressions between social classes. The first study was performed by \cite{glassHall}, while a more recent application can be found in \cite{blandenEtAlii}. The table that follows shows the income quartile of the father when the son was 16 (in 1984) and the income quartile of the son when aged 30 (in 2000) for the 1970 cohort.

```{r blandenEtAlii}
data("blanden")
mobilityMc <- as(blanden, "markovchain")
mobilityMc
```

The underlying transition graph is plotted in Figure \@ref(fig:mobility).

```{r mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."}
plot(mobilityMc, main = '1970 mobility', vertex.label.cex = 2,
     layout = layout.fruchterman.reingold)
```

The steady state distribution is computed as follows. Since the states are income quartiles, the steady state probabilities are evenly equal to 0.25.

```{r blandenEtAlii3}
round(steadyStates(mobilityMc), 2)
```

## Genetics and Medicine {#sec:gen}

This section contains two examples: the first shows the use of Markov chain models in genetics, the second shows an application of Markov chains in modelling disease dynamics.

### Genetics {#sec:genetics}

\cite{averyHenderson} discusses the use of Markov chains in modelling the preproglucacon gene's protein base sequence. The `preproglucacon` dataset in \pkg{markovchain} contains the corresponding base sequence.

```{r preproglucacon1}
data("preproglucacon", package = "markovchain")
```

It is possible to model the transition probabilities between bases as shown in the following code.

```{r preproglucacon2}
mcProtein <- markovchainFit(preproglucacon$preproglucacon,
                            name = "Preproglucacon MC")$estimate
mcProtein
```

### Medicine {#sec:medicine}

Discrete-time Markov chains are also employed to study the progression of chronic diseases. The following example is taken from \cite{craigSendi}. Starting from six-month follow-up data, the maximum likelihood estimation of the monthly transition matrix is obtained.
This transition matrix aims to describe the monthly progression of CD4-cell counts of HIV infected subjects.

```{r epid1}
craigSendiMatr <- matrix(c(682, 33, 25,
                           154, 64, 47,
                           19, 19, 43), byrow = T, nrow = 3)
hivStates <- c("0-49", "50-74", "75-UP")
rownames(craigSendiMatr) <- hivStates
colnames(craigSendiMatr) <- hivStates
craigSendiTable <- as.table(craigSendiMatr)
mcM6 <- as(craigSendiTable, "markovchain")
mcM6@name <- "Zero-Six month CD4 cells transition"
mcM6
```

As shown in the paper, the second step consists in the decomposition of $M_{6}=V \cdot D \cdot V^{-1}$ in order to obtain $M_{1}$ as $M_{1}=V \cdot D^{1/6} \cdot V^{-1}$.

```{r epid2}
eig <- eigen(mcM6@transitionMatrix)
D <- diag(eig$values)
```

```{r epid3}
V <- eig$vectors
V %*% D %*% solve(V)

d <- D ^ (1/6)
M <- V %*% d %*% solve(V)
mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates)
```

# Discussion, issues and future plans

The \pkg{markovchain} package has been designed in order to provide easy handling of DTMCs and communication with alternative packages. The package has seen several improvements in recent years: many functions have been added, the software has been ported to \pkg{Rcpp} \citep{RcppR}, and many methodological refinements have improved its reliability.

# Acknowledgments {#sec:aknowledgements}

The package was selected for Google Summer of Code 2015 support. The authors wish to thank Michael Cole, Tobi Gutman and Mildenberger Thoralf for their suggestions and bug checks. A final thanks also to Dr. Simona C. Minotti and Dr. Mirko Signorelli for their support in drafting this version of the vignettes.

\clearpage

# References #
1"]׉nqX`^fy.yY l ]: m/QW9ajj^rEGj -fYZd~/('0!F3`\Lh&O#H>|5z4$c(37@_I*+$>~0 DK ; [Aa 4(x$Ьu-JM6 R[+Tt \ky _F:[R,&'W&oH=aTi -D{L̽U_vedut׆`f{ _U;ƅFz:v+-; #YFٽdm-d*(5&:+Le^͠63W L;.Uk2FL/"~i!0e{$o&ӆƥM>^>9>+щEp8lr`\F,pd!!fL4 19njR]>z?}>`iݏ,"EBt[T[ᐨlmEb]\.{0F5>Tմ{w)z&O#yNq'q>̲=a6ĨLb<iC*55l7@`C-BxVXF탌Ya r;j6dмC+c%4BH{C={`0fQ]Vm{PZRA2ٝ:nr`L3f>Psc4l k7,ҺIf!9u]F&[Fx01TCaK;2=8@4B>gت3/jWϷ{Pm\SP֋xҕ؆-e(%hd1[ b]LtN/ %iUyQ-Fڶ :/v^|Y/|z1É{žEnΣjzsa)pkX\%]Zax´XVuL7h@\C1]X?:<|m,^[ehVѮvb{#J|9ڔ﮿kq634BQIY ϴv(,޲ڊC1WZ7|+4mo=5CznvKivؓQk&nXkl>;:}v͎kTꐥjN֠,Ns;tY]00~&ϙ} ?|aS}7  mO0ELfN0U*{ n/4}n֪0)F qvqfg ;i 0X`ɤP_xU#]p#nr|s1JެA+o+]@?`mB&B:n3, tr[?}?n.U]d?Ҿp?R֧:Caf; vF[\ ~jx`,r H+hucsqb1<s 6Wg[!هz`r0 U['_8ûG |:ɖLoqf~08[ jGPypY 1em8 # vzRҸ[=ޖVgy~Wk> u 5qAװ;ͰJl 㷏?}{+P~j(w6iCa Bh}G62ԹnvltϦ6MjӢVqEquu"\Fx;c=7X?p6s,XA0͌iEVJ\[z[@yݧ'5aنBg(M`;zfs~F]nJzy=0ԝ+]7y d?7vCP[ -kNN/}>]f.̄2=}BNWʸl۟, ynJ'Vtw"׼,&mP_,3qcߝDFo:r VuL/ >wvPosuzᕯ0u*{xqa:պ76\[7B=Q.(;m6(|ut)ʢﰊ,ٝl;Aw ?; f^ 0t9.y7ݫ7,\MѺ8;O D;f~im.tKr:߹QOVGώϟ>??H3a/Чh\in6 I eKH 930zOO#Uv#Mxp^ nrq ,ڸ wpO6<;qn8w}3 sr{w܎Gn.Jڅ$)%C˞ldۨ:u?q5]; m:;۸v9[Nsum=#1Q6$:x" p wfDVKLcV%~\WhvDٓE1+<@b~@|`ׁ8/(1Ͽ-kzi )/ Ll2O)^LfòG4_ ˏIF>C7oJ38N y:2G[_+?.Vܘʞ=O s#:姷G)NIݣ?bd WdFfm';´<)./s# ֏شO4\Ԥg_pc|ۈ4o~\^Q1=0hSԻ~?jOc;O @jI+kC(Ky]8kx$q4vC#鋹N^{5z˞hA08u8K}ln!W’sM~9$:#%Z|+^mq%5Viv5Z\qnI_>\hp %?#7endstream endobj 97 0 obj << /Subtype /XML /Type /Metadata /Length 1593 >> stream GPL Ghostscript 9.20 2021-05-17T18:17:51+02:00 2021-05-17T18:17:51+02:00 LaTeX via pandoc Google Summer of Code 2017 AdditionsVandit Jain endstream endobj 98 0 obj << /Filter /FlateDecode /Length 4610 >> stream x\Ks#9rYag]+ffe h|h(jzK<NQIGZB%/3QӤm~rrtN~:$w|{A#ԄFp&|mvc\=n-.麟ijl= x$[/Mge޶.(\m8o˔a=]D7|ixl;mUӯl[p2)^hZ?̄c:ESd&U瓋[CVs#e)N'3{={~ Ϙ&1&\5R0L@RzL&N˔XZ2X#l i&v4|yn;DD4o8_L |XS7CA.)Q|lQ.byVf*,o;ʳz*\`]T-gi[E_kT9;D{օxy[!FPT4әRMɟn CjmdZ;EHHtE.&AFNh͊?ğf4 ~U `_ sƌq%S#*&VȉI'<)ƛF`<$D[R G# ziT|5%;K&%)^Zw(WNH`c5'󧼍J.l&bn%!YVuWz>lH.=S/ᒉϾ BM,* $xK??)+Ph>Q hv9^Hc+ZzdPeP#dQN8Bf8'_0 3_Jf#9J9×n[\)y:$MpMW;zoAw|pM|S xkWRnv^GcLޗHK/7VP~%%Ί<\Mb5A.eF|r*jdJVtIx%d؈9yab Q{umAo#:p״.8p<@!\]a(i߰-ILV&s ^$uexj/6۪F ַW.1+Vk8"q*1 $6%*ڽd4A~hn}GOa?őqLj'8]PœF>ѸE'qh{Ծa\+'քqq.nF 2``I~5༢_ T\jǸ@950`mZEʧqP7 T! bus .gtEy2(şC.{"# %AA22{){^?g0vam>Ɓ9|B]]?=P1)m8#X, =T-2wg?;hɯܟ6F&-0KNnb%~;}i"TB!l"z!G^85O;[.+WAoHIu0' U :Fj$}H v=_9_g:"<\ ;-krWݐ |f8`qX-yI8gjίWߝs o뉾9s/${hko\ K \qSE)ן\:(H;`G+?wo0Zz@$!B\K#nItm7_P|x?8fctcdK l#vn:"|]8﷉!,qp}+0$Aߊ*%i~ &w~N\c-rET HHT r8@\='+X#USVy<`u8=`et{r>uN.`pQ'-cMEZ Xt5/ ,k :?`K.Jڣnqb$UV kDzW$j:a;h'Q`^X*\Qv\O(eɓ0ꚴ9@AN_Tk`YGy JFI} VNQJI8oNV 0E_ڞDģ$PDzWe@='T٣+ $xac[pOci hrhwjzu8N [ǣQ,̯6Oχ|NOІ cBũqŒ@+\NQh%7m㹩VΈ|>U.v\u˻c Xb=$ab8UQJ/Wt.P|SlA1 :a ]]}:Bki_Õ3}E?䭐15~IȆޘ3\WaG$=:pjuT8%ZsY p*zwMG^i)栥7׆)z!`{<3=cTtfjLHh/t1W0/mtOHm~x ?ޱ>Բ얩i.2VW$:n|*9KdX "?3gje NW#*ٸ+ v`=NNRQ#8SEJAR $=Ta2A*+:`,( PV8q>RBy\5Rzr>zka5.Q XpW3}6 F{zvB-.<Ō<־NȪƹ?w U5N(PG.囈(]I P%6Quj<\nTL:l"c+{olCoijULx~yAsE|Trs۲NMP`goePFZ/6`qmcV ͌S[r c2w۴6ȺM"npE6?0b3cP$hyk^9U֒%arci.vTvXX N.ڤĪhs1Fw9m1[VWkE $?9gսgr ܨ~|*%{: ٱ zszDأe\UXOTOUYrEFbsiuv0* T;-d7;YDIN&w zG;pm>K vBrf3 cCѬ0 @?En?VvK)Y-}5+MA1/ 9ȋOaAKȾW>P!,~-CɭO.eR4В=|-@Z|.{7yʹY""8֕ќIVŞk ԩ^->B7CfOW}b-2M^ ^sV.KϻMrW)|*2L7䖁wZ繭u?EqÙz/ !KOPI]"h <k$lKAAO/+*)b_oQŖ?6(r(4mnӘGj`w'(&yt`s˪.|hS\$݄~wmt.%e= RŒ2AJP7]nՍG>Zt..|]N.v wbi}٧!+NW_\X_g:b {2{> @-'T G߲rWꁷZjx@cGAy1睩6%h.q/[|=z>ul~ tctpR= Ɋ\X^ w8?endstream endobj 99 0 obj << /Filter /FlateDecode /Length 4137 >> stream x[K=RR˖7V̾GcM%ߧ,WqfFNX' ^.NFMnO~=u\L~#a&g7'. 
X1LmtY#m'u͌>NwJ/|xV]6*KUۮ]-Bz&vČ_SbW]9^11_ zwOԁt&BI*!ń\rsΧәpfT_[V.Jښ(,KڔJUmgf"k +XOc@ 7>iZ6xSK9('a5˫}lZ~9i?SF= A G~TٹP*UT"׾J$ q@UG2jI +i3=[5D?N:hM\ʂ3~mj!ӕ4֐,c y!#^hNal/! Mފfvf2RJ=SYtUGso \vG(o׮?/22b.A7궘,2=|/w눌4aEWW2 ‚4FףqX, 4B `z}F.) їȝ%̲[VU7H X-4^%kA:P8dzb͐&zHș(~[Q%g4Eӭr; 1T8$t)+x`wicr#4Dp@rZiIL ^UZZX9oCn:>,iྊFEf1~^z7tkQ@ÇvyW7ai枌]X0ٖʙEEr/UZ7T|5&ɸxlP$'j>v3T\@{},}T y$X_ {L\Q&˜ڄ!eGF+ .hS~vw`98قNR^@i&$ν ñ.B3 ^.)Oaq"Ud_`\+H-rŹOLTSaLxe. <ٰ;.g \L be΄yP#L K3qJtL &;7aX¥n.H+hmIT >Yhl/06 k5W0FjjdL+4<%4=[1SBfx} BtIe\`I2lq dm]]9S]IZn0_HSR \L<1u^._sGܱ$u hzQ _Ki#"H jDvy y(2;aD G]vEDxBO2<)bK,]̴TAXTՋ zLzw5=5q9_anms|3?HqNS4+D0u;`*.ͷW+y E1&cHP1De'ɥ]&lRQjBMqwm?2^SE>^dZD늃f$vvxς357@h^觾 mJ=i;+7걇JòԪ.pU-8uFd(竮 i^/S؆Sz :v%DxjLa߲ڮiT5 fd3rSI10ur*o, sڱ֐ƛa}$ȄωCm5}Rhhno謇RW*Ї~ MFfĉ-^Z_N&}!VxUVE5rc}a h !P0Ւa)e뿔4t5v &EOւ1uL%u#?L5mCZZs¤AXII_qx KQZsQJ#f1,U 5]^߷Enm} M|O.'!nH-z&1b v2fEy Y"?<oNŶOyE{rɋqV<͛yj}!YI= ]l /Zt4gWۡx/|*ΚpQO?n6TDZj-UWuĦ.uV֬4D/Q;/Ʌ G5`"&.15^mE &g}A%GbDnfXҠ $}ʓӛǗ1W`'<I ~$s: z>]aP+tҋh ^$Ն˂h e2[!`o<`c0cj&Πi|{H+O1vr# 5D<.f-MHx$A~ 9f 4`ڠ3IPQ\<9M:&# m\C{c kU,G<7, u/>E(j?y*XCq-.@if xeH" (M\R8ΧDžP298P-VYs7/%>Vʅ3@5^HEt>'m|WIVP=,(3KS>*v%6鏍]}3,[Z^.#;IJ3X xf&^]X= )ù^aVһWWWѬxΘWș2~SoV8z;ݹpK&otJC&[%Y#w])7|/c< taµǏ|ϐxY=mcSS> stream x]ϥqv?!_ p_rc0` @, NS$Eg(RUkz;:c "g S]]]?~._ӧݟ>Oϣϭ훦~|o>:JT?|ן<ʞ??}j?g~퟿|/j_g uZn^~8UZٵݟ=4mg?_""dvȼZ:@v*$3ӘFYR]mٞETST} Bw"yrp8\הӇq}`CTwL"fe J]U[v+E2e`;hBC;,6_M]#jX% +퀕Z:Kzw6 N6ٳ]ejwW`YFlv]fo4knm_9Бx?L]n#x'[yWΥ^u59n̔bXt^ɖyb.&;{l^lhE6ZhnƲˇʁ&}导,0n?5~~Ýf\Ro?aʨojנ?}çç{Ent;а;~ ^}8؎4Wdf<@n3?_]ޑ\k/Gno@ng@}|u'0&G n#D;W5i}HF^@{cOٞETvinc!n8ϭj|J9{xGrvHHv_SoC>dmJq[jJZI-}~kVg vBXvMko`l }j-JZݓZkxڇ3\o!9{wDSʞ@>C~e[MV#-['/iw%Qz[s >CizId~leqh dJ/$杵[ Si4\ajlۿ}[?c,~C:b1Ԝe1cCo(MFݑd|.ܗzwLXNdLX3Ҍ[Z [Jc#7 m]Yh)&9^P_V/)1t}fșnѾnO/S 99kHc[-׳Vi~Bػ(Z>zl_Vlbǁ;XqΈq۵:떃Nˑ+b!da4| O *ԁ[oj)nՆH@F6c޷Uy-sc˛O>?=懿/s{nqJ زqw6}V{~hfS}ag:m<&V{*?̊[*ү> Vlcw2>Xx3lu_߶Gg /(,1w~63B)RKH^]׽o#m*Zwǟ?f_;vΆ}Fh'ӻ(W_wXOe)u?F?'si_~'%_g9_Oz5gƕWe;I9>{t?◷jˏW?SvzGiq'[;*6d/~9jj&7cn/>q_~[TÎ^_ v-s|2>6oۿ=Y_~G^o?OeJTXOwnw_?F9CߎWˏZ;C9OW9F%S ^f>_uf[s2^ӭl!o?}k%`/> qƂToC+;ΖZ65ӡ CO>!LjswY >۾ú>m]ϾavBώSgmk5g4g z?'\p*?߿,8'E$!02{O̴@Fmop<tYj6*wfP?Tl-Oo|¥~lwA\Mցb_&Ed]{%vZ0Cŏ|"+9M =My6IơMk T1Q;ECas4o4|22T\b C˞2DY'?l gl,~49,ڵSv`bRB7M^QzИKKͺn(ɚ,kcE&cu6(i-68h3*jeITEBmQ$T)s7C=O0QV 0kaX=;pw\ci:;5:Uo"W,%O4SД7@X䱊 6OvOj,T9o0btdR$xPnBe vuZУRWR@IIw~ lq>iVUhV|ڛQUo%x=+&Ge%VN06ʀHN HQ&%peKG?@mB#0Y3v ҳg9|z}YV[ӞЪruV:Ugxj-;^ZFƏ8y,u0` 6kW0)6> 5a%;4\g6? ȶ:ǶJ[Ѓ3w:W>2"}\1vAF B:⨬52cDpTJ8c Fe`Oj`Z8NتsAlhgOF`9#yWP6뺩馉릦/r-UkLWo:Ym܊u32Xy42F,:9ױ)B>NJŁwH\XT;[Ia]疓ɫnͰqZZӸ33ZvtO UP.O%JdU/"rmwz\zOmII(IS5f8DTUgXZpEXTrAkw^jAY ~wH(jYE/tZB-4;PBd,W r/Т -y;"5,S$@M4F=yh*>y}߽<,%e7ƚr'5 .)O`īMm(R2 km >$}%+ IyēHRÍ=+q;$lUND;_⫚}@W XX9 ٘d ܘh EM% H`s :[VqId&Jq6sYtRȕs\,`q&!˻밂}BT[u}=hɾ:Z x]iU_-QVe߅>%[Ĉ>'%k$zS.l)$,="&;M(gSNZcWبEddO\9j9F``߶Ʊ6ݽ2@fmD&UߨGٗlp7giD+"XӇqQa~A޳KͼPr|#ԅSm":R`%>&}S_d+#QchBi6)Y-܉R'Ҩw5Fiuݷ@&>SVl?S&2z\?l$?xܔ W@j"8^.VZ %rh҈H6~S_X]Ddy68w*wV!S8I!<_9"lcGfX'mXv4WϽ߀ 'q3CJ$Z:%kq $kCn/hM>Vaϝ[ Y;g>z)W;,~O>=0q#aKkpc;a.Y!eIodl~7~넵hî䮈HLy-c@GA}(r-Z4OeX-?}.itHtIQ ,jf^ ,i#qRAέZ`T8Z_S+RsOgD0ѹ~.cx1"d9K@V"޻N >(I(鑝\(@4B:u˭2q#;EX8:Wø > rI+s '}USE^- Plim3xn;v]{KɴFܙf4'%0GHݴNp"aԲv|ׇ<ėH6yi0XDtrSR2CJnK)}+ pU$~F.k0J,I(]'+ҩ1^ļWv}pO"1ĽNf jnl!0ڇ*Oo18>XPLљۛ dg@BK%-~ڟxJAnc`S?G#?ǁhɡ I@$>zB)M_|-x+U2%KZ9c`c8uiM^xy}? 
=_q<_?Un| 7S_7ԗ[qJ7fӵŠ[}u=JQwCNOG;w|)6cq~73_r5 gW8-#%7ttl5ڨa:Ư:5\(9]H<[=wZ40ZNtg!']ԾiE& VanvOJ^6fvp>!=Ӎr6<v[yOޏ%gyi;hƲ:u6-v?#v+ `>ۇ/H^uBddo>Ȱ9(/sy/ƚ6kmo>i0~振)]UxrW-l USٽE^+ma ̼@?g,6TΩAf wYq/`DB=Rgy샽93s:syrE\dIΎfo6aՋl鳣Yi+9/4eEM@{@)9nzW}Ր׬W-ޗYUV])9aVqfr4؆q 'ss.uƶ֜ܣv7KBʜ)7᯷K1籼!ⵓ(A3jXKQs9K:V'l U gVIgoF^7V1ʗ'GG^ qQQ `|zKoeȽ=Urw^ɷ({E VL+HӺ^졙52>Ol!mfKW.N]fΚ *DпܺWI~#3lkfe_&9u!w8xS=NgXyE|sCRJy}Q܌^P^o<؞ng+*fSo[ӴH%bBδV"oF)u\eTD-xl8wJ%g`/2?3˥9kQ硗T B2b> .(;!윯ں!bUGE(\:03JRQz^d G宔g%YYVW[;@} /H"I]אָQ y^^mVKT茥7BgYmZɲWjg7B;||nyuZog~G*=rԞ}+5@퉹͜4r5ts!r`F[^_\@N!gI8wWÝg=&e(Hg.b)ܴpinIՇrj˖Z5wep~pǏ-:W{y_ٍw\8(A*\85 @A ׁll#>>:9*HMh5wZ󍴶@-MΉGGKﮑBȤM&&. [ɒh\jX>RÝϩWL~=Ei0=4pˤOHbB_a5:(P>wZ_M?)Bn# R- 9SH%Gș\^oebցxzJ 6%Ue턿 ĸD;@s _U8o4A[˙U.pǝ4gHg݆-{WpR^G \o?$z}RsG.4eHsC\}j2&,;\Av0:6jhtnoHXcM^V;6 !԰n!l ~MpS2MgKr5ܫ^o_ħn tMnhB/"w瑗]-ʟG]8KHXOVB}tWa^?BW*oBJv?[Ud1CҔ=-??bQ)ep.!}:%#ƺ[ QfilIra^V%PN#5R@7?@҆~/ͯ.V(gW[G,r/<<a 9ٻ&x/J[ThƛomwF# >amF)DJ 4Y=z)SH,: tek.-AΫߗIj+U"kiM<%e];ml6e9 AAÏ xVrW٨С-?K7 DI-'Bf9;frl}<?8AFgÜ8p0l6*>wYO?1OY\Xe*G >މx0g|'x"3Z(6tOz$9/T/4? I6n̆T?0 :Uc.*'% _z2C3ūK*eڡ=#K#~b_'q$@HNJq8>e+;d: Gbp| m2xEwr( =cg&dB"dSNvl@x23ʖ# y=2+ϒlK+k~OldzsmJ |] ;ԐSJ0Bus(΅,`:~;j9 lJdc?BlS[ǿ", #YEwZ(~~\pwɚ _7|4|¤)Ϯ4c_k.ߗkS݇VfZK⬾^%-"֔cd|njrĞuߠŌ6̫Ʌ`<>ryyh岻u?s3Ht*A䑹 6?hqȍ~ p~<Dƭ'&@irST@>iJDrkq賹|tC]=aλَ1v!!%/뙿K *G>"Viw-jO2^j1 o-y|Ce>C y?Ҝ{ A'DB)gB9R΀>'̟JDR!r揸JNd?QH$5[=Y٧1L;7QUƠ0JCpDBg0A|HH>fH{nP:x;_e-#+A,~Ha~!%R*^7w(w}>JaqR{R\d&&8"h?g GhDP)VF'f $"whhCU\d$LۋD D&,63CV\'B 39Vߛؔk3la3̸i 0w,_Ų1v x-?n&R`1g0wse2~q aqE(f s9K i)I*L8Y*dwbY\EK~e+y-v! [Qw0wn|GɯN,2~;Z; Q-'_AX WiܑɼBtL}"S6ˎY!89Wpa>O",9qq6rq#̅e(_.ԧ.$T7LӈdK@ sxwqSEIHrZP眻"'ZIĈȆ{qF ڴ Lj%pG(XDso6sG 5A}$/p?'V a0Ψ0 XrW,Ek0@h1D0:%#b@@@1=L( ]DVQ1=ԹKírui1ִ̽bc2^c¯m[Z"l?S12ΈKZY~;Hrqq2aeZ/M}@aȄ5^_/0^ƋM=%Ř/egЌ0jƋ u3!!gcg)g;Y6.22_g hd@h1VٞC#3jZܵF#gϥ>ujMB,@(e-wN#KU^c5Fue#xH9x?)jĸJ}SL/ֈyFֈ1yE㼐ID9k|A}xï $-ϽL6b•C׸1WNԐJ2F̹unWԗxxXR˲##+B=HpDHd>xX==l\![H%+B[@z=wG+/ּ?ldobtgt=}Z`d%oVSZ[ϊ=̎3ϛ,,g"FxwF'vަmaKz{'B43Y>+Dj/# {YvD0'ƒ=^H-aLCY=32 ooțĈa6t=}yn29=M; Iz=̽ =̟Waf^jyC#u|y/{z7K}6N% -|Q˽qD؞{ސ{~qf]2\<0N2 W̷ /ԛ ܿ/4 oV_V|K=} (EWpQ2~|=;H[U] Eg},)E3M/jgU0G ,;{F5pKL(͠5Ӱޫ3 D3 ꤆Ena~FF3^uPCjWL)@pb٬#j7a^mSF;_3zA>+BNB[iDR0G R&Y pF0)(G0G Q1"rN~uz+#eqˌ#J%{K1Zj4԰L31! M GeZl=Wa< 6G'Eц_!=t#x}dDrxJ퓢{ĸJ)M:FјY:=bT}p59a o;C5J0G;BpsU־et|=bփ>3#O$9{ {,"<{܍>f7<48L=bphWlW85K(ߧatcTo#c,N|2:Ih3,?8}pY;La-,;{zkX4[q}i`4厕=tW=b-&Ƣdla+;_ ޝf;[e|ml<L;+o /l>;Ip|./wߵCV;^r$.mMH"$)J#Îy] tM1S x<.$hw Az&HV<ԽxE˟XB -V4Sfۈ˫!;w!i!~?\t }gaFk˜L˃ {`&% / ^NuO%ޥ{{Hy|y3q[7wڍe+ 9~&4'Hԩ{6𮗿ϷtsH[qn4*|ܢߞr Oy Bc)Jg@x}DxDxqOldASÐSTKtA_ d~r=ư{'RyO7jż1|M%V?/`^Ɂ 9O\Xƻ"~R4^5 CinYJөOJSYβ9,F}L DŽ8¶g@xO&.aK'C`CSIʥ[j]<&˞~)i6K1`Hi!yp;2(_˞~"1}In!҆S~_m;@|3dKI \| D-w@-fФV]9eظZZ~-7@ǝ@OJnBY_J|xQ} G via\-W>r1;ݝH%A:w#A0[E{hG+j"scG ^WeO W͒vDwyuo,!MHdW ]S3(͡MiFiCVPóD& )1^lDa 2h\ms$"ɋj#dW/hdsŘEDjSoɪ DriYn> 7"x?I_ 熸)[C`*"s B!ahb2XSO[UJAZA@>cl ZYR Nq|-"{y}Ŗ-Ly4~j)c/9?r:PDf2}C}sw74%;b%e'ߣ1^PsQ9\x6ԇ#M}"S!#\` G|(!Dg9JSsUQ,vr!<" T"٣8YV=sZ"WݟP(L=>kܘ#3ZzegsVug!-'ᓡ>x`66̽.2R>SXk e5#$4\;B "GHXlLkdJ͇LӜO͸ཱp"iwjqEDV !^ 4~6GD\CUJԅ3["9@Mǽ>rBæVwzV~(}2 FC伔"/ܭ LV 3R((/rΣ5>4?OpZGBr8gj{ٮH ^Ɵ ZǁuODqhd OCNv>T1h\ }eZ84l92յ@ EV/,W5R2zP<&cs{`h;G`hkQ"Ϣ  tHg#w=y1{&_|pѕ1=68ۇ:3DꮨT|/y }<^3'=Ukxq 7%ߚY8ADVcf*E 3;sv"%`8 {V_|$Yq]3r  3}em%W0׍X jπgN*|.jrlAnkp< OpR#Tc=o9WfXeH,EcfҐ'rh` l}/܈Hc!dDf!yG伷 [InFؘHcŁygs|t)7HQ6OFˎ8ZQLB:8p=ojI4/ h>:#4l(cdvA݂}yui`GN MͥӂCMT CňÏ4 W =>N. 
VҊӭ`ԘNwozvYޏ&l[v,_mEi¶mi(~ĩkCĄyaf4 { 1=)YOFGB$\0O}^kkk8cy@Ruhz[o;EWx8w\eIB0dX7Ϡ}ՄI|#Ǹ"*E9zPNi,ST;wUv+o="-x_<ڟU#ru<<>yO|f >?4?4jJ6[k!w%|ѵ{H^j5.v{s%V9zߑU{ GOG>E|Wg|HYL|{=g:!/NY^*)u"dy["Ұ vR4!Er~-eb`&!FN#du^99.|AB 8 >*WuQ./T?y7vV>̑$b csI6r$-))y_rk fjHMҳ𫴡3mXr Ҵ؋'< J 8=,\RafyD/^.>pC"oo1f o1f o1f o1f o1f o1f o1f o1f o1f o1f o1f 7O/ [{^WDU[=yz&}2w_Aҥ:D'ud<"U?U wk򦙍g|#_^nKQbͯȇ}Xح-f}U;rVq|εz!? kr,ʽxbwXhpB:5ӈi[8_i5 UʫrEܵdOy@4KJ>JW/4- ,VoS^{($Kz5lXz&IKv,7U0# 1~|BFe2,YL<!;%,? "[eDd$?M_O~u=WYb9a˕FFp[}$s%B}4LfrbĜQNZU6{S^)a/eqC˺4Nz3aK m$ wL7ze٧dGp<~t&NBáv1[!'s${y&c@FU=:k$}'l{@c6KJg&NMӬ4d^Fg}`vռ((t'6kpı*1fub-YmEyd2/dvh([ŔGJ)?GJ!'GHCujlɬX\ܙEbЖDw< 6캅PHfkc_dљM;Q癷%V)WO6VA`?EX=>p#51f%~SN%}KL)w=M,)9lj=2~ `&LrhKJ=n^H0'k>aө\ u6_;my m)>Ē۝عɼ-m!ud`3¼M$,dA yk& :Jù_ʞƋ5]/d ';w'CLFy;9) p?MY>=nU[R$61qDK6)>%9/+<;RԲ컒CgHx'OQ^kc|n $IV'B%Ld\)VqVaS%B~JĊϸŘ?yGӿ' zcFmFH{z2]Nm037rHD:gdV|- 6lDXMuwg0ndfoX96GZ qɃ phU7kOϥ? P =#3sMLPJnOrE Z@Nӳ$6s/d[$cK;'WV}'9_(g;;~O4I.-YCm#R8M6XR*?NW#r$ݓG6CVmIEC@$z;I^|$M qp:/A\*>K).|#Љ0'#!zi" EhwDKO~nyɐYqHFe}- ku!84 ?Ik]2Dc_%~V#t:r8w6:d3b>-5URrrW4ݖytnyhr_ۭ*HNڦH~$qnw2Eݪ$䃪lV?ɷj!d dS$U?%.z#pcfEe&6OMv&cu vH{*9cf@S nj$7~n,ȭdAJ喆xʅԮ uw$)S+뼰OX\Ǣt-H G%<,QT# ,\ڬ!,gU,F> Aq^Yp,..KD)c+@?$d (, H _hN_2&#'C8& TScCm'r|#oQ2tП@Dw镤QCrS!t iS,4,Qs4 H9.88_qq2GT.j1'Dvr:WH$A;e)$).*$9yɽ?;rD<*Z{D;UAϔKɪ"a@)=z1'ɪ,I")`Ip{ک<:|frT&ѯW)}V$Cm&BR_RI˞%LQ[NmΧ:Dg)Y$w!IB\?v:EztEu N- 2& k55ջ.9t5)ssxlіyEc27Hz0)|d(2#(\;:磭ϝm,#ѯySre~ᯡ1g=+;@9TI]/pmy -4PIHkr{;)t|*7#$)ˆ1]DY!gmT$BebW{@C-A^Pm$A7"CU;ǃo& o&~ o7M~7M&7M~7M~7M~o&y o& o& o7OM~d`)o2 &o2 \?Dd7ț &yd` o2 M~o2 &d&o27M~@d7M~d7ۛ &o2Kd_}*?Eg>>Vuqe~w#g?#f~ӗ?w?çç*pWU5Zsbq"p? | B6q殰8Rr yɩQC#N9$Rngi .g4wTRNp8V%(hU_jJc3/Rv .%m!f8@rM$/yW}՘׮OU-n%ҔM;N9e2mԹ˵uZr"9n$BQ4]V4@G4!CH҅d3,!!d atwr ZA1FW1h}v!y7ifq$f?ȹR'>Na"B +cp*-r ż̔f&*mg_Cr?,]@ 1Qr4t\ }|11 єW,{co ro64&mQ_uS5 eA˃]C+6gTprY=ȁ yg>EkM \(yԣ6hI@j#;p+ک8@NgR%gbqȁ*qeN4D?gUEfNO3ȩAo3pO 9mki֠<[r2SgfAW#%y YvZD*r{~Mwl)w;Թ)ͨԹJԹ*! ҰHI Qfq$y>ҪsAաl?6[u~ƖAӽHhPhPZKA ;St}@Kd3SMkqzY HvO\RsUOr9AMrXb1R"l-mIuQA da_: )KaWqEy業D'J6l~2T2rl-Z)f! M i8`{\-ЌcsZq+kKZiեH@XkrK`1gk<'Hc/%:ohE}FómdDʵ5džKH'ҙS̀X"tARpcέD+,0wE$5To:My5gaգ`$& @vn> vW=r5Bs!ZF}rĂo~,OɆRd-蚱>T }e :ij٭˼FWG`kZ,#9DC_(šg_#lxdg5wս!n1kd}(OR \c  Iԧf&ʽ;sGIGTZ8|M#P;NJ!ZLktKɛifJ>LS2> HlY;w. Yjл|E-j A2'DBYS"-sΝ\GNhڋki|~lêG=HN wL3wZ)/O)E|K)vM}H^fSÙ d5sdNۉW(Yk)#Ūhشv$$;e~6(E/9I_!rv(󠟔DZ cBnϘZH|5UgXy+m(ce`95ftk\ }e_9Q>,{ē5ى\]SD \#q%l{&N#ݾd~k̮3lx[Y ƒ:2U^FZo@Imc ]ȝѺ6jᙲ}W95<]d(/г/Yq ";%c>w}؁t&9gd!}1R,V}zmID0KrY5&,fM{DSrk`C#@ÙEݰsJy]-3p1w)~ }CX=p[v>,ٰa6fڹa=?|$}X~|hÏv| 2D`:VZԧƅ}Kr!R!)D&O1"܋$g;,@V"yD>T}/ D6DÆ6&B;x"3Q?%A:C$ȑj\ DV^PC;k "̢> Q]W*HW]x*!@LxWd EԻ1!"uQr u^ʫKCLA-4ԨNJne?fvnaUDaPg\֨r$8B ˤw^z8 v4S2w"GN~GRԴNt [KU?u$utyVw$y.:ΕDl_^h-0Q_g P JZNLSz5f. 5Η. 5Җ1A?щPr %4UWw>pKՕTvI;WΕGsm\vϬ2֙sbҨzөCsCL rTL2DL҄p1j"B pR5V)KS2٤Eßlt㳺 dͷ%}ހJ:S,I WS>_+R/J>ҾhB^c2sFq$Y@P/@hc3T;ƃ1ҀRKQ!k9+du:I1øsDe;6|rJw2Pr;)j2:;SFy5Sպpw 4Fbh*W%)SyD?jN]P2J@rݒ,;W߲zSQ\8my$xz*E!%ghvhuQ ҧW.ZyM;"ҾL Y=bBR25.}*P(%"#ܤ\"pM*!$.R(3; iqC$- +)}T!R] Zu T[B秋S"$#"GL$ٔ=J)ҹ33Ɇ~QX$YK IQ6J;風V~UMrj>WhXC_"j"Jh^x>#ZernDPK J鷟:] A&o2L&o2L&o2L&o2L&o2L&o2L&o2L&o2L&o2L&o2L&o2L&LW~)^#6t8Φ989Ywx,:uC{4G(!wZ6&{v5HfUq8Y9 ; H,Ai+!Ɗ7-ָ$pӔolřmR"lMZ(^XAFHYNA]K"3 %@CdX7D'o69QueL*%xphIm?j".orx piH4"0k>L>LmTy?{5p:;3<&p\8uĠ |gలP%{Bt>tl{fK۞3jwL3.;1*݊R9sCpjbЌDNՈh)QܲQ8(Y 3m?޼ eK:rf`=s g-̙С̴Bܼ+V¯6M0rw aN,vfg@\&:TF='0J& X¯3"& Y9 NVԱѳJxz& -BB} }$Jh- uQ70`н7qZBIxEVkIJJAʎ`+м.8,`93CB:Hܻ/,V@BYF#άR"!( lP $ 7Jv Wնgxѹ/\)砘4"'*ᖲ 7 F]쒂r-/ȳ3S 0$.t3E"esfX40[f~+J ;\^In$ICy#!iy^u.+nvy^Nx2<3mG[K G{6ϻ&+f\C ]aVۡѣ U?o yN"v`Db`YKFڊWV뱬8<N@hy/f Y`@jm,=Aގ(L.&^I+Vڬdհ$-weBBKF!KTpv|EA`!+YXQ:ohNjI5_. 
ˊ mZj+# 8LQ\^h\ҔU$ZWP[so gG,`=:Bh4GP$] tI9*$+z$eP;pKG?/WeE(.fY&:( yEq5[wƘnu47(. 8mCqMb(I #dEqQ@WhmdyH]DoXL8wٔHnX D(vIrFCBtݮ]=nz\TkqhhSl ݿ5+Ǣ=&;aP][+xlV2_9 jEq>,ĝ0t֙eO7^Ww)zla/d]uf܀Pmk<ls5S&5T;OhO;<7ʚ^/'-4 f.AFx }A`fs $@qP^3H#h(n0DxP܍(nHn$@WI躖C6X+ X4e  uVڊr gI =F (Q !xQ!@Zgo2_X7T_ACf =s 2&;1P-<۸8˵5nۮTz?O:/ץb~s}$޹:C[r>^ߴɵ=MG|1nŋ~Z8[fîm)?D_}{<|j%')O72EXwo[/BxS%5D7td/f Va%EӪ["Y)r# Jv[*eYjsύtLr-经Ǘk醖$}:Ϗ&taw>? Bě~}s5N矮yytnO?˅Snc+dm^blٓoeߦX?<5&Q(h%EuRSST\T%8OԪb{"l֥ s/Ԇtr "8~ t*Knw=5}uhSn>]eaut=gS .Znv~<0>_GV'>_'vgKE[wvNkveMܔZ]k/XGmSEf6]#J[?nۖ|9J^ rZ(YMߎkcZs~i -uj [)N>nؖ6Wzl`}iVPrw;:?ͮxפWo;iѭh u4ipgKU';F Ͽ~iMQ L7|F. oay:H _-=Oa' >탥w,jՈȧ=|Vhsu_;u{ ӣշ{ ҆Wm#v^Fҕ6N;UNڰ}?>Qq>n 9U5K k {ntWߕ͑{>?풸]&5Kzlܴ`~;lN=F6bQyZs]Ϫ$-b$1;N76[LN/q5¸VڴLӄiB5k: /[9C_d|> m3k Vs,&-dnuizaX2Roo;]xERs>춝{[+W,.W{j_TlM5]e-f]I{uy#:_L<0%kF'I̻Y'e[voo}HI ~\cʞ~:e3[]OO-9ă~pj7%4f-Æozjբ BUokxO0´sv9o)_wȦ܇{Wk?36{NBysÛ -JݯÃǒ(z2 Cgmjt̨b.7E|~B,=m#:ߞz' 0>J+f\iU2NaKQzwgvBٚ]U|aycaB ~IO 27.z13T2}Yl]_lݾR-{e shuSoϻw/XN!g T%6߿H l Hп&9M)[țZYFϖE?m۷g3oom+(b3[ֶH/swA!,}ubh$?.zfkW[ YN=]LUѓC7rwAv)LDf%.Uuۃ\N65M<& ,o-Q5yAuל2knxצ}MQ=OM> ɗ͜!]㩍4^W[Sw-`_oI6pqPg%bHd~徿 ?]_XsPR۬o&Fv68 -Xkz^!5ƽpzj:rF-9:J_I>^v]I׈e UfB 5*yK=JV?7 wu#,E7a 1endstream endobj 101 0 obj << /Filter /FlateDecode /Length 5897 >> stream x\KovAvdgғz?l8{DmI@{1C9ԣCIv.4ͮyS_ ⿯nO[)'lq} ϫ?A FINLI˳*ۋJIﱅ[[鰅u/doa;tũaЫ? M)y_J){x}tjy qi|\WOig̨nj[5ٸW[e{F77/PRX)L//҄^~ ;;n~{n:H^*Lg`z%F[dh5|ٿJs!^YK\hss5u r$DϹ/G(G_v?x-|&iiY^H>2S6 [ xjz,WfUuxh֟5_oo׵VOۇAǂapQ:pm0h3lӢ&/{b&Ecsoʸ5(U\ғdAv=\(+d!2u#bxt \0+d_OKA*[>6ϛKiy,57#RI\c@=]ScX1lI!oȷ\!oX hΒ*!K;[JCyVRM\%ij*9l'{o~Hậ _ՁkŌ>#c⼱;n}_>LZخEd r2o*mvT`-'zB\3a' G FB9u*0/jȫ!_%2߶(c1O " D9_C΁]T`Xo- (?6u9>B5Us)QDAD' s H5zrONp*q N!+)>~]&< Lw~"S;p!ȀM9)/@ SvcԮ\()Lk HEp 6S3o74. ' "*DT)!bq>J:t((7[&dO)8L%gx|4H3&O`<ĝvD;6_Yr`}Xsk$^2gdbj$9D$h{1x@j+Cߨ82`t)<<&59D޳(2DcbCxE2]oZ*])Zɲh=SG?krTj7qT6LOVQL-}Ӭ jɏJ-RHI26KhiK䎐Ai>BZ?3Ô-H~E[0N(wǥ}nnQއT2U q5Y"J~Z#`&pAd|*I$ڛf(2Ld:9WKq%}\̍ ov֔kKN DK ,B* W̤bj LNGJ9D%2ۇ |ݬ҅#m3+hgc N!?$})" lcyh Z4G4p~K#mvimQ\C1Jn&jқU5x7`C4m9PF I+ze+!qU#ې~w@uV9V`4OrCib/2.I8tLI:= nue2^= o a"y%M7rfԛgPLYh:;}ZBDaC=C0~^2 $`^`j: mM*Kat )I ڠc1Qh G< ؼtc4.%B[P*frLbp>Uh MPA_`![Fv5.JCI0M D 3 O&(֦1VaEA#Ř[^uO>Zzn2QF@߰efU@9FUf\xhm!%AhzJGl,MԅA(M(P[c'3Si8?A`tF#KP}y0ddP` "j+$ywWpWDQP>zIf~U@\3 W- y}yaLx!ST.T6*qFbx^[Dn!$P,߮c-UDq.C$&V҄yy 2H 'c99"X& a(\ɻ W+ɛqQ6x:uQh)#SČ&3]XG} C$d)6hUfXn܇.daEѪΏw[Һ?%Y:<R!.ю%Dq23t5*?b JzC7K}rtA geiߕ r +ޥwl6MuD~՝- ݄FQz~-#7W- ǂ8 ij)բ=K(̏WLp|GEH rq0_]#南Bvx/ז@0(Ea7D"f'n k/)G"] E0q0\TTål՟.½$Z<4q%rUuz^ޟv=Ǜ.`vl @$rjjv JVrhhvu[Y6T^+Na~ N'IKKō 0ߍ:E.&fn  yV(ZfQҕĢuA02zXWٽQWKfåUbM;Bqlhr<7ıxKZHGN3 CM3& ?{1P~LIꦎV2}Cn@N`9"d]p"C+P 5v5·2Cd.+Qf}܏XRti5EWU_!̎g@DX87iUuAnn{ h 1u%24Pf:yu]g &M#,t"AMd 'ilڄOtq& <ĊLAbp?PrFk!}2ܬ7skyRs5Ž.Nol Ӕ|;HA=t|هJ2ߑ Wwd /ѡ{=k=hLv1d4z 24*ok?bM;GɫgS/g$`3RTM>.;>\iV|fq+.q_o ^Uu@Dž0 %l HX?|/f ѴS /ސq1~i".{0@U?j$))Fms2Swal-|0Hrr ea@$YƷ'L5W?+V7Vn!4@陡v7'uF( !s=?ܜ|/aHZ/Dkɑ hJ)㇐fac,TV(~JyAZ64Eg :а\E3*r@МJ} gڒJ]g_$C,,6Ldιk?.,gs֍]r"Uy/Ewڭðq9rCcBN kQ^:vb*KR,UpɦLvJ.T]vxr"jdu,ۮ67~I1O6fַu$! 
n7xj]ˇցě $ǾτC ՏX DJ"GFӔU@ <3ȓ08ZċrDaqL|?G rWҜ*xn@qT2uEX ηPE^j}ؒz\$XW&J'f.}jp*  ^>fÕp⥥ ]bŷnEi;r 飏tV4 lW"~VsmL^A|=o,o<&Q^Ԛ ˟ G.Kp8QClݿ9endstream endobj 102 0 obj << /Filter /FlateDecode /Length 4586 >> stream x\Ks$7r'혮!{3҆wmZ9h[bws͡ƿޙ i,YU"Į* !H_oČUjv}gG=?c(,4J;;:5ə͎G'/Mwxa2KBvv߱^42>x=[%g|jUn=[nW܅VB9v8Cs1eU=>M-wWx(rv* WҖ :f6)5Nj=/PΆm%(\@)vP1ol?Pqg`4 lhe :) ܓ60tF~9/ڇnBkŠ؟[Q^:o]d0.[d.%h5J*!ޮR}.T,+ITU)C-R( jv6.\Y=ʫ6v8#'+d#šHhͲl)0 aQ)`Y΅f DX&r[ n+f]Rtjȱ/6)/vh= J3,xήz57o6c>BaWQ=.Bo](g>EhIcvAEdq0pT҄nmnȦq΅T2y8GTnGK gPQir춝ddgV .~^\>!Ĵ:k=h߅Y:e7s߄{Tjգ}|#C\40m ۮQ,f\d(4A)Cab/`#k:`,Fkp/+F =S =_j:I]7gǛ9MM"McuCCl|1erCL#?)rDAJ3y'ӑLjLӏ)('Gf !*&h9yB!۲]*62%9dL[JXncm<)K 10)Kn@@Ii둎-v^Ɖrے+uZ|Q/vqgH綻NyYey[]>`}/㌣X':ΓDLmUե(E2ƁV'QYtN#pH'a1_//Jaܶ 1 y}D6FeHm08ò2UN"J ,쫟~]lN>ahzf@޴k_*Z1)W"tl6_}766zbLՓ,ݽbͅ8$ \oT~9`kVUm5,j|\f0ş>BNrV)'TB,ES5˶EJO[ Ck3+XVWcΰ$;Jէ&:~U7}cw}dqry]kg0Y>ݻˣhGiQнX cDJq'~ْ]tMQg Q\WBw+!m% ja,E-|Fk?Ph`] *Fd#C-Z@\cdq-^4@a),h8o4S TN)>Bע!SU+5 Vz,dГ hfwLw{3zLzsXHmto^ ۛJB wYʙcU8 R ^/GJ$fDI"*n:3ZWx$'j)FOpV5^އ|7 ކU#0-2 mǠ)1a@8> W(Yc#DBTiNyYh%A"g+y1X,uQV6k&.Ҥj-Y^E|)efL$MVAYSc*]lSeX= k$",a\VLHk8f X.m4O ;ݨ-ZMEbrGV'61DOH 'hPZZTL%&)li׭yPw#? 2b.$5m L';4Z٤oR [^̯zKɭE"=[C\3XIfT>H1]!Z *m[ՑtCg&׭ɧ#%ۤ4endstream endobj 103 0 obj << /Filter /FlateDecode /Length 4400 >> stream xr$-UÔr'Yu~8KbUصq)aׇ^=Vmfyhw@mWQJA@=?x#fۓ?}ޝ|",!LFi+g'q99s7\kný9){5W.dmPFXF׆K. k]!inz۾6}L،e4Lp4[sݰu+m*8V,4JK m87biۭ@2F˹t4v* 5(AVw;[-@DmoaÝdF8˽$FxvnJM!3f(Jnywv!g``E4*v3Gx2>՚Ixr nQAݎ#seV2 -3&-q^` ?7< X.l#@kPv >q_=`m^' sK4n9_{%aG-#׸T đu9`Q`TuoZpn nc9)lثHj>5ڬw_w=.8ܠJBf%tp^iɏaV(RQ*Y%/Ux/԰&ꌀeqre'3M3 mRKvۮBJYIg7mT}{oI;Xa(o_ :?'j٧w'6Kaf {퀙ϼэqA}m!]{ fvS lQ NWGN'n՜9"G#5̘ ELcнa;, /Cx<+N iU_< 4(q@ڳS.jdSGc(D-sw&Wg|/OcnhJVϳdh :9:xf*˫4+~𹶘mg:@[=E`˄Re Fpq1Ua;rD \_k^7x<2rzPRcw??A6r9S,cp!I#`_ 5o~LSh%CyEb!,(^w8yG(R&4mw}"4ؔ<(BQa]ʁHSs!k!Wl"reGdciDrz!.KS0P /LZ32Ao6x٨kodj*Z`[cDRG(j֥Hj\2"RY1,#iSJrpVi\Zyܧ@r otd7joY2eHV!܋O$eqN8Ń@,%_4-myGW|J{=?RkO8)*G+ @!k߁hm^F2|ӉH<6qi>,CPFar؂#rf|qFgR<,FN̎@ e1>7=A*2uV/D@uS˦޴NBPy@8~1$8KhUG{fgfZ'?ͪ[v1+њH˿έ!l^AKkw,c#hve?[d(A[%Qj lVM"l1-aRins D /j, q"QȸSg'wMUܜO=UB):E{/qKY  .(ceJ%x#u%YnT;5(."9Ą!AYJ}kdA.1ΚipH`WIr~Ic%Fy2:9e^ lxơT/g8r%ɍ_ވڄJS1RQOoyqUऊ[أNH08J[saB1> q .JY\4ir<Gh֤̄|`G񮜁/'ô]Sm3|" }<3y،!!(NKaNko6 S!|Vtdj G"=1%fTҜTʆ!S*Ick)o4c?Ai%E ^|bl'}IA6iMR |wM<Lh_ؔ&riV,)F7)j;.%H*COH#c3f<~]Zk$Ul ¢OCL_1FKxMATdg=L> stream xWJ6!x^jl>>{v[lk8OIb5~Kb0{y,=)<,rrS>f2l`9JM;g NZcG^ad#ThY9٠a$ n;2e8~pȄ w/*`24F 8X|-? [|ǎ%Faq(xA{06װA"jk !Vr8zrxbTI,KXԓk 2uB3΀G4ю rU܃VΤ5vilL׆|Dp~N %q_1\}+6]x߿! 
ee:޹> cbvnOmV Y0~/^ڬt_@R,wFh6`@'S1jD[t [j=RۮhdI\RtXyR|)P'RֳXF?$ްjV>]BCo9A99.b4H6Y=eрԂ0]B/BqlU/h1  +lɒ vM5>%$2Nxkӌ@pD;1h8?h;u +R {M\rʼn[:!`)Dn}[}`u^5s1AII9^alky?L ?dv~\F+ƷOPOqo9BR8WA+0g%t8PmidyڥGp)%cVN{n+}d4ٸ#S&/׋CpLm7ŅM2PܯM\~x125Z>i{`04Ƶ8eHF'nS9׍CyK!$ B%h8 Y~1'5 Hj2btٞ˔~4A۴LyǁP8P5r1в@U )ì!n?[V*A?rv#` Bծ0'h|}hmi= Aޟ0KkMAi&e!`*yWMHA)]/T_vZ<`ld.`V (8J j.q sC3,'kkQ-{Xt2J8޲@3g `hX7&λW)jc,"K.M~v;N!K/ &ΰW#eVA`^RVs-˷>"o\:惤݀[*Pj."S}.~ xSqܽZc<4y\@њqRc*HɹL{&g'l,POb=q zvu3 _W6M`F;٭D Kϝ`MJjNFnџ WcF8J0gkk-+Xi+Ѩq$ˏ/!!bGo77 $W-"ZQ&&%IYCLB́R K93^LxLZ%p5S9lˤIhVB䦹퍘~`5ʎSd{v ֍Sw|~~[\&3$ݸͿ wv7XjCG,i ~qb%ϒTJcqE {DZz-]KE,^G+fo~'OҠz*J7hctS#q ڳUݠԓ'JO[ %ov1Pé1Ә+v0z\Xg'2Q_=W1BEY]O" Fp/ H,W)`V+a@&I&p d\h÷Pģ/4%huX;UR'"46=@#|Q{}uYKSn<YߊHvt-bypRs7$r8awt)co?'VqZ%j=Yao^Բ9~+iCfwmQ +jdM`~b1[mrJDtiߖ҉b1@!(If^bz6ݭ_ )A>Րt7$ڣ~^6zl_{b OޡBM99N/ޝXOZt'XtOOWx1[gl|vhMhl4;id;P`l&.ΎT1kjqFOy V*x3ՙɯy۪ng v[|[pe,lHE=4[@Ԋc?z; m1?E_uL5K*|6{-L0'Xbߵ$>/'b* z%Hh]\fJw3]U?P.Ӣ'VHOYE#0RK.$ҁMPtvϕ]Vcy2H"nFzs~2 T:4iʮRR?EA'PJci2#7lF3C;5: L5*3Zu bE:D>O!t&sP`ۖoaL?gd٢n4{ Ck頱=j sVSropW D1ATm&4D fbHCl%glKfV7.M)Qʹx_6N6)Y5oր%>&N7E`gUݠcGGYPR1i4x8}fųU!!n4;zA[pW龇dpAd%mҰa 9(WPY>С0lv^BB):w ?.n خmh(#!Ihq\^-]]wu[ZQ@wuG 5ļ`=jkȄm҉z>;@H/)`z=R+ Z} @ʑiDRuS 2J9D]Xri%ehf '&OqRf(phx ~VZ^AR _`b*K6RȚ!>:3H$k];tw}o#? -oąb3vKC4aPbE*ŶwȃA2n{-zAp'rEE@؅/ /."6O2X ޹Rz?ȫK(}~Jؽ跡=`Zx!A3f}[Mxendstream endobj 105 0 obj << /Filter /FlateDecode /Length 4974 >> stream x]KsFr'o:luǻkk҈;T8@Ib֠3άPRCaWYY_ /zfwD7Gu_)!V18>O҅rqztV}l&U7wDji?N$pՊK WG.O.Tm57x Z+0m3ލZni|Lͬelҵp ֖yU_ѽvYB1% ~Mv?N_ ?wA; !( ӹ2 me`w#1Eu#Z'h]kcH3hF\ dV_>Y`O ͎>`谿;@74t.Fko˻ ^ni0r]uٷ1UV~](HoK||CT5(J0wWhM a?495GWta?KXM("v$!f2 (C LWՂ|TÿvԒR7`BHl2\ZS#@yD`y`5& >KL#*5oF:(,F,w-ގTdž"o ̆FNeӰGF&gImW_K-#͂\I/%CGdZА%HbjAn!s˩~i[R; \6੶qd_m+mS G#J4g`[}h}n0ɪD zZKUmkwͯ%BGbw)7GhxW}O?er2}f]b-xvo RǕq ;tn|~#M+y3 fH&i[wM{$IFOI@ Lm83U|D/C sKoOOb:]4S2g#eI bBJ[-`ZE>~zI`,@r /.FU;t~1H%D[\B@>ul2[K.&u54n{60_e\8 PpspPW?j ~E3 |𧴥xu8̃RY$7HL.-L͍WL`~ZኈT| _@q"6]|`-&jND[.YC4&DpURZqbܚ⋇Yu_LfSR5OJ=%Ƅ5}~)2R\>h3UÁ¤|B~ ٪IqaM5)C*&u)MMYu~ĄKax,AAyw?aB`\,|eWRZ\=E38˦Ǹ,X)3Qi%dZ8J6/II[*RRR4%,SRD)2{jj|%,d;0{'+EX 'sOf0C]~M3 :9$ 屑BҒXK|%Uoy 22B)?RoFl+𚚩=Rlf24T xY %XN&-eHW+ eR^ΞW%WȲ+eJX^X ȥWf0i*elJ2"*}3AR4eJ,3q~)GI=oJQ [ӞQIJR9) f9 *O8$SWz`^^)3z^^Is@6.k@/$%Wf4rG।% siKH)2ދ%KjKL ⪙H[3] S2m؍Kagb X 'x2Wd/S6E:P^ҘSIL c~-.Ky+Ӱ}Yzb˺V(5p&e ·;4g^#Qұ;A4ez hq0p']ܜ< mP66m(KM㷁Xk)Ι=|N҃CR* iؑ ;sW }!'O~aΩ"96kbdLWEY2FB2|cDL?ׯ&D~L$>GOWNBf-~Gf i(E [?16h ?)+l:;? tv²o'[T軣ª3zU^T4|kۦ8hxW Z+,#1}*WuǢpSzIaXHS̋OQ0:ó/nxkZ)ױ7|2}knD!T',nmP@Qd/viOw0Ksɪ6k/'0`=,nwA^iry*J*lijW %bjyЏhڋj%Emp_nf`pHp@h -dY6n*W Cm6ܘלX(pYHg9onsoFv%kdMA5uUvH2~LHV "Emxh-uA-+}qii0 rXÓ"Ze/Cz@AÐGVA~L5刭a(037V#r5 DƇ^20X`k(pvCVV"VJUx/4 +w鮂B:W"W\i} ,qc]I׮t{ $1&YM{ ]ā.Qy,Qź d>< YnAWrt@44kYdNm3PC>ntqP+Y:F(ƺBUL? Jtd UaC"&]"EcΛk9)]^W6fa@~_=;#b¿!aQbʩsDz_5*̰P_&d(I.ZH߬?uqI`JhMAP)o wU>%@eΘ4 8LnDh٨TɉI2}(_Y]"Mke_?!W  Lwʗ"UqmB*mc͡.,EyXЃ@X/I9-T>þe̘CAƾ# 5zCC3'S^x`N9r<ĺ/.`rǜ7ӏ~}s 28@aqa/- `3)pK:ixO)\  :ݾHF!{C}<>aR` ĕOXQ+qúYof1Fz4U`NrΪf<%&1$ {gQ.A. ?A+ ӿ,/eoMnj ;_Fs&p'])2,JѪt[ SyJ'eƘk_Yduøg17Ơ;{nQSZQ+voʹXV&y6M屲KIq,# i^~5bR /)lKgmD7OZ:IBxYz獛& yRJi;v=V V,bg db\{4/ T&jVL&ad1zNf۫MZEPmj.N[IX͆=öp?k3ju\~| G7⛆hS&K끊fs]:7.ݸL ,½0xt 6$/ky۴|rp՝ގЪYm\xFu_>dP7hfߺ(B Sv~᫒mTԯ@L()׸H)&%lz&.R/+߄._H/E8]rG{ҫ͖ 0!+/E3ߟo0Lj[FB>K\M_w W>)<&Y%2/ ߂;tYendstream endobj 106 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2529 >> stream xVkTSW!! 
j(&Ъu@iqjo(q Z#w  $;@ E+0Te\hN˶֮=Ikf.bk;Ys׺޳>!p xf9!t4Fy0OLKeH_^//DnIg'&HRb7{Kȳy4:NS*) & ^,  %@xe/-?#36ng|BpR>/DHxD1%k?@-141:3O!?T(q q<+| V\kᵎQ-F3e)⩨I(\ z-PtI* ^_idhuÊap7rU;|emsYפ5?tWW1KkC!MqO6K֓,Lįu=chu̓>[~δKȍ@qurIz2yfܘ2,ڜ-zHـW;.e06u{TJLg BRKf)ZOn~),@H٫$ԝ6n#.@⧞LrюU+3Ƞ돿@+c(B:dy-+p ]G齅[l}7kArV-:4,t-LHڂ#5W9.Yݰwd"H4CԌl6%'$xma%YP ^Cugq.e&FoC-z?gޝ 3tZIHlNK iiy9 Չ絎z,HHO(Tkurl#5t.Zs v^oʚm$d1"o<^[)fڬ"Hje& G֣Mw}ZכE=G)J [mؚۘSc@#W$W'Cz_K KEu\gVX^t  ý,<3t }1*h6Ys꣱7XuOrP`, 5yU ]f]eub80+Fq|:W^;t"j󸎄QL:M.9hi+$6!Ih W za޴7gL\omzLNy'Ѭ9ɷŢboQ^ST`s9Ô*o//zpq^ǔqcQzZMI^wM’W_H2%~XqAj͙Lm[+.1:N_Ԥxd&`=rj='Bk= 2o9PGC;>_Xh 7}ˏ{ϬTM=T脪 PN=9;˩<ݘelG6-·&"jReV@*s 1/YK]UL? hD_z|nE&°]9ߣ>y9% aKf_ 7Az׏{^sᲵK>q6RU``#ĝa܆a笷٣܌Zɹr'9̷͘& {y@0R(VaY0XL=l&eZB&Y!D/;k4;~}%m!/V VUN䅜Ȇ;sGN^ccu/ĶT8ܮS\j뭳ə | 81}GNmAJE:583NpqFPQUPSf0 F_D.}endstream endobj 107 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2011 >> stream xmU{TwL2MAęس["bzGhQQ  B@@Hyc{x TUVU={~CsvtϜ?ν{stkHh"-VgExT9].b!pcco3mrg!8;z\Q(m>^ ]EznfT%^b]/˕{IcSjeJ$V )|0YB.KŦ&II(IwH·En߱ `6\ M&\=z ۆE`Nl9}mƂ;:抑m+8 \=98^D02 ϰQjt  ":IJq3O5qegMc3m&0O(2t Ӣ ͆z(D@_;|#"lOwY d7dBY'Bpx YhS2,uEj셈|y8U{h)`V<_<<)/o]o {忂uXm6F=%D/) ]-ui&BtN&TU^t*7n(GUXLRID_Fw+N.PPkU[U g~zRQ2t@@I.yPRc,/)Q04>f ^=yyW1m Ulr#_h*&c3z[IT:BwD^赞;/v%_d,BP!8wc֗З-!Z2P4 V.,@/|Fp9ZP~Kx "_J}GfoK:K:-GjM$N~+ȁbcJ 'I5VFG]OFq<Yw⋪xH yd1 X,j[A)͟؁8}9qs{uka#AmGտRdGteEinx{"w3q$\I70[S?8Gi R |9L6tRQdL'߽(j;uxkơ7PA~r_bm(6dB`H;$r#c2E!ەҚeJ6&ӂ\y|ԛYU^)">|棹G_EE5ŐkZfLjE) r?miCSbl1`z6@Ы kg)n Y7T|fP黓> stream xX T׶TDCT7*"C&1TveiIC$ vk$Q%?O^%O%[@z ,V{}KX[12lR۴Qx͍,],VHГWf9ۗہlϏs< Wg2YycCvx{Ogs'}oFDFQվ&߈c[´!-kԫP/\l񈊞}1>;-k&y˲а+#"y12Ɵ,gܙ&`V2qjf<x2kZfŬc3,d&3TƗƼ,e7;f,Mc1eVS*V0{ۭLɷVj7k~=lrv~h/ ?/=\6zl=QQ,>X~.6'\0ga?̇*򱏆1ހ 5z7vݿ0 &QDI?dQā8vzd~Gmr(࠻goB3xG@XO-$[0"cuo}nbYޓ =b 0"ࣰ <,yۏZ6Pm#oL+jX(r9ZվdHI qdY*Rڂ&D7wќ桭#?\c(LxS8糷/򹒭;p "}ޚ%MHYe'\.w' XF^#='ud#BLMw78S+q,í*2"G dО aANx=8x`Cp: Jxۙe-y(_J0]=#m]baԋd$%tji+xl|6>:^Qj_O ܒecq3/@mقe{4xsّҳkZ EzCCQl9ZO[B/0Qj McR(gy8[dmo5G^௓6@-4e|) 63h(UY_(Sͨ}{O#*8`BCAݷSX 8JSn!2VDdFtm(kqRBS|EUEKC+w[ByނT rkT_'Ci{貐Pඑ@F𸣙LS~8v2kޠ[Aqؐ 8dTM|#T ;|+AgZRB֌Ci9q]. x G[h вǎrUy鋷hZURZXb 箃M|majC QFNM*X%2 Lcht399uJ v .J[~pB%Z5o̚UwAoxTj[p"伬Pw DttkJCk -Jֽm6>3Y?@28"J.=ËF' [_LUA(b~l(X;c?bq~}a1ˠXxgN >N,tqn7tE^.q GC? iWPx`zm {wsudhXLМO w = }['CR_l?gF,X?hX@܌XJvM+Y; X:X}.?qz58S nRV'}tܼ t'NБ_Qnm]M Rψ-ʪ4+,́20!q 3kZ*+v Zک-3Uod ȽӇUKhST@ ĈuwU(Bz%P:,,-$}h a+h3zմqcq4lUtEo*G@@!>)Aw _z6('.6ZxvFhs rm[MvL,M/m^ OۿlyeyeRNRADw*hTz6ŠؐR#`!$e /F@nE)U- v`/[dՒb$\A3 AQ"H,^4%A.랙7*3Y1gdA2sK3VmՅo8[ .j/vb6}0ZHm0$g MR8LG:c~> zAb8]uU-%l*Ⳁ"~zSX{-Ig.ΓHYADŷ(uTi* `ؑ7$S1~֧EvAQ 9P͉rX}| %/!@16X9HIW]P{SHs88׿N-Old*75Д3ɨ_B g+M'T,?5_vMR _EubG^ ӉMe˨`@fNt?hK@nCvRwn=]݀.Ux^Ru鷃Ϊ怃%;'ꟽ=t&)rNT5W6]m2QHVKc[z7P_twJ #!1$JBQMp;jSCvH cSf _E]ң[[33=SN|4 //t 5Ц[SDp[_'!?]WLF>Oؓ tܛMGKOKb9l﷖xViDfWd;og_cYEt2I *lG'̅3 j)pWS1wWt<Ljfq6I %ޞ \ƅ+_[YlWJH ¡ Sb3vx"\/y7bJK`*&'\SZ|uE(Z҅O?njN.R5/7fgfgA;pȡ*l i_L UaʰIЋ->R*7?N T,yOG1o,Cş_AKP$t tG7G}I $6R K Ό[1K 6n6`@y$4%Wh 8XDGp[8Ƿkv!i'*WW2q<iGSI \#_M?ѓ#Iyxz_z% Z[#n>rtB <>'eGr7(~u[}qmm UB |qz!5MЬZ{bز҃8x;sA6SwG鎔68f X%1Ć8Ql>{cM[zבĽd1Fvt xBn=r59Y~R v)+v9^N>K. qe0ʆWF! 
99Ɯ۶0 e]endstream endobj 109 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8592 >> stream xzTT^r,`h5vEAH& eaл8 ]ԨF$ؒX9^ {_k1soރ2A 5km},Nqq2kzÅX=x8i6 =5G.(#`g` /iSN4kix*gWo`o/ g&l?e1b ;ۏll-Xۭ7Og|R7gn[$`iGA˂]*yu˚H׵wsv_vFo;ͳfϙ+70vMcxQg8T0 55ZO6PQ6-5Hq&j<@mSjZJMSQSeTj9eIQ+*jIfQkuʔ2̩!%52Q4Q E,^|K~?՟ZD =eBT@ `*\ЋLJQAQ=[ꍧ.%f8SP~}ϲ>}-7_ZI=2p b!^'>4x૦Lf!x3~h0aLJ앴[ a4b#~{H6#SFe3i?{7>`fTӯ=h.hw!:]Z}ڍ͒rӣ hY=vJƇ~AXOo ҕkTY*(25F :'j}m!40xP2B zX/՚WUS7C(1Ȗ޻x,{'4{2Ip )X Z!#f~~l;G*óّgV-jЛ I:lKSH7sœgEW/7S\G2MCQF{$K?}a"6?z@īBu1 7$)m⥛@}rz4ki I?^!S6M{]b4hx 4D}|XnN<.1o+8cIѠpΔ8z CNN9T}^p5O> E =F 8%GY`-Q3:6￀%hPS BT.n{ݖ[x:v®hDHIW&=:7WQA);/D>bĎ8;s4 6dzQ._Fd{VәtjTW?=GbɜG\݊PA1ivPsՉ7.LƆoIzxb^ 7bd"K3C#E%cFX{l >0we?@w&"\ m^zh}LX`PU!_7CυnSc*].}@dFǃg#Z*/>#CcY!Qkm_i_P&A{F'l@ Q/"uhMGlY* ͔,Qb7w"4 *6` P㎖w Q wuSւj*+9PY  s}m]'Aiv싦_AXdKh u9 Tb%;thΔ᷋;{χ*M-hNwѬDȭ @̎5D.(x`U Zu?Ȥ8r9rܔ: 1zgsm\8Ԗ7>@=R-}Ԝ*>\H-YEI)qr[ !ֳ"TRX)#]F_v֚_[1߈wݙ,< ;cg4OG67_z5LȗtCJCAⷔT>G z&䧫$CHE񹠍 xCz4JG|FH9b Y}\%]~jbhO8 :uML/TXy-O SFwS(]D/HC;_B-Q3P>:.%jztHx% Dd=cųNqltf>i<^Sij+/Vq4Ak[P kos擸vۚ\ԷəveS">Zȧ qB!0CcC+C r{8 jY.7@!GX&PWmC_YYZ:)ȕ)Y2,{Uex[*T j0I#2Z~$²wk0PWpF[G?OI==e&)qaGW^_/azE;JXnՍk_fs P~_O7%A@|Jnj( 5&BSj#d3gER+D:J ~]CArVL,7=b&N2rS^KPLT> 9od1K$Q^-\Pꉾ6+<ϭ%(ˠݭGkMo"׫7Hgl A2.T ;lf2Ż+ sjv$'9qn4;Eb"Ȱyς.=xcT8Zr!k Y̪&.rR{647@(ZAXa*#t*s=bcl>L xKHĭYKhĠh%.IόXsߝNMm"6;&Mrx)sɑvCپNrd_e &u(IΈB$[lY$i搟Yb\iJ-Jy[C Àܩ\W 3E!Wc2Qu{ =T@r_[UЄ>"&~W!51XǺ ָX>2C.)-Out_[#{C.;`Xw( 'J rT g?(#PXV_12<ݒQvMLRB?;4C\^ĎUxL;aAw EvY[xޤq#`7Ic"<+9T^y='ysfI+ԧdh)( uh8yܹ2-_jV_p"jwZOt[ doXw`6M?fwNg^RzJ\2_a +ˋlaÇxLxЋCQ,CQeE%ʯDq_ E_ DHb}sZ/xgt؏6خ`Gñ _=RcR ͳA}TEo ^ ):!r!kx+3!='3Sr 7DRU|(V+K*%D0EtjJ"'v<3ˌU)ӁɃ" cw-A;n%e ɜhK/8BEZ {Fꎙ2x2/;E\Dih=~`^g;%]Z>Ggn9 d[Ixv!n2Uhyb\Ϟ2gmߠQKq3>q/C+@FOzM4|L_"D 5#c )/.?%Ikxyl_2~?4xF}K@*s:*횏t0cRdsHiA)R4Kr/MYZGc{cw;\9Kk5C'/[M& <'Mn \%/6~OͷjTr*7:"jNN C5W{EMH/>VS$fa FW2eDJsl#"+?(4pJו2~G h^O/5˕wgAz;ϐ)ڙ Z(\p=L^!"Iq<@悤̒Ig&! U4 3qG݊:DrPxf4}x'/ Cv8Ϊ/<#ԥjTi,Xm@&2*Ob7N'4C@kvRg3e_nWHav."(.)QliSL>Qᎈ>WGږ89mI4nx`g>?9TS]$8|ΩT_`s~vvCkICrތ;gmuW۬MfNȳ3_;sJ=ȿns;2?&md^# AE/Ao2":,O 3|֕~!F+D.qKXeVA-N\f$6w۾B/}x |ELڊwWxGd29YiAr^Ju.Jr ңaғRs^ej +9ig]_Ȱ_ ^{8QLlz^<x &Z|-F58]m=btٸظ* MTy=3'%oyfN78L]c-*.5R8Oo",;%GEFF\:1t$rr!<^[ΰ*KꚆ`?ĿHMH#[Rw#2Fg^!.e H.<{OLBw2 &1]Ӳ}Of# uAuP2F|pCw1pmj=ZA5]ըFEA~5t[!("(C5oe!zOs哄46R(aLhF؎^WR̹$!USTZZMVq-Xaѿ.$D1.\w׎*F׷ͩsɔJwEۈ!,Գ?!R%W{ΌQv\xHw_pLxyTBdƞKlo$]6qዢv>/FBTqt/7Ybm<~ú_=sOq? 
markovchain/inst/doc/gsoc_2017_additions.Rmd0000644000176200001440000005142213762012754020417 0ustar liggesusers--- title: "Google Summer of Code 2017 Additions" author: "Vandit Jain" date: "August 2017" output: rmarkdown::pdf_document bibliography:
markovchainBiblio.bib vignette: > %\VignetteIndexEntry{Google Summer of Code 2017 Additions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} pkgdown: as_is: true extension: pdf --- # Expected Hitting Time using CTMC The package provides the `ExpectedTime` function to calculate the expected hitting time from one state to another. Let the final state be $j$; for every state $i \in I$, where $I$ is the set of all possible states, assume the holding rate $q_{i} > 0$ for every $i \neq j$. Under these conditions, the vector of expected hitting times is the minimal non-negative solution $p$ of the system of linear equations [@NorrisBook]: \begin{equation} \begin{array}{lcr} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{l} = 1 & k \neq j \end{array} \label{eq:EHT} \end{equation} For example, consider the following continuous time markovchain: ``` {r,message = FALSE} library(markovchain) states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ``` The generator matrix of the ctmc is: \[ M = \left(\begin{array}{cccc} -1 & 1/2 & 1/2 & 0\\ 1/4 & -1/2 & 0 & 1/4\\ 1/6 & 0 & -1/3 & 1/6\\ 0 & 0 & 0 & 0 \end{array}\right) \] To calculate the expected time the process takes to hit state $d$ when started from $a$, we apply the $ExpectedTime$ function. $ExpectedTime$ takes four inputs, namely a $ctmc$-class object, the initial state $i$, the final state $j$ for which the expected hitting time is required, and a logical parameter indicating whether to use the RCpp implementation. By default, the function uses RCpp as it is faster. ``` {r} ExpectedTime(ctmc,1,4) ``` We find that the expected hitting time for the process to hit state $d$ is 7 time units in this case. # Calculating Probability at time T using ctmc The package provides a function `probabilityatT` to calculate the probability of every state at a given time according to a given `ctmc` object. Kolmogorov's backward equation relates the transition matrix at any time $t$ to the generator matrix[@dobrow2016introduction]: \begin{equation} P'(t) = QP(t) \end{equation} Here we use the solution of this differential equation, $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. In this equation, $P(t)$ is the transition function at time $t$. The value $P(t)[i][j]$ describes the conditional probability that the state at time $t$ equals $j$ given that it was $i$ at time $t=0$. The function also handles the case where the `ctmc` object has a generator represented by columns. If the initial state is not provided, the function returns the whole transition matrix $P(t)$. The function is also implemented using RCpp, which reduces computation time and is used by default. Next, we consider both cases: when the initial state is not given and when it is. In the first case, the function takes two inputs: an object of the S4 class 'ctmc' and the final time $t$. ``` {r} probabilityatT(ctmc,1) ``` Here we get the output in the form of a transition matrix. In the second case, i.e. providing an initial state: ``` {r} probabilityatT(ctmc,1,1) ``` In this case we get the probabilities corresponding to every state; this also includes the probability that the process is in the same state $a$ after time $t=1$.
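As a quick cross-check of the relation $P(t) = e^{tQ}$ used above (a sketch, not part of the package's required workflow), the same matrix can be obtained directly with the matrix exponential from the `expm` package, an existing dependency of `markovchain`; `gen` below is the generator built earlier in this section.

``` {r}
# Sketch: P(t) = e^{tQ}, so the matrix exponential of t * gen should agree with
# probabilityatT(ctmc, t) up to numerical error (here t = 1).
expm::expm(1 * gen)
```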
# Plotting generator matrix of continuous-time markovchains The package provides a `plot` function for plotting a generator matrix $Q$ in the form of a directed graph in which every possible state is assigned a node. Edges connecting these nodes are weighted: the weight of the edge going from state $i$ to state $j$ is equal to the value $Q_{ij}$. This gives a picture of the generator matrix. For example, we build a ctmc-class object and plot it. ``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ``` Now if we plot this object we get the following graph: ``` {r} plot(molecularCTMC) ``` The figure shown is built using the $igraph$ package. The package also provides options for plotting the graph using the $diagram$ and $DiagrammeR$ packages. Plots using these packages can be built with the following commands: ``` {r} plot(molecularCTMC,package = "diagram") ``` Similarly, one can easily replace the $diagram$ package with $DiagrammeR$. # Imprecise Continuous-Time Markov chains Continuous-time Markov chains are mathematical models that are used to describe the state-evolution of dynamical systems under stochastic uncertainty. However, building models using continuous time markovchains relies on a number of assumptions which may not be realistic for the domain of application; in particular, the ability to provide exact numerical parameter assessments, and the applicability of time-homogeneity and the eponymous Markov property. Hence we take ICTMCs (imprecise continuous-time Markov chains) into consideration. More technically, an ICTMC is a set of “precise” continuous-time finite-state stochastic processes, and rather than computing expected values of functions, we seek to compute lower expectations, which are tight lower bounds on the expectations that correspond to such a set of “precise” models. ## Types of ICTMCs For any non-empty bounded set of rate matrices $L$, and any non-empty set $M$ of probability mass functions on $X$, we define the following three sets of stochastic processes that are jointly consistent with $L$ and $M$: * $P^{W}_{L,M}$ is the consistent set of all well-behaved stochastic processes; * $P^{WM}_{L,M}$ is the consistent set of all well-behaved Markov chains; * $P^{WHM}_{L,M}$ is the consistent set of all well-behaved homogeneous Markov chains[@ictmcpaper]. From a practical point of view, after having specified a (precise) stochastic process, one is typically interested in the expected value of some function of interest, or the probability of some event. Similarly, in this work, our main objects of consideration will be the lower probabilities that correspond to the ICTMCs. ## Lower Transition Rate Operators for ICTMCs A map $Q_{l}$ from $L(X)$ to $L(X)$ is called a lower transition rate operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all constant functions $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]: 1. $[Q_{l}\mu](x) = 0$ 2. $[Q_{l}I_{y}](x) \geq 0$ for all $y \in X$ such that $x \neq y$, where $I_{y}$ denotes the indicator function of $y$ 3. $[Q_{l}(f + g)](x)\geq [Q_{l}f](x) + [Q_{l}g](x)$ 4. $[Q_{l}(\lambda f)](x) = \lambda [Q_{l}f](x)$ ## Lower Transition Operators A map $T_{l}$ from $L(X)$ to $L(X)$ is called a lower transition operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]: 1. $[T_{l} f](x) \geq \min(f(y) : y \in X)$ 2. $[T_{l}(f +g)](x) \geq [T_{l} f](x)+[T_{l}g](x)$ 3.
$[T_{l}(\lambda f)](x) = \lambda [T_{l} f](x)$ ## impreciseProbabilityatT function Now we come to the practical purpose of the ICTMC classes. ICTMC classes in this package represent a generator in which every row, corresponding to a state of the process, is governed by a separate variable. As defined earlier, an imprecise continuous time markovchain is a set of many precise CTMCs. Hence this representation of a set of precise CTMCs can be used to calculate the transition probability at some future time. This can be seen as an analogue of the `probabilityatT` function, which calculates the transition function at some later time $t$ using the generator matrix. For every generator matrix, we have a corresponding transition function. Similarly, for every lower transition rate operator of an ICTMC, we have a corresponding lower transition operator denoted by $L_{t}^{s}$. Here $t$ is the initial time and $s$ is the final time. Now we mention a proposition[@ictmcpaper] which states that: Let $Q_{l}$ be a lower transition rate operator, choose any two times $t$ and $s$ such that $0 \leq t \leq s$, and let $L_{t}^{s}$ be the lower transition operator corresponding to $Q_{l}$. Then for any $f \in L(X)$ and $\epsilon \in R_{>0}$, if we choose any $n \in N$ such that: \[n \geq \max((s-t)*||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\] with $||f||_{v}$ := max $f$ - min $f$, we are guaranteed that[@ictmcpaper] \[ ||L_{t}^{s} - \prod_{i=1}^{n}(I + \Delta Q_{l}) || \leq \epsilon \] with $\Delta := \frac{s-t}{n}$. Simply put, this tells us that if we can compute $Q_{l}g$ for all $g \in L(X)$, then we can also approximate the quantity $L_{t}^{s}f$ to arbitrary precision, for any given $f \in L(X)$. To explain this approximate calculation, we take a detailed example of a process containing two states, healthy and sick, hence $X = (healthy,sick)$. If we represent it in the form of an ICTMC, we get: \[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) \] for some $a,b \in R_{\geq 0}$. The parameter $a$ here is the rate at which a healthy person becomes sick. Technically, this means that if a person is healthy at time $t$, the probability that he or she will be sick at time $t +\Delta$, for small $\Delta$, is very close to $\Delta a$. More intuitively, if we take the time unit to be one week, it means that he or she will, on average, become sick after $\frac{1}{a}$ weeks. The parameter $b$ is the rate at which a sick person becomes healthy again, and has a similar interpretation. Now, to completely represent the ICTMC in this example, we write the set of generators as: \[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) : a \in [\frac{1}{52},\frac{3}{52}],b \in [\frac{1}{2},2] \] Now suppose we know the initial state of the patient to be sick; this is represented in the form of an indicator function by: \[ I_{s} = \left(\begin{matrix} 0 \\ 1 \end{matrix}\right) \] We observe that $||I_{s}|| = 1$. Now, to use the proposition mentioned above, we use the definition to calculate the lower transition rate operator $Q_{l}$. Next we calculate the norm of the lower transition rate operator and use it in the proposition. Also, we take the value of $\epsilon$ to be 0.001. Using the proposition we can come up with an algorithm for calculating the probability at any time $s$ given the state at initial time $t$ and an ICTMC generator[@ictmcpaper].
The algorithm is as follows: **Input**: A lower transition rate operator $Q$, two time points $t,s$ such that $t \leq s$, a function $f \in L(X)$ and a maximum numerical error $\epsilon \in R_{>0}$. **Algorithm**: 1. $n = \max((s-t)||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)$ 2. $\Delta = \frac{s-t}{n}$ 3. $g_{0} = I_{s}$ 4. for $i \in (1,\ldots,n)$ do $g_{i} = g_{i-1} + \Delta Q_{l}g_{i-1}$ 5. end for 6. return $g_{n}$ **Output**: The conditional probability vector after time $t$ with error $\epsilon$. Hence, after applying the algorithm to the above example we get the following result: $g_{n} = 0.0083$ if the final state is $healthy$ and $g_{n} = 0.141$ if the final state is $sick$. The probability calculated has an error of at most $\epsilon$, i.e. $0.001$. Now we run the algorithm on the example through R code. ``` {r} states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = TRUE) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) ``` The probabilities we get have an error of at most $10^{-3}$. # Continuous time markovchain generator using frequency matrix The package provides the `freq2Generator` function. It takes a matrix representing relative frequency values, along with the time taken, and returns a continuous time markovchain generator matrix. Here, the frequency matrix is a 2-D matrix with dimensions equal to the number of possible states, describing the number of transitions from a state $i$ to $j$ in time $t$, which is another parameter to be provided to the function. The function also allows the user to choose among three methods for calculation of the generator matrix.[@freqArticle] The three methods are as follows: 1. Quasi Optimization - "QO" 2. Diagonal Adjustment - "DA" 3. Weighted Adjustment - "WA" See the reference for details about the methods. Here is an example matrix on which the `freq2Generator` function is run: ``` {r} sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) ``` # Committor of a markovchain Consider two sets of states $A$ and $B$ comprising states from a markovchain with transition matrix $P$. The committor vector of the markovchain with respect to sets $A$ and $B$ gives the probability that the process will hit a state from set $A$ before any state from set $B$. The committor vector $u$ can be calculated by solving the following system of linear equations[@committorlink]: $$ \begin{array}{l} Lu(x) = 0, x \notin A \cup B \\ u(x) = 1, x \in A \\ u(x) = 0, x \in B \end{array} $$ where $L = P - I$. Now we apply the method to an example: ``` {r} transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") committorAB(object,c(5),c(3)) ``` Here we get the probability that the process will hit state "e" before state "c", for the different initial states. # First Passage probability for a set of states Computation of the first passage time distribution for individual states is already implemented in the package. The `firstPassageMultiple` function provides a method to get the first passage probability for a given set of states.
Consider this example markovchain object: ``` {r} statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) ``` Now we apply the `firstPassageMultiple` function to calculate the first passage probabilities for the set of states $"b", "c"$ when the initial state is $"a"$. ``` {r} firstPassageMultiple(testmarkov,"a",c("b","c"),4) ``` This shows us the probability that the process will hit any of the states from the set after $n$ steps. For instance, as shown, the probability that the process hits one of the states among $"b", "c"$ after $2$ steps is $0.6000$. # Joint PDF of number of visits to the various states of a markovchain The package provides a function `noofVisitsDist` that returns the PDF of the number of visits to the various states of the discrete time markovchain during the first N steps, given the initial state of the process. We will take an example to see how to use the function on a `markovchain-class` object: ``` {r} transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") ``` The output clearly shows the probabilities related to the various states of the process. # Expected Rewards for a markovchain The package provides a function `expectedRewards` that returns a vector of expected rewards for different initial states. The user provides reward values as a vector $r$ of size equal to the number of states, with a value corresponding to every state. Given a transition matrix $[P]$, we get the vector of expected rewards $v$ after $n$ transitions according to the following equation[@GallagerBook]: $v[n] = r + [P]*v[n-1]$ Applying this equation to a markovchain-class object: ``` {r} transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) ``` ## Expected Rewards for a set of states in a markovchain process The package provides a function `expectedRewardsBeforeHittingA` that returns the expected first passage reward $E$, given rewards corresponding to every state and an initial state. That is, the function returns the expected reward for a given initial state $s_{0}$, a number of transitions $n$ and a set of states $A$, under the constraint that the process does not hit any of the states that belong to set $A$. $S$ is the set of all possible states. The function uses the following equation: $$E = \sum_{i=1}^{n}{1_{s_{0}}P_{S-A}^{i}R_{S-A}}$$ where $1_{s_{0}} = [0,0,\ldots,0,1,0,\ldots,0]$, with the 1 in the $s_{0}$ position, $P_{S-A}$ is the transition matrix restricted to the states in $S-A$, and $R_{S-A}$ is the rewards vector restricted to those states. # Checking Irreducibility of a CTMC The package provides a function `is.CTMCirreducible` that returns a Boolean value stating whether the ctmc object is irreducible. We know that a continuous time markovchain is irreducible if and only if its embedded chain is irreducible[@Sigman].
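To make the embedded-chain criterion concrete, here is a small hand computation (a sketch, assuming the generator has no absorbing states, i.e. every diagonal rate is strictly negative): the embedded discrete-time chain has transition probabilities $p_{ij} = -q_{ij}/q_{ii}$ for $i \neq j$ and $p_{ii} = 0$, and its irreducibility can be tested with the existing `is.irreducible` method.

``` {r}
# Sketch: build the embedded (jump) chain of a generator by hand and test it.
# The generator is the one used in the example that follows.
gen <- matrix(c(-3, 3, 1, -1), nrow = 2, byrow = TRUE,
              dimnames = list(c("sigma", "sigma_star"), c("sigma", "sigma_star")))
embedded <- sweep(gen, 1, -diag(gen), "/")   # p_ij = q_ij / |q_ii| for i != j
diag(embedded) <- 0
is.irreducible(new("markovchain", transitionMatrix = embedded))
```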
We demonstrate an example running the function: ``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) ``` # Simulation of Higher Order Multivariate Markovchains The package provides the `predictHommc` function. This function provides a simulation system for higher order multivariate markovchains. The function assumes that the state probability distribution of the $j$th sequence at time $r+1$ depends on the state probability distribution of all the sequences at the $n$ previous moments of time, i.e. $t = r$ to $t = r-n+1$. Hence the proposed model takes the following mathematical form[@ching2008higher]: $$ X_{r+1}^{j} = \sum_{k=1}^{s}\sum_{h=1}^n{\lambda_{jk}^{(h)}P_{h}^{(jk)}X_{r-h+1}^{(k)}}, \ \ \ j = 1,2,\ldots,s, \ \ r = n-1,n,\ldots $$ with initial values $X_{0}^{(k)},X_{1}^{(k)},\ldots,X_{n-1}^{(k)} \ (k = 1,2,\ldots,s)$. Here, $\lambda_{jk}^{(h)} \geq 0, \ 1 \leq j,k \leq s, \ 1 \leq h \leq n$, and $\sum_{k=1}^{s}\sum_{h=1}^{n}{\lambda_{jk}^{(h)}} = 1, \ \ \ j = 1,2,\ldots,s.$ Now we run an example on a sample hommc object, simulating the next 3 steps using the `predictHommc` function. The function provides a choice of entering initial states according to the hommc object. In case the user does not enter initial states, the function takes all initial states to be the first state from the set of states. ``` {r} statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") predictHommc(ob,3) ``` # Check Time Reversibility of Continuous-time markovchains A continuous-time markovchain with generator $Q$ and stationary distribution $\pi$ is said to be time reversible if[@dobrow2016introduction]: $$ \pi_{i}q_{ij} = \pi_{j}q_{ji} $$ Intuitively, a continuous-time Markov chain is time reversible if the process in forward time is indistinguishable from the process in reversed time. A consequence is that for all states $i$ and $j$, the long-term forward transition rate from $i$ to $j$ is equal to the long-term backward rate from $j$ to $i$. The package provides the `is.TimeReversible` function to check if a `ctmc` object is time-reversible. We follow with an example run on a `ctmc` object.
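Before running the function, here is a minimal hand check of the detailed-balance condition for the two-state generator used below (a sketch: the stationary distribution is written down by hand rather than computed by the package).

``` {r}
# Sketch: hand check of pi_i * q_ij = pi_j * q_ji for the two-state generator below.
gen <- matrix(c(-3, 3, 1, -1), nrow = 2, byrow = TRUE)
statDist <- c(1, 3) / 4     # solves statDist %*% gen = 0 with sum(statDist) = 1
all.equal(statDist[1] * gen[1, 2], statDist[2] * gen[2, 1])   # TRUE: detailed balance holds
```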
``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) ``` # References markovchain/inst/extdata/0000755000176200001440000000000013762012754015172 5ustar liggesusersmarkovchain/inst/extdata/ltdItaData.txt0000644000176200001440000001754413762012754017761 0ustar liggesusersage;pAD;pID;pAI;pAA 20;0.000461600217312132;0.0108336431456553;0.000176246694342926;0.999362153088345 21;0.000482488764261525;0.0107971909886194;0.000171057735612955;0.999346453500126 22;0.000494993811957668;0.0117707568980395;0.00015923328063064;0.999345772907412 23;0.00050429345591411;0.0115939415352431;0.00016057305104192;0.999335133493044 24;0.000507419256318309;0.0126057434590121;0.000160650439141657;0.99933193030454 25;0.000515426695613004;0.0152636383159172;0.000164360322575267;0.999320212981812 26;0.000525595649643117;0.0179897694669832;0.000161660453870113;0.999312743896487 27;0.000534823584740014;0.0166832171238458;0.000160135991146609;0.999305040424113 28;0.000537542563101855;0.0170682636815637;0.000159910891995352;0.999302546544903 29;0.00054010571047368;0.0189681430662499;0.000154068989936519;0.99930582529959 30;0.000540223049860296;0.0211937258794527;0.000157160647241465;0.999302616302898 31;0.000543650098790082;0.0273283049097408;0.000166920507275799;0.999289429393934 32;0.000550657084106076;0.0297242338721561;0.000176149326345398;0.999273193589549 33;0.000567666524533948;0.0312031127015988;0.000174959290838917;0.999257374184627 34;0.000589564479997331;0.0362614421255913;0.000181142949631881;0.999229292570371 35;0.00061806212917393;0.0400308116670889;0.000191872923626047;0.9991900649472 36;0.000650632511832817;0.0418389766775716;0.000199643253754878;0.999149724234412 37;0.000689474293782214;0.0467358834902955;0.000211977632924897;0.999098548073293 38;0.000731138219342785;0.0523964586730976;0.000221795302027688;0.99904706647863 39;0.000793533578699302;0.055177966832089;0.000240416543812767;0.998966049877488 40;0.000859207185781173;0.0585124067163057;0.000264674910015306;0.998876117904203 41;0.000920725621476301;0.0679656015534935;0.000282355739283444;0.99879691863924 42;0.000984991230447042;0.0757310812483226;0.000314702144743578;0.998700306624809 43;0.00105939521363715;0.0811703192912012;0.000349094911367465;0.998591509874995 44;0.00115292666392253;0.086792620615223;0.000376750745815673;0.998470322590262 45;0.00124967505305504;0.0951282991008114;0.000410031980278301;0.998340292966667 46;0.00134617679388282;0.104306046855522;0.000454381385459602;0.998199441820658 47;0.00147744439982092;0.108863822493192;0.000510090216360853;0.998012465383818 48;0.00160678108915558;0.115727419192654;0.000572265035829771;0.997820953875015 49;0.00175231797005106;0.122620733316479;0.000653974571998273;0.997593707457951 50;0.00192647416205334;0.128264864604654;0.000724389338373027;0.997349136499574 51;0.00211873152640448;0.132487558117908;0.00080347791835564;0.99707779055524 52;0.00231715656887456;0.140903426576919;0.000897377523639647;0.996785465907486 53;0.00257005954245349;0.148308971347905;0.000988973589213584;0.996440966868333 54;0.00282270426395954;0.152694601283312;0.00110175953341175;0.996075536202629 55;0.00308892793456527;0.156014449299285;0.00121842271667517;0.99569264934876 56;0.00334491994186366;0.160354906530207;0.00135221310972041;0.995302866948416 
57;0.0036680988581455;0.163584432824716;0.0015021237857599;0.994829777356095 58;0.00404383426827132;0.166621852151551;0.00164389926056965;0.994312266471159 59;0.00446410949795472;0.172995362892957;0.00181867939206979;0.993717211109975 60;0.00491071415431949;0.175114162729557;0.00203795753783481;0.993051328307846 61;0.00541897972205418;0.176836610548664;0.00223079594105024;0.992350224336896 62;0.0058872972394518;0.179065380547063;0.00243327615598675;0.991679426604561 63;0.00635426205223726;0.18328905395475;0.00268594485354049;0.990959793094222 64;0.00684249456592532;0.186309865207791;0.00299675785270434;0.99016074758137 65;0.00745741108087491;0.18514690618557;0.0034276558099294;0.989114933109196 66;0.00813029947746983;0.186388327973927;0.00401498471329922;0.987854715809231 67;0.00894480989820999;0.189665426363304;0.00467257950797381;0.986382610593816 68;0.0097208540778663;0.190052370707222;0.00533807252107788;0.984941073401056 69;0.0106145155277237;0.188697039510498;0.0060901494087728;0.983295335063503 70;0.0115815669321884;0.190655935136829;0.00698624429053202;0.98143218877728 71;0.0126738410165432;0.193198718680596;0.0080193455961521;0.979306813387305 72;0.0137706103972485;0.194509663180914;0.00912917559527522;0.977100214007476 73;0.0151133228338427;0.197263891809617;0.0104517574376933;0.974434919728464 74;0.0167023100407502;0.19981346841012;0.011975363077487;0.971322326881763 75;0.0185015234683037;0.202602461969921;0.0136406055721243;0.967857870959572 76;0.020543474835099;0.20567144487397;0.0155521514540999;0.963904373710801 77;0.0226990867632567;0.209583024789031;0.0176773816301192;0.959623531606624 78;0.025242012724978;0.216536798215654;0.0199840051708029;0.954773982104219 79;0.0280714682899232;0.222812914664102;0.0225820740028015;0.949346457707275 80;0.0313087874273656;0.229026086379995;0.0254147939549998;0.943276418617635 81;0.0347623037317666;0.237135642759773;0.0283027615785079;0.936934934689726 82;0.0377325549166625;0.246569096453296;0.0313008704863735;0.930966574596964 83;0.0415709360291018;0.255103424222878;0.0350322493312845;0.923396814639614 84;0.0466891630019649;0.263805059072913;0.0392249475621001;0.914085889435935 85;0.0523809285174003;0.275827261187502;0.0435063497564212;0.904112721726179 86;0.0595353395950259;0.287154077419303;0.0477809616106315;0.892683698794343 87;0.0667672618557459;0.296736437668921;0.0520193008104549;0.881213437333799 88;0.0742284842880654;0.307394243456317;0.0567867940499367;0.868984721661998 89;0.08102260498543;0.318807087201621;0.0617441024832344;0.857233292531336 90;0.0879638443512971;0.330335659703562;0.0647086526386771;0.847327503010026 91;0.0956534410840304;0.34429171728002;0.0667471331812722;0.837599425734697 92;0.104629865442421;0.360343939150447;0.0679974373329284;0.827372697224651 93;0.114342159249409;0.376714600886086;0.0692694237579942;0.816388416992597 94;0.124830307842353;0.39337161823954;0.0705634029519849;0.804606289205662 95;0.136132619794602;0.410280336624487;0.0718796873808002;0.791987692824598 96;0.148284870965249;0.427403785267262;0.0732185913907422;0.778496537644009 97;0.16131934887796;0.44470297533411;0.0745804311136826;0.764100220008357 98;0.175263808419464;0.46213723752253;0.0759655243672633;0.748770667213273 99;0.190140356683496;0.479664593311152;0.0773741905500132;0.732485452766491 100;0.205964292416731;0.49724215292386;0.0788067505312643;0.715228957052005 101;0.222742933595137;0.514826532138116;0.0802635265357542;0.696993539869109 102;0.240474474656817;0.532374279403284;0.0817448420228012;0.677780683320382 
103;0.259146922168122;0.549842304367436;0.0832510215599426;0.657602056271935 104;0.278737163401518;0.567188298860823;0.0847823906909263;0.636480445907556 105;0.299210225581725;0.584371141653351;0.0863392757979522;0.614450498620323 106;0.320518783551316;0.601351278877663;0.087922003958059;0.591559212490625 107;0.342602969587468;0.618091072857778;0.089530902793558;0.567866127618974 108;0.365390530594804;0.634555113161069;0.0911663003164189;0.543443169088777 109;0.388797364816074;0.650710484943252;0.0928285247665155;0.51837411041741 110;0.412728452936279;0.666526991020167;0.0945179044436453;0.492753642620076 111;0.437079177930066;0.681977325511973;0.0962347675332406;0.466686054536693 112;0.461737005643692;0.69703719830248;0.0979794419256961;0.440283552430612 113;0.486583475732349;0.711685410881454;0.099752255029241;0.41366426923841 114;0.511496432215896;0.725903885342153;0.101553533576292;0.386950034207812 115;0.536352406554738;0.739677649351817;0.103383603423233;0.360263990022029 116;0.561029055458654;0.752994780772872;0.105242789343564;0.333728155197782 117;0.585407551755433;0.765846316272406;0.107131414814384;0.307461033430183 118;0.609374829980436;0.778226128713706;0.109049801796177;0.281575368223387 119;0.632825598544515;0.790130778382749;0.110998270505863;0.256176130949622 120;0.655664046315679;0.801559343179304;0.112977139183108;0.231358814501213 121;1;1;0;0 markovchain/inst/CITATION0000644000176200001440000000077213762012754014703 0ustar liggesuserscitHeader("To cite package markovchain in publications use:") citEntry(entry="Article", title = "Discrete Time Markov Chains with R", author = person(given="Giorgio Alfredo", family="Spedicato"), month="07", year="2017", journal="The R Journal", url="https://journal.r-project.org/archive/2017/RJ-2017-036/index.html", note="R package version 0.6.9.7", textVersion = "Discrete Time Markov Chains with R" )