markovchain/0000755000176200001440000000000014503777503012567 5ustar liggesusersmarkovchain/NAMESPACE0000644000176200001440000000477414427513574014023 0ustar liggesusersexport("name<-") export(ExpectedTime) export(absorptionProbabilities) export(assessOrder) export(assessStationarity) export(committorAB) export(createSequenceMatrix) export(ctmcFit) export(expectedRewards) export(expectedRewardsBeforeHittingA) export(firstPassage) export(firstPassageMultiple) export(fitHighOrderMultivarMC) export(fitHigherOrder) export(freq2Generator) export(generatorToTransitionMatrix) export(impreciseProbabilityatT) export(inferHyperparam) export(is.CTMCirreducible) export(is.TimeReversible) export(markovchainFit) export(markovchainListFit) export(markovchainSequence) export(meanAbsorptionTime) export(meanFirstPassageTime) export(meanRecurrenceTime) export(multinomialConfidenceIntervals) export(name) export(noofVisitsDist) export(period) export(predictHommc) export(predictiveDistribution) export(priorDistribution) export(probabilityatT) export(rctmc) export(rmarkovchain) export(seq2freqProb) export(seq2matHigh) export(states) export(transition2Generator) export(verifyEmpiricalToTheoretical) export(verifyHomogeneity) export(verifyMarkovProperty) exportClasses(HigherOrderMarkovChain) exportClasses(ctmc) exportClasses(hommc) exportClasses(markovchain) exportClasses(markovchainList) exportMethods("!=") exportMethods("*") exportMethods("==") exportMethods("[") exportMethods("[[") exportMethods("^") exportMethods(absorbingStates) exportMethods(canonicForm) exportMethods(coerce) exportMethods(communicatingClasses) exportMethods(conditionalDistribution) exportMethods(dim) exportMethods(hittingProbabilities) exportMethods(is.accessible) exportMethods(is.irreducible) exportMethods(is.regular) exportMethods(meanNumVisits) exportMethods(plot) exportMethods(predict) exportMethods(print) exportMethods(recurrentClasses) exportMethods(recurrentStates) exportMethods(show) exportMethods(steadyStates) exportMethods(summary) exportMethods(t) exportMethods(transientClasses) exportMethods(transientStates) exportMethods(transitionProbability) import(Matrix) import(igraph) import(methods) import(parallel) importFrom(Rcpp,evalCpp) importFrom(RcppParallel,RcppParallelLibs) importFrom(expm,"%^%") importFrom(expm,logm) importFrom(grDevices,colors) importFrom(stats,aggregate) importFrom(stats,chisq.test) importFrom(stats,pchisq) importFrom(stats,predict) importFrom(stats,rexp) importFrom(stats,sd) importFrom(stats4,plot) importFrom(stats4,summary) importFrom(utils,packageDescription) useDynLib(markovchain, .registration = TRUE)markovchain/demo/0000755000176200001440000000000013762012756013511 5ustar liggesusersmarkovchain/demo/computationTime.R0000644000176200001440000000111213762012756017010 0ustar liggesuserslibrary(microbenchmark) library(markovchain) #using the rain data sequence data(rain) rainSequence<-rain$rain #choosing different sample size sizes<-c(10,50,100,250,500,1096) #estimating microseconds microseconds<-numeric(length(sizes)) for(i in 1:length(sizes)) { mydim<-sizes[i] mysequence<-rainSequence[1:mydim] out<-microbenchmark( myFit<-markovchainFit(data=mysequence) ) microseconds[i]<-mean(out$time) } #plot(sizes, microseconds,type="o",col="steelblue",xlab="character sequence size",ylab="microseconds",main="Computational time vs size") markovchain/demo/quickStart.R0000644000176200001440000000301113762012756015761 0ustar liggesusers# TODO: Add comment # # Author: Giorgio Spedicato 
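# This demo (a brief descriptive note added for orientation) builds a few small
# markovchain objects, applies basic methods (coercion to data.frame, transposition,
# steady states, canonical form), fits a chain from a short character sequence,
# and shows coercion to and from sparse matrices.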
############################################################################### #creates some markovchain objects statesNames=c("a","b") mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames) )) mcB<-new("markovchain", states=c("a","b","c"), transitionMatrix= matrix(c(0.2,0.5,0.3, 0,1,0, 0.1,0.8,0.1),nrow=3, byrow=TRUE)) mcC<-new("markovchain", states=c("a","b","c","d"), transitionMatrix=matrix(c(0.25,0.75,0,0,0.4, 0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) ) mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #apply some methods testConversion<-as(mcC, "data.frame") markovD<-t(mcC) steadyStates(mcC) steadyStates(mcC) #perform some fitting sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit<-markovchainFit(data=sequence,byrow=FALSE) #canonic form #from https://math.dartmouth.edu/archive/m20x06/public_html/Lecture14.pdf P<-zeros(5) P[1,1]<-P[5,5]<-1 P[2,1]<-P[2,3]<-0.5 P[3,2]<-P[3,4]<-0.5 P[4,3]<-P[4,5]<-0.5 mcP<-as(P,"markovchain") mcPCan<-canonicForm(mcP) # coercing markov chains to sparse matrix forth and back require(Matrix) ciauz<-c(0,.5,.5,1,0,0,.2,0,.8) matrix(ciauz, nrow=3, byrow=TRUE) sparse<-as(matrix(ciauz, nrow=3, byrow=TRUE),"sparseMatrix") mc<-as(sparse,"markovchain") sparse2<-as(mc,"sparseMatrix") markovchain/demo/bard.R0000644000176200001440000000213513762012756014545 0ustar liggesusers#Bard PPT examples #Page 8 require(matlab) statesNames=as.character(0:3) pg8<-zeros(4) pg8[1,c(2,3)]<-0.5 pg8[2,1]<-1 pg8[3,4]<-1 pg8[4,c(1,4)]<-0.5 pg8Mc<-new("markovchain",transitionMatrix=pg8,states=statesNames, name="Page 8") summary(pg8Mc) #Page 9 statesNames=as.character(0:4) pg9<-zeros(5) pg9[c(1,2),c(1,2)]<-0.5 pg9[3,3]<-1 pg9[4,c(3,4)]<-1/2 pg9[5,1]<-1 pg9Mc<-new("markovchain", transitionMatrix=pg9, states=statesNames, name="Page 9") summary(pg9Mc) #Page 10 statesNames=as.character(0:3) pg10<-zeros(4) pg10[1,c(2,3)]<-1/2 pg10[c(2,3),4]<-1 pg10[4,1]<-1 pg10Mc<-new("markovchain", transitionMatrix=pg10, states=statesNames, name="Page 10") summary(pg10Mc) #Page 11 statesNames=as.character(1:5) pg11<-zeros(5) pg11[1,c(1,2)]<-c(0.4,0.6) pg11[2,c(1,2)]<-c(0.5,0.5) pg11[3,c(3,4)]<-c(0.3,0.7) pg11[4,c(3,4,5)]<-c(5,4,1)/10 pg11[5,c(4,5)]<-c(0.8,0.2) pg11Mc<-new("markovchain", transitionMatrix=pg11, states=statesNames, name="Page 10") summary(pg11Mc) markovchain/demo/extractMatrices.R0000644000176200001440000000134713762012756017003 0ustar liggesusers#function to extract matrices extractMatrices <- function(mcObj) { require(matlab) mcObj <- canonicForm(object = mcObj) #get the indices of transient and absorbing transIdx <- which(states(mcObj) %in% transientStates(mcObj)) absIdx <- which(states(mcObj) %in% absorbingStates(mcObj)) #get Q, R and I Q <- as.matrix(mcObj@transitionMatrix[transIdx,transIdx]) R <- as.matrix(mcObj@transitionMatrix[transIdx,absIdx]) I <- as.matrix(mcObj@transitionMatrix[absIdx, absIdx]) #get fundamental matrix N <- solve(eye(size(Q)) - Q) #final absorbion probabilities NR <- N %*% R #return out <- list( canonicalForm = mcObj, Q = Q, R = R, I = I, N=N, NR=NR ) return(out) }markovchain/demo/quasiBayesian.R0000644000176200001440000000131513762012756016432 0ustar liggesusers#PSEUDO BAYESIAN ESTIMATOR getAlphaVector <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= 
nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) }markovchain/demo/mathematica.R0000644000176200001440000000232613762012756016114 0ustar liggesusers#STRUCTURAL PROPRIETIES #see mathematica 9 mathematicaAMatr <- matrix(c(0, 1/3, 0, 2/3, 0, 1/2, 0, 0, 0, 1/2, 0, 0, 1/2, 1/2, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0, 0, 1), byrow = TRUE, nrow=5) #should behave like #http://www.wolfram.com/mathematica/new-in-9/markov-chains-and-queues/structural-properties-of-finite-markov-processes.html mathematicaAMc<-as(mathematicaAMatr, "markovchain") summary(mathematicaAMc) canonicForm(mathematicaAMc) is.irreducible(mathematicaAMc) transientStates(mathematicaAMc) absorbingStates(mathematicaAMc) communicatingClasses(mathematicaAMc) recurrentClasses(mathematicaAMc) #first passage time #should behave like #http://www.wolfram.com/mathematica/new-in-9/markov-chains-and-queues/distribution-of-times-to-reach-a-target-state.html mathematicaBMatr= matrix(c( 0, 1/2, 1/2, 1/2, 0, 1/2, 1/2, 1/2, 0),byrow=TRUE, nrow=3) ; mathematicabMc<-as(mathematicaBMatr, "markovchain") firstPassage(mathematicabMc, "s3",9) #if you repeat this more thime results change. No good communicatingClasses(mathematicabMc) recurrentClasses(mathematicabMc)markovchain/demo/examples.R0000644000176200001440000000122213762012756015447 0ustar liggesusers#up and down markov chains mcUpDown<-new("markovchain", states=c("up","down"), transitionMatrix=matrix(c(0,1,1,0),nrow=2, byrow=TRUE),name="UpDown") #gamblers ruin gamblerRuinMarkovChain<-function(moneyMax, prob=0.5) { require(matlab) matr<-zeros(moneyMax+1) states<-as.character(seq(from=0, to=moneyMax, by=1)) rownames(matr)=states; colnames(matr)=states matr[1,1]=1;matr[moneyMax+1,moneyMax+1]=1 for(i in 2:moneyMax) { matr[i,i-1]=1-prob;matr[i,i+1]=prob } out<-new("markovchain", transitionMatrix=matr, name=paste("Gambler ruin",moneyMax,"dim",sep=" ") ) return(out) }markovchain/demo/00Index0000644000176200001440000000074513762012756014651 0ustar liggesusersquickStart Quick start for markovchain package examples Various examples of Markov Chain bard Structural analysis of Markov chains from Bard PPT extractMatrices Given a matrix, returns the canonical form, fundamental matrix etc... 
mathematica Mathematica Markov Chains reliability Simulate reliability in inverting a markovchain quasiBayesian Quasi Bayesian estimator function computationTime Computation time by size of the character sequence markovchain/demo/reliability.R0000644000176200001440000001133613762012756016151 0ustar liggesusers#load required libraries library(parallel) require(MCMCpack) require(markovchain) dimensions2Test<-2:32 numSim=10000 #helper function to create a random stochastic matrix createMatrix<-function(matr_size) { out<-matrix(0, nrow=matr_size, ncol = matr_size) for (i in 1:matr_size) { priors.dirichlet<-runif(n=matr_size) myStochasticRow<-rdirichlet(n=1,alpha=priors.dirichlet) out[i,]<-myStochasticRow } return(out) } createSparseMatrix<-function(matr_size, sparsity=0.75){ out <- matrix(0, nrow=matr_size, ncol = matr_size) nonzeroitems<-ceiling(matr_size*(1-sparsity)) for (i in 1:matr_size) { priors.dirichlet<-runif(n=nonzeroitems) myStochasticRow<-rdirichlet(n=1,alpha=priors.dirichlet) columnsPositions<-sample(x = 1:matr_size,size = nonzeroitems,replace = FALSE) out[i,columnsPositions]<-myStochasticRow } return(out) } #first test: function to simulate the inversion of a matrix of size num checkInversion<-function(i,num){ #simulate the priors myStochasticMatrix<-createMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch(steadyStates(as(myStochasticMatrix, "markovchain")), error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } checkSparseMInversion<-function(i,num){ #simulate the priors myStochasticMatrix<-createSparseMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch(steadyStates(as(myStochasticMatrix, "markovchain")), error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } #performing the simulation successRate<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkInversion") clusterExport(cl, "createMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkInversion,num=dimension) successRate[k]<-mean(simulations) k=k+1 } stopCluster(cl) #summarising first test: #plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate") #abline(h=0.5,col="red") #text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) #dev.off() dimensions2Test = 2^seq(from=3, to=8) successRate<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkSparseMInversion") clusterExport(cl, "createSparseMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkSparseMInversion,num=dimension) successRate[k]<-mean(simulations) k=k+1 } stopCluster(cl) # # # plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate ??? 
sparse matrices") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) #second test: simulating exponentiation checkExponentiation<-function(i,num){ #simulate the priors myStochasticMatrix<-createMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch((as(myStochasticMatrix, "markovchain"))^2, error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } #performing the simulation successRate2<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkExponentiation") clusterExport(cl, "createMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkExponentiation,num=dimension) successRate2[k]<-mean(simulations) k=k+1 } stopCluster(cl) #summarising first test: #par(mfrow=c(1,2)) # plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) # plot(x=dimensions2Test,y=successRate2,type="l",xlab="matrix sixe",ylab="success rate",main="Exponentiation computation success rate") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate2,labels=round(successRate2,digits=2),col="darkred",cex=0.7) markovchain/LICENSE0000644000176200001440000000006114430124236013555 0ustar liggesusersYEAR: 2023 COPYRIGHT HOLDER: markovchain authors markovchain/README.md0000644000176200001440000000146514354626704014054 0ustar liggesusers# markovchain R package providing classes, methods and function for easily handling Discrete Time Markov Chains (DTMC), performing probabilistic analysis and fitting. 
## Install the current release from CRAN: ```r install.packages('markovchain') ``` ## Install the development version from GitHub: ```r devtools::install_github('spedygiorgio/markovchain') ``` ![alt tag](https://travis-ci.org/spedygiorgio/markovchain.svg?branch=master) [![Downloads](http://cranlogs.r-pkg.org/badges/markovchain)](https://cran.r-project.org/package=markovchain) [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/markovchain)](https://cran.r-project.org/package=markovchain) [![Research software impact](http://depsy.org/api/package/cran/markovchain/badge.svg)](http://depsy.org/package/r/markovchain) markovchain/data/0000755000176200001440000000000013762012756013476 5ustar liggesusersmarkovchain/data/kullback.rda0000644000176200001440000000040614503773064015756 0ustar liggesusers͑In@Ef0 "2R '[ABWX#pooPvlտʿ?g-_{4ėk5dYORvu A$}dH+1w70/䞹`^խvE{5Xq>[[f^Dĉ3'7g/F:mMy# !)nb 2_Camarkovchain/data/preproglucacon.rda0000644000176200001440000000211314503773064017206 0ustar liggesusersBZh91AY&SY!5 '!3H0,@/ހ@!B(h1 QOTbd Fƚ 2h 414`U*d i&CF54@2 𠕖se !EvprtpE Ch3ZĮ\ҨJSf:(uɪ8 %$"O;q#,[MM) 'ВVȵ !"[Tч,<E9j\d^4Yp&We3ڜ3x<|WheJ+DG `yrbav`6mVa$B\a LtgV<>_'0 AШE"@TQzTACGꞡ?TR@*hTGUOUzhު@  =UJC`b`& ɐUQ44G4h4l C@5Ti@~6M4L&F4ah2 ~h4`4˚;/ꪪ`o_$I$I$I$I$I$I$I$I$I$I$I$I$Ip|/[ }~q?ym_ն37o[l͛3NNNNNNN[ &})7ٌrpsm$c1c1aR&x"MERaDM""""""ֵkZֵkZE>eYeYeYe\UUUmUUUUU\9s9s(((:c1c1Du[9ss9s9nݻvۻ,U,,mmmm,cQ[jVKHƲ%k%cXST%Iээ C5 $F̴8[[pVeETm$ZMh@ HUK,1QlUTYLVTk&رbk)TVQBc8p+[mJj6@*%L-%bbM%eL,V- EFѴZ [*k"jf&k (ځJ5m4mX#hh 6 RbSjZ-ţdՍ1m&DŌhjch֔#dmIQc%ڐ4FƱmkTpb-IS dLJ-%%FXl[6Zj2Xɴ٭Fƶ*ٕ"Ʋb5F "h*k-0L&fڊlUE5"֍آmF4"(XKXY)) %)0S1"lF$*-BFT`5hjTX V6h&Y 1[Aj+FHXMQ mэkEQZ,ʠMi)+cZfUX-cRmVdŢTmmFQDbPmfL1FƤƱ3Y*ShJcThXմTj,HI(U5hDAFMj5lY3@XmFɫF6d`@PPefMSY) cFJJ,%M+[e[aXIcl4A3X%0EQZ%LE!MBmŰdB&+뮺E]u]1F1c1c1cM49s9sDA!QaTa61&4Xm hA6ѬT*f,hLlX- ԤXѨ5)LmZT`%aHRXKREXh5IjƌQ5M,* beFSH,ԕ2,jePl,EFXCblҦS)b%IU&42-&ɠRhEh1Z#5E6-IcHbfDm&Ѥ$lm!QDMI a3JCj663Z $j4͢05E%QIb-b&b 5*شZƉVhm&AImZ b)ƣFѤk`+6HƓ QC6$ѵ Z1c1c66666AAAAAAAAAA@P@P@P@P6(((((((TBR)JR)ZDQEQmmUUUUUUUUEZьc1cw9I${{s9s9Φiigӝ7T,?m^IB뮺뮺뮺xHnDEΟr":mh7$,hH-NA1c1yyyycpge\ms>g]fŋ,Xgg}}}Ŷmmm'c׏, ,eYeYeY]u]u]u]u򈈢"/{{hiiֵkZֵk@fffffffjR)JR)JR)JR(B!Bffff@L̙ ٙ""֋Z-k{.p!Spmarkovchain/data/craigsendi.rda0000644000176200001440000000025714503773064016302 0ustar liggesusers r0b```b`afd`b2Y# 'J.JL/NKd``p:0C6f :UJC]!4M)@ Qp%Pu`uy]K(@17 U5ݬ9P#%I9@?,1WNmarkovchain/data/rain.rda0000644000176200001440000000131514503773064015117 0ustar liggesusersBZh91AY&SYpZ'2H s@@/'@@ xnMFFLF ?JM!Fdi*COS#CF"*#C&CFL[w0qZ\[M)ftC8naPqHݦ htv2nƛX ksc8vcl(uڦ23Lu&ĒֲX1\K,9SUqSLX6tvb [M$.֔\eb3Z: ,R+2ޏ_Gwwwz@&'Pldiii&RJ cLFQQF!ƄPZ DjGXBHsc$BW1+<2KE%0Xe]U`$PL)IAfYare\L"\.";pWK\*x.sy } \keyword{package} markovchain/man/expectedRewardsBeforeHittingA.Rd0000644000176200001440000000203013762012756021526 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{expectedRewardsBeforeHittingA} \alias{expectedRewardsBeforeHittingA} \title{Expected first passage Rewards for a set of states in a markovchain} \usage{ expectedRewardsBeforeHittingA(markovchain, A, state, rewards, n) } \arguments{ \item{markovchain}{the markovchain-class object} \item{A}{set of states for first passage expected reward} \item{state}{initial state} \item{rewards}{vector depicting rewards coressponding to states} \item{n}{no of steps of the process} } \value{ returns a expected reward (numerical value) as described above } \description{ Given a markovchain object and reward values for every state, function calculates expected 
reward value for a set A of states after n steps. } \details{ The function returns the value of expected first passage rewards given rewards coressponding to every state, an initial state and number of steps. } \author{ Sai Bhargav Yalamanchi, Vandit Jain } markovchain/man/noofVisitsDist.Rd0000644000176200001440000000201113762012756016610 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{noofVisitsDist} \alias{noofVisitsDist} \title{return a joint pdf of the number of visits to the various states of the DTMC} \usage{ noofVisitsDist(markovchain,N,state) } \arguments{ \item{markovchain}{a markovchain-class object} \item{N}{no of steps} \item{state}{the initial state} } \value{ a numeric vector depicting the above described probability density function. } \description{ This function would return a joint pdf of the number of visits to the various states of the DTMC during the first N steps. } \details{ This function would return a joint pdf of the number of visits to the various states of the DTMC during the first N steps. } \examples{ transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") } \author{ Vandit Jain } markovchain/man/zeros.Rd0000644000176200001440000000045314242152072014761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/matlab_package_functions.R \name{zeros} \alias{zeros} \title{Matrix to create zeros} \usage{ zeros(n) } \arguments{ \item{n}{size of the matrix} } \value{ a square matrix of zeros } \description{ Matrix to create zeros } markovchain/man/steadyStates.Rd0000644000176200001440000000241413762012756016305 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{steadyStates} \alias{steadyStates} \title{Stationary states of a \code{markovchain} object} \usage{ steadyStates(object) } \arguments{ \item{object}{A discrete \code{markovchain} object} } \value{ A matrix corresponding to the stationary states } \description{ This method returns the stationary vector in matricial form of a markovchain object. } \note{ The steady states are identified starting from which eigenvectors correspond to identity eigenvalues and then normalizing them to sum up to unity. When negative values are found in the matrix, the eigenvalues extraction is performed on the recurrent classes submatrix. 
} \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) steadyStates(markovB) } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/markovchainSequence.Rd0000644000176200001440000000261413762012756017625 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{markovchainSequence} \alias{markovchainSequence} \title{Function to generate a sequence of states from homogeneous Markov chains.} \usage{ markovchainSequence( n, markovchain, t0 = sample(markovchain@states, 1), include.t0 = FALSE, useRCpp = TRUE ) } \arguments{ \item{n}{Sample size} \item{markovchain}{\code{markovchain} object} \item{t0}{The initial state} \item{include.t0}{Specify if the initial state shall be used} \item{useRCpp}{Boolean. Should RCpp fast implementation being used? Default is yes.} } \value{ A Character Vector } \description{ Provided any \code{markovchain} object, it returns a sequence of states coming from the underlying stationary distribution. } \details{ A sequence of size n is sampled. } \examples{ # define the markovchain object statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) # show the sequence outs <- markovchainSequence(n = 100, markovchain = mcB, t0 = "a") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainFit}} } \author{ Giorgio Spedicato } markovchain/man/tm_abs.Rd0000644000176200001440000000131113762012756015070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{tm_abs} \alias{tm_abs} \title{Single Year Corporate Credit Rating Transititions} \format{ The format is: num [1:8, 1:8] 17 2 0 0 0 0 0 0 1 455 ... - attr(*, "dimnames")=List of 2 ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... } \usage{ data(tm_abs) } \description{ Matrix of Standard and Poor's Global Corporate Rating Transition Frequencies 2000 (NR Removed) } \examples{ data(tm_abs) } \references{ European Securities and Markets Authority, 2016 https://cerep.esma.europa.eu/cerep-web/statistics/transitionMatrice.xhtml } \keyword{datasets} markovchain/man/rctmc.Rd0000644000176200001440000000341613762012756014743 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{rctmc} \alias{rctmc} \title{rctmc} \usage{ rctmc(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, out.type = "list") } \arguments{ \item{n}{The number of samples to generate.} \item{ctmc}{The CTMC S4 object.} \item{initDist}{The initial distribution of states.} \item{T}{The time up to which the simulation runs (all transitions after time T are not returned).} \item{include.T0}{Flag to determine if start state is to be included.} \item{out.type}{"list" or "df"} } \value{ Based on out.type, a list or a data frame is returned. The returned list has two elements - a character vector (states) and a numeric vector (indicating time of transitions). 
The data frame is similarly structured. } \description{ The function generates random CTMC transitions as per the provided generator matrix. } \details{ In order to use the T0 argument, set n to Inf. } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, T = 1) rctmc(n = 5, ctmc = molecularCTMC, initDist = statesDist, include.T0 = FALSE) } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. Anderson, University of Wisconsin at Madison } \seealso{ \code{\link{generatorToTransitionMatrix}},\code{\link{ctmc-class}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/absorptionProbabilities.Rd0000644000176200001440000000222113762012756020515 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{absorptionProbabilities} \alias{absorptionProbabilities} \title{Absorption probabilities} \usage{ absorptionProbabilities(object) } \arguments{ \item{object}{the markovchain object} } \value{ A named vector with the expected number of steps to go from a transient state to any of the recurrent ones } \description{ Computes the absorption probability from each transient state to each recurrent one (i.e. the (i, j) entry or (j, i), in a stochastic matrix by columns, represents the probability that the first not transient state we can go from the transient state i is j (and therefore we are going to be absorbed in the communicating recurrent class of j) } \examples{ m <- matrix(c(1/2, 1/2, 0, 1/2, 1/2, 0, 0, 1/2, 1/2), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) absorptionProbabilities(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. } \author{ Ignacio Cordón } markovchain/man/hommc-show.Rd0000644000176200001440000000064113762012756015711 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{show,hommc-method} \alias{show,hommc-method} \title{Function to display the details of hommc object} \usage{ \S4method{show}{hommc}(object) } \arguments{ \item{object}{An object of class hommc} } \description{ This is a convenience function to display the slots of hommc object in proper format } markovchain/man/inferHyperparam.Rd0000644000176200001440000000440313762012756016764 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{inferHyperparam} \alias{inferHyperparam} \title{Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set} \usage{ inferHyperparam(transMatr = matrix(), scale = numeric(), data = character()) } \arguments{ \item{transMatr}{A valid transition matrix, with dimension names.} \item{scale}{A vector of scaling factors, each element corresponds to the row names of the provided transition matrix transMatr, in the same order.} \item{data}{A data set from which the hyperparameters are inferred.} } \value{ Returns the hyperparameter matrix in a list. 
} \description{ Since the Bayesian inference approach implemented in the package is based on conjugate priors, hyperparameters must be provided to model the prior probability distribution of the chain parameters. The hyperparameters are inferred from a given a priori matrix under the assumption that the matrix provided corresponds to the mean (expected) values of the chain parameters. A scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred from a data set. } \details{ transMatr and scale need not be provided if data is provided. } \note{ The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, and the elements in the matrix are correspondingly permuted. } \examples{ data(rain, package = "markovchain") inferHyperparam(data = rain$rain) weatherStates <- c("sunny", "cloudy", "rain") weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = TRUE, nrow = 3, dimnames = list(weatherStates, weatherStates)) inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) } \references{ Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} } \author{ Sai Bhargav Yalamanchi, Giorgio Spedicato } markovchain/man/markovchain-class.Rd0000644000176200001440000002100313762012756017230 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \docType{class} \name{markovchain-class} \alias{markovchain-class} \alias{*,markovchain,markovchain-method} \alias{*,markovchain,matrix-method} \alias{*,markovchain,numeric-method} \alias{*,matrix,markovchain-method} \alias{*,numeric,markovchain-method} \alias{==,markovchain,markovchain-method} \alias{!=,markovchain,markovchain-method} \alias{absorbingStates,markovchain-method} \alias{transientStates,markovchain-method} \alias{recurrentStates,markovchain-method} \alias{transientClasses,markovchain-method} \alias{recurrentClasses,markovchain-method} \alias{communicatingClasses,markovchain-method} \alias{steadyStates,markovchain-method} \alias{meanNumVisits,markovchain-method} \alias{is.regular,markovchain-method} \alias{is.irreducible,markovchain-method} \alias{is.accessible,markovchain,character,character-method} \alias{is.accessible,markovchain,missing,missing-method} \alias{absorptionProbabilities,markovchain-method} \alias{meanFirstPassageTime,markovchain,character-method} \alias{meanFirstPassageTime,markovchain,missing-method} \alias{meanAbsorptionTime,markovchain-method} \alias{meanRecurrenceTime,markovchain-method} \alias{conditionalDistribution,markovchain-method} \alias{hittingProbabilities,markovchain-method} \alias{canonicForm,markovchain-method} \alias{coerce,data.frame,markovchain-method} \alias{coerce,markovchain,data.frame-method} \alias{coerce,table,markovchain-method} \alias{coerce,markovchain,igraph-method} \alias{coerce,markovchain,matrix-method} \alias{coerce,markovchain,sparseMatrix-method} \alias{coerce,sparseMatrix,markovchain-method} \alias{coerce,matrix,markovchain-method} \alias{coerce,msm,markovchain-method} \alias{coerce,msm.est,markovchain-method} \alias{coerce,etm,markovchain-method} \alias{dim,markovchain-method} \alias{initialize,markovchain-method} \alias{names<-,markovchain-method} \alias{plot,markovchain,missing-method} \alias{predict,markovchain-method} \alias{print,markovchain-method} \alias{show,markovchain-method} 
\alias{summary,markovchain-method} \alias{sort,markovchain-method} \alias{t,markovchain-method} \alias{[,markovchain,ANY,ANY,ANY-method} \alias{^,markovchain,numeric-method} \title{Markov Chain class} \arguments{ \item{states}{Name of the states. Must be the same of \code{colnames} and \code{rownames} of the transition matrix} \item{byrow}{TRUE or FALSE indicating whether the supplied matrix is either stochastic by rows or by columns} \item{transitionMatrix}{Square transition matrix} \item{name}{Optional character name of the Markov chain} } \description{ The S4 class that describes \code{markovchain} objects. } \note{ \enumerate{ \item \code{markovchain} object are backed by S4 Classes. \item Validation method is used to assess whether either columns or rows totals to one. Rounding is used up to \code{.Machine$double.eps * 100}. If state names are not properly defined for a probability \code{matrix}, coercing to \code{markovhcain} object leads to overriding states name with artificial "s1", "s2", ... sequence. In addition, operator overloading has been applied for \eqn{+,*,^,==,!=} operators. } } \section{Creation of objects}{ Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}. } \section{Methods}{ \describe{ \item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects} \item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication} \item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication } \item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain} \item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication } \item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... 
} \item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object} \item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object} \item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object} \item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states } \item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form } \item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}} \item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state} \item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}} \item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} } \item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} } \item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} } \item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} } \item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} } \item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects } \item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects } \item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects } \item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one } \item{dim}{\code{signature(x = "markovchain")}: method to get the size} \item{names}{\code{signature(x = "markovchain")}: method to get the names of states} \item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states} \item{initialize}{\code{signature(.Object = "markovchain")}: initialize method } \item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects } \item{predict}{\code{signature(object = "markovchain")}: predict method } \item{print}{\code{signature(x = "markovchain")}: print method. } \item{show}{\code{signature(object = "markovchain")}: show method. } \item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. } \item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. } \item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. } \item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain } \item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. 
} \item{t}{\code{signature(x = "markovchain")}: transpose matrix } \item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability } } } \examples{ #show markovchain definition showClass("markovchain") #create a simple Markov chain transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") #power simpleMc^4 #some methods steadyStates(simpleMc) absorbingStates(simpleMc) simpleMc[2,1] t(simpleMc) is.irreducible(simpleMc) #conditional distributions conditionalDistribution(simpleMc, "b") #example for predict method sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit<-markovchainFit(data=sequence) predict(mcFit$estimate, newdata="b",n.ahead=3) #direct conversion myMc<-as(transMatr, "markovchain") #example of summary summary(simpleMc) \dontrun{plot(simpleMc)} } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainSequence}},\code{\link{markovchainFit}} } \author{ Giorgio Spedicato } \keyword{classes} markovchain/man/predictHommc.Rd0000644000176200001440000000162013762012756016244 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{predictHommc} \alias{predictHommc} \title{Simulate a higher order multivariate markovchain} \usage{ predictHommc(hommc,t,init) } \arguments{ \item{hommc}{a hommc-class object} \item{t}{no of iterations to predict} \item{init}{matrix of previous states size of which depends on hommc} } \value{ The function returns a matrix of size s X t displaying t predicted states in each row coressponding to every categorical sequence. } \description{ This function provides a prediction of states for a higher order multivariate markovchain object } \details{ The user is required to provide a matrix of giving n previous coressponding every categorical sequence. Dimensions of the init are s X n, where s is number of categorical sequences and n is order of the homc. } \author{ Vandit Jain } markovchain/man/is.accessible.Rd0000644000176200001440000000251613762012756016342 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.accessible} \alias{is.accessible} \title{Verify if a state j is reachable from state i.} \usage{ is.accessible(object, from, to) } \arguments{ \item{object}{A \code{markovchain} object.} \item{from}{The name of state "i" (beginning state).} \item{to}{The name of state "j" (ending state).} } \value{ A boolean value. } \description{ This function verifies if a state is reachable from another, i.e., if there exists a path that leads to state j leaving from state i with positive probability } \details{ It wraps an internal function named \code{reachabilityMatrix}. 
} \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) ) ) is.accessible(markovB, "a", "c") } \references{ James Montgomery, University of Madison } \seealso{ \code{is.irreducible} } \author{ Giorgio Spedicato, Ignacio Cordón } markovchain/man/sales.Rd0000644000176200001440000000131713762012756014740 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{sales} \alias{sales} \title{Sales Demand Sequences} \format{ An object of class \code{matrix} (inherits from \code{array}) with 269 rows and 5 columns. } \usage{ data("sales") } \description{ Sales demand sequences of five products (A, B, C, D, E). Each row corresponds to a sequence. First row corresponds to Sequence A, Second row to Sequence B and so on. } \details{ The example can be used to fit High order multivariate markov chain. } \examples{ data("sales") # fitHighOrderMultivarMC(seqMat = sales, order = 2, Norm = 2) } \keyword{datasets} markovchain/man/meanAbsorptionTime.Rd0000644000176200001440000000174013762012756017431 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanAbsorptionTime} \alias{meanAbsorptionTime} \title{Mean absorption time} \usage{ meanAbsorptionTime(object) } \arguments{ \item{object}{the markovchain object} } \value{ A named vector with the expected number of steps to go from a transient state to any of the recurrent ones } \description{ Computes the expected number of steps to go from any of the transient states to any of the recurrent states. The Markov chain should have at least one transient state for this method to work } \examples{ m <- matrix(c(1/2, 1/2, 0, 1/2, 1/2, 0, 0, 1/2, 1/2), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) times <- meanAbsorptionTime(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. } \author{ Ignacio Cordón } markovchain/man/markovchainList-class.Rd0000644000176200001440000000516713762012756020101 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \docType{class} \name{markovchainList-class} \alias{markovchainList-class} \alias{[[,markovchainList-method} \alias{dim,markovchainList-method} \alias{predict,markovchainList-method} \alias{print,markovchainList-method} \alias{show,markovchainList-method} \title{Non homogeneus discrete time Markov Chains class} \arguments{ \item{markovchains}{Object of class \code{"list"}: a list of markovchains} \item{name}{Object of class \code{"character"}: optional name of the class} } \description{ A class to handle non homogeneous discrete Markov chains } \note{ The class consists in a list of \code{markovchain} objects. It is aimed at working with non homogeneous Markov chains. } \section{Objects from the Class}{ A \code{markovchainlist} is a list of \code{markovchain} objects. They can be used to model non homogeneous discrete time Markov Chains, when transition probabilities (and possible states) change by time. 
} \section{Methods}{ \describe{ \item{[[}{\code{signature(x = "markovchainList")}: extract the i-th \code{markovchain} } \item{dim}{\code{signature(x = "markovchainList")}: number of \code{markovchain} underlying the matrix } \item{predict}{\code{signature(object = "markovchainList")}: predict from a \code{markovchainList} } \item{print}{\code{signature(x = "markovchainList")}: prints the list of markovchains } \item{show}{\code{signature(object = "markovchainList")}: same as \code{print} } } } \examples{ showClass("markovchainList") #define a markovchainList statesNames=c("a","b") mcA<-new("markovchain",name="MCA", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9), byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames)) ) mcB<-new("markovchain", states=c("a","b","c"), name="MCB", transitionMatrix=matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, byrow=TRUE)) mcC<-new("markovchain", states=c("a","b","c","d"), name="MCC", transitionMatrix=matrix(c(0.25,0.75,0,0,0.4,0.6, 0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) ) mcList<-new("markovchainList",markovchains=list(mcA, mcB, mcC), name="Non - homogeneous Markov Chain") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } \keyword{classes} markovchain/man/markovchainListFit.Rd0000644000176200001440000000240313762012756017427 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{markovchainListFit} \alias{markovchainListFit} \title{markovchainListFit} \usage{ markovchainListFit(data, byrow = TRUE, laplacian = 0, name) } \arguments{ \item{data}{Either a matrix or a data.frame or a list object.} \item{byrow}{Indicates whether distinc stochastic processes trajectiories are shown in distinct rows.} \item{laplacian}{Laplacian correction (default 0).} \item{name}{Optional name.} } \value{ A list containing two slots: estimate (the estimate) name } \description{ Given a data frame or a matrix (rows are observations, by cols the temporal sequence), it fits a non - homogeneous discrete time markov chain process (storing row). In particular a markovchainList of size = ncol - 1 is obtained estimating transitions from the n samples given by consecutive column pairs. } \details{ If \code{data} contains \code{NAs} then the transitions containing \code{NA} will be ignored. } \examples{ # using holson dataset data(holson) # fitting a single markovchain singleMc <- markovchainFit(data = holson[,2:12]) # fitting a markovchainList mclistFit <- markovchainListFit(data = holson[, 2:12], name = "holsonMcList") } markovchain/man/fitHigherOrder.Rd0000644000176200001440000000222014430750470016523 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitHigherOrder.R \name{fitHigherOrder} \alias{fitHigherOrder} \alias{seq2freqProb} \alias{seq2matHigh} \title{Functions to fit a higher order Markov chain} \usage{ fitHigherOrder(sequence, order = 2) seq2freqProb(sequence) seq2matHigh(sequence, order) } \arguments{ \item{sequence}{A character list.} \item{order}{Markov chain order} } \value{ A list containing lambda, Q, and X. } \description{ Given a sequence of states arising from a stationary state, it fits the underlying Markov chain distribution with higher order. } \examples{ sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b", "c", "a", "b", "c", "a", "b", "a", "b") fitHigherOrder(sequence) } \references{ Ching, W. 
K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov chains. In Markov Chains (pp. 141-176). Springer US. Ching, W. K., Ng, M. K., & Fung, E. S. (2008). Higher-order multivariate Markov chains and their applications. Linear Algebra and its Applications, 428(2), 492-507. } \author{ Giorgio Spedicato, Tae Seung Kang } markovchain/man/fitHighOrderMultivarMC.Rd0000644000176200001440000000205413762012756020152 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{fitHighOrderMultivarMC} \alias{fitHighOrderMultivarMC} \title{Function to fit Higher Order Multivariate Markov chain} \usage{ fitHighOrderMultivarMC(seqMat, order = 2, Norm = 2) } \arguments{ \item{seqMat}{a matrix or a data frame where each column is a categorical sequence} \item{order}{Multivariate Markov chain order. Default is 2.} \item{Norm}{Norm to be used. Default is 2.} } \value{ an hommc object } \description{ Given a matrix of categorical sequences it fits Higher Order Multivariate Markov chain. } \examples{ data <- matrix(c('2', '1', '3', '3', '4', '3', '2', '1', '3', '3', '2', '1', c('2', '4', '4', '4', '4', '2', '3', '3', '1', '4', '3', '3')), ncol = 2, byrow = FALSE) fitHighOrderMultivarMC(data, order = 2, Norm = 2) } \references{ W.-K. Ching et al. / Linear Algebra and its Applications } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/meanRecurrenceTime.Rd0000644000176200001440000000215013762012756017402 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanRecurrenceTime} \alias{meanRecurrenceTime} \title{Mean recurrence time} \usage{ meanRecurrenceTime(object) } \arguments{ \item{object}{the markovchain object} } \value{ For a Markov chain it outputs is a named vector with the expected time to first return to a state when the chain starts there. States present in the vector are only the recurrent ones. If the matrix is ergodic (i.e. irreducible), then all states are present in the output and order is the same as states order for the Markov chain } \description{ Computes the expected time to return to a recurrent state in case the Markov chain starts there } \examples{ m <- matrix(1 / 10 * c(6,3,1, 2,3,5, 4,1,5), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) meanRecurrenceTime(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. 
} \author{ Ignacio Cordón } markovchain/man/transition2Generator.Rd0000644000176200001440000000145213762012756017754 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{transition2Generator} \alias{transition2Generator} \title{Return the generator matrix for a corresponding transition matrix} \usage{ transition2Generator(P, t = 1, method = "logarithm") } \arguments{ \item{P}{transition matrix between time 0 and t} \item{t}{time of observation} \item{method}{"logarithm" returns the Matrix logarithm of the transition matrix} } \value{ A matrix that represents the generator of P } \description{ Calculate the generator matrix for a corresponding transition matrix } \examples{ mymatr <- matrix(c(.4, .6, .1, .9), nrow = 2, byrow = TRUE) Q <- transition2Generator(P = mymatr) expm::expm(Q) } \seealso{ \code{\link{rctmc}} } markovchain/man/markovchainFit.Rd0000644000176200001440000001113413762012756016574 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{createSequenceMatrix} \alias{createSequenceMatrix} \alias{markovchainFit} \title{Function to fit a discrete Markov chain} \usage{ createSequenceMatrix( stringchar, toRowProbs = FALSE, sanitize = FALSE, possibleStates = character() ) markovchainFit( data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, confint = TRUE, hyperparam = matrix(), sanitize = FALSE, possibleStates = character() ) } \arguments{ \item{stringchar}{It can be an {n x n} matrix or a character vector or a list} \item{toRowProbs}{converts a sequence matrix into a probability matrix} \item{sanitize}{put 1 in all rows having rowSum equal to zero} \item{possibleStates}{Possible states which are not present in the given sequence} \item{data}{It can be a character vector or an {n x n} matrix or an {n x n} data frame or a list} \item{method}{Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace"} \item{byrow}{it tells whether the output Markov chain should show the transition probabilities by row.} \item{nboot}{Number of bootstrap replicates in case "bootstrap" is used.} \item{laplacian}{Laplacian smoothing parameter, default zero. It is only used when "laplace" method is chosen.} \item{name}{Optional character for name slot.} \item{parallel}{Use parallel processing when performing Bootstrap estimates.} \item{confidencelevel}{\deqn{\alpha} level for confidence intervals width. Used only when \code{method} is equal to "mle".} \item{confint}{a boolean to decide whether to compute Confidence Interval or not.} \item{hyperparam}{Hyperparameter matrix for the a priori distribution. If none is provided, default value of 1 is assigned to each parameter. This must be of size {k x k} where k is the number of states in the chain and the values should typically be non-negative integers.} } \value{ A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix of standard deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method is used, the lower and upper confidence bounds are returned along with the standard error. The "map" method also returns the expected value of the parameters with respect to the posterior distribution.
} \description{ Given a sequence of states arising from a stationary state, it fits the underlying Markov chain distribution using either MLE (also using a Laplacian smoother), bootstrap or by MAP (Bayesian) inference. } \details{ Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} contain \code{NAs}, the related \code{NA} containing transitions will be ignored. } \note{ This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". In addition, parallel facility is not complete, involving only a part of the bootstrap process. When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is currently available. } \examples{ sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) mcFitMLE <- markovchainFit(data = sequence) mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") na.sequence <- c("a", NA, "a", "b") # There will be only a (a,b) transition na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) mcFitMLE <- markovchainFit(data = na.sequence) # data can be a list of character vectors sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) mcFitMap <- markovchainFit(sequences, method = "map") mcFitMle <- markovchainFit(sequences, method = "mle") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, Alfred Hubler, Santa Fe Institute Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} } \author{ Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi } markovchain/man/HigherOrderMarkovChain-class.Rd0000644000176200001440000000047513762012756021265 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitHigherOrder.R \docType{class} \name{HigherOrderMarkovChain-class} \alias{HigherOrderMarkovChain-class} \title{Higher order Markov Chains class} \description{ The S4 class that describes \code{HigherOrderMarkovChain} objects. } markovchain/man/is.regular.Rd0000644000176200001440000000155013762012756015703 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.regular} \alias{is.regular} \title{Check if a DTMC is regular} \usage{ is.regular(object) } \arguments{ \item{object}{a markovchain object} } \value{ A boolean value } \description{ Function to check wether a DTCM is regular } \details{ A Markov chain is regular if some of the powers of its matrix has all elements strictly positive } \examples{ P <- matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25, 0.25, 0.5), nrow = 3) colnames(P) <- rownames(P) <- c("R","N","S") ciao <- as(P, "markovchain") is.regular(ciao) } \references{ Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. 
Corollary 8.5.8, Theorem 8.5.9 } \seealso{ \code{\link{is.irreducible}} } \author{ Ignacio Cordón } markovchain/man/states.Rd0000644000176200001440000000177413762012756015143 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{states} \alias{states} \alias{states,markovchain-method} \title{Defined states of a transition matrix} \usage{ states(object) \S4method{states}{markovchain}(object) } \arguments{ \item{object}{A discrete \code{markovchain} object} } \value{ The character vector corresponding to states slot. } \description{ This method returns the states of a transition matrix. } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) states(markovB) names(markovB) } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/is.irreducible.Rd0000644000176200001440000000177513762012756016544 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.irreducible} \alias{is.irreducible} \title{Function to check if a Markov chain is irreducible (i.e. ergodic)} \usage{ is.irreducible(object) } \arguments{ \item{object}{A \code{markovchain} object} } \value{ A boolean values. } \description{ This function verifies whether a \code{markovchain} object transition matrix is composed by only one communicating class. } \details{ It is based on \code{.communicatingClasses} internal function. } \examples{ statesNames <- c("a", "b") mcA <- new("markovchain", transitionMatrix = matrix(c(0.7,0.3,0.1,0.9), byrow = TRUE, nrow = 2, dimnames = list(statesNames, statesNames) )) is.irreducible(mcA) } \references{ Feres, Matlab listings for Markov Chains. } \seealso{ \code{\link{summary}} } \author{ Giorgio Spedicato } markovchain/man/predictiveDistribution.Rd0000644000176200001440000000406713762012756020374 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{predictiveDistribution} \alias{predictiveDistribution} \title{predictiveDistribution} \usage{ predictiveDistribution(stringchar, newData, hyperparam = matrix()) } \arguments{ \item{stringchar}{This is the data using which the Bayesian inference is performed.} \item{newData}{This is the data whose predictive probability is computed.} \item{hyperparam}{This determines the shape of the prior distribution of the parameters. If none is provided, default value of 1 is assigned to each parameter. This must be of size kxk where k is the number of states in the chain and the values should typically be non-negative integers.} } \value{ The log of the probability is returned. } \description{ The function computes the probability of observing a new data set, given a data set } \details{ The underlying method is Bayesian inference. The probability is computed by averaging the likelihood of the new data with respect to the posterior. Since the method assumes conjugate priors, the result can be represented in a closed form (see the vignette for more details), which is what is returned. 
} \examples{ sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) predProb2==predProb } \references{ Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, Alfred Hubler, Santa Fe Institute Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainFit}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/statisticalTests.Rd0000644000176200001440000000564713762012756017202 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/statisticalTests.R \name{verifyMarkovProperty} \alias{verifyMarkovProperty} \alias{assessOrder} \alias{assessStationarity} \alias{verifyEmpiricalToTheoretical} \alias{verifyHomogeneity} \title{Various functions to perform statistical inference of DTMC} \usage{ verifyMarkovProperty(sequence, verbose = TRUE) assessOrder(sequence, verbose = TRUE) assessStationarity(sequence, nblocks, verbose = TRUE) verifyEmpiricalToTheoretical(data, object, verbose = TRUE) verifyHomogeneity(inputList, verbose = TRUE) } \arguments{ \item{sequence}{An empirical sequence.} \item{verbose}{Should test results be printed out?} \item{nblocks}{Number of blocks.} \item{data}{matrix, character or list to be converted into a raw transition matrix} \item{object}{a markovchain object} \item{inputList}{A list of items that can be coerced to transition matrices} } \value{ Verification result: a list with the following slots: statistic (the chi-square statistic), dof (degrees of freedom), and the corresponding p-value. If a cell of the empirical transition matrix is greater than 0 while the corresponding cell of the theoretical transition matrix is 0, the null hypothesis is rejected; in that case a p-value of 0 and statistic and dof of NA are returned. a list of transition matrices? } \description{ These functions verify the Markov property, assess the order and stationarity of the Markov chain. \code{verifyEmpiricalToTheoretical} tests whether an empirical transition matrix is statistically compatible with a theoretical one. It is a chi-square based test. If a cell of the empirical transition matrix is greater than 0 while the corresponding cell of the theoretical transition matrix is 0, the null hypothesis is rejected.
Verifies that the s elements in the input list belongs to the same DTMC } \examples{ sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit <- markovchainFit(data = sequence, byrow = FALSE) verifyMarkovProperty(sequence) assessOrder(sequence) assessStationarity(sequence, 1) #Example taken from Kullback Kupperman Tests for Contingency Tables and Markov Chains sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) } \references{ Anderson and Goodman. } \seealso{ \code{markovchain} } \author{ Tae Seung Kang, Giorgio Alfredo Spedicato } \concept{statisticalTests} markovchain/man/meanNumVisits.Rd0000644000176200001440000000170114243234660016423 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanNumVisits} \alias{meanNumVisits} \title{Mean num of visits for markovchain, starting at each state} \usage{ meanNumVisits(object) } \arguments{ \item{object}{the markovchain-class object} } \value{ a matrix with the expect number of visits to each state } \description{ Given a markovchain object, this function calculates a matrix where the element (i, j) represents the expect number of visits to the state j if the chain starts at i (in a Markov chain by columns it would be the element (j, i) instead) } \examples{ M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 mc <- new("markovchain", transitionMatrix = M) meanNumVisits(mc) } \references{ R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 } \author{ Ignacio Cordón } markovchain/man/rmarkovchain.Rd0000644000176200001440000000547313762012756016324 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{rmarkovchain} \alias{rmarkovchain} \title{Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.} \usage{ rmarkovchain( n, object, what = "data.frame", useRCpp = TRUE, parallel = FALSE, num.cores = NULL, ... ) } \arguments{ \item{n}{Sample size} \item{object}{Either a \code{markovchain} or a \code{markovchainList} object} \item{what}{It specifies whether either a \code{data.frame} or a \code{matrix} (each rows represent a simulation) or a \code{list} is returned.} \item{useRCpp}{Boolean. Should RCpp fast implementation being used? Default is yes.} \item{parallel}{Boolean. Should parallel implementation being used? Default is yes.} \item{num.cores}{Number of Cores to be used} \item{...}{additional parameters passed to the internal sampler} } \value{ Character Vector, data.frame, list or matrix } \description{ Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of states coming from the underlying stationary distribution. } \details{ When a homogeneous process is assumed (\code{markovchain} object) a sequence is sampled of size n. 
When a non-homogeneous process is assumed, n samples are taken, but the process is assumed to last from the beginning to the end of the non-homogeneous Markov process. } \note{ Check the type of input } \examples{ # define the markovchain object statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) # show the sequence outs <- rmarkovchain(n = 100, object = mcB, what = "list") #define markovchainList object statesNames <- c("a", "b", "c") mcA <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcC <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) # show the list of sequence rmarkovchain(100, mclist, "list") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainFit}}, \code{\link{markovchainSequence}} } \author{ Giorgio Spedicato } markovchain/man/ctmcFit.Rd0000644000176200001440000000257513762012756015225 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{ctmcFit} \alias{ctmcFit} \title{Function to fit a CTMC} \usage{ ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) } \arguments{ \item{data}{It is a list of two elements. The first element is a character vector denoting the states. The second is a numeric vector denoting the corresponding transition times.} \item{byrow}{Determines if the output transition probabilities of the underlying embedded DTMC are by row.} \item{name}{Optional name for the CTMC.} \item{confidencelevel}{Confidence level for the confidence interval construction.} } \value{ It returns a list containing the CTMC object and the confidence intervals. } \description{ This function fits the underlying CTMC given the state transition data and the transition times using the maximum likelihood method (MLE). } \details{ Note that in \code{data} there must be an element-wise correspondence between the two elements of the list, and that \code{data[[2]][1]} is always 0.
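In short, the embedded DTMC is fitted via \code{markovchainFit} on the state sequence, while the rate of leaving each state \eqn{i} is estimated as \eqn{\hat{\lambda}_i = N_i / T_i}, where \eqn{N_i} is the number of observed transitions out of state \eqn{i} and \eqn{T_i} the total sojourn time spent in it.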
} \examples{ data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) } \references{ Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 } \seealso{ \code{\link{rctmc}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/hittingProbabilities.Rd0000644000176200001440000000145314242455302020001 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{hittingProbabilities} \alias{hittingProbabilities} \title{Hitting probabilities for markovchain} \usage{ hittingProbabilities(object) } \arguments{ \item{object}{the markovchain-class object} } \value{ a matrix of hitting probabilities } \description{ Given a markovchain object, this function calculates the probability of ever arriving from state i to j } \examples{ M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 mc <- new("markovchain", transitionMatrix = M) hittingProbabilities(mc) } \references{ R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 } \author{ Ignacio Cordón } markovchain/man/blanden.Rd0000644000176200001440000000150713762012756015235 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{blanden} \alias{blanden} \title{Mobility between income quartiles} \format{ An object of class \code{table} with 4 rows and 4 columns. } \source{ Personal reworking } \usage{ data(blanden) } \description{ This table show mobility between income quartiles for father and sons for the 1970 cohort born } \details{ The rows represent fathers' income quartile when the son is aged 16, whilst the columns represent sons' income quartiles when he is aged 30 (in 2000). } \examples{ data(blanden) mobilityMc<-as(blanden, "markovchain") } \references{ Jo Blanden, Paul Gregg and Stephen Machin, Intergenerational Mobility in Europe and North America, Center for Economic Performances (2005) } \keyword{datasets} markovchain/man/conditionalDistribution.Rd0000644000176200001440000000213013762012756020526 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{conditionalDistribution} \alias{conditionalDistribution} \title{\code{conditionalDistribution} of a Markov Chain} \usage{ conditionalDistribution(object, state) } \arguments{ \item{object}{A \code{markovchain} object.} \item{state}{Subsequent state.} } \value{ A named probability vector } \description{ It extracts the conditional distribution of the subsequent state, given current state. 
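For a \code{markovchain} stored by rows, this is the row of the transition matrix labelled by \code{state}; for instance, in the example below \code{conditionalDistribution(markovB, "b")} returns the named vector \code{c(a = 0, b = 1, c = 0)}.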
} \examples{ # define a markov chain statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1),nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) conditionalDistribution(markovB, "b") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/is.CTMCirreducible.Rd0000644000176200001440000000165613762012756017211 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{is.CTMCirreducible} \alias{is.CTMCirreducible} \title{Check if CTMC is irreducible} \usage{ is.CTMCirreducible(ctmc) } \arguments{ \item{ctmc}{a ctmc-class object} } \value{ a boolean value as described above. } \description{ This function verifies whether a CTMC object is irreducible } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) } \references{ Continuous-Time Markov Chains, Karl Sigman, Columbia University } \author{ Vandit Jain } markovchain/man/generatorToTransitionMatrix.Rd0000644000176200001440000000201413762012756021355 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{generatorToTransitionMatrix} \alias{generatorToTransitionMatrix} \title{Function to obtain the transition matrix from the generator} \usage{ generatorToTransitionMatrix(gen, byrow = TRUE) } \arguments{ \item{gen}{The generator matrix} \item{byrow}{Flag to determine if rows (columns) sum to 0} } \value{ Returns the transition matrix. } \description{ The transition matrix of the embedded DTMC is inferred from the CTMC's generator } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) generatorToTransitionMatrix(gen) } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. Anderson, University of Wisconsin at Madison } \seealso{ \code{\link{rctmc}},\code{\link{ctmc-class}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/kullback.Rd0000644000176200001440000000064613762012756015425 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{kullback} \alias{kullback} \title{Example from Kullback and Kupperman Tests for Contingency Tables} \format{ A list containing two 6x6 non - negative integer matrices } \usage{ data(kullback) } \description{ A list of two matrices representing raw transitions between two states } \keyword{datasets} markovchain/man/priorDistribution.Rd0000644000176200001440000000372013762012756017364 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{priorDistribution} \alias{priorDistribution} \title{priorDistribution} \usage{ priorDistribution(transMatr, hyperparam = matrix()) } \arguments{ \item{transMatr}{The transition matrix whose probability is the parameter of interest.} \item{hyperparam}{The hyperparam matrix (optional). 
If not provided, a default value of 1 is assumed for each entry and therefore the resulting probability distribution is uniform.} } \value{ The log of the probabilities for each state is returned in a numeric vector. Each number in the vector represents the log-probability of having the transition probability vector specified in the corresponding row of the transition matrix. } \description{ Function to evaluate the prior probability of a transition matrix. It is based on conjugate priors and therefore a Dirichlet distribution is used to model the transitions of each state. } \details{ The states (dimnames) of the transition matrix and the hyperparam may be in any order. } \note{ This function can be used in conjunction with inferHyperparam. For example, if the user has a prior data set and a prior transition matrix, they can infer the hyperparameters using inferHyperparam and then compute the probability of their prior matrix using the inferred hyperparameters with priorDistribution. } \examples{ priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), matrix(c(2, 2, 2, 2), nrow = 2, dimnames = list(c("a", "b"), c("a", "b")))) } \references{ Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} } \author{ Sai Bhargav Yalamanchi, Giorgio Spedicato } markovchain/man/committorAB.Rd0000644000176200001440000000220313762012756016044 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{committorAB} \alias{committorAB} \title{Calculates committor of a markovchain object with respect to set A, B} \usage{ committorAB(object,A,B,p) } \arguments{ \item{object}{a markovchain class object} \item{A}{a set of states} \item{B}{a set of states} \item{p}{initial state (default value : 1)} } \value{ Returns a vector of probabilities if the initial state is not provided, otherwise a single number } \description{ Returns the probability of hitting states from set A before set B, for different initial states } \details{ The function solves a system of linear equations to calculate the probability that the process hits a state from set A before any state from set B } \examples{ transMatr <- matrix(c(0,0,0,1,0.5, 0.5,0,0,0,0, 0.5,0,0,0,0, 0,0.2,0.4,0,0, 0,0.8,0.6,0,0.5), nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr) committorAB(object,c(5),c(3)) } markovchain/man/getName.Rd0000644000176200001440000000142413762012756015210 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{name} \alias{name} \alias{name,markovchain-method} \title{Method to retrieve name of markovchain object} \usage{ name(object) \S4method{name}{markovchain}(object) } \arguments{ \item{object}{A markovchain object} } \description{ This method returns the name of a markovchain object } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) name(markovB) } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/DESCRIPTION0000644000176200001440000000563014503777502014300 0ustar liggesusersPackage: markovchain Type: Package Title: Easy Handling Discrete Time Markov Chains Version: 0.9.5 Authors@R: c(
person("Giorgio Alfredo", "Spedicato", role = c("aut", "cre"), email = "spedicato_giorgio@yahoo.it",comment = c(ORCID = "0000-0002-0315-8888")), person("Tae", "Seung Kang", role = "aut"), person("Sai", "Bhargav Yalamanchi", role = "aut"), person("Mildenberger", "Thoralf", role = "ctb", comment = c(ORCID = "0000-0001-7242-1873")), person("Deepak", "Yadav", role = "aut"), person("Ignacio", "Cordón", role = "aut", comment = c(ORCID = "0000-0002-3152-0231")), person("Vandit", "Jain", role = "ctb"), person("Toni", "Giorgino", role="ctb", comment = c(ORCID = "0000-0001-6449-0596")), person("Richèl J.C.", "Bilderbeek", role = "ctb", comment = c(ORCID = "0000-0003-1107-7049")), person("Daniel", "Ebbert", email = "daniel@ebbert.nrw", role = "ctb", comment = c(ORCID = "0000-0003-3666-7205")), person("Shreyash", "Maheshwari", email = "coolshreysh1000@gmail.com", role = "ctb") ) Maintainer: Giorgio Alfredo Spedicato Description: Functions and S4 methods to create and manage discrete time Markov chains more easily. In addition functions to perform statistical (fitting and drawing random variates) and probabilistic (analysis of their structural proprieties) analysis are provided. See Spedicato (2017) . Some functions for continuous times Markov chains depends on the suggested ctmcd package, that, as May 2023, can be retrieved from . License: MIT + file LICENSE Depends: R (>= 4.2.0), methods Imports: igraph, Matrix (>= 1.5-0), expm, stats4, parallel, Rcpp (>= 1.0.2), RcppParallel, utils, stats, grDevices Suggests: knitr, testthat, diagram, DiagrammeR, msm, Rsolnp, rmarkdown, ctmcd, bookdown, rticles Enhances: etm VignetteBuilder: rmarkdown, knitr, bookdown, rticles LinkingTo: Rcpp, RcppParallel, RcppArmadillo (>= 0.9.600.4.0) SystemRequirements: GNU make LazyLoad: yes ByteCompile: yes Encoding: UTF-8 BugReports: https://github.com/spedygiorgio/markovchain/issues URL: https://github.com/spedygiorgio/markovchain/ RoxygenNote: 7.2.3 NeedsCompilation: yes Packaged: 2023-09-24 08:41:22 UTC; giorgio Author: Giorgio Alfredo Spedicato [aut, cre] (), Tae Seung Kang [aut], Sai Bhargav Yalamanchi [aut], Mildenberger Thoralf [ctb] (), Deepak Yadav [aut], Ignacio Cordón [aut] (), Vandit Jain [ctb], Toni Giorgino [ctb] (), Richèl J.C. Bilderbeek [ctb] (), Daniel Ebbert [ctb] (), Shreyash Maheshwari [ctb] Repository: CRAN Date/Publication: 2023-09-24 09:20:02 UTC markovchain/build/0000755000176200001440000000000014503773052013661 5ustar liggesusersmarkovchain/build/vignette.rds0000644000176200001440000000051314503773054016221 0ustar liggesusersRAK0Nn D$PԋᘂQBh6+i?5mƨ/yy{:B:;a*AJ"d-B$91d4ӈO)3Q!1Q5I,+،"N6M -zl  J8~^)WB| J X-{t_37:iA. ~]Tuno]&n-/ucܚ90L$v]Q|'tfU3̮l/|h`[^PǦ UdGJmarkovchain/tests/0000755000176200001440000000000013762012756013727 5ustar liggesusersmarkovchain/tests/testthat/0000755000176200001440000000000014503777502015570 5ustar liggesusersmarkovchain/tests/testthat/testctmc.R0000644000176200001440000001065514425420344017540 0ustar liggesuserslibrary(markovchain) context("Checking that ExpectedTime function works as expected; it depends on ctmcd") # Example from the book Markovchains, J. R. 
Norris, Cambridge University Press states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") test_that("Check Expected hitting time from one state to another",{ # Skip the test if the ctmcd package is not available if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(ExpectedTime(ctmc,1,4),7) expect_equal(ExpectedTime(ctmc,2,4),5.5) }) context("Checking that probabilityatT function works as expected") # TESTS for probabilityatT function # Example taken from the book INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley states <- c("a","b","c","d","e") # taken exactly from book ansMatrix <- matrix(data = c(0.610, 0.290, 0.081, 0.016, 0.003, 0.232, 0.443, 0.238, 0.071, 0.017, 0.052, 0.190, 0.435, 0.238, 0.085, 0.008, 0.045, 0.191, 0.446, 0.310, 0.001, 0.008, 0.054, 0.248, 0.688),nrow = 5,byrow = T,dimnames = list(states,states)) byRow <- TRUE gen <- matrix(c(-1/4,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-1/5), nrow=5,byrow=byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") test_that("Check probabilityatT using a ctmc object:",{ if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(round(probabilityatT(ctmc,2.5),3),ansMatrix) }) ### Adds tests for impreciseprobabilityatT function context("Checking that impreciseprobabilityatT function works as expected:") states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = T,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) test_that("Check impreciseProbabilityatT function using an ictmc object:",{ if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(round(impreciseProbabilityatT(ictmc,2,0,1,error = 10^-3),4),c(0.0083,0.1410)) }) ### Adds tests for freq2Generator function sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) answer <- matrix(c( -0.024, 0.015, 0.009, 0, 0.007, -0.018, 0.012, 0, 0.013 , 0.007, -0.021, 0, 0.000, 0.000, 0.000, 0 ),nrow = 4,byrow = TRUE) test_that("Check if ",{ if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(round(freq2Generator(sample_rel,1),3),answer) }) ### tests for is.CTMCirreducible fcuntion energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") test_that("is.CTMCirreducible works", { if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(is.CTMCirreducible(molecularCTMC),TRUE) }) ### tests for is.TimeReversible function energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, 
byrow = byRow, generator = gen, name = "Molecular Transition Model") test_that("is.TimeReversible works", { if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(is.TimeReversible(molecularCTMC),TRUE) }) markovchain/tests/testthat/testFits.R0000644000176200001440000000165213144267070017516 0ustar liggesuserscontext("Checking that fitting works") #load & prepare data data(rain) data(holson) myHolson<-as.matrix(holson[,-1]); rownames(myHolson)<-holson$id test_that("Check createSequenceMatrix", { expect_equal(createSequenceMatrix(rain$rain), checksAlofiRawTransitions) }) #data preparation ciao<-c("a","a","b","b","a",NA,"b","a","b","a","a") test_that("Check markovchainFit & listFit", { expect_equal(markovchainFit(ciao), simpleMcCiaoFit) expect_equal(markovchainListFit(data=myHolson), checkmarkovchainFitList) }) #### tests for noofVisitsDist function transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") answer <- c(0.348148, 0.651852) names(answer) <- c("a","b") test_that("Check noofVisitsDist works", { expect_equal(noofVisitsDist(simpleMc,5,"a"),answer) }) markovchain/tests/testthat/testBasic1.R0000644000176200001440000004350414243237154017716 0ustar liggesusers#library(markovchain) #create basic markov chains markov1<-new("markovchain", states=c("a","b","c"), transitionMatrix= matrix(c(0.2,0.5,0.3, 0,1,0, 0.1,0.8,0.1),nrow=3, byrow=TRUE, dimnames=list(c("a","b","c"), c("a","b","c")) )) mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ####end creating DTMC context("Basic DTMC proprieties") test_that("States are those that should be", { expect_equal(absorbingStates(markov1), "b") expect_equal(transientStates(markov1), c("a","c")) expect_equal(is.irreducible(mathematicaMc),FALSE) expect_equal(transientStates(mathematicaMc), c("a","b")) expect_equal(is.accessible(mathematicaMc, "a", "c"),TRUE) expect_equal(canonicForm(mathematicaMc)@transitionMatrix, markovchain:::.canonicFormRcpp(mathematicaMc)@transitionMatrix) expect_equal(recurrentClasses(mathematicaMc), list(c("c", "d"), c("e"))) expect_equal(summary(mathematicaMc), list(closedClasses = list(c("c", "d"), c("e")), recurrentClasses = list(c("c", "d"), c("e")), transientClasses = list(c("a", "b")))) }) ###testing proper conversion of objects context("Conversion of objects") provaMatr2Mc<-as(mathematicaMatr,"markovchain") test_that("Conversion of objects", { expect_equal(class(provaMatr2Mc)=="markovchain",TRUE) }) ### Markovchain Fitting sequence1 <- c("a", "b", "a", "a", NA, "a", "a", NA) sequence2 <- c(NA, "a", "b", NA, "a", "a", "a", NA, "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a", NA) mcFit <- markovchainFit(data = sequence1, byrow = FALSE, sanitize = TRUE) mcFit2 <- markovchainFit(c("a","b","a","b"), sanitize = TRUE) test_that("Fit should satisfy", { expect_equal((mcFit["logLikelihood"])[[1]], log(1/3) + 2*log(2/3)) expect_equal(markovchainFit(data = sequence2, method = "bootstrap")["confidenceInterval"] [[1]]["confidenceLevel"][[1]], 0.95) expect_equal(mcFit2$upperEndpointMatrix, matrix(c(0,1,1,0), nrow = 2, byrow = TRUE, dimnames 
= list(c("a", "b"), c("a", "b")))) }) ### Markovchain Fitting for bigger markov chain bigseq <- rep(c("a", "b", "c"), 500000) bigmcFit <- markovchainFit(bigseq) test_that("MC Fit for large sequence 1", { expect_equal(bigmcFit$logLikelihood, 0) expect_equal(bigmcFit$confidenceLevel, 0.95) expect_equal(bigmcFit$estimate@transitionMatrix, bigmcFit$upperEndpointMatrix) }) bigmcFit <- markovchainFit(bigseq, sanitize = TRUE) test_that("MC Fit for large sequence 2", { expect_equal(bigmcFit$logLikelihood, 0) expect_equal(bigmcFit$confidenceLevel, 0.95) expect_equal(bigmcFit$estimate@transitionMatrix, bigmcFit$upperEndpointMatrix) }) ### Markovchain Fitting For dataframe or matrix as an input matseq <- matrix(c("a", "b", "c", NA ,"b", "c"), nrow = 2, byrow = T) # for matrix as input test_that("Markovchain Fit for matrix as input", { # for matrix as input expect_equal(markovchainFit(matseq)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(markovchainFit(matseq, sanitize = TRUE)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) # for data frame as input expect_equal(markovchainFit(as.data.frame(matseq))$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(markovchainFit(as.data.frame(matseq), sanitize = TRUE)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) ### Markovchain Fitting(mle) with sanitize parameter mle_sequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") mle_fit1 <- markovchainFit(mle_sequence) mle_fit2 <- markovchainFit(mle_sequence, sanitize = TRUE) test_that("MarkovchainFit MLE", { expect_equal(mle_fit1$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(mle_fit2$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(mle_fit1$logLikelihood, mle_fit2$logLikelihood) expect_equal(mle_fit1$confidenceInterval, mle_fit2$confidenceInterval) expect_equal(mle_fit2$standardError, mle_fit2$standardError) }) ### Markovchain Fitting(laplace) with sanitize parameter lap_sequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") lap_fit1 <- markovchainFit(lap_sequence, "laplace") lap_fit2 <- markovchainFit(lap_sequence, "laplace", sanitize = TRUE) test_that("Markovchain Laplace", { expect_equal(lap_fit1$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(lap_fit2$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(lap_fit1$logLikelihood, lap_fit2$logLikelihood) }) ### Markovchain Fitting when some states are not present in the given sequence mix_seq <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") mix_fit1 <- markovchainFit(mix_seq, "mle", sanitize = TRUE, possibleStates = c("d")) 
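# With sanitize = TRUE, states that are never observed as the origin of a transition
# (here "c", plus the extra state "d" declared via possibleStates) get a uniform row,
# i.e. each entry equals 1/4 in the expected transition matrices checked below.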
mix_fit2 <- markovchainFit(mix_seq, "laplace", sanitize = TRUE, possibleStates = c("d")) mix_fit3 <- markovchainFit(mix_seq, "map", sanitize = TRUE, possibleStates = c("d")) test_that("Mixture of Markovchain Fitting", { expect_equal(mix_fit2$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) expect_equal(mix_fit1$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) expect_equal(mix_fit3$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) }) ### Test for createSequenceMatrix rsequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") test_that("createSequenceMatrix : Permutation of parameters",{ expect_equal(createSequenceMatrix(rsequence, FALSE, FALSE), matrix(c(4, 4, 0, 3, 3, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, FALSE, TRUE), matrix(c(4, 4, 0, 3, 3, 1, 1, 1, 1), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, TRUE, FALSE), matrix(c(4/8, 4/8, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, TRUE, TRUE), matrix(c(4/8, 4/8, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) ### Test for createSequenceMatrix : input nx2 matrix data <- matrix(c("a", "a", "b", "a", "b", "a", "b", "a", NA, "a", "a", "a", "a", "b", NA, "b"), ncol = 2, byrow = TRUE) test_that("createSequenceMatrix : input as matrix",{ expect_equal(createSequenceMatrix(data), matrix(c(2, 1, 3, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) expect_equal(createSequenceMatrix(data, toRowProbs = TRUE), matrix(c(2/3, 1/3, 3/3, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) expect_equal(createSequenceMatrix(data, toRowProbs = TRUE, possibleStates = "d", sanitize = TRUE), matrix(c(2/3, 1/3, 0, 1, 0, 0, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "d"), c("a", "b", "d")))) }) ### Test for markovchainSequence and rmarkovchain statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) s1 <- markovchainSequence(10, mcB) s2 <- markovchainSequence(10, mcB, include.t0 = TRUE) s3 <- markovchainSequence(10, mcB, t0 = "b", include.t0 = TRUE) s4 <- markovchainSequence(10, mcB, useRCpp = FALSE) s5 <- markovchainSequence(10, mcB, include.t0 = TRUE, useRCpp = FALSE) s6 <- markovchainSequence(10, mcB, t0 = "b", include.t0 = TRUE, useRCpp = FALSE) test_that("Output format of markovchainSequence", { expect_equal(length(s1), 10) expect_equal(length(s2), 11) expect_equal(length(s3), 11) expect_equal(s3[1], "b") expect_equal(length(s4), 10) expect_equal(length(s5), 11) expect_equal(length(s6), 11) expect_equal(s6[1], "b") }) statesNames <- c("a", "b", "c") mcA <- new("markovchain", states = statesNames, 
transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcC <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) o1 <- rmarkovchain(15, mclist, "list") o2 <- rmarkovchain(15, mclist, "matrix") o3 <- rmarkovchain(15, mclist, "data.frame") o4 <- rmarkovchain(15, mclist, "list", t0 = "a", include.t0 = TRUE) o5 <- rmarkovchain(15, mclist, "matrix", t0 = "a", include.t0 = TRUE) o6 <- rmarkovchain(15, mclist, "data.frame", t0 = "a", include.t0 = TRUE) test_that("Output format of rmarkovchain", { expect_equal(length(o1), 15) expect_equal(length(o1[[1]]), 3) expect_equal(all(dim(o2) == c(15, 3)), TRUE) expect_equal(all(dim(o3) == c(45, 2)), TRUE) expect_equal(length(o4), 15) expect_equal(length(o4[[1]]), 4) expect_equal(o4[[1]][1], "a") expect_equal(all(dim(o5) == c(15, 4)), TRUE) expect_equal(all(o5[, 1] == "a"), TRUE) expect_equal(all(dim(o6) == c(60, 2)), TRUE) }) ### MAP fit function tests data1 <- c("a", "b", "a", "c", "a", "b", "a", "b", "c", "b", "b", "a", "b") data2 <- c("c", "a", "b") test_that("MAP fits must satisfy", { expect_identical(markovchainFit(data1, method = "map")$estimate@transitionMatrix, markovchainFit(data1, method = "mle")$estimate@transitionMatrix) expect_identical(markovchainFit(data1, method = "map")$estimate@transitionMatrix, matrix(c(0.0, 0.6, 0.5, 0.8, 0.2, 0.5, 0.2, 0.2, 0.0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_identical(markovchainFit(data1, method = "map", hyperparam = matrix(c(2, 1, 3, 4, 5, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c"))))$estimate@transitionMatrix, matrix(c(1/10, 3/10, 3/5, 7/10, 5/10, 2/5, 2/10, 2/10, 0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) test_that("predictiveDistribution must satisfy", { expect_equal(predictiveDistribution(data1, character()), 0) expect_equal(predictiveDistribution(data1, data2, hyperparam = matrix(c(2, 1, 3, 4, 5, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))), log(4 / 13)) }) test_that("inferHyperparam must satisfy", { expect_identical(inferHyperparam(data = data1)$dataInference, matrix(c(1, 4, 2, 5, 2, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_identical(inferHyperparam(transMatr = matrix(c(0.0, 0.6, 0.5, 0.8, 0.2, 0.5, 0.2, 0.2, 0.0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c"))), scale = c(10, 10, 10))$scaledInference, matrix(c(0, 6, 5, 8, 2, 5, 2, 2, 0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) pDRes <- c(log(3/2), log(3/2)) names(pDRes) <- c("a", "b") test_that("priorDistribution must sastisfy", { expect_equal(priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), matrix(c(2, 2, 2, 2), nrow = 2, dimnames = list(c("a", "b"), c("a", "b")))), pDRes) }) energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = 
"Molecular Transition Model") test_that("steadyStates must satisfy", { expect_identical(steadyStates(molecularCTMC), matrix(c(1/4, 3/4), nrow = 1, dimnames = list(c(), energyStates))) }) ### Tests for expectedRewards function ### Examples taken from Stochastic Processes: Theory for Applications, Robert G. Gallager,Cambridge University Press transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) test_that("expectedRewards must satisfy", { expect_equal(expectedRewards(simpleMc,1,c(0,1)),c(0.01,1.99)) expect_equal(expectedRewards(simpleMc,2,c(0,1)),c(0.0298,2.9702)) }) ### Tests for committorAB function transMatr <- matrix(c(0,0,0,1,0.5, 0.5,0,0,0,0, 0.5,0,0,0,0, 0,0.2,0.4,0,0, 0,0.8,0.6,0,0.5),nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") answer <- c(0.444,0.889,0.000,0.444,1.000) names <- c("a","b","c","d","e") names(answer) <- names test_that("committorAB must satisfy", { expect_equal(round(committorAB(object,c(5),c(3)),3),answer) }) ### Tests for firstPassageMultiple function statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) answer <- matrix(c(.8000, 0.6000, 0.2540 ),nrow = 3,dimnames = list(c("1","2","3"),"set")) test_that("firstPassageMultiple function satisfies", { expect_equal(firstPassageMultiple(testmarkov,"a",c("b","c"),3),answer) }) markovchain/tests/testthat/testMultinomCI.R0000644000176200001440000000327013144267070020627 0ustar liggesusers#library(markovchain) seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcfit<-markovchainFit(data=seq,byrow=TRUE) # print(mcfit) seqmat<-createSequenceMatrix(seq) seqmat mCI <- .multinomialCIRcpp(mcfit$estimate@transitionMatrix, seqmat, 0.95) # print(mCI) ####end of creating multinomialCI context("Multinomial confidence interval") test_that("multinomial CI statisfay", { # expect_equal(mCI$lowerEndpointMatrix, matrix(c(0.2222222,0.3333333, # 0.5714286,0.1428571),nrow=2, byrow=TRUE, dimnames=list(c("a","b"), # c("a","b")) # )) # expect_equal(mCI$upperEndpointMatrix, matrix(c(0.8111456,0.9222567, # 1,0.6839473),nrow=2, byrow=TRUE, dimnames=list(c("a","b"), # c("a","b")) # )) expect_equal(mCI$upperEndpointMatrix[2,1],1) }) # Multinomial distribution with 3 classes, from which 79 samples # were drawn: 23 of them belong to the first class, 12 to the # second class and 44 to the third class. 
Punctual estimations # of the probabilities from this sample would be 23/79, 12/79 # and 44/79 but we want to build 95% simultaneous confidence intervals # for the true probabilities # m = multinomialCI(c(23,12,44), 0.05) # print(paste("First class: [", m[1,1], m[1,2], "]")) # print(paste("Second class: [", m[2,1], m[2,2], "]")) # print(paste("Third class: [", m[3,1], m[3,2], "]")) # seq<-c(4, 5) # m = multinomialCI(seq, 0.05) # m markovchain/tests/testthat/testStatesClassification.R0000644000176200001440000000430214243233076022723 0ustar liggesuserscontext("Classification of states") A <- structure(c(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.3, 0.3, 0, 0, 0, 0, 0, 0.5, 0.7, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.4, 0, 0, 0, 0, 0, 0.4, 0, 0.5, 0, 0, 0, 0, 0, 0.6, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 1), .Dim = c(8L, 8L), .Dimnames = list( c("1", "2", "3", "4", "5", "6", "7", "8"), c("1", "2", "3", "4", "5", "6", "7", "8"))) mchain <- new("markovchain", transitionMatrix=A) #summary(mchain) test_that("States are those that should be", { expect_equal(recurrentClasses(mchain), list(c("3", "4"), c("8"))) expect_equal(transientStates(mchain), c("1", "2", "5", "6", "7")) expect_equal(absorbingStates(mchain), "8") }) #https://www.math.ucdavis.edu/~gravner/MAT135B/materials/ch13.pdf mcMatr1<-markovchain:::zeros(3) mcMatr1[1,]<-c(0.5,0.5,0) mcMatr1[2,]<-c(0.5,0.25,0.25) mcMatr1[3,]<-c(0,1/3,2/3) mc1<-as(mcMatr1,"markovchain") test_that("States are those that should be", { expect_equal(is.irreducible(mc1),TRUE) }) mcMatr2<-matrix(c(0, 0, 1/2, 1/2,1, 0 ,0, 0,0, 1, 0, 0,0, 1, 0, 0),ncol = 4,byrow=TRUE) mc2<-as(mcMatr2,"markovchain") test_that("States are those that should be", { expect_equal(recurrentClasses(mc2),list(c("s1","s2","s3","s4"))) }) mcMatr3<-matrix(c( 0,1,0,0,0,0, 0.4,0.6,0,0,0,0, 0.3,0,0.4,0.2,0.1,0, 0,0,0,0.3,0.7,0, 0,0,0,0.5,0,0.5, 0,0,0,0.3,0,0.7),nrow = 6,byrow=TRUE) mc3<-as(mcMatr3,"markovchain") recurrentClasses(mc3) transientStates(mc3) #canonicForm(mc3) test_that("States are those that should be", { expect_equal(recurrentClasses(mc3),list(c("s1","s2"),c("s4","s5","s6") )) expect_equal(transientStates(mc3),"s3") }) mcMatr4<-markovchain:::zeros(5) mcMatr4[1:2,1:2]<-0.5*markovchain:::ones(2) mcMatr4[5,1]<-1 mcMatr4[3,3]<-1 mcMatr4[4,3:4]<-0.5 mc4<-as(mcMatr4,"markovchain") test_that("States are those that should be", { expect_equal(recurrentClasses(mc4),list(c("s1","s2"),c("s3"))) expect_equal(absorbingStates(mc4),"s3") expect_equal(transientStates(mc4),c("s4","s5")) } ) markovchain/tests/testthat/testPeriod.R0000644000176200001440000000302613144267070020030 0ustar liggesusers# Period examples from http://www.math.wisc.edu/~anderson/605F11/Notes/StochBioChapter3.pdf #library(markovchain) mcPeriodic<-new("markovchain", states=c("0","1","2"), transitionMatrix= matrix(c(0,1,0, 0,0,1, 1,0,0),nrow=3, byrow=TRUE, dimnames=list(c("0","1","2"), c("0","1","2")) )) myMatr<-matrix(c(0 , 0 , 1/2 , 1/4 , 1/4 , 0 , 0, 0 , 0 , 1/3 , 0 , 2/3, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1/3 , 2/3 , 0 , 0 , 0 , 0 , 0 , 1/2 , 1/2 , 0 , 0 , 0 , 0 , 0 , 3/4 , 1/4 , 1/2, 1/2 , 0 , 0 , 0 , 0 , 0 , 1/4, 3/4 , 0 , 0 , 0 , 0 , 0),byrow=TRUE, nrow = 7) mcPeriodic2<-as(myMatr, "markovchain") mcAperiodic<-new("markovchain", states=c("0","1","2","3","4"), transitionMatrix= matrix(c(1/2,1/2,0,0,0, 1/2,0,1/2,0,0, 0,1/2,0,1/2,0, 0,0,1/2,0,1/2, 0,0,0,1,0),nrow=5, byrow=TRUE, dimnames=list(c("0","1","2","3","4"), c("0","1","2","3","4")) )) ####end creating DTMC context("Basic DTMC proprieties") test_that("States are 
those that should be", { expect_equal(period(mcPeriodic),3) expect_equal(period(mcPeriodic2),3) expect_equal(period(mcAperiodic),1) })markovchain/tests/testthat.R0000644000176200001440000000010413762012756015705 0ustar liggesuserslibrary(testthat) library(markovchain) test_check("markovchain") markovchain/src/0000755000176200001440000000000014425421006013341 5ustar liggesusersmarkovchain/src/ctmcFittingFunctions.cpp0000644000176200001440000001057113762012756020230 0ustar liggesusers#include #include using namespace Rcpp; #include List markovchainFit(SEXP data, String method = "mle", bool byrow = true, int nboot = 10, double laplacian = 0, String name = "", bool parallel = false, double confidencelevel = 0.95, bool confint = true, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()); //' @name ctmcFit //' @title Function to fit a CTMC //' @description This function fits the underlying CTMC give the state //' transition data and the transition times using the maximum likelihood //' method (MLE) //' @usage ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) //' @param data It is a list of two elements. The first element is a character //' vector denoting the states. The second is a numeric vector denoting the //' corresponding transition times. //' @param byrow Determines if the output transition probabilities of the //' underlying embedded DTMC are by row. //' @param name Optional name for the CTMC. //' @param confidencelevel Confidence level for the confidence interval //' construnction. //' @return It returns a list containing the CTMC object and the confidence intervals. //' //' @details Note that in data, there must exist an element wise corresponding //' between the two elements of the list and that data[[2]][1] is always 0. 
//' @references Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{rctmc}} //' //' @examples //' data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) //' ctmcFit(data) //' //' @export //' // [[Rcpp::export]] List ctmcFit(List data, bool byrow=true, String name="", double confidencelevel = 0.95) { CharacterVector stateData(as(data[0]).size()); for (int i = 0; i < as(data[0]).size(); i++) stateData[i] = as(data[0])[i]; NumericVector transData = data[1]; CharacterVector sortedStates = unique(as(data[0])).sort(); NumericVector stateCount(sortedStates.size()); NumericVector stateSojournTime(sortedStates.size()); List dtmcData = markovchainFit(stateData, "mle", byrow, 10, 0, name, false, confidencelevel); for (int i = 0; i < stateData.size() - 1; i++){ int idx = std::find(sortedStates.begin(), sortedStates.end(), stateData[i]) - sortedStates.begin(); stateCount[idx]++; stateSojournTime[idx] += transData[i+1] - transData[i]; } S4 dtmcEst = dtmcData["estimate"]; NumericMatrix gen = dtmcEst.slot("transitionMatrix"); for (int i = 0; i < gen.nrow(); i++){ for (int j = 0; j < gen.ncol(); j++){ if (stateCount[i] > 0) gen(i, j) *= stateCount[i] / stateSojournTime[i]; } if (stateCount[i] > 0) gen(i, i) = - stateCount[i] / stateSojournTime[i]; else gen(i, i) = -1; } double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); NumericVector lowerConfVecLambda(sortedStates.size()), upperConfVecLambda(sortedStates.size()); for (int i = 0; i < sortedStates.size(); i++){ if (stateCount[i] > 0){ auto factor = stateCount[i] / stateSojournTime[i] * (1 - zscore / sqrt(stateCount[i])); lowerConfVecLambda(i) = std::max(0., factor); upperConfVecLambda(i) = std::min(1., factor); } else { lowerConfVecLambda(i) = 1; upperConfVecLambda(i) = 1; } } S4 outCtmc("ctmc"); outCtmc.slot("states") = sortedStates; outCtmc.slot("generator") = gen; outCtmc.slot("name") = name; return List::create(_["estimate"] = outCtmc, _["errors"] = List::create( _["dtmcConfidenceInterval"] = List::create( _["confidenceLevel"] = dtmcData["confidenceLevel"], _["lowerEndpointMatrix"] = dtmcData["lowerEndpointMatrix"], _["upperEndpointMatrix"] = dtmcData["upperEndpointMatrix"]), _["lambdaConfidenceInterval"] = List::create( _["lowerEndpointVector"] = lowerConfVecLambda, _["upperEndpointVector"] = upperConfVecLambda)) ); } markovchain/src/classesAndMethods.cpp0000644000176200001440000000073613762012756017472 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include using namespace Rcpp; using namespace arma; using namespace std; // TODO meaning of this method // [[Rcpp::export(.isGenRcpp)]] bool isGen(NumericMatrix gen) { for (int i = 0; i < gen.nrow(); i++) for (int j = 0; j < gen.ncol(); j++) if ((i == j && gen(i, j) > 0) || (i != j && gen(i, j) < 0)) return false; return true; } markovchain/src/fitHigherOrder.cpp0000644000176200001440000000261313762012756016767 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include using namespace Rcpp; // sequence to frequency probability vector //' @export // [[Rcpp::export]] NumericVector seq2freqProb(CharacterVector sequence) { int n = sequence.size(); CharacterVector states = unique(sequence).sort(); int nstates = states.length(); NumericVector v(nstates); v.names() = states; for (int i = 0; i < n; i ++) v[std::string(sequence[i])] = v[std::string(sequence[i])] + 1.0; NumericVector out = v/sum(v); out.names() = 
v.names(); return out; } // sequence to transition matrix for higher order markov chai //' @export // [[Rcpp::export]] NumericMatrix seq2matHigh(CharacterVector sequence, int order) { int n = sequence.size(); CharacterVector states = unique(sequence).sort(); int nstates = states.length(); NumericVector colsums(nstates); NumericMatrix out(nstates); out.attr("dimnames") = List::create(states, states); for (int i = 0; i < n - order; i++) { int from = -1, to = -1; for (int j = 0; j < nstates; j++) { if (sequence[i] == states[j]) from = j; if (sequence[i + order] == states[j]) to = j; } if (from != -1 && to != -1) { out(to, from) ++; colsums[from] ++; } } for (int i = 0; i < nstates; i ++) { for (int j = 0; j < nstates; j ++) out(i, j) /= colsums[j]; } return out; } markovchain/src/probabilistic.cpp0000644000176200001440000013202014257554210016700 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include #include #include using namespace Rcpp; using namespace std; using namespace arma; template T sortByDimNames(const T m); typedef unsigned int uint; // Returns whether a Markov chain is ergodic // Declared in this same file bool isIrreducible(S4 obj); // Declared in utils.cpp bool anyElement(const mat& matrix, bool (*condition)(const double&)); // Declared in utils.cpp bool allElements(const mat& matrix, bool (*condition)(const double&)); // Declared in utils.cpp bool approxEqual(const cx_double& a, const cx_double& b); // Used in commClassesKernel void strongConnect(int v, vector& disc, vector& low, vector& onStack, int& index, stack& exploring, NumericMatrix& P, vector>& components, int numStates) { disc[v] = index; low[v] = index; ++index; exploring.push(v); onStack[v] = true; // For each edge (v, w) that goes out of v for (int w = 0; w < numStates; ++w) { if (P(v, w) > 0) { // If w has not been visited yet, compute [w], and update // the minimum node we can travel to from v if (disc[w] == -1) { strongConnect(w, disc, low, onStack, index, exploring, P, components, numStates); low[v] = min(low[v], low[w]); // Otherwise, if w is on the stack of nodes been explored, // update the minimum node we can travel to from v } else if (onStack[w]) { low[v] = min(low[v], disc[w]); } // Otherwise, (v, w) is a cross edge between components // in the DFS tree, do nothing } } // If v is the root of [v], unwind the strongly connected // component from the stack if (low[v] == disc[v]) { bool remaining = true; unordered_set component; int w; while (remaining) { w = exploring.top(); exploring.pop(); component.insert(w); onStack[w] = false; remaining = w != v; } components.push_back(component); } } // This method is based on Tarjan's algorithm to find strongly // connected components in a directed graph: // https://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm // to compute the communicating classes. 
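// Illustrative use from R (internal helper; sketch assuming a row-stochastic
// matrix P with dimnames):
//   P <- matrix(c(0.5, 0.5, 0, 0, 0.5, 0.5, 0, 0, 1), nrow = 3, byrow = TRUE,
//               dimnames = list(letters[1:3], letters[1:3]))
//   markovchain:::.commClassesKernelRcpp(P)
// Here {a}, {b}, {c} are singleton communicating classes and only {c} is closed,
// so `classes` has TRUE only on its diagonal and `closed` is (FALSE, FALSE, TRUE).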
// Output: // - classes: a matrix whose entry (i, j) is true iff i and // j are in the same communicating class // - closed: a vector whose i-th entry indicates whether the // class [i] is closed // // [[Rcpp::export(.commClassesKernelRcpp)]] List commClassesKernel(NumericMatrix P) { // The matrix must be stochastic by rows int numStates = P.ncol(); vector disc(numStates, -1); vector low(numStates, -1); vector onStack(numStates, false); vector> components; stack exploring; int index = 0; // If the component [v] has not been computed yet // (disc[v] == -1), compute it for (int v = 0; v < numStates; ++v) { if (disc[v] == -1) strongConnect(v, disc, low, onStack, index, exploring, P, components, numStates); } // Create the output data structures CharacterVector stateNames = rownames(P); LogicalMatrix classes(numStates, numStates); classes.attr("dimnames") = P.attr("dimnames"); std::fill(classes.begin(), classes.end(), false); LogicalVector closed(numStates); closed.names() = stateNames; for (auto component : components) { bool isClosed = true; // The class is closed iff there is no edge going out of the class for (int i : component) { for (int j = 0; j < numStates; ++j) if (P(i, j) > 0 && component.count(j) == 0) isClosed = false; } // Set the communicating matrix and whether it is closed or not for (int i : component) { closed(i) = isClosed; for (int j : component) classes(i, j) = true; } } return List::create(_["classes"] = classes, _["closed"] = closed); } // Wrapper that computes the communicating states from the matrix generated by // commClassesKernel (a matrix where an entry i,j is TRUE iff i and j are in the // same communicating class). It also needs the list of states names from the // Markov Chain List computeCommunicatingClasses(LogicalMatrix& commClasses, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List classesList; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; if (!computed[i]) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } classesList.push_back(currentClass); } } return classesList; } // [[Rcpp::export(.communicatingClassesRcpp)]] List communicatingClasses(S4 object) { // Returns the underlying communicating classes // It is indifferent if the matrices are stochastic by rows or columns NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; return computeCommunicatingClasses(commClasses, states); } // Wrapper that computes the transient states from a list of the states and a // vector indicating whether the communicating class for each state is closed CharacterVector computeTransientStates(CharacterVector& states, LogicalVector& closedClass) { CharacterVector transientStates; for (int i = 0; i < states.size(); i++) if (!closedClass[i]) transientStates.push_back(states[i]); return transientStates; } // Wrapper that computes the recurrent states from a list of states and a // vector indicating whether the communicating class for each state is closed CharacterVector computeRecurrentStates(CharacterVector& states, LogicalVector& closedClass) { CharacterVector recurrentStates; for (int i = 0; i < states.size(); i++) if (closedClass[i]) recurrentStates.push_back(states[i]); 
return recurrentStates; } // [[Rcpp::export(.transientStatesRcpp)]] CharacterVector transientStates(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commKernel = commClassesKernel(transitionMatrix); LogicalVector closed = commKernel["closed"]; CharacterVector states = object.slot("states"); return computeTransientStates(states, closed); } // [[Rcpp::export(.recurrentStatesRcpp)]] CharacterVector recurrentStates(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commKernel = commClassesKernel(transitionMatrix); LogicalVector closed = commKernel["closed"]; return computeRecurrentStates(states, closed); } // Wrapper that computes the recurrent classes from the matrix given by // commClassesKernel (which entry i,j is TRUE iff i and j are in the same // communicating class), a vector indicating wheter the class for state is // closed and the states of the Markov Chain List computeRecurrentClasses(LogicalMatrix& commClasses, LogicalVector& closedClass, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List recurrentClassesList; bool isRecurrentClass; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; isRecurrentClass = closedClass(i) && !computed[i]; if (isRecurrentClass) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } recurrentClassesList.push_back(currentClass); } } return recurrentClassesList; } // returns the recurrent classes // [[Rcpp::export(.recurrentClassesRcpp)]] List recurrentClasses(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; return computeRecurrentClasses(commClasses, closed, states); } // Wrapper that computes the transient classes from the matrix given by // commClassesKernel (which entry i,j is TRUE iff i and j are in the same // communicating class), a vector indicating wheter the class for state is // closed and the states of the Markov Chain List computeTransientClasses(LogicalMatrix& commClasses, LogicalVector& closedClass, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List transientClassesList; bool isTransientClass; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; isTransientClass = !closedClass(i) && !computed[i]; if (isTransientClass) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } transientClassesList.push_back(currentClass); } } return transientClassesList; } // returns the transient classes // [[Rcpp::export(.transientClassesRcpp)]] List transientClasses(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = 
commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; return computeTransientClasses(commClasses, closed, states); } // Defined in probabilistic.cpp mat matrixPow(const mat& A, int n); // [[Rcpp::export(.reachabilityMatrixRcpp)]] LogicalMatrix reachabilityMatrix(S4 obj) { NumericMatrix matrix = obj.slot("transitionMatrix"); // Reachability matrix int m = matrix.nrow(); mat X(matrix.begin(), m, m, true); mat reachability = eye(m, m) + sign(X); reachability = matrixPow(reachability, m - 1); LogicalMatrix result = wrap(reachability > 0); result.attr("dimnames") = matrix.attr("dimnames"); return result; } // [[Rcpp::export(.isAccessibleRcpp)]] bool isAccessible(S4 obj, String from, String to) { NumericMatrix probs = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); int fromPos = -1, toPos = -1; bool byrow = obj.slot("byrow"); int m = probs.ncol(); // Compute indices for states from and pos for (int i = 0; i < m; ++i) { if (states[i] == from) fromPos = i; if (states[i] == to) toPos = i; } if (fromPos == -1 || toPos == -1) stop("Please give valid states method"); stack toExplore; toExplore.push(fromPos); vector visited(m, false); visited[fromPos] = true; bool isReachable = false; // DFS until we hit 'to' state or we cannot traverse to more states while (!toExplore.empty() && !isReachable) { int i = toExplore.top(); toExplore.pop(); visited[i] = true; isReachable = i == toPos; for (int j = 0; j < m; ++j) if (((byrow && !approxEqual(probs(i, j), 0)) || (!byrow && !approxEqual(probs(j, i), 0))) && !visited[j]) toExplore.push(j); } return isReachable; } // summary of markovchain object // [[Rcpp::export(.summaryKernelRcpp)]] List summaryKernel(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; List recurrentClasses = computeRecurrentClasses(commClasses, closed, states); List transientClasses = computeTransientClasses(commClasses, closed, states); List summaryResult = List::create(_["closedClasses"] = recurrentClasses, _["recurrentClasses"] = recurrentClasses, _["transientClasses"] = transientClasses); return(summaryResult); } //here the kernel function to compute the first passage // [[Rcpp::export(.firstpassageKernelRcpp)]] NumericMatrix firstpassageKernel(NumericMatrix P, int i, int n) { arma::mat G = as(P); arma::mat Pa = G; arma::mat H(n, P.ncol()); //here Thoralf suggestion //initializing the first row for (unsigned int j = 0; j < G.n_cols; j++) H(0, j) = G(i-1, j); arma::mat E = 1 - arma::eye(P.ncol(), P.ncol()); for (int m = 1; m < n; m++) { G = Pa * (G%E); for (unsigned int j = 0; j < G.n_cols; j ++) H(m, j) = G(i-1, j); } NumericMatrix R = wrap(H); return R; } // [[Rcpp::export(.firstPassageMultipleRCpp)]] NumericVector firstPassageMultipleRCpp(NumericMatrix P,int i, NumericVector setno, int n) { arma::mat G = as(P); arma::mat Pa = G; arma::vec H = arma::zeros(n); //here Thoralf suggestion unsigned int size = setno.size(); //initializing the first row for (unsigned int k = 0; k < size; k++) { H[0] += G(i-1, setno[k]-1); } arma::mat E = 1 - arma::eye(P.ncol(), P.ncol()); for (int m = 1; m < n; m++) { G = Pa * (G%E); for (unsigned int k = 0; k < size; k++) { H[m] += G(i-1, setno[k]-1); } } NumericVector R = 
wrap(H); return R; } // [[Rcpp::export(.expectedRewardsRCpp)]] NumericVector expectedRewardsRCpp(NumericMatrix matrix, int n, NumericVector rewards) { // initialises output vector NumericVector out; // gets no of states int no_of_states = matrix.ncol(); // initialises armadillo matrices and vectors arma::vec temp = arma::zeros(no_of_states); arma::mat matr = as(matrix); arma::vec v = arma::zeros(no_of_states); // initialses the vector for the base case of dynamic programming expression for (int i=0;i(matrix); arma::mat temp = as(matrix); arma::vec r = as(rewards); arma::mat I = arma::zeros(1,size); I(0,s0-1) = 1; for (int j = 0; j < n; j++) { arma::mat res = I*(temp*r); result = result + res(0,0); temp = temp*matr; } return result; } // greatest common denominator // [[Rcpp::export(.gcdRcpp)]] int gcd (int a, int b) { int c; a = abs(a); b = abs(b); while ( a != 0 ) { c = a; a = b%a; b = c; } return b; } // function to get the period of a DTMC //' @rdname structuralAnalysis //' //' @export // [[Rcpp::export(period)]] int period(S4 object) { bool irreducible = isIrreducible(object); if (!irreducible) { warning("The matrix is not irreducible"); return 0; } else { NumericMatrix P = object.slot("transitionMatrix"); int n = P.ncol(); std::vector r, T(1), w; int d = 0, m = T.size(), i = 0, j = 0; if (n > 0) { arma::vec v(n); v[0] = 1; while (m>0 && d!=1) { i = T[0]; T.erase(T.begin()); w.push_back(i); j = 0; while (j < n) { if (P(i,j) > 0) { r.insert(r.end(), w.begin(), w.end()); r.insert(r.end(), T.begin(), T.end()); double k = 0; for (std::vector::iterator it = r.begin(); it != r.end(); it ++) if (*it == j) k ++; if (k > 0) { int b = v[i] + 1 - v[j]; d = gcd(d, b); } else { T.push_back(j); v[j] = v[i] + 1; } } j++; } m = T.size(); } } // v = v - floor(v/d)*d; return d; } } //' @title predictiveDistribution //' //' @description The function computes the probability of observing a new data //' set, given a data set //' @usage predictiveDistribution(stringchar, newData, hyperparam = matrix()) //' //' @param stringchar This is the data using which the Bayesian inference is //' performed. //' @param newData This is the data whose predictive probability is computed. //' @param hyperparam This determines the shape of the prior distribution of the //' parameters. If none is provided, default value of 1 is assigned to each //' parameter. This must be of size kxk where k is the number of states in the //' chain and the values should typically be non-negative integers. //' @return The log of the probability is returned. //' //' @details The underlying method is Bayesian inference. The probability is //' computed by averaging the likelihood of the new data with respect to the //' posterior. Since the method assumes conjugate priors, the result can be //' represented in a closed form (see the vignette for more details), which is //' what is returned. //' @references //' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, //' and Out-of-Class Modeling, Christopher C. Strelioff, James P. //' Crutchfield, Alfred Hubler, Santa Fe Institute //' //' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov //' Chains. 
R package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{markovchainFit}} //' @examples //' sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", //' "b", "b", "b", "a") //' hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) //' predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) //' hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] //' predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) //' predProb2==predProb //' @export //' // [[Rcpp::export]] double predictiveDistribution(CharacterVector stringchar, CharacterVector newData, NumericMatrix hyperparam = NumericMatrix()) { // construct list of states CharacterVector elements = stringchar; for (int i = 0; i < newData.size(); i++) elements.push_back(newData[i]); elements = unique(elements).sort(); int sizeMatr = elements.size(); // if no hyperparam argument provided, use default value of 1 for all if (hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(elements, elements); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } // validity check if (hyperparam.nrow() != hyperparam.ncol()) stop("Dimensions of the hyperparameter matrix are inconsistent"); if (hyperparam.nrow() < sizeMatr) stop("Hyperparameters for all state transitions must be provided"); List dimNames = hyperparam.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; int sizeHyperparam = hyperparam.ncol(); CharacterVector sortedColNames(sizeHyperparam), sortedRowNames(sizeHyperparam); for (int i = 0; i < sizeHyperparam; i++) sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); sortedColNames.sort(); sortedRowNames.sort(); for (int i = 0; i < sizeHyperparam; i++) { if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); bool found = false; for (int j = 0; j < sizeMatr; j++) if (elements(j) == sortedColNames(i)) found = true; // hyperparam may contain states not in stringchar if (!found) elements.push_back(sortedColNames(i)); } // check for the case where hyperparam has missing data for (int i = 0; i < sizeMatr; i++) { bool found = false; for (int j = 0; j < sizeHyperparam; j++) if (sortedColNames(j) == elements(i)) found = true; if (!found) stop("Hyperparameters for all state transitions must be provided"); } elements = elements.sort(); sizeMatr = elements.size(); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) if (hyperparam(i, j) < 1.) 
stop("The hyperparameter elements must all be greater than or equal to 1"); // permute the elements of hyperparam such that the row, column names are sorted hyperparam = sortByDimNames(hyperparam); NumericMatrix freqMatr(sizeMatr), newFreqMatr(sizeMatr); double predictiveDist = 0.; // log of the predictive probability // populate frequeny matrix for old data; this is used for inference int posFrom = 0, posTo = 0; for (int i = 0; i < stringchar.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (stringchar[i] == elements[j]) posFrom = j; if (stringchar[i + 1] == elements[j]) posTo = j; } freqMatr(posFrom,posTo)++; } // frequency matrix for new data for (int i = 0; i < newData.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (newData[i] == elements[j]) posFrom = j; if (newData[i + 1] == elements[j]) posTo = j; } newFreqMatr(posFrom,posTo)++; } for (int i = 0; i < sizeMatr; i++) { double rowSum = 0, newRowSum = 0, paramRowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j), newRowSum += newFreqMatr(i, j), paramRowSum += hyperparam(i, j); predictiveDist += lgamma(freqMatr(i, j) + newFreqMatr(i, j) + hyperparam(i, j)) - lgamma(freqMatr(i, j) + hyperparam(i, j)); } predictiveDist += lgamma(rowSum + paramRowSum) - lgamma(rowSum + newRowSum + paramRowSum); } return predictiveDist; } //' @title priorDistribution //' //' @description Function to evaluate the prior probability of a transition //' matrix. It is based on conjugate priors and therefore a Dirichlet //' distribution is used to model the transitions of each state. //' @usage priorDistribution(transMatr, hyperparam = matrix()) //' //' @param transMatr The transition matrix whose probability is the parameter of //' interest. //' @param hyperparam The hyperparam matrix (optional). If not provided, a //' default value of 1 is assumed for each and therefore the resulting //' probability distribution is uniform. //' @return The log of the probabilities for each state is returned in a numeric //' vector. Each number in the vector represents the probability (log) of //' having a probability transition vector as specified in corresponding the //' row of the transition matrix. //' //' @details The states (dimnames) of the transition matrix and the hyperparam //' may be in any order. //' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First //' Order Markov Chains. R package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi, Giorgio Spedicato //' //' @note This function can be used in conjunction with inferHyperparam. For //' example, if the user has a prior data set and a prior transition matrix, //' he can infer the hyperparameters using inferHyperparam and then compute //' the probability of their prior matrix using the inferred hyperparameters //' with priorDistribution. 
//' @seealso \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} //' //' @examples //' priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), //' nrow = 2, //' dimnames = list(c("a", "b"), c("a", "b"))), //' matrix(c(2, 2, 2, 2), //' nrow = 2, //' dimnames = list(c("a", "b"), c("a", "b")))) //' @export // [[Rcpp::export]] NumericVector priorDistribution(NumericMatrix transMatr, NumericMatrix hyperparam = NumericMatrix()) { // begin validity checks for the transition matrix if (transMatr.nrow() != transMatr.ncol()) stop("Transition matrix dimensions are inconsistent"); int sizeMatr = transMatr.nrow(); for (int i = 0; i < sizeMatr; i++) { double rowSum = 0., eps = 1e-10; for (int j = 0; j < sizeMatr; j++) if (transMatr(i, j) < 0. || transMatr(i, j) > 1.) stop("The entries in the transition matrix must each belong to the interval [0, 1]"); else rowSum += transMatr(i, j); if (rowSum <= 1. - eps || rowSum >= 1. + eps) stop("The rows of the transition matrix must each sum to 1"); } List dimNames = transMatr.attr("dimnames"); if (dimNames.size() == 0) stop("Provide dimnames for the transition matrix"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; CharacterVector sortedColNames(sizeMatr), sortedRowNames(sizeMatr); for (int i = 0; i < sizeMatr; i++) sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); sortedColNames.sort(); sortedRowNames.sort(); for (int i = 0; i < sizeMatr; i++) if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); // if no hyperparam argument provided, use default value of 1 for all if (hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(sortedColNames, sortedColNames); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } // validity check for hyperparam if (hyperparam.nrow() != hyperparam.ncol()) stop("Dimensions of the hyperparameter matrix are inconsistent"); if (hyperparam.nrow() != sizeMatr) stop("Hyperparameter and the transition matrices differ in dimensions"); List _dimNames = hyperparam.attr("dimnames"); if (_dimNames.size() == 0) stop("Provide dimnames for the hyperparameter matrix"); CharacterVector _colNames = _dimNames[1]; CharacterVector _rowNames = _dimNames[0]; int sizeHyperparam = hyperparam.ncol(); CharacterVector _sortedColNames(sizeHyperparam), _sortedRowNames(sizeHyperparam); for (int i = 0; i < sizeHyperparam; i++) _sortedColNames(i) = colNames(i), _sortedRowNames(i) = rowNames(i); _sortedColNames.sort(); _sortedRowNames.sort(); for (int i = 0; i < sizeHyperparam; i++) if (sortedColNames(i) != _sortedColNames(i) || sortedRowNames(i) != _sortedRowNames(i)) stop("Hyperparameter and the transition matrices states differ"); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) if (hyperparam(i, j) < 1.) stop("The hyperparameter elements must all be greater than or equal to 1"); transMatr = sortByDimNames(transMatr); hyperparam = sortByDimNames(hyperparam); NumericVector logProbVec; for (int i = 0; i < sizeMatr; i++) { double logProb_i = 0., hyperparamRowSum = 0; for (int j = 0; j < sizeMatr; j++) { hyperparamRowSum += hyperparam(i, j); logProb_i += (hyperparam(i, j) - 1.) 
* log(transMatr(i, j)) - lgamma(hyperparam(i, j)); } logProb_i += lgamma(hyperparamRowSum); logProbVec.push_back(logProb_i); } logProbVec.attr("names") = sortedColNames; return logProbVec; } // [[Rcpp::export(.hittingProbabilitiesRcpp)]] NumericMatrix hittingProbabilities(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); CharacterVector states = object.slot("states"); bool byrow = object.slot("byrow"); if (!byrow) transitionMatrix = transpose(transitionMatrix); int numStates = transitionMatrix.nrow(); arma::mat transitionProbs = as(transitionMatrix); arma::mat hittingProbs(numStates, numStates); // Compute closed communicating classes List commClasses = commClassesKernel(transitionMatrix); List closedClass = commClasses["closed"]; LogicalMatrix communicating = commClasses["classes"]; for (int j = 0; j < numStates; ++j) { arma::mat coeffs = as(transitionMatrix); arma::vec right_part = -transitionProbs.col(j); for (int i = 0; i < numStates; ++i) { coeffs(i, j) = 0; coeffs(i, i) -= 1; } for (int i = 0; i < numStates; ++i) { if (closedClass(i)) { for (int k = 0; k < numStates; ++k) if (k != i) coeffs(i, k) = 0; else coeffs(i, i) = 1; if (communicating(i, j)) right_part(i) = 1; else right_part(i) = 0; } } hittingProbs.col(j) = arma::solve(coeffs, right_part); } NumericMatrix result = wrap(hittingProbs); colnames(result) = states; rownames(result) = states; if (!byrow) result = transpose(result); return result; } // method to convert into canonic form a markovchain object // [[Rcpp::export(.canonicFormRcpp)]] S4 canonicForm(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); bool byrow = obj.slot("byrow"); int numRows = transitions.nrow(); int numCols = transitions.ncol(); NumericMatrix resultTransitions(numRows, numCols); CharacterVector states = obj.slot("states"); unordered_map stateToIndex; unordered_set usedIndices; int currentIndex; List recClasses; S4 input("markovchain"); S4 result("markovchain"); vector indexPermutation(numRows); if (!byrow) { input.slot("transitionMatrix") = transpose(transitions); input.slot("states") = states; input.slot("byrow") = true; transitions = transpose(transitions); } else { input = obj; } recClasses = recurrentClasses(input); // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { string state = (string) states[i]; stateToIndex[state] = i; } int toFill = 0; for (CharacterVector recClass : recClasses) { for (auto state : recClass) { currentIndex = stateToIndex[(string) state]; indexPermutation[toFill] = currentIndex; ++toFill; usedIndices.insert(currentIndex); } } for (int i = 0; i < states.size(); ++i) { if (usedIndices.count(i) == 0) { indexPermutation[toFill] = i; ++toFill; } } CharacterVector newStates(numRows); for (int i = 0; i < numRows; ++i) { int r = indexPermutation[i]; newStates(i) = states(r); for (int j = 0; j < numCols; ++j) { int c = indexPermutation[j]; resultTransitions(i, j) = transitions(r, c); } } rownames(resultTransitions) = newStates; colnames(resultTransitions) = newStates; if (!byrow) resultTransitions = transpose(resultTransitions); result.slot("transitionMatrix") = resultTransitions; result.slot("byrow") = byrow; result.slot("states") = newStates; result.slot("name") = input.slot("name"); return result; } // Function to sort a matrix of vectors lexicographically NumericMatrix lexicographicalSort(NumericMatrix m) { int numCols = m.ncol(); int numRows = m.nrow(); if (numRows > 0 && numCols > 0) { vector> x(numRows, vector(numCols)); for (int i = 0; i < numRows; ++i) 
for (int j = 0; j < numCols; ++j) x[i][j] = m(i,j); sort(x.begin(), x.end()); NumericMatrix result(numRows, numCols); for (int i = 0; i < numRows; ++i) for (int j = 0; j < numCols; ++j) result(i, j) = x[i][j]; colnames(result) = colnames(m); return result; } else { return m; } } // This method computes the *unique* steady state that exists for an // matrix has to be schocastic by rows // ergodic (= irreducible) matrix vec steadyStateErgodicMatrix(const mat& submatrix) { int nRows = submatrix.n_rows; int nCols = submatrix.n_cols; vec rightPart(nRows + 1, fill::zeros); vec result; mat coeffs(nRows + 1, nCols); // If P is Ergodic, the system (I - P)*w = 0 plus the equation // w_1 + ... + w_m = 1 must have a soultion for (int i = 0; i < nRows; ++i) { for (int j = 0; j < nCols; ++j) { // transpose matrix in-place coeffs(i, j) = submatrix(j, i); if (i == j) coeffs(i, i) -= 1; } } for (int j = 0; j < nCols; ++j) coeffs(nRows, j) = 1; rightPart(nRows) = 1; if (!solve(result, coeffs, rightPart)) stop("Failure computing eigen values / vectors for submatrix in steadyStateErgodicMatrix"); return result; } // Precondition: the matrix should be stochastic by rows NumericMatrix steadyStatesByRecurrentClasses(S4 object) { List recClasses = recurrentClasses(object); int numRecClasses = recClasses.size(); NumericMatrix transitionMatrix = object.slot("transitionMatrix"); CharacterVector states = object.slot("states"); int numCols = transitionMatrix.ncol(); NumericMatrix steady(numRecClasses, numCols); unordered_map stateToIndex; int steadyStateIndex = 0; // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { string state = (string) states[i]; stateToIndex[state] = i; } // For each recurrent class, there must be an steady state for (CharacterVector recurrentClass : recClasses) { int recClassSize = recurrentClass.size(); mat subMatrix(recClassSize, recClassSize); // Fill the submatrix corresponding to the current steady class // Note that for that we have to subset the matrix with the indices // the states in the recurrent class ocuppied in the transition matrix for (int i = 0; i < recClassSize; ++i) { int r = stateToIndex[(string) recurrentClass[i]]; for (int j = 0; j < recClassSize; ++j) { int c = stateToIndex[(string) recurrentClass[j]]; subMatrix(i, j) = transitionMatrix(r, c); } } // Compute the steady states for the given submatrix vec steadyState = steadyStateErgodicMatrix(subMatrix); for (int i = 0; i < recClassSize; ++i) { int c = stateToIndex[(string) recurrentClass[i]]; steady(steadyStateIndex, c) = steadyState(i);; } ++steadyStateIndex; } colnames(steady) = states; return steady; } // [[Rcpp::export(.steadyStatesRcpp)]] NumericMatrix steadyStates(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); S4 object("markovchain"); if (!byrow) { object.slot("transitionMatrix") = transpose(transitions); object.slot("states") = states; object.slot("byrow") = true; } else { object = obj; } // Compute steady states using recurrent classes (there is // exactly one steady state associated with each recurrent class) NumericMatrix result = lexicographicalSort(steadyStatesByRecurrentClasses(object)); if (!byrow) result = transpose(result); return result; } // This method is agnostic on whether the matrix is stochastic // by rows or by columns, we just need the diagonal // [[Rcpp::export(.absorbingStatesRcpp)]] CharacterVector absorbingStates(S4 obj) { NumericMatrix transitionMatrix = 
obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); CharacterVector absorbing; int numStates = states.size(); for (int i = 0; i < numStates; ++i) if (approxEqual(transitionMatrix(i, i), 1)) absorbing.push_back(states(i)); return absorbing; } // [[Rcpp::export(.isIrreducibleRcpp)]] bool isIrreducible(S4 obj) { List commClasses = communicatingClasses(obj); // The markov chain is irreducible iff has only a single communicating class return commClasses.size() == 1; } // [[Rcpp::export(.isRegularRcpp)]] bool isRegular(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); int m = transitions.ncol(); mat probs(transitions.begin(), m, m, true); mat reachable; auto arePositive = [](const double& x){ return x > 0; }; // Taken from the book: // Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. // Theorem 8.5.9 // A is regular iff A^{m²- 2m + 2} > 0 reachable = matrixPow(probs, m*m - 2*m + 2); return allElements(reachable, arePositive); } NumericMatrix computeMeanAbsorptionTimes(mat& probs, CharacterVector& absorbing, CharacterVector& states) { unordered_set toErase; vector indicesToKeep; CharacterVector newNames; string current; for (auto state : absorbing) toErase.insert((string) state); // Compute the states which are not in absorbing // and subset the sub-probability matrix of those // states which are not considered absorbing, Q for (uint i = 0; i < states.size(); ++i) { current = (string) states(i); if (toErase.count(current) == 0) { indicesToKeep.push_back(i); newNames.push_back(current); } } int n = indicesToKeep.size(); uvec indices(indicesToKeep); // Comppute N = 1 - Q mat coeffs = eye(n, n) - probs(indices, indices); vec rightPart = vec(n, fill::ones); mat meanTimes; // Mean absorbing times A are computed as N * A = 1, // where 1 is a column vector of 1s if (!solve(meanTimes, coeffs, rightPart)) stop("Error solving system in meanAbsorptionTime"); NumericMatrix result = wrap(meanTimes); rownames(result) = newNames; return result; } // [[Rcpp::export(.meanAbsorptionTimeRcpp)]] NumericVector meanAbsorptionTime(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); unordered_set allStates; if (!byrow) transitions = transpose(transitions); // Compute recurrent and transient states List commKernel = commClassesKernel(transitions); LogicalVector closed = commKernel["closed"]; CharacterVector transient = computeTransientStates(states, closed); CharacterVector recurrent = computeRecurrentStates(states, closed); // Compute the mean absorption time for the transient states mat probs(transitions.begin(), transitions.nrow(), transitions.ncol(), true); NumericMatrix meanTimes = computeMeanAbsorptionTimes(probs, recurrent, states); NumericVector result; if (meanTimes.ncol() > 0) { result = meanTimes(_, 0); result.attr("names") = transient; } return result; } // [[Rcpp::export(.absorptionProbabilitiesRcpp)]] NumericMatrix absorptionProbabilities(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); string current; bool byrow = obj.slot("byrow"); if (!byrow) transitions = transpose(transitions); unordered_map stateToIndex; // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { current = (string) states[i]; stateToIndex[current] = i; } List commKernel = commClassesKernel(transitions); LogicalVector closed = commKernel["closed"]; CharacterVector transient = computeTransientStates(states, 
closed); CharacterVector recurrent = computeRecurrentStates(states, closed); vector transientIndxs, recurrentIndxs; // Compute the indexes of the matrix which correspond to transient and recurrent states for (auto state : transient) { current = (string) state; transientIndxs.push_back(stateToIndex[current]); } for (auto state : recurrent) { current = (string) state; recurrentIndxs.push_back(stateToIndex[current]); } int m = transitions.ncol(); int n = transientIndxs.size(); if (n == 0) stop("Markov chain does not have transient states, method not applicable"); // Get the indices in arma::uvec s uvec transientIndices(transientIndxs); uvec recurrentIndices(recurrentIndxs); // Compute N = (1 - Q)^{-1} mat probs(transitions.begin(), m, m, true); mat toInvert = eye(n, n) - probs(transientIndices, transientIndices); mat fundamentalMatrix; if (!inv(fundamentalMatrix, toInvert)) stop("Could not compute fundamental matrix"); // Compute the mean absorption probabilities as F* = N*P[transient, recurrent] mat meanProbs = fundamentalMatrix * probs(transientIndices, recurrentIndices); NumericMatrix result = wrap(meanProbs); rownames(result) = transient; colnames(result) = recurrent; if (!byrow) result = transpose(result); return result; } // [[Rcpp::export(.meanFirstPassageTimeRcpp)]] NumericMatrix meanFirstPassageTime(S4 obj, CharacterVector destination) { bool isErgodic = isIrreducible(obj); if (!isErgodic) stop("Markov chain needs to be ergodic (= irreducile) for this method to work"); else { NumericMatrix transitions = obj.slot("transitionMatrix"); mat probs(transitions.begin(), transitions.nrow(), transitions.ncol(), true); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); int numStates = states.size(); NumericMatrix result; if (!byrow) probs = probs.t(); if (destination.size() > 0) { result = computeMeanAbsorptionTimes(probs, destination, states); // This transpose is intentional to return a row always instead of a column result = transpose(result); return result; } else { result = NumericMatrix(numStates, numStates); vec steadyState = steadyStateErgodicMatrix(probs); mat toInvert(numStates, numStates); mat Z; // Compute inverse for (I - P + W), where P = probs, // and W = steadyState pasted row-wise for (int i = 0; i < numStates; ++i) { for (int j = 0; j < numStates; ++j) { toInvert(i, j) = -probs(i, j) + steadyState(j); if (i == j) toInvert(i, i) += 1; } } if (!inv(Z, toInvert)) stop("Problem computing inverse of matrix inside meanFirstPassageTime"); // Set the result matrix for (int j = 0; j < numStates; ++j) { double r_j = 1.0 / steadyState(j); for (int i = 0; i < numStates; ++i) { result(i, j) = (Z(j,j) - Z(i,j)) * r_j; } } colnames(result) = states; rownames(result) = states; if (!byrow) result = transpose(result); return result; } } } // [[Rcpp::export(.meanRecurrenceTimeRcpp)]] NumericVector meanRecurrenceTime(S4 obj) { NumericMatrix steady = steadyStates(obj); bool byrow = obj.slot("byrow"); if (!byrow) steady = transpose(steady); CharacterVector states = obj.slot("states"); NumericVector result; CharacterVector recurrentStates; for (int i = 0; i < steady.nrow(); ++i) { for (int j = 0; j < steady.ncol(); ++j) { // This depends on our imlementation of the steady // states, but we have the guarantee that the entry // corresponding to a state in a recurrent class is // only going to be positive in one vector and the // entries corresponding to transient states are // going to be zero if (!approxEqual(steady(i, j), 0)) { result.push_back(1.0 / steady(i, j)); 
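        // Record the corresponding state name: by Kac's formula the mean
        // recurrence time of a recurrent state j is 1 / pi(j), the reciprocal
        // of its stationary probability within its recurrent class, which is
        // exactly the value pushed just above.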
recurrentStates.push_back(states(j)); } } } result.attr("names") = recurrentStates; return result; } // [[Rcpp::export(.minNumVisitsRcpp)]] NumericMatrix meanNumVisits(S4 obj) { NumericMatrix hitting = hittingProbabilities(obj); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); if (!byrow) hitting = transpose(hitting); int n = hitting.ncol(); bool closeToOne; double inverse; NumericMatrix result(n, n); rownames(result) = states; colnames(result) = states; // Lets call the matrix of hitting probabilities as f // Then mean number of visits from i to j are given by // f_{ij} / (1 - f_{jj}) // having care when f_{ij} -> mean num of visits is zero // and when f_{ij} > 0 and f_{jj} = 1 -> infinity mean // num of visits for (int j = 0; j < n; ++j) { closeToOne = approxEqual(hitting(j, j), 1); if (!closeToOne) inverse = 1 / (1 - hitting(j, j)); for (int i = 0; i < n; ++i) { if (hitting(i, j) == 0) result(i, j) = 0; else { if (closeToOne) result(i, j) = R_PosInf; else result(i, j) = hitting(i, j) * inverse; } } } if (!byrow) result = transpose(result); return result; } markovchain/src/Makevars0000644000176200001440000000022214376752612015050 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) #CXX_STD = CXX11 PKG_LIBS += $(shell ${R_HOME}/bin/Rscript -e "RcppParallel::RcppParallelLibs()") markovchain/src/fittingFunctions.cpp0000644000176200001440000015216513762012756017427 0ustar liggesusers#ifndef STRICT_R_HEADERS #define STRICT_R_HEADERS // [[Rcpp::depends(RcppParallel)]] // [[Rcpp::depends(RcppArmadillo)]] #include #include #include using namespace Rcpp; using namespace RcppArmadillo; using namespace RcppParallel; using namespace std; #include "helpers.h" #include "mapFitFunctions.h" #include #include // [[Rcpp::export(.markovchainSequenceRcpp)]] CharacterVector markovchainSequenceRcpp(int n, S4 markovchain, CharacterVector t0, bool include_t0 = false) { // character vector to store the result CharacterVector chain(n); // transition mastrix NumericMatrix transitionMatrix = markovchain.slot("transitionMatrix"); // possible states CharacterVector states = markovchain.slot("states"); // current state CharacterVector state = t0; NumericVector rowProbs(states.size()); CharacterVector outstate; for (int i = 0;i < n;i++) { // extracting row probabilties for the given state from transition matrix int row_no = 0; for (int j = 0;j < states.size();j++) { /* last element of state character vector because of markovchainListRcpp, a seq of length greater than 1 is also passed whose end state is the beginning state here */ if (states[j] == state[state.size()-1]) { row_no = j; break; } } for (int j = 0; j < states.size(); j++) { rowProbs[j] = transitionMatrix(row_no, j); } // calculate next state outstate = sample(states, 1, false, rowProbs); chain[i] = outstate[0]; state = outstate; } if (include_t0) chain.push_front(t0[0]); return chain; } bool checkSequenceRcpp(List object) { bool out = true; int nob = object.size(); // if there is only one markovchain object return true if (nob == 1) return(true); S4 ob0, ob1; CharacterVector statesN1, statesN, intersection; for (int i = 1; i < nob;i++) { CharacterVector statesNm1; ob0 = S4(object[i-1]); ob1 = S4(object[i]); statesN1 = ob0.slot("states"); statesN = ob1.slot("states"); NumericMatrix matr = ob0.slot("transitionMatrix"); double csum = 0; for (int j = 0;j < matr.ncol();j++) { csum = 0; for (int k = 0;k < matr.nrow();k++) { csum += matr(k, j); } if (csum != 0) statesNm1.push_back(statesN1[j]); } intersection = 
intersect(statesNm1, statesN); if (not setequal(intersection, statesNm1)) { out = false; break; } } return(out); } // [[Rcpp::export(.markovchainListRcpp)]] List markovchainListRcpp(int n, List object, bool include_t0 = false, CharacterVector t0 = CharacterVector()) { bool verify = checkSequenceRcpp(object); if (not verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!"); } // size of result vector int sz = n*object.size(); if (include_t0) sz += n; int vin = 0; // useful in filling below vectors NumericVector iteration(sz); CharacterVector values(sz); S4 ob(object[0]); CharacterVector sampledValues, newVals; // Initial State selection if not passed //----------------------------------------------------------------------------- CharacterVector ustates = ob.slot("states"); NumericVector rowProbs; for (int i = 0;i < ustates.size();i++) { rowProbs.push_back(1.0 / ustates.size()); } bool rselect = (t0.size() == 0); if (rselect) { t0 = sample(ustates, 1, false, rowProbs); } //------------------------------------------------------------------------------ // check whether t0 is in unique states or not for (int i = 0;i < ustates.size();i++) { if (ustates[i] == t0[0]) break; else if (i == ustates.size()-1) stop("Error! Initial state not defined"); } // generate n sequence for (int i = 0;i < n;i++) { // random selection of initial state if not passed to the function if (rselect) { t0 = sample(ustates, 1, false, rowProbs); } sampledValues = markovchainSequenceRcpp(1, object[0], t0, include_t0); if (object.size() > 1) { for (int j = 1;j < object.size();j++) { newVals = markovchainSequenceRcpp(1, object[j], sampledValues); sampledValues.push_back(newVals[0]); } } for (int k = 0;k < sampledValues.size();k++) { iteration[vin] = i + 1; values[vin++] = sampledValues[k]; } } return(List::create(iteration, values)); } struct MCList : public Worker { // 3-D matrix where each slice is a transition matrix const arma::cube mat; // number of transition matrices const int num_mat; // matrix where ith row vector store the list of states // names present in ith transition matrix const vector > names; // vector whose ith element store the dimension of ith // transition matrix const vector size_emat; // whether to include first state const bool include_t0; // info about initial state const bool init; // whether initial state is passed to the method const string init_state; // if yes what's the name // each element of list is a sequence list > output; // constructor for initialization MCList(const arma::cube &pmat, const int &pnum_mat, const vector > &pnames, const vector psize_emat, const bool &pinclude_t0, const bool &pinit, const string &pinit_state) : mat(pmat), num_mat(pnum_mat), names(pnames), size_emat(psize_emat), include_t0(pinclude_t0), init(pinit), init_state(pinit_state) {} MCList(const MCList& mclist, Split) : mat(mclist.mat), num_mat(mclist.num_mat), names(mclist.names), size_emat(mclist.size_emat), include_t0(mclist.include_t0), init(mclist.init), init_state(mclist.init_state) {} void operator()(std::size_t begin, std::size_t end) { // to take care of include_t0 unsigned int ci = 0; if (include_t0) ci = 1; // to store single sequence generated each time vector temp(num_mat+ci); // initial probability and states indices arma::vec in_probs(size_emat[0]); arma::vec in_states(size_emat[0]); // assume equal chances of selection of states for the first time for (unsigned int i = 0; i < in_probs.size(); i++) { in_probs[i] = 1.0 / size_emat[0]; 
in_states[i] = i; } // to store the index of the state selected arma::vec istate; string t0; // every time generate one sequence for (unsigned int p = begin; p < end; p++) { if (not init) { // randomly selected state istate = sample(in_states, 1, false, in_probs); t0 = names[0][istate[0]]; } else { t0 = init_state; } // include the state in the sequence if (include_t0) temp[0] = t0; // to generate one sequence for (unsigned int i = 0; i < (unsigned int)num_mat; i++) { // useful for generating rows probabilty vector unsigned int j = 0; for (j = 0; j < (unsigned int)size_emat[i]; j++) { if (names[i][j] == t0) break; } // vector to be passed to sample method arma::vec probs(size_emat[i]); arma::vec states(size_emat[i]); for (unsigned int k=0; k < probs.size(); k++) { probs[k] = mat(j, k, i); states[k] = k; } // new state selected arma::vec elmt = sample(states, 1, false, probs); t0 = names[i][elmt[0]]; // populate sequence temp[i+ci] = t0; } // insert one sequence to the output output.push_back(temp); } } void join(const MCList& rhs) { // constant iterator to the first element of rhs.output list >::const_iterator it = rhs.output.begin(); // merge the result of two parallel computation for (;it != rhs.output.end();it++) { output.push_back(*it); } } }; // Function to generate a list of sequence of states in parallel from non-homogeneous Markov chains. // // Provided any markovchainList object, it returns a list of sequence of states coming // from the underlying stationary distribution. // // @param listObject markovchainList object // @param n Sample size // @param include_t0 Specify if the initial state shall be used // // @return A List // @author Giorgio Spedicato, Deepak Yadav // // @examples // statesNames <- c("a") // mcA <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(1), nrow = 1, byrow = TRUE, // dimnames = list(statesNames, statesNames))) // // statesNames <- c("a","b") // mcB <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(0.5, 0.5, 0.3, 0.7), nrow = 2, byrow = TRUE, // dimnames = list(statesNames, statesNames))) // // statesNames <- c("a","b","c") // mcC <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, // byrow = TRUE, dimnames = list(statesNames, statesNames))) // // mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) // // markovchainSequenceParallelRcpp(mclist, 99999, TRUE) // // // [[Rcpp::export(.markovchainSequenceParallelRcpp)]] List markovchainSequenceParallelRcpp(S4 listObject, int n, bool include_t0 = false, CharacterVector init_state = CharacterVector()) { // list of markovchain object List object = listObject.slot("markovchains"); bool verify = checkSequenceRcpp(object); if (not verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!"); } // store number of transition matrices int num_matrix = object.size(); // maximum of dimension of all transition matrices int max_dim_mat = 0; // to store the dimension of each transition matrices vector size_emat(num_matrix); // to store list of states in a transition matrix CharacterVector states; // calculate max_dim_mat and populate size_emat for (int i = 0; i < num_matrix; i++) { // extract ith markovchain object S4 ob = object[i]; // list of states in ith markovchain object states = ob.slot("states"); // keep track of maximun dimension if (states.size() > max_dim_mat) max_dim_mat = states.size(); // size of ith transition 
matrix size_emat[i] = states.size(); } // Matrix with ith row store the states in ith t-matrix vector > names(num_matrix, vector(max_dim_mat)); // to store all t-matrix arma::cube mat(max_dim_mat, max_dim_mat, num_matrix); mat.fill(0); for (int i = 0; i < num_matrix;i++) { // ith markovchain object S4 ob = object[i]; // t-matrix and states names NumericMatrix tmat = ob.slot("transitionMatrix"); CharacterVector stat_names = ob.slot("states"); // populate 3-D matrix for (int j = 0;j < tmat.nrow();j++) { for (int k = 0; k < tmat.ncol();k++) { mat(j, k, i) = tmat(j, k); } // populate names of states names[i][j] = stat_names[j]; } } // initial state is passed or not bool init = false; string ini_state; if (init_state.size() != 0) { init = true; ini_state = as(init_state[0]); } // create an object of MCList class MCList mcList(mat, num_matrix, names, size_emat, include_t0, init, ini_state); // start parallel computation parallelReduce(0, n, mcList); // list of sequences return wrap(mcList.output); } // convert a frequency matrix to a transition probability matrix NumericMatrix _toRowProbs(NumericMatrix x, bool sanitize = false) { int nrow = x.nrow(), ncol = x.ncol(); NumericMatrix out(nrow); for (int i = 0; i < nrow; i++) { double rowSum = 0; for (int j = 0; j < ncol; j++) rowSum += x(i, j); for (int j = 0; j < ncol; j++) { // not updating out(i,j) outside sanitize condition as it may lead to runtime error(rowSum=0) if (sanitize == true) { if (rowSum == 0) { out(i, j) = 1.0/ncol; } else { out(i, j) = x(i, j) / rowSum; } } else { if (rowSum == 0) { out(i, j) = 0; } else { out(i, j) = x(i, j) / rowSum; } } } } out.attr("dimnames") = List::create(rownames(x), colnames(x)); return out; } // Create a frequency matrix //' @rdname markovchainFit //' //' @export // [[Rcpp::export]] NumericMatrix createSequenceMatrix(SEXP stringchar, bool toRowProbs = false, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { //--------------------------------------------------------------------- // check whether stringchar is a list or not if (TYPEOF(stringchar) == VECSXP) { List seqs = as(stringchar); CharacterVector pstates; // possiblestates for (int i = 0;i < seqs.size();i++) { CharacterVector tseq = unique(as(seqs[i])); for (int j = 0;j < tseq.size();j++) { if (tseq[j] != "NA") { pstates.push_back(tseq[j]); } } } for (int i = 0;i < possibleStates.size();i++) { pstates.push_back(possibleStates[i]); } pstates = unique(pstates); pstates = pstates.sort(); int sizeMatr = pstates.size(); NumericMatrix freqMatrix(sizeMatr); freqMatrix.attr("dimnames") = List::create(pstates, pstates); for (int i = 0;i < seqs.size();i++) { NumericMatrix temp = createSequenceMatrix(seqs[i], false, false, pstates); freqMatrix += temp; } if (sanitize == true) { for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) rowSum += freqMatrix(i, j); if (rowSum == 0) for (int j = 0; j < sizeMatr; j++) freqMatrix(i, j) = 1; } } if (toRowProbs == true) return _toRowProbs(freqMatrix, sanitize); return freqMatrix; } //--------------------------------------------------------------------- CharacterVector stringChar = as(stringchar); // may include missing values CharacterVector elements_na = unique(union_(stringChar, possibleStates)); // free from missing values CharacterVector elements = clean_nas(elements_na); elements = elements.sort(); int sizeMatr = elements.size(); // output matrix of dimensions equal to total possible states NumericMatrix freqMatrix(sizeMatr); freqMatrix.attr("dimnames") = 
List::create(elements, elements); CharacterVector rnames = rownames(freqMatrix); if (Rf_isMatrix(stringchar)) { // coerce to CharacterMatrix CharacterMatrix seqMat = as(stringchar); // number of columns must be 2 if (seqMat.ncol() != 2) { stop("Number of columns in the matrix must be 2"); } // populate frequency matrix int posFrom = 0, posTo = 0; for (long int i = 0; i < seqMat.nrow(); i ++) { if (seqMat(i, 0) != "NA" && seqMat(i, 1) != "NA") { for (int j = 0; j < rnames.size(); j ++) { if (seqMat(i, 0) == rnames[j]) posFrom = j; if (seqMat(i, 1) == rnames[j]) posTo = j; } freqMatrix(posFrom, posTo)++; } } } else { int posFrom = 0, posTo = 0; for (long int i = 0; i < stringChar.size() - 1; i ++) { if (stringChar[i] != "NA" && stringChar[i+1] != "NA") { for (int j = 0; j < rnames.size(); j ++) { if (stringChar[i] == rnames[j]) posFrom = j; if (stringChar[i + 1] == rnames[j]) posTo = j; } freqMatrix(posFrom, posTo)++; } } } // sanitizing if any row in the matrix sums to zero by posing the corresponding diagonal equal to 1/dim if (sanitize == true) { for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) rowSum += freqMatrix(i, j); if (rowSum == 0) for (int j = 0; j < sizeMatr; j++) freqMatrix(i, j) = 1; } } if (toRowProbs == true) return _toRowProbs(freqMatrix, sanitize); return (freqMatrix); } // log-likelihood double _loglikelihood(CharacterVector seq, NumericMatrix transMatr) { // to store the result double out = 0; // states names CharacterVector rnames = rownames(transMatr); // caculate out int from = 0, to = 0; for (long int i = 0; i < seq.size() - 1; i ++) { if (seq[i] != "NA" && seq[i+1] != "NA") { for (int r = 0; r < rnames.size(); r ++) { if (rnames[r] == seq[i]) from = r; if (rnames[r] == seq[i + 1]) to = r; } out += log(transMatr(from, to)); } } return out; } // [[Rcpp::export(.mcListFitForList)]] List mcListFitForList(List data) { int l = data.size(); // length of list // pair of length and index // length of sequence data[index] vector > length_seq(l); for (int i = 0;i < l;i++) { CharacterVector temp = as(data[i]); length_seq[i] = make_pair(temp.size(), i); } // increasing order of the length of sequence in the list sort(length_seq.begin(), length_seq.end()); int i = 1; // ith transition int j = 0; // start from least length sequence List out; // to store result while(j < l) { int len = length_seq[j].first; if (i < len) { // transition from (i-1)th to ith CharacterMatrix temp(l-j, 2); // indicates wheter there is a valid transition for the current time of the // markov chain bool validTransition = false; for (int k = j;k < l;k++) { temp(k-j, 0) = (as(data[length_seq[k].second]))[i-1]; temp(k-j, 1) = (as(data[length_seq[k].second]))[i]; if (temp(k-j,0) != "NA" && temp(k-j, 1) != "NA") validTransition = true; } // frequency matrix if (validTransition) out.push_back(createSequenceMatrix(temp, false, true)); i++; } else { j++; } } return out; } List generateCI(double confidencelevel, NumericMatrix freqMatr) { int sizeMatr = freqMatr.nrow(); // the true confidence level is 1-(1-alpha)/2 float true_confidence_level = 1-(1-confidencelevel)/2.0; // transition matrix NumericMatrix initialMatr(sizeMatr, sizeMatr); // calculation of transition matrix // take care of rows with all entries 0 for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j); } // calculate rows probability for (int j = 0; j < sizeMatr; j++) { if (rowSum == 0) { initialMatr(i, j) = 1.0/sizeMatr; } else { initialMatr(i, j) 
= freqMatr(i, j)/rowSum; } } } // matrices to store end results NumericMatrix lowerEndpointMatr(sizeMatr, sizeMatr); NumericMatrix upperEndpointMatr(sizeMatr, sizeMatr); NumericMatrix standardError(sizeMatr, sizeMatr); // z score for given confidence interval // double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); double zscore = stats::qnorm_0(true_confidence_level, 1.0, 0.0); // populate above defined matrix double marginOfError, lowerEndpoint, upperEndpoint; for (int i = 0; i < sizeMatr; i++) { for (int j = 0; j < sizeMatr; j++) { if (freqMatr(i, j) == 0) { // whether entire ith row is zero or not bool notrans = true; for (int k = 0; k < sizeMatr; k++) { // if the entire ith row is not zero then set notrans to false if (freqMatr(i, k) != 0) { standardError(i, j) = lowerEndpointMatr(i, j) = upperEndpointMatr(i, j) = 0; notrans = false; break; } } // if entire ith row is zero if (notrans) standardError(i, j) = lowerEndpointMatr(i, j) = upperEndpointMatr(i, j) = 1; } else { // standard error calculation standardError(i, j) = initialMatr(i, j) / sqrt(freqMatr(i, j)); // marginal error calculation marginOfError = zscore * standardError(i, j); // lower and upper end point calculation lowerEndpoint = initialMatr(i, j) - marginOfError; upperEndpoint = initialMatr(i, j) + marginOfError; // taking care that upper and lower end point should be between 0(included) and 1(included) lowerEndpointMatr(i, j) = (lowerEndpoint > 1.0) ? 1.0 : ((0.0 > lowerEndpoint) ? 0.0 : lowerEndpoint); upperEndpointMatr(i, j) = (upperEndpoint > 1.0) ? 1.0 : ((0.0 > upperEndpoint) ? 0.0 : upperEndpoint); } } } // set the rows and columns name as states names standardError.attr("dimnames") = upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = freqMatr.attr("dimnames"); return List::create(_["standardError"] = standardError, _["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr); } // Fit DTMC using MLE List _mcFitMle(SEXP data, bool byrow, double confidencelevel, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { NumericMatrix freqMatr = createSequenceMatrix(data, false, false, possibleStates); // matrix size = nrows = ncols int sizeMatr = freqMatr.nrow(); // initial matrix = transition matrix NumericMatrix initialMatr(sizeMatr); // set names of states as rows name and columns name initialMatr.attr("dimnames") = freqMatr.attr("dimnames"); // take care of rows with all entries 0 for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j); } // calculate rows probability for (int j = 0; j < sizeMatr; j++) { if (rowSum == 0) { initialMatr(i, j) = (sanitize ? 
1.0/sizeMatr : 0); } else { initialMatr(i, j) = freqMatr(i, j)/rowSum; } } } // transpose the matrix if byrow is false if (byrow == false) { initialMatr = transposeMatrix(initialMatr); } // create markov chain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = initialMatr; outMc.slot("name") = "MLE Fit"; List CI = generateCI(confidencelevel, freqMatr); // return a list of important results return List::create(_["estimate"] = outMc, _["standardError"] = CI[0], _["confidenceLevel"] = CI[1], _["lowerEndpointMatrix"] = CI[2], _["upperEndpointMatrix"] = CI[3] ); } // Fit DTMC using Laplacian smooth List _mcFitLaplacianSmooth(CharacterVector stringchar, bool byrow, double laplacian = 0.01, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // create frequency matrix NumericMatrix origNum = createSequenceMatrix(stringchar, false, sanitize, possibleStates); // store dimension of frequency matrix int nRows = origNum.nrow(), nCols = origNum.ncol(); // convert frequency matrix to transition matrix for (int i = 0; i < nRows; i ++) { double rowSum = 0; // add laplacian correction to each entry // also calculate row's sum for (int j = 0; j < nCols; j ++) { origNum(i,j) += laplacian; rowSum += origNum(i,j); } // get a transition matrix and a DTMC for (int j = 0; j < nCols; j ++) { if (rowSum == 0) origNum(i,j) = sanitize ? origNum(i,j)/rowSum : 0; else origNum(i,j) = origNum(i,j)/rowSum; } } // transpose transition matrix = columnwise storage if (byrow == false) { origNum = transposeMatrix(origNum); } // create markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = origNum; outMc.slot("name") = "Laplacian Smooth Fit"; return List::create(_["estimate"] = outMc); } // bootstrap a sequence to produce a list of sample sequences List _bootstrapCharacterSequences(CharacterVector stringchar, int n, long int size = -1, CharacterVector possibleStates = CharacterVector()) { // store length of sequence if (size == -1) { size = stringchar.size(); } // frequency matrix NumericMatrix contingencyMatrix = createSequenceMatrix(stringchar, true, true, possibleStates); // many samples from a given a sequence :: bootstrap // res list is helper list List samples, res; // state names CharacterVector itemset = rownames(contingencyMatrix); // number of distinct states int itemsetsize = itemset.size(); // access R sample function Function sample("sample"); for (int i = 0; i < n; i ++) { // charseq to store a fresh sequence CharacterVector charseq, resvec; // select a random number between 0 ans itemsize int rnd = (int)(runif (1)(0) * itemsetsize); // random state String ch = itemset[rnd]; // push start state to charseq charseq.push_back(ch); for (long int j = 1; j < size; j ++) { // store row probability NumericVector probsVector; // populate probsVector for (int k = 0; k < itemsetsize; k ++) { if ((std::string)itemset[k] == (std::string) ch) { probsVector = contingencyMatrix(k, _); break; } } // select next state of sequence res = sample(itemset, 1, true, probsVector); resvec = res[0]; // current state ch = resvec[0]; charseq.push_back(ch); } // every add one sequence samples.push_back(charseq); } // return a list of n sequence of same length as of given sequence return samples; } // estimate from the list of bootstrapped matrices List _fromBoot2Estimate(List listMatr) { // number of transition matrices int sampleSize = listMatr.size(); // first transition matrix NumericMatrix firstMat = listMatr[0]; // dimension of matrix int matrDim = firstMat.nrow(); // matrix to 
store mean and standard deviation NumericMatrix matrMean(matrDim), matrSd(matrDim); // populate mean and sd matrix for (int i = 0; i < matrDim; i ++) { for (int j = 0; j < matrDim; j ++) { NumericVector probsEstimated; for (int k = 0; k < sampleSize; k ++) { NumericMatrix mat = listMatr[k]; probsEstimated.push_back(mat(i,j)); } matrMean(i, j) = mean(probsEstimated); matrSd(i, j) = Rcpp::sd(probsEstimated); } } // set rows and columns names = states names matrMean.attr("dimnames") = List::create(rownames(firstMat), colnames(firstMat)); matrSd.attr("dimnames") = matrMean.attr("dimnames"); // return list of estimated mean transition matrix and estimated sd matrix return List::create(_["estMu"] = matrMean, _["estSigma"] = matrSd); } struct BootstrapList : public Worker { // transition matrix const RMatrix input; // unique states const vector states; // length of sequence const int len; // list of new sequences list > output; // constructor BootstrapList(const NumericMatrix input, const vector states, const int len) : input(input), states(states), len(len) {} BootstrapList(const BootstrapList& bsList, Split) : input(bsList.input), states(bsList.states), len(bsList.len) {} // generate (end-begin) sequences void operator()(std::size_t begin, std::size_t end) { // number of unique states unsigned int n = states.size(); // initial probability vector arma::vec iprobs(n); // probability vector (can be any row of transition matrix) arma::vec probs(n); // unique states indices arma::vec ustates(n); // initialization for (unsigned int i = 0;i < n;i++) { iprobs[i] = 1.0/n; ustates[i] = i; } // to store new state generated arma::vec istate; // every time generate one sequence for (unsigned int p = begin; p < (unsigned int)end;p++) { // randomly select starting state vector result(len); istate = sample(ustates, 1, false, iprobs); result[0] = states[istate[0]]; // given a present state generate a future state for (unsigned int j = 1; j < (unsigned int)len;j++) { // row vector corresponding to state istate[0] for (unsigned int k = 0;k < (unsigned int)n;k++) { probs[k] = input(istate[0], k); } // select future state istate = sample(ustates, 1, false, probs); result[j] = states[istate[0]]; } // populate a sequence output.push_back(result); } } void join(const BootstrapList& rhs) { // constant iterator to the first element of rhs.output list >::const_iterator it = rhs.output.begin(); // merge the result of two parallel computation for (;it != rhs.output.end();it++) { output.push_back(*it); } } }; List _bootstrapCharacterSequencesParallel(CharacterVector stringchar, int n, long int size = -1, CharacterVector possibleStates = CharacterVector()) { // store length of sequence if (size == -1) { size = stringchar.size(); } // frequency matrix NumericMatrix contingencyMatrix = createSequenceMatrix(stringchar, true, true, possibleStates); // state names vector itemset = as >(rownames(contingencyMatrix)); // number of distinct states // int itemsetsize = itemset.size(); BootstrapList bsList(contingencyMatrix, itemset, size); parallelReduce(0, n, bsList); return wrap(bsList.output); } // Fit DTMC using bootstrap method List _mcFitBootStrap(CharacterVector data, int nboot, bool byrow, bool parallel, double confidencelevel, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // list of sequence generated using given sequence List theList = (parallel) ? 
_bootstrapCharacterSequencesParallel(data, nboot, data.size()) : _bootstrapCharacterSequences(data, nboot, data.size()); // number of new sequence int n = theList.size(); // to store frequency matrix for every sequence List pmsBootStrapped(n); // populate pmsBootStrapped if (parallel) for (int i = 0; i < n; i++) pmsBootStrapped[i] = createSequenceMatrix(theList[i], true, sanitize, possibleStates); else for (int i = 0; i < n; i++) pmsBootStrapped[i] = createSequenceMatrix(theList[i], true, sanitize, possibleStates); List estimateList = _fromBoot2Estimate(pmsBootStrapped); // transition matrix NumericMatrix transMatr = _toRowProbs(estimateList["estMu"], sanitize); // markovchain object S4 estimate("markovchain"); estimate.slot("transitionMatrix") = transMatr; estimate.slot("byrow") = byrow; estimate.slot("name") = "BootStrap Estimate"; // z score for given confidence interval double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); // store dimension of matrix int nrows = transMatr.nrow(); int ncols = transMatr.ncol(); // matrix to store end results NumericMatrix lowerEndpointMatr(nrows, ncols), upperEndpointMatr(nrows, ncols); NumericMatrix sigma = estimateList["estSigma"], standardError(nrows, ncols); // populate above defined matrix double marginOfError, lowerEndpoint, upperEndpoint; for (int i = 0; i < nrows; i ++) { for (int j = 0; j < ncols; j ++) { // standard error calculation standardError(i, j) = sigma(i, j) / sqrt(double(n)); // marginal error calculation marginOfError = zscore * standardError(i, j); // lower and upper end point calculation lowerEndpoint = transMatr(i, j) - marginOfError; upperEndpoint = transMatr(i, j) + marginOfError; // taking care that upper and lower end point should be between 0(included) and 1(included) lowerEndpointMatr(i, j) = (lowerEndpoint > 1.0) ? 1.0 : ((0.0 > lowerEndpoint) ? 0.0 : lowerEndpoint); upperEndpointMatr(i, j) = (upperEndpoint > 1.0) ? 1.0 : ((0.0 > upperEndpoint) ? 
0.0 : upperEndpoint); } } // set the rows and columns name as states names standardError.attr("dimnames") = upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = transMatr.attr("dimnames"); // return a list of important results List out = List::create(_["estimate"] = estimate, _["standardError"] = standardError, _["confidenceInterval"] = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr), _["bootStrapSamples"] = pmsBootStrapped ); return out; } // convert matrix data to transition probability matrix // [[Rcpp::export(.matr2Mc)]] S4 _matr2Mc(CharacterMatrix matrData, double laplacian = 0, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // dimension of input matrix long int nRows = matrData.nrow(), nCols = matrData.ncol(); // set of states std::set uniqueVals; // populate uniqueVals set for (long int i = 0; i < nRows; i++) for (long int j = 0; j < nCols; j++) { if (matrData(i,j) != "NA") uniqueVals.insert((std::string)matrData(i, j)); } for (int i = 0;i < possibleStates.size();i++) { uniqueVals.insert((std::string)possibleStates[i]); } // unique states int usize = uniqueVals.size(); // matrix of dimension usize NumericMatrix contingencyMatrix (usize); // state names as rows name and columns name contingencyMatrix.attr("dimnames") = List::create(uniqueVals, uniqueVals); // iterator for set of states std::set::iterator it; // populate contingency matrix int stateBegin = 0, stateEnd = 0; for (long int i = 0; i < nRows; i ++) { for (long int j = 1; j < nCols; j ++) { if (matrData(i,j-1) != "NA" && matrData(i,j) != "NA") { // row and column number of begin state and end state int k = 0; for (it = uniqueVals.begin(); it != uniqueVals.end(); ++it, k++) { if (*it == (std::string)matrData(i, j-1)) { stateBegin = k; } if (*it == (std::string)matrData(i,j)) { stateEnd = k; } } contingencyMatrix(stateBegin,stateEnd)++; } } } // add laplacian correction if needed for (int i = 0; i < usize; i++) { double rowSum = 0; for (int j = 0; j < usize; j++) { contingencyMatrix(i,j) += laplacian; rowSum += contingencyMatrix(i, j); } // get the transition matrix and a DTMC for (int j = 0; j < usize; j ++) { if (sanitize == true) { if (rowSum == 0) { contingencyMatrix(i,j) = 1.0/usize; } else { contingencyMatrix(i,j) /= rowSum; } } else { if (rowSum == 0) { contingencyMatrix(i,j) = 0; } else { contingencyMatrix(i,j) /= rowSum; } } } } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = contingencyMatrix; return(outMc); } // convert matrix data to transition probability matrix // [[Rcpp::export(.list2Mc)]] S4 _list2Mc(List data, double laplacian = 0, bool sanitize = false) { // set of states std::set uniqueVals; // populate uniqueVals set for (long int i = 0; i < data.size(); i++) { CharacterVector temp = as(data[i]); for (long int j = 0; j < temp.size(); j++) { uniqueVals.insert((std::string)temp[j]); } } // unique states int usize = uniqueVals.size(); // matrix of dimension usize NumericMatrix contingencyMatrix (usize); // state names as rows name and columns name contingencyMatrix.attr("dimnames") = List::create(uniqueVals, uniqueVals); // iterator for set of states std::set::iterator it; // populate contingency matrix int stateBegin = 0, stateEnd = 0; for (long int i = 0; i < data.size(); i ++) { CharacterVector temp = as(data[i]); for (long int j = 1; j < temp.size(); j ++) { // row and column number of begin state and end state int k = 0; for 
(it = uniqueVals.begin(); it != uniqueVals.end(); ++it, k++) { if (*it == (std::string)temp[j-1]) { stateBegin = k; } if (*it == (std::string)temp[j]) { stateEnd = k; } } contingencyMatrix(stateBegin,stateEnd)++; } } // add laplacian correction if needed for (int i = 0; i < usize; i++) { double rowSum = 0; for (int j = 0; j < usize; j++) { contingencyMatrix(i,j) += laplacian; rowSum += contingencyMatrix(i, j); } // get the transition matrix and a DTMC for (int j = 0; j < usize; j ++) { if (sanitize == true) { if (rowSum == 0) { contingencyMatrix(i,j) = 1.0/usize; } else { contingencyMatrix(i,j) /= rowSum; } } else { if (rowSum == 0) { contingencyMatrix(i,j) = 0; } else { contingencyMatrix(i,j) /= rowSum; } } } } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = contingencyMatrix; return(outMc); } //' @name inferHyperparam //' @title Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set //' @description Since the Bayesian inference approach implemented in the package is based on conjugate priors, //' hyperparameters must be provided to model the prior probability distribution of the chain //' parameters. The hyperparameters are inferred from a given a priori matrix under the assumption //' that the matrix provided corresponds to the mean (expected) values of the chain parameters. A //' scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred //' from a data set. //' //' @param transMatr A valid transition matrix, with dimension names. //' @param scale A vector of scaling factors, each element corresponds to the row names of the provided transition //' matrix transMatr, in the same order. //' @param data A data set from which the hyperparameters are inferred. //' //' @details transMatr and scale need not be provided if data is provided. //' @return Returns the hyperparameter matrix in a list. //' //' @note The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, //' and the elements in the matrix are correspondingly permuted. //' //' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R //' package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi, Giorgio Spedicato //' @seealso \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} //' //' @examples //' data(rain, package = "markovchain") //' inferHyperparam(data = rain$rain) //' //' weatherStates <- c("sunny", "cloudy", "rain") //' weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, //' 0.3, 0.4, 0.3, //' 0.2, 0.4, 0.4), //' byrow = TRUE, nrow = 3, //' dimnames = list(weatherStates, weatherStates)) //' inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) //' //' @export //' // [[Rcpp::export]] List inferHyperparam(NumericMatrix transMatr = NumericMatrix(), NumericVector scale = NumericVector(), CharacterVector data = CharacterVector()) { // stop if there is only one element in the matrix and size of data sequence is zero if (transMatr.nrow() * transMatr.ncol() == 1 && data.size() == 0) stop("Provide the prior transition matrix or the data set in order to infer the hyperparameters"); // to store final result List out; // Number of elements are greater than 1 if (transMatr.nrow() * transMatr.ncol() != 1) { if (scale.size() == 0) { stop("Provide a non-zero scaling factor vector to infer integer hyperparameters"); } // --------begin validity checks for the transition matrix--------- if (transMatr.nrow() != transMatr.ncol()) { stop("Transition matrix dimensions are inconsistent"); } // number of rows in transition matrix int sizeMatr = transMatr.nrow(); // if any element is greater than 1 or less than 0 then raise error // sum of each rows must lie between 1 - eps and 1 + eps for (int i = 0; i < sizeMatr; i++) { double rowSum = 0., eps = 1e-10; for (int j = 0; j < sizeMatr; j++) { if (transMatr(i, j) < 0. || transMatr(i, j) > 1.) stop("The entries in the transition matrix must each belong to the interval [0, 1]"); else rowSum += transMatr(i, j); } if (rowSum <= 1. - eps || rowSum >= 1. + eps) stop("Each rows of the transition matrix must sum to 1"); } // rows and columns name of transition matrix List dimNames = transMatr.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; // sorted rows and columns names CharacterVector sortedColNames(sizeMatr), sortedRowNames(sizeMatr); for (int i = 0; i < sizeMatr; i++) { sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); } sortedColNames.sort(); sortedRowNames.sort(); // rows names vector and columns name vector must be same // and no names in names vectors should be same for (int i = 0; i < sizeMatr; i++) { if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); } // --------end of validity checks for the transition matrix--------- // --------beginning of validity checks for the scale factor vector--------- // length of scale vector must be equal to number of rows in transition matrix if (scale.size() != sizeMatr) stop("The dimensions of the scale vector must match the number of states in the chain"); // if any value in the scale vector is zero for (int i = 0; i < sizeMatr; i++) { if (scale(i) == 0) stop("The scaling factors must be non-zero!"); } // --------end of validity checks for the scale factor vector--------- // Creation of output matrix i.e. 
// the hyperparameter matrix
    NumericMatrix hpScaled(sizeMatr);
    hpScaled.attr("dimnames") = List::create(rowNames, colNames);

    // populate hyper param matrix
    for (int i = 0; i < sizeMatr; i++)
      for (int j = 0; j < sizeMatr; j++)
        hpScaled(i, j) = scale(i) * transMatr(i, j);

    /* shift rows and columns so that row and column names are in sorted order */
    hpScaled = sortByDimNames(hpScaled);

    // store list of hyper param scaled matrix
    out = List::create(_["scaledInference"] = hpScaled);
  } else if (data.size() != 0) {
    // to store unique states in sorted order
    CharacterVector elements = data;
    for (int i = 0; i < data.size(); i++)
      elements.push_back(data[i]);
    elements = unique(elements).sort();

    // size of hyperparam matrix
    int sizeMatr = elements.size();

    // create hyperparam matrix
    NumericMatrix hpData(sizeMatr);
    hpData.attr("dimnames") = List::create(elements, elements);
    std::fill(hpData.begin(), hpData.end(), 1);

    // populate hyper param matrix
    int posFrom = 0, posTo = 0;
    for (long int i = 0; i < data.size() - 1; i ++) {
      for (int j = 0; j < sizeMatr; j ++) {
        if (data[i] == elements[j]) posFrom = j;
        if (data[i + 1] == elements[j]) posTo = j;
      }
      hpData(posFrom,posTo)++;
    }

    // output data
    out = List::create(_["dataInference"] = hpData);
  }

  return out;
}

//' @name markovchainFit
//' @title Function to fit a discrete Markov chain
//' @description Given a sequence of states arising from a stationary state,
//'              it fits the underlying Markov chain distribution using either MLE (also using a
//'              Laplacian smoother), bootstrap or by MAP (Bayesian) inference.
//'
//' @param data It can be a character vector or a {n x n} matrix or a {n x n} data frame or a list
//' @param method Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace"
//' @param byrow It tells whether the output Markov chain should show the transition probabilities by row.
//' @param nboot Number of bootstrap replicates in case "bootstrap" is used.
//' @param laplacian Laplacian smoothing parameter, default zero. It is only used when the "laplace" method
//'                  is chosen.
//' @param name Optional character for name slot.
//' @param parallel Use parallel processing when performing Bootstrap estimates.
//' @param confidencelevel \deqn{\alpha} level for confidence intervals width.
//'                        Used only when \code{method} equals "mle".
//' @param confint A boolean to decide whether to compute the confidence intervals or not.
//' @param hyperparam Hyperparameter matrix for the a priori distribution. If none is provided,
//'                   a default value of 1 is assigned to each parameter. This must be of size
//'                   {k x k} where k is the number of states in the chain and the values
//'                   should typically be non-negative integers.
//' @param stringchar It can be a {n x n} matrix or a character vector or a list
//' @param toRowProbs converts a sequence matrix into a probability matrix
//' @param sanitize put 1 in all rows having rowSum equal to zero
//' @param possibleStates Possible states which are not present in the given sequence
//'
//' @details Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar}
//' contain \code{NAs}, the related \code{NA} containing transitions will be ignored.
//'
//' @return A list containing an estimate, log-likelihood, and, when the "bootstrap" method is used, a matrix
//' of standard deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method
//' is used, the lower and upper confidence bounds are returned along with the standard error.
//' The "map" method also returns the expected value of the parameters with respect to the
//' posterior distribution.
//' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
//'
//' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate,
//' and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield,
//' Alfred Hubler, Santa Fe Institute
//'
//' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R
//' package version 0.2.5
//'
//' @author Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi
//' @note This function has been rewritten in Rcpp. The bootstrap algorithm has been defined "heuristically".
//' In addition, the parallel facility is not complete, involving only a part of the bootstrap process.
//' When \code{data} is either a \code{data.frame} or a \code{matrix} object, only the MLE fit is
//' currently available.
//'
//' @seealso \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}}
//' @examples
//' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a",
//'               "b", "b", "b", "a")
//' sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE)
//' mcFitMLE <- markovchainFit(data = sequence)
//' mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc")
//'
//' na.sequence <- c("a", NA, "a", "b")
//' # There will be only a (a,b) transition
//' na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE)
//' mcFitMLE <- markovchainFit(data = na.sequence)
//'
//' # data can be a list of character vectors
//' sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c"))
//' mcFitMap <- markovchainFit(sequences, method = "map")
//' mcFitMle <- markovchainFit(sequences, method = "mle")
//' @rdname markovchainFit
//'
//' @export
//'
// [[Rcpp::export]]
List markovchainFit(SEXP data, String method = "mle", bool byrow = true, int nboot = 10,
                    double laplacian = 0, String name = "", bool parallel = false,
                    double confidencelevel = 0.95, bool confint = true,
                    NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false,
                    CharacterVector possibleStates = CharacterVector()) {

  if (method != "mle" && method != "bootstrap" && method != "map" && method != "laplace") {
    stop("method should be one of \"mle\", \"bootstrap\", \"map\" or \"laplace\"");
  }

  // list to store the output
  List out;

  // if input data is a data frame or a matrix
  if (Rf_inherits(data, "data.frame") || Rf_isMatrix(data)) {

    // store input data in mat
    CharacterMatrix mat;

    // if data is a data.frame force it to a matrix
    if (Rf_inherits(data, "data.frame")) {
      DataFrame df(data);

      // matrix: number of rows = number of rows in df; same for the number of columns
      mat = CharacterMatrix(df.nrows(), df.size());
      for (long int i = 0; i < df.size(); i++)
        mat(_, i) = CharacterVector(df[i]);
    } else {
      mat = data;
    }

    // byrow assumes distinct observations (trajectories) are per row
    // otherwise transpose
    if (!byrow) mat = transposeMatrix(mat);

    S4 outMc = _matr2Mc(mat, laplacian, sanitize, possibleStates);

    // whether to compute the confidence interval or not
    if (confint) {
      // convert matrix to list
      int nrows = mat.nrow();
      List manyseq(nrows);

      for (int i = 0; i < nrows; i++)
        manyseq[i] = mat(i, _);

      out = _mcFitMle(manyseq, byrow, confidencelevel, sanitize, possibleStates);
      out[0] = outMc;
    } else {
      out = List::create(_["estimate"] = outMc);
    }
  } else if (TYPEOF(data) == VECSXP) {
    if (method == "mle") {
      out = _mcFitMle(data, byrow, confidencelevel, sanitize, possibleStates);
    } else if (method == "map") {
      out = _mcFitMap(data, byrow, confidencelevel, hyperparam, sanitize, possibleStates);
    } else
      stop("method not available for a list");
  } else {
    if (method == "mle") {
      out = _mcFitMle(data, byrow, confidencelevel, sanitize, possibleStates);
    } else if (method == "bootstrap") {
      out = _mcFitBootStrap(data, nboot, byrow, parallel, confidencelevel, sanitize, possibleStates);
    } else if (method == "laplace") {
      out = _mcFitLaplacianSmooth(data, byrow, laplacian, sanitize, possibleStates);
    } else if (method == "map") {
      out = _mcFitMap(data, byrow, confidencelevel, hyperparam, sanitize, possibleStates);
    }
  }

  // markovchain object
  S4 estimate = out["estimate"];

  if (name != "") {
    estimate.slot("name") = name;
  }

  // transition matrix
  NumericMatrix transMatr = estimate.slot("transitionMatrix");

  // data is neither a data frame nor a matrix
  if (!Rf_inherits(data, "data.frame") && !Rf_isMatrix(data) && TYPEOF(data) != VECSXP)
    out["logLikelihood"] = _loglikelihood(data, transMatr);

  estimate.slot("states") = rownames(transMatr);
  out["estimate"] = estimate;

  return out;
}

// [[Rcpp::export(.noofVisitsDistRCpp)]]
NumericVector noofVisitsDistRCpp(NumericMatrix matrix, int i, int N) {

  // number of states in the process
  int noOfStates = matrix.ncol();
  arma::vec out = arma::zeros(noOfStates);
  arma::mat Tmatrix = as<arma::mat>(matrix);
  arma::mat temp = Tmatrix;

  // the initial distribution is in the transition matrix itself
  for (int j = 0; j < noOfStates; j++)
    out[j] = Tmatrix(i - 1, j);

  // The distribution for the Nth step is calculated after N multiplications of the transition matrix,
  // adding the corresponding ratios every time and finally dividing by N
  for (int p = 0; p < N - 1; p++) {
    temp = temp * Tmatrix;
    for (int j = 0; j < noOfStates; j++)
      out[j] += temp(i - 1, j);
  }

  out = out/N;
  NumericVector R = wrap(out);

  return R;
}
#endif
markovchain/src/Makevars.win0000644000176200001440000000033714376752612015653 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
#CXX_STD = CXX11
PKG_CXXFLAGS += -DRCPP_PARALLEL_USE_TBB=1
PKG_LIBS += $(shell "${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" \
            -e "RcppParallel::RcppParallelLibs()")
markovchain/src/utils.cpp0000644000176200001440000002405714257554210015224 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]]
#include <RcppArmadillo.h>
#include <unordered_set>
#include <unordered_map>
#include <string>

using namespace Rcpp;
using namespace arma;
using namespace std;

// Defined in probabilistic.cpp
bool isAccessible(S4 obj, String from, String to);

// Defined in probabilistic.cpp
LogicalMatrix reachabilityMatrix(S4 obj);

// matrix power function
// O(log n * m³) where m is the number of rows / cols of A
mat matrixPow(const mat& A, int n) {
  int m = A.n_rows;
  mat result = eye(m, m);
  mat partial = A;

  // We can decompose n = 2^a + 2^b + 2^c ...
with a > b > c >= 0 // Compute last = a + 1 while (n > 0) { if ((n & 1) > 0) result = result * partial; partial = partial * partial; n >>= 1; } return result; } // check if two vectors are intersected bool intersects(CharacterVector x, CharacterVector y) { if (x.size() < y.size()) return intersects(y, x); else { unordered_set values; bool intersect = false; for (auto value : x) values.insert(as(value)); for (auto it = y.begin(); it != y.end() && !intersect; ++it) intersect = values.count(as(*it)) > 0; return intersect; } } bool anyElement(const mat& matrix, bool (*condition)(const double&)) { int numRows = matrix.n_rows; int numCols = matrix.n_cols; bool found = false; for (int i = 0; i < numRows && !found; ++i) for (int j = 0; j < numCols && !found; ++j) found = condition(matrix(i, j)); return found; } bool allElements(const mat& matrix, bool (*condition)(const double&)) { int numRows = matrix.n_rows; int numCols = matrix.n_cols; bool all = true; for (int i = 0; i < numRows && all; ++i) for (int j = 0; j < numCols && all; ++j) all = condition(matrix(i, j)); return all; } bool approxEqual(const double& a, const double& b) { if (a >= b) return (a - b) <= 1E-7; else return approxEqual(b, a); } bool approxEqual(const cx_double& a, const cx_double& b){ double x = a.real() - b.real(); double y = a.imag() - b.imag(); return (x*x - y*y) <= 1E-14; } // check if prob is probability or not // [[Rcpp::export(.isProbability)]] bool isProb(double prob) { return (prob >= 0 && prob <= 1); } // checks if a matrix is stochastic (by rows or by columns), i.e. all // elements are probabilities and the rows (cols, resp.) sum 1 // [[Rcpp::export(.isStochasticMatrix)]] bool isStochasticMatrix(NumericMatrix m, bool byrow) { if (!byrow) m = transpose(m); int nrow = m.nrow(); int ncol = m.ncol(); bool isStochastic = true; double rowSum; for (int i = 0; i < nrow && isStochastic; ++i) { rowSum = 0; for (int j = 0; j < ncol && isStochastic; ++j) { isStochastic = m(i, j) >= 0; rowSum += m(i, j); } isStochastic = approxEqual(rowSum, 1); } return isStochastic; } // [[Rcpp::export(.isProbabilityVector)]] bool isProbVector(NumericVector prob) { bool result = true; double sumProbs = 0; for (int i = 0; i < prob.size() && result; ++i) { result = prob[i] >= 0; sumProbs += prob[i]; } return result && approxEqual(sumProbs, 1); } // [[Rcpp::export(.testthatIsAccesibleRcpp)]] bool checkIsAccesibleMethod(S4 obj) { CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); LogicalMatrix reachability = reachabilityMatrix(obj); int m = states.size(); bool correct = true; bool reachable; for (int i = 0; i < m && correct; ++i) { for (int j = 0; j < m && correct; ++j) { reachable = (byrow ? 
reachability(i, j) : reachability(j, i)); correct = isAccessible(obj, states(i), states(j)) == reachable; } } return correct; } // [[Rcpp::export(.approxEqualMatricesRcpp)]] bool approxEqual(NumericMatrix a, NumericMatrix b) { int a_ncol = a.ncol(); int b_ncol = b.ncol(); int a_nrow = a.nrow(); int b_nrow = b.nrow(); if (a_ncol != b_ncol || a_nrow != b_nrow) return false; else { bool equal = true; for (int i = 0; i < a_nrow && equal; ++i) for (int j = 0; j < a_ncol && equal; ++j) equal = approxEqual(a(i, j), b(i, j)); return equal; } } // This method receives the output of communicatingClasses(object) and object@states // and checks that in fact the communicating classes are a partition of states // Is a method agnostic on whether the Markov Chain was given by rows or columns // [[Rcpp::export(.testthatIsPartitionRcpp)]] bool isPartition(List commClasses, CharacterVector states) { int n = states.size(); unordered_set used; unordered_set originalStates; int numClassStates = 0; bool partition = true; for (auto state : states) originalStates.insert((string) state); // Check that the union of the classes is // states and they do not overlap for (int i = 0; i < commClasses.size() && partition; ++i) { CharacterVector currentClass = commClasses(i); numClassStates += currentClass.size(); for (int j = 0; j < currentClass.size() && partition; ++j) { string state = (string) currentClass(j); partition = used.count(state) == 0 && originalStates.count(state) > 0; used.insert(state); } } return partition && numClassStates == n; } // This is simply a method that checks the following recurrence, // naming p = probs, f = hitting, it checks: // // f(i, j) = p(i, j) + ∑_{k ≠ j} p(i, k) f(k, j) // // where p are the transitionMatrix probs and hitting are the // hitting probabilities for the Markov Chain associated to // probs. byrow indicates whether probs is an stochastic matrix // by rows or by columns. // [[Rcpp::export(.testthatAreHittingRcpp)]] bool areHittingProbabilities(NumericMatrix probs, NumericMatrix hitting, bool byrow) { if (!byrow) { probs = transpose(probs); hitting = transpose(hitting); } int numStates = probs.nrow(); bool holds = true; double result; for (int i = 0; i < numStates && holds; ++i) { for (int j = 0; j < numStates && holds; ++j) { result = 0; for (int k = 0; k < numStates; ++k) if (k != j) result -= probs(i, k) * hitting(k, j); result += hitting(i, j) - probs(i, j); holds = approxEqual(result, 0); } } return holds; } // This is simply a method that checks the following recurrence, // naming p = probs, E = mean number of visits, f = hitting // probabilities, it checks: // // E(i, j) = p(i, j) / (1 - f(j, j)) + ∑_{k ≠ j} p(i, k) E(k, j) // // Note this recurrence is similar to the one for hitting probabilities // We have to take care when E(i, j) = 0, because we would have to check // that in either p(i, k) = 0 or E(k, j) = 0. If E(i, j) = ∞ we would have // to check that either p(i, j) > 0 or (p(i, k) != 0 and E(k, j) = ∞) // // where p are the transitionMatrix probs, numVisits are the mean // number of visits for each state, and hitting are the hitting // probabilities for the Markov Chain associated to probs. // byrow indicates whether probs is an stochastic matrix // by rows or by columns. 
// [[Rcpp::export(.testthatAreMeanNumVisitsRcpp)]] bool areMeanNumVisits(NumericMatrix probs, NumericMatrix numVisits, NumericMatrix hitting, bool byrow) { if (!byrow) { probs = transpose(probs); numVisits = transpose(numVisits); hitting = transpose(hitting); } int numStates = probs.ncol(); bool holds = true; double result; double inverse; for (int j = 0; j < numStates && holds; ++j) { if (!approxEqual(hitting(j, j), 1)) { inverse = 1 / (1 - hitting(j, j)); for (int i = 0; i < numStates && holds; ++i) { result = 0; for (int k = 0; k < numStates; ++k) if (k != j) result -= probs(i, k) * numVisits(k, j); result += numVisits(i, j) - probs(i, j) * inverse; holds = approxEqual(result, 0); } } } return holds; } // [[Rcpp::export(.testthatRecurrentHittingRcpp)]] bool recurrentHitting(List recurrentClasses, NumericMatrix hitting, CharacterVector states, bool byrow) { if (!byrow) hitting = transpose(hitting); unordered_map stateToIndex; bool correct = true; int n = states.size(); for (int i = 0; i < n; ++i) stateToIndex[(string) states(i)] = i; for (CharacterVector recClass : recurrentClasses) { unordered_set classIndexes; for (auto state : recClass) classIndexes.insert(stateToIndex[(string) state]); for (int i : classIndexes) { for (int j = 0; j < n; ++j) { if (classIndexes.count(j) > 0) correct = correct && approxEqual(hitting(i, j), 1); else correct = correct && approxEqual(hitting(i, j), 0); } } } return correct; } // [[Rcpp::export(.testthatHittingAreOneRcpp)]] bool hittingProbsAreOne(NumericMatrix matrix) { bool allOne = true; int nrow = matrix.nrow(); int ncol = matrix.ncol(); for (int i = 0; i < nrow && allOne; ++i) for (int j = 0; j < ncol && allOne; ++j) allOne = approxEqual(matrix(i, j), 1); return allOne; } // [[Rcpp::export(.testthatAbsorbingAreRecurrentClassRcpp)]] bool absorbingAreRecurrentClass(CharacterVector absorbingStates, List recurrentClasses) { unordered_set singletonRecurrent; unordered_set absorbing; string current; bool diffEmpty = true; for (CharacterVector recClass : recurrentClasses) if (recClass.size() == 1) singletonRecurrent.insert((string) (*recClass.begin())); for (auto state : absorbingStates) absorbing.insert((string) state); for (int i = 0; i < absorbingStates.size() && diffEmpty; ++i) { current = (string) absorbingStates(i); diffEmpty = singletonRecurrent.count(current) > 0; } for (auto it = singletonRecurrent.begin(); it != singletonRecurrent.end() && diffEmpty; ++it) { current = (string) (*it); diffEmpty = absorbing.count(current) > 0; } return diffEmpty; } markovchain/src/ctmcClassesAndMethods.cpp0000644000176200001440000000304613762012756020276 0ustar liggesusers#include using namespace Rcpp; //' @name generatorToTransitionMatrix //' @title Function to obtain the transition matrix from the generator //' @description The transition matrix of the embedded DTMC is inferred from the CTMC's generator //' //' @usage generatorToTransitionMatrix(gen, byrow = TRUE) //' //' @param gen The generator matrix //' @param byrow Flag to determine if rows (columns) sum to 0 //' @return Returns the transition matrix. //' //' @references //' Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. 
//' Anderson, University of Wisconsin at Madison //' //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{rctmc}},\code{\link{ctmc-class}} //' @examples //' energyStates <- c("sigma", "sigma_star") //' byRow <- TRUE //' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, //' byrow = byRow, dimnames = list(energyStates, energyStates)) //' generatorToTransitionMatrix(gen) //' //' @export // [[Rcpp::export]] NumericMatrix generatorToTransitionMatrix(NumericMatrix gen, bool byrow = true){ NumericMatrix transMatr(gen.nrow()); transMatr.attr("dimnames") = gen.attr("dimnames"); if (byrow) { for (int i = 0; i < gen.nrow(); i++){ for (int j = 0; j < gen.ncol(); j++){ if (i != j) transMatr(i, j) = -gen(i, j) / gen(i, i); } } } else { for (int j = 0; j < gen.ncol(); j++){ for (int i = 0; i < gen.nrow(); i++){ if (i != j) transMatr(i, j) = -gen(i, j) / gen(j, j); } } } return transMatr; } markovchain/src/helpers.h0000644000176200001440000001676113762012756015202 0ustar liggesusers#ifndef HELPERS_H #define HELPERS_H // [[Rcpp::depends(RcppArmadillo)]] #include #include #include using namespace Rcpp; using namespace std; template T sortByDimNames(const T mat){ CharacterVector colNames = colnames(mat); CharacterVector rowNames = rownames(mat); int n = colNames.size(); CharacterVector sortedNames(n); copy(rowNames.begin(), rowNames.end(), sortedNames.begin()); sortedNames.sort(); NumericVector colIdx(n); NumericVector rowIdx(n); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (colNames(j) == sortedNames(i)) colIdx(i) = j; if (rowNames(j) == sortedNames(i)) rowIdx(i) = j; } } T sortedMatrix(n); sortedMatrix.attr("dimnames") = List::create(sortedNames, sortedNames); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) sortedMatrix(i, j) = mat(rowIdx(i), colIdx(j)); return sortedMatrix; } double lbeta(double p, double q){ return lgamma(p) + lgamma(q) - lgamma(p + q); } template T transposeMatrix(T & mat) { int numRows = mat.nrow(); int numCols = mat.ncol(); T transpose(numCols, numRows); // Assign dim names transposed (rows to cols and viceversa) transpose.attr("dimnames") = List::create(colnames(mat), rownames(mat)); for (int i = 0; i < numCols; ++i) { transpose(i, _) = mat(_, i); } return transpose; } /* Purpose: BETAIN computes the incomplete Beta function ratio. Licensing: This code is distributed under the GNU LGPL license. Modified: 25 September 2014 Author: Original FORTRAN77 version by KL Majumder, GP Bhattacharjee. C++ version by John Burkardt. Reference: KL Majumder, GP Bhattacharjee, Algorithm AS 63: The incomplete Beta Integral, Applied Statistics, Volume 22, Number 3, 1973, pages 409-411. Parameters: Input, double X, the argument, between 0 and 1. Input, double P, Q, the parameters, which must be positive. Input, double BETA, the logarithm of the complete beta function. Output, int &IFAULT, error flag. 0, no error. nonzero, an error occurred. Output, double BETAIN, the value of the incomplete Beta function ratio. */ double betain(double x, double p, double q, double beta) { double acu = 0.1E-14; double ai; double cx; bool indx; int ns; double pp; double psq; double qq; double rx; double temp; double term; double value; double xx; value = x; // Special cases. if (x == 0.0 || x == 1.0) { return value; } // Change tail if necessary and determine S. 
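// (When p < (p + q) * x the roles of p and q are swapped and x is replaced by 1 - x, using the
// symmetry I_x(p, q) = 1 - I_{1-x}(q, p) of the regularized incomplete beta function; the
// complement is undone at the end whenever indx is true.)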
psq = p + q; cx = 1.0 - x; if (p < psq * x){ xx = cx; cx = x; pp = q; qq = p; indx = true; } else { xx = x; pp = p; qq = q; indx = false; } term = 1.0; ai = 1.0; value = 1.0; ns = (int) (qq + cx * psq); // Use the Soper reduction formula. rx = xx / cx; temp = qq - ai; if (ns == 0) { rx = xx; } bool loop = true; while (loop) { term = term * temp * rx / (pp + ai); value = value + term;; temp = fabs(term); if (temp <= acu && temp <= acu * value) { value = value * exp (pp * log(xx) + (qq - 1.0) * log(cx) - beta) / pp; if (indx) value = 1.0 - value; loop = false; } ai = ai + 1.0; ns = ns - 1; if (0 <= ns) { temp = qq - ai; if (ns == 0) rx = xx; } else { temp = psq; psq = psq + 1.0; } } return value; } /* Purpose: XINBTA computes inverse of the incomplete Beta function. Discussion: The accuracy exponent SAE was loosened from -37 to -30, because the code would not otherwise accept the results of an iteration with p = 0.3, q = 3.0, alpha = 0.2. Licensing: This code is distributed under the GNU LGPL license. Modified: 25 September 2014 Author: Original FORTRAN77 version by GW Cran, KJ Martin, GE Thomas. C++ version by John Burkardt. Reference: GW Cran, KJ Martin, GE Thomas, Remark AS R19 and Algorithm AS 109: A Remark on Algorithms AS 63: The Incomplete Beta Integral and AS 64: Inverse of the Incomplete Beta Integeral, Applied Statistics, Volume 26, Number 1, 1977, pages 111-114. Parameters: Input, double P, Q, the parameters of the incomplete Beta function. Input, double BETA, the logarithm of the value of the complete Beta function. Input, double ALPHA, the value of the incomplete Beta function. 0 <= ALPHA <= 1. Output, int &IFAULT, error flag. 0, no error occurred. nonzero, an error occurred. Output, double XINBTA, the argument of the incomplete Beta function which produces the value ALPHA. Local Parameters: Local, double SAE, requests an accuracy of about 10^SAE. */ double xinbta ( double p, double q, double beta, double alpha ) { double a; double acu; double adj; double fpu; double g; double h; int iex; bool indx; double pp; double prev; double qq; double r; double s; double sae = -30.0; double sq; double t; double tx; double value; double w; double xin; double y; double yprev; fpu = pow(10.0, sae); value = alpha; // If the answer is easy to determine, return immediately. if (alpha == 0.0 || alpha == 1) return alpha; // Change tail if necessary. if (0.5 < alpha) { a = 1.0 - alpha; pp = q; qq = p; indx = true; } else { a = alpha; pp = p; qq = q; indx = false; } // Calculate the initial approximation. r = sqrt(- log (a * a)); y = r - (2.30753 + 0.27061 * r) / (1.0 + ( 0.99229 + 0.04481 * r) * r); if (1.0 < pp && 1.0 < qq) { r = (y * y - 3.0) / 6.0; s = 1.0 / (pp + pp - 1.0); t = 1.0 / (qq + qq - 1.0); h = 2.0 / (s + t); w = y * sqrt(h + r) / h - (t - s) * (r + 5.0 / 6.0 - 2.0 / (3.0 * h)); value = pp / (pp + qq * exp(w + w)); } else { r = qq + qq; t = 1.0 / (9.0 * qq); t = r * pow(1.0 - t + y * sqrt ( t ), 3); if (t <= 0.0) { value = 1.0 - exp( (log((1.0 - a) * qq) + beta) / qq ); } else { t = (4.0 * pp + r - 2.0) / t; if (t <= 1.0) value = exp( (log(a * pp) + beta) / pp ); else value = 1.0 - 2.0 / (t + 1.0); } } // Solve for X by a modified Newton-Raphson method, // using the function BETAIN. r = 1.0 - pp; t = 1.0 - qq; yprev = 0.0; sq = 1.0; prev = 1.0; if (value < 0.0001) value = 0.0001; if (0.9999 < value) value = 0.9999; iex = max(- 5.0 / pp / pp - 1.0 / pow (a, 0.2) - 13.0, sae); acu = pow(10.0, iex); // Iteration loop. 
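// (Each pass computes the residual betain(value) - alpha, multiplies it by the reciprocal of the
// beta density to obtain a Newton step, and shrinks the step factor g by a factor of 3 until the
// new iterate tx stays in [0, 1] and the squared step decreases; iteration stops once the step or
// the residual falls below acu.)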
for ( ; ; ) { y = betain(value, pp, qq, beta); xin = value; y = (y - a) * exp(beta + r * log (xin) + t * log (1.0 - xin)); if (y * yprev <= 0.0) prev = max(sq, fpu); g = 1.0; for ( ; ; ) { for ( ; ; ) { adj = g * y; sq = adj * adj; if (sq < prev) { tx = value - adj; if (0.0 <= tx && tx <= 1.0) break; } g = g / 3.0; } // Check whether the current estimate is acceptable. // The change "VALUE = TX" was suggested by Ivan Ukhov. if (prev <= acu || y * y <= acu) { value = tx; if (indx) value = 1.0 - value; return value; } if (tx != 0.0 && tx != 1.0) break; g = g / 3.0; } if (tx == value) break; value = tx; yprev = y; } if(indx) value = 1.0 - value; return value; } #endif markovchain/src/mapFitFunctions.h0000644000176200001440000001732713762012756016650 0ustar liggesusers/* * Function to remove NA values from a vector */ CharacterVector clean_nas(CharacterVector elements_na){ CharacterVector elements; for(int i = 0; i < elements_na.size();i++) if(elements_na[i] != "NA") elements.push_back(elements_na[i]); return elements; } List _mcFitMap(SEXP data, bool byrow, double confidencelevel, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { if(TYPEOF(data) != VECSXP) { data = List::create(as(data)); } List seqs = as(data); CharacterVector elements; for(int i = 0;i < (int)seqs.size();i++) { CharacterVector tseq = unique(as(seqs[i])); for(int j = 0;j < (int)tseq.size();j++) { if(tseq[j] != "NA") { elements.push_back(tseq[j]); } } } elements = unique(union_(elements, possibleStates)).sort(); // number of unique states int sizeMatr = elements.size(); // if no hyperparam argument provided, use default value of 1 for all if(hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { // matrix with all entries 1 NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(elements, elements); for(int i = 0; i < sizeMatr; i++) for(int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } //-----------beginning of validity checking of hyperparam matrix---------------------- // validity check for hyperparam matrix if(hyperparam.nrow() != hyperparam.ncol()) { stop("Dimensions of the hyperparameter matrix are inconsistent"); } if(hyperparam.nrow() < sizeMatr) { stop("Hyperparameters for all state transitions must be provided"); } // extract rows and columns name out of hyperparam matrix List dimNames = hyperparam.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; // size of hyperparam matrix int sizeHyperparam = hyperparam.ncol(); // sorted order of hyperparam rows and columns name CharacterVector sortedColNames(sizeHyperparam), sortedRowNames(sizeHyperparam); for(int i = 0; i < sizeHyperparam; i++) { sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); } sortedColNames.sort(); sortedRowNames.sort(); // validity of hyperparam matrix for(int i = 0; i < sizeHyperparam; i++){ // columns names must be different // rows names must be different if(i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) { stop("The states must all be unique"); } // same states should be present in rows and columns else if(sortedColNames(i) != sortedRowNames(i)) { stop("The set of row names must be the same as the set of column names"); } // chech whether any state in column names exists which is not in the given sequence bool found = false; for(int j = 0; j < sizeMatr; j++) { if(elements(j) == sortedColNames(i)) { found = true; break; } } // hyperparam 
may contain states not in stringchar if(!found) { elements.push_back(sortedColNames(i)); } } // check for the case where hyperparam has missing data for(int i = 0; i < sizeMatr; i++){ bool found = false; for(int j = 0; j < sizeHyperparam; j++) { if(sortedColNames(j) == elements(i)) { found = true; break; } } if(!found) stop("Hyperparameters for all state transitions must be provided"); } elements = elements.sort(); sizeMatr = elements.size(); for(int i = 0; i < sizeMatr; i++) for(int j = 0; j < sizeMatr; j++) if(hyperparam(i, j) < 1.) stop("The hyperparameter elements must all be greater than or equal to 1"); //-----------end of validity checking of hyperparam matrix---------------------- // permute the elements of hyperparam such that the row, column names are sorted hyperparam = sortByDimNames(hyperparam); // helper matrices which will help in the calculation of transition matrix // other matrices will be returned as a result NumericMatrix mapEstMatr(sizeMatr), expMatr(sizeMatr); NumericMatrix freqMatr(sizeMatr); mapEstMatr.attr("dimnames") = List::create(elements, elements); expMatr.attr("dimnames") = List::create(elements, elements); // matrices to be returned NumericMatrix lowerEndpointMatr = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); NumericMatrix upperEndpointMatr = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); NumericMatrix stdError = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); // populate frequeny matrix for old data; this is used for inference for(int k = 0;k < seqs.size();k++) { CharacterVector stringchar = as(seqs[k]); int posFrom = 0, posTo = 0; for(long int i = 0; i < stringchar.size() - 1; i ++) { if(stringchar[i] != "NA" && stringchar[i+1] != "NA"){ for (int j = 0; j < sizeMatr; j ++) { if(stringchar[i] == elements[j]) posFrom = j; if(stringchar[i + 1] == elements[j]) posTo = j; } freqMatr(posFrom,posTo)++; } } } // sanitize and to row probs for (int i = 0; i < sizeMatr; i++) { // rowsum of frequency matrix and hyperparam matrix double rowSum = 0, paramRowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j), paramRowSum += hyperparam(i, j); } // toRowProbs for (int j = 0; j < sizeMatr; j++) { // confidence intervals and bounds double p = freqMatr(i, j) + hyperparam(i, j), q = rowSum + paramRowSum - freqMatr(i, j) - hyperparam(i, j); // expected value of the transition parameters expMatr(i, j) = p / (p + q); if(p + q == sizeMatr) { mapEstMatr(i, j) = (sanitize ? 1.0 / sizeMatr : 0); } else { // maximum a posteriori estimate mapEstMatr(i, j) = (p - 1) / (p + q - sizeMatr); } // populate lowerEndPoint, upperEndPoint and stand error matrices double beta = lbeta(p, q); double cdf = betain(double(mapEstMatr(i, j)), p, q, beta); if(cdf + confidencelevel / 2 > 1.) { upperEndpointMatr(i, j) = 1.; lowerEndpointMatr(i, j) = xinbta(p, q, beta, 1 - confidencelevel); } else if(cdf - confidencelevel / 2 < 0.) 
{ lowerEndpointMatr(i, j) = 0.; upperEndpointMatr(i, j) = xinbta(p, q, beta, confidencelevel); } else { lowerEndpointMatr(i, j) = xinbta(p, q, beta, cdf - confidencelevel / 2); upperEndpointMatr(i, j) = xinbta(p, q, beta, cdf + confidencelevel / 2); } stdError(i, j) = sqrt(p * q / (p + q) / (p + q) / (1 + p + q)); } } // transpose the matrix if columwise result is required if(byrow == false) { mapEstMatr = transposeMatrix(mapEstMatr); } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = mapEstMatr; outMc.slot("name") = "Bayesian Fit"; // message("\n\'estimate\' is the MAP set of parameters where as \'expectedValue\' \nis the expectation // of the parameters with respect to the posterior.\nThe confidence intervals are given for \'estimate\'."); return List::create(_["estimate"] = outMc, _["expectedValue"] = expMatr, _["standardError"] = stdError, _["confidenceInterval"] = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr)); } markovchain/src/multinomCI.cpp0000644000176200001440000001135213762012756016142 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include using namespace Rcpp; // returns the column sums of a matrix NumericVector colSums(NumericMatrix m) { NumericVector out; for (int i = 0; i < m.cols(); i ++) out.push_back(sum(m.column(i))); return out; } // poisson distribution double ppois(double n, double lambda) { return R::ppois(n,lambda, true, false); } // moments NumericVector moments(int c, double lambda) { double a = lambda+c; double b = lambda-c; double den = 0, poisA = 0, poisB = 0; if (b < 0) b = 0; if (b > 0) den = ppois(a, lambda) - ppois(b - 1, lambda); if (b == 0) den = ppois(a, lambda); NumericVector mu(4); NumericVector mom(5); for (int r = 1; r <= 4; r ++) { poisA = 0; poisB = 0; if ((a - r) >= 0) poisA = ppois(a, lambda) - ppois(a - r, lambda); if ((a - r) < 0) poisA = ppois(a, lambda); if ((b - r - 1) >= 0) poisB = ppois(b - 1, lambda) - ppois(b - r - 1, lambda); if ((b - r - 1) < 0 && (b - 1) >= 0) poisB = ppois(b - 1, lambda); if ((b - r - 1) < 0 && (b - 1) < 0) poisB = 0; mu[r - 1] = (pow(lambda, r)) * (1 - (poisA - poisB)/den); } mom[0] = mu[0]; mom[1] = mu[1] + mu[0] - pow(mu[0], 2); mom[2] = mu[2] + mu[1] * (3 - 3*mu[0]) + (mu[0] - 3*pow(mu[0], 2) + 2*pow(mu[0], 3)); mom[3] = mu[3] + mu[2] * (6 - 4*mu[0]) + mu[1] * (7 - 12*mu[0] + 6*pow(mu[0], 2)) + mu[0] - 4 * pow(mu[0], 2) + 6 * pow(mu[0], 3) - 3 * pow(mu[0], 4); mom[4] = den; return mom; } // coverage probability for the particular choice of c double truncpoi(int c, NumericVector x, double n, int k) { NumericMatrix m(k,5); for (int i = 0; i < k; i++) { double lambda = x[i]; NumericVector mom = moments(c, lambda); for (int j = 0; j < 5; j ++) m(i, j) = mom[j]; } for (int i = 0; i < k; i ++) m(i, 3) = m(i, 3) - 3 * m(i, 1) * m(i, 1); NumericVector s = colSums(m); double s1 = s[0]; double s2 = s[1]; double s3 = s[2]; double s4 = s[3]; double probn = 1 / (ppois(n, n) - ppois(n - 1, n)); double z = (n - s1) / sqrt(s2); double g1 = s3 / (pow(s2, (3.0 / 2.0))); double g2 = s4 / (pow(s2, 2)); double poly = 1.0 + g1 * (pow(z, 3) - 3 * z) / 6.0 + g2 * (pow(z, 4) - 6.0 * pow(z, 2) + 3.0) / 24.0 + pow(g1, 2) * (pow(z, 6) - 15.0 * pow(z, 4) + 45.0*pow(z, 2) - 15.0)/72.0; double f = poly * exp(-pow(z, 2)/2) / (sqrt(2.0) * R::gammafn(0.5)); double probx=1; for (int i = 0; i < k; i++) probx = probx * m(i, 4); return(probn * probx * f / sqrt(s2)); } // multinomial confidence intervals for a row 
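// (For each row, the smallest c whose truncated-Poisson coverage, given by truncpoi, exceeds the
// requested confidence level is located; delta interpolates between the coverages just below and
// just above the target, and the resulting intervals around the observed proportions are clipped
// to [0, 1].)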
// [[Rcpp::export(.multinomialCIForRowRcpp)]] NumericMatrix multinomialCIForRow(NumericVector x, double confidencelevel) { double n = std::accumulate(x.begin(), x.end(), 0.0); int k = x.size(); double c = 0; double p = 0, pold = 0; for (int cc = 1; cc <= n; cc ++) { p = truncpoi(cc, x, n, k); if (p > confidencelevel && pold < confidencelevel) { c = cc; break; } pold = p; } NumericMatrix salida(k, 2); double delta = (confidencelevel - pold) / (p - pold); NumericMatrix out(k,5); NumericMatrix num(k,1); c--; double obsp = 0; for (int i = 0; i < k; i++) { num(i, 0) = i; obsp = x[i] / n; out(i, 0) = obsp; out(i, 1) = obsp - c/n; out(i, 2) = obsp + c/n + 2*delta/n; if (out(i, 1) < 0) out(i, 1) = 0; if (out(i, 2) > 1) out(i, 2) = 1; out(i, 3) = obsp - c/n - 1/n; out(i, 4) = obsp + c/n + 1/n; salida(i, 0) = out(i, 1); salida(i, 1) = out(i, 2); } return salida; } // multinomial confidence intervals // [[Rcpp::export(.multinomialCIRcpp)]] List multinomCI(NumericMatrix transMat, NumericMatrix seqMat, double confidencelevel) { NumericMatrix res; NumericVector v; int nrows = transMat.nrow(); int ncols = transMat.ncol(); NumericMatrix lowerEndpointMatr(nrows, ncols); NumericMatrix upperEndpointMatr(nrows, ncols); double lowerEndpoint, upperEndpoint; for (int i = 0; i < nrows; i ++) { v = seqMat.row(i); res = multinomialCIForRow(v, confidencelevel); for (int j = 0; j < res.rows(); j++) { lowerEndpoint = res(j, 0); lowerEndpointMatr(i,j) = lowerEndpoint; upperEndpoint = res(j, 1); upperEndpointMatr(i,j) = upperEndpoint; } } upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = seqMat.attr("dimnames"); List out = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr); return out; } markovchain/src/ctmcProbabilistic.cpp0000644000176200001440000000704113762012756017517 0ustar liggesusers#include // [[Rcpp::depends(RcppArmadillo)]] #include #include using namespace Rcpp; using namespace RcppArmadillo; using namespace arma; using namespace std; // [[Rcpp::export(.ExpectedTimeRCpp)]] NumericVector ExpectedTimeRcpp(NumericMatrix x, NumericVector y) { NumericVector out; int size = x.nrow(); arma::mat T = arma::zeros(size, size); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) T(i, j) = x(i, j); arma::vec c = arma::zeros(size); for (int i = 0; i < size; i++) c[i] = y[i]; out = wrap(solve(T,c)); return out; } // [[Rcpp::export(.probabilityatTRCpp)]] NumericMatrix probabilityatTRCpp(NumericMatrix y) { int size = y.nrow(); NumericMatrix out(size,size); arma::mat T = arma::zeros(size, size); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) T(i, j) = y(i, j); T = expmat(T); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) out(i, j) = T(i, j); return out; } // [[Rcpp::export(.impreciseProbabilityatTRCpp)]] NumericVector impreciseProbabilityatTRCpp(S4 C, int i,int t, int s, double error) { CharacterVector states = C.slot("states"); int noOfstates = states.size(); NumericMatrix non_Q = C.slot("Q"); NumericMatrix non_range = C.slot("range"); arma::mat Q = arma::zeros(noOfstates, noOfstates); arma::mat range = arma::zeros(noOfstates, 2); // initialises armadillo matrices for (int p = 0; p < noOfstates; p++) for (int q = 0; q < noOfstates; q++) Q(p, q) = non_Q(p, q); // initialises armadillo matrices for (int p = 0; p < noOfstates; p++) for (int q = 0; q < 2; q++) range(p, q) = non_range(p, q); // initialses value of norm of Q double QNorm = -1.0; // calculates norm of Q for 
(int p =0; p < noOfstates; p++) { float sum = 0.0; for (int q = 0; q < noOfstates; q++) { if (Q(p, q) >= 0) sum = sum + Q(p, q); else sum = sum + -Q(p, q); } if (sum * range(p, 1) > QNorm) QNorm = sum * range(p, 1); } // calculates no. of iterations according to error rate, QNorm and other parameters int n; if ((s - t) * QNorm > (s - t) * (s - t) * QNorm * QNorm * 1/ (2 * error)) n = (int)(s - t) * QNorm; else n = (int)(s - t) * (s-t) * QNorm * QNorm * 1/(2 * error); // sets delta value float delta = (s - t) * 1.0/n; // declares and initialises initial f arma::vec Ii(noOfstates); for (int p = 0; p < noOfstates; p++) Ii[p] = 0; Ii[i-1] = 1; // calculation of Qgx vector arma::vec values = Q * Ii; arma::vec Qgx(noOfstates); for (int p = 0; p < noOfstates; p++) Qgx[p] = 0; for (int p = 0; p < noOfstates; p++) { if (values[p] * range(p, 0) < values[p] * range(p, 1)) Qgx[p] = values[p] * range(p,0); else Qgx[p] = values[p] * range(p,1); } Qgx = delta * Qgx; // runs n-1 iterations according to the algorithm // Qgx_i = Qgx_{i-1} + delta*Q*Qgx_{i-1} Qgx = Qgx + Ii; for (int iter = 0; iter < n - 1; iter++) { arma::vec temp = Qgx; values = Q * Qgx; for (int p = 0; p < noOfstates; p++) { // calculating keeping in mind the lower opertaotr values if (values[p] * range(p,0) < values[p] * range(p,1)) Qgx[p] = values[p] * range(p,0); else Qgx[p] = values[p] * range(p,1); } Qgx = delta * Qgx; Qgx = temp + Qgx; } NumericVector out; for (int p = 0; p < noOfstates; p++) out.push_back(Qgx[p]); return out; } markovchain/src/RcppExports.cpp0000644000176200001440000010676314425420606016360 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include #include using namespace Rcpp; #ifdef RCPP_USE_GLOBAL_ROSTREAM Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif // isGen bool isGen(NumericMatrix gen); RcppExport SEXP _markovchain_isGen(SEXP genSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type gen(genSEXP); rcpp_result_gen = Rcpp::wrap(isGen(gen)); return rcpp_result_gen; END_RCPP } // generatorToTransitionMatrix NumericMatrix generatorToTransitionMatrix(NumericMatrix gen, bool byrow); RcppExport SEXP _markovchain_generatorToTransitionMatrix(SEXP genSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type gen(genSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(generatorToTransitionMatrix(gen, byrow)); return rcpp_result_gen; END_RCPP } // ctmcFit List ctmcFit(List data, bool byrow, String name, double confidencelevel); RcppExport SEXP _markovchain_ctmcFit(SEXP dataSEXP, SEXP byrowSEXP, SEXP nameSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); Rcpp::traits::input_parameter< String >::type name(nameSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(ctmcFit(data, byrow, name, confidencelevel)); return rcpp_result_gen; END_RCPP } // ExpectedTimeRcpp NumericVector ExpectedTimeRcpp(NumericMatrix x, NumericVector y); RcppExport SEXP _markovchain_ExpectedTimeRcpp(SEXP xSEXP, SEXP ySEXP) { 
BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(ExpectedTimeRcpp(x, y)); return rcpp_result_gen; END_RCPP } // probabilityatTRCpp NumericMatrix probabilityatTRCpp(NumericMatrix y); RcppExport SEXP _markovchain_probabilityatTRCpp(SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(probabilityatTRCpp(y)); return rcpp_result_gen; END_RCPP } // impreciseProbabilityatTRCpp NumericVector impreciseProbabilityatTRCpp(S4 C, int i, int t, int s, double error); RcppExport SEXP _markovchain_impreciseProbabilityatTRCpp(SEXP CSEXP, SEXP iSEXP, SEXP tSEXP, SEXP sSEXP, SEXP errorSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type C(CSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type t(tSEXP); Rcpp::traits::input_parameter< int >::type s(sSEXP); Rcpp::traits::input_parameter< double >::type error(errorSEXP); rcpp_result_gen = Rcpp::wrap(impreciseProbabilityatTRCpp(C, i, t, s, error)); return rcpp_result_gen; END_RCPP } // seq2freqProb NumericVector seq2freqProb(CharacterVector sequence); RcppExport SEXP _markovchain_seq2freqProb(SEXP sequenceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type sequence(sequenceSEXP); rcpp_result_gen = Rcpp::wrap(seq2freqProb(sequence)); return rcpp_result_gen; END_RCPP } // seq2matHigh NumericMatrix seq2matHigh(CharacterVector sequence, int order); RcppExport SEXP _markovchain_seq2matHigh(SEXP sequenceSEXP, SEXP orderSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type sequence(sequenceSEXP); Rcpp::traits::input_parameter< int >::type order(orderSEXP); rcpp_result_gen = Rcpp::wrap(seq2matHigh(sequence, order)); return rcpp_result_gen; END_RCPP } // markovchainSequenceRcpp CharacterVector markovchainSequenceRcpp(int n, S4 markovchain, CharacterVector t0, bool include_t0); RcppExport SEXP _markovchain_markovchainSequenceRcpp(SEXP nSEXP, SEXP markovchainSEXP, SEXP t0SEXP, SEXP include_t0SEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< S4 >::type markovchain(markovchainSEXP); Rcpp::traits::input_parameter< CharacterVector >::type t0(t0SEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); rcpp_result_gen = Rcpp::wrap(markovchainSequenceRcpp(n, markovchain, t0, include_t0)); return rcpp_result_gen; END_RCPP } // markovchainListRcpp List markovchainListRcpp(int n, List object, bool include_t0, CharacterVector t0); RcppExport SEXP _markovchain_markovchainListRcpp(SEXP nSEXP, SEXP objectSEXP, SEXP include_t0SEXP, SEXP t0SEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< List >::type object(objectSEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); Rcpp::traits::input_parameter< CharacterVector >::type t0(t0SEXP); rcpp_result_gen = Rcpp::wrap(markovchainListRcpp(n, object, include_t0, t0)); return 
rcpp_result_gen; END_RCPP } // markovchainSequenceParallelRcpp List markovchainSequenceParallelRcpp(S4 listObject, int n, bool include_t0, CharacterVector init_state); RcppExport SEXP _markovchain_markovchainSequenceParallelRcpp(SEXP listObjectSEXP, SEXP nSEXP, SEXP include_t0SEXP, SEXP init_stateSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type listObject(listObjectSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); Rcpp::traits::input_parameter< CharacterVector >::type init_state(init_stateSEXP); rcpp_result_gen = Rcpp::wrap(markovchainSequenceParallelRcpp(listObject, n, include_t0, init_state)); return rcpp_result_gen; END_RCPP } // createSequenceMatrix NumericMatrix createSequenceMatrix(SEXP stringchar, bool toRowProbs, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain_createSequenceMatrix(SEXP stringcharSEXP, SEXP toRowProbsSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type stringchar(stringcharSEXP); Rcpp::traits::input_parameter< bool >::type toRowProbs(toRowProbsSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(createSequenceMatrix(stringchar, toRowProbs, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // mcListFitForList List mcListFitForList(List data); RcppExport SEXP _markovchain_mcListFitForList(SEXP dataSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); rcpp_result_gen = Rcpp::wrap(mcListFitForList(data)); return rcpp_result_gen; END_RCPP } // _matr2Mc S4 _matr2Mc(CharacterMatrix matrData, double laplacian, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain__matr2Mc(SEXP matrDataSEXP, SEXP laplacianSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterMatrix >::type matrData(matrDataSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(_matr2Mc(matrData, laplacian, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // _list2Mc S4 _list2Mc(List data, double laplacian, bool sanitize); RcppExport SEXP _markovchain__list2Mc(SEXP dataSEXP, SEXP laplacianSEXP, SEXP sanitizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); rcpp_result_gen = Rcpp::wrap(_list2Mc(data, laplacian, sanitize)); return rcpp_result_gen; END_RCPP } // inferHyperparam List inferHyperparam(NumericMatrix transMatr, NumericVector scale, CharacterVector data); RcppExport SEXP _markovchain_inferHyperparam(SEXP transMatrSEXP, SEXP scaleSEXP, SEXP dataSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix 
>::type transMatr(transMatrSEXP); Rcpp::traits::input_parameter< NumericVector >::type scale(scaleSEXP); Rcpp::traits::input_parameter< CharacterVector >::type data(dataSEXP); rcpp_result_gen = Rcpp::wrap(inferHyperparam(transMatr, scale, data)); return rcpp_result_gen; END_RCPP } // markovchainFit List markovchainFit(SEXP data, String method, bool byrow, int nboot, double laplacian, String name, bool parallel, double confidencelevel, bool confint, NumericMatrix hyperparam, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain_markovchainFit(SEXP dataSEXP, SEXP methodSEXP, SEXP byrowSEXP, SEXP nbootSEXP, SEXP laplacianSEXP, SEXP nameSEXP, SEXP parallelSEXP, SEXP confidencelevelSEXP, SEXP confintSEXP, SEXP hyperparamSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type data(dataSEXP); Rcpp::traits::input_parameter< String >::type method(methodSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); Rcpp::traits::input_parameter< int >::type nboot(nbootSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< String >::type name(nameSEXP); Rcpp::traits::input_parameter< bool >::type parallel(parallelSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); Rcpp::traits::input_parameter< bool >::type confint(confintSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(markovchainFit(data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, confint, hyperparam, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // noofVisitsDistRCpp NumericVector noofVisitsDistRCpp(NumericMatrix matrix, int i, int N); RcppExport SEXP _markovchain_noofVisitsDistRCpp(SEXP matrixSEXP, SEXP iSEXP, SEXP NSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type N(NSEXP); rcpp_result_gen = Rcpp::wrap(noofVisitsDistRCpp(matrix, i, N)); return rcpp_result_gen; END_RCPP } // multinomialCIForRow NumericMatrix multinomialCIForRow(NumericVector x, double confidencelevel); RcppExport SEXP _markovchain_multinomialCIForRow(SEXP xSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(multinomialCIForRow(x, confidencelevel)); return rcpp_result_gen; END_RCPP } // multinomCI List multinomCI(NumericMatrix transMat, NumericMatrix seqMat, double confidencelevel); RcppExport SEXP _markovchain_multinomCI(SEXP transMatSEXP, SEXP seqMatSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMat(transMatSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type seqMat(seqMatSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(multinomCI(transMat, seqMat, 
confidencelevel)); return rcpp_result_gen; END_RCPP } // commClassesKernel List commClassesKernel(NumericMatrix P); RcppExport SEXP _markovchain_commClassesKernel(SEXP PSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); rcpp_result_gen = Rcpp::wrap(commClassesKernel(P)); return rcpp_result_gen; END_RCPP } // communicatingClasses List communicatingClasses(S4 object); RcppExport SEXP _markovchain_communicatingClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(communicatingClasses(object)); return rcpp_result_gen; END_RCPP } // transientStates CharacterVector transientStates(S4 object); RcppExport SEXP _markovchain_transientStates(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(transientStates(object)); return rcpp_result_gen; END_RCPP } // recurrentStates CharacterVector recurrentStates(S4 object); RcppExport SEXP _markovchain_recurrentStates(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(recurrentStates(object)); return rcpp_result_gen; END_RCPP } // recurrentClasses List recurrentClasses(S4 object); RcppExport SEXP _markovchain_recurrentClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(recurrentClasses(object)); return rcpp_result_gen; END_RCPP } // transientClasses List transientClasses(S4 object); RcppExport SEXP _markovchain_transientClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(transientClasses(object)); return rcpp_result_gen; END_RCPP } // reachabilityMatrix LogicalMatrix reachabilityMatrix(S4 obj); RcppExport SEXP _markovchain_reachabilityMatrix(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(reachabilityMatrix(obj)); return rcpp_result_gen; END_RCPP } // isAccessible bool isAccessible(S4 obj, String from, String to); RcppExport SEXP _markovchain_isAccessible(SEXP objSEXP, SEXP fromSEXP, SEXP toSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); Rcpp::traits::input_parameter< String >::type from(fromSEXP); Rcpp::traits::input_parameter< String >::type to(toSEXP); rcpp_result_gen = Rcpp::wrap(isAccessible(obj, from, to)); return rcpp_result_gen; END_RCPP } // summaryKernel List summaryKernel(S4 object); RcppExport SEXP _markovchain_summaryKernel(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(summaryKernel(object)); return rcpp_result_gen; END_RCPP } // firstpassageKernel NumericMatrix firstpassageKernel(NumericMatrix P, int i, int n); RcppExport SEXP _markovchain_firstpassageKernel(SEXP PSEXP, SEXP iSEXP, SEXP nSEXP) { BEGIN_RCPP 
Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(firstpassageKernel(P, i, n)); return rcpp_result_gen; END_RCPP } // firstPassageMultipleRCpp NumericVector firstPassageMultipleRCpp(NumericMatrix P, int i, NumericVector setno, int n); RcppExport SEXP _markovchain_firstPassageMultipleRCpp(SEXP PSEXP, SEXP iSEXP, SEXP setnoSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< NumericVector >::type setno(setnoSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(firstPassageMultipleRCpp(P, i, setno, n)); return rcpp_result_gen; END_RCPP } // expectedRewardsRCpp NumericVector expectedRewardsRCpp(NumericMatrix matrix, int n, NumericVector rewards); RcppExport SEXP _markovchain_expectedRewardsRCpp(SEXP matrixSEXP, SEXP nSEXP, SEXP rewardsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< NumericVector >::type rewards(rewardsSEXP); rcpp_result_gen = Rcpp::wrap(expectedRewardsRCpp(matrix, n, rewards)); return rcpp_result_gen; END_RCPP } // expectedRewardsBeforeHittingARCpp double expectedRewardsBeforeHittingARCpp(NumericMatrix matrix, int s0, NumericVector rewards, int n); RcppExport SEXP _markovchain_expectedRewardsBeforeHittingARCpp(SEXP matrixSEXP, SEXP s0SEXP, SEXP rewardsSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type s0(s0SEXP); Rcpp::traits::input_parameter< NumericVector >::type rewards(rewardsSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(expectedRewardsBeforeHittingARCpp(matrix, s0, rewards, n)); return rcpp_result_gen; END_RCPP } // gcd int gcd(int a, int b); RcppExport SEXP _markovchain_gcd(SEXP aSEXP, SEXP bSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type a(aSEXP); Rcpp::traits::input_parameter< int >::type b(bSEXP); rcpp_result_gen = Rcpp::wrap(gcd(a, b)); return rcpp_result_gen; END_RCPP } // period int period(S4 object); RcppExport SEXP _markovchain_period(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(period(object)); return rcpp_result_gen; END_RCPP } // predictiveDistribution double predictiveDistribution(CharacterVector stringchar, CharacterVector newData, NumericMatrix hyperparam); RcppExport SEXP _markovchain_predictiveDistribution(SEXP stringcharSEXP, SEXP newDataSEXP, SEXP hyperparamSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type stringchar(stringcharSEXP); Rcpp::traits::input_parameter< CharacterVector >::type newData(newDataSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); rcpp_result_gen = 
Rcpp::wrap(predictiveDistribution(stringchar, newData, hyperparam)); return rcpp_result_gen; END_RCPP } // priorDistribution NumericVector priorDistribution(NumericMatrix transMatr, NumericMatrix hyperparam); RcppExport SEXP _markovchain_priorDistribution(SEXP transMatrSEXP, SEXP hyperparamSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMatr(transMatrSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); rcpp_result_gen = Rcpp::wrap(priorDistribution(transMatr, hyperparam)); return rcpp_result_gen; END_RCPP } // hittingProbabilities NumericMatrix hittingProbabilities(S4 object); RcppExport SEXP _markovchain_hittingProbabilities(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(hittingProbabilities(object)); return rcpp_result_gen; END_RCPP } // canonicForm S4 canonicForm(S4 obj); RcppExport SEXP _markovchain_canonicForm(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(canonicForm(obj)); return rcpp_result_gen; END_RCPP } // steadyStates NumericMatrix steadyStates(S4 obj); RcppExport SEXP _markovchain_steadyStates(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(steadyStates(obj)); return rcpp_result_gen; END_RCPP } // absorbingStates CharacterVector absorbingStates(S4 obj); RcppExport SEXP _markovchain_absorbingStates(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(absorbingStates(obj)); return rcpp_result_gen; END_RCPP } // isIrreducible bool isIrreducible(S4 obj); RcppExport SEXP _markovchain_isIrreducible(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(isIrreducible(obj)); return rcpp_result_gen; END_RCPP } // isRegular bool isRegular(S4 obj); RcppExport SEXP _markovchain_isRegular(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(isRegular(obj)); return rcpp_result_gen; END_RCPP } // meanAbsorptionTime NumericVector meanAbsorptionTime(S4 obj); RcppExport SEXP _markovchain_meanAbsorptionTime(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanAbsorptionTime(obj)); return rcpp_result_gen; END_RCPP } // absorptionProbabilities NumericMatrix absorptionProbabilities(S4 obj); RcppExport SEXP _markovchain_absorptionProbabilities(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(absorptionProbabilities(obj)); return rcpp_result_gen; END_RCPP } // meanFirstPassageTime NumericMatrix meanFirstPassageTime(S4 obj, CharacterVector destination); RcppExport SEXP _markovchain_meanFirstPassageTime(SEXP objSEXP, SEXP destinationSEXP) { BEGIN_RCPP Rcpp::RObject 
rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); Rcpp::traits::input_parameter< CharacterVector >::type destination(destinationSEXP); rcpp_result_gen = Rcpp::wrap(meanFirstPassageTime(obj, destination)); return rcpp_result_gen; END_RCPP } // meanRecurrenceTime NumericVector meanRecurrenceTime(S4 obj); RcppExport SEXP _markovchain_meanRecurrenceTime(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanRecurrenceTime(obj)); return rcpp_result_gen; END_RCPP } // meanNumVisits NumericMatrix meanNumVisits(S4 obj); RcppExport SEXP _markovchain_meanNumVisits(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanNumVisits(obj)); return rcpp_result_gen; END_RCPP } // isProb bool isProb(double prob); RcppExport SEXP _markovchain_isProb(SEXP probSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type prob(probSEXP); rcpp_result_gen = Rcpp::wrap(isProb(prob)); return rcpp_result_gen; END_RCPP } // isStochasticMatrix bool isStochasticMatrix(NumericMatrix m, bool byrow); RcppExport SEXP _markovchain_isStochasticMatrix(SEXP mSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type m(mSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(isStochasticMatrix(m, byrow)); return rcpp_result_gen; END_RCPP } // isProbVector bool isProbVector(NumericVector prob); RcppExport SEXP _markovchain_isProbVector(SEXP probSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type prob(probSEXP); rcpp_result_gen = Rcpp::wrap(isProbVector(prob)); return rcpp_result_gen; END_RCPP } // checkIsAccesibleMethod bool checkIsAccesibleMethod(S4 obj); RcppExport SEXP _markovchain_checkIsAccesibleMethod(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(checkIsAccesibleMethod(obj)); return rcpp_result_gen; END_RCPP } // approxEqual bool approxEqual(NumericMatrix a, NumericMatrix b); RcppExport SEXP _markovchain_approxEqual(SEXP aSEXP, SEXP bSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type a(aSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type b(bSEXP); rcpp_result_gen = Rcpp::wrap(approxEqual(a, b)); return rcpp_result_gen; END_RCPP } // isPartition bool isPartition(List commClasses, CharacterVector states); RcppExport SEXP _markovchain_isPartition(SEXP commClassesSEXP, SEXP statesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type commClasses(commClassesSEXP); Rcpp::traits::input_parameter< CharacterVector >::type states(statesSEXP); rcpp_result_gen = Rcpp::wrap(isPartition(commClasses, states)); return rcpp_result_gen; END_RCPP } // areHittingProbabilities bool areHittingProbabilities(NumericMatrix probs, NumericMatrix hitting, bool byrow); RcppExport SEXP _markovchain_areHittingProbabilities(SEXP probsSEXP, SEXP hittingSEXP, SEXP byrowSEXP) { 
BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type probs(probsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(areHittingProbabilities(probs, hitting, byrow)); return rcpp_result_gen; END_RCPP } // areMeanNumVisits bool areMeanNumVisits(NumericMatrix probs, NumericMatrix numVisits, NumericMatrix hitting, bool byrow); RcppExport SEXP _markovchain_areMeanNumVisits(SEXP probsSEXP, SEXP numVisitsSEXP, SEXP hittingSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type probs(probsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type numVisits(numVisitsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(areMeanNumVisits(probs, numVisits, hitting, byrow)); return rcpp_result_gen; END_RCPP } // recurrentHitting bool recurrentHitting(List recurrentClasses, NumericMatrix hitting, CharacterVector states, bool byrow); RcppExport SEXP _markovchain_recurrentHitting(SEXP recurrentClassesSEXP, SEXP hittingSEXP, SEXP statesSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type recurrentClasses(recurrentClassesSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< CharacterVector >::type states(statesSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(recurrentHitting(recurrentClasses, hitting, states, byrow)); return rcpp_result_gen; END_RCPP } // hittingProbsAreOne bool hittingProbsAreOne(NumericMatrix matrix); RcppExport SEXP _markovchain_hittingProbsAreOne(SEXP matrixSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); rcpp_result_gen = Rcpp::wrap(hittingProbsAreOne(matrix)); return rcpp_result_gen; END_RCPP } // absorbingAreRecurrentClass bool absorbingAreRecurrentClass(CharacterVector absorbingStates, List recurrentClasses); RcppExport SEXP _markovchain_absorbingAreRecurrentClass(SEXP absorbingStatesSEXP, SEXP recurrentClassesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type absorbingStates(absorbingStatesSEXP); Rcpp::traits::input_parameter< List >::type recurrentClasses(recurrentClassesSEXP); rcpp_result_gen = Rcpp::wrap(absorbingAreRecurrentClass(absorbingStates, recurrentClasses)); return rcpp_result_gen; END_RCPP } static const R_CallMethodDef CallEntries[] = { {"_markovchain_isGen", (DL_FUNC) &_markovchain_isGen, 1}, {"_markovchain_generatorToTransitionMatrix", (DL_FUNC) &_markovchain_generatorToTransitionMatrix, 2}, {"_markovchain_ctmcFit", (DL_FUNC) &_markovchain_ctmcFit, 4}, {"_markovchain_ExpectedTimeRcpp", (DL_FUNC) &_markovchain_ExpectedTimeRcpp, 2}, {"_markovchain_probabilityatTRCpp", (DL_FUNC) &_markovchain_probabilityatTRCpp, 1}, {"_markovchain_impreciseProbabilityatTRCpp", (DL_FUNC) &_markovchain_impreciseProbabilityatTRCpp, 5}, {"_markovchain_seq2freqProb", (DL_FUNC) &_markovchain_seq2freqProb, 1}, {"_markovchain_seq2matHigh", (DL_FUNC) &_markovchain_seq2matHigh, 2}, 
{"_markovchain_markovchainSequenceRcpp", (DL_FUNC) &_markovchain_markovchainSequenceRcpp, 4}, {"_markovchain_markovchainListRcpp", (DL_FUNC) &_markovchain_markovchainListRcpp, 4}, {"_markovchain_markovchainSequenceParallelRcpp", (DL_FUNC) &_markovchain_markovchainSequenceParallelRcpp, 4}, {"_markovchain_createSequenceMatrix", (DL_FUNC) &_markovchain_createSequenceMatrix, 4}, {"_markovchain_mcListFitForList", (DL_FUNC) &_markovchain_mcListFitForList, 1}, {"_markovchain__matr2Mc", (DL_FUNC) &_markovchain__matr2Mc, 4}, {"_markovchain__list2Mc", (DL_FUNC) &_markovchain__list2Mc, 3}, {"_markovchain_inferHyperparam", (DL_FUNC) &_markovchain_inferHyperparam, 3}, {"_markovchain_markovchainFit", (DL_FUNC) &_markovchain_markovchainFit, 12}, {"_markovchain_noofVisitsDistRCpp", (DL_FUNC) &_markovchain_noofVisitsDistRCpp, 3}, {"_markovchain_multinomialCIForRow", (DL_FUNC) &_markovchain_multinomialCIForRow, 2}, {"_markovchain_multinomCI", (DL_FUNC) &_markovchain_multinomCI, 3}, {"_markovchain_commClassesKernel", (DL_FUNC) &_markovchain_commClassesKernel, 1}, {"_markovchain_communicatingClasses", (DL_FUNC) &_markovchain_communicatingClasses, 1}, {"_markovchain_transientStates", (DL_FUNC) &_markovchain_transientStates, 1}, {"_markovchain_recurrentStates", (DL_FUNC) &_markovchain_recurrentStates, 1}, {"_markovchain_recurrentClasses", (DL_FUNC) &_markovchain_recurrentClasses, 1}, {"_markovchain_transientClasses", (DL_FUNC) &_markovchain_transientClasses, 1}, {"_markovchain_reachabilityMatrix", (DL_FUNC) &_markovchain_reachabilityMatrix, 1}, {"_markovchain_isAccessible", (DL_FUNC) &_markovchain_isAccessible, 3}, {"_markovchain_summaryKernel", (DL_FUNC) &_markovchain_summaryKernel, 1}, {"_markovchain_firstpassageKernel", (DL_FUNC) &_markovchain_firstpassageKernel, 3}, {"_markovchain_firstPassageMultipleRCpp", (DL_FUNC) &_markovchain_firstPassageMultipleRCpp, 4}, {"_markovchain_expectedRewardsRCpp", (DL_FUNC) &_markovchain_expectedRewardsRCpp, 3}, {"_markovchain_expectedRewardsBeforeHittingARCpp", (DL_FUNC) &_markovchain_expectedRewardsBeforeHittingARCpp, 4}, {"_markovchain_gcd", (DL_FUNC) &_markovchain_gcd, 2}, {"_markovchain_period", (DL_FUNC) &_markovchain_period, 1}, {"_markovchain_predictiveDistribution", (DL_FUNC) &_markovchain_predictiveDistribution, 3}, {"_markovchain_priorDistribution", (DL_FUNC) &_markovchain_priorDistribution, 2}, {"_markovchain_hittingProbabilities", (DL_FUNC) &_markovchain_hittingProbabilities, 1}, {"_markovchain_canonicForm", (DL_FUNC) &_markovchain_canonicForm, 1}, {"_markovchain_steadyStates", (DL_FUNC) &_markovchain_steadyStates, 1}, {"_markovchain_absorbingStates", (DL_FUNC) &_markovchain_absorbingStates, 1}, {"_markovchain_isIrreducible", (DL_FUNC) &_markovchain_isIrreducible, 1}, {"_markovchain_isRegular", (DL_FUNC) &_markovchain_isRegular, 1}, {"_markovchain_meanAbsorptionTime", (DL_FUNC) &_markovchain_meanAbsorptionTime, 1}, {"_markovchain_absorptionProbabilities", (DL_FUNC) &_markovchain_absorptionProbabilities, 1}, {"_markovchain_meanFirstPassageTime", (DL_FUNC) &_markovchain_meanFirstPassageTime, 2}, {"_markovchain_meanRecurrenceTime", (DL_FUNC) &_markovchain_meanRecurrenceTime, 1}, {"_markovchain_meanNumVisits", (DL_FUNC) &_markovchain_meanNumVisits, 1}, {"_markovchain_isProb", (DL_FUNC) &_markovchain_isProb, 1}, {"_markovchain_isStochasticMatrix", (DL_FUNC) &_markovchain_isStochasticMatrix, 2}, {"_markovchain_isProbVector", (DL_FUNC) &_markovchain_isProbVector, 1}, {"_markovchain_checkIsAccesibleMethod", (DL_FUNC) &_markovchain_checkIsAccesibleMethod, 1}, 
{"_markovchain_approxEqual", (DL_FUNC) &_markovchain_approxEqual, 2}, {"_markovchain_isPartition", (DL_FUNC) &_markovchain_isPartition, 2}, {"_markovchain_areHittingProbabilities", (DL_FUNC) &_markovchain_areHittingProbabilities, 3}, {"_markovchain_areMeanNumVisits", (DL_FUNC) &_markovchain_areMeanNumVisits, 4}, {"_markovchain_recurrentHitting", (DL_FUNC) &_markovchain_recurrentHitting, 4}, {"_markovchain_hittingProbsAreOne", (DL_FUNC) &_markovchain_hittingProbsAreOne, 1}, {"_markovchain_absorbingAreRecurrentClass", (DL_FUNC) &_markovchain_absorbingAreRecurrentClass, 2}, {NULL, NULL, 0} }; RcppExport void R_init_markovchain(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } markovchain/vignettes/0000755000176200001440000000000014430767052014574 5ustar liggesusersmarkovchain/vignettes/higher_order_markov_chains.Rmd0000644000176200001440000003060214472736544022616 0ustar liggesusers--- title: plain: "Higher, possibly multivariate, Order Markov Chains in markovchain package" formatted: "Higher, possibly multivariate, Order Markov Chains in \\pkg{markovchain} package" short: "Higher order (multivariate) Markov chains" pagetitle: "Higher order (multivariate) Markov chains" author: - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat ACAS, UnipolSai R\&D address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedicato\_giorgio@yahoo.it} preamble: > \author{Deepak Yadav, Tae Seung Kang, Giorgio Alfredo Spedicato} \usepackage{graphicx} \usepackage{amsmath} \usepackage{tabularx} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} abstract: | The \pkg{markovchain} package contains functions to fit higher (possibly) multivariate order Markov chains. The functions are shown as well as simple exmaples output: if (rmarkdown::pandoc_version() < "2.2") function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{Higher order markov chains} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [Higher order Markov chains] formatted: [Higher order Markov chains] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ``` # Higher Order Markov Chains Continuous time Markov chains are discussed in the CTMC vignette which is a part of the package. An experimental `fitHigherOrder` function has been written in order to fit a higher order Markov chain ([@ching2013higher, @ching2008higher]). `fitHigherOrder` takes two inputs 1. sequence: a categorical data sequence. 2. order: order of Markov chain to fit with default value 2. The output will be a `list` which consists of 1. lambda: model parameter(s). 2. Q: a list of transition matrices. $Q_i$ is the $ith$ step transition matrix stored column-wise. 3. X: frequency probability vector of the given sequence. 
Its quadratic programming problem is solved using `solnp` function of the Rsolnp package [@pkg:Rsolnp]. ```{r load, results='hide', warning=FALSE, message=FALSE} require(markovchain) ``` ```{r higherOrder} if (requireNamespace("Rsolnp", quietly = TRUE)) { library(Rsolnp) data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) } ``` # Higher Order Multivariate Markov Chains ## Introduction HOMMC model is used for modeling behaviour of multiple categorical sequences generated by similar sources. The main reference is [@ching2008higher]. Assume that there are s categorical sequences and each has possible states in M. In nth order MMC the state probability distribution of the jth sequence at time $t = r + 1$ depend on the state probability distribution of all the sequences (including itself) at times $t = r, r - 1, ..., r - n + 1$. \[ x_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)}P_{h}^{(jk)}x_{r-h+1}^{(k)}, j = 1, 2, ..., s, r = n-1, n, ... \] with initial distribution $x_{0}^{(k)}, x_{1}^{(k)}, ... , x_{n-1}^{(k)} (k = 1, 2, ... , s)$. Here \[ \lambda _{jk}^{(h)} \geq 0, 1\leq j, k\leq s, 1\leq h\leq n \enspace and \enspace \sum_{k=1}^{s}\sum_{h=1}^{n} \lambda_{jk}^{(h)} = 1, j = 1, 2, 3, ... , s. \] Now we will see the simpler representation of the model which will help us understand the result of `fitHighOrderMultivarMC` method. \vspace{5mm} Let $X_{r}^{(j)} = ((x_{r}^{(j)})^{T}, (x_{r-1}^{(j)})^{T}, ..., (x_{r-n+1}^{(j)})^{T})^{T} for \enspace j = 1, 2, 3, ... , s.$ Then \vspace{5mm} \[ \begin{pmatrix} X_{r+1}^{(1)}\\ X_{r+1}^{(2)}\\ .\\ .\\ .\\ X_{r+1}^{(s)} \end{pmatrix} = \begin{pmatrix} B^{11}& B^{12}& .& .& B^{1s}& \\ B^{21}& B^{22}& .& .& B^{2s}& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ B^{s1}& B^{s2}& .& .& B^{ss}& \\ \end{pmatrix} \begin{pmatrix} X_{r}^{(1)}\\ X_{r}^{(2)}\\ .\\ .\\ .\\ X_{r}^{(s)} \end{pmatrix} \textrm{where} \] \[B^{ii} = \begin{pmatrix} \lambda _{ii}^{(1)}P_{1}^{(ii)}& \lambda _{ii}^{(2)}P_{2}^{(ii)}& .& .& \lambda _{ii}^{(n)}P_{n}^{(ii)}& \\ I& 0& .& .& 0& \\ 0& I& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& I& 0& \end{pmatrix}_{mn*mn} \textrm{and} \] \vspace{5mm} \[ B^{ij} = \begin{pmatrix} \lambda _{ij}^{(1)}P_{1}^{(ij)}& \lambda _{ij}^{(2)}P_{2}^{(ij)}& .& .& \lambda _{ij}^{(n)}P_{n}^{(ij)}& \\ 0& 0& .& .& 0& \\ 0& 0& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& 0& 0& \end{pmatrix}_{mn*mn} \textrm{when } i\neq j. \] \vspace{5mm} ## Representation of parameters in the code $P_{h}^{(ij)}$ is represented as $Ph(i,j)$ and $\lambda _{ij}^{(h)}$ as Lambdah(i,j). For example: $P_{2}^{(13)}$ as $P2(1,3)$ and $\lambda _{45}^{(3)}$ as Lambda3(4,5). ## Definition of HOMMC class ```{r hommcObject} showClass("hommc") ``` Any element of `hommc` class is comprised by following slots: 1. states: a character vector, listing the states for which transition probabilities are defined. 2. byrow: a logical element, indicating whether transition probabilities are shown by row or by column. 3. order: order of Multivariate Markov chain. 4. P: an array of all transition matrices. 5. Lambda: a vector to store the weightage of each transition matrix. 6. 
name: optional character element to name the HOMMC ## How to create an object of class HOMMC ```{r hommcCreate} states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ``` ## Fit HOMMC The `fitHighOrderMultivarMC` method is available to fit a HOMMC. Below are the three parameters of this method. 1. seqMat: a character matrix or a data frame, each column of which represents a categorical sequence. 2. order: order of the multivariate Markov chain. Default is 2. 3. Norm: the norm to be used. Default is 2. # A Marketing Example We tried to replicate the example found in [@ching2008higher] for an application of HOMMC. A soft-drink company in Hong Kong is facing an in-house problem of production planning and inventory control. A pressing issue is the storage space of its central warehouse, which often finds itself in a state of overflow or near capacity. The company is thus in urgent need to study the interplay between the storage space requirement and the overall growing sales demand. Each product can be classified into six possible states (1, 2, 3, 4, 5, 6) according to its sales volume. All products are labeled as 1 = no sales volume, 2 = very slow-moving (very low sales volume), 3 = slow-moving, 4 = standard, 5 = fast-moving or 6 = very fast-moving (very high sales volume). Such labels are useful from both marketing and production planning points of view. The data is contained in the `sales` object. ```{r hommsales} data(sales) head(sales) ``` The company would also like to predict the sales demand for an important customer in order to minimize its inventory build-up. More importantly, the company can understand the sales pattern of this customer and then develop a marketing strategy to deal with this customer. The data records the customer's sales demand sequences of five important products of the company for a year. We expect sales demand sequences generated by the same customer to be correlated to each other. Therefore, by exploring these relationships, one can obtain a better higher-order multivariate Markov model for such demand sequences, and hence obtain better prediction rules. In the application of [@ching2008higher], the order is arbitrarily chosen to be eight, i.e., $n = 8$. We first estimate all the transition probability matrices $P_{h}^{(ij)}$ and obtain estimates of the stationary probability distributions of the five products:
$\widehat{\boldsymbol{x}}^{(1)} = \begin{pmatrix} 0.0818& 0.4052& 0.0483& 0.0335& 0.0037& 0.4275 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(2)} = \begin{pmatrix} 0.3680& 0.1970& 0.0335& 0.0000& 0.0037& 0.3978 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(3)} = \begin{pmatrix} 0.1450& 0.2045& 0.0186& 0.0000& 0.0037& 0.6283 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(4)} = \begin{pmatrix} 0.0000& 0.3569& 0.1338& 0.1896& 0.0632& 0.2565 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(5)} = \begin{pmatrix} 0.0000& 0.3569& 0.1227& 0.2268& 0.0520& 0.2416 \end{pmatrix}^{\boldsymbol{T}}$ By solving the corresponding linear programming problems, we obtain the following higher-order multivariate Markov chain model: \vspace{3mm} $\boldsymbol{x}_{r+1}^{(1)} = \boldsymbol{P}_{1}^{(12)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(2)} = 0.6364\boldsymbol{P}_{1}^{(22)}\boldsymbol{x}_{r}^{(2)} + 0.3636\boldsymbol{P}_{3}^{(22)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(3)} = \boldsymbol{P}_{1}^{(35)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(4)} = 0.2994\boldsymbol{P}_{8}^{(42)}\boldsymbol{x}_{r}^{(2)} + 0.4324\boldsymbol{P}_{1}^{(45)}\boldsymbol{x}_{r}^{(5)} + 0.2681\boldsymbol{P}_{2}^{(45)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(5)} = 0.2718\boldsymbol{P}_{8}^{(52)}\boldsymbol{x}_{r}^{(2)} + 0.6738\boldsymbol{P}_{1}^{(54)}\boldsymbol{x}_{r}^{(4)} + 0.0544\boldsymbol{P}_{2}^{(55)}\boldsymbol{x}_{r}^{(5)}$ \vspace{3mm} According to the constructed 8th order multivariate Markov model, Products A and B are closely related. In particular, the sales demand of Product A depends strongly on Product B. The main reason is that the chemical nature of Products A and B is the same, but they have different packaging for marketing purposes. Moreover, Products B, C, D and E are closely related. Similarly, Products C and E have the same product flavor, but different packaging. In this model, it is interesting to note that both Products D and E depend quite strongly on Product B at order 8; such a relationship would be hard to capture with a conventional Markov model owing to the huge number of parameters it would require. The results show that the higher-order multivariate Markov model is quite useful for analyzing the relationships among the sales demand sequences. ```{r hommcFit, warning = FALSE, message = FALSE} # fit an 8th order multivariate Markov chain if (requireNamespace("Rsolnp", quietly = TRUE)) { object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2) } ``` We choose to show only the results reported in the paper. We see that the $\lambda$ values are quite close, but not equal, to those shown in the original paper.
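Before looking at those selected results, a minimal sketch (assuming the `Rsolnp`-based fit above succeeded) of how the slots of the fitted `hommc` object can be inspected directly is:

```{r hommcInspect}
if (requireNamespace("Rsolnp", quietly = TRUE)) {
  # one weight per pair of sequences (i, j) and lag h
  length(object@Lambda)
  # array collecting the corresponding transition matrices
  dim(object@P)
  # first few estimated weights
  head(object@Lambda)
}
```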
```{r result, echo = FALSE} if (requireNamespace("Rsolnp", quietly = TRUE)) { i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } } else { print("package Rsolnp unavailable") } ``` # References markovchain/vignettes/markovchainBiblio.bib0000644000176200001440000004703413762012756020706 0ustar liggesusers% Encoding: UTF-8 % Formatted with emacs using C-c C-q in BibTeX mode @book{dobrow2016introduction, title = {Introduction to Stochastic Processes with R}, author = {Dobrow, Robert P}, year = {2016}, publisher = {John Wiley \& Sons} } @manual{pkg:ctmcd, title = {ctmcd: Estimating the Parameters of a Continuous-Time Markov Chain from Discrete-Time Data}, author = {Marius Pfeuffer}, year = {2017}, note = {R package version 1.1}, url = {https://CRAN.R-project.org/package=ctmcd} } @article{Hu2002, author = {Hu, Yen Ting and Kiesel, Rudiger and Perraudin, William}, doi = {10.1016/S0378-4266(02)00268-6}, isbn = {0378-4266}, issn = {03784266}, journal = {Journal of Banking and Finance}, keywords = {Credit rating,Credit risk,Ordered probit,Sovereign default}, number = {7}, pages = {1383--1406}, title = {{The estimation of transition matrices for sovereign credit ratings}}, volume = {26}, year = {2002} } @book{de2016assicurazioni, title = {Assicurazioni sulla salute: caratteristiche, modelli attuariali e basi tecniche}, author = {{De Angelis, Paolo} and {Di Falco, L.}}, isbn = {9788815260840}, series = {Il Mulino}, url = {https://books.google.it/books?id=D56bjgEACAAJ}, year = {2016}, publisher = {Il Mulino} } @mastersthesis{skuriat2005statistical, author = "Skuriat-Olechnowska, Monika", title = "Statistical inference and hypothesis testing for Markov chains with Interval Censoring", school = "TUDelft", year = "2005" } @article{kullback1962tests, title = {Tests for Contingency Tables and Marltov Chains}, author = {Kullback, S and Kupperman, Michael and Ku, HH}, journal = {Technometrics}, volume = {4}, number = {4}, pages = {573--608}, year = {1962}, publisher = {Taylor \& Francis} } @article{israel2001finding, title = {Finding generators for Markov chains via empirical transition matrices, with applications to credit ratings}, author = {Israel, Robert B and Rosenthal, Jeffrey S and Wei, Jason Z}, journal = {Mathematical finance}, volume = {11}, number = {2}, pages = {245--265}, year = {2001}, publisher = {Wiley Online Library} } @manual{pkg:RcppParallel, title = {RcppParallel: Parallel Programming Tools for 'Rcpp'}, author = {JJ Allaire and Romain Francois and Kevin Ushey and Gregory Vandenbrouck and Marcus Geelnard and {Intel}}, year = {2016}, note = {R package version 4.3.19}, url = {http://rcppcore.github.io/RcppParallel/} } @manual{pkg:MultinomialCI, title = {MultinomialCI: Simultaneous confidence intervals for multinomial proportions according to the method by Sison and Glaz}, author = {Pablo J. 
Villacorta}, year = {2012}, note = {R package version 1.0}, url = {http://CRAN.R-project.org/package=MultinomialCI} } @article{sison1995simultaneous, title = {Simultaneous confidence intervals and sample size determination for multinomial proportions}, author = {Sison, Cristina P and Glaz, Joseph}, journal = {Journal of the American Statistical Association}, volume = {90}, number = {429}, pages = {366--369}, year = {1995}, publisher = {Taylor \& Francis} } @article{konstantopoulos2009markov, title = {Markov Chains and Random Walks}, author = {Konstantopoulos, Takis}, journal = {Lecture notes}, year = {2009} } @article{craigSendi, author = {{B. A. Craig} and {A. A. Sendi}}, title = {Estimation of the Transition Matrix of a Discrete-Time Markov Chain}, journal = {Health Economics}, year = {2002}, volume = {11}, pages = {33--42} } @misc{bardPpt, author = {J. F. Bard}, title = {Lecture 12.5 - Additional Issues Concerning Discrete-Time Markov Chains}, month = {April}, year = {2000}, url ={http://www.me.utexas.edu/~jensen%20/ORMM/instruction/powerpoint/or_models_09/12.5_dtmc2.ppt} } @incollection{bremaud1999discrete, author = {Br{\'e}maud, Pierre}, title = {Discrete-Time Markov Models}, booktitle = {Markov Chains}, publisher = {Springer}, year = {1999}, pages = {53--93} } @book{chambers, title = {Software for Data Analysis: Programming with \proglang{R}}, publisher = {Springer-Verlag}, year = {2008}, author = {Chambers, J.M.}, series = {Statistics and computing}, isbn = {9780387759357}, lccn = {2008922937} } @book{ching2006markov, title = {Markov Chains: Models, Algorithms and Applications}, publisher = {Springer-Verlag}, year = {2006}, author = {Ching, W.K. and Ng, M.K.}, series = {International Series in Operations Research \& Management Science}, isbn = {9780387293356}, lccn = {2005933263} } @article{pkg:igraph, author = {Gabor Csardi and Tamas Nepusz}, title = {The \pkg{igraph} Software Package for Complex Network Research}, journal = {InterJournal}, year = {2006}, volume = {Complex Systems}, pages = {1695}, url = {http://igraph.sf.net} } @book{denuit2007actuarial, title = {Actuarial modelling of claim counts: Risk classification, credibility and bonus-malus systems}, publisher = {Wiley}, year = {2007}, author = {Denuit, Michel and Mar{\'e}chal, Xavier and Pitrebois, Sandra and Walhin, Jean-Fran{\c{c}}ois} } @book{deshmukh2012multiple, title = {Multiple Decrement Models in Insurance: An Introduction Using \proglang{R}}, publisher = {Springer-Verlag}, year = {2012}, author = {Deshmukh, S.R.}, series = {SpringerLink : B{\"u}cher}, isbn = {9788132206590}, lccn = {2012942476} } @book{RcppR, title = {Seamless \proglang{R} and \proglang{C++} Integration with \pkg{Rcpp}}, publisher = {Springer-Verlag}, year = {2013}, author = {Dirk Eddelbuettel}, address = {New York}, note = {ISBN 978-1-4614-6867-7} } @misc{renaldoMatlab, author = {Renaldo Feres}, title = {Notes for Math 450 \proglang{MATLAB} Listings for Markov Chains}, year = {2007}, url = {http://www.math.wustl.edu/~feres/Math450Lect04.pdf} } @manual{mcmcR, title = {mcmc: Markov Chain Monte Carlo}, author = {Charles J. Geyer and Leif T. Johnson}, year = {2013}, note = {\proglang{R} package version 0.9-2}, url = {http://CRAN.R-project.org/package=mcmc} } @incollection{glassHall, author = {Glass, D.V. and Hall, J. 
R.}, title = {Social Mobility in Great Britain: A Study in Intergenerational Change in Status}, booktitle = {Social Mobility in Great Britain}, publisher = {Routledge and Kegan Paul}, year = {1954} } @manual{expmR, title = {\pkg{expm}: Matrix Exponential}, author = {Vincent Goulet and Christophe Dutang and Martin Maechler and David Firth and Marina Shapira and Michael Stadelmann and {expm-developers@lists.R-forge.R-project.org}}, year = {2013}, note = {R package version 0.99-1}, url = {http://CRAN.R-project.org/package=expm} } @manual{hmmR, title = {\pkg{HMM}: HMM - Hidden Markov Models}, author = {Scientific Software Development - Dr. Lin Himmelmann and {www.linhi.com}}, year = {2010}, note = {\proglang{R} package version 1.0}, url = {http://CRAN.R-project.org/package=HMM} } @book{landOfOz, title = {Introduction to Finite Mathematics}, publisher = {Prentice Hall}, year = {1974}, author = {{J. G. Kemeny} and {J. L.Snell} and {G. L. Thompson}} } @article{msmR, author = {Christopher H. Jackson}, title = {Multi-State Models for Panel Data: The \pkg{msm} Package for \proglang{R}}, journal = {Journal of Statistical Software}, year = {2011}, volume = {38}, pages = {1--29}, number = {8}, url = {http://www.jstatsoft.org/v38/i08/} } @misc{manchesterR, author = {Ian Jacob}, month = {May}, year = {2014}, title = {Is R Cost Effective?}, language = {En}, howpublished = {Electronic}, organization = {Manchester Centre for Health Economics}, note = {Presented on Manchester R Meeting}, url ={http://www.rmanchester.org/Presentations/Ian%20Jacob%20-%20Is%20R%20Cost%20Effective.pdf} } @TECHREPORT{blandenEtAlii, author = {Jo Blanden, Paul Gregg and Stephen Machin}, title = {Intergenerational Mobility in Europe and North America}, institution = {Center for Economic Performances}, year = {2005}, owner = {Giorgio Copia}, timestamp = {2014.01.01}, url ={http://cep.lse.ac.uk/about/news/IntergenerationalMobility.pdf} } @misc{Konstantopoulos2009, author = {Konstantopoulos, Takis}, title = {Markov Chains and Random Walks}, year = {2009}, url ={http://www2.math.uu.se/~takis/public_html/McRw/mcrw.pdf} } @misc{montgomery, author = {James Montgomery}, title = {Communication Classes}, year = {2009}, url = {http://www.ssc.wisc.edu/~jmontgom/commclasses.pdf} } @manual{DTMCPackR, title = {\pkg{DTMCPack}: Suite of Functions Related to Discrete-Time Discrete-State Markov Chains}, author = {William Nicholson}, year = {2013}, note = {R package version 0.1-2}, url = {http://CRAN.R-project.org/package=DTMCPack} } @article{averyHenderson, author = {{P. J. Avery} and {D. A. Henderson}}, title = {Fitting Markov Chain Models to Discrete State Series}, journal = {Applied Statistics}, year = {1999}, volume = {48}, pages = {53-61}, number = {1} } @manual{rSoftware, title = {\proglang{R}: A Language and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2013}, url = {http://www.R-project.org/} } @manual{pkg:matlab, title = {\pkg{matlab}: \proglang{MATLAB} emulation package}, author = {P. 
Roebuck}, year = {2011}, note = {R package version 0.8.9}, url = {http://CRAN.R-project.org/package=matlab} } @misc{probBook, author = {Laurie Snell}, title = {Probability Book: chapter 11}, year = {1999}, url ={http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf} } @article{pkg:markovchain, author = {Giorgio Alfredo Spedicato}, title = {{Discrete Time Markov Chains with R}}, year = {2017}, journal = {{The R Journal}}, url = {https://journal.r-project.org/archive/2017/RJ-2017-036/index.html} } @manual{MAPmcR, title = {Bayesian Inference of First Order Markov Chains}, author = {Sai Bhargav Yalamanchi and Giorgio Alfredo Spedicato}, month = {06}, year = {2015}, note = {R package version 0.2.5} } @misc{wiki:markov, author = {Wikipedia}, title = {Markov chain --- Wikipedia{,} The Free Encyclopedia}, year = {2013}, note = {[Online; accessed 23-August-2013]}, url = {http://en.wikipedia.org/w/index.php?title=Markov_chain&oldid=568910294} } @manual{CreditMetricsR, title = {CreditMetrics: Functions for Calculating the CreditMetrics Risk Model}, author = {Andreas Wittmann}, year = {2007}, note = {\proglang{R} package version 0.0-2} } @manual{mathematica9, title = {\proglang{Mathematica}}, author = {Wolfram Research, Inc.}, organization = {Wolfram Research, Inc.}, edition = {ninth}, year = {2013}, adddress = {Champaign, Illinois} } @misc{mathematica9MarkovChain, author = {Wolfram Research, Inc.}, year = {2013}, organization = {Wolfram Research, Inc.}, url ={http://www.wolfram.com/mathematica/new-in-9/markov-chains-and-queues/structural-properties-of-finite-markov-processes.html} } @article{sch, author = {Christopher C. Strelioff and James P. Crutchfield and Alfred W. Hubler}, title = {Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-class Modeling}, journal = {Sante Fe Working Papers}, year = {2007}, url = {http://arxiv.org/abs/math/0703715/} } @article{mstateR, author = {Liesbeth C. 
de Wreede and Marta Fiocco and Hein Putter}, title = {\pkg{mstate}: An \proglang{R} Package for the Analysis of Competing Risks and Multi-State Models}, journal = {Journal of Statistical Software}, year = {2011}, volume = {38}, pages = {1--30}, number = {7}, url = {http://www.jstatsoft.org/v38/i07/} } @mastersthesis{MSkuriat, author = {Monika Skuriat-Olechnowska}, title = {Statistical inference and hypothesis testing for Markov chains with Interval Censoring}, school = {Delft University of Technology}, type = {diploma thesis}, year = {2005} } @misc{wiki:CI, author = {Wikipedia}, title = {Confidence interval --- Wikipedia{,} The Free Encyclopedia}, year = {2015}, url = {https://en.wikipedia.org/w/index.php?title=Confidence_interval&oldid=667353161}, note = {[Online; accessed 21-June-2015]} } @book{pardo2005statistical, title = {Statistical inference based on divergence measures}, author = {Pardo, Leandro}, year = {2005}, publisher = {CRC Press} } @article{anderson1957statistical, title = {Statistical inference about Markov chains}, author = {Anderson, Theodore W and Goodman, Leo A}, journal = {The Annals of Mathematical Statistics}, pages = {89--110}, year = {1957}, publisher = {JSTOR} } @article{ching2008higher, title = {Higher-order multivariate Markov chains and their applications}, author = {Ching, Wai-Ki and Ng, Michael K and Fung, Eric S}, journal = {Linear Algebra and its Applications}, volume = {428}, number = {2}, pages = {492--507}, year = {2008}, publisher = {Elsevier} } @incollection{ching2013higher, title = {Higher-order markov chains}, author = {Ching, Wai-Ki and Huang, Ximin and Ng, Michael K and Siu, Tak-Kuen}, booktitle = {Markov Chains}, pages = {141--176}, year = {2013}, publisher = {Springer} } @manual{pkg:Rsolnp, title = {Rsolnp: General Non-linear Optimization Using Augmented Lagrange Multiplier Method}, author = {Alexios Ghalanos and Stefan Theussl}, year = {2014}, note = {R package version 1.15.} } @PhdThesis{YinyuYe, title = {Interior Algorithms for Linear, Quadratic, and Linearly Constrained Non-Linear Programming}, author = {Yinyu Ye}, year = {1987}, school = {Department of {ESS}, Stanford University} } @book{NorrisBook, title = {Markovchains}, author = {J. R. Norris}, year = {1998}, publisher = {Cambridge University Press} } @article{ictmcpaper, author = {Thomas Krak, Jasper De Bock, Arno Siebes}, title = {Imprecise Continuous Time Markov Chains}, journal = {International Journal of Approximate Reasoning}, year = {2017}, volume = {88}, pages = {452-528}, publisher = {arXiv} } @misc{committorlink, author = {StackOverflow}, url = {https://math.stackexchange.com/questions/1450399/probability-that-a-chain-will-enter-state-5-before-it-enters-state-3?newreg=82f90b66b949495a91661caad24db915}, year = {2015} } @book{GallagerBook, title = {Stochastic Processes: Theory for Applications}, author = {Robert G. Gallager}, year = {2013}, publisher = {Cambridge University Press} } @misc{Sigman, title = {Continuous Time markovchains}, author = {Karl Sigman}, year = {2009}, publisher = {Columbia University} } @article{freqArticle, author = {Alexander Kreinin, Marina Sidelnikova}, title = {Regularization Algorithms for Transition Matrices}, journal = {Algo Research Quarterly}, year = {2001}, volume = {4}, number = {1/2}, pages = {23--40}, month = mar, publisher = {ALGO RESEARCH QUARTERLY} } @book{GrinsteadSnell, author = {Grinstead, Charles M. 
and Snell, Laurie J.}, booktitle = {Introduction to Probability}, edition = {Version dated 4 July 2006}, publisher = {American Mathematical Society}, title = {{Grinstead and Snell's Introduction to Probability}}, url = {http://math.dartmouth.edu/\~{}prob/prob/prob.pdf}, year = {2006} } @article{noe_constructing_2009, title = {Constructing the equilibrium ensemble of folding pathways from short off-equilibrium simulations}, volume = {106}, copyright = {© 2009 . Freely available online through the PNAS open access option.}, issn = {0027-8424, 1091-6490}, url = {https://www.pnas.org/content/106/45/19011}, doi = {10.1073/pnas.0905466106}, language = {en}, number = {45}, urldate = {2019-01-13}, journal = {Proceedings of the National Academy of Sciences}, author = {Noé, Frank and Schütte, Christof and Vanden-Eijnden, Eric and Reich, Lothar and Weikl, Thomas R.}, month = nov, year = {2009}, pmid = {19887634}, keywords = {committor}, pages = {19011--19016} } @article{Tarjan, doi = {10.1137/0201010}, url = {https://doi.org/10.1137/0201010}, year = {1972}, month = jun, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})}, volume = {1}, number = {2}, pages = {146--160}, author = {Robert Tarjan}, title = {Depth-First Search and Linear Graph Algorithms}, journal = {{SIAM} Journal on Computing} }markovchain/vignettes/template.tex0000644000176200001440000000363013762012756017134 0ustar liggesusers\documentclass[ $for(classoption)$ $classoption$$sep$, $endfor$ ]{$documentclass$} \usepackage[utf8]{inputenc} \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \author{ $for(author)$$author.name$\\$author.affiliation$$sep$ \And $endfor$ } \title{$title.formatted$} \Plainauthor{$for(author)$$author.name$$sep$, $endfor$} $if(title.plain)$ \Plaintitle{$title.plain$} $endif$ $if(title.short)$ \Shorttitle{$title.short$} $endif$ \Abstract{ $abstract$ } $if(keywords.formatted)$ \Keywords{$for(keywords.formatted)$$keywords.formatted$$sep$, $endfor$} $endif$ $if(keywords.plain)$ \Plainkeywords{$for(keywords.plain)$$keywords.plain$$sep$, $endfor$} $endif$ $if(highlighting-macros)$ $highlighting-macros$ $endif$ %% publication information %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{$submitdate$} %% \Acceptdate{2012-06-04} \Address{ $for(author)$ $if(author.address)$ $author.name$\\ $author.affiliation$\\ $author.address$\\ $if(author.email)$E-mail: $author.email$\\$endif$ $if(author.url)$URL: $author.url$\\~\\$endif$ $endif$ $endfor$ } $if(csl-refs)$ \newlength{\cslhangindent} \setlength{\cslhangindent}{1.5em} \newenvironment{cslreferences}% {$if(csl-hanging-indent)$\setlength{\parindent}{0pt}% \everypar{\setlength{\hangindent}{\cslhangindent}}\ignorespaces$endif$}% {\par} $endif$ % Pandoc header $for(header-includes)$ $header-includes$ $endfor$ $preamble$ \begin{document} $body$ $if(natbib)$ $if(bibliography)$ $if(biblio-title)$ $if(book-class)$ \renewcommand\bibname{$biblio-title$} $else$ \renewcommand\refname{$biblio-title$} $endif$ $endif$ \bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$} $endif$ $endif$ $if(biblatex)$ \printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ $endif$ \end{document} markovchain/vignettes/an_introduction_to_markovchain_package.Rmd0000644000176200001440000032057114472736566025222 0ustar liggesusers--- title: plain: "The markovchain Package: A Package for Easily Handling Discrete Markov Chains in R" formatted: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov 
Chains in \\proglang{R}" short: "\\pkg{markovchain} package: discrete Markov chains in \\proglang{R}" pagetitle: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}" author: - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat FCAS, FSA, CSPA Unipol Group address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedygiorgio@gmail.com} url: www.statisticaladvisor.com - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Sai Bhargav Yalamanchi" affiliation: B-Tech student, Electrical Engineering address: > Indian Institute of Technology, Bombay Mumbai - 400 076, India email: \email{bhargavcoolboy@gmail.com} - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Ignacio Cordón" affiliation: Software Engineer address: > Madrid (Madrid), Spain email: \email{nacho.cordon.castillo@gmail.com} preamble: > \author{\small{Giorgio Alfredo Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi, Deepak Yadav, Ignacio Cordón}} \Plainauthor{G.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón} \usepackage{graphicx} \usepackage{amsmath} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} \usepackage{amsfonts} abstract: | The \pkg{markovchain} package aims to fill a gap within the \proglang{R} framework providing S4 classes and methods for easily handling discrete time Markov chains, homogeneous and simple inhomogeneous ones as well as continuous time Markov chains. The S4 classes for handling and analysing discrete and continuous time Markov chains are presented, as well as functions and method for performing probabilistic and statistical analysis. Finally, some examples in which the package's functions are applied to Economics, Finance and Natural Sciences topics are shown. output: if (rmarkdown::pandoc_version() < "2.2") function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{An introduction to markovchain package} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] formatted: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add .... 
x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) }) ``` # Introduction Markov chains represent a class of stochastic processes of great interest for the wide spectrum of practical applications. In particular, discrete time Markov chains (DTMC) permit to model the transition probabilities between discrete states by the aid of matrices.Various \proglang{R} packages deal with models that are based on Markov chains: * \pkg{msm} [@msmR] handles Multi-State Models for panel data. * \pkg{mcmcR} [@mcmcR] implements Monte Carlo Markov Chain approach. * \pkg{hmm} [@hmmR] fits hidden Markov models with covariates. * \pkg{mstate} fits `Multi-State Models based on Markov chains for survival analysis [@mstateR]. Nevertheless, the \proglang{R} statistical environment [@rSoftware] seems to lack a simple package that coherently defines S4 classes for discrete Markov chains and allows to perform probabilistic analysis, statistical inference and applications. For the sake of completeness, \pkg{markovchain} is the second package specifically dedicated to DTMC analysis, being \pkg{DTMCPack} [@DTMCPackR] the first one. Notwithstanding, \pkg{markovchain} package [@pkg:markovchain] aims to offer more flexibility in handling DTMC than other existing solutions, providing S4 classes for both homogeneous and semi-homogeneous Markov chains as well as methods suited to perform statistical and probabilistic analysis. The \pkg{markovchain} package depends on the following \proglang{R} packages: \pkg{expm} [@expmR] to perform efficient matrices powers; \pkg{igraph} [@pkg:igraph] to perform pretty plotting of `markovchain` objects and \pkg{matlab} [@pkg:matlab], that contains functions for matrix management and calculations that emulate those within \proglang{MATLAB} environment. Moreover, other scientific softwares provide functions specifically designed to analyze DTMC, as \proglang{Mathematica} 9 [@mathematica9]. The paper is structured as follows: Section \@ref(sec:mathematics) briefly reviews mathematics and definitions regarding DTMC, Section \@ref(sec:structure) discusses how to handle and manage Markov chain objects within the package, Section \@ref(sec:probability) and Section \@ref(sec:statistics) show how to perform probabilistic and statistical modelling, while Section \@ref(sec:applications) presents some applied examples from various fields analyzed by means of the \pkg{markovchain} package. # Review of core mathematical concepts {#sec:mathematics} ## General Definitions A DTMC is a sequence of random variables $X_{1},\: X_{2}\: ,\ldots,\:X_{n},\ldots$ characterized by the Markov property (also known as memoryless property, see Equation \ref{eq:markovProp}). The Markov property states that the distribution of the forthcoming state $X_{n+1}$ depends only on the current state $X_{n}$ and doesn't depend on the previous ones $X_{n-1},\: X_{n-2},\ldots,\: X_{1}$. \begin{equation} Pr\left(X_{n+1}=x_{n+1}\left|X_{1}=x_{1},X_{2}=x_{2,}...,X_{n}=x_{n}\right.\right)=Pr\left(X_{n+1}=x_{n+1}\left|X_{n}=x_{n}\right.\right). \label{eq:markovProp} \end{equation} The set of possible states $S=\left\{ s_{1},s_{2},...,s_{r}\right\}$ of $X_{n}$ can be finite or countable and it is named the state space of the chain. 
The chain moves from one state to another (this change is named either 'transition' or 'step') and the probability $p_{ij}$ to move from state $s_{i}$ to state $s_{j}$ in one step is named transition probability: \begin{equation} p_{ij}=Pr\left(X_{1}=s_{j}\left|X_{0}=s_{i}\right.\right). \label{eq:trProp} \end{equation} The probability of moving from state $i$ to $j$ in $n$ steps is denoted by $p_{ij}^{(n)}=Pr\left(X_{n}=s_{j}\left|X_{0}=s_{i}\right.\right)$. A DTMC is called time-homogeneous if the property shown in Equation \ref{eq:mcHom} holds. Time homogeneity implies no change in the underlying transition probabilities as time goes on. \begin{equation} Pr\left(X_{n+1}=s_{j}\left|X_{n}=s_{i}\right.\right)=Pr\left(X_{n}=s_{j}\left|X_{n-1}=s_{i}\right.\right). \label{eq:mcHom} \end{equation} If the Markov chain is time-homogeneous, then $p_{ij}=Pr\left(X_{k+1}=s_{j}\left|X_{k}=s_{i}\right.\right)$ and \newline $p_{ij}^{(n)}=Pr\left(X_{n+k}=s_{j}\left|X_{k}=s_{i}\right.\right)$, where $k>0$. The probability distribution of transitions from one state to another can be represented into a transition matrix $P=(p_{ij})_{i,j}$, where each element of position $(i,j)$ represents the transition probability $p_{ij}$. E.g., if $r=3$ the transition matrix $P$ is shown in Equation \ref{eq:trPropEx} \begin{equation} P=\left[\begin{array}{ccc} p_{11} & p_{12} & p_{13}\\ p_{21} & p_{22} & p_{23}\\ p_{31} & p_{32} & p_{33} \end{array}\right]. \label{eq:trPropEx} \end{equation} The distribution over the states can be written in the form of a stochastic row vector $x$ (the term stochastic means that $\sum_{i}x_{i}=1, x_{i} \geq 0$): e.g., if the current state of $x$ is $s_{2}$, $x=\left(0\:1\:0\right)$. As a consequence, the relation between $x^{(1)}$ and $x^{(0)}$ is $x^{(1)}=x^{(0)}P$ and, recursively, we get $x^{(2)}=x^{(0)}P^{2}$ and $x^{(n)}=x^{(0)}P^{n},\, n>0$. DTMC are explained in most theory books on stochastic processes, see \cite{bremaud1999discrete} and \cite{dobrow2016introduction} for example. Valuable references online available are: \cite{konstantopoulos2009markov}, \cite{probBook} and \cite{bardPpt}. ## Properties and classification of states {#sec:properties} A state $s_{j}$ is said accessible from state $s_{i}$ (written $s_{i}\rightarrow s_{j}$) if a system starting in state $s_{i}$ has a positive probability to reach the state $s_{j}$ at a certain point, i.e., $\exists n>0:\: p_{ij}^{n}>0$. If both $s_{i}\rightarrow s_{j}$ and $s_{j}\rightarrow s_{i}$, then $s_{i}$ and $s_{j}$ are said to communicate. A communicating class is defined to be a set of states that communicate. A DTMC can be composed by one or more communicating classes. If the DTMC is composed by only one communicating class (i.e., if all states in the chain communicate), then it is said irreducible. A communicating class is said to be closed if no states outside of the class can be reached from any state inside it. If $p_{ii}=1$, $s_{i}$ is defined as absorbing state: an absorbing state corresponds to a closed communicating class composed by one state only. The canonical form of a DTMC transition matrix is a matrix having a block form, where the closed communicating classes are shown at the beginning of the diagonal matrix. A state $s_{i}$ has period $k_{i}$ if any return to state $s_{i}$ must occur in multiplies of $k_{i}$ steps, that is $k_{i}=gcd\left\{ n:Pr\left(X_{n}=s_{i}\left|X_{0}=s_{i}\right.\right)>0\right\}$, where $gcd$ is the greatest common divisor. 
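As a small worked instance of this definition, consider a chain that alternates deterministically between two states $s_{1}$ and $s_{2}$ (i.e., $p_{12}=p_{21}=1$): returns to $s_{1}$ can occur only after an even number of steps, so

\[
k_{1}=gcd\left\{2,4,6,\ldots\right\}=2.
\]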
If $k_{i}=1$ the state $s_{i}$ is said to be aperiodic, else if $k_{i}>1$ the state $s_{i}$ is periodic with period $k_{i}$. Loosely speaking, $s_{i}$ is periodic if it can only return to itself after a fixed number of transitions $k_{i}>1$ (or multiple of $k_{i}$), else it is aperiodic. If states $s_{i}$ and $s_{j}$ belong to the same communicating class, then they have the same period $k_{i}$. As a consequence, each of the states of an irreducible DTMC share the same periodicity. This periodicity is also considered the DTMC periodicity. It is possible to classify states according to their periodicity. Let $T^{x\rightarrow x}$ is the number of periods to go back to state $x$ knowing that the chain starts in $x$. * A state $x$ is recurrent if $P(T^{x\rightarrow x}<+\infty)=1$ (equivalently $P(T^{x\rightarrow x}=+\infty)=0$). In addition: 1. A state $x$ is null recurrent if in addition $E(T^{x\rightarrow x})=+\infty$. 2. A state $x$ is positive recurrent if in addition $E(T^{x\rightarrow x})<+\infty$. 3. A state $x$ is absorbing if in addition $P(T^{x\rightarrow x}=1)=1$. * A state $x$ is transient if $P(T^{x\rightarrow x}<+\infty)<1$ (equivalently $P(T^{x\rightarrow x}=+\infty)>0$). It is possible to analyze the timing to reach a certain state. The first passage time (or hitting time) from state $s_{i}$ to state $s_{j}$ is the number $T_{ij}$ of steps taken by the chain until it arrives for the first time to state $s_{j}$, given that $X_{0} = s_{i}$. The probability distribution of $T_{ij}$ is defined by Equation \ref{eq:fpt1} \begin{equation} {h_{ij}}^{\left( n \right)} = Pr\left( {T_{ij} = n} \right) = Pr\left( X_n = s_j,X_{n - 1} \ne s_{j}, \ldots ,X_1 \ne s_j |X_0 = s_i \right) \label{eq:fpt1} \end{equation} and can be found recursively using Equation \ref{eq:ftp2}, given that ${h_{ij}}^{\left( n \right)} = p_{ij}$. \begin{equation} {h_{ij}}^{\left( n \right)} = \sum\limits_{k \in S - \left\{ s_{j} \right\}}^{} {{p_{ik}}{h_{kj}}^{\left( {n - 1} \right)}}. \label{eq:ftp2} \end{equation} A commonly used quantity related to $h$ is its average value, i.e. the \emph{mean first passage time} (also expected hitting time), namely $\bar h_{ij}= \sum_{n=1\dots\infty} n \,h_{ij}^{(n)}$. If in the definition of the first passage time we let $s_{i}=s_{j}$, we obtain the first recurrence time $T_{i}=\inf \{ n\geq1:X_{n}=s_{i}|X_{0}=s_{i} \}$. We could also ask ourselves which is the *mean recurrence time*, an average of the mean first recurrence times: \[ r_i = \sum_{k = 1}^{\infty} k \cdot P(T_i = k) \] Revisiting the definition of recurrence and transience: a state $s_{i}$ is said to be recurrent if it is visited infinitely often, i.e., $Pr(T_{i}<+\infty|X_{0}=s_{i})=1$. On the opposite, $s_{i}$ is called transient if there is a positive probability that the chain will never return to $s_{i}$, i.e., $Pr(T_{i}=+\infty|X_{0}=s_{i})>0$. Given a time homogeneous Markov chain with transition matrix \emph{P}, a stationary distribution \emph{z} is a stochastic row vector such that $z=z\cdot P$, where $0\leq z_{j}\leq 1 \: \forall j$ and $\sum_{j}z_{j}=1$. If a DTMC $\{X_{n}\}$ is irreducible and aperiodic, then it has a limit distribution and this distribution is stationary. 
As a consequence, if $P$ is the $k\times k$ transition matrix of the chain and $z=\left(z_{1},...,z_{k}\right)$ is the unique eigenvector of $P$ such that $\sum_{i=1}^{k}z_{i}=1$, then we get

\begin{equation}
\underset{n\rightarrow\infty}{lim}P^{n}=Z,
\label{eq:limMc}
\end{equation}

where $Z$ is the matrix having all rows equal to $z$. The stationary distribution of $\{X_{n}\}$ is represented by $z$.

A matrix $A$ is called primitive if all of its entries are strictly positive, and we write it $A > 0$. If the transition matrix $P$ for a DTMC has some primitive power, i.e. there exists $m > 0$ such that $P^m > 0$, then the DTMC is said to be regular. In fact, being regular is equivalent to being irreducible and aperiodic. All regular DTMCs are irreducible, but the converse is not true.

Given two absorbing states $s_A$ (source) and $s_B$ (sink), the \emph{committor probability} $q_j^{(AB)}$ is the probability that a process starting in state $s_j$ is absorbed in state $s_B$ (rather than $s_A$) [@noe_constructing_2009]. It can be computed via

\begin{equation}
q_j^{(AB)} = \sum_{k \ni {A, B}} P_{jk}q_k^{(AB)} \quad \mbox{with} \quad q_A^{(AB)} = 0 \quad \mbox{and} \quad q_B^{(AB)} = 1
\end{equation}

Note that we can also define the hitting probability from $i$ to $j$ as the probability of ever reaching the state $j$ if our initial state is $i$:

\begin{equation}
h_{i,j} = Pr(T_{ij} < \infty) = \sum_{n = 0}^{\infty} h_{ij}^{(n)}
\label{eq:hitting-probs}
\end{equation}

In a DTMC with a finite set of states, we know that a transient state communicates with at least one recurrent state. If the chain starts in a transient element, once it hits a recurrent state it is caught in that recurrent class, and we cannot expect it to go back to the initial state. Given a transient state $i$ we can define the *absorption probability* to the recurrent state $j$ as the probability that the first recurrent state the Markov chain visits (and therefore the class that absorbs it) is $j$, $f^{*}_{ij}$. We can also define the *mean absorption time* as the mean number of steps the transient state $i$ would take until it hits any recurrent state, $b_i$.

## A short example

Consider the following numerical example. Suppose we have a DTMC with a set of 3 possible states $S=\{s_{1}, s_{2}, s_{3}\}$. Let the transition matrix be:

\begin{equation}
P=\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right].
\label{eq:trPropExEx1}
\end{equation}

In $P$, $p_{11}=0.5$ is the probability that $X_{1}=s_{1}$ given that $X_{0}=s_{1}$ was observed, and so on. It is easy to see that the chain is irreducible since all the states communicate (it is made of one communicating class only).

Suppose that the current state of the chain is $X_{0}=s_{2}$, i.e., $x^{(0)}=(0\:1\:0)$, then the probability distribution of states after 1 and 2 steps can be computed as shown in Equations \@ref(eq:trPropExEx2) and \@ref(eq:trPropExEx3).

\begin{equation}
x^{(1)}=\left(0\:1\:0\right)\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right]=\left(0.15\:0.45\:0.4\right).
\label{eq:trPropExEx2}
\end{equation}

\begin{equation}
x^{(n)}=x^{(n-1)}P \to \left(0.15\:0.45\:0.4\right)\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right]=\left(0.2425\:0.3725\:0.385\right).
\label{eq:trPropExEx3} \end{equation} If we were interested in the probability of being in the state $s_{3}$ in the second step, then $Pr\left(X_{2}=s_{3}\left|X_{0}=s_{2}\right.\right)=0.385$. \newpage # The structure of the package {#sec:structure} ## Creating markovchain objects The package is loaded within the \proglang{R} command line as follows: ```{r, load, results='hide', message=FALSE} library("markovchain") ``` The `markovchain` and `markovchainList` S4 classes [@chambers] are defined within the \pkg{markovchain} package as displayed: ```{r, showClass, echo=FALSE} showClass("markovchain") showClass("markovchainList") ``` The first class has been designed to handle homogeneous Markov chain processes, while the latter (which is itself a list of `markovchain` objects) has been designed to handle semi-homogeneous Markov chains processes. Any element of `markovchain` class is comprised by following slots: 1. `states`: a character vector, listing the states for which transition probabilities are defined. 2. `byrow`: a logical element, indicating whether transition probabilities are shown by row or by column. 3. `transitionMatrix`: the probabilities of the transition matrix. 4. `name`: optional character element to name the DTMC. The `markovchainList` objects are defined by following slots: 1. `markovchains`: a list of `markovchain` objects. 2. `name`: optional character element to name the DTMC. The `markovchain` objects can be created either in a long way, as the following code shows ```{r mcInitLong} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") ``` or in a shorter way, displayed below ```{r mcInitShort} mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather") ``` When `new("markovchain")` is called alone, a default Markov chain is created. ```{r defaultMc} defaultMc <- new("markovchain") ``` The quicker way to create `markovchain` objects is made possible thanks to the implemented `initialize` S4 method that checks that: * the `transitionMatrix` to be a transition matrix, i.e., all entries to be probabilities and either all rows or all columns to sum up to one. * the columns and rows names of `transitionMatrix` to be defined and to coincide with `states` vector slot. The `markovchain` objects can be collected in a list within `markovchainList` S4 objects as following example shows. ```{r intromcList} mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains") ``` ## Handling markovchain objects Table \@ref(tab:methodsToHandleMc) lists which methods handle and manipulate `markovchain` objects. 
\begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Purpose \\ \hline \hline \code{*} & Direct multiplication for transition matrices.\\ \code{\textasciicircum{}} & Compute the power \code{markovchain} of a given one.\\ \code{[} & Direct access to the elements of the transition matrix.\\ \code{==} & Equality operator between two transition matrices.\\ \code{!=} & Inequality operator between two transition matrices.\\ \code{as} & Operator to convert \code{markovchain} objects into \code{data.frame} and\\ & \code{table} object.\\ \code{dim} & Dimension of the transition matrix.\\ \code{names} & Equal to \code{states}.\\ \code{names<-} & Change the \code{states} name.\\ \code{name} & Get the name of \code{markovchain object}.\\ \code{name<-} & Change the name of \code{markovchain object}.\\ \code{plot} & \code{plot} method for \code{markovchain} objects.\\ \code{print} & \code{print} method for \code{markovchain} objects.\\ \code{show} & \code{show} method for \code{markovchain} objects.\\ \code{sort} & \code{sort} method for \code{markovchain} objects, in terms of their states.\\ \code{states} & Name of the transition states.\\ \code{t} & Transposition operator (which switches \code{byrow} `slot value and modifies \\ & the transition matrix coherently).\\ \hline \end{tabular} \caption{\pkg{markovchain} methods for handling \code{markovchain} objects.} \label{tab:methodsToHandleMc} \end{table} The examples that follow shows how operations on `markovchain` objects can be easily performed. For example, using the previously defined matrix we can find what is the probability distribution of expected weather states in two and seven days, given the actual state to be cloudy. ```{r operations} initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ``` A similar answer could have been obtained defining the vector of probabilities as a column vector. A column - defined probability matrix could be set up either creating a new matrix or transposing an existing `markovchain` object thanks to the `t` method. ```{r operations2} initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ``` The initial state vector previously shown can not necessarily be a probability vector, as the code that follows shows: ```{r fval} fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ``` Basic methods have been defined for `markovchain` objects to quickly get states and transition matrix dimension. ```{r otherMethods} states(mcWeather) names(mcWeather) dim(mcWeather) ``` Methods are available to set and get the name of `markovchain` object. ```{r otherMethods2} name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ``` Also it is possible to alphabetically sort the transition matrix: ```{r sortMethod} markovchain:::sort(mcWeather) ``` A direct access to transition probabilities is provided both by `transitionProbability` method and `"["` method. ```{r transProb} transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ``` The transition matrix of a `markovchain` object can be displayed using `print` or `show` methods (the latter being less verbose). 
Similarly, the underlying transition probability diagram can be plotted by the use of `plot` method (as shown in Figure \@ref(fig:mcPlot)) which is based on \pkg{igraph} package [@pkg:igraph]. `plot` method for `markovchain` objects is a wrapper of `plot.igraph` for `igraph` S4 objects defined within the \pkg{igraph} package. Additional parameters can be passed to `plot` function to control the network graph layout. There are also \pkg{diagram} and \pkg{DiagrammeR} ways available for plotting as shown in Figure \@ref(fig:mcPlotdiagram). The `plot` function also uses `communicatingClasses` function to separate out states of different communicating classes. All states that belong to one class have same color. ```{r printAndShow} print(mcWeather) show(mcWeather) ``` ```{r mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"} if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ``` ```{r mcPlotdiagram, echo=FALSE, fig.cap="Weather example. Markov chain plot with diagram"} if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ``` Import and export from some specific classes is possible, as shown in Figure \@ref(fig:fromAndTo) and in the following code. ```{r exportImport1} mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ``` ```{r exportImport2} if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ``` from etm (now archived as of September 2020): ```{r exporImport3} if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, "markovchain") etm2mc } else { message("etm unavailable") } ``` ```{r fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"} library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ``` Coerce from `matrix` method, as the code below shows, represents another approach to create a `markovchain` method starting from a given squared probability matrix. ```{r exportImport4} myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ``` Semi-homogeneous Markov chains can be created with the aid of `markovchainList` object. The example that follows arises from health insurance, where the costs associated to patients in a Continuous Care Health Community (CCHC) are modeled by a semi-homogeneous Markov Chain, since the transition probabilities change by year. Methods explicitly written for `markovchainList` objects are: `print`, `show`, `dim` and `[`. 
```{r cchcMcList} stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ``` It is possible to perform direct access to `markovchainList` elements, as well as to determine the number of `markovchain` objects by which a `markovchainList` object is composed. ```{r cchcMcList2} mcCCRC[[1]] dim(mcCCRC) ``` The `markovchain` package contains some data found in the literature related to DTMC models (see Section \@ref(sec:applications). Table \@ref(tab:datasets) lists datasets and tables included within the current release of the package. \begin{table}[h] \centering \begin{tabular}{p{0.2\textwidth}p{0.75\textwidth}} \hline Dataset & Description \\ \hline \hline \code{blanden} & Mobility across income quartiles, \cite{blandenEtAlii}.\\ \code{craigsendi} & CD4 cells, \cite{craigSendi}.\\ \code{kullback} & raw transition matrices for testing homogeneity, \cite{kullback1962tests}.\\ \code{preproglucacon} & Preproglucacon DNA basis, \cite{averyHenderson}.\\ \code{rain} & Alofi Island rains, \cite{averyHenderson}.\\ \code{holson} & Individual states trajectories.\\ \code{sales} & Sales of six beverages in Hong Kong \cite{ching2008higher}. \\ \hline \end{tabular} \caption{The \pkg{markovchain} \code{data.frame} and \code{table}.} \label{tab:datasets} \end{table} Finally, Table \@ref(tab:demos) lists the demos included in the demo directory of the package. \begin{table}[h] \centering \begin{tabular}{lll} \hline R Code File & Description \\ \hline \hline \code{bard.R} & Structural analysis of Markov chains from Bard PPT.\\ \code{examples.R} & Notable Markov chains, e.g., The Gambler Ruin chain.\\ \code{quickStart.R} & Generic examples.\\ \code{extractMatrices.R} & Generic examples.\\ \hline \end{tabular} \caption{The \pkg{markovchain} demos.} \label{tab:demos} \end{table} # Probability with markovchain objects {#sec:probability} The \pkg{markovchain} package contains functions to analyse DTMC from a probabilistic perspective. For example, the package provides methods to find stationary distributions and identifying absorbing and transient states. Many of these methods come from \proglang{MATLAB} listings that have been ported into \proglang{R}. For a full description of the underlying theory and algorithm the interested reader can overview the original \proglang{MATLAB} listings, \cite{renaldoMatlab} and \cite{montgomery}. Table \@ref(tab:methodsToStats) shows methods that can be applied on `markovchain` objects to perform probabilistic analysis. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Returns \\ \hline \hline \code{absorbingStates} & the absorbing states of the transition matrix, if any.\\ \code{steadyStates} & the vector(s) of steady state(s) in matrix form. \\ \code{meanFirstPassageTime} & matrix or vector of mean first passage times. 
\\ \code{meanRecurrenceTime} & vector of mean number of steps to return to each recurrent state \\ \code{hittingProbabilities} & matrix of hitting probabilities for a Markov chain. \\ \code{meanAbsorptionTime} & expected number of steps for a transient state to be \\ & absorbed by any recurrent class \\ \code{absorptionProbabilities} & probabilities of transient states of being \\ & absorbed by each recurrent state \\ \code{committorAB} & committor probabilities \\ \code{communicatingClasses} & list of communicating classes. \\ & $s_{j}$, given actual state $s_{i}$. \\ \code{canonicForm} & the transition matrix into canonic form. \\ \code{is.accessible} & checks whether a state j is reachable from state i. \\ \code{is.irreducible} & checks whether a DTMC is irreducible. \\ \code{is.regular} & checks whether a DTMC is regular. \\ \code{period} & the period of an irreducible DTMC. \\ \code{recurrentClasses} & list of recurrent communicating classes. \\ \code{transientClasses} & list of transient communicating classes. \\ \code{recurrentStates} & the recurrent states of the transition matrix. \\ \code{transientStates} & the transient states of the transition matrix, if any. \\ \code{summary} & DTMC summary. \\ \hline \end{tabular} \caption{\pkg{markovchain} methods: statistical operations.} \label{tab:methodsToStats} \end{table} ## Conditional distributions The conditional distribution of weather states, given that current day's weather is sunny, is given by following code. ```{r conditionalDistr} conditionalDistribution(mcWeather, "sunny") ``` ## Stationary states A stationary (steady state, or equilibrium) vector is a probability vector such that Equation \ref{eq:steadystat2} holds \begin{equation} \begin{matrix} 0\leq \pi_j \leq 1\\ \sum_{j \in S} \pi_j = 1\\ \pi \cdot P = \pi \end{matrix} \label{eq:steadystat2} \end{equation} Steady states are associated to $P$ eigenvalues equal to one. We could be tempted to compute them solving the eigen values / vectors of the matrix and taking real parts (since if $u + iv$ is a eigen vector, for the matrix $P$, then $Re(u + iv) = u$ and $Im(u + iv) = v$ are eigen vectors) and normalizing by the vector sum, this carries some concerns: 1. If $u, v \in \mathbb{R}^n$ are linearly independent eigen vectors associated to $1$ eigen value, $u + iv$, $u + iu$ are also linearly independent eigen vectors, and their real parts coincide. Clearly if we took real parts, we would be loosing an eigen vector, because we cannot know in advance if the underlying algorithm to compute the eigen vectors is going to output something similar to what we described. We should be agnostic to the underlying eigen vector computation algorithm. 2. Imagine the identity $P$ of dimensions $2 \times 2$. Its eigen vectors associated to the $1$ eigen value are $u = (1, 0)$ and $v = (0, 1)$. However, the underlying algorithm to compute eigen vectors could return $(1, -2)$ and $(-2, 1)$ instead, that are linear combinations of the aforementioned ones, and therefore eigen vectors. Normalizing by their sum, we would get: $(-1, 2)$ and $(2, -1)$, which obviously are not probability measures. Again, we should be agnostic to the underlying eigen computation algorithm. 3. Algorithms to compute eigen values / vectors are computationally expensive: they are iterative, and we cannot predict a fixed number of iterations for them. Moreover, each iteration takes $\mathcal{O}(m^2)$ or $\mathcal{O}(m^3)$ algorithmic complexity, with $m$ the number of states. 
We are going to use that every irreducible DTMC has a unique steady state, that is, if $M$ is the matrix for an irreducible DTMC (all states communicate with each other), then there exists a unique $v \in \mathbb{R}^m$ such that:

\[
v \cdot M = v, \qquad \sum_{i = 1}^m v_i = 1
\]

Also, we'll use that a steady state for a DTMC assigns $0$ to the transient states. The canonical form of a (by row) stochastic matrix looks like:

\[
\left(\begin{array}{c|c|c|c|c}
M_1 & 0 & 0 & \ldots & 0 \\ \hline
0 & M_2 & 0 & \ldots & 0 \\ \hline
0 & 0 & M_3 & \ldots & 0 \\ \hline
\vdots & \vdots & \vdots & \ddots & \vdots \\ \hline
A_1 & A_2 & A_3 & \ldots & R
\end{array}\right)
\]

where $M_i$ corresponds to irreducible sub-chains, the blocks $A_i$ correspond to the transitions from transient states to each of the recurrent classes, and $R$ collects the transitions from the transient states to themselves.

Also, we should note that a Markov chain has exactly the same number of steady states as recurrent classes. Therefore, we have coded the following algorithm ^[We would like to thank Prof. Christophe Dutang for his contributions to the development of this method. He coded a first improvement of the original `steadyStates` method and we could not have reached the current correctness without his previous work]:

1. Identify the recurrent classes $[C_1, \ldots, C_l]$ with the \texttt{recurrentClasses} function.
2. Take each class $C_i$ and compute the sub-matrix $M_i$ corresponding to it.
3. Solve the system $v \cdot M_i = v, \, \sum_{j = 1}^{|C_i|} v_j = 1$, which has a unique solution, for each $i = 1, \ldots, l$.
4. Map each solution back to the original state order in $P$ and assign a $0$ to the slots corresponding to transient states in the matrix.

The result is returned in matrix form.

```{r steadyStates}
steadyStates(mcWeather)
```

It is possible for a Markov chain to have more than one stationary distribution, as the gambler ruin example shows.

```{r gamblerRuin}
gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) {
  m <- markovchain:::zeros(moneyMax + 1)
  m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1
  states <- as.character(0:moneyMax)
  rownames(m) <- colnames(m) <- states

  for(i in 2:moneyMax){
    m[i,i-1] <- 1 - prob
    m[i, i + 1] <- prob
  }

  new("markovchain", transitionMatrix = m,
      name = paste("Gambler ruin", moneyMax, "dim", sep = " "))
}

mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5)
steadyStates(mcGR4)
```

## Classification of states

Absorbing states are determined by means of the `absorbingStates` method.

```{r absorbingStates}
absorbingStates(mcGR4)
absorbingStates(mcWeather)
```

The key function used by the methods which need knowledge about communicating classes, recurrent states and transient states is `.commclassKernel`, which is a modification of Tarjan's algorithm from \cite{Tarjan}. This `.commclassKernel` method gets a transition matrix of dimension $n$ and returns a list of two items:

1. `classes`, a matrix whose $(i, j)$ entry is `true` if $s_i$ and $s_j$ are in the same communicating class.
2. `closed`, a vector whose $i$-th entry indicates whether the communicating class to which $i$ belongs is closed.

These functions are used by two other internal functions on which the `summary` method for `markovchain` objects works.

The example matrix used in \cite{renaldoMatlab} well exemplifies the purpose of the function.
```{r renaldoMatrix1} P <- markovchain:::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ``` All states that pertain to a transient class are named "transient" and a specific method has been written to elicit them. ```{r transientStates} transientStates(probMc) ``` `canonicForm` method that turns a Markov chain into its canonic form, reordering the states to have first the recurrent classes and then the transient states. ```{r probMc2Canonic} probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ``` The function `is.accessible` permits to investigate whether a state $s_{j}$ is accessible from state $s_i$, that is whether the probability to eventually reach $s_j$ starting from $s_{i}$ is greater than zero. ```{r isAccessible} is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ``` In Section \@ref(sec:properties) we observed that, if a DTMC is irreducible, all its states share the same periodicity. Then, the `period` function returns the periodicity of the DTMC, provided that it is irreducible. The example that follows shows how to find if a DTMC is reducible or irreducible by means of the function `is.irreducible` and, in the latter case, the method `period` is used to compute the periodicity of the chain. ```{r periodicity} E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ``` The example Markov chain found in \proglang{Mathematica} web site \citep{mathematica9MarkovChain} has been used, and is plotted in Figure \@ref(fig:mcMathematics). ```{r mathematica9Mc} mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ``` ```{r mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. Markov chain plot."} plot(mathematicaMc, layout = layout.fruchterman.reingold) ``` ```{r mathematica9MC, echo=FALSE} summary(mathematicaMc) ``` ## First passage time distributions and means \cite{renaldoMatlab} provides code to compute first passage time (within $1,2,\ldots, n$ steps) given the initial state to be $i$. The \proglang{MATLAB} listings translated into \proglang{R} on which the `firstPassage` function is based are: ```{r fpTime1, eval=FALSE} .firstpassageKernel <- function(P, i, n){ G <- P H <- P[i,] E <- 1 - diag(size(P)[2]) for (m in 2:n) { G <- P %*% (G * E) H <- rbind(H, G[i,]) } return(H) } ``` We conclude that the probability for the *first* rainy day to be the third one, given that the current state is sunny, is given by: ```{r fpTime2} firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ``` To compute the *mean* first passage times, i.e. 
the expected number of days before it rains given that today is sunny, we can use the `meanFirstPassageTime` function: ```{r mfpt1} meanFirstPassageTime(mcWeather) ``` indicating e.g. that the average number of days of sun or cloud before rain is 6.67 if we start counting from a sunny day, and 5 if we start from a cloudy day. Note that we can also specify one or more destination states: ```{r mfpt2} meanFirstPassageTime(mcWeather,"rain") ``` The implementation follows the matrix solutions by [@GrinsteadSnell]. We can check the result by averaging the first passage probability density function: ```{r mfpt3} firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ``` ## Mean recurrence time The `meanRecurrenceTime` method gives the first mean recurrence time (expected number of steps to go back to a state if it was the initial one) for each recurrent state in the transition probabilities matrix for a DTMC. Let's see an example: ```{r mrt-weather} meanRecurrenceTime(mcWeather) ``` Another example, with not all of its states being recurrent: ```{r mrt-probMc} recurrentStates(probMc) meanRecurrenceTime(probMc) ``` ## Absorption probabilities and mean absorption time We are going to use the Drunkard’s random walk from [@GrinsteadSnell]. We have a drunk person walking through the street. Each move the person does, if they have not arrived to either home (corner 1) or to the bar (corner 5) could be to the left corner or to the right one, with equal probability. In case of arrival to the bar or to home, the person stays there. ```{r data-drunkard} drunkProbs <- markovchain:::zeros(5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ``` Recurrent (in fact absorbing states) are: ```{r rs-drunkard} recurrentStates(drunkMc) ``` Transient states are the rest: ```{r ts-drunkard} transientStates(drunkMc) ``` The probability of either being absorbed by the bar or by the sofa at home are: ```{r ap-drunkard} absorptionProbabilities(drunkMc) ``` which means that the probability of arriving home / bar is inversely proportional to the distance to each one. But we also would like to know how much time does the person take to arrive there, which can be done with `meanAbsorptionTime`: ```{r at-drunkard} meanAbsorptionTime(drunkMc) ``` So it would take `3` steps to arrive to the destiny if the person is either in the second or fourth corner, and `4` steps in case of being at the same distance from home than to the bar. ## Committor probability The committor probability tells us the probability to reach a given state before another given. 
Suppose that we start in a cloudy day, the probabilities of experiencing a rainy day before a sunny one is 0.5: ```{r} committorAB(mcWeather,3,1) ``` ## Hitting probabilities Rewriting the system \eqref{eq:hitting-probs} as: \begin{equation*} A = \left(\begin{array}{c|c|c|c} A_1 & 0 & \ldots & 0 \\ \hline 0 & A_2 & \ldots & 0 \\ \hline \vdots & \vdots & \ddots & 0 \\ \hline 0 & 0 & \ldots & A_n \end{array}\right) \end{equation*} \begin{eqnarray*} A_1 &= \left(\begin{matrix} -1 & p_{1,2} & p_{1,3} & \ldots & p_{1,n} \\ 0 & (p_{2,2} - 1) & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & p_{n, 2} & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ A_2 &= \left(\begin{matrix} (p_{1,1} - 1) & 0 & p_{1,3} & \ldots & p_{1,n} \\ p_{2,1} & -1 & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & 0 & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ \vdots & \vdots\\ A_n &= \left(\begin{matrix} (p_{1,1} - 1) & p_{1,2} & p_{1,3} & \ldots & 0 \\ p_{2,1} & (p_{2,2} -1) & p_{2,3} & \ldots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & p_{n,2} & p_{n,3} & \ldots & -1 \end{matrix}\right)\\ \end{eqnarray*} \begin{equation*} \begin{array}{lr} X_j = \left(\begin{array}{c} h_{1,j} \\ h_{2,j} \\ \vdots \\ h_{n,j} \end{array}\right) & C_j = - \left(\begin{array}{c} p_{1,j} \\ p_{2,j} \\ \vdots \\ p_{n,j} \end{array}\right) \end{array} \end{equation*} we end up having to solve the block systems: \begin{equation} A_j \cdot X_j = C_j \end{equation} Let us imagine the $i$ -th state has transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then that same row would turn into $(0,0, \ldots, 0)$ for some block, thus obtaining a singular matrix. Another case which may give us problems could be: state $i$ has the following transition probabilities: $(0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0)$ and the state $j$ has the following transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then when building some blocks we will end up with rows: \begin{eqnarray*} (0, \ldots, 0, \underset{i)}{-1}, 0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0) \\ (0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0, \underset{j)}{-1}, 0, \ldots, 0) \end{eqnarray*} which are linearly dependent. Our hypothesis is that if we treat the closed communicating classes differently, we *might* delete the linearity in the system. If we have a closed communicating class $C_u$, then $h_{i,j} = 1$ for all $i,j \in C_u$ and $h_{k,j} = 0$ for all $k\not\in C_u$. Then we can set $X_u$ appropriately and solve the other $X_v$ using those values. The method in charge of that in `markovchain` package is `hittingProbabilities`, which receives a Markov chain and computes the matrix $(h_{ij})_{i,j = 1,\ldots, n}$ where $S = \{s_1, \ldots, s_n\}$ is the set of all states of the chain. For the following chain: ```{r hitting-data} M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ``` we want to compute the hitting probabilities. That can be done with: ```{r hitting-probabilities} hittingProbabilities(hittingTest) ``` In the case of the `mcWeather` Markov chain we would obtain a matrix with all its elements set to $1$. 
That makes sense (and is desirable) since if today is sunny, we expect it would be sunny again at certain point in the time, and the same with rainy weather (that way we assure good harvests): ```{r hitting-weather} hittingProbabilities(mcWeather) ``` # Statistical analysis {#sec:statistics} Table \@ref(tab:funs4Stats) lists the functions and methods implemented within the package which help to fit, simulate and predict DTMC. \begin{table}[h] \centering \begin{tabular}{lll} \hline Function & Purpose \\ \hline \hline \code{markovchainFit} & Function to return fitted Markov chain for a given sequence.\\ \code{predict} & Method to calculate predictions from \code{markovchain} or \\ & \code{markovchainList} objects.\\ \code{rmarkovchain} & Function to sample from \code{markovchain} or \code{markovchainList} objects.\\ \hline \end{tabular} \caption{The \pkg{markovchain} statistical functions.} \label{tab:funs4Stats} \end{table} ## Simulation Simulating a random sequence from an underlying DTMC is quite easy thanks to the function `rmarkovchain`. The following code generates a year of weather states according to `mcWeather` underlying stochastic process. ```{r simulatingAMarkovChain} weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ``` Similarly, it is possible to simulate one or more sequences from a semi-homogeneous Markov chain, as the following code (applied on CCHC example) exemplifies. ```{r simulatingAListOfMarkovChain} patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ``` Two advance parameters are available to the `rmarkovchain` method which helps you decide which implementation to use. There are four options available : \proglang{R}, \proglang{R} in parallel, \proglang{C++} and \proglang{C++} in parallel. Two boolean parameters `useRcpp` and `parallel` will decide which implementation will be used. Default is \code{useRcpp = TRUE} and \code{parallel = FALSE} i.e. \proglang{C++} implementation. The \proglang{C++} implementation is generally faster than the `R` implementation. If you have multicore processors then you can take advantage of `parallel` parameter by setting it to `TRUE`. When both `Rcpp=TRUE` and `parallel=TRUE` the parallelization has been carried out using \pkg{RcppParallel} package \citep{pkg:RcppParallel}. ## Estimation A time homogeneous Markov chain can be fit from given data. Four methods have been implemented within current version of \pkg{markovchain} package: maximum likelihood, maximum likelihood with Laplace smoothing, Bootstrap approach, maximum a posteriori. Equation \ref{eq:MLE} shows the maximum likelihood estimator (MLE) of the $p_{ij}$ entry, where the $n_{ij}$ element consists in the number sequences $\left( X_{t}=s_{i}, X_{t+1}=s_{j}\right)$ found in the sample, that is \begin{equation} {\hat p^{MLE}}_{ij} = \frac{n_{ij}}{\sum\limits_{u = 1}^k {n_{iu}}}. \label{eq:MLE} \end{equation} Equation \@ref(eq:SE) shows the `standardError` of the MLE \citep{MSkuriat}. \begin{equation} SE_{ij} = \frac{ {\hat p^{MLE}}_{ij} }{\sqrt{n_{ij}}} \label{eq:SE} \end{equation} ```{r fitMcbyMLE2} weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ``` The Laplace smoothing approach is a variation of the MLE, where the $n_{ij}$ is substituted by $n_{ij}+\alpha$ (see Equation \ref{eq:LAPLACE}), being $\alpha$ an arbitrary positive stabilizing parameter. 
\begin{equation}
{\hat p^{LS}}_{ij} = \frac{{{n_{ij}} + \alpha }}{{\sum\limits_{u = 1}^k {\left( {{n_{iu}} + \alpha } \right)} }}
\label{eq:LAPLACE}
\end{equation}

```{r fitMcbyLAPLACE}
weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays,
                                       method = "laplace", laplacian = 0.01,
                                       name = "Weather LAPLACE")
weatherFittedLAPLACE$estimate
```

(NOTE: The confidence interval option is enabled by default. Disable it to speed up computations.)

Both the MLE and the Laplace approach are based on the `createSequenceMatrix` function, which returns the raw counts transition matrix.

```{r fitSequenceMatrix}
createSequenceMatrix(stringchar = weathersOfDays)
```

`stringchar` could contain `NA` values, and the transitions containing `NA` would be ignored.

An issue occurs when the sample contains only one realization of a state (say $X_{\beta}$) which is located at the end of the data sequence, since it yields a row of zeros (no sample to estimate the conditional distribution of the transition). In this case the estimated transition matrix is corrected assuming $p_{\beta,j}=1/k$, being $k$ the number of possible states.

`createSequenceMatrix` can also be used to obtain raw count transition matrices from a given $n\times 2$ matrix, as the following example shows:

```{r fitSequenceMatrix2}
myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2)
createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE)
```

A bootstrap estimation approach has been developed within the package in order to provide an indication of the variability of the ${\hat p}_{ij}$ estimates. The bootstrap approach implemented within the \pkg{markovchain} package follows these steps:

1. bootstrap the data sequences following the conditional distributions of states estimated from the original one. The default number of bootstrap samples is 10, as specified in the `nboot` parameter of the `markovchainFit` function.
2. apply MLE estimation on the bootstrapped data sequences, which are saved in the `bootStrapSamples` slot of the returned list.
3. the ${p^{BOOTSTRAP}}_{ij}$ is the average of all ${p^{MLE}}_{ij}$ across the `bootStrapSamples` list, normalized by row. A `standardError` of the $\hat{{p^{MLE}}_{ij}}$ estimate is provided as well.

```{r fitMcbyBootStrap1}
weatherFittedBOOT <- markovchainFit(data = weathersOfDays,
                                    method = "bootstrap", nboot = 20)
weatherFittedBOOT$estimate
weatherFittedBOOT$standardError
```

The bootstrapping process can be done in parallel thanks to the \pkg{RcppParallel} package \citep{pkg:RcppParallel}. The parallelized implementation is definitely suggested when the data sample size or the required number of bootstrap runs is high.

```{r fitMcbyBootStrap2, eval=FALSE}
weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays,
                                            method = "bootstrap", nboot = 200,
                                            parallel = TRUE)
weatherFittedBOOTParallel$estimate
weatherFittedBOOTParallel$standardError
```

The parallel bootstrapping uses all the available cores on a machine by default. However, it is also possible to tune the number of threads used. Note that this should be done in R before calling the `markovchainFit` function. For example, the following code will set the number of threads to two.

```{r fitMcbyBootStrap3, eval=FALSE}
RcppParallel::setNumThreads(2)
```

For more details, please refer to the \pkg{RcppParallel} web site.

For all the fitting methods, the `logLikelihood` \citep{MSkuriat} denoted in Equation \ref{eq:LLH} is provided.
\begin{equation}
LLH = \sum_{i,j} n_{ij} \cdot \log (p_{ij})
\label{eq:LLH}
\end{equation}

where $n_{ij}$ is the entry of the frequency matrix and $p_{ij}$ is the entry of the transition probability matrix.

```{r fitMcbyMLE1}
weatherFittedMLE$logLikelihood
weatherFittedBOOT$logLikelihood
```

Confidence matrices of estimated parameters (parametric for MLE, non-parametric for bootstrap) are available as well. The `confidenceInterval` is provided with the two matrices: `lowerEndpointMatrix` and `upperEndpointMatrix`. The confidence level (CL) is 0.95 by default and can be given as an argument of the function `markovchainFit`. It is used to obtain the standard score (z-score): from classical inference theory, if $ci$ is the required level of confidence and a normal approximation is assumed, $zscore(ci)$ is the value that satisfies $\Phi \left ( zscore(ci) \right ) = 1-\frac{1-ci}{2}$. Equations \ref{eq:CIL} and \ref{eq:CIU} \citep{MSkuriat} show the `confidenceInterval` of a fitting. Note that each entry of the matrices is bounded between 0 and 1.

\begin{align}
LowerEndpoint_{ij} = p_{ij} - zscore (CL) * SE_{ij} \label{eq:CIL} \\
UpperEndpoint_{ij} = p_{ij} + zscore (CL) * SE_{ij} \label{eq:CIU}
\end{align}

```{r confint}
weatherFittedMLE$confidenceInterval
weatherFittedBOOT$confidenceInterval
```

A special function, `multinomialConfidenceIntervals`, has been written in order to obtain multinomial-wise confidence intervals. The code is based on an Rcpp translation of the functions of the \pkg{MultinomialCI} package \cite{pkg:MultinomialCI}, which were themselves based on the \cite{sison1995simultaneous} paper.

```{r multinomial}
multinomialConfidenceIntervals(transitionMatrix =
        weatherFittedMLE$estimate@transitionMatrix,
        countsTransitionMatrix = createSequenceMatrix(weathersOfDays))
```

The functions for fitting DTMC have mostly been rewritten in \proglang{C++} using \pkg{Rcpp} \cite{RcppR} since version 0.2.

It is also possible to fit a DTMC object from `matrix` or `data.frame` objects, as shown in the following code.

```{r fitMclists}
data(holson)
singleMc<-markovchainFit(data=holson[,2:12],name="holson")
```

The same applies for `markovchainList` (output length has been limited).

```{r fitMclistsFit1, output.lines=20}
mcListFit<-markovchainListFit(data=holson[,2:6],name="holson")
mcListFit$estimate
```

Finally, given a `list` object, it is possible to fit a `markovchain` object or to obtain the raw transition matrix.

```{r fitMclistsFit2}
c1<-c("a","b","a","a","c","c","a")
c2<-c("b")
c3<-c("c","a","a","c")
c4<-c("b","a","b","a","a","c","b")
c5<-c("a","a","c",NA)
c6<-c("b","c","b","c","a")
mylist<-list(c1,c2,c3,c4,c5,c6)
mylistMc<-markovchainFit(data=mylist)
mylistMc
```

The same works for `markovchainListFit`.

```{r fitAMarkovChainListfromAlist, output.lines=15}
markovchainListFit(data=mylist)
```

If any transition contains `NA`, it will be ignored in the results, as the above example showed.

## Prediction

The $n$-step forward predictions can be obtained using the `predict` methods explicitly written for `markovchain` and `markovchainList` objects. The prediction is the mode of the conditional distribution of $X_{t+1}$ given $X_{t}=s_{j}$, where $s_{j}$ is the last realization of the DTMC (homogeneous or semi-homogeneous).

### Predicting from a markovchain object

The 3-day forward predictions from a `markovchain` object can be generated as follows, assuming that the last two days were respectively "cloudy" and "sunny".
```{r markovchainPredict}
predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"),
        n.ahead = 3)
```

### Predicting from a markovchainList object

Given an initial two-year health status, the 5-year ahead prediction of any CCRC guest is

```{r markovchainListPredict}
predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5)
```

The prediction stops at the end of the chain sequence, since the underlying semi-homogeneous Markov chain has a length of four. In order to continue five years ahead, the `continue=TRUE` parameter setting makes the `predict` method keep using the last `markovchain` in the sequence list.

```{r markovchainListPredict2}
predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE)
```

## Statistical Tests

In this section, we describe the statistical tests: assessing the Markov property (`verifyMarkovProperty`), the order (`assessOrder`) and the stationarity (`assessStationarity`) of a Markov chain sequence, and the divergence test for empirically estimated transition matrices (`divergenceTest`). Most of these tests are based on $\chi ^2$ statistics. Relevant references are \cite{kullback1962tests} and \cite{anderson1957statistical}.

All such tests have been designed for small samples, since departures from the Markov property become easy to detect as the sample size increases. In addition, the accuracy of the statistical inference functions has been questioned and will be thoroughly investigated in future versions of the package.

### Assessing the Markov property of a Markov chain sequence

The `verifyMarkovProperty` function verifies whether the Markov property holds for the given chain. The test implemented in the package looks at triplets of successive observations. If $x_1, x_2, \ldots, x_N$ is a set of observations and $n_{ijk}$ is the number of times $t$ $\left(1 \le t \le N-2 \right)$ such that $x_t=i, x_{t+1}=j, x_{t+2}=k$, then if the Markov property holds $n_{ijk}$ follows a Binomial distribution with parameters $n_{ij}$ and $p_{jk}$. A classical $\chi^2$ test can check this distributional assumption, since $\sum_{i}\sum_{j}\sum_{k}\frac{(n_{ijk}-n_{ij}\hat{p_{jk}})^2}{n_{ij}\hat{p_{jk}}}\sim \chi^2\left(q \right )$ where $q$ is the number of degrees of freedom. The number of degrees of freedom $q$ of the $\chi^2$ distribution is given by the formula $r-q+s-1$, where:

* $s$ denotes the number of states $i$ in the state space such that $n_{i} > 0$,
* $q$ denotes the number of pairs $(i, j)$ for which $n_{ij} > 0$, and
* $r$ denotes the number of triplets $(i, j, k)$ for which $n_{ij}n_{jk} > 0$.

```{r test1}
sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b",
                   "a", "b", "a", "a", "b", "b", "b", "a")
verifyMarkovProperty(sample_sequence)
```

### Assessing the order of a Markov chain sequence

The `assessOrder` function checks whether the given chain is of first order or of second order. For each possible present state, we construct a contingency table of the frequency of the future state for each past-to-present state transition, as shown in Table \ref{tab:order}.

\begin{table}[h]
\centering
\begin{tabular}{l | l | l | l}
\hline
past & present & future & future \\
 & & a & b \\
\hline
\hline
a & a & 2 & 2\\
b & a & 2 & 2\\
\hline
\end{tabular}
\caption{Contingency table to assess the order for the present state a.}
\label{tab:order}
\end{table}

Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value.
If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is of first order. ```{r test2} data(rain) assessOrder(rain$rain) ``` ### Assessing the stationarity of a Markov chain sequence The `assessStationarity` function assesses if the transition probabilities of the given chain change over time. To be more specific, the chain is stationary if the following condition meets. \begin{equation} p_{ij}(t) = p_{ij} ~\textrm{ for all }~t \label{eq:stationarity} \end{equation} For each possible state, we construct a contingency table of the estimated transition probabilities over time as shown in Table \ref{tab:stationarity}. \begin{table}[h] \centering \begin{tabular}{l | l | l} \hline time (t) & probability of transition to a & probability of transition to b \\ \hline \hline 1 & 0 & 1\\ 2 & 0 & 1\\ . & . & . \\ . & . & . \\ . & . & . \\ 16 & 0.44 & 0.56\\ \hline \end{tabular} \caption{Contingency table to assess the stationarity of the state a.} \label{tab:stationarity} \end{table} Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is stationary. ```{r test3} assessStationarity(rain$rain, 10) ``` ### Divergence tests for empirically estimated transition matrices This section discusses tests developed to verify whether: 1. An empirical transition matrix is consistent with a theoretical one. 2. Two or more empirical transition matrices belongs to the same DTMC. The first test is implemented by the `verifyEmpiricalToTheoretical` function. Being $f_{ij}$ the raw transition count, \cite{kullback1962tests} shows that $2*\sum_{i=1}^{r}\sum_{j=1}^{r}f_{ij}\ln\frac{f_{ij}}{f_{i.}P\left( E_j | E_i\right)} \sim \chi^2\left ( r*(r-1) \right )$. The following example is taken from \cite{kullback1962tests}: ```{r divergence1} sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ``` The second one is implemented by the `verifyHomogeneity` function, inspired by \cite[section~9]{kullback1962tests}. Assuming that $i=1,2, \ldots, s$ DTMC samples are available and that the cardinality of the state space is $r$ it verifies whether the $s$ chains belongs to the same unknown one. \cite{kullback1962tests} shows that its test statistics follows a chi-square law, $2*\sum_{i=1}^{s}\sum_{j=1}^{r}\sum_{k=1}^{r}f_{ijk}\ln\frac{n*f_{ijk}}{f_{i..}f_{.jk}} \sim \chi^2\left ( r*(r-1) \right )$. Also the following example is taken from \cite{kullback1962tests}: ```{r divergence2} data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ``` ## Continuous Times Markov Chains ### Intro The \pkg{markovchain} package provides functionality for continuous time Markov chains (CTMCs). CTMCs are a generalization of discrete time Markov chains (DTMCs) in that we allow time to be continuous. We assume a finite state space $S$ (for an infinite state space wouldn't fit in memory). 
We can think of CTMCs as Markov chains in which state transitions can happen at any time. More formally, we would like our CTMCs to satisfy the following two properties: * The Markov property - let $F_{X(s)}$ denote the information about $X$ up to time $s$. Let $j \in S$ and $s \leq t$. Then, $P(X(t) = j|F_{X(s)}) = P(X(t) = j|X(s))$. * Time homogeneity - $P(X(t) = j|X(s) = k) = P(X(t-s) = j|X(0) = k)$. If both the above properties are satisfied, it is referred to as a time-homogeneous CTMC. If a transition occurs at time $t$, then $X(t)$ denotes the new state and $X(t)\neq X(t-)$. Now, let $X(0)=x$ and let $T_x$ be the time a transition occurs from this state. We are interested in the distribution of $T_x$. For $s,t \geq 0$, it can be shown that $ P(T_x > s+t | T_x > s) = P(T_x > t) $ This is the memory less property that only the exponential random variable exhibits. Therefore, this is the sought distribution, and each state $s \in S$ has an exponential holding parameter $\lambda(s)$. Since $\mathrm{E}T_x = \frac{1}{\lambda(x)}$, higher the rate $\lambda(x)$, smaller the expected time of transitioning out of the state $x$. However, specifying this parameter alone for each state would only paint an incomplete picture of our CTMC. To see why, consider a state $x$ that may transition to either state $y$ or $z$. The holding parameter enables us to predict when a transition may occur if we start off in state $x$, but tells us nothing about which state will be next. To this end, we also need transition probabilities associated with the process, defined as follows (for $y \neq x$) - $p_{xy} = P(X(T_s) = y | X(0) = x)$. Note that $\sum_{y \neq x} p_{xy} = 1$. Let $Q$ denote this transition matrix ($Q_{ij} = p_{ij}$). What is key here is that $T_x$ and the state $y$ are independent random variables. Let's define $\lambda(x, y) = \lambda(x) p_{xy}$ We now look at Kolmogorov's backward equation. Let's define $P_{ij}(t) = P(X(t) = j | X(0) = i)$ for $i, j \in S$. The backward equation is given by (it can be proved) $P_{ij}(t) = \delta_{ij}e^{-\lambda(i)t} + \int_{0}^{t}\lambda(i)e^{-\lambda(i)t} \sum_{k \neq i} Q_{ik} P_{kj}(t-s) ds$. Basically, the first term is non-zero if and only if $i=j$ and represents the probability that the first transition from state $i$ occurs after time $t$. This would mean that at $t$, the state is still $i$. The second term accounts for any transitions that may occur before time $t$ and denotes the probability that at time $t$, when the smoke clears, we are in state $j$. This equation can be represented compactly as follows $P'(t) = AP(t)$ where $A$ is the *generator* matrix. \[ A(i, j) = \begin{cases} \lambda(i, j) & \mbox{if } i \neq j \\ -\lambda(i) & \mbox{else.} \end{cases} \] Observe that the sum of each row is 0. A CTMC can be completely specified by the generator matrix. ### Stationary Distributions The following theorem guarantees the existence of a unique stationary distribution for CTMCs. Note that $X(t)$ being irreducible and recurrent is the same as $X_n(t)$ being irreducible and recurrent. Suppose that $X(t)$ is irreducible and recurrent. Then $X(t)$ has an invariant measure $\eta$, which is unique up to multiplicative factors. Moreover, for each $k \in S$, we have \[\eta_k = \frac{\pi_k}{\lambda(k)}\] where $\pi$ is the unique invariant measure of the embedded discrete time Markov chain $Xn$. Finally, $\eta$ satisfies \[0 < \eta_j < \infty, \forall j \in S\] and if $\sum_i \eta_i < \infty$ then $\eta$ can be normalized to get a stationary distribution. 
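To make the theorem above concrete, the following minimal sketch (plain base R, not a package function; the two-state generator and state names are purely illustrative) computes the stationary distribution of a hypothetical CTMC through the invariant measure of its embedded chain, $\eta_k = \pi_k / \lambda(k)$, followed by normalization.

```{r ctmcStationarySketch}
# Illustrative two-state generator A (rows sum to zero); values are hypothetical
A <- matrix(c(-3, 3,
               1, -1), nrow = 2, byrow = TRUE,
            dimnames = list(c("s1", "s2"), c("s1", "s2")))
lambda <- -diag(A)                      # holding rates lambda(k)
P <- A / lambda                         # divide row k by lambda(k)
diag(P) <- 0                            # embedded DTMC: P[i, j] = lambda(i, j) / lambda(i)
ev <- eigen(t(P))                       # left eigenvector of P for eigenvalue 1
pi_embedded <- Re(ev$vectors[, which.min(abs(ev$values - 1))])
pi_embedded <- pi_embedded / sum(pi_embedded)
eta <- pi_embedded / lambda             # invariant measure eta_k = pi_k / lambda(k)
eta / sum(eta)                          # normalized: the CTMC stationary distribution
```

The same vector can be obtained directly with the `steadyStates` method on a `ctmc` object, as shown later in the Examples subsection.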
### Estimation

Let the data set be $D = \{(s_0, t_0), (s_1, t_1), ..., (s_{N-1}, t_{N-1})\}$ where $N=|D|$. Each $s_i$ is a state from the state space $S$ and during the time $[t_i,t_{i+1}]$ the chain is in state $s_i$. Let the parameters be represented by $\theta = \{\lambda, P\}$ where $\lambda$ is the vector of holding parameters for each state and $P$ the transition matrix of the embedded discrete time Markov chain.

Then the likelihood is given by

\[ {Pr(D | \theta) \propto \lambda(s_0)e^{-\lambda(s_0)(t_1-t_0)}Pr(s_1|s_0) \cdot\ldots\cdot \lambda(s_{N-2})e^{-\lambda(s_{N-2})(t_{N-1}-t_{N-2})}Pr(s_{N-1}|s_{N-2})} \]

Let $n(j|i)$ denote the number of $i$->$j$ transitions in $D$, and $n(i)$ the number of times $s_i$ occurs in $D$. Let $t(s_i)$ denote the total time the chain spends in state $s_i$. Then the MLEs are given by

\[ \hat{\lambda(s)} = \frac{n(s)}{t(s)},\hat{Pr(j|i)}=\frac{n(j|i)}{n(i)} \]

### Expected Hitting Time

The package provides a function `ExpectedTime` to calculate the average hitting time from one state to another. Let the final state be $j$; then for every state $i \in S$, where $S$ is the set of all states, assume the holding time parameter $q_{i} > 0$ for every $i \neq j$. Under these conditions, the expected hitting time is equal to the minimal non-negative solution vector $p$ of the system of linear equations:

\begin{equation}
\begin{cases}
p_{k} = 0 & k = j \\
-\sum_{l \in S} q_{kl}p_{l} = 1 & k \neq j
\end{cases}
\label{eq:EHT}
\end{equation}

### Probability at time t

The package provides a function `probabilityatT` to calculate the probability of every state at time $t$ according to a given `ctmc` object. Here we use the solution of Kolmogorov's backward equation, $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. Here $P(t)$ is the transition function at time $t$. The value $P(t)[i][j]$ describes the probability that the state at time $t$ is equal to $j$ if it was equal to $i$ at time $t=0$. It takes care of the case when the `ctmc` object has a generator represented by columns. If the initial state is not provided, the function returns the whole transition matrix $P(t)$.

### Examples

To create a CTMC object, you need to provide a valid generator matrix, say $Q$. The CTMC object has the following slots: states, generator, byrow, name (look at the documentation object for further details). Consider the following example in which we aim to model the transition of a molecule from the $\sigma$ state to the $\sigma^*$ state. When in the former state, if it absorbs sufficient energy, it can make the jump to the latter state and remains there for some time before transitioning back to the original state. Let us model this by a CTMC:

```{r rCtmcInit}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates,
                     byrow = byRow, generator = gen,
                     name = "Molecular Transition Model")
```

To generate random CTMC transitions, we provide an initial distribution of the states. This must be in the same order as the dimnames of the generator. The output can be returned either as a list or a data frame.

```{r rctmcRandom0}
statesDist <- c(0.8, 0.2)
rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist,
      out.type = "df", include.T0 = FALSE)
```

$n$ represents the number of samples to generate. There is an optional argument $T$ for `rctmc`. It represents the time of termination of the simulation.
To use this feature, set $n$ to a very high value, say `Inf` (since we do not know the number of transitions beforehand) and set $T$ accordingly.

```{r ctmcRandom1}
statesDist <- c(0.8, 0.2)
rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2)
```

To obtain the stationary distribution simply invoke the `steadyStates` function

```{r rctmcSteadyStates}
steadyStates(molecularCTMC)
```

For fitting, use the `ctmcFit` function. It returns the MLE values for the parameters along with the confidence intervals.

```{r rctmcFitting}
data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"),
             c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9))
ctmcFit(data)
```

One approach to obtain the generator matrix is to apply the `logm` function from the \pkg{expm} package on a transition matrix. Numerical issues may arise, see \cite{israel2001finding}. For example, applying the standard `method` ('Higham08') on `mcWeather` raises an error, whilst the alternative method (eigenvalue decomposition) is OK. The following code estimates the generator matrix of the `mcWeather` transition matrix.

```{r mcWeatherQ}
mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen')
mcWeatherQ
```

Therefore, the "half-day" transition probability for the mcWeather DTMC is

```{r mcWeatherHalfDay}
mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5)
mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,
                        name="Half Day Weather Transition Matrix")
mcWeatherHalfDay
```

The \pkg{ctmcd} package \citep{pkg:ctmcd} provides various functions to estimate the generator matrix (GM) of a CTMC process using different methods. The following code provides a way to join \pkg{markovchain} and \pkg{ctmcd} computations.

```{r ctmcd1}
if(requireNamespace(package='ctmcd', quietly = TRUE)) {
  require(ctmcd)
  require(expm)
  # defines a function to transform a GM into a TM
  gm_to_markovchain<-function(object, t=1) {
    if(!inherits(object, c("gm","matrix","Matrix")))
      stop("Error! Expecting either a matrix or a gm object")
    if (inherits(object, c("matrix","Matrix"))) generator_matrix<-object else
      generator_matrix<-as.matrix(object[["par"]])
    # must add importClassesFrom("markovchain",markovchain) in the NAMESPACE
    # must add importFrom(expm, "expm")
    transitionMatrix<-expm(generator_matrix*t)
    out<-as(transitionMatrix,"markovchain")
    return(out)
  }
  # loading ctmcd dataset
  data(tm_abs)
  gm0=matrix(1,8,8) # initializing
  diag(gm0)=0
  diag(gm0)=-rowSums(gm0)
  gm0[8,]=0
  gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) # estimating GM
  mc_at_2=gm_to_markovchain(object=gmem, t=2) # converting to TM at time 2
} else {
  warning('package ctmcd unavailable')
}
```

## Pseudo-Bayesian Estimation

\cite{Hu2002} shows an empirical quasi-Bayesian method to estimate transition matrices, given an empirical $\hat{P}$ transition matrix (estimated using the classical approach) and an a-priori estimate $Q$. In particular, each row of the matrix is estimated using the linear combination $\alpha \cdot Q+\left(1-\alpha\right) \cdot P$, where $\alpha$ is defined for each row as Equation \ref{eq:pseudobayes} shows

\begin{equation}
\left\{\begin{matrix}
\hat{\alpha_i}=\frac{\hat{K_i}}{v\left(i \right )+\hat{K_i}}\\
\hat{K_i}=\frac{v\left(i \right)^2 - \sum_{j}Y_{ij}^2}{\sum_{j}(Y_{ij}-v\left(i \right)*q_{ij})^2}
\end{matrix}\right.
\label{eq:pseudobayes} \end{equation} The following code returns the pseudo Bayesian estimate of the transition matrix: ```{r pseudobayes} pseudoBayesEstimator <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) } ``` We then apply it to the weather example: ```{r pseudobayes2} trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain") aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain") smallSample<-rmarkovchain(n=20,object = trueMc) smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample) pseudoBayesEstimator( raw = smallSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix biggerSample<-rmarkovchain(n=100,object = trueMc) biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample) pseudoBayesEstimator( raw = biggerSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix bigSample<-rmarkovchain(n=1000,object = trueMc) bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample) pseudoBayesEstimator( raw = bigSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix ``` ## Bayesian Estimation The \pkg{markovchain} package provides functionality for maximum a posteriori (MAP) estimation of the chain parameters (at the time of writing this document, only first order models are supported) by Bayesian inference. It also computes the probability of observing a new data set, given a (different) data set. This vignette provides the mathematical description for the methods employed by the package. ### Notation and set-up The data is denoted by $D$, the model parameters (transition matrix) by $\theta$. The object of interest is $P(\theta | D)$ (posterior density). $\mathcal{A}$ represents an alphabet class, each of whose members represent a state of the chain. Therefore \[D = s_0 s_1 ... s_{N-1}, s_t \in \mathcal{A}\] where $N$ is the length of the data set. Also, \[\theta = \{p(s|u), s \in \mathcal{A}, u \in \mathcal{A} \}\] where $\sum_{s \in \mathcal{A}} p(s|u) = 1$ for each $u \in \mathcal{A}$. Our objective is to find $\theta$ which maximizes the posterior. That is, if our solution is denoted by $\hat{\theta}$, then \[\hat{\theta} = \underset{\theta}{argmax}P(\theta | D)\] where the search space is the set of right stochastic matrices of dimension $|\mathcal{A}|x|\mathcal{A}|$. $n(u, s)$ denotes the number of times the word $us$ occurs in $D$ and $n(u)=\sum_{s \in \mathcal{A}}n(u, s)$. The hyper-parameters are similarly denoted by $\alpha(u, s)$ and $\alpha(u)$ respectively. ### Methods Given $D$, its likelihood conditioned on the observed initial state in D is given by \[P(D|\theta) = \prod_{s \in \mathcal{A}} \prod_{u \in \mathcal{A}} p(s|u)^{n(u, s)}\] Conjugate priors are used to model the prior $P(\theta)$. The reasons are two fold: 1. Exact expressions can be derived for the MAP estimates, expectations and even variances 2. 
Model order selection/comparison can be implemented easily (available in a future release of the package).

The hyper-parameters determine the form of the prior distribution, which is a product of Dirichlet distributions

\[P(\theta) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{\alpha(u, s) - 1} \Big\}\]

where $\Gamma(.)$ is the Gamma function. The hyper-parameters are specified using the `hyperparam` argument in the `markovchainFit` function. If this argument is not specified, then a default value of 1 is assigned to each hyper-parameter, resulting in the prior distribution of each chain parameter being uniform over $[0,1]$.

Given the likelihood and the prior as described above, the evidence $P(D)$ is simply given by

\[P(D) = \int P(D|\theta) P(\theta) d\theta\]

which simplifies to

\[ P(D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u))} \Big\} \]

Using Bayes' theorem, the posterior now becomes (thanks to the choice of conjugate priors)

\[ P(\theta | D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(n(u) + \alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{n(u, s) + \alpha(u, s) - 1} \Big\} \]

Since this is again a product of Dirichlet distributions, the marginal distribution of a particular parameter $P(s|u)$ of our chain is given by

\[ P(s|u) \sim Beta(n(u, s) + \alpha(u, s), n(u) + \alpha(u) - n(u, s) - \alpha(u, s)) \]

Thus, the MAP estimate $\hat{\theta}$ is given by

\[ \hat{\theta} = \Big\{ \frac{n(u, s) + \alpha(u, s) - 1}{n(u) + \alpha(u) - |\mathcal{A}|}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \]

The function also returns the expected value, given by

\[ \text{E}_{\text{post}} p(s|u) = \Big\{ \frac{n(u, s) + \alpha(u, s)}{n(u) + \alpha(u)}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \]

The variance is given by

\[ \text{Var}_{\text{post}} p(s|u) = \frac{n(u, s) + \alpha(u, s)}{(n(u) + \alpha(u))^2} \frac{n(u) + \alpha(u) - n(u, s) - \alpha(u, s)}{n(u) + \alpha(u) + 1} \]

The square root of this quantity is the standard error, which is returned by the function. The confidence intervals are constructed by computing the inverse of the beta integral.

### Predictive distribution

Given the old data set, the probability of observing new data is $P(D'|D)$ where $D'$ is the new data set. Let $m(u, s), m(u)$ denote the corresponding counts for the new data. Then,

\[ P(D'|D) = \int P(D' | \theta) P(\theta | D) d\theta \]

We already know the expressions for both quantities in the integral, and it turns out to be similar to evaluating the evidence

\[ P(D'|D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + m(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u) + m(u))} \Big\} \]

### Choosing the hyper-parameters

The hyper-parameters model the shape of the parameters' prior distribution. These must be provided by the user. The package offers functionality to translate a given prior belief transition matrix into the hyper-parameter matrix. It is assumed that this belief matrix corresponds to the mean value of the parameters.
Since the relation \[ \text{E}_{\text{prior}} p(s | u) = \frac{\alpha(u, s)}{\alpha(u)} \] holds, the function accepts as input the belief matrix as well as a scaling vector (serves as a proxy for $\alpha(.)$) and proceeds to compute $\alpha(., .)$. Alternatively, the function accepts a data sample and infers the hyper-parameters from it. Since the mode of a parameter (with respect to the prior distribution) is proportional to one less than the corresponding hyper-parameter, we set \[ \alpha(u, s) - 1 = m(u, s) \] where $m(u, s)$ is the $u\rightarrow s$ transition count in the data sample. This is regarded as a 'fake count' which helps $\alpha(u, s)$ to reflect knowledge of the data sample. ### Usage and examples ```{r loadAndDoExample} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") ``` For the purpose of this section, we shall continue to use the weather of days example introduced in the main vignette of the package (reproduced above for convenience). Let us invoke the fit function to estimate the MAP parameters with 92\% confidence bounds and hyper-parameters as shown below, based on the first 200 days of the weather data. Additionally, let us find out what the probability is of observing the weather data for the next 165 days. The usage would be as follows ```{r MAPFit} hyperMatrix<-matrix(c(1, 1, 2, 3, 2, 1, 2, 2, 3), nrow = 3, byrow = TRUE, dimnames = list(weatherStates,weatherStates)) markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix) ``` The results should not change after permuting the dimensions of the matrix. ```{r MAPFit2} hyperMatrix2<- hyperMatrix[c(2,3,1), c(2,3,1)] markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix2) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix2) ``` Note that the predictive probability is very small. However, this can be useful when comparing model orders. Suppose we have an idea of the (prior) transition matrix corresponding to the expected value of the parameters, and have a data set from which we want to deduce the MAP estimates. We can infer the hyper-parameters from this known transition matrix itself, and use this to obtain our MAP estimates. ```{r inferHyperparam} inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) ``` Alternatively, we can use a data sample to infer the hyper-parameters. ```{r inferHyperparam2} inferHyperparam(data = weathersOfDays[1:15]) ``` In order to use the inferred hyper-parameter matrices, we do ```{r inferHyperparam3} hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) hyperMatrix3 <- hyperMatrix3$scaledInference hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15]) hyperMatrix4 <- hyperMatrix4$dataInference ``` Now we can safely use `hyperMatrix3` and `hyperMatrix4` with `markovchainFit` (in the `hyperparam` argument). Supposing we don't provide any hyper-parameters, then the prior is uniform. This is the same as maximum likelihood. 
```{r MAPandMLE} data(preproglucacon) preproglucacon <- preproglucacon[[2]] MLEest <- markovchainFit(preproglucacon, method = "mle") MAPest <- markovchainFit(preproglucacon, method = "map") MLEest$estimate MAPest$estimate ``` # Applications {#sec:applications} This section shows applications of DTMC in various fields. ## Weather forecasting {#app:weather} Markov chains provide a simple model to predict the next day's weather given the current meteorological condition. The first application herewith shown is the "Land of Oz example" from \cite{landOfOz}, the second is the "Alofi Island Rainfall" from \cite{averyHenderson}. ### Land of Oz {#sec:wfLandOfOz} The Land of Oz is acknowledged not to have ideal weather conditions at all: the weather is snowy or rainy very often and, once more, there are never two nice days in a row. Consider three weather states: rainy, nice and snowy. Let the transition matrix be as in the following: ```{r weatPred1} mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"), transitionMatrix = matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25,0.25,0.5), byrow = T, nrow = 3)) ``` Given that today it is a nice day, the corresponding stochastic row vector is $w_{0}=(0\:,1\:,0)$ and the forecast after 1, 2 and 3 days are given by ```{r weatPred2} W0 <- t(as.matrix(c(0, 1, 0))) W1 <- W0 * mcWP; W1 W2 <- W0 * (mcWP ^ 2); W2 W3 <- W0 * (mcWP ^ 3); W3 ``` As can be seen from $w_{1}$, if in the Land of Oz today is a nice day, tomorrow it will rain or snow with probability 1. One week later, the prediction can be computed as ```{r weatPred3} W7 <- W0 * (mcWP ^ 7) W7 ``` The steady state of the chain can be computed by means of the `steadyStates` method. ```{r weatPred4} q <- steadyStates(mcWP) q ``` Note that, from the seventh day on, the predicted probabilities are substantially equal to the steady state of the chain and they don't depend from the starting point, as the following code shows. ```{r weatPred5} R0 <- t(as.matrix(c(1, 0, 0))) R7 <- R0 * (mcWP ^ 7); R7 S0 <- t(as.matrix(c(0, 0, 1))) S7 <- S0 * (mcWP ^ 7); S7 ``` ### Alofi Island Rainfall {#sec:wfAlofi} Alofi Island daily rainfall data were recorded from January 1st, 1987 until December 31st, 1989 and classified into three states: "0" (no rain), "1-5" (from non zero until 5 mm) and "6+" (more than 5mm). The corresponding dataset is provided within the \pkg{markovchain} package. ```{r Alofi1} data("rain", package = "markovchain") table(rain$rain) ``` The underlying transition matrix is estimated as follows. ```{r Alofi2} mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate mcAlofi ``` The long term daily rainfall distribution is obtained by means of the `steadyStates` method. ```{r Alofi3} steadyStates(mcAlofi) ``` ## Finance and Economics {#app:fin} Other relevant applications of DTMC can be found in Finance and Economics. ### Finance {#fin:fin} Credit ratings transitions have been successfully modeled with discrete time Markov chains. Some rating agencies publish transition matrices that show the empirical transition probabilities across credit ratings. The example that follows comes from \pkg{CreditMetrics} \proglang{R} package \citep{CreditMetricsR}, carrying Standard \& Poor's published data. 
```{r ratings1} rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D") creditMatrix <- matrix( c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01, 0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01, 0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06, 0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18, 0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06, 0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20, 0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79, 0, 0, 0, 0, 0, 0, 0, 100 )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE) ``` It is easy to convert such matrices into `markovchain` objects and to perform some analyses ```{r ratings2} creditMc <- new("markovchain", transitionMatrix = creditMatrix, name = "S&P Matrix") absorbingStates(creditMc) ``` ### Economics {#fin:ec} For a recent application of \pkg{markovchain} in Economic, see \cite{manchesterR}. A dynamic system generates two kinds of economic effects \citep{bardPpt}: 1. those incurred when the system is in a specified state, and 2. those incurred when the system makes a transition from one state to another. Let the monetary amount of being in a particular state be represented as a m-dimensional column vector $c^{\rm{S}}$, while let the monetary amount of a transition be embodied in a $C^{R}$ matrix in which each component specifies the monetary amount of going from state i to state j in a single step. Henceforth, Equation \@ref(eq:cost) represents the monetary of being in state $i$. \begin{equation} {c_i} = c_i^{\rm{S}} + \sum\limits_{j = 1}^m {C_{ij}^{\rm{R}}} {p_{ij}}. \label{eq:cost} \end{equation} Let $\bar c = \left[ c_i \right]$ and let $e_i$ be the vector valued 1 in the initial state and 0 in all other, then, if $f_n$ is the random variable representing the economic return associated with the stochastic process at time $n$, Equation \@ref(eq:return) holds: \begin{equation} E\left[ {{f_n}\left( {{X_n}} \right)|{X_0} = i} \right] = {e_i}{P^n}\bar c. \label{eq:return} \end{equation} The following example assumes that a telephone company models the transition probabilities between customer/non-customer status by matrix $P$ and the cost associated to states by matrix $M$. ```{r economicAnalysis1} statesNames <- c("customer", "non customer") P <- markovchain:::zeros(2); P[1, 1] <- .9; P[1, 2] <- .1; P[2, 2] <- .95; P[2, 1] <- .05; rownames(P) <- statesNames; colnames(P) <- statesNames mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company") M <- markovchain:::zeros(2); M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0 ``` If the average revenue for existing customer is +100, the cost per state is computed as follows. ```{r economicAnalysis2} c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,] c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,] ``` For an existing customer, the expected gain (loss) at the fifth year is given by the following code. ```{r economicAnalysis3} as.numeric((c(1, 0)* mcP ^ 5) %*% (as.vector(c(c1, c2)))) ``` ## Actuarial science {#app:act} Markov chains are widely applied in the field of actuarial science. Two classical applications are policyholders' distribution across Bonus Malus classes in Motor Third Party Liability (MTPL) insurance (Section \@ref(sec:bm)) and health insurance pricing and reserving (Section \@ref(sec:hi)). ### MPTL Bonus Malus {#sec:bm} Bonus Malus (BM) contracts grant the policyholder a discount (enworsen) as a function of the number of claims in the experience period. 
The discount (enworsen) is applied on a premium that already allows for known (a priori) policyholder characteristics \citep{denuit2007actuarial} and it usually depends on vehicle, territory, the demographic profile of the policyholder, and policy coverage deep (deductible and policy limits).\\ Since the proposed BM level depends on the claim on the previous period, it can be modeled by a discrete Markov chain. A very simplified example follows. Assume a BM scale from 1 to 5, where 4 is the starting level. The evolution rules are shown in Equation \ref{eq:BM}: \begin{equation} bm_{t + 1} = \max \left( {1,bm_{t} - 1} \right)*\left( {\tilde N = 0} \right) + \min \left( {5,bm_{t} + 2*\tilde N} \right)*\left( {\tilde N \ge 1} \right). \label{eq:BM} \end{equation} The number of claim $\tilde N$ is a random variable that is assumed to be Poisson distributed. ```{r bonusMalus1} getBonusMalusMarkovChain <- function(lambda) { bmMatr <- markovchain:::zeros(5) bmMatr[1, 1] <- dpois(x = 0, lambda) bmMatr[1, 3] <- dpois(x = 1, lambda) bmMatr[1, 5] <- 1 - ppois(q = 1, lambda) bmMatr[2, 1] <- dpois(x = 0, lambda) bmMatr[2, 4] <- dpois(x = 1, lambda) bmMatr[2, 5] <- 1 - ppois(q = 1, lambda) bmMatr[3, 2] <- dpois(x = 0, lambda) bmMatr[3, 5] <- 1 - dpois(x=0, lambda) bmMatr[4, 3] <- dpois(x = 0, lambda) bmMatr[4, 5] <- 1 - dpois(x = 0, lambda) bmMatr[5, 4] <- dpois(x = 0, lambda) bmMatr[5, 5] <- 1 - dpois(x = 0, lambda) stateNames <- as.character(1:5) out <- new("markovchain", transitionMatrix = bmMatr, states = stateNames, name = "BM Matrix") return(out) } ``` Assuming that the a-priori claim frequency per car-year is 0.05 in the class (being the class the group of policyholders that share the same common characteristics), the underlying BM transition matrix and its underlying steady state are as follows. ```{r bonusMalus2} bmMc <- getBonusMalusMarkovChain(0.05) as.numeric(steadyStates(bmMc)) ``` If the underlying BM coefficients of the class are 0.5, 0.7, 0.9, 1.0, 1.25, this means that the average BM coefficient applied on the long run to the class is given by ```{r bonusMalus3} sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25)) ``` This means that the average premium paid by policyholders in the portfolio almost halves in the long run. ### Health insurance example {#sec:hi} Actuaries quantify the risk inherent in insurance contracts evaluating the premium of insurance contract to be sold (therefore covering future risk) and evaluating the actuarial reserves of existing portfolios (the liabilities in terms of benefits or claims payments due to policyholder arising from previously sold contracts), see \cite{deshmukh2012multiple} for details. An applied example can be performed using the data from \cite{de2016assicurazioni} that has been saved in the `exdata` folder. ```{r healthIns6} ltcDemoPath<-system.file("extdata", "ltdItaData.txt", package = "markovchain") ltcDemo<-read.table(file = ltcDemoPath, header=TRUE, sep = ";", dec = ".") head(ltcDemo) ``` The data shows the probability of transition between the state of (A)ctive, to (I)ll and Dead. It is easy to complete the transition matrix. ```{r healthIns7} ltcDemo<-transform(ltcDemo, pIA=0, pII=1-pID, pDD=1, pDA=0, pDI=0) ``` Now we build a function that returns the transition during the $t+1$ th year, assuming that the subject has attained year $t$. 
```{r healthIns8}
possibleStates<-c("A","I","D")
getMc4Age<-function(age) {
  transitionsAtAge<-ltcDemo[ltcDemo$age==age,]
  myTransMatr<-matrix(0, nrow=3,ncol = 3,
                      dimnames = list(possibleStates, possibleStates))
  myTransMatr[1,1]<-transitionsAtAge$pAA[1]
  myTransMatr[1,2]<-transitionsAtAge$pAI[1]
  myTransMatr[1,3]<-transitionsAtAge$pAD[1]
  myTransMatr[2,2]<-transitionsAtAge$pII[1]
  myTransMatr[2,3]<-transitionsAtAge$pID[1]
  myTransMatr[3,3]<-1
  myMc<-new("markovchain", transitionMatrix = myTransMatr,
            states = possibleStates,
            name = paste("Age",age,"transition matrix"))
  return(myMc)
}
```

Since transitions are not homogeneous across ages, we use a `markovchainList` object to describe the transition probabilities for a guy starting at age 100.

```{r healthIns8-prob}
getFullTransitionTable<-function(age){
  ageSequence<-seq(from=age, to=120)
  k=1
  myList=list()
  for ( i in ageSequence) {
    mc_age_i<-getMc4Age(age = i)
    myList[[k]]<-mc_age_i
    k=k+1
  }
  myMarkovChainList<-new("markovchainList", markovchains = myList,
                         name = paste("TransitionsSinceAge", age, sep = ""))
  return(myMarkovChainList)
}
transitionsSince100<-getFullTransitionTable(age=100)
```

We can use such transitions to simulate ten life trajectories for a guy that begins "active" (A) aged 100:

```{r healthIns9}
rmarkovchain(n = 10, object = transitionsSince100,
             what = "matrix", t0 = "A", include.t0 = TRUE)
```

Let's consider 1000 simulated life trajectories for a healthy guy aged 80. We can compute the expected time spent disabled, starting active at age 80.

```{r healthIns10}
transitionsSince80<-getFullTransitionTable(age=80)
lifeTrajectories<-rmarkovchain(n=1e3, object=transitionsSince80,
                               what="matrix",t0="A",include.t0=TRUE)
temp<-matrix(0,nrow=nrow(lifeTrajectories),ncol = ncol(lifeTrajectories))
temp[lifeTrajectories=="I"]<-1
expected_period_disabled<-mean(rowSums(temp))
expected_period_disabled
```

Assuming that the health insurance will pay a benefit of 12000 per year of disability and that the real interest rate is 0.02, we can compute the lump sum premium at 80.

```{r healthIns11}
mean(rowMeans(12000*temp%*%( matrix((1+0.02)^-seq(from=0, to=ncol(temp)-1)))))
```

## Sociology {#app:sociology}

Markov chains have been actively used to model progressions and regressions between social classes. The first study was performed by \cite{glassHall}, while a more recent application can be found in \cite{blandenEtAlii}. The table that follows shows the income quartile of the father when the son was 16 (in 1984) and the income quartile of the son when aged 30 (in 2000) for the 1970 cohort.

```{r blandenEtAlii}
data("blanden")
mobilityMc <- as(blanden, "markovchain")
mobilityMc
```

The underlying transition graph is plotted in Figure \@ref(fig:mobility).

```{r mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."}
plot(mobilityMc, main = '1970 mobility',vertex.label.cex = 2,
     layout = layout.fruchterman.reingold)
```

The steady state distribution is computed as follows. Since transitions across quartiles are shown, the probability function is evenly 0.25.

```{r blandenEtAlii3}
round(steadyStates(mobilityMc), 2)
```

## Genetics and Medicine {#sec:gen}

This section contains two examples: the first shows the use of Markov chain models in genetics, the second shows an application of Markov chains in modelling disease dynamics.

### Genetics {#sec:genetics}

\cite{averyHenderson} discusses the use of Markov chains in modelling the preproglucacon gene protein bases sequence.
The `preproglucacon` dataset in \pkg{markovchain} contains the dataset shown in the package. ```{r preproglucacon1} data("preproglucacon", package = "markovchain") ``` It is possible to model the transition probabilities between bases as shown in the following code. ```{r preproglucacon2} mcProtein <- markovchainFit(preproglucacon$preproglucacon, name = "Preproglucacon MC")$estimate mcProtein ``` ### Medicine {#sec:medicine} Discrete-time Markov chains are also employed to study the progression of chronic diseases. The following example is taken from \cite{craigSendi}. Starting from six month follow-up data, the maximum likelihood estimation of the monthly transition matrix is obtained. This transition matrix aims to describe the monthly progression of CD4-cell counts of HIV infected subjects. ```{r epid1} craigSendiMatr <- matrix(c(682, 33, 25, 154, 64, 47, 19, 19, 43), byrow = T, nrow = 3) hivStates <- c("0-49", "50-74", "75-UP") rownames(craigSendiMatr) <- hivStates colnames(craigSendiMatr) <- hivStates craigSendiTable <- as.table(craigSendiMatr) mcM6 <- as(craigSendiTable, "markovchain") mcM6@name <- "Zero-Six month CD4 cells transition" mcM6 ``` As shown in the paper, the second passage consists in the decomposition of $M_{6}=V \cdot D \cdot V^{-1}$ in order to obtain $M_{1}$ as $M_{1}=V \cdot D^{1/6} \cdot V^{-1}$ . ```{r epid2} eig <- eigen(mcM6@transitionMatrix) D <- diag(eig$values) ``` ```{r epid3} V <- eig$vectors V %*% D %*% solve(V) d <- D ^ (1/6) M <- V %*% d %*% solve(V) mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates) ``` # Discussion, issues and future plans The \pkg{markovchain} package has been designed in order to provide easily handling of DTMC and communication with alternative packages. The package has known several improvements in the recent years: many functions added, porting the software in Rcpp \pkg{Rcpp} package \citep{RcppR} and many methodological improvements that have improved the software reliability. # Acknowledgments {#sec:aknowledgements} The package was selected for Google Summer of Code 2015 support. The authors wish to thank Michael Cole, Tobi Gutman and Mildenberger Thoralf for their suggestions and bug checks. A final thanks also to Dr. Simona C. Minotti and Dr. Mirko Signorelli for their support in drafting this version of the vignettes. \clearpage # References # markovchain/vignettes/gsoc_2017_additions.Rmd0000644000176200001440000005216314430771726020714 0ustar liggesusers--- title: "Google Summer of Code 2017 Additions" author: "Vandit Jain" date: "August 2017" output: rmarkdown::pdf_document bibliography: markovchainBiblio.bib vignette: > %\VignetteIndexEntry{Google Summer of Code 2017 Additions} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} pkgdown: as_is: true extension: pdf --- # Expected Hitting Time using CTMC The package provides `ExpectedTime` function to calculate average hitting time from one state to another. Let the final state be j, then for every state $i \in I$, where $I$ is the set of all possible states and holding time $q_{i} > 0$ for every $i \neq j$. 
Assuming the conditions to be true, the expected hitting time is equal to the minimal non-negative solution vector $p$ of the system of linear equations [@NorrisBook]:

\begin{equation}
\begin{array}{lcr}
p_{k} = 0 & k = j \\
-\sum_{l \in I} q_{kl}p_{l} = 1 & k \neq j
\end{array}
\label{eq:EHT}
\end{equation}

For example, consider the following continuous time markovchain:

``` {r,message = FALSE}
library(markovchain)
states <- c("a","b","c","d")
byRow <- TRUE
gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0),
              nrow = 4,byrow = byRow, dimnames = list(states,states))
ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc")
```

The generator matrix of the ctmc is:
\[
M = \left(\begin{array}{cccc}
-1 & 1/2 & 1/2 & 0\\
1/4 & -1/2 & 0 & 1/4\\
1/6 & 0 & -1/3 & 1/6\\
0 & 0 & 0 & 0
\end{array}\right)
\]

Now, to calculate the expected hitting time the process will take to hit state $d$ if we start from $a$, we apply the `ExpectedTime` function. The `ExpectedTime` function takes four inputs, namely a `ctmc` class object, the initial state $i$, the final state $j$ for which we have to calculate the expected hitting time, and a logical parameter indicating whether to use the Rcpp implementation. By default, the function uses Rcpp as it is faster and takes less time.

``` {r}
ExpectedTime(ctmc,1,4)
```

We find that the expected hitting time for the process to hit state $d$ is 7 units in this case.

# Calculating Probability at time T using ctmc

The package provides a function `probabilityatT` to calculate the probability of every state according to a given `ctmc` object. Kolmogorov's backward equation gives us a relation between the transition matrix at any time $t$ and the generator matrix [@dobrow2016introduction]:

\begin{equation}
P'(t) = QP(t)
\end{equation}

Here we use the solution of this differential equation, $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. In this equation, $P(t)$ is the transition function at time $t$. The value $P(t)[i][j]$ describes the conditional probability that the state at time $t$ is equal to $j$ if it was equal to $i$ at time $t=0$. It takes care of the case when the `ctmc` object has a generator represented by columns. If the initial state is not provided, the function returns the whole transition matrix $P(t)$.

Also worth mentioning is that the function is implemented using Rcpp as well, which can be used to lessen the time of computation. It is used by default.

Next, we consider both the case where the initial state is given and the case where it is not. In the first case, the function takes two inputs: the first of them is an object of the S4 class 'ctmc' and the second is the final time $t$.

``` {r}
probabilityatT(ctmc,1)
```

Here we get an output in the form of a transition matrix. If we take the second case, i.e. providing an initial state:

``` {r}
probabilityatT(ctmc,1,1)
```

In this case we get the probabilities corresponding to every state. This also includes the probability that the process is in the same state $a$ after time $t=1$.

# Plotting generator matrix of continuous-time markovchains

The package provides a `plot` function for plotting a generator matrix $Q$ in the form of a directed graph where every possible state is assigned a node. Edges connecting these nodes are weighted. The weight of the edge going from state $i$ to state $j$ is equal to the value $Q_{ij}$. This gives a picture of the generator matrix.

For example, we build a ctmc-class object to plot it.
``` {r}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates,
                     byrow = byRow, generator = gen,
                     name = "Molecular Transition Model")
```

Now if we plot this object we get the following graph:

``` {r}
plot(molecularCTMC)
```

The figure shown is built using the $igraph$ package. The package also provides options for plotting the graph using the $diagram$ and $DiagrammeR$ packages. A plot using these packages can be built with the following commands:

``` {r}
if(requireNamespace(package='diagram', quietly = TRUE)) {
  plot(molecularCTMC,package = "diagram")
} else {
  print("diagram package unavailable")
}
```

Similarly, one can easily replace the $diagram$ package with $DiagrammeR$.

# Imprecise Continuous-Time Markov chains

Continuous-time Markov chains are mathematical models that are used to describe the state-evolution of dynamical systems under stochastic uncertainty. However, building models using continuous time markovchains takes into consideration a number of assumptions which may not be realistic for the domain of application; in particular, the ability to provide exact numerical parameter assessments, and the applicability of time-homogeneity and the eponymous Markov property. Hence we take ICTMCs into consideration.

More technically, an ICTMC is a set of “precise” continuous-time finite-state stochastic processes, and rather than computing expected values of functions, we seek to compute lower expectations, which are tight lower bounds on the expectations that correspond to such a set of “precise” models.

## Types of ICTMCs

For any non-empty bounded set of rate matrices $L$, and any non-empty set $M$ of probability mass functions on $X$, we define the following three sets of stochastic processes that are jointly consistent with $L$ and $M$:

* $P^{W}_{L,M}$ is the consistent set of all well-behaved stochastic processes;
* $P^{WM}_{L,M}$ is the consistent set of all well-behaved Markov chains;
* $P^{WHM}_{L,M}$ is the consistent set of all well-behaved homogeneous Markov chains[@ictmcpaper].

From a practical point of view, after having specified a (precise) stochastic process, one is typically interested in the expected value of some function of interest, or the probability of some event. Similarly, in this work, our main objects of consideration will be the lower probabilities that correspond to the ICTMCs.

## Lower Transition Rate Operators for ICTMCs

A map $Q_{l}$ from $L(X)$ to $L(X)$ is called a lower transition rate operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]:

1. $[Q_{l}\mu](x) = 0$
2. $[Q_{l}I_{y}](x) \geq 0$ for all $y \in X$ such that $x \neq y$
3. $[Q_{l}(f + g)](x)\geq [Q_{l}f](x) + [Q_{l}g](x)$
4. $[Q_{l}(\lambda f)](x) = \lambda [Q_{l}f](x)$

## Lower Transition Operators

A map $T_{l}$ from $L (X )$ to $L (X )$ is called a lower transition operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]:

1. $[T_{l} f](x) \geq \min(f(y) : y \in X)$
2. $[T_{l}(f +g)](x) \geq [T_{l} f](x)+[T_{l}g](x)$
3. $[T_{l}(\lambda f)](x) = \lambda [T_{l} f](x)$

## ImpreciseprobabilityatT function

Now let us turn to the practical purpose of using ICTMC classes.
ICTMC classes in this package represent a generator defined in such a way that every row of the generator, corresponding to a state in the process, is governed by a separate variable. As defined earlier, an imprecise continuous time markovchain is a set of many precise CTMCs. Hence this representation of a set of precise CTMCs can be used to calculate the transition probability at some time in the future.

This can be seen as an analogy with the `probabilityatT` function, which is used to calculate the transition function at some later time $t$ using the generator matrix. For every generator matrix, we have a corresponding transition function. Similarly, for every lower transition rate operator of an ICTMC, we have a corresponding lower transition operator denoted by $L_{t}^{s}$. Here $t$ is the initial time and $s$ is the final time.

Now we mention a proposition [@ictmcpaper] which states that: let $Q_{l}$ be a lower transition rate operator, choose any two times $t$ and $s$, both greater than 0, such that $t \leq s$, and let $L_{t}^{s}$ be the lower transition operator corresponding to $Q_{l}$. Then for any $f \in L(X)$ and $\epsilon \in R_{>0}$, if we choose any $n \in N$ such that:

\[n \geq max((s-t)*||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\]

with $||f||_{v} := \max f - \min f$, we are guaranteed that [@ictmcpaper]

\[ ||L_{t}^{s} - \prod_{i=1}^{n}(I + \Delta Q_{l}) || \leq \epsilon \]

with $\Delta := \frac{s-t}{n}$.

Simply put, this equation tells us that if we can compute $Q_{l}g$ for all $g \in L(X)$, then we can also approximate the quantity $L_{t}^{s}$ to arbitrary precision, for any given $f \in L(X)$.

To explain this approximate calculation, I will take a detailed example of a process containing two states, healthy and sick, hence $X = (healthy, sick)$. If we represent it in the form of an ICTMC, we get:

\[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) \]

for some $a,b \in R_{\geq 0}$. The parameter $a$ here is the rate at which a healthy person becomes sick. Technically, this means that if a person is healthy at time $t$, the probability that he or she will be sick at time $t +\Delta$, for small $\Delta$, is very close to $\Delta a$. More intuitively, if we take the time unit to be one week, it means that he or she will, on average, become sick after $\frac{1}{a}$ weeks. The parameter $b$ is the rate at which a sick person becomes healthy again, and has a similar interpretation.

Now, to completely represent the ICTMC, we take an example and write the generator as:

\[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) : a \in [\frac{1}{52},\frac{3}{52}],b \in [\frac{1}{2},2] \]

Now suppose we know the initial state of the patient to be sick; hence this is represented in the form of a function by:

\[ I_{s} = \left(\begin{matrix} 0 \\ 1 \end{matrix}\right) \]

We observe that $||I_{s}|| = 1$. Now, to use the proposition mentioned above, we use the definition to calculate the lower transition rate operator $Q_{l}$. Next we calculate the norm of the lower transition rate operator and use it in the proposition. Also, we take the value of $\epsilon$ to be 0.001.

Using the proposition we can come up with an algorithm for calculating the probability at any time $s$, given the state at initial time $t$ and an ICTMC generator [@ictmcpaper]. The algorithm is as follows:

**Input**: A lower transition rate operator $Q$, two time points $t,s$ such that $t \leq s$, a function $f \in L(X)$ and a maximum numerical error $\epsilon \in R_{>0}$.

**Algorithm**: 1.
1. $n = \max\left((s-t)||Q||,\ \frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_{v}\right)$
2. $\Delta = \frac{s-t}{n}$
3. $g_{0} = I_{s}$
4. for $i \in \{1,\ldots,n\}$ do $g_{i} = g_{i-1} + \Delta Q_{l}g_{i-1}$
5. end for
6. return $g_{n}$

**Output**: The conditional probability vector after time $t$ with error $\epsilon$.

Hence, after applying the algorithm to the above example we get the following result: $g_{n} = 0.0083$ if the final state is $healthy$ and $g_{n} = 0.141$ if the final state is $sick$. The probability calculated has an error of at most $\epsilon$, i.e. $0.001$. Now we run the algorithm on the example through R code.

``` {r}
states <- c("n","y")
Q <- matrix(c(-1,1,1,-1), nrow = 2, byrow = TRUE, dimnames = list(states,states))
range <- matrix(c(1/52,3/52,1/2,2), nrow = 2, byrow = TRUE)
name <- "testictmc"
ictmc <- new("ictmc", states = states, Q = Q, range = range, name = name)
impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE)
```

The probabilities we get are within an error of $10^{-3}$.

# Continuous-time markovchain generator using a frequency matrix

The package provides the `freq2Generator` function. It takes in a matrix representing relative frequency values, along with the time taken, to provide a continuous-time markovchain generator matrix. Here, the frequency matrix is a square matrix, with dimension equal to the number of possible states, describing the number of transitions from a state $i$ to $j$ in time $t$, where $t$ is another parameter to be provided to the function. The function also allows the user to choose among three methods for calculation of the generator matrix [@freqArticle]. It requires the [@pkg:ctmcd] package. The three methods are as follows:

1. Quasi Optimization - "QO"
2. Diagonal Adjustment - "DA"
3. Weighted Adjustment - "WA"

See the reference for details about the methods. Here is an example matrix on which the `freq2Generator` function is run:

``` {r}
if(requireNamespace(package='ctmcd', quietly = TRUE)) {
  sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE)
  sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1))
  freq2Generator(sample_rel,1)
} else {
  print('ctmcd unavailable')
}
```

# Committor of a markovchain

Consider sets of states A and B comprising states from a markovchain with transition matrix P. The committor vector of a markovchain with respect to sets A and B gives the probability that the process will hit a state from set A before any state from set B. The committor vector $u$ can be calculated by solving the following system of linear equations[@committorlink]:

$$
\begin{array}{l}
Lu(x) = 0, \ x \notin A \cup B \\
u(x) = 1, \ x \in A \\
u(x) = 0, \ x \in B
\end{array}
$$

where $L = P - I$. Now we apply the method to an example:

``` {r eval=FALSE}
transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5)
object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc")
committorAB(object,c(5),c(3))
```

Here we get the probability that the process will hit state "e" before state "c", for the different initial states.

# First Passage probability for a set of states

Computation of the first passage time for individual states is already implemented in the package. The `firstPassageMultiple` function provides a method to get the first passage probability for a given set of states.
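The quantity being computed can also be checked by hand: make the states of the target set absorbing and record the new probability mass that enters the set at each step. The following base-R sketch (an illustration, not the package's internal implementation) does this for the same transition matrix used in the example below:

``` {r}
P <- matrix(c(0.2, 0.5, 0.3,
              0.5, 0.1, 0.4,
              0.1, 0.8, 0.1), nrow = 3, byrow = TRUE,
            dimnames = list(c("a", "b", "c"), c("a", "b", "c")))
target <- c("b", "c")
Pabs <- P
Pabs[target, ] <- 0
diag(Pabs[target, target]) <- 1   # make the target states absorbing
v <- c(1, 0, 0)                   # start in state "a"
cumulative <- numeric(4)
for (n in 1:4) {
  v <- as.vector(v %*% Pabs)
  cumulative[n] <- sum(v[colnames(P) %in% target])
}
diff(c(0, cumulative))            # probability of first hitting the set at step n
```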
Consider this example markovchain object:

``` {r}
statesNames <- c("a", "b", "c")
testmarkov <- new("markovchain", states = statesNames, transitionMatrix =
                    matrix(c(0.2, 0.5, 0.3,
                             0.5, 0.1, 0.4,
                             0.1, 0.8, 0.1), nrow = 3, byrow = TRUE,
                           dimnames = list(statesNames, statesNames)
                    ))
```

Now we apply the `firstPassageMultiple` function to calculate the first passage probabilities into the set of states $\{"b", "c"\}$ when the initial state is $"a"$.

``` {r}
firstPassageMultiple(testmarkov,"a",c("b","c"),4)
```

This shows, for each number of steps $n$, the probability that the process starting from $"a"$ hits a state from the set $\{"b", "c"\}$ for the first time at step $n$.

# Joint PDF of number of visits to the various states of a markovchain

The package provides a function `noofVisitsDist` that returns the PDF of the number of visits to the various states of the discrete time markovchain during the first N steps, given the initial state of the process. We will take an example to see how to use the function on a `markovchain-class` object:

``` {r}
transMatr <- matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
simpleMc <- new("markovchain", states=c("a","b"),
                transitionMatrix=transMatr,
                name="simpleMc")
noofVisitsDist(simpleMc,5,"a")
```

The output shows the probabilities related to the various states of the process.

# Expected Rewards for a markovchain

The package provides a function `expectedRewards` that returns a vector of expected rewards for different initial states. The user provides the reward values as a vector $r$ of length equal to the number of states, with a value corresponding to every state. Given a transition matrix $[P]$, the vector of expected rewards $v$ after $n$ transitions satisfies the following equation[@GallagerBook]:

$$v[n] = r + [P]v[n-1]$$

Applying this equation to a markovchain-class object:

``` {r}
transMatr <- matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE)
simpleMc <- new("markovchain", states=c("a","b"),
                transitionMatrix=transMatr)
expectedRewards(simpleMc,1,c(0,1))
```

## Expected Rewards for a set of states in a markovchain process

The package provides a function `expectedRewardsBeforeHittingA` that returns the expected first passage rewards $E$, given rewards corresponding to every state and an initial state. That is, the function returns the expected reward for a given initial state $s_{0}$, number of transitions $n$ and set of states $A$, under the constraint that the process does not hit any of the states belonging to $A$. $S$ is the set of all possible states. The function uses the following equation:

$$E = \sum_{i=1}^{n}{1_{s_{0}}P_{S-A}^{i}R_{S-A}}$$

Here $1_{s_{0}} = [0,0,\ldots,0,1,0,\ldots,0]$, with the 1 in the $s_{0}$ position, and $R_{S-A}$ is the rewards vector restricted to the states in $S-A$.

# Checking irreducibility of a CTMC

The package provides a function `is.CTMCirreducible` that returns a Boolean value stating whether the ctmc object is irreducible. We know that a continuous-time markovchain is irreducible if and only if its embedded chain is irreducible[@Sigman].
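This is exactly the criterion used internally by `is.CTMCirreducible`: the embedded (jump) chain is obtained with `generatorToTransitionMatrix` and then tested with `is.irreducible`. A short sketch of that intermediate step, using the molecular generator from earlier in this vignette:

``` {r}
gen <- matrix(c(-3, 3, 1, -1), nrow = 2, byrow = TRUE,
              dimnames = list(c("sigma", "sigma_star"), c("sigma", "sigma_star")))
embedded <- generatorToTransitionMatrix(gen)   # transition matrix of the embedded chain
embedded
is.irreducible(new("markovchain", states = c("sigma", "sigma_star"),
                   transitionMatrix = embedded))
```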
We demonstrate the function with an example:

``` {r}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow,
                     generator = gen, name = "Molecular Transition Model")
is.CTMCirreducible(molecularCTMC)
```

# Simulation of Higher Order Multivariate Markovchains

The package provides the `predictHommc` function. This function provides a simulation system for higher order multivariate markovchains. The function assumes that the state probability distribution of the $j$th sequence at time $r+1$ depends on the state probability distribution of all the sequences at the $n$ previous moments of time, i.e. $t = r$ to $t = r-n+1$. Hence the proposed model takes the mathematical form[@ching2008higher]:

$$ X_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^n{\lambda_{jk}^{(h)}P_{h}^{(jk)}X_{r-h+1}^{(k)}}, \ \ \ j = 1,2,\ldots,s, \ \ r = n-1,n,\ldots $$

with initial values $X_{0}^{(k)},X_{1}^{(k)},\ldots,X_{n-1}^{(k)} \ (k = 1,2,\ldots,s)$. Here, $\lambda_{jk}^{(h)} \geq 0, \ 1 \leq j,k \leq s, \ 1 \leq h \leq n$ and $\sum_{k=1}^{s}\sum_{h=1}^{n}{\lambda_{jk}^{(h)}} = 1, \ \ \ j = 1,2,\ldots,s.$

Now we run an example on a sample hommc object, simulating the next 3 steps using the `predictHommc` function. The function provides a choice of entering initial states according to the hommc object. In case the user does not enter initial states, the function takes all initial states to be the first state from the set of states.

``` {r}
if (requireNamespace("Rsolnp", quietly = TRUE)) {
  statesName <- c("a", "b")
  P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName))
  P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2)
  P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2)
  P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2)
  P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2)
  Lambda <- c(0.8, 0.2, 0.3, 0.7)
  ob <- new("hommc", order = 1, states = statesName, P = P,
            Lambda = Lambda, byrow = FALSE, name = "FOMMC")
  predictHommc(ob,3)
} else {
  print("Rsolnp unavailable")
}
```

# Check Time Reversibility of Continuous-time markovchains

A continuous-time markovchain with generator $Q$ and stationary distribution $\pi$ is said to be time reversible if[@dobrow2016introduction]:

$$ \pi_{i}q_{ij} = \pi_{j}q_{ji} $$

Intuitively, a continuous-time Markov chain is time reversible if the process in forward time is indistinguishable from the process in reversed time. A consequence is that for all states $i$ and $j$, the long-term forward transition rate from $i$ to $j$ is equal to the long-term backward rate from $j$ to $i$. The package provides the `is.TimeReversible` function to check if a `ctmc` object is time-reversible. We follow with an example run on a `ctmc` object.
``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) ``` # References markovchain/R/0000755000176200001440000000000014503772362012766 5ustar liggesusersmarkovchain/R/markovchain.R0000644000176200001440000000371414472736700015421 0ustar liggesusers#' @docType package #' @name markovchain-package #' @rdname markovchain #' @title Easy Handling Discrete Time Markov Chains #' #' @description The package contains classes and method to create and manage #' (plot, print, export for example) discrete time Markov chains (DTMC). In #' addition it provide functions to perform statistical (fitting and drawing #' random variates) and probabilistic (analysis of DTMC proprieties) analysis #' #' @author #' Giorgio Alfredo Spedicato #' Maintainer: Giorgio Alfredo Spedicato #' @references Discrete-Time Markov Models, Bremaud, Springer 1999 #' @keywords package #' #' @examples #' # create some markov chains #' statesNames=c("a","b") #' mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, #' nrow=2, dimnames=list(statesNames,statesNames))) #' #' statesNames=c("a","b","c") #' mcB<-new("markovchain", states=statesNames, transitionMatrix= #' matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, #' byrow=TRUE, dimnames=list(statesNames, statesNames))) #' #' statesNames=c("a","b","c","d") #' matrice<-matrix(c(0.25,0.75,0,0,0.4,0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) #' mcC<-new("markovchain", states=statesNames, transitionMatrix=matrice) #' mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #' #' #' #operations with S4 methods #' mcA^2 #' steadyStates(mcB) #' absorbingStates(mcB) #' markovchainSequence(n=20, markovchain=mcC, include=TRUE) NULL #' @useDynLib markovchain, .registration = TRUE #' @import igraph #' @import Matrix #' @import methods #' @import parallel #' @importFrom utils packageDescription #' @importFrom Rcpp evalCpp #' @importFrom RcppParallel RcppParallelLibs #' @importFrom stats4 plot summary #' @importFrom expm %^% logm #' @importFrom stats sd rexp chisq.test pchisq predict aggregate #' @importFrom grDevices colors NULLmarkovchain/R/ctmcProbabilistic.R0000644000176200001440000005132314427513574016555 0ustar liggesusers#' @title rctmc #' #' @description The function generates random CTMC transitions as per the #' provided generator matrix. #' @usage rctmc(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, #' out.type = "list") #' #' @param n The number of samples to generate. #' @param ctmc The CTMC S4 object. #' @param initDist The initial distribution of states. #' @param T The time up to which the simulation runs (all transitions after time #' T are not returned). #' @param include.T0 Flag to determine if start state is to be included. #' @param out.type "list" or "df" #' #' @details In order to use the T0 argument, set n to Inf. #' @return Based on out.type, a list or a data frame is returned. The returned #' list has two elements - a character vector (states) and a numeric vector #' (indicating time of transitions). The data frame is similarly structured. #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences #' (2013), David F. 
Anderson, University of Wisconsin at Madison #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{generatorToTransitionMatrix}},\code{\link{ctmc-class}} #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' #' statesDist <- c(0.8, 0.2) #' rctmc(n = Inf, ctmc = molecularCTMC, T = 1) #' rctmc(n = 5, ctmc = molecularCTMC, initDist = statesDist, include.T0 = FALSE) #' @export rctmc <- function(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, out.type = "list") { if (identical(initDist, numeric())) state <- sample(ctmc@states, 1) # sample state randomly else if (length(initDist) != dim(ctmc) | round(sum(initDist), 5) != 1) stop("Error! Provide a valid initial state probability distribution") else state <- sample(ctmc@states, 1, prob = initDist) # if valid probability distribution, # sample accordingly # obtain transition probability matrix from the generator matrix trans <- generatorToTransitionMatrix(ctmc@generator) states <- c() time <- c() if (include.T0 == TRUE){ states <- c(states, state) time <- c(time, 0) } t <- 0 i <- 1 while (i <= n){ idx <- which(ctmc@states == state) t <- t + rexp(1, -ctmc@generator[idx, idx]) state <- ctmc@states[sample(1:dim(ctmc), 1, prob = trans[idx, ])] if(T > 0 & t > T) break states <- c(states, state) time <- c(time, t) i <- i + 1 } out <- list(states, time) if (out.type == "list") return(out) else if(out.type == "df"){ df <- data.frame(matrix(unlist(out), nrow = length(states))) names(df) <- c("states", "time") return(df) } else stop("Not a valid output type") } #' @title Return the generator matrix for a corresponding transition matrix #' #' @description Calculate the generator matrix for a #' corresponding transition matrix #' #' @param P transition matrix between time 0 and t #' @param t time of observation #' @param method "logarithm" returns the Matrix logarithm of the transition matrix #' #' @return A matrix that represent the generator of P #' @export #' #' @examples #' mymatr <- matrix(c(.4, .6, .1, .9), nrow = 2, byrow = TRUE) #' Q <- transition2Generator(P = mymatr) #' expm::expm(Q) #' #' @seealso \code{\link{rctmc}} transition2Generator<-function(P, t = 1,method = "logarithm") { if (method == "logarithm") { Q = logm(P)/t } #else return(Q) } #' Returns a generator matrix corresponding to frequency matrix #' #' @description The function provides interface to calculate generator matrix corresponding to #' a frequency matrix and time taken #' #' @param P relative frequency matrix #' @param t (default value = 1) #' @param method one among "QO"(Quasi optimaisation), "WA"(weighted adjustment), "DA"(diagonal adjustment) #' @param logmethod method for computation of matrx algorithm (by default : Eigen) #' #' @return returns a generator matix with same dimnames #' #' @references E. Kreinin and M. Sidelnikova: Regularization Algorithms for #' Transition Matrices. 
Algo Research Quarterly 4(1):23-40, 2001 #' #' @examples #' sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) #' sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) #' freq2Generator(sample_rel,1) #' #' data(tm_abs) #' tm_rel=rbind((tm_abs/rowSums(tm_abs))[1:7,],c(rep(0,7),1)) #' ## Derive quasi optimization generator matrix estimate #' freq2Generator(tm_rel,1) #' #' @export #' freq2Generator <- function(P,t = 1,method = "QO",logmethod = "Eigen"){ if(requireNamespace('ctmcd', quietly = TRUE)) { if(method == "QO"){ out <- ctmcd::gmQO(P, t, logmethod) } else if(method == "WA") { out <- ctmcd::gmWA(P, t, logmethod) } else if(method == "DA") { out <- ctmcd::gmDA(P, t, logmethod) } } else { warning("package ctmcd is not installed") out = NULL } return(out) } #' @title Returns expected hitting time from state i to state j #' #' @description Returns expected hitting time from state i to state j #' #' @usage ExpectedTime(C,i,j,useRCpp) #' #' @param C A CTMC S4 object #' @param i Initial state i #' @param j Final state j #' @param useRCpp logical whether to use Rcpp #' #' @details According to the theorem, holding times for all states except j should be greater than 0. #' #' @return A numerical value that returns expected hitting times from i to j #' #' @references Markovchains, J. R. Norris, Cambridge University Press #' #' @author Vandit Jain #' #' @examples #' states <- c("a","b","c","d") #' byRow <- TRUE #' gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), #' nrow = 4,byrow = byRow, dimnames = list(states,states)) #' ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") #' ExpectedTime(ctmc,1,4,TRUE) #' #' @export ExpectedTime <- function(C,i,j,useRCpp = TRUE){ # take generator from ctmc-class object Q <- C@generator # in case where generator is written column wise if(C@byrow==FALSE){ Q <- t(Q) } NoofStates <- dim(C) Exceptj <- c(1:NoofStates) # create vector with all values from 1:NoofStates except j Exceptj <- which(Exceptj!=j) # build matrix with vlaues from Q such that row!=j or column!=j Q_Exceptj <- Q[Exceptj,Exceptj] # check for positivity of holding times except for state j if(!all(diag(Q_Exceptj)!=0)){ stop("Holding times for all states except j should be greater than 0") } # get b for solving the system of linear equation Ax = b where A is Q_Exceptj b <- rep(-1,dim(Q_Exceptj)[1]) # use solve function from base packge to solve Ax = b if(useRCpp == TRUE){ out <- .ExpectedTimeRCpp(Q_Exceptj,b) } else { out <- solve(Q_Exceptj,b) } # out will be of size NoofStates-1, hence the adjustment for different cases of i>= NoofStates || x0 < 1){ stop("Initial state provided is not correct") } return(P[x0,]) } } #' Calculating full conditional probability using lower rate transition matrix #' #' This function calculates full conditional probability at given #' time s using lower rate transition matrix #' #' @usage impreciseProbabilityatT(C,i,t,s,error,useRCpp) #' #' @param C a ictmc class object #' @param i initial state at time t #' @param t initial time t. Default value = 0 #' @param s final time #' @param error error rate. 
Default value = 0.001 #' @param useRCpp logical whether to use RCpp implementation; by default TRUE #' #' @references Imprecise Continuous-Time Markov Chains, Thomas Krak et al., 2016 #' #' @author Vandit Jain #' #' @examples #' states <- c("n","y") #' Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) #' range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) #' name <- "testictmc" #' ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) #' impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) #' @export impreciseProbabilityatT <- function(C, i, t=0, s, error = 10^-3, useRCpp = TRUE){ ## input validity checking if(s <= t){ stop("Please provide time points such that initial time is greater than or equal to end point") } if(!is(C,'ictmc')){ stop("Please provide a valid ictmc-class object") } noOfstates <-length(C@states) if(i <= 0 || i > noOfstates){ stop("Please provide a valid initial state") } ### validity checking ends if(useRCpp == TRUE) { Qgx <- .impreciseProbabilityatTRCpp(C,i,t,s,error) } else { ## extract values from ictmc object Q <- C@Q range <- C@range ### calculate ||QI_i|| #initialise Q norm value QNorm <- -1 for(i in 1:noOfstates){ sum <- 0 for(j in 1:noOfstates){ sum <- sum + abs(Q[i,j]) } QNorm <- max(sum*range[i,2],QNorm) } ### calculate no of iterations # The 1 is for norm of I_s i.e. ||I_s|| which equals 1 n <- max((s-t)*QNorm, (s-t)*(s-t)*QNorm*QNorm*1/(2*error)) ### calculate delta delta <- (s-t)/n ### build I_i vector Ii <- rep(0, noOfstates) Ii[i] <- 1 ### calculate value of lower operator _QI_i(x) for all x belongs to no ofStates values <- Q%*%Ii Qgx <- rep(0, noOfstates) for(i in 1:noOfstates){ Qgx[i] <- min(values[i]*range[i,1], values[i]*range[i,2]) } Qgx <- delta*Qgx Qgx <- Ii + Qgx for(iter in 1:n-1){ temp <- Qgx values <- Q%*%Qgx for(i in 1:noOfstates){ Qgx[i] <- min(values[i]*range[i,1], values[i]*range[i,2]) } Qgx <- delta*Qgx Qgx <- temp + Qgx } } return(Qgx) } #' Check if CTMC is irreducible #' #' @description #' This function verifies whether a CTMC object is irreducible #' #' @usage is.CTMCirreducible(ctmc) #' #' @param ctmc a ctmc-class object #' #' @references #' Continuous-Time Markov Chains, Karl Sigman, Columbia University #' #' @author Vandit Jain #' #' @return a boolean value as described above. 
#' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' is.CTMCirreducible(molecularCTMC) #' #' @export is.CTMCirreducible <- function(ctmc) { if(!is(ctmc, 'ctmc') ) { stop("please provide a valid ctmc class object") } ## gets the embeded chain matrix embeddedChainMatrix <- generatorToTransitionMatrix(ctmc@generator) ## forms a markovchain object related to embedded transition matrix markovchainObject <- new("markovchain",states = ctmc@states, transitionMatrix = embeddedChainMatrix) ## returns result using is.irreducible function on embedded chain transition matrix return(is.irreducible(markovchainObject)) } #' checks if ctmc object is time reversible #' #' @description #' The function returns checks if provided function is time reversible #' #' @usage is.TimeReversible(ctmc) #' #' @param ctmc a ctmc-class object #' #' @return Returns a boolean value stating whether ctmc object is time reversible #' #' @author Vandit Jain #' #' @return a boolean value as described above #' #' @references #' INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley #' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' is.TimeReversible(molecularCTMC) #' #' @export is.TimeReversible <- function(ctmc) { if(!is(ctmc,"ctmc")) { stop("please provide a valid ctmc-class object") } ## get steady state probabilities Pi <- steadyStates(ctmc) ## initialise boolean result check <- TRUE ## no of states m <- length(ctmc@states) ## checks for byrow if(ctmc@byrow == FALSE) gen <- t(ctmc@generator) else gen <- ctmc@generator ## iterates for every state for( i in 1:m) { for(j in 1:m) { if(Pi[i]*gen[i,j] != Pi[j]*gen[j,i]) { check <- FALSE break } } } return(check) } # `generator/nextki` <- function(k) { # if(k >= 0) return(-1-k) # return(-k) # } # # `generator/nextk` <- function(k, Kmin, Kmax) { # if(is.null(k)) { # k <- rep(0, length(Kmin)) # return(list(ans = TRUE, k = k)) # } # # if(length(Kmin) == 0) { # return(list(ans = FALSE, k = k)) # } # # i <- 1 # kl <- k # kl[i] <- `generator/nextki`(kl[i]) # while (kl[i] > Kmax[i] || kl[i] < Kmin[i]) { # kl[i] <- 0 # i <- i+1 # if(i > length(kl)) { # k <- kl # return(list(ans = FALSE, k = k)) # } # kl[i] <- `generator/nextki`(kl[i]) # } # k <- kl # return(list(ans = TRUE, k = k)) # } # # `generator/generator` <- function(Po, Di, odi) { # P <- Po # N <- nrow(P) # # if(Di > 22) return(NULL) # bad idea # options(digits = Di) # odigs <- odi # # rSum <- rowSums(P) # if(! 
all(abs(1-rSum) < 0.001)) { # stop("Sum of each rows of Po should be equal to 1") # } # # P <- P/rSum # d <- det(P) # # if(d <= 0) { # cat("Matrix has non-positive determinant") # return(NULL) # } # # diagP <- 1 # for(i in 1:nrow(P)) diagP <- diagP * P[i, i] # # if(d >= diagP) { # cat("Determinant exceeds product of diagonal elements\n") # return(NULL) # } # # E <- eigen(P)[[1]] # B <- eigen(P)[[2]] # # print("Eigenvalues") # print(E) # # # risky # if(length(unique(E)) != length(E)) { # warning("Matrix does not have distinct eigenvalues") # } # # L <- abs(log(d)) # addigs <- 2 + round(log10(1/Matrix::rcond(B))) + round(L/log(10)) # problem # # if(options()$digits < odigs + addigs) { # if(odigs + addigs > 100) { # print("Eigenvector matrix is singular") # return(NULL) # } # # cat('Going to', odigs + addigs, "digits") # return(`generator/generator`(Po, odigs + addigs, odigs)) # } # # Bi <- solve(B) # # posevs <- NULL # negevs <- NULL # bestj <- NULL # bestQ <- NULL # marks <- rep(TRUE, length(E)) # # for(i in 1:length(E)) { # if(marks[i] && !(Re(E[i]) > 0 && Im(E[i]) == 0)) { # invalid comparison of complex number # cj <- Conj(E[i]) # best <- Inf # if(i+1 <= length(E)) { # for(j in (i+1):length(E)) { # if(marks[j]) { # score <- abs(cj-E[j]) # if(score < best) { # best <- score # bestj <- j # } # } # } # } # # if(best > 10^(3-options()$digits)) { # cat("Unpaired non-positive eigenvalue", E[i]) # return(NULL) # } # marks[bestj] <- FALSE # if(Im(E[i]) >= 0) { # posevs <- c(posevs, i) # negevs <- c(negevs, bestj) # if(Im(E[bestj]) == 0) { # E[bestj] <- complex(real = E[bestj], imaginary = 0) # } # } else { # posevs <- c(posevs, bestj) # negevs <- c(negevs, i) # if(Im(E[i]) == 0) { # E[i] <- complex(real = E[i], imaginary = 0) # } # } # } # } # # npairs <- length(posevs) # # display conjugate pairs # # Kmax <- rep(0, npairs) # Kmin <- Kmax # # for(i in 1:npairs) { # a <- Arg(E[posevs[i]]) # Kmax[i] <- trunc((L-a)/2*pi) # Kmin[i] <- trunc((-L-a)/2*pi) # } # # # display K-max # # display K-min # # best <- -0.001 # DD <- diag(log(E)) # DK <- matlab::zeros(N) # res <- list(); p <- 1 # k <- NULL # while(TRUE) { # # dlist <- `generator/nextk`(k, Kmin, Kmax) # k <- dlist$k # # if(dlist$ans == FALSE) {break} # # # display value of k # for(i in 1:npairs) { # ke <- complex(real = 0, imaginary = 2*pi*k[i]) # DK[posevs[i], posevs[i]] <- ke # DK[negevs[i], negevs[i]] <- -ke # } # # Q <- B %*% (DD + DK) %*% Bi # # Q <- fnormal(Re(Q), options()$digits, 5*(10^(-1-odigs))) # define fnormal of maple # qmin <- Q[1,2] # for(i in 1:N) { # for(j in 1:N) { # if(i != j) { # if(Q[i, j] < qmin) qmin <- Q[i, j] # } # } # } # # if(EnvAllGenerators == TRUE) { # if(qmin > -.001) { # cat("Possible generator with qmin =", qmin) # res[[p]] <- round(Q, odigs) # p <- p + 1 # } else { # cat("qmin =", qmin) # } # # } else { # if(qmin >= 0) { # cat("Found a generator") # return(round(Q, odigs)) # } else { # if(qmin > best) { # best <- qmin # bestQ <- Q # } # if(qmin > -.001) { # cat("Approximate generator with qmin = ", qmin) # } else { # cat("qmin =", qmin) # } # } # } # } # # if(EnvAllGenerators == TRUE) { # return(res) # } # # warning("No completely valid generator found") # # if(! is.null(bestQ)) { # return(round(bestQ, odigs)) # } else return(NULL) # # } # # generator <- function(Po, digits = 10) { # odigs <- digits # options(digits = 15) # if(is.matrix(Po)) { # P <- Po # } else { # stop("Po must be matrix") # } # # if(nrow(P) != ncol(P)) { # print(P) # stop('Po must be square matrix') # } # # if(! 
all(P >= 0)) { # print(P) # stop('Po must be non negative square matrix') # } # # `generator/generator`(P, options()$digits, odigs) # } markovchain/R/sysdata.rda0000644000176200001440000000353314503773064015132 0ustar liggesusers[ PGݛKÑb Qr9IdXVwG;&!JA4((HDP& (*IXN0̲Xٮm}=z ӂݧd2LJ*RP2̍J}tI.93%3tIa"LR5ҏgҫz4f#qZËJV*B` geLHf0$a4h1DK֓(ޥoC啈WANeB|ز:|pՇhE.c-7ޗ;4hST*#=&SqT X*&D!iRTZRͰH=@KZR03=E[C6X]0]2o;*r+[%íj툰C쾊{P9e6R%q~3/ޅjv-܍O*?j_pTbW* KkW&BDn.nǏYbܕKA , ;MEuqPzgɲl80@ڭ?s?<2Lp3 N\ so\}n#á6ePA F vg|MWCa'cFh >9dM1#<’V\Kl_7h%N!Pu4{0.0j?J2:=8mZc6|p 0廾44dF"gDzO g%0m=P3r{08nȏnTLLa;[ -i<|=Y.l+ rZ 1N\ ?Gm$߇?4{bǹi^t>o b"y sALg2nSqOM01f.A/#{JvqؽwPS_jn'sF=ǭRuI«fU6HMK328O5>8;:JQ ^(+4sy|.s}Z-!ۈY% ]/v~Y'׊?(9>}y^ӃX{I̠^&фhMq3V?Ř" 4#a )aҬN1&x d4[T%s3markovchain/R/probabilistic.R0000644000176200001440000007064614243470036015746 0ustar liggesusers# given a markovchain object is it possible to reach goal state from # a given state #' @name is.accessible #' @title Verify if a state j is reachable from state i. #' @description This function verifies if a state is reachable from another, i.e., #' if there exists a path that leads to state j leaving from state i with #' positive probability #' #' @param object A \code{markovchain} object. #' @param from The name of state "i" (beginning state). #' @param to The name of state "j" (ending state). #' #' @details It wraps an internal function named \code{reachabilityMatrix}. #' @return A boolean value. #' #' @references James Montgomery, University of Madison #' #' @author Giorgio Spedicato, Ignacio Cordón #' @seealso \code{is.irreducible} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, #' transitionMatrix = matrix(c(0.2, 0.5, 0.3, #' 0, 1, 0, #' 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, #' dimnames = list(statesNames, statesNames) #' ) #' ) #' is.accessible(markovB, "a", "c") #' #' @exportMethod is.accessible setGeneric("is.accessible", function(object, from, to) standardGeneric("is.accessible")) setMethod("is.accessible", c("markovchain", "character", "character"), function(object, from, to) { # O(n²) procedure to see if to state is reachable starting at from state return(.isAccessibleRcpp(object, from, to)) } ) setMethod("is.accessible", c("markovchain", "missing", "missing"), function(object, from, to) { .reachabilityMatrixRcpp(object) } ) # a markov chain is irreducible if it is composed of only one communicating class #' @name is.irreducible #' @title Function to check if a Markov chain is irreducible (i.e. ergodic) #' @description This function verifies whether a \code{markovchain} object transition matrix #' is composed by only one communicating class. #' @param object A \code{markovchain} object #' #' @details It is based on \code{.communicatingClasses} internal function. #' @return A boolean values. #' #' @references Feres, Matlab listings for Markov Chains. #' @author Giorgio Spedicato #' #' @seealso \code{\link{summary}} #' #' @examples #' statesNames <- c("a", "b") #' mcA <- new("markovchain", transitionMatrix = matrix(c(0.7,0.3,0.1,0.9), #' byrow = TRUE, nrow = 2, #' dimnames = list(statesNames, statesNames) #' )) #' is.irreducible(mcA) #' #' @exportMethod is.irreducible setGeneric("is.irreducible", function(object) standardGeneric("is.irreducible")) setMethod("is.irreducible", "markovchain", function(object) { .isIrreducibleRcpp(object) }) # what this function will do? 
# It calculates the probability to go from given state # to all other states in k steps # k varies from 1 to n #' @name firstPassage #' @title First passage across states #' @description This function compute the first passage probability in states #' #' @param object A \code{markovchain} object #' @param state Initial state #' @param n Number of rows on which compute the distribution #' #' @details Based on Feres' Matlab listings #' @return A matrix of size 1:n x number of states showing the probability of the #' first time of passage in states to be exactly the number in the row. #' #' @references Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains #' #' @author Giorgio Spedicato #' @seealso \code{\link{conditionalDistribution}} #' #' @examples #' simpleMc <- new("markovchain", states = c("a", "b"), #' transitionMatrix = matrix(c(0.4, 0.6, .3, .7), #' nrow = 2, byrow = TRUE)) #' firstPassage(simpleMc, "b", 20) #' #' @export firstPassage <- function(object, state, n) { P <- object@transitionMatrix stateNames <- states(object) # row number i <- which(stateNames == state) outMatr <- .firstpassageKernelRcpp(P = P, i = i, n = n) colnames(outMatr) <- stateNames rownames(outMatr) <- 1:n return(outMatr) } #' function to calculate first passage probabilities #' #' @description The function calculates first passage probability for a subset of #' states given an initial state. #' #' @param object a markovchain-class object #' @param state intital state of the process (charactervector) #' @param set set of states A, first passage of which is to be calculated #' @param n Number of rows on which compute the distribution #' #' @return A vector of size n showing the first time proabilities #' @references #' Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains; #' MIT OCW, course - 6.262, Discrete Stochastic Processes, course-notes, chap -05 #' #' @author Vandit Jain #' #' @seealso \code{\link{firstPassage}} #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, #' 0, 1, 0, #' 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, #' dimnames = list(statesNames, statesNames) #' )) #' firstPassageMultiple(markovB,"a",c("b","c"),4) #' #' @export firstPassageMultiple <- function(object,state,set, n){ # gets the transition matrix P <- object@transitionMatrix # character vector of states of the markovchain stateNames <- states(object) k <- -1 k <- which(stateNames == state) if(k==-1) stop("please provide a valid initial state") # gets the set in numeric vector setno <- rep(0,length(set)) for(i in 1:length(set)) { setno[i] = which(set[i] == stateNames) if(setno[i] == 0) stop("please provide proper set of states") } # calls Rcpp implementation outMatr <- .firstPassageMultipleRCpp(P,k,setno,n) #sets column and row names of output colnames(outMatr) <- "set" rownames(outMatr) <- 1:n return(outMatr) } #' @name communicatingClasses #' @rdname structuralAnalysis #' @aliases transientStates recurrentStates absorbingStates communicatingClasses #' transientClasses recurrentClasses #' @title Various function to perform structural analysis of DTMC #' @description These functions return absorbing and transient states of the \code{markovchain} objects. #' #' @param object A \code{markovchain} object. 
#' #' @return #' \describe{ #' \item{\code{period}}{returns a integer number corresponding to the periodicity of the Markov #' chain (if it is irreducible)} #' \item{\code{absorbingStates}}{returns a character vector with the names of the absorbing #' states in the Markov chain} #' \item{\code{communicatingClasses}}{returns a list in which each slot contains the names of #' the states that are in that communicating class} #' \item{\code{recurrentClasses}}{analogously to \code{communicatingClasses}, but with #' recurrent classes} #' \item{\code{transientClasses}}{analogously to \code{communicatingClasses}, but with #' transient classes} #' \item{\code{transientStates}}{returns a character vector with all the transient states #' for the Markov chain} #' \item{\code{recurrentStates}}{returns a character vector with all the recurrent states #' for the Markov chain} #' \item{\code{canonicForm}}{returns the Markov chain reordered by a permutation of states #' so that we have blocks submatrices for each of the recurrent classes and a collection #' of rows in the end for the transient states} #' } #' #' @references Feres, Matlab listing for markov chain. #' #' @author Giorgio Alfredo Spedicato, Ignacio Cordón #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' mc <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, #' 0, 1, 0, #' 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, #' dimnames = list(statesNames, statesNames)) #' ) #' #' communicatingClasses(mc) #' recurrentClasses(mc) #' recurrentClasses(mc) #' absorbingStates(mc) #' transientStates(mc) #' recurrentStates(mc) #' canonicForm(mc) #' #' # periodicity analysis #' A <- matrix(c(0, 1, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0, 1, 0), #' nrow = 4, ncol = 4, byrow = TRUE) #' mcA <- new("markovchain", states = c("a", "b", "c", "d"), #' transitionMatrix = A, #' name = "A") #' #' is.irreducible(mcA) #true #' period(mcA) #2 #' #' # periodicity analysis #' B <- matrix(c(0, 0, 1/2, 1/4, 1/4, 0, 0, #' 0, 0, 1/3, 0, 2/3, 0, 0, #' 0, 0, 0, 0, 0, 1/3, 2/3, #' 0, 0, 0, 0, 0, 1/2, 1/2, #' 0, 0, 0, 0, 0, 3/4, 1/4, #' 1/2, 1/2, 0, 0, 0, 0, 0, #' 1/4, 3/4, 0, 0, 0, 0, 0), byrow = TRUE, ncol = 7) #' mcB <- new("markovchain", transitionMatrix = B) #' period(mcB) #' #' @exportMethod communicatingClasses setGeneric("communicatingClasses", function(object) standardGeneric("communicatingClasses")) setMethod("communicatingClasses", "markovchain", function(object) { return(.communicatingClassesRcpp(object)) }) # A communicating class will be a recurrent class if # there is no outgoing edge from this class # Recurrent classes are subset of communicating classes #' @rdname structuralAnalysis #' #' @exportMethod recurrentClasses setGeneric("recurrentClasses", function(object) standardGeneric("recurrentClasses")) setMethod("recurrentClasses", "markovchain", function(object) { return(.recurrentClassesRcpp(object)) }) # A communicating class will be a transient class iff # there is an outgoing edge from this class to an state # outside of the class # Transient classes are subset of communicating classes #' @rdname structuralAnalysis #' #' @exportMethod transientClasses setGeneric("transientClasses", function(object) standardGeneric("transientClasses")) setMethod("transientClasses", "markovchain", function(object) { return(.transientClassesRcpp(object)) }) #' @rdname structuralAnalysis #' #' @exportMethod transientStates setGeneric("transientStates", function(object) 
standardGeneric("transientStates")) setMethod("transientStates", "markovchain", function(object) { .transientStatesRcpp(object) } ) #' @rdname structuralAnalysis #' #' @exportMethod recurrentStates setGeneric("recurrentStates", function(object) standardGeneric("recurrentStates")) setMethod("recurrentStates", "markovchain", function(object) { .recurrentStatesRcpp(object) } ) # generic function to extract absorbing states #' @rdname structuralAnalysis #' #' @exportMethod absorbingStates setGeneric("absorbingStates", function(object) standardGeneric("absorbingStates")) setMethod("absorbingStates", "markovchain", function(object) { .absorbingStatesRcpp(object) } ) #' @rdname structuralAnalysis #' #' @exportMethod canonicForm setGeneric("canonicForm", function(object) standardGeneric("canonicForm")) setMethod("canonicForm", "markovchain", function(object) { .canonicFormRcpp(object) } ) #' @title Calculates committor of a markovchain object with respect to set A, B #' #' @description Returns the probability of hitting states rom set A before set B #' with different initial states #' #' @usage committorAB(object,A,B,p) #' #' @param object a markovchain class object #' @param A a set of states #' @param B a set of states #' @param p initial state (default value : 1) #' #' @details The function solves a system of linear equations to calculate probaility that the process hits #' a state from set A before any state from set B #' #' @return Return a vector of probabilities in case initial state is not provided else returns a number #' #' @examples #' transMatr <- matrix(c(0,0,0,1,0.5, #' 0.5,0,0,0,0, #' 0.5,0,0,0,0, #' 0,0.2,0.4,0,0, #' 0,0.8,0.6,0,0.5), #' nrow = 5) #' object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr) #' committorAB(object,c(5),c(3)) #' #' @export committorAB <- function(object,A,B,p=1) { if(!is(object,"markovchain")) stop("please provide a valid markovchain object") matrix <- object@transitionMatrix noofstates <- length(object@states) for(i in length(A)) { if(A[i] <= 0 || A[i] > noofstates) stop("please provide a valid set A") } for(i in length(B)) { if(B[i] <= 0 || B[i] > noofstates) stop("please provide a valid set B") } for(i in 1:noofstates) { if(i %in% A && i %in% B) stop("intersection of set A and B in not null") } if(p <=0 || p > noofstates) stop("please provide a valid initial state") I <- diag(noofstates) matrix <- matrix - I A_size = length(A) B_size = length(B) # sets the matrix according to the provided states for(i in 1:A_size) { for(j in 1:noofstates) { if(A[i]==j) matrix[A[i],j] = 1 else matrix[A[i],j] = 0 } } # sets the matrix according to the provided states for(i in 1:B_size) { for(j in 1:noofstates) { if(B[i]==j) matrix[B[i],j] = 1 else matrix[B[i],j] = 0 } } # initialises b in the equation the system of equation AX =b b <- rep(0,noofstates) for(i in 1:A_size) { b[A[i]] = 1 } # solve AX = b according using solve function from base package out <- solve(matrix,b) if(missing(p)) return(out) else return(out[p]) } #' Expected Rewards for a markovchain #' #' @description Given a markovchain object and reward values for every state, #' function calculates expected reward value after n steps. #' #' @usage expectedRewards(markovchain,n,rewards) #' #' @param markovchain the markovchain-class object #' @param n no of steps of the process #' @param rewards vector depicting rewards coressponding to states #' #' @details the function uses a dynamic programming approach to solve a #' recursive equation described in reference. 
#' #' @return #' returns a vector of expected rewards for different initial states #' #' @author Vandit Jain #' #' @references Stochastic Processes: Theory for Applications, Robert G. Gallager, #' Cambridge University Press #' #' @examples #' transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) #' simpleMc<-new("markovchain", states=c("a","b"), #' transitionMatrix=transMatr) #' expectedRewards(simpleMc,1,c(0,1)) #' @export expectedRewards <- function(markovchain, n, rewards) { # gets the transition matrix matrix <- markovchain@transitionMatrix # Rcpp implementation of the function out <- .expectedRewardsRCpp(matrix,n, rewards) noofStates <- length(states(markovchain)) result <- rep(0,noofStates) for(i in 1:noofStates) result[i] = out[i] #names(result) <- states(markovchain) return(result) } #' Expected first passage Rewards for a set of states in a markovchain #' #' @description Given a markovchain object and reward values for every state, #' function calculates expected reward value for a set A of states after n #' steps. #' #' @usage expectedRewardsBeforeHittingA(markovchain, A, state, rewards, n) #' #' @param markovchain the markovchain-class object #' @param A set of states for first passage expected reward #' @param state initial state #' @param rewards vector depicting rewards coressponding to states #' @param n no of steps of the process #' #' @details The function returns the value of expected first passage #' rewards given rewards coressponding to every state, an initial state #' and number of steps. #' #' @return returns a expected reward (numerical value) as described above #' #' @author Sai Bhargav Yalamanchi, Vandit Jain #' #' @export expectedRewardsBeforeHittingA <- function(markovchain, A, state, rewards, n) { ## gets the markovchain matrix matrix <- markovchain@transitionMatrix # gets the names of states stateNames <- states(markovchain) # no of states S <- length(stateNames) # vectors for states in S-A SAno <- rep(0,S-length(A)) rewardsSA <- rep(0,S-length(A)) # for initialisation for set S-A i=1 ini = -1 for(j in 1:length(stateNames)) { if(!(stateNames[j] %in% A)){ SAno[i] = j rewardsSA[i] = rewards[j] if(stateNames[j] == state) ini = i i = i+1 } } ## get the matrix coressponding to S-A matrix <- matrix[SAno,SAno] ## cals the cpp implementation out <- .expectedRewardsBeforeHittingARCpp(matrix, ini, rewardsSA, n) return(out) } #' Mean First Passage Time for irreducible Markov chains #' #' @description Given an irreducible (ergodic) markovchain object, this function #' calculates the expected number of steps to reach other states #' #' @param object the markovchain object #' @param destination a character vector representing the states respect to #' which we want to compute the mean first passage time. Empty by default #' #' @details For an ergodic Markov chain it computes: #' \itemize{ #' \item If destination is empty, the average first time (in steps) that takes #' the Markov chain to go from initial state i to j. (i, j) represents that #' value in case the Markov chain is given row-wise, (j, i) in case it is given #' col-wise. #' \item If destination is not empty, the average time it takes us from the #' remaining states to reach the states in \code{destination} #' } #' #' @return a Matrix of the same size with the average first passage times if #' destination is empty, a vector if destination is not #' #' @author Toni Giorgino, Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. 
#' #' @examples #' m <- matrix(1 / 10 * c(6,3,1, #' 2,3,5, #' 4,1,5), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) #' meanFirstPassageTime(mc, "r") #' #' #' # Grinstead and Snell's "Oz weather" worked out example #' mOz <- matrix(c(2,1,1, #' 2,0,2, #' 1,1,2)/4, ncol = 3, byrow = TRUE) #' #' mcOz <- new("markovchain", states = c("s", "c", "r"), transitionMatrix = mOz) #' meanFirstPassageTime(mcOz) #' #' @export meanFirstPassageTime setGeneric("meanFirstPassageTime", function(object, destination) { standardGeneric("meanFirstPassageTime") }) setMethod("meanFirstPassageTime", signature("markovchain", "missing"), function(object, destination) { destination = character() .meanFirstPassageTimeRcpp(object, destination) } ) setMethod("meanFirstPassageTime", signature("markovchain", "character"), function(object, destination) { states <- object@states incorrectStates <- setdiff(destination, states) if (length(incorrectStates) > 0) stop("Some of the states you provided in destination do not match states from the markovchain") result <- .meanFirstPassageTimeRcpp(object, destination) asVector <- as.vector(result) names(asVector) <- colnames(result) asVector } ) #' Mean recurrence time #' #' @description Computes the expected time to return to a recurrent state #' in case the Markov chain starts there #' #' @usage meanRecurrenceTime(object) #' #' @param object the markovchain object #' #' @return For a Markov chain it outputs is a named vector with the expected #' time to first return to a state when the chain starts there. #' States present in the vector are only the recurrent ones. If the matrix #' is ergodic (i.e. irreducible), then all states are present in the output #' and order is the same as states order for the Markov chain #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(1 / 10 * c(6,3,1, #' 2,3,5, #' 4,1,5), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) #' meanRecurrenceTime(mc) #' #' @export meanRecurrenceTime setGeneric("meanRecurrenceTime", function(object) { standardGeneric("meanRecurrenceTime") }) setMethod("meanRecurrenceTime", "markovchain", function(object) { .meanRecurrenceTimeRcpp(object) }) #' Mean absorption time #' #' @description Computes the expected number of steps to go from any of the #' transient states to any of the recurrent states. The Markov chain should #' have at least one transient state for this method to work #' #' @usage meanAbsorptionTime(object) #' #' @param object the markovchain object #' #' @return A named vector with the expected number of steps to go from a #' transient state to any of the recurrent ones #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(c(1/2, 1/2, 0, #' 1/2, 1/2, 0, #' 0, 1/2, 1/2), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) #' times <- meanAbsorptionTime(mc) #' #' @export meanAbsorptionTime setGeneric("meanAbsorptionTime", function(object) { standardGeneric("meanAbsorptionTime") }) setMethod("meanAbsorptionTime", "markovchain", function(object) { .meanAbsorptionTimeRcpp(object) }) #' Absorption probabilities #' #' @description Computes the absorption probability from each transient #' state to each recurrent one (i.e. 
the (i, j) entry or (j, i), in a #' stochastic matrix by columns, represents the probability that the #' first not transient state we can go from the transient state i is j #' (and therefore we are going to be absorbed in the communicating #' recurrent class of j) #' #' @usage absorptionProbabilities(object) #' #' @param object the markovchain object #' #' @return A named vector with the expected number of steps to go from a #' transient state to any of the recurrent ones #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(c(1/2, 1/2, 0, #' 1/2, 1/2, 0, #' 0, 1/2, 1/2), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) #' absorptionProbabilities(mc) #' #' @export absorptionProbabilities setGeneric("absorptionProbabilities", function(object) { standardGeneric("absorptionProbabilities") }) setMethod("absorptionProbabilities", "markovchain", function(object) { .absorptionProbabilitiesRcpp(object) }) #' @title Check if a DTMC is regular #' #' @description Function to check wether a DTCM is regular # #' @details A Markov chain is regular if some of the powers of its matrix has all elements #' strictly positive #' #' @param object a markovchain object #' #' @return A boolean value #' #' @author Ignacio Cordón #' @references Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. #' Corollary 8.5.8, Theorem 8.5.9 #' #' #' @examples #' P <- matrix(c(0.5, 0.25, 0.25, #' 0.5, 0, 0.5, #' 0.25, 0.25, 0.5), nrow = 3) #' colnames(P) <- rownames(P) <- c("R","N","S") #' ciao <- as(P, "markovchain") #' is.regular(ciao) #' #' @seealso \code{\link{is.irreducible}} #' #' @exportMethod is.regular setGeneric("is.regular", function(object) standardGeneric("is.regular")) setMethod("is.regular", "markovchain", function(object) { .isRegularRcpp(object) }) #' Hitting probabilities for markovchain #' #' @description Given a markovchain object, #' this function calculates the probability of ever arriving from state i to j #' #' @usage hittingProbabilities(object) #' #' @param object the markovchain-class object #' #' @return a matrix of hitting probabilities #' #' @author Ignacio Cordón #' #' @references R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 #' #' @examples #' M <- markovchain:::zeros(5) #' M[1,1] <- M[5,5] <- 1 #' M[2,1] <- M[2,3] <- 1/2 #' M[3,2] <- M[3,4] <- 1/2 #' M[4,2] <- M[4,5] <- 1/2 #' #' mc <- new("markovchain", transitionMatrix = M) #' hittingProbabilities(mc) #' #' @exportMethod hittingProbabilities setGeneric("hittingProbabilities", function(object) standardGeneric("hittingProbabilities")) setMethod("hittingProbabilities", "markovchain", function(object) { .hittingProbabilitiesRcpp(object) }) #' Mean num of visits for markovchain, starting at each state #' #' @description Given a markovchain object, this function calculates #' a matrix where the element (i, j) represents the expect number of visits #' to the state j if the chain starts at i (in a Markov chain by columns it #' would be the element (j, i) instead) #' #' @usage meanNumVisits(object) #' #' @param object the markovchain-class object #' #' @return a matrix with the expect number of visits to each state #' #' @author Ignacio Cordón #' #' @references R. Vélez, T. 
Prieto, Procesos Estocásticos, Librería UNED, 2013 #' #' @examples #' M <- markovchain:::zeros(5) #' M[1,1] <- M[5,5] <- 1 #' M[2,1] <- M[2,3] <- 1/2 #' M[3,2] <- M[3,4] <- 1/2 #' M[4,2] <- M[4,5] <- 1/2 #' #' mc <- new("markovchain", transitionMatrix = M) #' meanNumVisits(mc) #' #' @exportMethod meanNumVisits setGeneric("meanNumVisits", function(object) standardGeneric("meanNumVisits")) setMethod("meanNumVisits", "markovchain", function(object) { .minNumVisitsRcpp(object) }) setMethod( "steadyStates", "markovchain", function(object) { .steadyStatesRcpp(object) } ) #' @exportMethod summary setGeneric("summary") # summary method for markovchain class # lists: closed, transient classes, irreducibility, absorbint, transient states setMethod("summary", signature(object = "markovchain"), function(object){ # list of closed, recurrent and transient classes outs <- .summaryKernelRcpp(object) # display name of the markovchain object cat(object@name," Markov chain that is composed by:", "\n") # number of closed classes check <- length(outs$closedClasses) cat("Closed classes:","\n") # display closed classes if(check == 0) cat("NONE", "\n") else { for(i in 1:check) cat(outs$closedClasses[[i]], "\n") } # number of recurrent classes check <- length(outs$recurrentClasses) cat("Recurrent classes:", "\n") # display recurrent classes if(check == 0) cat("NONE", "\n") else { cat("{") cat(outs$recurrentClasses[[1]], sep = ",") cat("}") if(check > 1) { for(i in 2:check) { cat(",{") cat(outs$recurrentClasses[[i]], sep = ",") cat("}") } } cat("\n") } # number of transient classes check <- length(outs$transientClasses) cat("Transient classes:","\n") # display transient classes if(check == 0) cat("NONE", "\n") else { cat("{") cat(outs$transientClasses[[1]], sep = ",") cat("}") if(check > 1) { for(i in 2:check) { cat(",{") cat(outs$transientClasses[[i]], sep = ",") cat("}") } } cat("\n") } # bool to say about irreducibility of markovchain irreducibility <- is.irreducible(object) if(irreducibility) cat("The Markov chain is irreducible", "\n") else cat("The Markov chain is not irreducible", "\n") # display absorbing states check <- absorbingStates(object) if(length(check) == 0) check <- "NONE" cat("The absorbing states are:", check ) cat("\n") # return outs # useful when user will assign the value returned invisible(outs) } ) markovchain/R/utils.R0000644000176200001440000000156213762012756014255 0ustar liggesusersprecomputeData <- function(mc) { list( object = mc, transitionMatrix = mc@transitionMatrix, states = mc@states, byrow = mc@byrow, irreducible = is.irreducible(mc), regular = is.regular(mc), canonicForm = canonicForm(mc), recurrentClasses = recurrentClasses(mc), transientClasses = transientClasses(mc), recurrentStates = recurrentStates(mc), transientStates = transientStates(mc), absorbingStates = absorbingStates(mc), hittingProbabilities = hittingProbabilities(mc), meanNumVisits = meanNumVisits(mc), meanRecurrenceTime = meanRecurrenceTime(mc), communicatingClasses = communicatingClasses(mc), steadyStates = steadyStates(mc), reachabilityMatrix = is.accessible(mc) ) } precomputeSteadyStates <- function(mc) { list( object = mc, expected = steadyStates(mc) ) }markovchain/R/zzz.R0000644000176200001440000000112313762012756013743 0ustar liggesusers# Author: Giorgio ############################################################################### # loading the markovchain package .onAttach <- function(libname, pkgname) { desc <- packageDescription(pkgname, libname) packageStartupMessage('Package: ', desc$Package, '\n', 
'Version: ', desc$Version, '\n', 'Date: ', desc$Date, '\n', 'BugReport: ', desc$BugReports, '\n') } # for unloading dynamic libraries .onUnload <- function (libpath) { library.dynam.unload("markovchain", libpath) }markovchain/R/fittingFunctions.R0000644000176200001440000006152114242452220016437 0ustar liggesusers#' Function to generate a sequence of states from homogeneous Markov chains. #' #' Provided any \code{markovchain} object, it returns a sequence of #' states coming from the underlying stationary distribution. #' #' @param n Sample size #' @param markovchain \code{markovchain} object #' @param t0 The initial state #' @param include.t0 Specify if the initial state shall be used #' @param useRCpp Boolean. Should RCpp fast implementation being used? Default is yes. #' #' @details A sequence of size n is sampled. #' #' @return A Character Vector #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @seealso \code{\link{markovchainFit}} #' #' @examples #' # define the markovchain object #' statesNames <- c("a", "b", "c") #' mcB <- new("markovchain", states = statesNames, #' transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), #' nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' # show the sequence #' outs <- markovchainSequence(n = 100, markovchain = mcB, t0 = "a") #' #' @export markovchainSequence <-function (n, markovchain, t0 = sample(markovchain@states, 1), include.t0 = FALSE, useRCpp = TRUE) { # check whether given initial state is possible state or not if (!(t0 %in% markovchain@states)) stop("Error! Initial state not defined") # call to cpp implmentation of markovchainSequence if (useRCpp) { return(.markovchainSequenceRcpp(n, markovchain, t0, include.t0)) } # R implementation of the function # create a sequence of size n initially not initialized chain <- rep(NA,n) # initial state state <- t0 # populate the sequence for (i in 1:n) { # row probabilty corresponding to the current state rowProbs <- markovchain@transitionMatrix[state, ] # select the next state outstate <- sample(size = 1, x = markovchain@states, prob = rowProbs) # store the new state chain[i] <- outstate # update the current state state <- outstate } # output out <- chain # whether to include initial state or not if (include.t0) { out <- c(t0, out) } return(out) } ################## # random sampler # ################## # check if the subsequent states are included in the previous ones # check the validity of non homogeneous markovchain list # object is a list of markovchain object .checkSequence <- function(object) { # assume non homogeneous markovchain list is valid out <- TRUE # list of one transition matrix implies valid if (length(object) == 1) { return(out) } # if number of transition matrices are more than one for (i in 2:length(object)) { # select the states which are reachable in one step if(object[[i - 1]]@byrow) { reachable <- (colSums(object[[i - 1]]@transitionMatrix) != 0) } else { reachable <- (rowSums(object[[i - 1]]@transitionMatrix) != 0) } # possible states in the previous markovchain object statesNm1 <- states(object[[i - 1]])[reachable] # possible states in the current markovchain object statesN <- states(object[[i]]) # common states intersection <- intersect(statesNm1, statesN) # condition to check whether statesNm1 is a subset of statesN or not if (setequal(intersection, statesNm1) == FALSE) { out <- FALSE break } } return(out) } #' Function to generate a sequence of states 
from homogeneous or non-homogeneous Markov chains. #' #' Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of #' states coming from the underlying stationary distribution. #' #' @param n Sample size #' @param object Either a \code{markovchain} or a \code{markovchainList} object #' @param what It specifies whether either a \code{data.frame} or a \code{matrix} #' (each rows represent a simulation) or a \code{list} is returned. #' @param useRCpp Boolean. Should RCpp fast implementation being used? Default is yes. #' @param parallel Boolean. Should parallel implementation being used? Default is yes. #' @param num.cores Number of Cores to be used #' @param ... additional parameters passed to the internal sampler #' #' @details When a homogeneous process is assumed (\code{markovchain} object) a sequence is #' sampled of size n. When a non - homogeneous process is assumed, #' n samples are taken but the process is assumed to last from the begin to the end of the #' non-homogeneous markov process. #' #' @return Character Vector, data.frame, list or matrix #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @note Check the type of input #' #' @seealso \code{\link{markovchainFit}}, \code{\link{markovchainSequence}} #' #' @examples #' # define the markovchain object #' statesNames <- c("a", "b", "c") #' mcB <- new("markovchain", states = statesNames, #' transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), #' nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' # show the sequence #' outs <- rmarkovchain(n = 100, object = mcB, what = "list") #' #' #' #define markovchainList object #' statesNames <- c("a", "b", "c") #' mcA <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mcB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mcC <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) #' #' # show the list of sequence #' rmarkovchain(100, mclist, "list") #' #' @export rmarkovchain <- function(n, object, what = "data.frame", useRCpp = TRUE, parallel = FALSE, num.cores = NULL, ...) { # check the class of the object if (is(object,"markovchain")) { out <- markovchainSequence(n = n, markovchain = object, useRCpp = useRCpp, ...) 
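    # homogeneous case: a single markovchain object, so one markovchainSequence call is enough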
return(out) } if (is(object,"markovchainList")) { ####################################################### if(useRCpp && !parallel) { # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() # call fast cpp function dataList <- .markovchainListRcpp(n, object@markovchains, include.t0, t0) # format in which results to be returned if (what == "data.frame") { out <- data.frame(iteration = dataList[[1]], values = dataList[[2]]) } else { # output in matrix format # each row is an independent sequence out <- matrix(data = dataList[[2]], nrow = n, byrow = TRUE) # output in list format if (what == "list") { # outlist <- list() # for (i in 1:nrow(out)) # outlist[[i]] <- out[i, ] # out <- outlist out <- as.list(data.frame(t(out), stringsAsFactors = FALSE)) out <- unname(out) } } return(out) } ########################################################## if(useRCpp && parallel) { # Calculate the number of cores # It's not good to use all cores no_cores <- max(1,parallel::detectCores() - 1) # number of cores specified should be less than or equal to maximum cores available if((! is.null(num.cores)) && num.cores <= no_cores + 1 && num.cores >= 1) { no_cores <- num.cores } RcppParallel::setThreadOptions(no_cores) # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() dataList <- .markovchainSequenceParallelRcpp(object, n, include.t0, t0) if(what == "list") return(dataList) # dimension of matrix to be returned nrow <- length(dataList) ncol <- length(dataList[[1]]) if(what == "matrix") { out <- matrix(unlist(dataList), nrow = nrow, ncol = ncol, byrow = TRUE) # for(i in 1:nrow) out[i, ] <- dataList[[i]] return(out) } iteration <- unlist(lapply(1:nrow, rep, times = ncol)) values <- unlist(dataList) # if what id data frame # for(i in 1:nrow) { # iteration <- c(iteration, rep(i, ncol)) # values <- append(values, dataList[[i]]) # } return(data.frame(iteration = iteration, values = values)) } ########################################################## if(!useRCpp && parallel) { # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() dataList <- .markovchainSequenceParallel(n, object, t0, num.cores, include.t0) if(what == "list") return(dataList) # dimension of matrix to be returned nrow <- length(dataList) ncol <- length(dataList[[1]]) if(what == "matrix") { out <- matrix(nrow = nrow, ncol = ncol) for(i in 1:nrow) out[i, ] <- dataList[[i]] return(out) } iteration <- numeric() values <- character() # if what id data frame for(i in 1:nrow) { iteration <- append(iteration, rep(i, ncol)) values <- append(values, dataList[[i]]) } return(data.frame(iteration = iteration, values = values)) } ########################################################## # store list of markovchain object in object object <- object@markovchains # check the validity of markovchainList object verify <- .checkSequence(object = object) # show warning if sequence is invalid if (!verify) { warning("Warning: 
some states in the markovchain sequences are not contained in the following states!") } # helper vector iteration <- numeric() values <- character() # create one sequence in each iteration for (i in 1:n) { # the first iteration may include initial state sampledValues <- markovchainSequence(n = 1, markovchain = object[[1]], ...) outIter <- rep(i, length(sampledValues)) # number of markovchain objects are more than one if (length(object) > 1) { for (j in 2:length(object)) { pos2take <- length(sampledValues) # select new state of the sequence from the old state # t0 refers to the old state newVals <-markovchainSequence(n = 1, markovchain = object[[j]], t0 = sampledValues[pos2take]) # update in every iteration outIter <- c(outIter, i) sampledValues <- c(sampledValues, newVals) } } # populate the helper vectors iteration <- c(iteration, outIter) values <- c(values, sampledValues) } # defining the output if (what == "data.frame") { out <- data.frame(iteration = iteration, values = values) } else { # ouput in matrix format out <- matrix(data = values, nrow = n, byrow = TRUE) # store each row of the matrix in the list if (what == 'list') { outlist <- list() for (i in 1:nrow(out)) outlist[[i]] <- out[i, ] out <- outlist } } } return(out) } ###################################################################### # helper function to calculate one sequence .markovchainSPHelper <- function(x, t0, mclist, include.t0) { # number of transition matrices n <- length(mclist@markovchains) # take care of initial state vin <- 0 if(include.t0) vin <- 1 # a character vector to store a single sequence seq <- character(length = n + vin) if(length(t0) == 0) { stateNames <- mclist@markovchains[[1]]@states t0 <- sample(x = stateNames, size = 1, prob = rep(1 / length(stateNames), length(stateNames))) } if(include.t0) seq[1] <- t0 invisible(lapply(seq_len(n), function(i) { stateNames <<- mclist@markovchains[[i]]@states byRow <- mclist@markovchains[[i]]@byrow # check whether transition matrix follows row-wise or column-wise fashion if(byRow) prob <- mclist@markovchains[[i]]@transitionMatrix[which(stateNames == t0), ] else prob <- mclist@markovchains[[i]]@transitionMatrix[, which(stateNames == t0)] # initial state for the next transition matrix t0 <<- sample(x = stateNames, size = 1, prob = prob) # populate the sequence vector seq[i+vin] <<- t0 } )) return(seq) } # Function to generate a list of sequence of states in parallel from non-homogeneous Markov chains. # # Provided any markovchainList object, it returns a list of sequence of states coming # from the underlying stationary distribution. # # @param n Sample size # @param object markovchainList object # @param t0 Initial state # @param num.cores Number of cores # .markovchainSequenceParallel <- function(n, object, t0 = character(), num.cores = NULL, include.t0 = FALSE) { # check for the validity of non-uniform markov chain verify <- .checkSequence(object@markovchains) if (!verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!") } # Calculate the number of cores # It's not good to use all cores no_cores <- max(1,parallel::detectCores() - 1) # number of cores specified should be less than or equal to maximum cores available if((! 
is.null(num.cores)) && num.cores <= no_cores + 1 && num.cores >= 1) { no_cores <- num.cores } # Initiate cluster cl <- parallel::makeCluster(no_cores) # export the variables to be used in the helper function # parallel::clusterExport(cl, "t0") # export the variables to be used in the helper function mclist <- object # parallel::clusterExport(cl, "mclist") # list of n sequence listSeq <- tryCatch(parallel::parLapply(cl, 1:n, .markovchainSPHelper, t0, mclist, include.t0), error=function(e) e, warning=function(w) w) # release the resources parallel::stopCluster(cl) return(listSeq) } ###################################################################### # function to fit a DTMC with Laplacian Smoother .mcFitLaplacianSmooth <- function(stringchar, byrow, laplacian = 0.01) { # every element of the matrix store the number of times jth state appears just # after the ith state origNum <- createSequenceMatrix(stringchar = stringchar, toRowProbs = FALSE) # add laplacian to the sequence matrix # why? to avoid the cases where sum of row is zero newNum <- origNum + laplacian # store sum of each row in the vector newSumOfRow <- rowSums(newNum) # helper matrix to convert frequency matrix to transition matrix newDen <- matrix(rep(newSumOfRow, length(newSumOfRow)), byrow = FALSE, ncol = length(newSumOfRow)) # transition matrix transMatr <- newNum / newDen # create a markovchain object outMc <- new("markovchain", transitionMatrix = transMatr, name = "Laplacian Smooth Fit") # transpose the transition matrix if (!byrow) { outMc@transitionMatrix <- t(outMc@transitionMatrix) outMc@byrow <- FALSE } # wrap markovchain object in a list out <- list(estimate = outMc) return(out) } # function that return a Markov Chain from a given matrix of observations # .matr2Mc <- function(matrData, laplacian = 0) { # # # number of columns in the input matrix # nCols <- ncol(matrData) # # # an empty character vector to store names of possible states # uniqueVals <- character() # # # populate uniqueVals with names of states # for(i in 1:nCols) { # uniqueVals <- union(uniqueVals, unique(as.character(matrData[,i]))) # } # # # possible states in lexicographical order # uniqueVals <- sort(uniqueVals) # # # create a contingency matrix which store the number of times # # jth state appear just after the ith state # contingencyMatrix <- matrix(rep(0, length(uniqueVals)^2), ncol = length(uniqueVals)) # # # set the names of rows and columns # rownames(contingencyMatrix) <- colnames(contingencyMatrix) <- uniqueVals # # # fill the contingency matrix # for (i in 1:nrow(matrData)) { # for (j in 2:nCols) { # # state in the ith row and (j-1)th column # stateBegin <- as.character(matrData[i, j-1]) # # # index of beginning state # whichRow <- which(uniqueVals == stateBegin) # # # state in the ith row and jth column # stateEnd <- as.character(matrData[i, j]) # # # index of ending state # whichCols <- which(uniqueVals == stateEnd) # # # update the contingency matrix # contingencyMatrix[whichRow, whichCols] <- contingencyMatrix[whichRow, whichCols] + 1 # } # } # # # add laplacian correction if needed # contingencyMatrix <- contingencyMatrix + laplacian # # # take care of rows with all entries 0 # sumOfRows <- rowSums(contingencyMatrix) # for(i in 1:length(sumOfRows)) { # if(sumOfRows[i] == 0) { # contingencyMatrix[i, ] <- 1 # sumOfRows[i] <- length(sumOfRows) # } # } # # # get a transition matrix and a DTMC # transitionMatrix <- contingencyMatrix / sumOfRows # # # markov chain object to be returned # outMc <- new("markovchain", transitionMatrix = 
transitionMatrix) # # return(outMc) # } #' @title markovchainListFit #' #' @description Given a data frame or a matrix (rows are observations, by cols #' the temporal sequence), it fits a non - homogeneous discrete time markov chain #' process (storing row). In particular a markovchainList of size = ncol - 1 is obtained #' estimating transitions from the n samples given by consecutive column pairs. #' #' @param data Either a matrix or a data.frame or a list object. #' @param laplacian Laplacian correction (default 0). #' @param byrow Indicates whether distinc stochastic processes trajectiories are shown in distinct rows. #' @param name Optional name. #' #' @details If \code{data} contains \code{NAs} then the transitions containing \code{NA} will be ignored. #' @return A list containing two slots: #' estimate (the estimate) #' name #' #' @examples #' #' # using holson dataset #' data(holson) #' # fitting a single markovchain #' singleMc <- markovchainFit(data = holson[,2:12]) #' # fitting a markovchainList #' mclistFit <- markovchainListFit(data = holson[, 2:12], name = "holsonMcList") #' @export markovchainListFit <- function(data, byrow = TRUE, laplacian = 0, name) { # check the format of input data if (!any(is.list(data),is.data.frame(data),is.matrix(data))) { stop("Error: data must be either a matrix or a data.frame or a list") } freqMatrixes <- list() # a pure list= a list and not a data frame if((is.list(data) == TRUE) & (is.data.frame(data)==FALSE)) { markovchains <- list() # list of frequency matrix freqMatrixes <- .mcListFitForList(data) } else{ # if input is data frame convert it to matrix if(is.data.frame(data)) { data <- unname(as.matrix(data)) } # make the entries row wise if it is not if(!byrow) { data <- t(data) } # number of columns in the matrix nCols <- ncol(data) # fit by columns freqMatrixes <- lapply(seq_len(nCols-1), function(i){ # (i-1)th transition matrix for transition from (i-1)th state to ith state matrData <- data[, c(i, i+1)] matrData[1, ] <- as.character(matrData[1, ]) # checking particular data for NA values. validTransition <- any(apply(matrData, 1, function(x){ !any(is.na(x)) })) if(validTransition) createSequenceMatrix(matrData, toRowProbs = FALSE, sanitize = TRUE) }) freqMatrixes <- freqMatrixes[ !sapply(freqMatrixes, is.null) ] } if(length(freqMatrixes) == 0) { return(list()) } markovchains <- lapply(freqMatrixes, function(freqMatrix){ # add laplacian correction freqMatrix <- freqMatrix + laplacian rSums <- rowSums(freqMatrix) # transition matrix tMatrix <- freqMatrix / rSums; estMc <- new("markovchain", transitionMatrix = tMatrix) estMc }) # create markovchainList object outMcList <- new("markovchainList", markovchains = markovchains) # wrap the object in a list out <- list(estimate = outMcList) # set the name of markovchainList object as given in the argument if(!missing(name)) { out$estimate@name <- name } return(out) } #' A function to compute multinomial confidence intervals of DTMC #' #' @description Return estimated transition matrix assuming a Multinomial Distribution #' #' @param transitionMatrix An estimated transition matrix. #' @param countsTransitionMatrix Empirical (conts) transition matrix, on which the \code{transitionMatrix} was performed. #' @param confidencelevel confidence interval level. #' #' @return Two matrices containing the confidence intervals. #' #' @seealso \code{markovchainFit} #' #' @references Constructing two-sided simultaneous confidence intervals #' for multinomial proportions for small counts in a large number of cells. 
#' Journal of Statistical Software 5(6) (2000) #' #' @examples #' seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") #' mcfit<-markovchainFit(data=seq,byrow=TRUE) #' seqmat<-createSequenceMatrix(seq) #' multinomialConfidenceIntervals(mcfit$estimate@transitionMatrix, seqmat, 0.95) #' @export multinomialConfidenceIntervals<-function(transitionMatrix, countsTransitionMatrix, confidencelevel=0.95) { out<-.multinomialCIRcpp(transMat=transitionMatrix, seqMat=countsTransitionMatrix,confidencelevel=confidencelevel) return(out) } #' return a joint pdf of the number of visits to the various states of the DTMC #' #' @description This function would return a joint pdf of the number of visits to #' the various states of the DTMC during the first N steps. #' #' @usage noofVisitsDist(markovchain,N,state) #' #' @param markovchain a markovchain-class object #' @param N no of steps #' @param state the initial state #' #' @details #' This function would return a joint pdf of the number of visits to #' the various states of the DTMC during the first N steps. #' #' @return a numeric vector depicting the above described probability density function. #' #' @author Vandit Jain #' #' @examples #' transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) #' simpleMc<-new("markovchain", states=c("a","b"), #' transitionMatrix=transMatr, #' name="simpleMc") #' noofVisitsDist(simpleMc,5,"a") #' #' @export noofVisitsDist <- function(markovchain,N = 5,state) { if(!is(markovchain,"markovchain")) stop("please provide a valid markovchain-class object") if(N <= 0) stop("please enter positive number of steps") # the transition matrix Tmatrix <- markovchain@transitionMatrix # character vector of states of the markovchain stateNames <- states(markovchain) i<--1 # initial state i <- which(stateNames == state) if(i==-1) stop("please provide a valid inital state") # call to Rcpp implementation of the function out <- .noofVisitsDistRCpp(Tmatrix,i,N) # adds state names names to the output vector names(out) <- stateNames out <- c(out) return(out) } markovchain/R/data.R0000644000176200001440000001301713762012756014024 0ustar liggesusers#' @name sales #' #' @title Sales Demand Sequences #' #' @description Sales demand sequences of five products (A, B, C, D, E). #' Each row corresponds to a sequence. First row corresponds to Sequence A, #' Second row to Sequence B and so on. #' #' @usage data("sales") #' #' @details The example can be used to fit High order multivariate #' markov chain. #' #' @examples #' data("sales") #' # fitHighOrderMultivarMC(seqMat = sales, order = 2, Norm = 2) #' "sales" #' @name blanden #' #' @title Mobility between income quartiles #' #' @description This table show mobility between income quartiles for father and sons for the 1970 cohort born #' #' @usage data(blanden) #' #' @details The rows represent fathers' income quartile when the son is aged 16, whilst the columns represent sons' income quartiles when he is aged 30 (in 2000). 
#' #' @source Personal reworking #' #' @references Jo Blanden, Paul Gregg and Stephen Machin, Intergenerational Mobility in Europe and North America, Center for Economic Performances (2005) #' #' @examples #' data(blanden) #' mobilityMc<-as(blanden, "markovchain") "blanden" #' @name craigsendi #' #' @title CD4 cells counts on HIV Infects between zero and six month #' #' @description This is the table shown in Craig and Sendi paper showing zero and six month CD4 cells count in six brakets #' #' @usage data(craigsendi) #' #' @format #' The format is: #' table [1:3, 1:3] 682 154 19 33 64 19 25 47 43 #' - attr(*, "dimnames")=List of 2 #' ..$ : chr [1:3] "0-49" "50-74" "75-UP" #' ..$ : chr [1:3] "0-49" "50-74" "75-UP" #' #' @details Rows represent counts at the beginning, cols represent counts after six months. #' #' @source Estimation of the transition matrix of a discrete time Markov chain, Bruce A. Craig and Peter P. Sendi, Health Economics 11, 2002. #' #' @references see source #' #' @examples #' data(craigsendi) #' csMc<-as(craigsendi, "markovchain") #' steadyStates(csMc) "craigsendi" #' @name holson #' #' @title Holson data set #' #' @description A data set containing 1000 life histories trajectories and a categorical status (1,2,3) observed on eleven evenly spaced steps. #' #' @usage data(holson) #' #' @format #' A data frame with 1000 observations on the following 12 variables. #' \describe{ #' \item{\code{id}}{unique id} #' \item{\code{time1}}{observed status at i-th time} #' \item{\code{time2}}{observed status at i-th time} #' \item{\code{time3}}{observed status at i-th time} #' \item{\code{time4}}{observed status at i-th time} #' \item{\code{time5}}{observed status at i-th time} #' \item{\code{time6}}{observed status at i-th time} #' \item{\code{time7}}{observed status at i-th time} #' \item{\code{time8}}{observed status at i-th time} #' \item{\code{time9}}{observed status at i-th time} #' \item{\code{time10}}{observed status at i-th time} #' \item{\code{time11}}{observed status at i-th time} #' } #' #' @details The example can be used to fit a \code{markovchain} or a \code{markovchainList} object. #' #' @source Private communications #' #' @references Private communications #' #' @examples #' data(holson) #' head(holson) "holson" #' @name kullback #' #' @title Example from Kullback and Kupperman Tests for Contingency Tables #' #' @format A list containing two 6x6 non - negative integer matrices #' #' @usage data(kullback) #' #' @description A list of two matrices representing raw transitions between two states "kullback" #' @name rain #' #' @title Alofi island daily rainfall #' #' @description Rainfall measured in Alofi Island #' #' @usage data(rain) #' #' @format #' A data frame with 1096 observations on the following 2 variables. #' \describe{ #' \item{\code{V1}}{a numeric vector, showing original coding} #' \item{\code{rain}}{a character vector, showing daily rainfall millilitres brackets} #' } #' #' @source Avery Henderson #' #' @references Avery Henderson, Fitting markov chain models on discrete time series such as DNA sequences #' #' @examples #' data(rain) #' rainMc<-markovchainFit(data=rain$rain) "rain" #' @name preproglucacon #' #' @title Preprogluccacon DNA protein bases sequences #' #' @description Sequence of bases for preproglucacon DNA protein #' #' @usage data(preproglucacon) #' #' @format #' A data frame with 1572 observations on the following 2 variables. 
#' \describe{ #' \item{\code{V1}}{a numeric vector, showing original coding} #' \item{\code{preproglucacon}}{a character vector, showing initial of DNA bases (Adenine, Cytosine, Guanine, Thymine)} #' } #' #' @source Avery Henderson #' #' @references Averuy Henderson, Fitting markov chain models on discrete time series such as DNA sequences #' #' @examples #' data(preproglucacon) #' preproglucaconMc<-markovchainFit(data=preproglucacon$preproglucacon) "preproglucacon" #' @name tm_abs #' #' @title Single Year Corporate Credit Rating Transititions #' #' @description Matrix of Standard and Poor's Global Corporate Rating Transition Frequencies 2000 (NR Removed) #' #' @usage data(tm_abs) #' #' @format #' The format is: #' num [1:8, 1:8] 17 2 0 0 0 0 0 0 1 455 ... #' - attr(*, "dimnames")=List of 2 #' ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... #' ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... #' #' @references #' European Securities and Markets Authority, 2016 #' https://cerep.esma.europa.eu/cerep-web/statistics/transitionMatrice.xhtml #' #' @examples #' data(tm_abs) "tm_abs" markovchain/R/RcppExports.R0000644000176200001440000005013714503772362015410 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 .isGenRcpp <- function(gen) { .Call(`_markovchain_isGen`, gen) } #' @name generatorToTransitionMatrix #' @title Function to obtain the transition matrix from the generator #' @description The transition matrix of the embedded DTMC is inferred from the CTMC's generator #' #' @usage generatorToTransitionMatrix(gen, byrow = TRUE) #' #' @param gen The generator matrix #' @param byrow Flag to determine if rows (columns) sum to 0 #' @return Returns the transition matrix. #' #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. #' Anderson, University of Wisconsin at Madison #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{rctmc}},\code{\link{ctmc-class}} #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' generatorToTransitionMatrix(gen) #' #' @export generatorToTransitionMatrix <- function(gen, byrow = TRUE) { .Call(`_markovchain_generatorToTransitionMatrix`, gen, byrow) } #' @name ctmcFit #' @title Function to fit a CTMC #' @description This function fits the underlying CTMC give the state #' transition data and the transition times using the maximum likelihood #' method (MLE) #' @usage ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) #' @param data It is a list of two elements. The first element is a character #' vector denoting the states. The second is a numeric vector denoting the #' corresponding transition times. #' @param byrow Determines if the output transition probabilities of the #' underlying embedded DTMC are by row. #' @param name Optional name for the CTMC. #' @param confidencelevel Confidence level for the confidence interval #' construnction. #' @return It returns a list containing the CTMC object and the confidence intervals. #' #' @details Note that in data, there must exist an element wise corresponding #' between the two elements of the list and that data[[2]][1] is always 0. 
#' @references Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{rctmc}} #' #' @examples #' data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) #' ctmcFit(data) #' #' @export #' ctmcFit <- function(data, byrow = TRUE, name = "", confidencelevel = 0.95) { .Call(`_markovchain_ctmcFit`, data, byrow, name, confidencelevel) } .ExpectedTimeRCpp <- function(x, y) { .Call(`_markovchain_ExpectedTimeRcpp`, x, y) } .probabilityatTRCpp <- function(y) { .Call(`_markovchain_probabilityatTRCpp`, y) } .impreciseProbabilityatTRCpp <- function(C, i, t, s, error) { .Call(`_markovchain_impreciseProbabilityatTRCpp`, C, i, t, s, error) } #' @export seq2freqProb <- function(sequence) { .Call(`_markovchain_seq2freqProb`, sequence) } #' @export seq2matHigh <- function(sequence, order) { .Call(`_markovchain_seq2matHigh`, sequence, order) } .markovchainSequenceRcpp <- function(n, markovchain, t0, include_t0 = FALSE) { .Call(`_markovchain_markovchainSequenceRcpp`, n, markovchain, t0, include_t0) } .markovchainListRcpp <- function(n, object, include_t0 = FALSE, t0 = character()) { .Call(`_markovchain_markovchainListRcpp`, n, object, include_t0, t0) } .markovchainSequenceParallelRcpp <- function(listObject, n, include_t0 = FALSE, init_state = character()) { .Call(`_markovchain_markovchainSequenceParallelRcpp`, listObject, n, include_t0, init_state) } #' @rdname markovchainFit #' #' @export createSequenceMatrix <- function(stringchar, toRowProbs = FALSE, sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain_createSequenceMatrix`, stringchar, toRowProbs, sanitize, possibleStates) } .mcListFitForList <- function(data) { .Call(`_markovchain_mcListFitForList`, data) } .matr2Mc <- function(matrData, laplacian = 0, sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain__matr2Mc`, matrData, laplacian, sanitize, possibleStates) } .list2Mc <- function(data, laplacian = 0, sanitize = FALSE) { .Call(`_markovchain__list2Mc`, data, laplacian, sanitize) } #' @name inferHyperparam #' @title Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set #' @description Since the Bayesian inference approach implemented in the package is based on conjugate priors, #' hyperparameters must be provided to model the prior probability distribution of the chain #' parameters. The hyperparameters are inferred from a given a priori matrix under the assumption #' that the matrix provided corresponds to the mean (expected) values of the chain parameters. A #' scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred #' from a data set. #' #' @param transMatr A valid transition matrix, with dimension names. #' @param scale A vector of scaling factors, each element corresponds to the row names of the provided transition #' matrix transMatr, in the same order. #' @param data A data set from which the hyperparameters are inferred. #' #' @details transMatr and scale need not be provided if data is provided. #' @return Returns the hyperparameter matrix in a list. #' #' @note The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, #' and the elements in the matrix are correspondingly permuted. #' #' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R #' package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi, Giorgio Spedicato #' @seealso \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} #' #' @examples #' data(rain, package = "markovchain") #' inferHyperparam(data = rain$rain) #' #' weatherStates <- c("sunny", "cloudy", "rain") #' weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, #' 0.3, 0.4, 0.3, #' 0.2, 0.4, 0.4), #' byrow = TRUE, nrow = 3, #' dimnames = list(weatherStates, weatherStates)) #' inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) #' #' @export #' inferHyperparam <- function(transMatr = matrix(), scale = numeric(), data = character()) { .Call(`_markovchain_inferHyperparam`, transMatr, scale, data) } #' @name markovchainFit #' @title Function to fit a discrete Markov chain #' @description Given a sequence of states arising from a stationary state, #' it fits the underlying Markov chain distribution using either MLE (also using a #' Laplacian smoother), bootstrap or by MAP (Bayesian) inference. #' #' @param data It can be a character vector or a {n x n} matrix or a {n x n} data frame or a list #' @param method Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace" #' @param byrow it tells whether the output Markov chain should show the transition probabilities by row. #' @param nboot Number of bootstrap replicates in case "bootstrap" is used. #' @param laplacian Laplacian smoothing parameter, default zero. It is only used when "laplace" method #' is chosen. #' @param name Optional character for name slot. #' @param parallel Use parallel processing when performing Boostrap estimates. #' @param confidencelevel \deqn{\alpha} level for conficence intervals width. #' Used only when \code{method} equal to "mle". #' @param confint a boolean to decide whether to compute Confidence Interval or not. #' @param hyperparam Hyperparameter matrix for the a priori distribution. If none is provided, #' default value of 1 is assigned to each parameter. This must be of size #' {k x k} where k is the number of states in the chain and the values #' should typically be non-negative integers. #' @param stringchar It can be a {n x n} matrix or a character vector or a list #' @param toRowProbs converts a sequence matrix into a probability matrix #' @param sanitize put 1 in all rows having rowSum equal to zero #' @param possibleStates Possible states which are not present in the given sequence #' #' @details Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} #' contain \code{NAs}, the related \code{NA} containing transitions will be ignored. #' #' @return A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix #' of standards deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method #' is used, the lower and upper confidence bounds are returned along with the standard error. #' The "map" method also returns the expected value of the parameters with respect to the #' posterior distribution. #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, #' and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, #' Alfred Hubler, Santa Fe Institute #' #' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R #' package version 0.2.5 #' #' @author Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi #' @note This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". #' In addition, parallel facility is not complete, involving only a part of the bootstrap process. #' When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is #' currently available. #' #' @seealso \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} #' @examples #' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", #' "b", "b", "b", "a") #' sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) #' mcFitMLE <- markovchainFit(data = sequence) #' mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") #' #' na.sequence <- c("a", NA, "a", "b") #' # There will be only a (a,b) transition #' na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) #' mcFitMLE <- markovchainFit(data = na.sequence) #' #' # data can be a list of character vectors #' sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) #' mcFitMap <- markovchainFit(sequences, method = "map") #' mcFitMle <- markovchainFit(sequences, method = "mle") #' @rdname markovchainFit #' #' @export #' markovchainFit <- function(data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, confint = TRUE, hyperparam = matrix(), sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain_markovchainFit`, data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, confint, hyperparam, sanitize, possibleStates) } .noofVisitsDistRCpp <- function(matrix, i, N) { .Call(`_markovchain_noofVisitsDistRCpp`, matrix, i, N) } .multinomialCIForRowRcpp <- function(x, confidencelevel) { .Call(`_markovchain_multinomialCIForRow`, x, confidencelevel) } .multinomialCIRcpp <- function(transMat, seqMat, confidencelevel) { .Call(`_markovchain_multinomCI`, transMat, seqMat, confidencelevel) } .commClassesKernelRcpp <- function(P) { .Call(`_markovchain_commClassesKernel`, P) } .communicatingClassesRcpp <- function(object) { .Call(`_markovchain_communicatingClasses`, object) } .transientStatesRcpp <- function(object) { .Call(`_markovchain_transientStates`, object) } .recurrentStatesRcpp <- function(object) { .Call(`_markovchain_recurrentStates`, object) } .recurrentClassesRcpp <- function(object) { .Call(`_markovchain_recurrentClasses`, object) } .transientClassesRcpp <- function(object) { .Call(`_markovchain_transientClasses`, object) } .reachabilityMatrixRcpp <- function(obj) { .Call(`_markovchain_reachabilityMatrix`, obj) } .isAccessibleRcpp <- function(obj, from, to) { .Call(`_markovchain_isAccessible`, obj, from, to) } .summaryKernelRcpp <- function(object) { .Call(`_markovchain_summaryKernel`, object) } .firstpassageKernelRcpp <- function(P, i, n) { .Call(`_markovchain_firstpassageKernel`, P, i, n) } .firstPassageMultipleRCpp <- function(P, i, setno, n) { .Call(`_markovchain_firstPassageMultipleRCpp`, P, i, setno, n) } .expectedRewardsRCpp <- function(matrix, n, rewards) { .Call(`_markovchain_expectedRewardsRCpp`, matrix, n, rewards) } .expectedRewardsBeforeHittingARCpp <- function(matrix, s0, rewards, n) { .Call(`_markovchain_expectedRewardsBeforeHittingARCpp`, matrix, s0, rewards, n) } .gcdRcpp <- function(a, b) { .Call(`_markovchain_gcd`, a, b) } #' @rdname structuralAnalysis #' #' @export period <- function(object) { 
.Call(`_markovchain_period`, object) } #' @title predictiveDistribution #' #' @description The function computes the probability of observing a new data #' set, given a data set #' @usage predictiveDistribution(stringchar, newData, hyperparam = matrix()) #' #' @param stringchar This is the data using which the Bayesian inference is #' performed. #' @param newData This is the data whose predictive probability is computed. #' @param hyperparam This determines the shape of the prior distribution of the #' parameters. If none is provided, default value of 1 is assigned to each #' parameter. This must be of size kxk where k is the number of states in the #' chain and the values should typically be non-negative integers. #' @return The log of the probability is returned. #' #' @details The underlying method is Bayesian inference. The probability is #' computed by averaging the likelihood of the new data with respect to the #' posterior. Since the method assumes conjugate priors, the result can be #' represented in a closed form (see the vignette for more details), which is #' what is returned. #' @references #' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, #' and Out-of-Class Modeling, Christopher C. Strelioff, James P. #' Crutchfield, Alfred Hubler, Santa Fe Institute #' #' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov #' Chains. R package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{markovchainFit}} #' @examples #' sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", #' "b", "b", "b", "a") #' hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) #' predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) #' hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] #' predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) #' predProb2==predProb #' @export #' predictiveDistribution <- function(stringchar, newData, hyperparam = matrix()) { .Call(`_markovchain_predictiveDistribution`, stringchar, newData, hyperparam) } #' @title priorDistribution #' #' @description Function to evaluate the prior probability of a transition #' matrix. It is based on conjugate priors and therefore a Dirichlet #' distribution is used to model the transitions of each state. #' @usage priorDistribution(transMatr, hyperparam = matrix()) #' #' @param transMatr The transition matrix whose probability is the parameter of #' interest. #' @param hyperparam The hyperparam matrix (optional). If not provided, a #' default value of 1 is assumed for each and therefore the resulting #' probability distribution is uniform. #' @return The log of the probabilities for each state is returned in a numeric #' vector. Each number in the vector represents the probability (log) of #' having a probability transition vector as specified in corresponding the #' row of the transition matrix. #' #' @details The states (dimnames) of the transition matrix and the hyperparam #' may be in any order. #' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First #' Order Markov Chains. R package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi, Giorgio Spedicato #' #' @note This function can be used in conjunction with inferHyperparam. 
For #' example, if the user has a prior data set and a prior transition matrix, #' he can infer the hyperparameters using inferHyperparam and then compute #' the probability of their prior matrix using the inferred hyperparameters #' with priorDistribution. #' @seealso \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} #' #' @examples #' priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), #' nrow = 2, #' dimnames = list(c("a", "b"), c("a", "b"))), #' matrix(c(2, 2, 2, 2), #' nrow = 2, #' dimnames = list(c("a", "b"), c("a", "b")))) #' @export priorDistribution <- function(transMatr, hyperparam = matrix()) { .Call(`_markovchain_priorDistribution`, transMatr, hyperparam) } .hittingProbabilitiesRcpp <- function(object) { .Call(`_markovchain_hittingProbabilities`, object) } .canonicFormRcpp <- function(obj) { .Call(`_markovchain_canonicForm`, obj) } .steadyStatesRcpp <- function(obj) { .Call(`_markovchain_steadyStates`, obj) } .absorbingStatesRcpp <- function(obj) { .Call(`_markovchain_absorbingStates`, obj) } .isIrreducibleRcpp <- function(obj) { .Call(`_markovchain_isIrreducible`, obj) } .isRegularRcpp <- function(obj) { .Call(`_markovchain_isRegular`, obj) } .meanAbsorptionTimeRcpp <- function(obj) { .Call(`_markovchain_meanAbsorptionTime`, obj) } .absorptionProbabilitiesRcpp <- function(obj) { .Call(`_markovchain_absorptionProbabilities`, obj) } .meanFirstPassageTimeRcpp <- function(obj, destination) { .Call(`_markovchain_meanFirstPassageTime`, obj, destination) } .meanRecurrenceTimeRcpp <- function(obj) { .Call(`_markovchain_meanRecurrenceTime`, obj) } .minNumVisitsRcpp <- function(obj) { .Call(`_markovchain_meanNumVisits`, obj) } .isProbability <- function(prob) { .Call(`_markovchain_isProb`, prob) } .isStochasticMatrix <- function(m, byrow) { .Call(`_markovchain_isStochasticMatrix`, m, byrow) } .isProbabilityVector <- function(prob) { .Call(`_markovchain_isProbVector`, prob) } .testthatIsAccesibleRcpp <- function(obj) { .Call(`_markovchain_checkIsAccesibleMethod`, obj) } .approxEqualMatricesRcpp <- function(a, b) { .Call(`_markovchain_approxEqual`, a, b) } .testthatIsPartitionRcpp <- function(commClasses, states) { .Call(`_markovchain_isPartition`, commClasses, states) } .testthatAreHittingRcpp <- function(probs, hitting, byrow) { .Call(`_markovchain_areHittingProbabilities`, probs, hitting, byrow) } .testthatAreMeanNumVisitsRcpp <- function(probs, numVisits, hitting, byrow) { .Call(`_markovchain_areMeanNumVisits`, probs, numVisits, hitting, byrow) } .testthatRecurrentHittingRcpp <- function(recurrentClasses, hitting, states, byrow) { .Call(`_markovchain_recurrentHitting`, recurrentClasses, hitting, states, byrow) } .testthatHittingAreOneRcpp <- function(matrix) { .Call(`_markovchain_hittingProbsAreOne`, matrix) } .testthatAbsorbingAreRecurrentClassRcpp <- function(absorbingStates, recurrentClasses) { .Call(`_markovchain_absorbingAreRecurrentClass`, absorbingStates, recurrentClasses) } markovchain/R/hommc.R0000644000176200001440000002572014430770064014216 0ustar liggesusers#' An S4 class for representing High Order Multivariate Markovchain (HOMMC) #' #' @slot order an integer equal to order of Multivariate Markovchain #' @slot states a vector of states present in the HOMMC model #' @slot P array of transition matrices #' @slot Lambda a vector which stores the weightage of each transition matrices in P #' @slot byrow if FALSE each column sum of transition matrix is 1 else row sum = 1 #' @slot name a name given to hommc #' #' @author Giorgio Spedicato, Deepak Yadav #' #' 
@examples #' statesName <- c("a", "b") #' #' P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) #' P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) #' P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) #' P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) #' P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) #' #' Lambda <- c(0.8, 0.2, 0.3, 0.7) #' #' ob <- new("hommc", order = 1, states = statesName, P = P, #' Lambda = Lambda, byrow = FALSE, name = "FOMMC") #' #'@export hommc <- setClass("hommc", slots = list(order = "numeric", states = "character", P = "array", Lambda = "numeric", byrow = "logical", name = "character") ) # internal method to show hommc object in informative way .showHommc <- function(object) { # whether data in transition matrices are stored in column-wise or row-wise fashion if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } # display order and unique states cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") # display transition matrices and the corresponding lambdas n <- object@order s <- sqrt((dim(object@P))[3]/n) for(i in 1:s) { for(j in 1:s) { # t is the index of transition matrix for transition from i sequence to j sequence # order of transition matrices in P is P1{1,1},P2{1,1}..Pn{1,1},P1{1,2}....Pn{s,s} t <- n * s * (i-1) + (j-1) * n for(k in 1:n) { cat("Lambda", k, "(", i, ",", j, ") : ", object@Lambda[t+k],"\n", sep = "") cat("P", k, "(", i, ",", j, ") : \n", sep = "") print(object@P[, , t+k]) cat("\n") } } } } #' @title Function to display the details of hommc object #' @description This is a convenience function to display the slots of hommc object #' in proper format #' #' @param object An object of class hommc #' #' @rdname hommc-show #' @export setMethod("show", "hommc", function(object){ .showHommc(object) } ) # all transition matrices # n*s*s n = order s = number of categorical sequences # verified using two examples from research paper .allTransMat <- function(data, order = 2) { n <- order # order uelement <- sort(unique(as.character(data))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(data) # number of categorical sequence lseq <- ncol(data) # length of each categorical sequence # store all transition matrices allTmat <- array(dim = c(length(uelement), length(uelement), n*s*s), dimnames = list(uelement, uelement)) t <- 1 # help for(i in 1:s) { for(j in 1:s) { x <- data[j, ] # jth sequence y <- data[i, ] # ith sequence # jumps for(h in 1:n) { # column wise allTmat[ , , t] <- t(createSequenceMatrix(matrix(c(x[1:(lseq-h)], y[-(1:h)]), ncol = 2, byrow = FALSE), toRowProbs = TRUE, possibleStates = uelement, sanitize = TRUE)) t <- t + 1 } } } return(allTmat) } # distribution of each categorical sequence based on the frequency # verified using two examples from research paper .allFreqProbMat <- function(data) { uelement <- sort(unique(as.character(data))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(data) # number of categorical sequence # frequency based probability for all sequences freqMat <- array(0, dim = c(m, 1, s), dimnames = list(uelement)) for(i in 1:s) { idata <- data[i, ] # ith categorical sequence # populate frequency matrix for(j in idata) { freqMat[j, 1, i] <- freqMat[j, 1, i] + 1 } # normalization freqMat[, , i] <- freqMat[, , i] / sum(freqMat[, , i]) } 
return(freqMat) } # objective function to pass to solnp .fn3 <- function(params, ...) { hdata <- list(...) # calculate error error <- 0 # number of categorical sequence s <- hdata$s # order n <- hdata$n # number of uniq states || dimension of t-matrix m <- hdata$m # array of transition matrices allTmat <- hdata$allTmat # all frequency matrix freqMat <- hdata$freqMat # norm Norm <- hdata$Norm for(i in 1:s) { helper <- matrix(0, nrow = m*n, ncol = 1) for(j in 1:s) { helper2 <- matrix(0, nrow = m, ncol = 1) y <- n * (j - 1 + s * (i - 1)) for(k in 1:n) { helper2 <- helper2 + params[y + k] * (allTmat[ , , y + k] %*% matrix(freqMat[ , , j])) } helper[1:m, ] <- helper[1:m, ] + helper2 if(i == j && n>= 2) { for(k in 2:n) { p <- (k - 1) * m helper[(p + 1):(p + m)] <- freqMat[ , , j] } } } error <- error + sum(abs((helper - freqMat[ , , i]) ^ Norm)) } return(error ^ (1 / Norm)) } # equality constraint function to pass to solnp .eqn3 <- function(params, ...) { hdata <- list(...) # number of categorical sequence s <- hdata$s # order n <- hdata$n toReturn <- numeric() for(i in 1:s) { toReturn[i] <- sum(params[((i - 1) * n * s + 1):(i * n * s)]) } return(toReturn) } #' Function to fit Higher Order Multivariate Markov chain #' #' @description Given a matrix of categorical sequences it fits #' Higher Order Multivariate Markov chain. #' #' @param seqMat a matrix or a data frame where each column #' is a categorical sequence #' @param order Multivariate Markov chain order. Default is 2. #' @param Norm Norm to be used. Default is 2. #' #' @return an hommc object #' #' @examples #' data <- matrix(c('2', '1', '3', '3', '4', '3', '2', '1', '3', '3', '2', '1', #' c('2', '4', '4', '4', '4', '2', '3', '3', '1', '4', '3', '3')), #' ncol = 2, byrow = FALSE) #' #' fitHighOrderMultivarMC(data, order = 2, Norm = 2) #' #' @references W.-K. Ching et al. / Linear Algebra and its Applications #' #' @author Giorgio Spedicato, Deepak Yadav #' #' @export fitHighOrderMultivarMC <- function(seqMat, order = 2, Norm = 2) { if (requireNamespace("Rsolnp", quietly = TRUE)) { message("This function is experimental") if(is.data.frame(seqMat) == TRUE) { seqMat <- as.matrix(seqMat) } seqMat <- t(seqMat) # array of transition matrices allTmat <- .allTransMat(seqMat, order = order) # array of freq probability freqMat <- .allFreqProbMat(seqMat) n <- order # order uelement <- sort(unique(as.character(seqMat))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(seqMat) # number of categorical sequence lmbda <- rep(1 / (n * s), n * s * s) fit <- Rsolnp::solnp(pars = lmbda, fun = .fn3, eqfun = .eqn3, eqB = rep(1, s), LB = rep(0, n * s * s), control = list(trace = 0), allTmat = allTmat, freqMat = freqMat, n = n, m = m, s = s, Norm = Norm) return(new("hommc", order = order, Lambda = fit$pars, P = allTmat, states = uelement, byrow = FALSE)) } else { print("Rsolnp unavailable") return(NULL) } } #' Simulate a higher order multivariate markovchain #' #' @description #' This function provides a prediction of states for a higher order #' multivariate markovchain object #' #' @usage predictHommc(hommc,t,init) #' #' @param hommc a hommc-class object #' @param t no of iterations to predict #' @param init matrix of previous states size of which depends on hommc #' #' @details #' The user is required to provide a matrix of giving n previous coressponding #' every categorical sequence. Dimensions of the init are s X n, where s is #' number of categorical sequences and n is order of the homc. 
#' #' @return #' The function returns a matrix of size s X t displaying t predicted states #' in each row coressponding to every categorical sequence. #' #' @author Vandit Jain #' #' #' @export predictHommc <- function(hommc, t, init) { ## order of markovchain n <- hommc@order ## number of categorical sequences s <- sqrt((dim(hommc@P))[3]/n) ## list of states states <- hommc@states ## size of set of all possible states m <- length(states) ## if initial states not provided take statndard example if(missing(init)) { init <- matrix(rep(states[1],s*n),nrow = s,byrow = TRUE) } if(!all(dim(init) == c(s,n))){ stop("Please provide sufficient number of previous states") } if(!is(hommc, "hommc")) { stop("Please provide a valid hommc-class object") } if(t <=0) stop("T should be a positive integer") for(i in 1:s) { for(j in 1:n) { if(!(init[i,j] %in% states)) stop("invalid states in provided state matrix init") } } ## initialize result matrix result <- matrix(NA,nrow = s,ncol = t) ## runs loop according to hommc class structure for(i in 1:t) { for(j in 1:s) { ## initialises probability according rowProbs <- rep(0,m) ## probability for current sequence depends all sequence for(k in 1:s) { ## gets index of coressponding in the 3-D array P # index is the index of transition matrix for transition from i sequence to j sequence # order of transition matrices in P is P1{1,1},P2{1,1}..Pn{1,1},P1{1,2}....Pn{s,s} index <- n * s * (j-1) + n * (k-1) ## iterates for all order 1 to n for(h in 1:n) { prev <- init[j,n-h+1] label <- which(prev == states) rowProbs <- rowProbs + hommc@Lambda[h + index] * hommc@P[label, ,h + index] } } ## uses sample function from base package curr <- sample(size = 1, x = states, prob = rowProbs) ## changes init for next t iteration for(temp in 2:n) { if(temp <= n) init[j,temp-1] = init[j,temp] } init[j,n] = curr; result[j,i] = curr; } } ## returns result return(result) } markovchain/R/matlab_package_functions.R0000644000176200001440000000047114243244474020115 0ustar liggesusers#' Matrix to create zeros #' #' @param n size of the matrix #' #' @return a square matrix of zeros zeros <- function(n) { return(matrix(0,n,n)) } #' Returns an Identity matrix #' #' @param n size of the matrix #' #' @return a identity matrix ones <- function(n) { return(matrix(1,n,n)) } markovchain/R/fitHigherOrder.R0000644000176200001440000000542514430750060016012 0ustar liggesusers#' @title Higher order Markov Chains class #' @name HigherOrderMarkovChain-class #' @description The S4 class that describes \code{HigherOrderMarkovChain} objects. #' #' @export setClass("HigherOrderMarkovChain", #class name representation( states = "character", order = "numeric", transitions = "list", name = "character" ) # , prototype(states = c("a","b"), byrow = TRUE, # prototypizing # transitionMatrix=matrix(data = c(0,1,1,0), # nrow=2, byrow=TRUE, dimnames=list(c("a","b"), c("a","b"))), # name="Unnamed Markov chain") ) # objective function to pass to solnp .fn1=function(params) { QX <- get("QX") X <- get("X") error <- 0 for (i in 1:length(QX)) { error <- error+(params[i] * QX[[i]]-X) } return(sum(error^2)) } # equality constraint function to pass to solnp .eqn1=function(params){ return(sum(params)) } #' @name fitHigherOrder #' @aliases seq2freqProb seq2matHigh #' @title Functions to fit a higher order Markov chain #' #' @description Given a sequence of states arising from a stationary state, it #' fits the underlying Markov chain distribution with higher order. 
#' @usage #' fitHigherOrder(sequence, order = 2) #' seq2freqProb(sequence) #' seq2matHigh(sequence, order) #' #' @param sequence A character list. #' @param order Markov chain order #' @return A list containing lambda, Q, and X. #' #' @references #' Ching, W. K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov #' chains. In Markov Chains (pp. 141-176). Springer US. #' #' Ching, W. K., Ng, M. K., & Fung, E. S. (2008). Higher-order multivariate #' Markov chains and their applications. Linear Algebra and its Applications, #' 428(2), 492-507. #' #' @author Giorgio Spedicato, Tae Seung Kang #' #' @examples #' sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b", #' "c", "a", "b", "c", "a", "b", "a", "b") #' fitHigherOrder(sequence) #' #' @export fitHigherOrder<-function(sequence, order = 2) { # prbability of each states of sequence if (requireNamespace("Rsolnp", quietly = TRUE)) { X <- seq2freqProb(sequence) # store h step transition matrix Q <- list() QX <- list() for(o in 1:order) { Q[[o]] <- seq2matHigh(sequence, o) QX[[o]] <- Q[[o]]%*%X } environment(.fn1) <- environment() params <- rep(1/order, order) model <- Rsolnp::solnp(params, fun=.fn1, eqfun=.eqn1, eqB=1, LB=rep(0, order), control=list(trace=0)) lambda <- model$pars out <- list(lambda=lambda, Q=Q, X=X) } else { print("package Rsolnp unavailable") out <- NULL } return(out) } markovchain/R/ctmcClassesAndMethods.R0000644000176200001440000003314713762012756017334 0ustar liggesusers#' @title Continuous time Markov Chains class #' @name ctmc-class #' @aliases dim,ctmc-method initialize,ctmc_method states,ctmc-method #' steadyStates,ctmc-method plot,ctmc,missing-method #' @description The S4 class that describes \code{ctmc} (continuous #' time Markov chain) objects. #' #' @param states Name of the states. Must be the same of #' \code{colnames} and \code{rownames} of the generator matrix #' @param byrow TRUE or FALSE. Indicates whether the given matrix is #' stochastic by rows or by columns #' @param generator Square generator matrix #' @param name Optional character name of the Markov chain #' #' @section Methods: #' #' \describe{ #' \item{dim}{\code{signature(x = "ctmc")}: method to get the size} #' \item{initialize}{\code{signature(.Object = "ctmc")}: initialize #' method } #' \item{states}{\code{signature(object = "ctmc")}: states method. } #' \item{steadyStates}{\code{signature(object = "ctmc")}: method to get the #' steady state vector. } #' \item{plot}{\code{signature(x = "ctmc", y = "missing")}: plot method #' for \code{ctmc} objects } #' } #' #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences #' (2013), David F. Anderson, University of Wisconsin at Madison. Sai Bhargav #' Yalamanchi, Giorgio Spedicato #' #' @note #' \enumerate{ #' \item \code{ctmc} classes are written using S4 classes #' \item Validation method is used to assess whether either columns or rows totals to zero. #' Rounding is used up to 5th decimal. If state names are not properly defined #' for a generator \code{matrix}, coercing to \code{ctmc} object leads to overriding #' states name with artificial "s1", "s2", ... 
sequence #' } #' @seealso \code{\link{generatorToTransitionMatrix}},\code{\link{rctmc}} #' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' steadyStates(molecularCTMC) #' \dontrun{plot(molecularCTMC)} #' #' @keywords classes #' #' @export setClass("ctmc", representation(states = "character", byrow = "logical", generator = "matrix", name = "character"), prototype(states = c("a", "b"), byrow = TRUE, generator = matrix(data = c(-1, 1, 1, -1), byrow = TRUE, nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), name = "Unnamed CTMC") ) setMethod("initialize", signature(.Object = "ctmc"), function (.Object, states, byrow, generator,name,...) { # put the standard markovchain if(missing(generator)) generator=matrix(data=c(-1, 1, 1, -1), #create a naive matrix nrow=2, byrow=TRUE, dimnames=list(c("a", "b"), c("a", "b")) ) # check names of transition matrix if(all(is.null(rownames(generator)), is.null(colnames(generator)))==TRUE) { #if all names are missing it initializes them to "1", "2",... if(missing(states)) { nr=nrow(generator) stateNames<-as.character(seq(1:nr)) } else {stateNames=states} rownames(generator)=stateNames colnames(generator)=stateNames } else if(is.null(rownames(generator))) { #fix when rownames null rownames(generator)=colnames(generator) } else if(is.null(colnames(generator))) { #fix when colnames null colnames(generator)=rownames(generator) } else if(!setequal(rownames(generator),colnames(generator))) colnames(generator)=rownames(generator) #fix when different if(missing(states)) states=rownames(generator) #assign if(missing(byrow)) byrow=TRUE #set byrow as true by default if(missing(name)) name="Unnamed Markov chain" #generic name to the object callNextMethod(.Object, states = states, byrow = byrow, generator=generator,name=name,...) } ) #returns states of the ctmc setMethod("states", "ctmc", function(object) { out <- object@states return(out) } ) #returns states of the ctmc setMethod("dim", "ctmc", function(x) { out <- nrow(x@generator) return(out) } ) setValidity("ctmc", function(object) { check<-NULL # performs a set of check whose results are saved in check if (.isGenRcpp(object@generator)==FALSE) check <- "Error! Not a generator matrix" if (object@byrow==TRUE) { if(any(round(rowSums(object@generator),5)!=0)) check <- "Error! Row sums not equal to zero" } else { if(any(round(colSums(object@generator),5)!=0)) check <- "Error! Col sums not equal to zero" } if (nrow(object@generator)!=ncol(object@generator)) check <- "Error! Not squared matrix" #check if squalre matrix if (!setequal(colnames(object@generator),object@states)) check <- "Error! Colnames <> states" #checks if if (!setequal(rownames(object@generator),object@states)) check <- "Error! 
Rownames <> states" if ( is.null(check) ) return(TRUE) else return(check) } ) .ctmcEigen<-function(matr, transpose=TRUE) { # Function to extract eigenvalues, core of get steady states # # Args: # matr: the matrix to extract # transpose: boolean indicating whether the matrx shall be transpose # # Results: # a matrix / vector if (transpose) tMatr <- t(matr) else tMatr <- matr #trasposing eigenResults <- eigen(x=tMatr,symmetric=FALSE) #perform the eigenvalue extraction onesIndex <- which(round(eigenResults$values,3)==1) #takes the one eigenvalue #do the following: 1:get eigenvectors whose eigenvalues==1 #2: normalize if (length(onesIndex)==0) { warning("No eigenvalue = 1 found - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } if(length(onesIndex) > 1){ warning("Eigenvalue = 1 multiplicity > 1! - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } if (transpose==TRUE) { eigenTake <- as.matrix(t(eigenResults$vectors[,onesIndex])) if(rowSums(Im(eigenTake)) != 0){ warning("Eigenvector corresponding to largest eigenvalue has a non-zero imaginary part - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } out <- eigenTake } else { eigenTake <- as.matrix(eigenResults$vectors[,onesIndex]) if(colSums(Im(eigenTake)) != 0){ warning("Eigenvector corresponding to largest eigenvalue has a non-zero imaginary part - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } out <- eigenTake } return(out) } setMethod("steadyStates", "ctmc", function(object) { transposeYN <- FALSE if(object@byrow==TRUE) transposeYN <- TRUE transMatr <- generatorToTransitionMatrix(object@generator, byrow = object@byrow) out<-.ctmcEigen(matr=transMatr, transpose=transposeYN) if(is.null(out)) { warning("Warning! No steady state") return(NULL) } if(transposeYN==TRUE) { colnames(out) <- object@states } else { rownames(out) <- object@states } out <- - out / diag(object@generator) if(transposeYN==TRUE){ out <- out / rowSums(out) } else{ out <- out / colSums(out) } return(out) } ) # internal function for plotting ctmc object using igraph .getNetctmc <- function(object, round = FALSE) { # function to get the graph adjacency object to plot and export to igraph # # Args: # object: a ctmc object # round: boolean to round # # Returns: # # a graph adjacency if (object@byrow == FALSE) { object <- t(object) } #gets the generator matrix matr <- object@generator * 100 if(round == TRUE) { matr <- round(matr, 2) } net <- graph.adjacency(adjmatrix = matr, weighted = TRUE, mode = "directed") return(net) } setMethod("plot",signature(x="ctmc",y="missing"), function(x,y,package = "igraph",...){ switch(package, diagram = { if (requireNamespace("diagram", quietly = TRUE)) { .plotdiagram(object = x, ...) } else { netMc <- .getNetctmc(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, DiagrammeR = { if (requireNamespace("DiagrammeR", quietly = TRUE)) { .plotDiagrammeR(object = x, ...) } else { netMc <- .getNetctmc(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, { netMc <- .getNetctmc(object = x,round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) 
}) } ) #' An S4 class for representing Imprecise Continuous Time Markovchains #' #' @slot states a vector of states present in the ICTMC model #' @slot Q matrix representing the generator demonstrated in the form of variables #' @slot range a matrix that stores values of range of variables #' @slot name name given to ICTMC #' ictmc <- setClass("ictmc", slots = list(states = "character", Q = "matrix", range = "matrix", name = "character") ) setMethod("initialize", signature(.Object = "ictmc"), function (.Object, states, Q, range, name, ...) { if(missing(Q)) Q=matrix(data=c(-1, 1, 1, -1), #create a naive matrix nrow=2, byrow=TRUE, dimnames=list(c("n", "y"), c("n", "y")) ) if(missing(range)) range = matrix(c(1/52, 3/52, 1/2, 2), nrow = 2, byrow = 2) #if all names are missing it initializes them to "1", "2",... if(all(is.null(rownames(Q)), is.null(colnames(Q)))==TRUE) { if(missing(states)) { nr=nrow(Q) stateNames<-as.character(seq(1:nr)) } else {stateNames=states} rownames(Q)=stateNames colnames(Q)=stateNames } else if(is.null(rownames(Q))) { #fix when rownames null rownames(Q)=colnames(Q) } else if(is.null(colnames(Q))) { #fix when colnames null colnames(Q)=rownames(Q) } else if(!setequal(rownames(Q),colnames(Q))) colnames(Q)=rownames(Q) #fix when different if(missing(states)) states=rownames(Q) #assign if(missing(name)) name="Unnamed imprecise CTMC" #generic name to the object callNextMethod(.Object, states = states, Q = Q, range=range,name=name,...) } ) setValidity("ictmc", function(object) { check<-NULL # performs a set of check whose results are saved in check if (.isGenRcpp(object@Q)==FALSE) check <- "Error! Not a generator matrix" if(any(round(rowSums(object@Q),5)!=0)) check <- "Error! Row sums not equal to zero" if ( nrow(object@Q) != ncol(object@Q )) check <- "Error! Not squared matrix" #check if square matrix if ( !setequal(colnames(object@Q),object@states )) check <- "Error! Colnames <> states" if ( !setequal(rownames(object@Q),object@states )) check <- "Error! Rownames <> states" if(nrow(object@range) != nrow(object@Q) && ncol(object@range) != 2) check <- "Error! dimension of range matrix not correct." for(i in 1:nrow(object@Q)){ if( object@range[i,1] > object@range[i,2] ){ check <- "Error, improper values set in range matrix." } } if(min(object@range) < 0){ check <- "Error, values in the range matrix should be greater than zero." 
} if ( is.null(check) ) return(TRUE) else return(check) } ) markovchain/R/random.R0000644000176200001440000000235113762012756014372 0ustar liggesusers# Methods to generate random markov chains normalizeMatrix <- function(matrix, byrow = TRUE) { margin <- ifelse (byrow, 1, 2) n <- nrow(matrix) result <- sapply(1:n, function(i) { row <- matrix[i, ] rowSum <- sum(row) if (rowSum == 0) { values <- c(rep(0, i - 1), 1, rep(0, n - i)) values } else { row / rowSum } }) # If we want the result by rows, we have to transpose the matrix, # since the apply method with margin = 1 (over rows) returns the result # by columns if (byrow) t(result) else result } # Returns a random stochastic matrix randomStochasticMatrix <- function(n, zeroProb, byrow = TRUE) { numRandom <- n * n randomNums <- stats::runif(numRandom) remainProb <- (1 - zeroProb) / numRandom probs <- c(zeroProb, rep(remainProb, numRandom)) entries <- sample(c(0, randomNums), numRandom, prob = probs, replace = TRUE) result <- matrix(entries, n, n, byrow) result <- normalizeMatrix(result, byrow) result } randomMarkovChain <- function(n, zeroProb = 0.95, byrow = TRUE) { matrix <- randomStochasticMatrix(n, zeroProb, byrow) new("markovchain", transitionMatrix = matrix, byrow = byrow) }markovchain/R/statisticalTests.R0000644000176200001440000004317714243470374016473 0ustar liggesusers#helper functions #helper function for checkMP .findNijPjk<-function(Nijk = Nijk, Nij = Nij, trans, row = 1){ i <- Nijk[row,1] j <- Nijk[row,2] k <- Nijk[row,3] fromCh <- as.character(j) toCh <- as.character(k) Pjk <- trans[fromCh,toCh] m1 <- which(Nij[, 1] == i) m2 <- which(Nij[, 2] == j) m <- c(m1, m2) return(Nij[m[anyDuplicated(m)], 3] * Pjk) } #' @name verifyMarkovProperty #' #' @rdname statisticalTests #' @family statisticalTests #' #' @title Various functions to perform statistical inference of DTMC #' @description These functions verify the Markov property, assess #' the order and stationarity of the Markov chain. #' #' @param sequence An empirical sequence. #' @param verbose Should test results be printed out? #' @param nblocks Number of blocks. #' #' @return Verification result #' #' @references Anderson and Goodman. #' #' @author Tae Seung Kang, Giorgio Alfredo Spedicato #' #' @seealso \code{markovchain} #' #' @examples #' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", #' "a", "b", "a", "a", "b", "b", "b", "a") #' mcFit <- markovchainFit(data = sequence, byrow = FALSE) #' verifyMarkovProperty(sequence) #' assessOrder(sequence) #' assessStationarity(sequence, 1) #' #' #' @export # check if the sequence holds the Markov property verifyMarkovProperty <- function(sequence, verbose = TRUE) { #warning("The accuracy of the statistical inference functions has been questioned. 
It will be thoroughly investigated in future versions of the package.") #fitting the markovchain transMatrix <- markovchainFit(data = sequence)$estimate@transitionMatrix #make the (n-2)x3 matrix for observations subSample<-sequence[1:(length(sequence) - (length(sequence)%%3))] seqSet1<-matrix(c(subSample[1:(length(subSample) - 2)], subSample[2:(length(subSample) - 1)], subSample[3:(length(subSample))] ),ncol = 3) #fill the matrix in reverse order so position 11 is the first obersvation,12 second and 13 third #compute row frequencies temp<-as.data.frame(seqSet1) Nijk<-aggregate(temp, by = temp, length)[1:(ncol(temp) + 1)] seqSet2 <- seqSet1[, -3] #make matrix of couples temp2 <- as.data.frame(seqSet2) Nij <- aggregate(temp2, by = temp2, length)[1:(ncol(temp2) + 1)] #rowfrequencies included test<-c(length = dim(Nijk)[1]) #compute the test statistic invisible(lapply(seq_len(dim(Nijk)[1]),function(i) { foundNijPjk <- .findNijPjk(Nijk = Nijk, Nij = Nij, trans = transMatrix, row = i) test[i] <<- ((Nijk[i,4]-foundNijPjk)^2)/foundNijPjk }) ) statistic <- sum(test) #return value of the test statistic and test at confience level 95% and 99% #dof #Steps : No. of df = No. of triplets - No. of doubles + No. of observations - 1 #Creating vector of doubles/pairs doubles = numeric(length(sequence)-1) for(i in 1:(length(doubles))) {doubles[i] = paste(sequence[i], sequence[i+1], sep="", collapse = NULL)} #Creating vector of triplets triples = numeric(length(sequence)-2) for(i in 1:(length(triples))) {triples[i] = paste(sequence[i], sequence[i+1], sequence[i+2], sep="", collapse = NULL)} #Hence no. of df is--- dof = length(unique(triples)) - length(unique(doubles)) + length(unique(sequence)) - 1 pvalue <- 1-pchisq(q = statistic,df = dof) out <- list(statistic = statistic,dof = dof,p.value = pvalue) if (verbose == TRUE) { cat("Testing markovianity property on given data sequence\n") cat("Chi - square statistic is:", statistic, "\n") cat("Degrees of freedom are:", dof, "\n") cat("And corresponding p-value is:", pvalue, "\n") } invisible(out) } #' @rdname statisticalTests #' @export # check if sequence is of first order or of second order assessOrder <- function(sequence, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") # length of sequence n <- length(sequence) # unique states states <- unique(sequence) # number of unique states nelements <- length(states) TStat <- 0 for(present in states) { # going to be a transition matrix mat <- zeros(nelements) dimnames(mat) <- list(states, states) # populate transition matrix for(i in 1:(n - 2)) { if(present == sequence[i + 1]) { past <- sequence[i] future <- sequence[i + 2] mat[past, future] <- mat[past, future] + 1 } } # chi-squared test res <- chisq.test(mat) TStat <- TStat + res$statistic } k <- nelements df <- k * (k - 1)^2 pvalue <- 1-pchisq(q = TStat, df) out <- list(statistic = TStat[[1]], p.value = pvalue[[1]]) # returning the output if (verbose == TRUE) { cat("The assessOrder test statistic is: ", TStat, "\n") cat("The Chi-Square d.f. are: ", df, "\n") cat("The p-value is: ", pvalue, "\n") } invisible(out) } #' @rdname statisticalTests #' @export # check if sequence is stationary assessStationarity <- function(sequence, nblocks, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. 
It will be thoroughly investigated in future versions of the package.") # length of sequence n <- length(sequence) # size of each block blocksize <- n / nblocks # vector of unique states states <- unique(sequence) # number of states nstates <- length(states) # sum of the statistics TStat <- 0 # chi-squared test for each state for(i in states) { # init matrix mat <- matrix(0,nblocks, nstates) dimnames(mat) <- list(1:nblocks, states) # compute the transition matrix from sequence for(j in 1:(n - 1)) { if(sequence[j] == i) { # row index b <- ceiling(j / blocksize) # next state future <- sequence[j+1] # update transition matrix mat[b, future] <- mat[b, future] + 1 } } # vector to store row sum of matrix rowsums <- rowSums(mat) # store the indices with zero row sum indices <- which(rowsums == 0) # update rows with zero sum for(k in indices) mat[k, ] <- 1/nstates # update row sum after checking zero sum row rowsums <- rowSums(mat) # row-wise normalize. mat <- mat/rowsums # Some columns may still be all zeros. This causes NaN for chi-squared test. # chi-squared test res <- chisq.test(mat) TStat <- TStat + res$statistic } k <- nstates # degree of freedom df <- k * (nblocks - 1) * (k - 1) pvalue <- 1 - pchisq(TStat, df) # returning the output if (verbose==TRUE) { cat("The assessStationarity test statistic is: ", TStat, "\n") cat("The Chi-Square d.f. are: ", df, "\n") cat("The p-value is: ", pvalue, "\n") } out <- list(statistic = TStat[[1]], p.value = pvalue[[1]]) invisible(out) } # sequence to transition frequencey matrix .seq2mat <- function(sequence) { # basic requirement to create transition matrix n <- length(sequence) states <- unique(sequence) nstates <- length(states) # create transition matrix mat <- zeros(nstates) dimnames(mat) <- list(states, states) # populate transition matrix for(i in 1:(n - 1)) { from <- sequence[i] to <- sequence[i+1] mat[from, to] <- mat[from, to] + 1 } return (mat) } #' @title test whether an empirical transition matrix is compatible to a theoretical one #' #' @description This function tests whether an empirical transition matrix is statistically compatible #' with a theoretical one. It is a chi-square based test. In case a cell in the empirical transition matrix is >0 #' that is 0 in the theoretical transition matrix the null hypothesis is rejected. #' #' @rdname statisticalTests #' @family statisticalTests #' #' @param data matrix, character or list to be converted in a raw transition matrix #' @param object a markovchain object #' #' @return a list with following slots: statistic (the chi - square statistic), dof (degrees of freedom), and corresponding p-value. In case a cell in the empirical transition matrix is >0 #' that is 0 in the theoretical transition matrix the null hypothesis is rejected. In that case a p-value of 0 and statistic and dof of NA are returned. 
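#' #' @details For clarity, a summary of the computation performed below: writing \eqn{f_{ij}} for the observed transition counts, \eqn{f_{i.}} for the marginal counts and \eqn{p_{ij}} for the theoretical transition probabilities, the statistic is \eqn{2 \sum_{i,j} f_{ij} \log(f_{ij} / (f_{i.} p_{ij}))}, where the sum runs over cells with \eqn{f_{ij} > 0} and \eqn{p_{ij} > 0}; it is compared with a chi-square distribution with \eqn{r(r-1) - z} degrees of freedom, \eqn{r} being the number of states and \eqn{z} the number of null entries of the theoretical matrix.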
#' @export #' #' @examples #' #' #Example taken from Kullback Kupperman Tests for Contingency Tables and Markov Chains #' #' sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, #' 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, #' 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, #' 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, #' 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) #' #' mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) #' rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") #' #' verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) #' verifyEmpiricalToTheoretical <- function(data, object, verbose = TRUE) { #warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") if (!is(object,'markovchain') ) stop("Error! Object should belong to the markovchain class") if (missing(data) | missing(object)) stop("Error! Required inputs missing") if ( !is.numeric(data) || is.character(data) || is.matrix(data)) stop("Error! Data should be either a raw transition matrix or either a character or a numeric element") if (is.numeric(data) || is.character(data) ) data<-createSequenceMatrix(stringchar = data, possibleStates = states(object)) if (length(setdiff(names(data),names(object))) > 0) stop("Error! Empirical and theoretical tm have different support") # (possibly rearrange columns and rownames) data <- data[match(rownames(data),names(object)),] #matching rows data <- data[,match(colnames(data),names(object))] #matching cols if (sum((data == 0) == (object@transitionMatrix == 0)) == (nrow(data) * ncol(data))) { f_i_dot <-colSums(data) statistic <- 0 for (i in 1:dim(object)) { for (j in 1:dim(object)) { if (data[i, j]>0&object[i, j]>0) statistic <- statistic + data[i, j]*log(data[i, j]/(f_i_dot[i]*object[i, j])) } } statistic <- statistic * 2 null_elements <- sum(object@transitionMatrix == 0) dof <- dim(object) * (dim(object) - 1) - null_elements #r(r-1) - c, c null element ob objects p.value <- 1 - pchisq(q = statistic,df = dof) if (verbose == TRUE) { cat("Testing whether the\n");print(data);cat("transition matrix is compatible with\n");print(object@transitionMatrix);print("theoretical transition matrix") cat("ChiSq statistic is",statistic,"d.o.f are",dof,"corresponding p-value is",p.value,"\n") } out <- list(statistic = statistic, dof = dof,pvalue = p.value) } else { statistic <- NA dof <- NA p.value <- 0 if (verbose == TRUE) { cat("Testing whether the\n");print(data);cat("transition matrix is compatible with\n");print(object@transitionMatrix);print("theoretical transition matrix") cat("ChiSq statistic is",statistic,"d.o.f are",dof,"corresponding p-value is",p.value,"\n") cat("At least one transition is >0 in the data that is 0 in the object. Therefore the null hypothesis is rejected. \n") } out <- list(statistic = statistic, dof = dof,pvalue = p.value) } #return output return(out) } .checkMatrix4Homogeneity<-function(matr) { out<-TRUE if (length(colnames(matr)) == 0) {message("Error! No colnames in input matrix"); out = FALSE} if (length(rownames(matr)) == 0) {message("Error! No rownames in input matrix"); out = FALSE} if (!all.equal(rownames(matr),colnames(matr))) {message("Error! Colnames <> Rownames")} if (any(matr<0)) {message("Error! 
Negative elements"); out = FALSE} return(out) } .addNamedColumns <- function(matr, fullnames) { if ( length( setdiff(names(matr),fullnames) )>0) stop("Error! Names in matr not in fullnames") fullnames<-sort(fullnames) newMatr<-matrix(0,nrow = length(fullnames),ncol = length(fullnames),dimnames = list(fullnames,fullnames)) current_support = colnames(matr) current_dim = dim(matr) for (i in 1:current_dim[1]) { #cycle on row for (j in 1:current_dim[2]) { #cycle on cols item<-matr[i,j] #take the element which_row_trans<-current_support[i] #define current row and cols which_col_trans<-current_support[j] # lookup element in the pooled table row_to_write <-match(x=which_row_trans,table = fullnames) col_to_write <-match(x=which_col_trans,table = fullnames) # write element into the pooled table newMatr[row_to_write,col_to_write] <- newMatr[row_to_write,col_to_write] + item } } return(newMatr) } #' @title Verify Homogeneity across transition matrices #' #' @description Verifies that the s elements in the input list belong to the same DTMC #' #' @rdname statisticalTests #' @family statisticalTests #' #' @param inputList A list of items that can be coerced to transition matrices #' #' @return a list with the chi-square statistic, degrees of freedom, and the corresponding p-value #' @export #' #' @examples #' #' data(kullback) #' verifyHomogeneity(inputList=kullback,verbose=TRUE) #' verifyHomogeneity<-function(inputList, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") if (!is.list(inputList) ) stop("Error! inputList should be a list") if (length(inputList) < 2) stop("Error! inputList length lower than 2") #checks whether all inputs can be put as transition matrices for (i in 1:length(inputList)) { if (is.matrix(inputList[[i]]) == TRUE) { checks<-.checkMatrix4Homogeneity(inputList[[i]]) if (!checks) stop("Error! Element ", i, " is not a valid transition matrix") } else { inputList[[i]]<-createSequenceMatrix(stringchar = inputList[[i]]) #convert all elements into transition matrices } } # create the pooled raw transition matrix and the matrix of rowsums all.names<-character() for (i in 1:length(inputList)) { all.names<-c(all.names, rownames(inputList[[i]])) } all.names<-sort(unique(all.names)) ##initialize PooledRawTransitionMatrix <- matrix(0,nrow = length(all.names),ncol = length(all.names),dimnames = list(all.names, all.names)) RowSumsMatrix <- matrix(0, nrow = length(inputList),ncol=length(all.names),dimnames = list(1:length(inputList),all.names)) ##sum for each element in the list for (k in 1:length(inputList)) { current_support = rownames(inputList[[k]]) current_dim = dim(inputList[[k]]) for (i in 1:current_dim[1]) { #cycle on row for (j in 1:current_dim[2]) { #cycle on cols num_trans<-inputList[[k]][i, j] #take the element which_row_trans <- current_support[i] #define current row and cols which_col_trans <- current_support[j] # lookup element in the pooled table row_to_write <-match(x = which_row_trans,table = all.names) col_to_write <-match(x = which_col_trans,table = all.names) # write element into the pooled table PooledRawTransitionMatrix[row_to_write,col_to_write]=PooledRawTransitionMatrix[row_to_write,col_to_write]+num_trans } } } #create the matrix of rowsums fij. 
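# descriptive note: RowSumsMatrix[k, j] will hold the total number of observed transitions leaving state j in the k-th input element, i.e. the row sums of each sample's raw transition count matrix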
for (k in 1:length(inputList)) { my_row_sums <- rowSums(inputList[[k]]) current_support = names(my_row_sums) for (i in 1:length(current_support)) { my_element<-my_row_sums[i] col_to_write<-match(x=current_support[i],table = all.names) RowSumsMatrix[k, col_to_write]<-RowSumsMatrix[k, col_to_write] + my_element } } # compute the chi - square statistic statistic <- 0 # degreesOfFreedomLess <- 0 newInputList <- lapply(inputList, .addNamedColumns,fullnames = all.names) number_of_transitions <- sapply(newInputList,sum) total_transitions <- sum(number_of_transitions) for (s in 1:length(inputList)) { #cycle across inputs for (j in 1:length(all.names)) { #cycle across rows for (k in 1:length(all.names)) { #cycle across cols if (any(newInputList[[s]][j,k] == 0, number_of_transitions[s] == 0, PooledRawTransitionMatrix[j,k] == 0)) { statistic <- statistic + 0 # zero element in log expr does not contribute to statistics # degreesOfFreedomLess <- degreesOfFreedomLess +1 } else { statistic <- statistic + newInputList[[s]][j, k] * log((total_transitions*newInputList[[s]][j, k])/(number_of_transitions[s]*PooledRawTransitionMatrix[j,k])) } } } } statistic <- statistic * 2 #dof (s-1)*(r^2-1)-#zeros degrees_of_freedom <- (length(inputList) - 1)*(length(all.names)^2 - 1)#-degreesOfFreedomLess p.value <- 1 - pchisq(q = statistic,df = degrees_of_freedom) if (verbose == TRUE) { cat("Testing homogeneity of DTMC underlying input list \n") cat("ChiSq statistic is",statistic,"d.o.f are",degrees_of_freedom,"corresponding p-value is",p.value,"\n") } #return output out <- list(statistic = statistic, dof = degrees_of_freedom,pvalue = p.value) return(out) } markovchain/R/classesAndMethods.R0000644000176200001440000012342414430747270016522 0ustar liggesusers#' @title Markov Chain class #' @name markovchain-class #' @aliases markovchain-class *,markovchain,markovchain-method #' *,markovchain,matrix-method *,markovchain,numeric-method #' *,matrix,markovchain-method *,numeric,markovchain-method #' ==,markovchain,markovchain-method !=,markovchain,markovchain-method #' absorbingStates,markovchain-method transientStates,markovchain-method #' recurrentStates,markovchain-method transientClasses,markovchain-method #' recurrentClasses,markovchain-method communicatingClasses,markovchain-method #' steadyStates,markovchain-method meanNumVisits,markovchain-method #' is.regular,markovchain-method is.irreducible,markovchain-method #' is.accessible,markovchain,character,character-method #' is.accessible,markovchain,missing,missing-method #' absorptionProbabilities,markovchain-method #' meanFirstPassageTime,markovchain,character-method #' meanFirstPassageTime,markovchain,missing-method #' meanAbsorptionTime,markovchain-method #' meanRecurrenceTime,markovchain-method #' conditionalDistribution,markovchain-method hittingProbabilities,markovchain-method #' canonicForm,markovchain-method coerce,data.frame,markovchain-method #' coerce,markovchain,data.frame-method coerce,table,markovchain-method #' coerce,markovchain,igraph-method coerce,markovchain,matrix-method #' coerce,markovchain,sparseMatrix-method coerce,sparseMatrix,markovchain-method #' coerce,matrix,markovchain-method coerce,msm,markovchain-method #' coerce,msm.est,markovchain-method coerce,etm,markovchain-method #' dim,markovchain-method initialize,markovchain-method #' names<-,markovchain-method plot,markovchain,missing-method #' predict,markovchain-method print,markovchain-method #' show,markovchain-method summary,markovchain-method #' sort,markovchain-method t,markovchain-method #' 
[,markovchain,ANY,ANY,ANY-method ^,markovchain,numeric-method #' @description The S4 class that describes \code{markovchain} objects. #' #' @param states Name of the states. Must be the same of \code{colnames} and \code{rownames} of the transition matrix #' @param byrow TRUE or FALSE indicating whether the supplied matrix #' is either stochastic by rows or by columns #' @param transitionMatrix Square transition matrix #' @param name Optional character name of the Markov chain #' #' @section Creation of objects: #' #' Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}. #' #' @section Methods: #' #' \describe{ #' \item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects} #' \item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication} #' \item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication } #' \item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain} #' \item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication } #' \item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... } #' \item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object} #' \item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object} #' \item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object} #' \item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states } #' \item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form } #' \item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}} #' \item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state} #' \item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}} #' \item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} } #' \item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects } #' \item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects } #' \item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects } #' \item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one } #' \item{dim}{\code{signature(x = "markovchain")}: method to get the size} #' \item{names}{\code{signature(x = "markovchain")}: 
method to get the names of states} #' \item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states} #' \item{initialize}{\code{signature(.Object = "markovchain")}: initialize method } #' \item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects } #' \item{predict}{\code{signature(object = "markovchain")}: predict method } #' \item{print}{\code{signature(x = "markovchain")}: print method. } #' \item{show}{\code{signature(object = "markovchain")}: show method. } #' \item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. } #' \item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. } #' \item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. } #' \item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain } #' \item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. } #' \item{t}{\code{signature(x = "markovchain")}: transpose matrix } #' \item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability } #' } #' #' @references #' A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' @note #' \enumerate{ #' \item \code{markovchain} object are backed by S4 Classes. #' \item Validation method is used to assess whether either columns or rows totals to one. #' Rounding is used up to \code{.Machine$double.eps * 100}. If state names are not properly #' defined for a probability \code{matrix}, coercing to \code{markovhcain} object leads #' to overriding states name with artificial "s1", "s2", ... sequence. In addition, operator #' overloading has been applied for \eqn{+,*,^,==,!=} operators. #' } #' #' @seealso \code{\link{markovchainSequence}},\code{\link{markovchainFit}} #' #' @examples #' #show markovchain definition #' showClass("markovchain") #' #create a simple Markov chain #' transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) #' simpleMc<-new("markovchain", states=c("a","b"), #' transitionMatrix=transMatr, #' name="simpleMc") #' #power #' simpleMc^4 #' #some methods #' steadyStates(simpleMc) #' absorbingStates(simpleMc) #' simpleMc[2,1] #' t(simpleMc) #' is.irreducible(simpleMc) #' #conditional distributions #' conditionalDistribution(simpleMc, "b") #' #example for predict method #' sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") #' mcFit<-markovchainFit(data=sequence) #' predict(mcFit$estimate, newdata="b",n.ahead=3) #' #direct conversion #' myMc<-as(transMatr, "markovchain") #' #' #example of summary #' summary(simpleMc) #' \dontrun{plot(simpleMc)} #' #' @keywords classes #' #' @export setClass( # Class name "markovchain", # Define the slots slots = list(states = "character", byrow = "logical", transitionMatrix = "matrix", name = "character"), # Set the default values for the slots prototype = list( states = c("a", "b"), byrow = TRUE, transitionMatrix = matrix( data = c(0, 1, 1, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b"))), name = "Unnamed Markov chain") ) # Initializing method for markovchain objects setMethod( "initialize", signature(.Object = "markovchain"), function (.Object, states, byrow, transitionMatrix, name, ...) 
{ # Put the standard markovchain if (missing(transitionMatrix)) { transitionMatrix <- matrix( data = c(0, 1, 1, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b"))) } rowNames <- rownames(transitionMatrix) colNames <- colnames(transitionMatrix) # Check names of transition matrix # if all names are missing it initializes them to "1", "2", .... if (all(is.null(rowNames), is.null(colNames)) == TRUE) { if (missing(states)) { numRows <- nrow(transitionMatrix) stateNames <- as.character(seq(1:numRows)) } else { stateNames <- states } rownames(transitionMatrix) <- stateNames colnames(transitionMatrix) <- stateNames # Fix when rownames null } else if (is.null(rowNames)) { rownames(transitionMatrix) <- colNames # Fix when colnames null } else if (is.null(colNames)) { colnames(transitionMatrix) <- rowNames # Fix when different } else if (! setequal(rowNames, colNames)) { colnames(transitionMatrix) <- rowNames } if (missing(states)) states <- rownames(transitionMatrix) if (missing(byrow)) byrow <- TRUE if (missing(name)) name <- "Unnamed Markov chain" callNextMethod( .Object, states = states, byrow = byrow, transitionMatrix = transitionMatrix, name = name, ... ) } ) #' @title Non homogeneus discrete time Markov Chains class #' @name markovchainList-class #' @aliases [[,markovchainList-method dim,markovchainList-method #' predict,markovchainList-method print,markovchainList-method #' show,markovchainList-method #' @description A class to handle non homogeneous discrete Markov chains #' #' @param markovchains Object of class \code{"list"}: a list of markovchains #' @param name Object of class \code{"character"}: optional name of the class #' #' @section Objects from the Class: #' #' A \code{markovchainlist} is a list of \code{markovchain} objects. They can #' be used to model non homogeneous discrete time Markov Chains, when #' transition probabilities (and possible states) change by time. #' @section Methods: #' \describe{ #' \item{[[}{\code{signature(x = "markovchainList")}: extract the #' i-th \code{markovchain} } #' \item{dim}{\code{signature(x = "markovchainList")}: number #' of \code{markovchain} underlying the matrix } #' \item{predict}{\code{signature(object = "markovchainList")}: predict #' from a \code{markovchainList} } #' \item{print}{\code{signature(x = "markovchainList")}: prints the list #' of markovchains } #' \item{show}{\code{signature(object = "markovchainList")}: same as \code{print} } #' } #' #' @references #' A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @note #' The class consists in a list of \code{markovchain} objects. #' It is aimed at working with non homogeneous Markov chains. 
#' #' @seealso \code{\linkS4class{markovchain}} #' @examples #' showClass("markovchainList") #' #define a markovchainList #' statesNames=c("a","b") #' #' mcA<-new("markovchain",name="MCA", #' transitionMatrix=matrix(c(0.7,0.3,0.1,0.9), #' byrow=TRUE, nrow=2, #' dimnames=list(statesNames,statesNames)) #' ) #' #' mcB<-new("markovchain", states=c("a","b","c"), name="MCB", #' transitionMatrix=matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), #' nrow=3, byrow=TRUE)) #' #' mcC<-new("markovchain", states=c("a","b","c","d"), name="MCC", #' transitionMatrix=matrix(c(0.25,0.75,0,0,0.4,0.6, #' 0,0,0,0,0.1,0.9,0,0,0.7,0.3), #' nrow=4, byrow=TRUE) #' ) #' mcList<-new("markovchainList",markovchains=list(mcA, mcB, mcC), #' name="Non - homogeneous Markov Chain") #' #' @keywords classes #' #' @export setClass( "markovchainList", slots = list( markovchains = "list", name = "character") ) # Verifies whether a markovchainList object is valid or not # A markovchainList is valid iff all the slots are markovchain objects # Returns true if the markovchainList is valid, the indexes of the # wrong slots otherwise setValidity( "markovchainList", function(object) { check <- FALSE markovchains <- object@markovchains classes <- sapply(markovchains, class) nonMarkovchain <- which(classes != "markovchain") errors <- sapply(nonMarkovchain, function(i) { paste(i, "-th element class is not 'markovchain'") }) if (length(errors) == 0) TRUE else errors } ) # generic method to print out states #' @name states #' #' @title Defined states of a transition matrix #' #' @description This method returns the states of a transition matrix. #' #' @param object A discrete \code{markovchain} object #' @return The character vector corresponding to states slot. #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' states(markovB) #' names(markovB) #' #' @rdname states #' #' @export setGeneric("states", function(object) standardGeneric("states")) #' @rdname states #' @title states setMethod( "states", "markovchain", function(object) { object@states } ) #' @title Returns the states for a Markov chain object #' #' @param x object we want to return states for #' #' @rdname names setMethod( "names", "markovchain", function(x) { x@states } ) #' @title Method to retrieve name of markovchain object #' #' @name name #' #' @description This method returns the name of a markovchain object #' #' @param object A markovchain object #' @rdname getName #' @author Giorgio Spedicato, Deepak Yadav #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' name(markovB) #' #' @export setGeneric("name", function(object) standardGeneric("name")) #' @rdname getName setMethod( "name", "markovchain", function(object) { object@name }) #' @title Method to set name of markovchain object #' #' @name name<- #' #' @description This method modifies the existing name of markovchain object #' #' @param object A markovchain object #' @param value New name of 
markovchain object #' @rdname setName #' @author Giorgio Spedicato, Deepak Yadav #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' name(markovB) <- "dangerous mc" #' #' @export setGeneric("name<-", function(object, value) standardGeneric("name<-")) #' @rdname setName setMethod( "name<-", "markovchain", function(object, value) { object@name <- value object } ) setMethod( "names<-", "markovchain", function(x, value) { rownames(x@transitionMatrix) <- value colnames(x@transitionMatrix) <- value x@states <- value x } ) #' @exportMethod dim setGeneric("dim") # Generic methods to get the dim of a markovchain and markovchainList setMethod( "dim", "markovchain", function(x) { nrow(x@transitionMatrix) } ) setMethod( "dim", "markovchainList", function(x) { length(x@markovchains) } ) # method to set the validity of a markovchain object setValidity( "markovchain", function(object) { errors <- character() transitionMatrix <- object@transitionMatrix states <- object@states if (length(setdiff(states, unique(states))) > 0) { msg <- "Error! States must be unique!" errors <- c(errors, msg) } # Performs a set of checks. If any error arises, it ends up concatenated to errors # Check all values of transition matrix belongs to [0, 1] maybeProbabilities <- sapply(as.numeric(transitionMatrix), .isProbability) if (any(maybeProbabilities) == FALSE) { msg <- "Error! Some elements of transitionMatrix are not probabilities" errors <- c(errors, msg) } # Check whether matrix is square matrix or not if (nrow(transitionMatrix) != ncol(transitionMatrix)) { msg <- "Error! transitionMatrix is not a square matrix" errors <- c(errors, msg) } if (!.checkMatrix(transitionMatrix, object@byrow)) { msg <- paste( paste("Error!", ifelse(object@byrow, "Rows", "Cols")), "of transition matrix do not sum to one" ) errors <- c(errors, msg) } # Check whether column names or rows names equal to state names or not if (! setequal(colnames(transitionMatrix), states)) { msg <- "Error! Colnames of transitionMatrix do not match states" errors <- c(errors, msg) } if (! setequal(rownames(transitionMatrix), states)) { msg <- "Error! Rownames of transitionMatrix do not match states" errors <- c(errors, msg) } if (length(errors) > 0) errors else TRUE } ) # generic method to extract transition probability # from state t0 to state t1 #' @name transitionProbability #' @title Function to get the transition probabilities from initial #' to subsequent states. #' @description This is a convenience function to get transition probabilities. #' #' @param object A \code{markovchain} object. #' @param t0 Initial state. #' @param t1 Subsequent state. 
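#' #' @details Stated for clarity (a restatement of what the method does): for a chain stored by rows the value is simply the entry \code{transitionMatrix[t0, t1]}, while for a chain stored by columns the two indices are swapped.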
#' #' @references A First Course in Probability (8th Edition), #' Sheldon Ross, Prentice Hall 2010 #' #' @return Numeric Vector #' #' @author Giorgio Spedicato #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' transitionProbability(markovB,"b", "c") #' @rdname transitionProbability #' #' @exportMethod transitionProbability setGeneric("transitionProbability", function(object, t0, t1) standardGeneric("transitionProbability")) #' @rdname transitionProbability setMethod("transitionProbability", "markovchain", function(object, t0, t1) { fromState <- which(object@states == t0) toState <- which(object@states == t1) out <- ifelse(object@byrow == TRUE, object@transitionMatrix[fromState, toState] , object@transitionMatrix[toState, fromState]) return(out) } ) # print, plot and show methods .showInt <- function(object, verbose = TRUE) { # find the direction if (object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } if (verbose == TRUE) { cat(object@name, "\n A ", dim(object), "- dimensional discrete Markov Chain defined by the following states: \n", paste(states(object), collapse=", "), "\n The transition matrix ", direction, " is defined as follows: \n") } print(object@transitionMatrix) cat("\n") } #' @exportMethod show setGeneric("show") # show methods for markovchain and markovchain list objects setMethod("show", "markovchain", function(object){ .showInt(object) } ) setMethod("show", "markovchainList", function(object) { cat(object@name, " list of Markov chain(s)", "\n") for(i in 1:length(object@markovchains)) { cat("Markovchain ",i,"\n") show(object@markovchains[[i]]) } } ) #' @exportMethod print setGeneric("print") # print methods setMethod("print", "markovchainList", function(x) show(x)) setMethod("print", "markovchain", function(x){ object <- x .showInt(object, verbose = FALSE) } ) .getNet <- function(object, round = FALSE) { # function to get the absorbency matrix to plot and export to igraph # # Args: # object: a markovchain object # round: boolean to round # # Returns: # # a graph adjacency if (object@byrow == FALSE) { object <- t(object) } matr <- object@transitionMatrix*100 if(round == TRUE) { matr <- round(matr, 2) } net <- graph.adjacency(adjmatrix = matr, weighted = TRUE, mode = "directed") return(net) } getColorVector <- function(object){ list <- .communicatingClassesRcpp(object) sections <- length(list) colorList <- grDevices::colors() colorList <- sample(colorList,sections) colorvector <- rep("white",length(object@states)) for(i in 1:length(list)){ part <- list[[i]] for(j in 1:length(part)){ colorvector[match(part[j],object@states)] <- colorList[i] } } return(colorvector) } #' @exportMethod plot setGeneric("plot") # Plot methods for markovchain objects # plot method from stat5 setMethod("plot", signature(x = "markovchain", y = "missing"), function(x, y, package = "igraph", ...) { switch(package, diagram = { if (requireNamespace("diagram", quietly = TRUE)) { .plotdiagram(object = x, ...) } else { netMc <- .getNet(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, DiagrammeR = { if (requireNamespace("DiagrammeR", quietly = TRUE)) { .plotDiagrammeR(object = x, ...) 
} else { netMc <- .getNet(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, { netMc <- .getNet(object = x,round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) }) } ) ##################################################AS METHODS######################### .checkMatrix <- function(matr, byrow = TRUE, verbose = FALSE) { # firstly, check size if (ncol(matr) != nrow(matr)) { if(verbose) stop("Error! Not a rectangular matrix") return(FALSE) } # secondly, check is stochastic isStochastic <- .isStochasticMatrix(matr, byrow) if (!isStochastic) { if (verbose) stop("Error! Either rows or cols should sum to 1") return(FALSE) } # if all test are passed return(TRUE) } # Internal function to return a markovchain object given a matrix .matrix2Mc <- function(from) { # whether given matrix is a transition matrix or not # if it is then how probabilities are stored # row-wise or columnwise byrow <- TRUE checkByRows <- .checkMatrix(from, byrow = byrow) if(!checkByRows) { byrow <- FALSE checkByCols <- .checkMatrix(from, byrow = byrow) if(!checkByCols) { #error could be either in rows or in cols if (any(colSums(from) != 1)) cat("columns sums not equal to one are:", which(colSums(from) != 1),"\n") if (any(rowSums(from) != 1)) cat("row sums not equal to one are:", which(rowSums(from) != 1),"\n") stop("Error! Not a transition matrix") } } # extract states names if(byrow) { namesCandidate <- rownames(from) } else { namesCandidate <- colnames(from) } # if states names is not there create it s1, s2, s3, .... if(is.null(namesCandidate)) { namesCandidate <- paste("s", 1:nrow(from), sep = "") } # create markovchain object out <- new("markovchain", transitionMatrix = from, states = namesCandidate, byrow = byrow) invisible(out) } #' @exportMethod coerce NULL # coerce matrix to markovchain object using internal method # example: as("some matrix", "markovchain") setAs(from = "matrix", to = "markovchain", def = .matrix2Mc) # Function to transform a markovchain into a data.frame # Args: # from: a markovchain object # # returns: # a data.frame .mc2Df <- function(from) { # number of rows or columns nr <- nrow(from@transitionMatrix) for(i in 1:nr){ for(j in 1:nr){ t0 <- from@states[i] t1 <- from@states[j] prob <- transitionProbability(object = from, t0 = t0, t1 = t1) #cope with the new default of R 4.0 (5-3-2020) rowDf <- data.frame(t0 = t0, t1 = t1, prob = prob,stringsAsFactors = TRUE ) # go to else part if first row of data frame is generated if(exists("outDf")) { outDf <- rbind(outDf, rowDf) } else { outDf <- rowDf } } } return(outDf) } # method to convert(coerce) from markovchain to data.frame setAs(from = "markovchain", to = "data.frame", def = .mc2Df) # method to find the column which stores transition probability .whichColProb <- function(df) { # column number which stores transition probability out <- 0 # check for validity of data frame if(ncol(df) > 3) { warning("Warning! More than three columns. Only the first three will be used") } if(ncol(df) < 3) { stop("Error! 
Three columns needed") } for(i in 1:ncol(df)) { # stop at the first numeric column whose entries are all probabilities if((is(df[, i], "numeric")) & (all(sapply(df[, i], .isProbability) == TRUE))) { out <- i break } } return(out) } # Function to convert from a data.frame containing initial, ending # and probability columns to a proper markovchain object # # Args: # from: a data.frame # # Returns: # A markovchain object .df2Mc <- function(from) { statesNames <- unique(from[, 1]) colProb <- .whichColProb(from) # index of the probability column (currently unused; the third column is assumed below) # transition matrix prMatr <- zeros(length(statesNames)) rownames(prMatr) <- statesNames colnames(prMatr) <- statesNames for(i in 1:nrow(from)) { idRow <- which(statesNames == from[i, 1]) # assume first col from idCol <- which(statesNames == from[i, 2]) # assume second col to prMatr[idRow, idCol] <- from[i, 3] # assume third col t-probability } out <- new("markovchain", transitionMatrix = prMatr) return(out) } # method to convert(coerce) data frame to markovchain object setAs(from = "data.frame", to = "markovchain", def = .df2Mc) # example # data <- data.frame(from = c("a", "a", "b", "b", "b", "b"), # to = c("a", "b", "b", "b", "b", "a")) # # from <- table(data) # .table2Mc(from) .table2Mc <- function(from) { # check whether table has square dimension or not if(dim(from)[1] != dim(from)[2]) { stop("Error! Table is not square") } # row and column names should be the same if(!setequal(rownames(from),colnames(from))) { stop("Error! Row names not equal to column names") } temp <- unclass(as.matrix(from)) # put columns in the same order as rows fromMatr <- temp[, order(rownames(temp))] # obtain transition matrix outMatr <- fromMatr / rowSums(fromMatr) out <- new("markovchain", states = rownames(temp), transitionMatrix = outMatr, byrow=TRUE) return(out) } # coerce table to markovchain object setAs(from = "table", to = "markovchain", def = .table2Mc) # function from msm to markovchain # msm is a package. Use this package to create msm object. # see how to create msm object using ?msm .msm2Mc <- function(from) { if(requireNamespace(package='msm', quietly = TRUE)) { temp <- msm::pmatrix.msm(from) prMatr <- unclass(as.matrix(temp)) out <- new("markovchain", transitionMatrix = prMatr) } else { out <- NULL print("msm unavailable") } return(out) } # coerce msm object to markovchain object setClass("msm") setAs(from = "msm", to = "markovchain", def = .msm2Mc) # function for msm.est to mc. 
Assume a probability matrix given .msmest2Mc <- function(from) { if (is.matrix(from)) { # central estimate pMatr <- from } if (is.list(from)) { # central estimate pMatr <- from[[1]] } out <- new("markovchain", transitionMatrix = as(pMatr, "matrix")) return(out) } # coerce ms.est to markovchain object setClass("msm.est") setAs(from = "msm.est", to = "markovchain", def = .msmest2Mc) # function from etm to markovchain .etm2Mc<-function(from) { # data frame consists of 'from' and 'to' column df <- from$trans # name of states elements <- from$state.names # number of unique states nelements <- length(elements) # temporary t-matrix prMatr <- zeros(nelements) dimnames(prMatr) <- list(elements, elements) # populate t-matrix for(i in 1:dim(df)[1]) { r <- df[i, ] # each row one by one stateFrom <- r$from stateTo <- r$to prMatr[stateFrom, stateTo] <- prMatr[stateFrom, stateTo] + 1 } # convert freq-matrix to trans-matrix rsums <- rowSums(prMatr) prMatr <- prMatr / rsums # take care of rows with all entries 0 if(any(rsums == 0)) { indicesToBeSanitized <- which(rsums == 0) for(i in indicesToBeSanitized) { for(j in 1:nelements) { prMatr[i, j] <- 1 / nelements } } } # create markovchain object out <- new("markovchain", transitionMatrix = prMatr) return(out) } # coerce etm object to markovchain object setClass("etm") setAs(from = "etm", to = "markovchain", def = .etm2Mc) #sparse matrix from Matrix package .sparseMatrix2markovchain<-function(from){ temp<-as(from,"matrix") out <- as(temp, "markovchain") return(out) } .markovchain2sparseMatrix<-function(from){ temp<-as(from,"matrix") out <- as(temp, "sparseMatrix") return(out) } setAs(from = "sparseMatrix", to = "markovchain", def = .sparseMatrix2markovchain) setAs(from = "markovchain", to = "sparseMatrix", def = .markovchain2sparseMatrix) # functions and methods to return a matrix .mc2matrix <- function(from) { out <- from@transitionMatrix return(out) } # coerce markovchain object to matrix(transition) setAs(from = "markovchain", to = "matrix", def = .mc2matrix) # functions and methods to return a matrix .mc2igraph <- function(from) { # convert the markovchain to data.frame temp <- .mc2Df(from=from) # convert the data frame to igraph graph # need to set only non zero weights out <- graph.data.frame(d=temp[temp$prob>0,]) return(out) } # coerce markovchain object to igraph setClass("igraph") setAs(from = "markovchain", to = "igraph", def = .mc2igraph) #' @exportMethod t setGeneric("t") # transposing method for markovchain objects setMethod("t", "markovchain", function(x) { out <- new("markovchain", byrow = !x@byrow, transitionMatrix = t(x@transitionMatrix)) return(out) } ) #' @exportMethod * setGeneric("*") # function to multiplicate two markov chains # # Args: # e1: first markovchain # e2: second markov chain # # Returns: # if feasible, a markovchain where the transition matrix is e1*e2 setMethod("*", c("markovchain", "markovchain"), function(e1, e2) { # compare states of markovchains if(!setequal(e1@states, e2@states)) { warning("Warning! Different states") } # dimension must be equal if(!setequal(dim(e1@transitionMatrix), dim(e2@transitionMatrix))) { stop("Error! Different size") } # both must be either row wise or col wise if(!(e1@byrow == e2@byrow)) { stop("Error! 
Both transition matrix should be defined either by row or by column") } newStates <- e1@states newTransMatr <- e1@transitionMatrix %*% e2@transitionMatrix byRow <- e1@byrow # multiplicated matrix takes the first matrix's name mcName <- e1@name out<-new("markovchain", states = newStates, transitionMatrix = newTransMatr, byrow = byRow, name = mcName) return(out) } ) # methods implemented for multiplication of markovchain object with # matrix, 1-D vector, and vice-versa setMethod("*", c("matrix", "markovchain"), function(e1, e2) { out <- e1 %*% e2@transitionMatrix return(out) } ) setMethod("*", c("markovchain", "matrix"), function(e1, e2) { out <- e1@transitionMatrix %*% e2 return(out) } ) setMethod("*", c("numeric", "markovchain"), function(e1, e2) { if(length(e1) != dim(e2)) { stop("Error! Uncompatible dimensions") } else { out <- e1 %*% e2@transitionMatrix } return(out) } ) setMethod("*", c("markovchain", "numeric"), function(e1, e2) { if(length(e2) != dim(e1)) { stop("Error! Uncompatible dimensions") } else { out <- e1@transitionMatrix %*% e2 } return(out) } ) #' @exportMethod == setGeneric("==") # compare two markovchain object setMethod("==", c("markovchain", "markovchain"), function(e1, e2) { out <- .approxEqualMatricesRcpp(e1@transitionMatrix, e2@transitionMatrix) return(out) } ) #' @exportMethod != setGeneric("!=") setMethod("!=", c("markovchain", "markovchain"), function(e1, e2) { out <- FALSE out <- !(e1 == e2) return(out) } ) #'@exportMethod ^ setGeneric("^") # markovchain raise to some power # this method is O(n³ log(m)) where n = {num cols (= rows) of e1} and m = e2 setMethod("^", c("markovchain", "numeric"), function(e1, e2) { out <- new("markovchain", states = e1@states, byrow = e1@byrow, transitionMatrix = e1@transitionMatrix %^% e2, name = paste(e1@name, "^", e2, sep = "") ) return(out) } ) #' @exportMethod [ setGeneric("[") # methods to directly access transition matrix elements setMethod("[", signature(x = "markovchain", i = "ANY", j = "ANY"), function(x, i, j) { out <- x@transitionMatrix[i, j] return(out) } ) #' @exportMethod [[ setGeneric("[[") # methods to directly access markovchain objects composing a markovchainList object setMethod("[[", signature(x = "markovchainList", i = "ANY"), function(x, i) { out <- x@markovchains[[i]] return(out) } ) # transition probabilty vector from a given state #' @title \code{conditionalDistribution} of a Markov Chain #' #' @name conditionalDistribution #' #' @description It extracts the conditional distribution of the subsequent state, #' given current state. #' #' @param object A \code{markovchain} object. #' @param state Subsequent state. 
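#' #' @details Stated for clarity (a restatement of what the method does): the returned vector is the row of the transition matrix associated with \code{state} (the corresponding column when the chain is stored by columns), with entries named after the states.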
#' #' @author Giorgio Spedicato, Deepak Yadav #' #' @return A named probability vector #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' # define a markov chain #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1),nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' conditionalDistribution(markovB, "b") #' #' @exportMethod conditionalDistribution setGeneric("conditionalDistribution", function(object, state) standardGeneric("conditionalDistribution")) setMethod("conditionalDistribution", "markovchain", function(object, state) { # get the states names stateNames <- states(object) # number of unique states out <- numeric(length(stateNames)) # states are assumed to be sorted index2Take <- which(stateNames == state) if(object@byrow == TRUE) { out <- object@transitionMatrix[index2Take, ] } else { out <- object@transitionMatrix[, index2Take] } # names the output and returs it names(out) <- stateNames return(out) } ) # Function to get the mode of a probability vector # # Args: # probVector: the probability vector # ties: specifies if ties are to be sampled, otherwise more than one element is returned # # Returns: # the name of the model element .getMode <- function(probVector, ties = "random") { maxIndex <- which(probVector == max(probVector)) temp <- probVector[maxIndex] # index of maximum probabilty if((ties == "random") & (length(temp) > 1)) { out <- sample(temp, 1) } else { out <- temp } return(names(out)) } #' @exportMethod predict setGeneric("predict") # predict method for markovchain objects # given initial state return a vector of next n.ahead states setMethod("predict", "markovchain", function(object, newdata, n.ahead = 1) { # identify the last state lastState <- newdata[length(newdata)] out <- character() for(i in 1:n.ahead) { # cyclically determine the most probable subsequent state from the conditional distribution newState <- .getMode(probVector = conditionalDistribution(object, lastState), ties = "random") out <- c(out, newState) lastState <- newState } return(out) } ) # predict method for markovchainList objects setMethod("predict", "markovchainList", function(object, newdata, n.ahead = 1, continue = FALSE) { # object a markovchainList # newdata = the actual data # n.ahead = how much ahead # continue = veryfy if that lasts # allocate output out <- character() actualPos <- length(newdata) lastState <- newdata[actualPos] # take last position for(i in 1:n.ahead) { newPos <- actualPos + i - 1 if(newPos <= dim(object)) { newState <- predict(object = object[[newPos]], newdata = lastState, n.ahead = 1) out <- c(out, newState) lastState <- newState } else { if(continue == TRUE) { newState <- predict(object = object[[dim(object)]], newdata = lastState, n.ahead = 1) out <- c(out, newState) lastState <- newState } else break; } } return(out) } ) #sort method for markovchain objects setGeneric("sort", function(x, decreasing=FALSE, ...) 
standardGeneric("sort")) setMethod("sort", signature(x="markovchain"), function(x, decreasing = FALSE) { #get matrix and state names 2 be sorted matr2besorted<-x@transitionMatrix if (x@byrow) states2besorted <- rownames(matr2besorted) else states2besorted <- colnames(matr2besorted) #sorting sort_index<-order(states2besorted,decreasing = decreasing) #reallocating matr_sorted<-matr2besorted[sort_index,sort_index] states_sorted<-states2besorted[sort_index] out<-x out@transitionMatrix<-matr_sorted out@states<-states_sorted return(out) } ) # method to get stationary states #' @name steadyStates #' @title Stationary states of a \code{markovchain} object #' #' @description This method returns the stationary vector in matricial form of a markovchain object. #' @param object A discrete \code{markovchain} object #' #' @return A matrix corresponding to the stationary states #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' @author Giorgio Spedicato #' @seealso \code{\linkS4class{markovchain}} #' #' @note The steady states are identified starting from which eigenvectors correspond #' to identity eigenvalues and then normalizing them to sum up to unity. When negative values are found #' in the matrix, the eigenvalues extraction is performed on the recurrent classes submatrix. #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' steadyStates(markovB) #' #' @rdname steadyStates #' @exportMethod steadyStates setGeneric("steadyStates", function(object) standardGeneric("steadyStates")) markovchain/R/supplementaryPlot.R0000644000176200001440000000541614430746304016662 0ustar liggesusers# plot a diagram using diagram for a markovchain object .plotdiagram <- function(object, ...) { if(is(object,"markovchain")){ mat <- object@transitionMatrix list <- .communicatingClassesRcpp(object) sections <- length(list) colorList <- grDevices::colors() colorList <- sample(colorList,sections) colorvector <- rep("white",length(object@states)) for(i in 1:length(list)){ part <- list[[i]] for(j in 1:length(part)){ colorvector[match(part[j],object@states)] <- colorList[i] } } } else if(is(object,"ctmc")){ mat <- object@generator colorvector <- rep("white",length(object@states)) } if(object@byrow == FALSE) { mat <- t(mat) } if (!requireNamespace("diagram", quietly = TRUE)) { print("The diagram package is not available") } else{ diagram::plotmat(t(mat),relsize = 0.75,box.col = colorvector, ...) } } # plot a diagram using DiagrammeR for a markovchain object .plotDiagrammeR <- function(object, ...) { if(is(object,"markovchain")){ mat <- object@transitionMatrix } else if(is(object,"ctmc")){ mat <- object@generator } names <- rownames(mat) # names of nodes nodes <- '' for(i in 1:nrow(mat)) { nodes <- paste0(nodes, names[i], "; ") } # store edges edges <- '' for(i in 1:nrow(mat)) { for(j in 1:ncol(mat)) { edges <- paste0(edges, names[i], "->", names[j], " [label = ", mat[i,j], "] ") } } # extract extra parameter dots <- list(...) 
args <- "" for(name in names(dots)) { args <- paste0(args, name, "=\"", dots[[name]], "\" ") } # print(args) if (requireNamespace("DiagrammeR", quietly = TRUE)) { res <- DiagrammeR::grViz(paste0(" digraph circles { graph [overlap = true, fontsize = 10] node [shape = circle, fixedsize = true, width = 0.9] // sets as circles ", nodes, " ", edges, args," // labelfontsize = 20 labelloc='t' label ='Weather transition matrix' } ")) return (res) } else { print("Diagrammer unavailable") } } # How to do plotting? # mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), # transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, # 0.3, 0.4, 0.3, # 0.2, 0.45, 0.35), byrow = T, nrow = 3), # name = "Weather") # mcWeather # .plotdiagram(mcWeather, box.size = 0.06) # .plotDiagrammeR(mcWeather, label ="Weather transition matrix", labelloc = "t") # plot(mcWeather, package = "DiagrammeR", label = "Weather transition matrix")markovchain/NEWS.md0000644000176200001440000001610614503767446013676 0ustar liggesusers--- editor_options: markdown: wrap: 72 title: News --- # News for version 0.9.5 - Downtick R requirements # News for version 0.9.4 - Corrected strange characters # News for version 0.9.3 - Generalized application of requireNamespace(..., quietly = TRUE) - Other fixes to comply to newer CRAN requirements - Move to MIT license # News for version 0.9.2 - Add RcppParallel flags to PKG_LIBS # News for version 0.9.1 ## Current changes - Uptick Matrics requirements and modified Changelogs ## old changes - 2022-09-23 0.9.1 Uptick Matrix reqs - 2022-07-01 0.9.0 Bugfix a state classification error in Rcpp - 2022-05-21 0.8.9 Removal of Matlab package dependency - 2021-05-7 0.8.6 Fix a bug in markovchainListFit that made confusion between lists and data.frames - 2020-12-04 0.8.5-2 Fixing unavailable software issues and language glitches - 2020-09-21 0.8.5-1 Coping with etm unavailability - 2020-05-21 0.8.5 Fixed DoF in verify markov property and supported input in compare teorethical - 2020-05-04 0.8.4.1 2022-09-23 0.9.1 - Uptick Matrix reqs 2022-07-01 0.9.0 - Bugfix a state classification error in Rcpp 2022-05-21 - 0.8.9 Removal of Matlab package dependency - 2021-05-7 0.8.6 Fix a bug in markovchainListFit that made confusion between lists and data.frames - 2020-12-04 0.8.5-2 Fixing unavailable software issues and language glitches - 2020-09-21 0.8.5-1 Coping with etm unavailability - 2020-05-21 0.8.5 Fixed DoF in verify markov property and supported input in compare teorethical - 2020-05-04 0.8.4.1 Fixed presentation - 2020-03-16 0.8.4Limiting output lines in vignettes. - 2020-03-15 0.8.3 Add small changes in code to cope with upcoming R 4.0.0 (stringsAsFactor=TRUE in data.frame). - 2019-12-10 0.8.2 Add small changes in code to cope with upcoming R 4.0.0 (no more check class(x)=='matrix') as well as packages' unavailable. - 2019-08-13 0.7.0 Improves performance and refactors `communicatingClasses`, `recurrentClasses`, `transientStates`, `is.irreducible`, `canonicForm`, `summary` and `steadyStates` methods, porting them to C++ whenever possible and improving the algorithmic complexity of the code. Solves a bug with `steadyStates` method. Adds the methods `recurrentStates` and `transientClasses`. Makes the aforementioned methods work on by column Markov chains. Improves tests, adding checking of mathematical structural properties and hundreds of random test cases. 
Fixes documentation for `roxygen` and NAMESPACE file for automatic generation using `devtools::document()` - Bumps Ignacio Cordón as author (ORCID included) 2019-07-01 0.6.9.15 Fixed confidence interval calculation: true confidence intervals are now 1-(1-confidence_interval)/2 Various code refactoring - 09-12-2018 0.6.9.14 Added plot from MmgraphR Added meanFirstPassageTime (thanks to Toni Giorgino) Add orcid Add more warning to Statistical Inference Functions - 12-08-2018 0.6.9.12 Improved Rcpp performance - 20-04-2018 0.6.9.9 Fixed typo in vignette MAP method now works also with lists (issue #141) Fix valgrid error - 14-08-2017 0.6.9.8-1 Added is.TimeReversible function added gm_to_markovchain example - 10-07-2017 0.6.9.5 Added empirical bayesian estimate Various additions from GSOC 2017 (see the new vignette) - 31-03-2014 Version 0.6.9 Added sort method Revised numeric tolerance when creating markovchains Added suggestion for which row to fix - 16-03-2017 Version 0.6.8 Deep restructuring of statistical tests - Add parameter confint to markovchainFit Fixed bug in markovchainFitList - Handling of NA - 02-02-2017 Version 0.6.6.2 Add parameter confint to markovchainFit - 27-01-2017 Version 0.6.6.1 Fixing bug in markovchainListFit - 22-01-2017 markovchainFit accepts an uneven list now Added confidence intervals when markovchainFit is given a matrix - 08-12-2016 Added patch to divergence test - 20-08-2016 Fully parallelized bootstrapped markovchain fit - 08-08-2016 Version 0.6 Added multivariate higher order markov chains Better handlign of steady state analysis on non - recurrent Markov Chains Fixed an error in the igraph conversion - 08-07-2016 Fixed C++ 11 variables types - 24-06-2016 Version 0.4.5 Speeding up rmarkovchain using parallel and RcppParallel library. - 14-06-2016 Version 0.4.4.4 Bug fixed for markovchainFit when method = bootstrap - 09-06-2016 Version 0.4.4.2 added sanitize=false paramter to markovchainFit - 31-05-2016 Version 0.4.4.1 Improvement of the internal method checkSequence. name method to set and get the names of markovchain object. - 10-05-2016 Version 0.4.4 rmarkovchain in RCpp (thanks to Deepak and GSOC 2016) Various small fixes - 05-03-2016 Version 0.4.3.1 fixed a bug in the states classification added options to save output of random sampler in a matrix - 10-10-2015 Version 0.4.3 fixed an error in plot function - 08-07-2015 Version 0.3.1 Period to Rcpp (thanks to TAE) communicatingClasses and recurrentClasses (thanks to TAE) Various optimization (thanks to TAE) Initial support for Continuous Time Markov Chains (thanks to SAI) Added new methods: names, != - 15-06-2015 Version 0.3 Added a CrashIntro vignette Most probability function rewritten in Rcpp Added standard errors and confidence intervals for MLE (thanks to Tae) Added confidence intervals for bootstap (thanks to Tae) Added bayesian Maximum A Posteriori estimation (thanks to Sai) - 12-05-2015 Version 0.2.1 Fixed a compatibility issue with R 3 development - 12-04-2015 Version 0.2 This is a milestone for markovchain package, since the package project has been selected within the funded GSOC 2015 projects. Thanks to Tae support now the fitting functions have been rewritten in Rcpp. 
- 20-03-2015 Version 0.1.3 Fastened the firstpassage time code thanks to Thoralf suggestion - 01-03-2015 Version 0.1.2 Add GitHub project url - 17-02-2015 Version 0.1.1 Fasten markovchain sequence thanks to Mildenberger Thoralf suggestion - 04-01-2015 Version 0.1.0 It is now possible to fit a markovchain and a markovchainList object from a matrix or data.frame Updated vignettes Added tests - 21-06-2014 Version 0.0.9.5 Updated vignettes Added a method to convert a square matrix into a markovchain object. - 20-04-2014 Version 0.0.9 Updated vignette Added parallel processing for bootstrap estimation - 09-02-2014 Version 0.0.8 Minor vignette enhancements Added function to find period of a DTMC - 12-01-2014 Version 0.0.7 Deeply improved vignettes Added predict and summary methods Added function to perform probabilistic analysis - 31-12-2013 Version 0.0.5 Improved vignettes Add predict methods Add methods for transitory states - 04-11-2013 Version 0.0.3 Added various method to easily handle markovchain and markovchainList objects Implemented rmarkovchain and bootstrap fit Improved vignettes markovchain/MD50000644000176200001440000001736714503777503013115 0ustar liggesusers15ee98611ae9e985902f89cbc9192e22 *DESCRIPTION 33f1d4b9f6d468bd807409e38234e3bb *LICENSE 3ab103442e49de45ffeee91e4f7294bf *NAMESPACE 138f116ae7e88d753ed93c61a749e3c1 *NEWS.md 49cefb5f5f768b88c6093cb5e1c2f65d *R/RcppExports.R fe2fdb80210ff2e8b32ffccd5ea6c35a *R/classesAndMethods.R de2bd558f0b4120acaaf8872ca2299f2 *R/ctmcClassesAndMethods.R 50993ff119180ba8b0d883f6e68e26fe *R/ctmcProbabilistic.R 8c7ce890d00d92ce802b378a2bcf1dfd *R/data.R a99314ea622a69a8e36076b40bc5fbdd *R/fitHigherOrder.R 0be873b92b60f1099954ecdd2345a910 *R/fittingFunctions.R c0b6439b5ec5882b173a83de5013c43d *R/hommc.R 17e37e252d07bf19e4dc49d1f943dcff *R/markovchain.R 2fa8aca8212bfacdd1524bd97f3c4e5c *R/matlab_package_functions.R c173f5676e42f00709b9423416bfd27e *R/probabilistic.R 8c90c159a9489b0f5dfb8ce08e0495e8 *R/random.R 41fc8f8e9367fd173a69c2f7feebfc3a *R/statisticalTests.R 993a960ae22cf15eee4f42b12007acf1 *R/supplementaryPlot.R 71cc1f47e4b59910f39bbcb0a1e709c5 *R/sysdata.rda 39b735feac8bc2ead976b8f9d61db649 *R/utils.R db74c50b2544eddf346897e1a7a1e11d *R/zzz.R e2c9fc2f3dc6fc68efe5c992e9319d4b *README.md 389dbe007e10786df3ddbe3d178a4ce6 *build/vignette.rds d2d075fc1631577b5359c009938dcd4f *data/blanden.rda 49ddc41c807cd47ec741886d64bb4092 *data/craigsendi.rda e3cf899f6740cc572706346b65d897cb *data/holson.rda 5c851fdfe55c853a04d5ce47f838de1c *data/kullback.rda c7dd06b768a721bc87294b3cf224351c *data/preproglucacon.rda 239b0734b23f5ffcafc6ffea5b247b0d *data/rain.rda 3c13c7c1dbfd0c7c5d8fe7f364568617 *data/sales.rda 0977e3c219235473f9964d1e60ade165 *data/tm_abs.rda 7062e44a453478115862d4383d21b1aa *demo/00Index 341e2275587b8537b7a3817aaf3588a4 *demo/bard.R 1700f0b961dc53993a9686ce375b2f06 *demo/computationTime.R dbf49ba2e9ce80d4d1f034650764a231 *demo/examples.R fb3046e24980a921d79cba3c3ef6f3b0 *demo/extractMatrices.R 89142f6fd07792388e74fd69002dffeb *demo/mathematica.R 837a7db4def223b7ba17ba93f52039f5 *demo/quasiBayesian.R 1dc619850b52023f072249b66f191b92 *demo/quickStart.R 9b61f7557c1e6c863affd1bc5f07480e *demo/reliability.R 8acad8740c32beedb885f1fae3c13754 *inst/CITATION ce99a241d8b349feecd612fe26227070 *inst/doc/an_introduction_to_markovchain_package.R b64520934fdc7526efd9535a038b5b18 *inst/doc/an_introduction_to_markovchain_package.Rmd 355d1e958f8f67cc5909ed71668b1a11 *inst/doc/an_introduction_to_markovchain_package.pdf ffc3c9338daea8be6d0d8357f77aa6aa 
*inst/doc/gsoc_2017_additions.R fc0fa8e32ad53619b12cb4eadcfe8cee *inst/doc/gsoc_2017_additions.Rmd f099d859bd6323cd739613f78ff7b57d *inst/doc/gsoc_2017_additions.pdf 1c94018519979c0ebbf844c0ba0c3cc6 *inst/doc/higher_order_markov_chains.R 48aee4ae30025ac1fcd6e9274d91e03f *inst/doc/higher_order_markov_chains.Rmd 739db0bf783820bc652402f87354aad6 *inst/doc/higher_order_markov_chains.pdf 730fbb92cced04056c80f883f3566252 *inst/extdata/ltdItaData.txt 75bf84bd1d2c1ad2c18b97199443d27f *man/ExpectedTime.Rd 3a7587d2c12ba2a1652ad7e38cc69443 *man/HigherOrderMarkovChain-class.Rd 2207974462e269879736e2aa4f45f3bf *man/absorptionProbabilities.Rd 984657ac88a51f09cd8013b2ad5ccf81 *man/blanden.Rd 515e8fd8bfffcfe1cf5349200a7682a6 *man/committorAB.Rd 6a4eb256b4f993212a37f877712edf59 *man/conditionalDistribution.Rd cd4830df7e3f9ab722d4edb724aa9c1e *man/craigsendi.Rd 84d44f1e1dae8840ee345b2368ebede6 *man/ctmc-class.Rd 7b1edcd40aab0fa00c37a2b8b0a32588 *man/ctmcFit.Rd 1a601c7098ade1f736a926f7e33bb90b *man/expectedRewards.Rd 13ef966850f4dae14aaa7221b1181720 *man/expectedRewardsBeforeHittingA.Rd aff4c6974caafc84c7d410d6d706ad62 *man/firstPassage.Rd bbaac4bed94a47e88deacbf976b86e9e *man/firstPassageMultiple.Rd d2c9c7016cb65119ae51207965beac19 *man/fitHighOrderMultivarMC.Rd 24b9ff5bd194750b8bc5da4f66775cd7 *man/fitHigherOrder.Rd 11f3ca6a0117afde424469092c385e59 *man/freq2Generator.Rd dad43a04be77e9a9b7ae1d3a9fb7a495 *man/generatorToTransitionMatrix.Rd c9840a546f9357451f76ca09b23a0bd8 *man/getName.Rd cf61b91702f4f73d44e9fd5608ca4d65 *man/hittingProbabilities.Rd add6fde771cc5bf6c02827ea3536fc96 *man/holson.Rd a46092594d426a3bedb4bd1782da2da2 *man/hommc-class.Rd 297096be86b88ebeaabd6ee7ed0a6486 *man/hommc-show.Rd 10dbf12cf0346f0054d567cd09fb65ff *man/ictmc-class.Rd 592eb7fba6781dfdcee2d2dff82df000 *man/impreciseProbabilityatT.Rd eb6b353a91450ff5674cb25090d46d70 *man/inferHyperparam.Rd db14356077967b114596ac92021a8a20 *man/is.CTMCirreducible.Rd 61cd70542034adea9588515a291dc19c *man/is.TimeReversible.Rd 4dceef72c917d3e66e97adfd880435c9 *man/is.accessible.Rd 93a3d28c78d94b800997dc44077bd480 *man/is.irreducible.Rd df3496a2d6e96dd76409386c6d3f40f0 *man/is.regular.Rd 44c9b0250b8fd6a2e46bcd0a8545f59b *man/kullback.Rd f1ac77afa05488dc787ef87ce5a9074e *man/markovchain-class.Rd 176b4ddd04fe67ee7df1e4f2afba5940 *man/markovchain.Rd 550130443105af8219ecc8fabbb62811 *man/markovchainFit.Rd c3c1420c4aec6d3ee30ff46f68756329 *man/markovchainList-class.Rd 16c1cbb7705b17ebb518f1106583dfa2 *man/markovchainListFit.Rd 9be58105d48ce7cbefa8ad420a47f2c0 *man/markovchainSequence.Rd abbaca484a9abb47564201cf4dd69834 *man/meanAbsorptionTime.Rd a26bc397e9698f97f458b3af618a88b3 *man/meanFirstPassageTime.Rd 3f11047d088c7d3e5848c6dca4c02493 *man/meanNumVisits.Rd a51431e1948ef165709ff5329b469948 *man/meanRecurrenceTime.Rd 2f53a77e485cefec6a31883806563a7e *man/multinomialConfidenceIntervals.Rd 5fa42ddb7efca956e8e6c354bced7302 *man/names.Rd c7d4ada0e1a238d53ec005b32d93515f *man/noofVisitsDist.Rd 3e2482fe20053c54b008d4a4393ce5c1 *man/ones.Rd f371b228dbd222f85535f1d4c9219b43 *man/predictHommc.Rd fc9bb6a67bee60dda7b4dad14bf7f3ca *man/predictiveDistribution.Rd 29a6c0777531665ad64fc774c6e87e25 *man/preproglucacon.Rd d1755ab27b558f63b9e740af441f7777 *man/priorDistribution.Rd 80fbd5430cc4b8d68d031b1424b62a75 *man/probabilityatT.Rd 4817c2004dc1ac01c19cc8f9a3212b45 *man/rain.Rd 9d4ee477a6139b177ed5d218b12e7fb3 *man/rctmc.Rd 954bc6c4ee8923a7ecec4928f121e9c1 *man/rmarkovchain.Rd 0c8479de816d083d212b802a03c6e802 *man/sales.Rd 4ba99b5946a10b828a943aeedf40e50a 
*man/setName.Rd be0b6000cb92dc3757c227737ebcef4c *man/states.Rd a28a8b97f9354cb3bf08733359978209 *man/statisticalTests.Rd 3825bbb52da090aae367cac4de6f13f0 *man/steadyStates.Rd 84702a1a7cdd752c4d65bdd9b2abb26c *man/structuralAnalysis.Rd cc0565a34593f573e482e92783e6cd27 *man/tm_abs.Rd b60ba3fe4b6b18d4e99878b7b259562a *man/transition2Generator.Rd 19d5e66645633e19a90cd83e3049b0c9 *man/transitionProbability.Rd 85575509f03f4a18367e077392a9572a *man/zeros.Rd 7a3aeb1aa851dfb26ed4b62a9db06cec *src/Makevars 46afad93db0f8369499180b529bbac9a *src/Makevars.win 4c8bf0a3e340b7aa84647f337918e08b *src/RcppExports.cpp 4b77a04f31755a0fc89baed5d50c8aca *src/classesAndMethods.cpp e28005d65e2b51dac4973cb40de229b5 *src/ctmcClassesAndMethods.cpp 100a776c9e36949f4bc77fa3467ba1b6 *src/ctmcFittingFunctions.cpp 02ce3f3715895ea5a19302312ea0eabf *src/ctmcProbabilistic.cpp d99e875a963f6fbaf1734b3470ca67fb *src/fitHigherOrder.cpp 54d343f821d5ba9f3ec7dd600d4cc830 *src/fittingFunctions.cpp 8c9f0905b63553383414ccbbf6dfbea5 *src/helpers.h 723761dbfb6fbf1cad9db40ef8dcfcf7 *src/mapFitFunctions.h 671ba94f3c19dc66524665fd56d2b82c *src/multinomCI.cpp 693f21d800e439415365d7d75ca318a5 *src/probabilistic.cpp 8118d2d4fb74432e8cee34693663e394 *src/utils.cpp 9ca86bdb9770b1d4f09faaccd4b207c0 *tests/testthat.R 8702f384348e3b3a74de2ba9d8bd5f46 *tests/testthat/testBasic1.R 95866d62c44f628264aac082f16f8562 *tests/testthat/testFits.R c6b1cc046d56e93de12430fa159cca00 *tests/testthat/testMultinomCI.R 76426c64e1f40feec3af8d9efe2b77fc *tests/testthat/testPeriod.R 9fda6f42d517e727c4a080c8a1fe8baf *tests/testthat/testStatesClassification.R c84a4418ecfd4c94fb810586787b3d40 *tests/testthat/testctmc.R b64520934fdc7526efd9535a038b5b18 *vignettes/an_introduction_to_markovchain_package.Rmd fc0fa8e32ad53619b12cb4eadcfe8cee *vignettes/gsoc_2017_additions.Rmd 48aee4ae30025ac1fcd6e9274d91e03f *vignettes/higher_order_markov_chains.Rmd c19f024e4ed601bd7796030986ef807b *vignettes/markovchainBiblio.bib 057d08248e65d85ee90839f9c2e6ba4f *vignettes/template.tex markovchain/inst/0000755000176200001440000000000013762012756013542 5ustar liggesusersmarkovchain/inst/doc/0000755000176200001440000000000014503773052014304 5ustar liggesusersmarkovchain/inst/doc/higher_order_markov_chains.Rmd0000644000176200001440000003060214472736544022330 0ustar liggesusers--- title: plain: "Higher, possibly multivariate, Order Markov Chains in markovchain package" formatted: "Higher, possibly multivariate, Order Markov Chains in \\pkg{markovchain} package" short: "Higher order (multivariate) Markov chains" pagetitle: "Higher order (multivariate) Markov chains" author: - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat ACAS, UnipolSai R\&D address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedicato\_giorgio@yahoo.it} preamble: > \author{Deepak Yadav, Tae Seung Kang, Giorgio Alfredo Spedicato} \usepackage{graphicx} \usepackage{amsmath} \usepackage{tabularx} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} abstract: | The \pkg{markovchain} package contains functions to fit higher (possibly) multivariate order Markov chains. 
The functions are shown as well as simple exmaples output: if (rmarkdown::pandoc_version() < "2.2") function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{Higher order markov chains} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [Higher order Markov chains] formatted: [Higher order Markov chains] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ``` # Higher Order Markov Chains Continuous time Markov chains are discussed in the CTMC vignette which is a part of the package. An experimental `fitHigherOrder` function has been written in order to fit a higher order Markov chain ([@ching2013higher, @ching2008higher]). `fitHigherOrder` takes two inputs 1. sequence: a categorical data sequence. 2. order: order of Markov chain to fit with default value 2. The output will be a `list` which consists of 1. lambda: model parameter(s). 2. Q: a list of transition matrices. $Q_i$ is the $ith$ step transition matrix stored column-wise. 3. X: frequency probability vector of the given sequence. Its quadratic programming problem is solved using `solnp` function of the Rsolnp package [@pkg:Rsolnp]. ```{r load, results='hide', warning=FALSE, message=FALSE} require(markovchain) ``` ```{r higherOrder} if (requireNamespace("Rsolnp", quietly = TRUE)) { library(Rsolnp) data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) } ``` # Higher Order Multivariate Markov Chains ## Introduction HOMMC model is used for modeling behaviour of multiple categorical sequences generated by similar sources. The main reference is [@ching2008higher]. Assume that there are s categorical sequences and each has possible states in M. In nth order MMC the state probability distribution of the jth sequence at time $t = r + 1$ depend on the state probability distribution of all the sequences (including itself) at times $t = r, r - 1, ..., r - n + 1$. \[ x_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)}P_{h}^{(jk)}x_{r-h+1}^{(k)}, j = 1, 2, ..., s, r = n-1, n, ... \] with initial distribution $x_{0}^{(k)}, x_{1}^{(k)}, ... , x_{n-1}^{(k)} (k = 1, 2, ... , s)$. Here \[ \lambda _{jk}^{(h)} \geq 0, 1\leq j, k\leq s, 1\leq h\leq n \enspace and \enspace \sum_{k=1}^{s}\sum_{h=1}^{n} \lambda_{jk}^{(h)} = 1, j = 1, 2, 3, ... , s. \] Now we will see the simpler representation of the model which will help us understand the result of `fitHighOrderMultivarMC` method. \vspace{5mm} Let $X_{r}^{(j)} = ((x_{r}^{(j)})^{T}, (x_{r-1}^{(j)})^{T}, ..., (x_{r-n+1}^{(j)})^{T})^{T} for \enspace j = 1, 2, 3, ... 
, s.$ Then \vspace{5mm} \[ \begin{pmatrix} X_{r+1}^{(1)}\\ X_{r+1}^{(2)}\\ .\\ .\\ .\\ X_{r+1}^{(s)} \end{pmatrix} = \begin{pmatrix} B^{11}& B^{12}& .& .& B^{1s}& \\ B^{21}& B^{22}& .& .& B^{2s}& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ B^{s1}& B^{s2}& .& .& B^{ss}& \\ \end{pmatrix} \begin{pmatrix} X_{r}^{(1)}\\ X_{r}^{(2)}\\ .\\ .\\ .\\ X_{r}^{(s)} \end{pmatrix} \textrm{where} \] \[B^{ii} = \begin{pmatrix} \lambda _{ii}^{(1)}P_{1}^{(ii)}& \lambda _{ii}^{(2)}P_{2}^{(ii)}& .& .& \lambda _{ii}^{(n)}P_{n}^{(ii)}& \\ I& 0& .& .& 0& \\ 0& I& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& I& 0& \end{pmatrix}_{mn*mn} \textrm{and} \] \vspace{5mm} \[ B^{ij} = \begin{pmatrix} \lambda _{ij}^{(1)}P_{1}^{(ij)}& \lambda _{ij}^{(2)}P_{2}^{(ij)}& .& .& \lambda _{ij}^{(n)}P_{n}^{(ij)}& \\ 0& 0& .& .& 0& \\ 0& 0& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& 0& 0& \end{pmatrix}_{mn*mn} \textrm{when } i\neq j. \] \vspace{5mm} ## Representation of parameters in the code $P_{h}^{(ij)}$ is represented as $Ph(i,j)$ and $\lambda _{ij}^{(h)}$ as Lambdah(i,j). For example: $P_{2}^{(13)}$ as $P2(1,3)$ and $\lambda _{45}^{(3)}$ as Lambda3(4,5). ## Definition of HOMMC class ```{r hommcObject} showClass("hommc") ``` Any element of `hommc` class is comprised by following slots: 1. states: a character vector, listing the states for which transition probabilities are defined. 2. byrow: a logical element, indicating whether transition probabilities are shown by row or by column. 3. order: order of Multivariate Markov chain. 4. P: an array of all transition matrices. 5. Lambda: a vector to store the weightage of each transition matrix. 6. name: optional character element to name the HOMMC ## How to create an object of class HOMMC ```{r hommcCreate} states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ``` ## Fit HOMMC `fitHighOrderMultivarMC` method is available to fit HOMMC. Below are the 3 parameters of this method. 1. seqMat: a character matrix or a data frame, each column represents a categorical sequence. 2. order: order of Multivariate Markov chain. Default is 2. 3. Norm: Norm to be used. Default is 2. # A Marketing Example We tried to replicate the example found in [@ching2008higher] for an application of HOMMC. A soft-drink company in Hong Kong is facing an in-house problem of production planning and inventory control. A pressing issue is the storage space of its central warehouse, which often finds itself in the state of overflow or near capacity. The company is thus in urgent needs to study the interplay between the storage space requirement and the overall growing sales demand. The product can be classified into six possible states (1, 2, 3, 4, 5, 6) according to their sales volumes. All products are labeled as 1 = no sales volume, 2 = very slow-moving (very low sales volume), 3 = slow-moving, 4 = standard, 5 = fast-moving or 6 = very fast-moving (very high sales volume). Such labels are useful from both marketing and production planning points of view. The data is cointaind in `sales` object. 
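The fit shown later in this section follows the calling pattern sketched below (a non-evaluated sketch: each column of the input matrix is one categorical sequence, and the `Lambda` and `P` slots of the returned `hommc` object hold the estimated weights and transition matrices; a low order is used here only to keep the sketch small).

```{r hommcSketch, eval = FALSE}
# sketch only (not run): fit a low-order multivariate chain to the sales sequences
fitSketch <- fitHighOrderMultivarMC(seqMat = sales, order = 2, Norm = 2)
fitSketch@Lambda  # estimated weights lambda_jk^(h)
fitSketch@P       # array with the transition matrices P_h^(jk)
```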
```{r hommsales} data(sales) head(sales) ``` The company would also like to predict sales demand for an important customer in order to minimize its inventory build-up. More importantly, the company can understand the sales pattern of this customer and then develop a marketing strategy to deal with this customer. Customer's sales demand sequences of five important products of the company for a year. We expect sales demand sequences generated by the same customer to be correlated to each other. Therefore by exploring these relationships, one can obtain a better higher-order multivariate Markov model for such demand sequences, hence obtain better prediction rules. In [@ching2008higher] application, they choose the order arbitrarily to be eight, i.e., n = 8. We first estimate all the transition probability matrices $P_{h}^{ij}$ and we also have the estimates of the stationary probability distributions of the five products:. $\widehat{\boldsymbol{x}}^{(1)} = \begin{pmatrix} 0.0818& 0.4052& 0.0483& 0.0335& 0.0037& 0.4275 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(2)} = \begin{pmatrix} 0.3680& 0.1970& 0.0335& 0.0000& 0.0037& 0.3978 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(3)} = \begin{pmatrix} 0.1450& 0.2045& 0.0186& 0.0000& 0.0037& 0.6283 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(4)} = \begin{pmatrix} 0.0000& 0.3569& 0.1338& 0.1896& 0.0632& 0.2565 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(5)} = \begin{pmatrix} 0.0000& 0.3569& 0.1227& 0.2268& 0.0520& 0.2416 \end{pmatrix}^{\boldsymbol{T}}$ By solving the corresponding linear programming problems, we obtain the following higher-order multivariate Markov chain model: \vspace{3mm} $\boldsymbol{x}_{r+1}^{(1)} = \boldsymbol{P}_{1}^{(12)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(2)} = 0.6364\boldsymbol{P}_{1}^{(22)}\boldsymbol{x}_{r}^{(2)} + 0.3636\boldsymbol{P}_{3}^{(22)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(3)} = \boldsymbol{P}_{1}^{(35)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(4)} = 0.2994\boldsymbol{P}_{8}^{(42)}\boldsymbol{x}_{r}^{(2)} + 0.4324\boldsymbol{P}_{1}^{(45)}\boldsymbol{x}_{r}^{(5)} + 0.2681\boldsymbol{P}_{2}^{(45)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(5)} = 0.2718\boldsymbol{P}_{8}^{(52)}\boldsymbol{x}_{r}^{(2)} + 0.6738\boldsymbol{P}_{1}^{(54)}\boldsymbol{x}_{r}^{(4)} + 0.0544\boldsymbol{P}_{2}^{(55)}\boldsymbol{x}_{r}^{(5)}$ \vspace{3mm} According to the constructed 8th order multivariate Markov model, Products A and B are closely related. In particular, the sales demand of Product A depends strongly on Product B. The main reason is that the chemical nature of Products A and B is the same, but they have different packaging for marketing purposes. Moreover, Products B, C, D and E are closely related. Similarly, products C and E have the same product flavor, but different packaging. In this model, it is interesting to note that both Product D and E quite depend on Product B at order of 8, this relationship is hardly to be obtained in conventional Markov model owing to huge amount of parameters. The results show that higher-order multivariate Markov model is quite significant to analyze the relationship of sales demand. ```{r hommcFit, warning = FALSE, message = FALSE} # fit 8th order multivariate markov chain if (requireNamespace("Rsolnp", quietly = TRUE)) { object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2) } ``` We choose to show only results shown in the paper. 
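In the fitted object the matrices $P_{h}^{(ij)}$ are stored as slices of the array in the `P` slot. Assuming the layout used by the (hidden) display chunk that follows, slice $ns(i-1) + n(j-1) + h$, with $n = 8$ and $s = 5$ here, holds $P_{h}^{(ij)}$, and the matching entry of `Lambda` holds $\lambda_{ij}^{(h)}$; for example:

```{r sliceSketch, eval = FALSE}
# sketch only (not run): extract P_1^(12) and lambda_12^(1) from the order-8 fit
n <- 8; s <- 5
slice <- n * s * (1 - 1) + n * (2 - 1) + 1
object@P[, , slice]
object@Lambda[slice]
```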
We see that $\lambda$ values are quite close, but not equal, to those shown in the original paper. ```{r result, echo = FALSE} if (requireNamespace("Rsolnp", quietly = TRUE)) { i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } } else { print("package Rsolnp unavailable") } ``` # References markovchain/inst/doc/higher_order_markov_chains.R0000644000176200001440000000462314503773026022002 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ## ----load, results='hide', warning=FALSE, message=FALSE----------------------- require(markovchain) ## ----higherOrder-------------------------------------------------------------- if (requireNamespace("Rsolnp", quietly = TRUE)) { library(Rsolnp) data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) } ## ----hommcObject-------------------------------------------------------------- showClass("hommc") ## ----hommcCreate-------------------------------------------------------------- states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ## ----hommsales---------------------------------------------------------------- data(sales) head(sales) ## ----hommcFit, warning = FALSE, message = FALSE------------------------------- # fit 8th order multivariate markov chain if (requireNamespace("Rsolnp", quietly = TRUE)) { object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2) } ## ----result, echo = FALSE----------------------------------------------------- if (requireNamespace("Rsolnp", quietly = TRUE)) { i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } } else { print("package Rsolnp unavailable") } markovchain/inst/doc/gsoc_2017_additions.R0000644000176200001440000001230514503772710020072 0ustar liggesusers## ----message = FALSE---------------------------------------------------------- library(markovchain) states <- c("a","b","c","d") byRow 
<- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ## ----------------------------------------------------------------------------- ExpectedTime(ctmc,1,4) ## ----------------------------------------------------------------------------- probabilityatT(ctmc,1) ## ----------------------------------------------------------------------------- probabilityatT(ctmc,1,1) ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ## ----------------------------------------------------------------------------- plot(molecularCTMC) ## ----------------------------------------------------------------------------- if(requireNamespace(package='ctmcd', quietly = TRUE)) { plot(molecularCTMC,package = "diagram") } else { print("diagram package unavailable") } ## ----------------------------------------------------------------------------- states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) ## ----------------------------------------------------------------------------- if(requireNamespace(package='ctmcd', quietly = TRUE)) { sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) } else { print('ctmcd unavailable') } ## ----eval=FALSE--------------------------------------------------------------- # transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5) # object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") # committorAB(object,c(5),c(3)) ## ----------------------------------------------------------------------------- statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) ## ----------------------------------------------------------------------------- firstPassageMultiple(testmarkov,"a",c("b","c"),4) ## ----------------------------------------------------------------------------- transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") ## ----------------------------------------------------------------------------- transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", 
states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) ## ----------------------------------------------------------------------------- if (requireNamespace("Rsolnp", quietly = TRUE)) { statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") predictHommc(ob,3) } else { print("Rsolnp unavailable") } ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC)
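## Sketch (not part of the original script): the detailed-balance check behind
## is.TimeReversible, done by hand for the molecular CTMC above. The stationary
## distribution solves pi %*% gen = 0 with sum(pi) = 1, here pi = (1/4, 3/4),
## and reversibility requires pi_i * q_ij == pi_j * q_ji for i != j.
pi_stat <- c(1, 3) / 4
all.equal(as.numeric(pi_stat %*% gen), c(0, 0))   # stationarity: 0.25*(-3) + 0.75*1 = 0
pi_stat[1] * gen[1, 2] == pi_stat[2] * gen[2, 1]  # detailed balance: 0.75 == 0.75 -> TRUE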
кBg&tVI { d(l|TE >ϊ&ˊ1C/UǿA5 귙3O>in}(E1[}m)$6-9M_5+x:}>2N'TcSņ-&yCR,c>Zf%niSmƸay1N+| a,`㇠7RЌ1<^)sK9){I5jRi}4N$.o 84ExqSpZ$476ZB~PlK2..ͧ9U6@ SOŁMpت{ x-i؜pO C1I]߰:hNxF oTA(Jl-_y(QmRKpq }*ύ "a^?w:g#lG>:5shRKS!1q~s6+oAiʢL' Ka1\d,K͇5.Oa~Uww9b)JGׇU5 ?LIJ;su ?#SaȽ.v,p{b)!Y=1X< (dZ̚mf6g|yCp>a=LnДX5$ϫ~Yv٨Fٳ=,,'nL#Y\&vJB ŋuiwX`9/a]M7pr0~ХfPLa.Hæz쳔$SZߗs-qwe]dT}7NQOPYbPa'A DI 9w߾ $<6Ko >\3QL;ftO"5~ɯ'QfDe>GlOx5Yendstream endobj 97 0 obj << /Filter /FlateDecode /Length 2489 >> stream xYKoΙifI^!Yp6xX@eil)g=I8z~U7b{}?󿤛>Nvy"~V(7MAL'iz1u5A~lkMVF{䒽nofF9 o=J gWs޲A*T_/3cr.w淳|v_,g6yؗng:/M8{yȗV;4Z2}^_%.~,t.Ż` taQW:jIQɏo=G{au>gbnfs-9g(t^ ea\Z8VRQm&Ov3{D(VHKlH C>탨bGHuwJ8 wykN36(D4ceSSҋ?\\Lzd&ԺK{߮M_>T^!jɩ)ǹeAcub!Qz_X=m)ِrP_΂3E7m1y3IDL/%~hvV]d96x̖”l$c7;H(_ R̕HPf@n{=og]ayhzxCN%ꋃ H^-\Ss@6(Ŕ.H%RH [Yޤ q[+MT})S{؃P%lw[>fc2́?r-5*BS-H%q)$tEH '͗ҕu U6ѰyZ#iETąb_{9\"_.{? Tu7iô}6GQìz7jSf[IeA&*jƔͤ} A;}*mci*%i Oi#=H͒BFWq~7vzWM ;8իWPp\K^0Ta3G%#E J)T`{Z\j[>!_:ˆsSp#@XPiY#T1duL:2G` f:l+F#v " q(_UTt L? 4٨30P{^(]) GxB>e%y1k!!Y$G+Ɋ||ݨ) R/=ĕM2e5( ZIfÜčњ*7Ur<ݶO<L<5/B ̘J0f@]8uV[]Z q,1iS}Sʞ;_ƾ.ˏaʊ-4/1&EߎJՏ#;z]Ј 7} Cta_iffT]NC@qC᛽q^)^8xco ѫ2?Pt uiLI5QI1S<#;k<ez@1J%7u *Ő*R['-/d8)R7ұ"2.FYTĎ]~ldU%T|Θ;&s8> stream x[KsNUnJa2]JŒXd;I>HD4O<vAQTebgzz{iyw4Q&RSg zJ_&@sxCQo]:M S6Aig?hٞ=4]KJdMO|Y#1M-HP.oqʹ|!-bIFN۴9lQ#8AæM#%#˻ɌhaefȇEwZ7 l*Y΢3 -KNIYrVI>\o;Ivv$5N$z;|7\ٳ39rᩑ&;%iS4;VR'hOL,*ZÆ O<;,#M?8QJ4FI*6yW#s G D&-L&UQD:mRaS$~K R`р VتٹUoRnTC~׷ZP|0pq\K<`._1+Dzgc`Z+`CZ)WZb"M""py>,+7`bSP΄Ǒ㯋QÎA>GU9CTeZPUgYҔK|Ǣ69/ b,(Mka9àĂfpQ.,Ǘm b0$*p"ryf{,ܷ++<9%m\&t҄(PTHFG l 1AE&d!R-u@+co;.2TZmx we+ Cg6*0‘lLYbulmCE8T1 sF5"=F\1u1zOPzuznYF9u_Ѵ@vХf+ PQ.:vc~AM8wE)8 Iy+)2 Dpc,E4ܵc/4/\&Y$Dvp:O<pHǧ-bRp)b]<^ >ԗڮao.*^dˉLSd 8$2'jf0` 8m:MMN'pY(CA>}V?-aL6 M]:,nOF zwOML%]2m\KYD`c ]̋ɽ)Xa~qlV<@}LQnIV ro j5i3N)dq ǻ+Z?QN)RXKqb5ݣ#)S`Ea%(#x"4czA*!st=C6DqJD$lH#VAVGӿ/v$49gϋu+O9"@uL]jhFgReJiuMr0GJw r+RDkw=ZGVxEfmbkJa- +L3-O; PND4;!|XJure갰J+0 7l2!};0n<,7Zo(ʷEa2s/#{W͋2, --|fkEZ` ؂ê> :@g;1 FD@èIYNpq=tud~5QЅR2>]t$ ] /%"j+2؇ @ddf@1.-)an@OƔx}t g ` 9H:-IPIjQ8̫[/tV' X@!*#ف-^= Ǜ^(EbjY%m cPhpee]Bh{E}IBK.Bmm :;B׭'Sb$=):'jna7І1^Фb)k4jt<ьWaW6o-Y]m%rKXym EqljU/7~Ӝ٩և"rɱ0E) qIiq hBº,uP ܀|u{l͖VVc&NWj\EiR;]w,HdlHBZ; HwԢ]f]"BL RK `)qQ6 t 23k j'tؘ&xOΠ<#F*IjFY*xf|QVzX>`uH!E*V1 &xNmS؛B@٫?ck M[G|~$Yj"30<-Noe^DGj~|Z~ Q>Bo5v1ڏfpd/. 
[C;Q6YpP{o.*'o~]-ok܊Vv0\lMXԬZ_HÓ켺.@zr/_qendstream endobj 99 0 obj << /Filter /FlateDecode /Length 1738 >> stream xXKoF7 ek2k"AP؈@KTߙ.kIN(j͓03>Lfzɇ wb3eROg7:ʧ%m2+t0ރeT@~eۻ$r- -[%).J^%kTJܰ]>)y-K- mm%f1zةYYC>{:+H^VB vVeXh:%Pffn}.C.]7]d"H4TLIFgInBmjZΪ$˜k wYm鼵%D˅y7Шh8qր lchM3 .4"]pͪm³2?n^gmsMBzjV]k7 װݮpIR( awh̜4L޽v`J01+¼Cu7^RMRZ8LjjG$ޢ0n #W]=z5^*j:#@|>rU8.wPr-dwϝ<炳7Q9\tO_~O4E窲m8QIYĕY r W:i%BA'8S=)Kh-l6ʋzVwNXdwؒFύ 8Q󾤖֥3Dڊ4A &Y UP6U8@u`E '*>JQ܁Tb% n3гJj$J] 睡qW1'*JrdeuH_#E@bU&(̛< M*B(/_bPm/7QTThևz_& ?qm#$T eBD<<$kRBD I 'FL"hJ2e"pd&Å/⏅9;q_'Amt*LAVਫ1 n{I2k?$i!ask{.K'Gj8v_#Oz`!h?lGr ͧѴ݇>wt5> e}ml 4u .xqS_,]ۊ実 J^?<rCΟQ ͳwգC`!ݯM&|mG\O*T+㱽Pu(W3kFHQ8RS5?$&Cy(6;b6~1 Eendstream endobj 100 0 obj << /Filter /FlateDecode /Length 2669 >> stream xYYo~op/id|m*{X@H0Dr$GQSLאF]U]W_|~oY=[}<,^\-Ikg˳8ϸzf<^l.٫je#7sVNxϚ󅔢ް\8)YsHk2; 9;057Txޢo@~|+j~hȋgI"gh~vI/g7(s[Zڰ>4-~[Wwś/jtNLR;xtU #*.I*gB7¤ =tCXg9cއ1b&Ϟ8:Զ;o?Gy57l9\krpxU;'+@T}R N SgM!vxj TÅ𒭛vW@FxE,Z6%,9֟F7pο~;](PPc8%R\=i|1Rq14]W\#ov  Cj QvT8J`m <낅!)dG0+sO/}l41Z M'úUx-7S* 9(w(PP#yFbj'4z;tO69 \cлeYHHc%ְ'(R|j3zO2Cf-Jv NLǒז)YS 0R6f(Sy:3 `C yokbMS2`N'&L,AlKK>%pMnc~E&䰑#K-2#+!?"pr B&Ԁf9PZ7 ԁ !/éI텕!T(*8=%},!ShDj^K;-PQ& ឭlԖ}c5 Yt7ҌJvl`PAC1J#2T  6y:2~ nZL׸Sŵ.Mb.vBz$_b8~6q6 qᇁY.I:nC|1E8RSRU&Mv7/P-lFB7ֳn þ}L̔Y8T2M7Om7*h`hLJpOZ"HPc"\͑YLӡ~/cQN6qF g 6}y1DRplC)ɮxT`\8E:g9fZH^nІZo\VcTj5ICygZxjX0mhi_ͱ 1v s-}du-- mvw۬Җ]käOh =%qO' Oǃbvr'Ù\p.}_D%,K|9N lg'~mΚSX4t\eQ|vb|9^/_#$T_g%kfOS^ޘn.iiof&Է4By.5+1{f86m4=endstream endobj 101 0 obj << /Filter /FlateDecode /Length 2068 >> stream xXKs|M \2Ǟ,k쮽^qVVHD$@C_UUlK_`sy7͌W_fvf(#W͙rjn)<'rZPU$/ K˝#u 4WfK?EFaJ ~'˂RFQ9G=NsA%ܐq^ yS&NDF^mۨ uL2IX4դ ꌡLoG'û,M XfaӥKٷٿ $BhX,ge;KPg}"oe/:QY+DCşZ(ekePl'`)Ϸ6hc~%<>E|TT 9ʈ1$q̩klؽΟDqƍ։4A:fɘa[e~ 'lAKKQciÙ`1La}h 4@>%$ o:$׉2u,, 8`(`JX12Qf"O~ r LltY32fKWX'B|4b5V3 HRuUsY݌#& Q l]7@iػ%xs/ | J*jf?K$aE%yd#xVPj;Y:/O׫uP3*)C?v%m]c nr.\k4EƄrG^GrUzXRYos$ x*?Z8@   W8(m_1PTxq.Eas tʽԀ<?_,y{  8N^Ǖ<ڀi/#c]r_םmЊjδFӷސM_eJ겯4e|CS/ n1`2-p_J\w\x@ql{M\}38?im}{*S_{+aBaD49|hI6eY0}- o{qc6i|XMdah1ObE @6΀bl Nb JQ3: 3D~.7e $yb=ER>e_1Tb %15lM;upÞ3&JQ"CIH [;]* D^c.29(.QAn,KQ.) 98ؠˉw=^(,s6A+OgޙE-ͳ쯳O38K;,3xQsT3!˪ڕ岼+vڊv4~zFl8p33ӵ 02%rzxra|eh [3TGn&iPqk-s:ag1PM>[_nڮ>\UET7@M|;p>1I5OLJ2L>_jKܝxoHrXTv4%5/͗"Qҗ^@p8;Mfӄhū T|^^>|؝u9^վ̜7!,,y@Ayjy~j> stream xX XWRq e5{DQQqiPYdS\>4.lFdk<%&j\3FG'!:Sx˻h$o޼_S}S2Ƣ#-Y"83hıH&2q`q|52U17{栠K9XZh~?#ܷ aci^9,aѸF9xm vvXu*y+Xm0+(xs煸߹`BmpKl]mJw7af0qea2'f gV2#UHf53qg01Zfˌc1.33Y,d&1bf)>c2}~3c-Lae,} z|$"O?XdBxPTZ=,%*.TYZwM}gm귮_Co5?[YYzg QUYLFSߟ ,n Xǰ$o\~(؅>!.$wऐD\Fy1TB܀R=gfwVN1mXy8+j56 *O=Iѷ]A07CLV(GnD )Y]0CcRߙ~{(dzB^H-j򙋆-5^ڃ dn^:R:C uL|IVrUh? @LBͣ8VRMyWQPx n{AApU$6 B^~Fef+!a4rA..ʖ$d ~gVh`lseءP=#=z@b>yot4 B CT`L*t[Pn8{Ӭp gkɄJX Rv 3縝\P6 H,G7‘d$Y싽4dL"gzFӘ6|K U0w]Y8mdvdN W廎| z%SKk4gbD,?Oϻ[5ƹ>yq 7CjG\imqWd` HϽ}7,682JlQJu@T^Jh6cي.`ӚqTنپKUJ-=̦pofS93sp,g6+0Z##GMBg%(ٳ"jq]?}r%L5oU(X~)'aOa.n7Dp^如&XiJGyI@2\tUuAמFT@7aیM5;vha%V%Nٹ d+o·pPէU\ja +K[8{Y6>}(Ob#3$CJolC^!1I\ z0UH0ŧBcLcxr9v͒`!!y7pZHک&,qyF?(*:{ v`vΔHaV1xmmx* ;?WXB搰ȀhOX#rr,@KcK!DWkmR M.ըai8;U5[(l>qmb/jtg,+P3naLjI6wH7I3/.2̌ݙfG <'6D"H/" scxu* 5-oJI٥qР-XD\~vL,?#-Wm4;JЋp,$+NzL,(}-̬~8x1|ѢXh:Sh)uLVeE't4lEIBЪ&=IQaYS?C,YanL(|0fimz^jEtk4^ٹ-6C a^qά ջ5MgSbGƤgBBb 0X$کOg'gql[q%`Mq9>'J %-mzAJGz u{8dUI%X_*}’dȇRG/G7+agMDEUeq}Q\lYE]m:M%ɤMD\ص||_  - (Qw;%ߑn~m[qț*HKJcp̌`gf0Bv 9ы\WƭƘvuKа5ǴV? Ky,.@fƗ>@>)Vbz#Uzai>gљbxԾ@yl@@Oݟ`LP?EʉS,3^^tNDe WҀRau*>$+$fmg3[PTPe]#-{D)-|aEwh'pR]zf{_Yo?5JP&力>U$D&%$'JJ(@0k'U.)!j_PBvJ9:c0D%qucmII{!]^\]¦".copP\:82[ђö$N<C9BGAvE 0Cp;5R_;nf'jHN? 
ْzuiG#L$ȖڃnJ2#/.ǐ/XPAr2Y5VҒUq6P{6X.?:+3Ǟǣ(M uE5K̔k%\򘉪o_cЗ8P2 '>#nÁVo8`nZ.zF?$^գ]/?O]ǚʍG,gqf:dx9ɧy3o%4Ksf|SѠOF-&PI]Q jp[5&֪8y]cfq,S㥍 S%s=V7/ ˫O;<—Իx _sۮ.%νӷ^^#x-I !YUO9#9/g!Ob|T/K-0&%&.:[T,Teg0p}p٠峟w9:bsESRyHwuWoڍ_*7 [77P)n 8|&ʤ5uZrh 6%2iha1]}w0&3]RCy}ųbcw8F*QG;txq|嗢sI8N0I ={+qJ4Wݎ^7jEplr6/G9oXk.Im׊?zoUPTcjhqaFFti]x^Xɠj%I!k1ok )Z0ultqIA.xm(gCfL usOl]9`XiID]bC!ڮ]^;993Kٖ I6^ȅf"]L6P6YA~SUmi/e6uW_:uіSb{=_[ }փ|Ks?xKsewN]T0. ﰕzf-&r^HyZp%DaqQ}7KhbE;trш*Oe6uotltKx%MHoy^[xjBb9g9^5 $a1(E]kF56V:Hz`؀='D'f|| _u' ֜DM,aϡEKkק MW! Ko~>fH'A$rpR0c#8F0Cm̫T%q3=#_#H`bk+ Ϟ3ep4U:FEe#!Z/ٰNf<IEAn59J\w0˘! ƾ;~Kry@coevTxnX?)[f⸶_ZD^7x ~ͱN^ڽehiWTӒȀUjϣˁm mç^WK;8mFxOn.GT}rDLt0+GiB/=weJrJJ19#=escfe@Aendstream endobj 103 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1394 >> stream xUTyLwݙQau=fǶ" ؊ZD䈨d9%UW,A*" 낢1VQQcjӼ?Ԥ?w%M)(G,l2g[6BA_05>lE\oLM?{.E}D%PTNEP) 5#RPmF_J%Eb/ygdS a$onP)lvmQH*>^ؾ#}p9t ^_o}\!|{CCl 2إT7 ).F PU*-$ ͹ΕQibTvZzruE oQضSݭFmr[XzK9(j`ڛy8Tyu qWA]9qPQ\vӦ6Cmr 9im"!@i--[n.fI9wJ ;a+py`5UNB 00P!yv>ew&Xկ1H8R/e5ihSe\kӓ { ˯mKw|$#RwLGFvM[ Pm'x5'z∭Q)^"} 5ۀLW%aZAEVLY{)0U򘲻Η @? KӠ.9l4K>667hl~d`۸"\ o%ы K8{>C#U' ]™Cr.RՇoNeU)})b֧b"cD9Y)f2آZKn>8-\?5 8qcFap[R_B'~"uW[wàȣy #8 bD&ewO R&/`;X v..ȫ_* xDa`ãS4A*jq]7u4QHEgq:+***jj^OW9VU9}|)_|endstream endobj 104 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7927 >> stream xzTT2 3Xb[4"U^D`( :0`dDI,%%11!/ߗ=ߺb-rpλgE %Wumf(*) N?1-AJ}JvNqY!R:-}m!a0?ʒP 2Eza2NOǛqJ92ܮYHk @A4<Χ!ȫt=,{'5̯4g&nU2A S ZC r+9v6,Uf8QEkAa A {kXr4k\a-}ZC(@\rrK<9!d}R&ZB@ ўEx^SBsr< 'En NHo3Р!O؞ !K!w9tj4I^S@vΎZ 3GE,h >/vdTHn #Y1HCSl֙XVR8¢36**}ْMG>9N*:ZjerEo^4 h}1d77p?,ezIY2PPG-e%h&8]qpV:U:zpc% =x$n kqO60M #•Jg2MYSA3琩, D9 Iy| `cMv}xBj-Xo̢B k>`meCFTqZ6Eś$ڴ=G*tѠe GARvBǁ%T] <E3_ b-7rh䢍oҾ|1!ؗtch8JKBSP9*=ߕ)YtZT@2Ŭ'36e O91楩L8 J+Jܶ =!<M_g&4h4Z58ckx'~Qr$Z%eMyB6F𚘯e]BiI1.W>"2󑈹|Ơ+Hh{`}h/L(W#=Lh36+# !Bc]P~=CaDAJdA2 U 1JQYwKI?Dj i}BjS|?YO"N,2(O>ʓOx$^Ӡ@i-ЁVj6skYhE!#:Ldcwp&8V"M߆A}GxRW@Go5%1P`-,IHH) ~hyQr4䨅tjGepwOlWLLۜypl~L&ؔ` DSG%%Cl Qū5n.r[BPEm`hFzӠxfOy3==`5y8`b 1R.HIKI))au:t>-#A RGHhg<$R7 3LZ^XEcS,_w}?*e xڝMJx-`g쌦 {y]sɂZD7}Jr(JRG(CF28dw @/us_C9<\ӂ$پVcL(- ,Fl̡4 ?t9 NYH,O=SAwT 'Q̿ I> Iu=t2ҠrN0 $dS DKQomT9J{۠cy0 ?{r ;&?G@z+z}Rl.k투L CwNMH,;b>A '$Oӄը OVL96EBȑ RR= "50WQB-*KΌHZ3O>xAG[BAAVi ##4a` uH~er{kHٶ~]r;U긔JA#'_O+K_|=J̋"U;@H5dzQ*hI|ߗZro!p}#RHK9GF:ݩt:n\ҲK|+iRdle<*C1\h(¦g2+4VUmN3ex8?k8Rw:$h"jH'uʗ=^$I2hʰ?bIq6?ș< i wMNr,Fuvr*F('f8JDcEuK$bV"L2!1-ǯ4;W%+'R +:(bkS1?eǁ*2Sh潖SYW bҢ=oS/r4݁gTYۦF(LT1ၛYia05Qci-7"MDP2l:4ԠTNƺH?aI *HSstUD"?/fyUk>Iv K2VNTcꍋٙWN짉\VB G?!}%#w}z[إ`;$q hk$nܑN?>k= m~4w7"G/E &wEgHRK! 
3&]:f |"&4"ν ˨&جkf۶MnaX"ړڲ@4d^&ǶNxk΁cgTXK?t<#"MEKKGCy~cU/x%]n.qko i*Hi\l!)d/ܒ?ӖzA4 .r<Ѧ&>^HC49Ӟe秌t;D 懕~Ѽ ZߎO#/s߼- Km8QѢi`wOnCឬGgx1K)g!b2n`._PVEUAKWO -vͰU$ݪ %ıDeX-!9O]I }a֕܌ېGUMxVqp0fOpF&tjy d#VeLGm}eǑ渘:W Cn-eQ٫x$2O4j‰:{6'1ȸkt[ yK`c'{W}z/1>mqlYLp_#,3:/D+Fx& =A} NCݓ!?ak 1Rsr֛1÷e̬) B|w,?ɈZ>9D el+pi8RXu|_^=ٺErSWooѣ7b- amӿ|֥9B `rߕ4FtXڼ;Xfvn#ņ˜66?p%knꜾ~ ;ivoˇ5׬/D9#R%6wij-´u2%3-80j*n2,]n&XL8|^u]}&=U(rm%Y>?FV4\x1 0|wb+bNT'#2EbxЕ5au?j f;3kQ$J!oȿ$QZ %,CtWV߽G A+{cC9q46 [&k!Z 34ą.*b*cJ \вp"X,G[*#-4>0; /,JSdW]V[A~zv^jAQbw sXjBX$ i)jw Qs0,O̼2%ׇd; QqT’gϋrP ky\&^-L]JN;Wh&7b ݔݤ_nqv3Xâ@KG ursn uj݆;̬\e˔vu"< pZ3O_eZ#KFg|X- "j#wvb~ΛiT ЅkVMoa[Jsi/ ?v2-ێWNBK3?_{ Z$ $&:NLXAd0K޼F{4Dwqol)A_T\Y限 Z [|`lSh k%W `%#bЈrFTepoe_'ݛzv 5\=FXk^0gB'&tTn۩ &h!A8FEAɽec~N .x*8cJ'=[Dvr|( 1̖qmx_liJ ۖl7N:F׆[z ' :bE&/G[ '/ p/xd-̷Pٯ0)ӃEh)S*K\>/]vط$MxHM(W؞a_W EFAB#!!?COOes O'=3]'X;pNͩ]Z^3XQ홸]!-O6"ch<<#㿝=ddW#e]DHO(ZmL[Y^un$֯6-ل#aSGsc#޿ñ5(MU)AUC,i|z:ޡ~qI7HzM7lp z03t:=t]^OKOً3+endstream endobj 105 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1196 >> stream x}mLSgǟ۷[ʥP&^3ClS?ȋ::HcJVpkUӖVQF@,:?̹!Q,fbt,,5 uA4~y>srߡL(Jؔ_d3rMuiE&sc>PBDX(ŭ?"Mf5VIJM)7iNiE=P0d^U)d*@C\~<<86i&M"-O{mzl~q 7 q,1UK>`ΐO{t31?nBF$,'j@QOhɎ)ꠜ =]`5UGg,b.P\dl?@ףm\BWO;q v[^,꼂xY<:ܰ o |GGyZ[BBxG9 :vEFzwH"nMr;nq%gY[g:1kF-*&h9V sX*vgg>8?Ԯ?endstream endobj 106 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4182 >> stream xX TSw!KKa+KKWXWT@D-CD=_UN\w(v֩bO;}Nyp}bpflܴMxQк4ạfؿ>FLicEL\:9Dg taD&ggd S,Z4(^WRpx8oH>xSpfq( g%Ħ%wDl X-4bK؂VA<:C&3kܼ q 7&lN MN&E?LsPb 1JD Al'"KBb D$~OwE:b)H,#6Gitbn1h9]n~΋<3iݤ7'+&4TC+;-~ڹ;{3ifg9y}w>FE;<ᇸ/WJDz*fgH@R[#ǼM Rg,q6BHjJPi1^pX\h?9g}b2ȗy ]8j=Dڌn &a ~TŠ/R[5T75g 0@ٹrWh~1:!ld˖o5+6=J /!z{F6h@'ۋl56zZ<)0Vs St=i<],ԕ@.9gh9#+uXϰ|v.;MEJt!&g(4#|GN WO6"nYsxk7'lqC.p>5XjB) 2@^Bm\syke8V2v_! E"J=ҨU;V|i?ZZ|C~' |v9ƪb{±P7'Bw$ 3(@+ڭJLheR``H,[S3:|ɥYmA4Vwe@f/eY1!;ܪe7&;} mwvUmfS26qDY.z.M.#eOw!9*GB$ԤhiMhEA`f/MD!&?8wG3qb>~:!&(Y_;tSDZe&(Hu/{;)]ڬz Ej9%NY^SFoH~=Յ᳹?Hev$M Ő'wBm繡6Jq<].Kr7`-}q ;H;T߾yڱϓfqo3vb=xO^pqe@I_ LhA~ūrb@~׉KAmEz.]+cur@%}=dj% DǟSc'+ 5% (r/4}6Y0 ʊtUROj @_R,g2}!^k)d7&8 T!T^JٽMtSdJ!{WC$@3TuQu tBmCu`3^~4\FLy3m*ka fKىmRJk,/v.eGjm{F-tDk Chna\N( $8z);3ڽ U&66_%6VlgOj,Zo;B&}m^]w멊C9fP7 3!.`Kj TQQHNfs!ڹ3GƮ+E r|I/T}n;yOCd,Tz50ym;U.2F_(fVXf*ҦrtM@׭.|` . 
&yAa󿎮 u@SeR% 8s؃2;xPzf9kqbe]M:b6nmjoT7Fd?ήT*w\䡺L2i^A3->ZL6gl4FI(eG4l_.ٲ Ea)}( F[|j(/3*w%r?ekl&5@^Fgx{w&~B7<̧RcSI_B/^MlH  40GG{gQOʧW$Jr$ǝӎ*~m']$JT涴yۨ1V˱KHw;Bnh-`7 4mtԕ6yO;> X&vNOsuWcAGx!mDRB1l[f:tj.V:) 5Dz:{<5NU>qƻf;t,iX*SQ*?(O<6#(E ;;q#ȧF0~G|gƐdx5j(UVaH~3FshH`M n@{.0W 5_nnݿ;$jϞtJy9&Bagׯ]'m9yt2}(S/׳Я|"$H)8bęw e]{$bQjzo{+z_'/Diٕy6(/(!r1wMVJb#yyx՚/0vP8p v كx5NTy&O&O3uL1KGJSvUmrLԇW?_endstream endobj 107 0 obj << /Filter /FlateDecode /Length 551 >> stream x]=n@{7 $ȥAEn)D͈zy~Y.ncڭ/˴6^/pxLoú?}m[<>:oS{_mvyL}cU=VUƘUUXUB<3U82NU81BlĬPUFABRUE̅ū 6EFF6EFF6EFFe*āE ^kb62xMFfl51 &f#De2L4I$ & hq6W"' tA:!D.*'c:hjlxj'CA@P`zH?P ,B+ , BZYpes90肣  F]&d4J-56)Q&e4JؤF Q(a2J%lRFIM(iIwẽƏmkMN ;`ʻ:/endstream endobj 108 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5629 >> stream xXXvvD,0)&3D]b5@#`Wi-_HUV@ }q5j4ib411%$wYpK˝{);\ ed@I$SgАY3gEy˵gc%k8FI5E]xՏĉ: FSFzЄwCw#gϜ9g wOPEx`wk'kPy`=94;huNk kݗ[/w[=)6:,247< /&"|EQKC68{2yxĊ(wsvw?=0(x[ -xӦoa5s9sMqjj!5z@ޠ&RBS(j#.5D-leԛ#5ZN͢VP5r\(Wʜ,)CʊKI)FSS(jI(SʌbW(#ɡ!JfipհhQt4FӏdJfɆmG G&SGL5aTVeg6ެɬa+1T.;Ie.5_bg^o~YieªgBjC1C%y(bE!en Aq(諷P}A46|qOPއZ6p؂|(?zj51gOŭ\^OAM !R ^b9B]9jQC?R"\:Z8ut4<.ѽ-$Gmk2X(ЬL0Ԡ/ٙb8K<`êg/R@3c_A> FՊf4UqjԆrg,/_^3۾"8,ύ6M?^3iټW&Hy;bߛ>Q ]&uΝfnq/} 3j2K0UUe|߅l sS-Z>u-q5WNT,%|)@Vܟq^~su$MG8vώC\B?D/ʹ^p'?}I1}J~$q|H@touFzhClQ6lXƵ!% hkmb"tFٟw4w-QuzT3]ՌڴGVGjIl| ,qpS᜴KW"P0zr5ܛYAy5Gy L.]O-AeU7\^z}~[mvVq$*oRI Ry^ \T*QQnnݞrT-,(S{Z$lK>=ʲ H~O ~n|RE [uPg NxXtu쑟PQ[VZVQEQQE^PXv "t=Y?M-ކa¿ >UHLYãOX~Ϻ!~ ű ,]}٭w{1 4%e;Yv#CJ_~bh. }YU9ekY!A,l-F>ĤfE֦";dw6+U?1xIxv?M,N?n8w^Hg*/5B2pp,J]Kn| .xbIkâf9H^x6]B=fme=ԑRQh u|R]{Žd\~$} L7;ФoF&Bj#O>D|f񎎺*@vVCV?eS"D;c,qߵPgsSC9N__g3F5|s-+7 Є]ԬbΎBтkn[)'`+Sq[Xj7[hPIq)VȿB^H(a'Zd5Wx^KF1g7$net*J@{Q:J`qII9 />х'w>v5e)}go\>qc[@aym4:AL/Hz0-lvvD)FL1dRi<O/9陲OiH;lMџޅMFWb`h. YkŌ ԅ/_~\@\v=}"։JY a?l'Q)fn/2?YOxZ sz>|G J* ?( Qkn8LVx^9>_ ay+uѥVM{Psǟ]XaEiIxE-9OeNWŠTh[kkU}7*aqu[[7O.;j i(׹E dI*^yQ*Ԍ jr! {GM6g!A:6LdžӏfGOxP ˍ/((DLSL]\X N_|Xv݇wqUgK&V!<}򠠝a퇚x|O7\5Hkw8ف̳ʪ ɩ(jo*6.;&x_6HXGᅘEA y4ğ+/)B*Z◹u%$;֊C2Y{+t V}t9s%}eU!%DNޒױ9܉2~O7؅y;fsӊ'^Ci1ex[`MU|Teԅmful/8s'M~1k !uw#OWƗd K*<&;33DoΩ3SNcPr2@ )?dkGGNe{1 /ůQϜ4a$_\/C$gm_t5Z OhTNܢX7Ӈb+]i)Vɂ %B- (nZtGd5%;1Ӊȿu0:tqA:O4͜za ǻlKLΊ@ &2E07*]}!*KӇWngP۴^M?疶m|[+[]uVC ` ѤkT G'ޱÓN` a;ISH5TH r(~3jZ鉉Iq9Q(dT-Od=</Z8*ZUQ2llfe<&Y?xݑB`y:0hPj5=$'Saɋ@ZQJƿ[$l]im<`e }@qa9 rx!yM ԒG!! 7%?`cH mȭf.q,E-Gktpz{BW] %&%&Nٱe`Fxfn@Lȹ)oUnxÈQJd y .喠"fdRn),./ƚἱG0d2PUa.Ĥ~OܒJLFP?('endstream endobj 109 0 obj << /Filter /FlateDecode /Length 287 >> stream x]=n0 wB7* .ɒEL"3}dOGxT=> stream xypSƟ>YV"&&,I !&`jxE$/ZhՖ-;^C &%PHd%diz>f'L4LhFww17'$y^Q+bs+xW+CO0B0%oDjlvܺ%k ϟ:k` z~13t-bVM7l xul>3vMS+w-&dv= ʒkA˝vg!Z.{G:$UZd;DhkUPsMfgHcvg5]c #ok2H@//Js F7cvAIeЙ(6(ьexp@ EDdrWI@Xߠmԓ6}P'( T\̋ha3nwJAQ rR+m$z\JR*5g5iZ^ wA- ^Pdp7sPʆ:P4^j\336F'xH& @}h I9y~ W? K$eABlzFJyzcY:'WhV(=nW?,5>B[4 fܟNn>e/]":Ё 7h `Z?A=1( Z-)]EW|ƾJjYH-\za׈@ZB !K0{S^RJ3һۆN5w~kז~W/nhz+4Wؕ)}'_D(Gr{v?S!KliRfg:^4ڃƘ]>kcg0mn) (k?M%}̱hc;!.z YQҐn˵k3pfx}V3 +|CQ19^T''d ^^1qP_*+ڛ|j 9?~fN7.d jVFDƢmW]Fs yFeyuALf ^-R{SYXP"Mzj@S b<`?,ET)RTBX F!QeԘV؈慺bl&X㔁bY/$p;Ԡ)H| p L$z܄i#SqN4tR[zM$NT#v?6v̜*j++x}/tj4$duqq+>=-@/uTZ jNzJf u'd#CkzIҩW4D- y] O)U4:𬚞^:hhp %|*i:ؔM %ϧK_LH[MF> OTTV%kxT 48=DžG_gN8=pv[s;z*id8]kU % 6h5 p %43qׁQǟYPUic,f8ܱ]htmwu !w^WRHJ56rM>x'캥Bx8 ?2n+|#糌{x̀}mU'.?Xԕt cj+#mn|=9pdmbؑM1hJai)so1>gH-(--+5U].g'o.x'm$&rGg'tF vfSn u`mª*T yݽyŶ(p%ٿ A%g'ia AXToook'x961X<`XgNԪ 2M5`][Zۺe}CSgP3m 휼 .y$q4gbݤ.<;;:.XFSG&*F." 
s.=WYk^Hp5ۜ6,^5uR]xnMw=#G脣]fcԺ*z}Ln`/*Ke.NF^zޖR& QS1kB fhDΨ5z/:Su:յpdJFUpwT0ڥR $&msحRC GX㮴C;>=2Y[73Q"PRRQQR :YzÅ},_ "*"%%=z:D&n24^tzgb?vendstream endobj 111 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1489 >> stream xkLWRWtsMvӨmfIZP)R+Rzy{WhR(Eq^2q3\沙͸8/&{%q/ʌ&[rᜓs~IMA(lQff,NUS=[0*&% %ڒ,{ȘI[ݰ~acQzvy5B8~R(,.O*1[y-\ܰ_= a+9+rW0;Ff9]_V2 8E̝YlֶW(V5 6$)@6#9d5Cf#yJ>7u)5Z:=yB#LI~rH2*ZaP+p-L!wx_e+,4b}EP^X,@m:7lc&VeU&"\6d,=_3M^ǰ9x0=}rC=v2]Rh L76'1_u'3؁٠e1;=HǍ~B7u Y|_GQ֮v[=ѻa 5!nY⯴鬰1}6RRWkqs(7{YIbAisay%_R˕T ;d l6.tgS/s~#6ĉHMOD%8'(ăǺ|q]w wž!Fnu*tz5 xu 4z0hXɫ:9ɳ͞Oi:h9TS0c/i4~]/ף_QԌKo64uɂ?LIPXzm>wtE~fᢥ$YhQ60xxHʵ hF(GeH{U$_6|W; 0vJewd[ӏ:q׆ \;hEOY<@n6:^[r󗈔a60%kh⯣DcF=ͣ)6y;]PdzhPMcw-V> stream x]n@D{}@vokE PPa""07wruun[{~lSNC7_ۧ:nm\Y[oms8 i4]S}_sݵe磇C=VHHtʬȤ|Ǫ|> stream xY |P2 Cn"X, -(P.t_i&96wCKY\(hA$>T('.Oû;i(M}wΝssws#D"Ѹq1Rf͜6,"5f{pg/ܒp!w1>2Qn0jOƸ Ǣ1hP(8&MJ|BZ3gΙ>^#g^vy*^Nw{M=&+>+(l׺k]. Nq1a3RgQr\+ K%Hɷٱ2mUa"" ^'|j%S yvږ31kv֜'Sj*zZC&Qd*ZG=C zNm^fPKeLj95ZARs(?y5j.GS(ܨ@=FMS#E(!j 5C=L6SKm$"=2y@+$J8mCE*i S ;Έ<=jo>Z1Ƙ17j{/qόh^sݣ<#R_Z!9~_Nxh¬ N>7'Οh/z?.CCx}P*jc*dn*8Ȁ0v˂PrylMlT] {fnC'#@OM^TAhs4>;l@gmA6(ms{y!JBU?+uDsr;kCKl :Xgm@9c1@ViAײv` `)~4zh ~쭸)'s6t^=~B!ib) aęS) n){VJwۏ('[Xf8ևz6E Qt2ǎUg+p'^lGb{]KUD#Y&6M1wK#AKm oZpшi?2?dWcދMt`{&f/T(>'Q]` 7+n] ;*[e5ՆBp-Xb=rDRk!fg$˴eT1U;zcMmXc$#A`f{S;LuZ=ߺ{Éɉ6z,qA~"Y^/=)|7?Rbռ>64ߕR< L}ٔHn41;oWCIeFJsӂ(rGCv#o˃_Ւ};7ui7aoQhGNP#&Zf~`ٯ}݆2mn| +[c-LLM]POԥ;gp`2uBXxެcyײۢffXWsUJ/:2rڌ6(ʂ7"NZ͆svmo (.1V"! 42m? 7@IS#n;b4$6ϧYz_Ԓwme{o;6ٳ xD m{(1[׃Mm0g]ߍGFO#oHBubB0`'e "hHrG#]_DgH&Ft鮑{zABjUhF&s["d~]wF/oʜ-]}opqƳ&4#- ":Jv*?:Ɵ㟺)A8w>o[vBx@TGo\?F'lQP>ӄKd|BIsTs$}I$HJըVTrFO B\{<|\lky cya=S@Wᆼj(.T#rm>phsgulYe՗%Ý8 u߆4ݳhNcϒr𠖃")7O* ()ʄvpIs.+?Ƕ  Cʠ܃lћ_1H/O(xU2&u> :<~Nj i #+MeFAm'x#rxY߸jm im"G#B2-p#L?~$q5%nG+h,M7+n^[Y! W;h_\Ǵ˧}~g3뉾{geg}Eב&u>x7~$+ft:5O4dOvFv&ViM*YU\Ț*װ_Zoonfg΃lP2Ih+ˍ@Q&Sʰ^Z#|yPvv-5t ^e*h,%Ci؂Ca.>Űyě'@fcKT Mɰ7:k[*2y)50Ȝt#Dx юViDTC7SG}l#<֫;oKc\`&V Ox(:*)"MC^랺K]#__#H-5Zkt Y~j;(&_zzШsDt;]I 1K/@b;9>.ŧ8Am˃{~A"tLGܛPO>^=BVCcGOaV])1 qŹ?WK;DgX-!į.UQn]J6~NR}g}6M}qJZhG BJEĒ%b-[wp0xտv6~M'wx~[htsfeHddM^Ҫ%9Ml)﨎HmC5> stream xW XT^0RZꠙaKK jy4;W 05ͬpQhjjF-m]m99t3\o}~I(OJ"\Jɜ1}jhlJk/jX$ɔy%/3O0|qvũi9 3O5u*y)`{N@Q;RwH J X1mմթĀԔ QʸԸ- X&|miw JIݔ$cUVԫѫbbP&NBQ5Zj.z ST85@m)jzZFP˩ lj%ZMFQcR5ʓ*%J/4igl$?62y1 Ƹw縘qm{\hKܚ-z@Cߎ=d,_erAt|alkMz%{O@rYMflX< f .}8lT*9nȼx^%\_sF`ϵw쮌NN]R!|[̪=`i^>8śH,@pz% HJ  2=X‹Ccp;tEN˼E^$NoVp^<,TXqy4@D RB_9elh9G/;eg\GyBn[ݶuX~$._ug?&a@RҀrA< U ٜQSRXd1NHm6Np6[X!zl 5h*+ؒF]A2p;zep kpT͔ r{7̈́pdDxP7 r&;W\}:zˏ+AJRf7LUEL42f^y 캒H &hndíp㘰4Rfk84TSAx63gNDCB}"'.=s>vaMw5ߢJ_цB  O,+@5L@ZK?xؼ3-d$'ɷaoB< _wUKg. 
H ~IE8-uն:2!x/)c"JH]IYFt![PalFؚoěTj}!uO~}6wtJ`I֓c*bKW*?o?~ (aPUf[TwB١!# MOjasHQ.2QՒOwIzqkQ i MU&kI0&mlnu]QLN@#{cOP**p콸{s{5H  S_E+z݅=cũp7QϛE_ 0g,8"Zš74ӽ2(-L jDE^óP6oڣ+%e`)T`m kR&4/l_jESoS^~@cedrx:(.6ri9i0~ZN.$g_z= 5 E 58tt@\hrœ7?F;=wֺ1B3IwŽNk% XXdz Y+F0|HD5wC{h<UW0=4F4 Izå<˄ܟKA(;Ž“#e^ҖC72s )S-ݗg$ bWT} er`+3nhu/N62i_Qv8h], ̶vuwt%uoy^hksfK%o5hS?m˒4[U;}pо4(:wvIY= F3%AbfH8Pf[gov7F^e> >$lkQP, *+sL&R(6!%XlSkO=.L>L,G ; jbb ۭΝCT_v5 ݊ҏehR^<ouBqьf9GNC\2tl5B}zuv 4Ccm?RJnTs z]2װꠙ-b7?J%%a$;Tz}/rpx `$<"O/>d>dhRPz¶;ef9z?=B +?ưf)Aʐ¿ Հ$%{1qC.~'~/cj4D&c> stream x]M PO4i Q/@ahX o/Pk7ސp܂X,n 2*U#r|yiW1Sz,Ot3I`BBvWk5a?YQQO^XdlyQ6a񢪮e&\CY%vNk,|/gNBoy\^endstream endobj 116 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 569 >> stream x}O`ߗta]251c&G m P-m1z_-m 7^<~y<8;7?Oe%\M/5JBeh\2l1f|1 L80j%=7/TΧDnNOGAW\f5Kc67*BKtbVbB=Zx4r-?xt~F1 ɜ-):Nel*PS 0<p&~:F߇fX?6'Uv#-6븆GW.ɵ60zV>UqWV8MƱU$u|sovU\nwơU*3HUXk6Œl8UڕpۧqJV|X.[J8^G4EV?LVFa9Oo*<_I;:6bGLS 뾉1?\89M" jC]IjiA0endstream endobj 117 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1490 >> stream x]PTU߲}= ^e  RT~(OwIeap֕@]X0Ae3L"ZA%')WeVNS6Qܙs39(o/J&_[ߘ Y! 7mӘ3\*y@l >өI1u{p Ԩɓ)L:C>xSVFA "uBl,-=GoteqrQrf 3:a̴݆\ V KV"~V8nSY#<5)@R*JMi(zb)Joe.-:\Һ:(ͬ9bе'>476D۔աZ ՞\O3drB^l|BK(I/?YXZ Q{k{ 3x*i纫aD2cfDDݹ't85r ^'R8d'FWGPd]P=AQC8Q.dzcZQjccߓC;FrM*YVg-k"ӑON=dB=]_PQ#ADГ@T@ 8m6hE |m,\+AE }[ teKu < $$!GI-+jڶeGFP¥*_}~oA}clL4@IT f%Ka f} 5%뽊rфgYt?E9h 5Ĥψ c-hzYU8MGYX‡! 0go\ma%-vOI̗bw2beg5[ tǝJef+zm?z-?F~wkG⮭)N \*,ƪdsћUP4Y8ni<9tLcV4wd0s{##ėL䉆LaP)H$7Je~+LIK?usf˿ zDZ(ٳ͹'>v hF3|ՌE4S 1PhvhnregYfIDGϺ>m~{8"5UoWK6pl9TqhTɱsj__jNf)ők]@v띠WM_u[6T]6GT)Iwendstream endobj 118 0 obj << /Filter /FlateDecode /Length 202 >> stream x]1 E{N "IF-24I d($ I o*kVCtySB~iIW1On*!;;XM@tCRZt IhGH~E"i =5 %hOz6n9C\yܑ59QJ{i6endstream endobj 119 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1135 >> stream xmlSen핕AJt%)A!0npE0Үkn6^u[{m!e`6%b`\L["3&F=P|n>;.rT!dK55o}n\Wv4?>]OWjPb!Ԑ{8ݕ[,Tn-nz!tj\2XLRR.ǧ]*Jϗ_+$kTY*:no9,6igiMtJϺ%⪓uپO[g}+.Ox,F<#O՛Y/j~6ge騷x۝6Jr ‚ʭ^g]T(QHI(r3di"_^W ^|/?Ȑ^fju:; l +)aX;]W61/kƣ2үަ7gK@@YKZӲ15Yn2eM|xYsFQNLoS-`>NDfϖ\gͨf=YOtD"ȣ ik!R?vlfja[ujxd8TdĮ 0o2?*sbw9DA9y7K'AhѬiU2ob!Sm&ŖYfaO!0.x]r chr'l!秏 ~B߬=r;6 nbf354JgbPF)taPD7Ot^fqB\j ˁA9G/ʜ2r?R V\#CtZMzvyV~`vP<_y|T1͇@wbLaV9: ޞ isdE^f1\Ǻ.XθQ)Q%.tSH<ƯT*u wvendstream endobj 120 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 460 >> stream xcd`ab`ddM,M) JM/I,If!CgɏTVY~'ټznn?l ~^&[_PYQ`d`` $-*ˋ3R|ˁ y I9i i ! A Az؝*X\ZY c```b`f`XȢVi w2,'Ve3\f{<&a' EOlzZ3&vϓع$CƬڽs:wKVvr4[iݔvh}uMŌ֩U|~x7׵vWHk{{˔˺vΩ-zieonɹgϒkyO-.býw䞉=z,3wZ^ Lendstream endobj 121 0 obj << /Filter /FlateDecode /Length 4415 >> stream x<˒8r:ه=8B>,inlGxq9|*Hb5Zݙ HQ*;v(H$@>ʂJ^.;agwrvwYsR7EUV|vO3gVۢzv`f~X csw?/RKˊqg )eQr(Udj.`JH_nsmX56nրoǹ0E*X9[qcKqui3Qhp_"p[`ZXzJ;8cS44RMXܤmm-i:7DN!c]F:uf旂e & o+mA8ϗA Rm }ؓ/HCt]yl0FXE ֬w  ye=[5f9e+P%5s[Vfo·ZジU"lN;D{IKv5_l)+fyxLPFi@SmP.)#a3 .I s26,ixN^XiI#,M@N\M˜\7)śH>"bٔ(cu2n3"wr XUWfFf8-fq!m$T; 0/9l}xhn]7G70wOJڛ\UeAJJ#=Q5\-摱<7h@J23FpS)oAnhN6#هTEs_cH0=}1-x=Ri24kFz ހ[ɖ%"Mhn#:*Gd>رiHHo^5ڣ {6xQ.:os2jvXG+_Zn'o`3*g`H S H C TCvSZ$/y$F!% Vm@W H|1pl݇\lo4-iQ.RGA˴ I sh4mXÒq'XSXWh ~aüFӅO{n\UΌ3hcs`BiRp@-C.dP$O-&T0Z0 H0{/d#ʸ=,s՞E=PƬw`cU8Èvdq8tȰGD]* ]|Jb6>=Z\4*miԨ)2 VWNUy.8H@EP \h([B@3;66,,Nb#%};*a0HkV,O6և8dӌUf5e8b,%f [P^q5ӅrIl+ Yл{c!pؙ,H=Fvm9=!8!wX43(ulo"˽BTPJdgzBБ Ȗtɟ`yB,@ (8f6WؕM}M0feEOO_Ek 2 3JIY8a*\^خ@9/Ƃ )Dm]U*45^J1yJRU!toYR$쯦BȅrìQeP2jŃ~5X ѕ@:vs *S^UWπ %#Ty8 ՙ\P\KiOz:$zB/D8z 9tv9 ! 
markovchain/inst/doc/an_introduction_to_markovchain_package.R0000644000176200001440000010165014503772642024374 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add ....
x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) }) ## ----load, results='hide', message=FALSE-------------------------------------- library("markovchain") ## ----showClass, echo=FALSE---------------------------------------------------- showClass("markovchain") showClass("markovchainList") ## ----mcInitLong--------------------------------------------------------------- weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") ## ----mcInitShort-------------------------------------------------------------- mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather") ## ----defaultMc---------------------------------------------------------------- defaultMc <- new("markovchain") ## ----intromcList-------------------------------------------------------------- mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains") ## ----operations--------------------------------------------------------------- initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ## ----operations2-------------------------------------------------------------- initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ## ----fval--------------------------------------------------------------------- fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ## ----otherMethods------------------------------------------------------------- states(mcWeather) names(mcWeather) dim(mcWeather) ## ----otherMethods2------------------------------------------------------------ name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ## ----sortMethod--------------------------------------------------------------- markovchain:::sort(mcWeather) ## ----transProb---------------------------------------------------------------- transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ## ----printAndShow------------------------------------------------------------- print(mcWeather) show(mcWeather) ## ----mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"--------- if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ## ----mcPlotdiagram, echo=FALSE, fig.cap="Weather example. 
Markov chain plot with diagram"---- if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ## ----exportImport1------------------------------------------------------------ mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ## ----exportImport2------------------------------------------------------------ if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ## ----exporImport3------------------------------------------------------------- if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, "markovchain") etm2mc } else { message("etm unavailable") } ## ----fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"---- library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ## ----exportImport4------------------------------------------------------------ myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ## ----cchcMcList--------------------------------------------------------------- stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ## ----cchcMcList2-------------------------------------------------------------- mcCCRC[[1]] dim(mcCCRC) ## ----conditionalDistr--------------------------------------------------------- conditionalDistribution(mcWeather, "sunny") ## ----steadyStates------------------------------------------------------------- steadyStates(mcWeather) ## ----gamblerRuin-------------------------------------------------------------- gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) { m <- markovchain:::zeros(moneyMax + 1) m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1 states <- as.character(0:moneyMax) rownames(m) <- colnames(m) <- states for(i in 2:moneyMax){ m[i,i-1] <- 1 - prob m[i, i + 1] <- prob } new("markovchain", transitionMatrix = m, name = paste("Gambler ruin", 
moneyMax, "dim", sep = " ")) } mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5) steadyStates(mcGR4) ## ----absorbingStates---------------------------------------------------------- absorbingStates(mcGR4) absorbingStates(mcWeather) ## ----renaldoMatrix1----------------------------------------------------------- P <- markovchain:::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ## ----transientStates---------------------------------------------------------- transientStates(probMc) ## ----probMc2Canonic----------------------------------------------------------- probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ## ----isAccessible------------------------------------------------------------- is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ## ----periodicity-------------------------------------------------------------- E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ## ----mathematica9Mc----------------------------------------------------------- mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ## ----mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. 
Markov chain plot."---- plot(mathematicaMc, layout = layout.fruchterman.reingold) ## ----mathematica9MC, echo=FALSE----------------------------------------------- summary(mathematicaMc) ## ----fpTime1, eval=FALSE------------------------------------------------------ # .firstpassageKernel <- function(P, i, n){ # G <- P # H <- P[i,] # E <- 1 - diag(size(P)[2]) # for (m in 2:n) { # G <- P %*% (G * E) # H <- rbind(H, G[i,]) # } # return(H) # } ## ----fpTime2------------------------------------------------------------------ firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ## ----mfpt1-------------------------------------------------------------------- meanFirstPassageTime(mcWeather) ## ----mfpt2-------------------------------------------------------------------- meanFirstPassageTime(mcWeather,"rain") ## ----mfpt3-------------------------------------------------------------------- firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ## ----mrt-weather-------------------------------------------------------------- meanRecurrenceTime(mcWeather) ## ----mrt-probMc--------------------------------------------------------------- recurrentStates(probMc) meanRecurrenceTime(probMc) ## ----data-drunkard------------------------------------------------------------ drunkProbs <- markovchain:::zeros(5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ## ----rs-drunkard-------------------------------------------------------------- recurrentStates(drunkMc) ## ----ts-drunkard-------------------------------------------------------------- transientStates(drunkMc) ## ----ap-drunkard-------------------------------------------------------------- absorptionProbabilities(drunkMc) ## ----at-drunkard-------------------------------------------------------------- meanAbsorptionTime(drunkMc) ## ----------------------------------------------------------------------------- committorAB(mcWeather,3,1) ## ----hitting-data------------------------------------------------------------- M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ## ----hitting-probabilities---------------------------------------------------- hittingProbabilities(hittingTest) ## ----hitting-weather---------------------------------------------------------- hittingProbabilities(mcWeather) ## ----simulatingAMarkovChain--------------------------------------------------- weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ## ----simulatingAListOfMarkovChain--------------------------------------------- patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ## ----fitMcbyMLE2-------------------------------------------------------------- weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ## ----fitMcbyLAPLACE----------------------------------------------------------- weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace", laplacian = 0.01, name = "Weather LAPLACE") weatherFittedLAPLACE$estimate 
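## Illustrative check (a minimal sketch using the weatherFittedMLE and
## weatherFittedLAPLACE objects created in the chunks above): the largest
## elementwise difference between the two estimated transition matrices gives
## a quick sense of how much the Laplace smoothing (laplacian = 0.01) moves
## the fit away from the plain MLE; with a year of simulated data the gap
## should be small.
max(abs(weatherFittedMLE$estimate@transitionMatrix -
          weatherFittedLAPLACE$estimate@transitionMatrix))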
## ----fitSequenceMatrix-------------------------------------------------------- createSequenceMatrix(stringchar = weathersOfDays) ## ----fitSequenceMatrix2------------------------------------------------------- myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2) createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE) ## ----fitMcbyBootStrap1-------------------------------------------------------- weatherFittedBOOT <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 20) weatherFittedBOOT$estimate weatherFittedBOOT$standardError ## ----fitMcbyBootStrap2, eval=FALSE-------------------------------------------- # weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays, # method = "bootstrap", nboot = 200, # parallel = TRUE) # weatherFittedBOOTParallel$estimate # weatherFittedBOOTParallel$standardError ## ----fitMcbyBootStrap3, eval=FALSE-------------------------------------------- # RcppParallel::setNumThreads(2) ## ----fitMcbyMLE1-------------------------------------------------------------- weatherFittedMLE$logLikelihood weatherFittedBOOT$logLikelihood ## ----confint------------------------------------------------------------------ weatherFittedMLE$confidenceInterval weatherFittedBOOT$confidenceInterval ## ----multinomial-------------------------------------------------------------- multinomialConfidenceIntervals(transitionMatrix = weatherFittedMLE$estimate@transitionMatrix, countsTransitionMatrix = createSequenceMatrix(weathersOfDays)) ## ----fitMclists--------------------------------------------------------------- data(holson) singleMc<-markovchainFit(data=holson[,2:12],name="holson") ## ----fitMclistsFit1, output.lines=20------------------------------------------ mcListFit<-markovchainListFit(data=holson[,2:6],name="holson") mcListFit$estimate ## ----fitMclistsFit2----------------------------------------------------------- c1<-c("a","b","a","a","c","c","a") c2<-c("b") c3<-c("c","a","a","c") c4<-c("b","a","b","a","a","c","b") c5<-c("a","a","c",NA) c6<-c("b","c","b","c","a") mylist<-list(c1,c2,c3,c4,c5,c6) mylistMc<-markovchainFit(data=mylist) mylistMc ## ----fitAMarkovChainListfromAlist, output.lines=15---------------------------- markovchainListFit(data=mylist) ## ----markovchainPredict------------------------------------------------------- predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"), n.ahead = 3) ## ----markovchainListPredict--------------------------------------------------- predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5) ## ----markovchainListPredict2-------------------------------------------------- predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE) ## ----test1-------------------------------------------------------------------- sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") verifyMarkovProperty(sample_sequence) ## ----test2-------------------------------------------------------------------- data(rain) assessOrder(rain$rain) ## ----test3-------------------------------------------------------------------- assessStationarity(rain$rain, 10) ## ----divergence1-------------------------------------------------------------- sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 
0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ## ----divergence2-------------------------------------------------------------- data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ## ----rCtmcInit---------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ## ----rctmcRandom0------------------------------------------------------------- statesDist <- c(0.8, 0.2) rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df", include.T0 = FALSE) ## ----ctmcRandom1-------------------------------------------------------------- statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2) ## ----rctmcSteadyStates-------------------------------------------------------- steadyStates(molecularCTMC) ## ----rctmcFitting------------------------------------------------------------- data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) ## ----mcWeatherQ--------------------------------------------------------------- mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen') mcWeatherQ ## ----mcWeatherHalfDay--------------------------------------------------------- mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5) mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,name="Half Day Weather Transition Matrix") mcWeatherHalfDay ## ----ctmcd1------------------------------------------------------------------- if(requireNamespace(package='ctmcd', quietly = TRUE)) { require(ctmcd) require(expm) #defines a function to transform a GM into a TM gm_to_markovchain<-function(object, t=1) { if(!(class(object) %in% c("gm","matrix","Matrix"))) stop("Error! 
Expecting either a matrix or a gm object") if ( class(object) %in% c("matrix","Matrix")) generator_matrix<-object else generator_matrix<-as.matrix(object[["par"]]) #must add importClassesFrom("markovchain",markovchain) in the NAMESPACE #must add importFrom(expm, "expm") transitionMatrix<-expm(generator_matrix*t) out<-as(transitionMatrix,"markovchain") return(out) } #loading ctmcd dataset data(tm_abs) gm0=matrix(1,8,8) #initializing diag(gm0)=0 diag(gm0)=-rowSums(gm0) gm0[8,]=0 gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) #estimating GM mc_at_2=gm_to_markovchain(object=gmem, t=2) #converting to TM at time 2 } else { warning('package ctmcd unavailable') } ## ----pseudobayes-------------------------------------------------------------- pseudoBayesEstimator <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) } ## ----pseudobayes2------------------------------------------------------------- trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain") aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain") smallSample<-rmarkovchain(n=20,object = trueMc) smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample) pseudoBayesEstimator( raw = smallSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix biggerSample<-rmarkovchain(n=100,object = trueMc) biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample) pseudoBayesEstimator( raw = biggerSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix bigSample<-rmarkovchain(n=1000,object = trueMc) bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample) pseudoBayesEstimator( raw = bigSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix ## ----loadAndDoExample--------------------------------------------------------- weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") ## ----MAPFit------------------------------------------------------------------- hyperMatrix<-matrix(c(1, 1, 2, 3, 2, 1, 2, 2, 3), nrow = 3, byrow = TRUE, dimnames = list(weatherStates,weatherStates)) markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix) ## ----MAPFit2------------------------------------------------------------------ hyperMatrix2<- hyperMatrix[c(2,3,1), c(2,3,1)] markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix2) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix2) ## 
----inferHyperparam---------------------------------------------------------- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) ## ----inferHyperparam2--------------------------------------------------------- inferHyperparam(data = weathersOfDays[1:15]) ## ----inferHyperparam3--------------------------------------------------------- hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) hyperMatrix3 <- hyperMatrix3$scaledInference hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15]) hyperMatrix4 <- hyperMatrix4$dataInference ## ----MAPandMLE---------------------------------------------------------------- data(preproglucacon) preproglucacon <- preproglucacon[[2]] MLEest <- markovchainFit(preproglucacon, method = "mle") MAPest <- markovchainFit(preproglucacon, method = "map") MLEest$estimate MAPest$estimate ## ----weatPred1---------------------------------------------------------------- mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"), transitionMatrix = matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25,0.25,0.5), byrow = T, nrow = 3)) ## ----weatPred2---------------------------------------------------------------- W0 <- t(as.matrix(c(0, 1, 0))) W1 <- W0 * mcWP; W1 W2 <- W0 * (mcWP ^ 2); W2 W3 <- W0 * (mcWP ^ 3); W3 ## ----weatPred3---------------------------------------------------------------- W7 <- W0 * (mcWP ^ 7) W7 ## ----weatPred4---------------------------------------------------------------- q <- steadyStates(mcWP) q ## ----weatPred5---------------------------------------------------------------- R0 <- t(as.matrix(c(1, 0, 0))) R7 <- R0 * (mcWP ^ 7); R7 S0 <- t(as.matrix(c(0, 0, 1))) S7 <- S0 * (mcWP ^ 7); S7 ## ----Alofi1------------------------------------------------------------------- data("rain", package = "markovchain") table(rain$rain) ## ----Alofi2------------------------------------------------------------------- mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate mcAlofi ## ----Alofi3------------------------------------------------------------------- steadyStates(mcAlofi) ## ----ratings1----------------------------------------------------------------- rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D") creditMatrix <- matrix( c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01, 0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01, 0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06, 0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18, 0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06, 0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20, 0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79, 0, 0, 0, 0, 0, 0, 0, 100 )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE) ## ----ratings2----------------------------------------------------------------- creditMc <- new("markovchain", transitionMatrix = creditMatrix, name = "S&P Matrix") absorbingStates(creditMc) ## ----economicAnalysis1-------------------------------------------------------- statesNames <- c("customer", "non customer") P <- markovchain:::zeros(2); P[1, 1] <- .9; P[1, 2] <- .1; P[2, 2] <- .95; P[2, 1] <- .05; rownames(P) <- statesNames; colnames(P) <- statesNames mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company") M <- markovchain:::zeros(2); M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0 ## ----economicAnalysis2-------------------------------------------------------- c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,] c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,] ## 
----economicAnalysis3-------------------------------------------------------- as.numeric((c(1, 0)* mcP ^ 5) %*% (as.vector(c(c1, c2)))) ## ----bonusMalus1-------------------------------------------------------------- getBonusMalusMarkovChain <- function(lambda) { bmMatr <- markovchain:::zeros(5) bmMatr[1, 1] <- dpois(x = 0, lambda) bmMatr[1, 3] <- dpois(x = 1, lambda) bmMatr[1, 5] <- 1 - ppois(q = 1, lambda) bmMatr[2, 1] <- dpois(x = 0, lambda) bmMatr[2, 4] <- dpois(x = 1, lambda) bmMatr[2, 5] <- 1 - ppois(q = 1, lambda) bmMatr[3, 2] <- dpois(x = 0, lambda) bmMatr[3, 5] <- 1 - dpois(x=0, lambda) bmMatr[4, 3] <- dpois(x = 0, lambda) bmMatr[4, 5] <- 1 - dpois(x = 0, lambda) bmMatr[5, 4] <- dpois(x = 0, lambda) bmMatr[5, 5] <- 1 - dpois(x = 0, lambda) stateNames <- as.character(1:5) out <- new("markovchain", transitionMatrix = bmMatr, states = stateNames, name = "BM Matrix") return(out) } ## ----bonusMalus2-------------------------------------------------------------- bmMc <- getBonusMalusMarkovChain(0.05) as.numeric(steadyStates(bmMc)) ## ----bonusMalus3-------------------------------------------------------------- sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25)) ## ----healthIns6--------------------------------------------------------------- ltcDemoPath<-system.file("extdata", "ltdItaData.txt", package = "markovchain") ltcDemo<-read.table(file = ltcDemoPath, header=TRUE, sep = ";", dec = ".") head(ltcDemo) ## ----healthIns7--------------------------------------------------------------- ltcDemo<-transform(ltcDemo, pIA=0, pII=1-pID, pDD=1, pDA=0, pDI=0) ## ----healthIns8--------------------------------------------------------------- possibleStates<-c("A","I","D") getMc4Age<-function(age) { transitionsAtAge<-ltcDemo[ltcDemo$age==age,] myTransMatr<-matrix(0, nrow=3,ncol = 3, dimnames = list(possibleStates, possibleStates)) myTransMatr[1,1]<-transitionsAtAge$pAA[1] myTransMatr[1,2]<-transitionsAtAge$pAI[1] myTransMatr[1,3]<-transitionsAtAge$pAD[1] myTransMatr[2,2]<-transitionsAtAge$pII[1] myTransMatr[2,3]<-transitionsAtAge$pID[1] myTransMatr[3,3]<-1 myMc<-new("markovchain", transitionMatrix = myTransMatr, states = possibleStates, name = paste("Age",age,"transition matrix")) return(myMc) } ## ----healthIns8-prob---------------------------------------------------------- getFullTransitionTable<-function(age){ ageSequence<-seq(from=age, to=120) k=1 myList=list() for ( i in ageSequence) { mc_age_i<-getMc4Age(age = i) myList[[k]]<-mc_age_i k=k+1 } myMarkovChainList<-new("markovchainList", markovchains = myList, name = paste("TransitionsSinceAge", age, sep = "")) return(myMarkovChainList) } transitionsSince100<-getFullTransitionTable(age=100) ## ----healthIns9--------------------------------------------------------------- rmarkovchain(n = 10, object = transitionsSince100, what = "matrix", t0 = "A", include.t0 = TRUE) ## ----healthIns10-------------------------------------------------------------- transitionsSince80<-getFullTransitionTable(age=80) lifeTrajectories<-rmarkovchain(n=1e3, object=transitionsSince80, what="matrix",t0="A",include.t0=TRUE) temp<-matrix(0,nrow=nrow(lifeTrajectories),ncol = ncol(lifeTrajectories)) temp[lifeTrajectories=="I"]<-1 expected_period_disabled<-mean(rowSums((temp))) expected_period_disabled ## ----healthIns11-------------------------------------------------------------- mean(rowMeans(12000*temp%*%( matrix((1+0.02)^-seq(from=0, to=ncol(temp)-1))))) ## ----blandenEtAlii------------------------------------------------------------ data("blanden") mobilityMc <- 
as(blanden, "markovchain") mobilityMc ## ----mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."---- plot(mobilityMc, main = '1970 mobility',vertex.label.cex = 2, layout = layout.fruchterman.reingold) ## ----blandenEtAlii3----------------------------------------------------------- round(steadyStates(mobilityMc), 2) ## ----preproglucacon1---------------------------------------------------------- data("preproglucacon", package = "markovchain") ## ----preproglucacon2---------------------------------------------------------- mcProtein <- markovchainFit(preproglucacon$preproglucacon, name = "Preproglucacon MC")$estimate mcProtein ## ----epid1-------------------------------------------------------------------- craigSendiMatr <- matrix(c(682, 33, 25, 154, 64, 47, 19, 19, 43), byrow = T, nrow = 3) hivStates <- c("0-49", "50-74", "75-UP") rownames(craigSendiMatr) <- hivStates colnames(craigSendiMatr) <- hivStates craigSendiTable <- as.table(craigSendiMatr) mcM6 <- as(craigSendiTable, "markovchain") mcM6@name <- "Zero-Six month CD4 cells transition" mcM6 ## ----epid2-------------------------------------------------------------------- eig <- eigen(mcM6@transitionMatrix) D <- diag(eig$values) ## ----epid3-------------------------------------------------------------------- V <- eig$vectors V %*% D %*% solve(V) d <- D ^ (1/6) M <- V %*% d %*% solve(V) mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates) markovchain/inst/doc/an_introduction_to_markovchain_package.pdf0000644000176200001440000134447014503773060024751 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 5818 /Filter /FlateDecode /N 96 /First 821 >> stream x\[sF~_IjJ@/S3RIl9IR IIB!Aۙ߾nM&j2) j48}w"čTƕW"3Pg˄v6pgO0g2TLZʤw<:SLL)3RN˴rL;,3ƢDҊL>efC2'Pљ3edIԁTBgx 2OHy +`-q"Q _ n8t s88F*pDi c*@ $QB"#)  pQ3"4 +@]h@қL րs F@bk@6Ԁl,ƥ@F'Ne$R@2l3xgh @vdg@Ρde(p{ BY@Ԇ؊yL(M-)h1`3fw&e!"0"%>dH3d)kDOp$,!dYi3POHb7pȘ0 =?8rX%x "t_Q=fPQ^b/7UV]쿃ԖgU>2lEl{I(3ʶn&ʶ>y'0-c4>yV^g:mVoYGӎfV1~u8َ5_$MOU~7^^&x9 ~g:;3GG}Ϻ/<vۋ{sfzW``#ѩ?[4k[ >kcf8T'/eOY;L6`hr~S5hOpnGU$#ڬq6 _RM#.f&IwxN/g$9XRRuTMR{HmҢvI [xЩ|ҒI+P# wTKUA~B ijZSGլ]m!sF,'ш0Nv8b+UuJS<(Ib Y$\I TvSDI$~Z?ǎ9\!-4\U1NrNuΟ=XkXI@4-sKAd|uvNf- Ť.~ZSD"%1ķgmR &5I%yKt|,b#HF(6Bv˄ W ûYmDK*/iФ@;ap[-:ͯ&ݝT@4S du]rl]rR.BJ2˫tPǤC2(gt\8ft^uu*tX~?>;;o儋ϚOLͰ\eS&d}^SRYh# MNSJ iBwnufAsKfOelѲ9|a0Ĭ Ȼ"s6VdžIPpCaLc, chz†겫{i]  -Tw- [*aqJ|QW]'8->+ϋœxQ/WwEY\bЌ e1,"*.˺,[\/7դŨ)IU7մnŴE[Ӫ*M1//?!x<*(K(J'N.=xq=@sj' ٖzp:Kճ:0^FIe[1b}qOXte\=9入()5nMj(Oj_5b`lE؎p\bk{B`"[J'%| CS=k qta]Bt=*#ErD"|>~DMWI or!T aԐeHt9U [d\6{3<;E/` a:!"ߓK)wc߾f\/0!. 
markovchain/inst/doc/an_introduction_to_markovchain_package.pdf [binary vignette PDF omitted: "The markovchain Package: A Package for Easily Handling Discrete Markov Chains in R", G.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón; keywords: discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions]
e JF#jc!w4='qVr-C[P^r Ȼ-F<;8Vj:PI F3$,3==ҏW:b2PGX#cq8q\~i$ZԿ&ӧٳLC͆{=2-.d+fw:U{#-2`n *Z=fF_0IasIkRjNV-\.2<馿7^9ykk<61DvQ]SDtS?G}r_G;?^T@< o.zi]Z P!84A)d"a7 c^Jo7h`.@O#w!D<[SZ*PJg647'玿6ss^ZL%oDRa yEա78L 5Pp qz‚"HĬ`  ۽ gh Y%TYmSo*ʪ+ zi_rs=f{I;)\b].W兟s +KEi N-&ogґtKSTZ'iZ71 TWxb)h40$'y7Aq зB xxқQg Yc4:p08WIA^Ki7ǫ?l+`!uo̞-9#4FVcsLr㡖<+jTx?9U򊆆OFt>9ۛiށƯOH~AS#CeM\+7)>R73Fwer:!otyftb&.by6 T2ي45%nhc$3c[~,ک#=487ϋR ^cu,Ou{oWMPH[^׾g?ފŗtֈg>%%P03o3V'O kv#u]| Um5mG.2niwO#VǗӧUzZfDjTfmXoluP gB~|B1 iI|#?÷M WP?"8EkĚ"q(-ֳۣj@pFVjgՋS"Z bhj / G6ܠ:{G7CiO)Fg^.m1 ὖаo!o^_ҩ+'1Iy߶'KiM$nm+U HT䶴ۨMyۻvPZ=mYnqAДbSmC~7m^Ss(ZݵPt"**4͵<,q.6k\B2.@"ŹKW|etrFWJC) 13eUvwIU(<\! w>o`V'(rTJq5: B`W2ӛY'm9yU `ga2M[Do`El>Q:GJGnK\6]YUno>BpD5ԴAJvE^s=sQ,Dl'pkU㲒u*[i)% IX>\jRcTZуh7Á-Ñ 7Km{u> stream xVmTg3 Dt7R-V]ݺ[ZD]A) H  & _ FA)WGڊzvm퇕u]|9mONM>y/FR^4yc!K6oNVLUXyg>YpW_;8&[#3_E6TVmț)/+=5mSt'fd2u$ݢ)St9anR^!E[Z`y.|QsЬ- KӖذa0K 2f"D3U<|&„1Ә"f&`" b ,ez{xmbčlC)ѡơW;r8 C* N`}헉'hò%}n%% $jrĢ (`2+T L`)+,RFR% n3M8~m2tD (j/'B$dU~z~R–TH}#URթ]wwmzAQ$d-O21ICI,` xEaZkmP DC, mŬ ;zoվwz.~pa!O|'w1C#U(6/\I2@B21Ic? dݲJF݉ ϣA݌s>!^ a XP[uY%9`MlE?,NtaU+.h1v֌\`Ҵu47#U.m+XcL8 >'~m\'ˆЙ28uIAyQxPTB9a=w7E6j]K<Ѫ {ܠ062񲗺X k Rbv2EK7kpVZ[ s!p7:s™.|*F$.)@#5X88:;MuP#ykЇ'NqhͶYX^˭#ch?kQJ *Ơ/BqI['+Re$6AhHKʨ~@0W. ^jԄp2N(!`InfQ(*)Bom l%'V$F ٷ)BDʁK5 mGmui*AAXԍqmBnŊNэ4 _@4ۦ68<8hB7b )d:&/`voUґ'-╷?:/ṵl=$@QG1u-G-ӱ<-W)pA'@Q`uWv3ˑaPfIT>wDd QrJ{z/V!77(un\v{#^LY q %mԐ*?T@p]8ŝ\# T>LoSS+\cLK1Ő$6v?nh18닣̉ʔNo}TCQr8Uw@lrki)'㕹 LΓUj.5Uo)*)/p~Z~G R/">X8^Z~E IrdgE G< 84X۳~:leئ2qS ItlfYɒms`RK# - `mlz46`1p/8v:s>}Lǃf'$?@ GuMq: ʅ2 Hc"PSoQxfUͧ3L Y1j~ ܄@R0x6j(تrڅ7d ]VЗO{q8OBF4Esg4{d2EO =U=ya0V$}ooٶP\ee45 ;[]2C/vQW6)D̓R%0ΐI(md?z&^{rO׃Y)X7mA| ޠ r)h\nxUE2IQ qᄻBaxaܧ^ǩ~YCR(E2 |)wEyL~6\h?z$p*{fe_a`:e(b)AJY;?)d = Y$ka},,+_cOq> stream xW T纝GET)3K}Uz05>pcܫa.]>lG) J Nݲ$mi+WŬKHL ۸KüSxj5ZOͣ&RITBSQ`j)5zZFP+jj FQgH(o@"ty^҅[ޑ޷eePan1o}{rt1 ]31 j/7gKe9uP`׵a~W{a/%lQyF/ 9ouok#Pf2Yf< &Q ՝‡pȡ*9|@[5u~hg\b+2rkEan*vCxTY )?{IV.#4N\!n Z-cZ~9(4糃WXj C]n?q?տRr5,)Wsr4待':S6aV,z8Nd"~wzBj,OEF[`? 
#g޸ ?mBt܉䚱m`ںQ#i&yM8N"ZN =1x,aXǪh8QFՅznBP5e DAkωel.yC'θqRDDn5/!x6??Pau+ ;iaS=lΠ).iqvED&+'-`u|0V,Ji4-Xw̓5<5Bp]pL"0ٺh& #z䥋VcW_Tin ݗoD s^@H7j]x%pXsE/3x2oUTh25AA#^g%$L@jMorDΝwʺ+n1y$0ްDf,3OȘ:Ui#9_8XX i1aha H*TAd65gN@bmBN=s>` C=$R"C+I-OъB/HKWFSh=9X.b <$ѯ젚{w=UҶ nCoQRXD~A^,ECGD1K)0.#%g^p:t }`6FZOΫjh*z)MF|FTP8e,,Ok+Q\YQPۀp^I9*L*G:0@Z; /l ) &֫ӛW79vAh)09*̲| V0k34 ._'Y:$7wH Hh>UZ_YS_݃Ep!G]g{*3~ikuPzk)Il/&hAB=h# t0y;[yFr~ (&"{qktw7Ȭ">tyOHewzODbdc&p:L~g(: KٕU_E$I, {ikJ6s: hKܟ1tUa-Cku^CïuʯN1iOx(pCr'XM6utV̅E4+tU Zkڶ8[4Hk7˓yIIK"iF3/wzܑ48>wIY= FB¤x7oy6b_9(i&#-9Aq(QNM4~ \NGLCaeϡS<.],ei3CaF#Mf,Z /4,EdXlB$u:ݣݶ;>n|ψr4LBx=M0F4;vY$ tꫬi粮E1텩,<& ?DE>2O !F/657ݔ]7 bf+Bx' ;pc :ɉKh(ȥ#kq[UQ1pQQ[gŎ&mɫ(.LR@tbâ/4q&H_V/]4t%C5jmJ%`0#Cٌ/Y/y&Om,i)L,]dSY1p ̥؆wZMJ6 m"^/m@423M⫊ y(VȢ3Ն'ޠ!*aIJuI|xbNweǃN3 Za_+77A [9vCC!i(^XJHCˍf$p`yjf<"/THf2wlTc(gU3dkUy Hªϴ9'G硰$B 4[ #ygC0v"ơ_KF_{GҠ'W -_vǙ\.·~kquz+ cKWfjPWCbZCx/-FNi.Aڒ} 4In].vw "X[p|}$ ,@VIQIἜ{liOקu~o(S/[y[hĪB9|} |@u<:qIJq}#qcp2l69%T3Yeendstream endobj 593 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1572 >> stream xmSPSWI1"W݈tHa;㮻 )~)1 hĕ Z4b$v]Rw+PRH h]ǵ۝i=/00wg=;=@ADfVLTZ6ӎ(HZ,?G׋ɼՋ{22Po,0<<M0'aCaIBI鞝¢]ؘ(>NG%&]^:}tjn]/tKuEy۶J rtٙk22uiن_G/Wy'{ϘZPa&!d$$DDMH #z6YG$L$ k׃V9^es rG;B^.2퓒,YD^Q3zqsEB;XujVM㲴>u `{W*&'xO~DIC[}뭖ڢEv!s5zwoF:<\8VqLy 5k+>f1CɉD[b&u1>N9`ZaqU02Ǽc TOIj`{Y>3h'{{ѡTȖ6TITDt_-U3X4A; }g :A>[>zشw}ieu=pڍ{߭հaq'<]Y9u֭{ aOŕ~^p1!E}2*9š).!=2]wҌЮXW~QyÚ '%ܝ,x _B%EeK_o\y2Jf]vL< -{_kK.'I)|<5~">470dȴV E#FUcv!o`ؠ aӋ.w[dĥL]4p/gkVoH` f@'c:" <`&i VZ¼TܩRrm m*dmW^!q&@ l ՁTv3:4 &q_ʤp`ږߚb˸-͞7IਈQ< ;"i։IQDΜ|(`_-PcŸJ1j "pt[}sZ\TI;-1+?(x6 K6fH>f/*$7 {;B鵛$ Gp{.3]Pm+NobyZ TYXuPXReiRpy:(UYA GNv&8ڬ*pCk}+ Y,ڬ| snTGC:_pT!5*_'$ endstream endobj 594 0 obj << /Filter /FlateDecode /Length 659 >> stream x];n`^ ؗ c7.I.@Q Z.ř)H0| eo<~X>y^mw<>x]}鞿o}l˱_ǩ/WM{<ڪml6x"qM4XMXA3>4 4#iS NǦA<2A쌧AѠl6x"BcEId5 1g hZ6 b2jk&kFg h4&QиDNC9EK94.SиDNC9EK94.SиDNC9EK9ڠs :p.؞k :.Ao\p!`A`\AB~$:;:;:;'ZJr)ΉΉΉΉΉΉ\Jb!$ФDIQB%E\J& \ &p)`XXHi)ť%o[EoZk[%o[EoZk[%o[EoZ\ Xp%`]':,},ڞr_/W5`Vgendstream endobj 595 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7500 >> stream xzw\TʙcWN3bb=vł`EE}(CzCWQc2D55E117yI6~{fha3gk?Yk=k"JODc,|}|Ιe_~s0^$LM¡-ߥFY.rh.wi4s \)=7r_-dޜ9g"KLBM6Wn砉l&}eMw}}L{:4nd[m>&]:z9 KQvS5kldl!ۿ)2U[\Yxznm裉'-Y:e`3fasΓϏ\pEMPjJYSQ64ʖzN}@Sv j'IR({j5M>̨9j.2SjQVGfj1eHQ:1KSMKʄJMQ˩j%5CFQje@HqXQ*xң")A":3]IwJZF^(:=FrRbP$]JtY jj!WIa]=YUkP-]Z$x |YJU`2dO M\A!!<:kp)IN.ǁ+O՟8zm|sKE[x,;(֌W'Ѯ*P, 8}kiͪSQsoS`ZjQG7hT ݏ$ss4OCTc xB04u Jx]UAJWp/~~?t@7f[65z[ܽi1'D+ B`9cO 'YL}ޥE&Ĵ H%;41&>OIB)R;9b8l7y|Bm1*C%|)Ќ4qC;ϾnL)rz'c!D?~lW=?ׯ]5 xRtCG~ڹrTeZ Bh^1ByƇPqq5tXgӞИbݫP9-ٹ$ mBr{2D3TgMכ:xwA3mՕi/2VG󄱜%_zJs-LGMHN'!sx C o=Wxl4);~!t:;o{ HhVgGmL,׆!\UDXsS-X>̈u=]K.VEH:|w+Je(]CU]]!RH 3`bKcfk`T{Ab0[,&[郀ۻJ(S 0C\g# Sw/yEXZXso4$O+Qq%R.{v0..s?eIQK2]M%RJ y<'dSJQԑC 2TƧZY"f%X+XX,/~Jrm%y޻d7?([̟ڤ =b;m\:pFIIvATnVMnf*3mFj<5SrَKN'Q 6m[WvY]Uu.*^ GWP- su@q㭜 f \]!PH>%#)+5 /F0l\2_(®\ʫ}uyWjwWo1A`L!`у~iA=c[D^,ސ}(r߁@>&r KL[~ٻN͝>0Gh}l0|(}ߠY7SWRD*dkd2Lr^E-rD*HP$fH ;^4`m=r+axSA@"%ǤȐ=YH_y JnT6ޛ-x|<`?iJW;u> Ő2'/Ň.T!EM|R֊ꒆ^ddMK![ VsL_`1n` o${/^Tc ~"$Fpl&)ӫWn7 UQ-I!;#:y{}^J击?kEJ=C$ ܊Q$6u@-u35Ϛ;#WEz:E4u#UİR8Vr41/햰VưʂBE_u|bJ %x#?{goe^=u{v@a97mꓧrkKr=t>Z֟[ߟ[濩%8[h"UUDt|/lT>.%1ʉ+ 'mq߬zxM {<" e?Qqy{z7F5%(;lï4pXI Wo'A]x j v$Tq AUAAUA UU ' oYΪ@rez ^C7]OD:^Qhs+:\8wӃ*#mg;n/p )QwEϘ >[ܐl8}[g'H-K-M-mҰK*:V$vDF\7YO9LK#̢Qrws"EE0]jG`M}G! 
iWWHݠA]29 ks0ÖnN6?0 :كJ#~ jЏ0\Q~ͩ< 촴%:mbng]Z67i,Ө47oQ<hDuc-(W^ 챤](ǂTiPx2X~jYgTU_*~Z&ƶM}6[G{JKP#2G( 2#}kOe.u0Ð 5ɕDMyЗ8[<< a4PxO%v Lϵ%H`n?^]fc8/}=ӠA+Gi baA3,0d_ yjYgE;o6ۥxnԬ@K>!]'ъ?ZN0}kۉ+Qw{*هn5`|.QUyٗ!282.PEJee*8:]E;뾀/HpGQItf"I@L@t<8ּ +NBs;2>&1]7FSu  הS+tBI(C; Jil-ݱ:y7`zEYZYA͗7BL'pK.9e:%qd:#JI;FW%@35)aV}>)=nZt<]?Z̮/yM[Wj>qq ϶:ycb^lީR^ۗW-7]OH79(D;bG#EDzpEeR/}Aƹy(yL},MAXP/VLmL@2$swSS͑q,hiA-JwPb* ˊN]}2Q*Rَ޲00sPhN{zJIs~5E +ߴ)8 & x8ӭUMIx2Z[QkaUKQMYZNy@oOFiKV"-dLDqX顷A_Nqe?^l>Hd-p1̸3,Z#j. .]cl .OdРmPqhqXEUvqa_n]@(Ǹh jF6OCi6轁q`s} s b>;6s[ ǛcX}γfh1|뤥t2Liun #Bܿ`bϓ6){PĤuSS4%K}_XBQBL|V|H9E78&׽$|ΞMG5b1~GBt[λ:xGI<))I0l ms`eYIffvJiޏۈ2+Mg\IDIxtBbB<.'=C\ndAd!0YO%YY|SC]IC9QQ=j(.(nBegzRxPZq[ˢe(15RCF(d=f21e5f@Qwk9,A o6+aR\^`'<1>(kMk6rq( ݔsŗMk , =c^]M{ ge|'>Aהrjt`jXXJ f"\M$"Z+%yb$RK/͇rj?Tw4|ђ,'"#kʴlE"rKTSendstream endobj 596 0 obj << /Filter /FlateDecode /Length 665 >> stream x]MnA=@B7^$\`"OUagET0#7Oz˧|?nW|máz3bmsG>t988~jө-|fݔ ')Vk zSP5d)[]SPwcSPGCSPSSP'cSP)T|[0NTXM^`5y^5z V5XM^`5y^5z V5XM^`5y^5z V5XM^`5y^t8 t\@'Ёs@t8 t\@'Ёs@t8 th\"ȡq"%r%\tN0 . 0 B 0 . 0 B 0 B# , &$ (( hB(q!IH\?EHOS$!q!IH\?EHO|R3J(I&%J()J+50 LR$0K¸J#+`-yނ-z ֒-XKޢ`-yނ-z ֒-XKޢ`-yނ-z ֒-J"+qr~L.Um> stream x]KsIrؓ^]Ǭ3;#f (= J#UlfVUwUu7>A^2|_?;jq-O~>x{{ h?y8: /W/zqv{rKuʾ_6d {v]/ MR8]Jbm~pw]Cll6}uzfR7S}@OnWu7dATLmf$7:'5F$Oλyޏ*08ʋдFlh$!E.#lO=87 yא ȓ1vi;^`H -bܓP@,&je2a[ Zͽ.ҴB>[#WE%AL db vbSԥFm kw11R-o-"n'Nֳ\+y^Tuݴv[+&$SpRrO$yn/J%4ihnGmjAQf&` 5lf;4(%۳" ZZYگ_sVYYx]`MvSzFl[Eټ:dé+i8n?60og /^Qqo 8k^ߝ$z݃rTJ+J}+cй4fM=>~< iƋ܅X:^ˁW%84K7YO6s |E'N*! t{phcy8wܙ R@(P~^6Wm eu[2΃l8zAg Wo%*V>۫ ?" ҨF#ǥ6[fխgf0W˗̴˒A\3+$ Pr&@}Ao֛4sq$ٳT Ud_`G̶|ⅴIo.ek)7cV+f|֊ Wˢc`q p7/'lK &R\CJZ1zer֌HUD#Mnޡv+1 ]\?䟻Wobbu@̱cVeߧ]iD8o=3E쐃YW(pD~z@V EH&j*Gxf*0)3LIwQ\Uj ڗOF\@n>m vLg5!/08oM39b Grj!.yGXeVL\Ҟ!|:]ǁ " XPQ^~=sʜZWA4ZQzy Rva$CPK{ vXR׭]ˌo>(ICc]XY0G({gzSDH?]+YsLUt(Κ=AvG> ]ymO|7A3 $sx$Xϑ0 ˯f |5ZYO!Q@ `@~Zt> Da3S`ȹAWeioJ,k qiQ僣[fhbċtAD MYs+w {k1i1zGe;bfO9(."MS@kDGNh,=2JbMbes{ yȗՕ1 @_eKcFVi\HE]{U"m]jVPu}0e1W˸2!>Q>ԒxZuDaͪMenWT֊SlCLBD\G!H$SLR`PLa95t M!Hżl5YILPy3m^^fL[!8IWv?,gm@|!lcBJ|?lf—-d{*dYuPJ0MӵT|zjc L ̽^IqP%H#a2nRh$qΎu.vq5[2x Vܶ . ]+*4'd$=! 2?aԙ 8~1[ҧGAu)7z@0aV"8!])j&%]I}50fOhٺDc'%D_\|r ^kHIE +Rw5>per `}9Vȗv<ƹQ$pPYOZk=TTU:jBMQ"8C+d"1UwCs.GsGXƹa4o*EXQX)֢=ؤ1Cq!rNL^N+&?ېS,aJ;?VQMUyd=^r7@q}N92֕ڙ>pUɀ v(@DUk+#]e` >uf6!12h]l (k/DG-¸b9]?Esm_ҫ.&>onV}4'sN*BF/Dl @7 ‰OMy?O눷L#C+y d^2);7S<:1FCuͦ'\ڀ;Ơ&gjc纻 s8ǁnR|nu؟@rYn*jNINd9kV⋬JG(Uf.nqPa/-[R*7"'?+ts ~$A=lS#$yUtEOL<=H }CqݗhfD9w 9pPT$6?lTAb֟RLr JߏV80x_f`@eaG fБp}DEa8mM8HE"ǘE+' )Hg`7l0 ;) PTJ/5殇nC)b7WPh(T 9hfyZpGgY |CӀdH %t $ mkrO|~yƺf4c`7%U%e7q\Y^'#$r'b-%/d@{bc$*ɞ3*hAk.v1֛#6d>7@ǧwyCC5iA]'GoEVo "9ZW%iO-ۨ°ʚ\0XKwJ!\=f]C7qRkc9XlcX 't늀O FѰ3mbz*#QMZ!HphUԶ.:$*nF8\UF5K C;ǧs͂>xԫlb+~$xӍLQ nu"_sv*^6BLaO6Z0ԤpƥzB/dc2a<ĤNIq.vې&F4|hv\w]Ƃ=i?\oEMib:-ӽ g Uk)T_UL2j|S MWQvbmo sYmŌ/~8MŘ3j( {Ij/V7DGy0<8N|(G ֦>(su}6K]se=Q*}>F{G Yo75[R"eӲURA!xKFP1#S]g G2ǡ /0=be@]~R`>*P_NET)2$.Nyϑ 2֘0 H{;Y 9bE@jr uQ]ξ$WOmNs<,Ql{;p^*>c zPs4ݼTTV_3AtT~#1p&,Ks+Y3W&0/Ncv LkU*x%pΐn*0Z+?0xWSJsu}qhrHgoZ2c‡}ZR۝ђTԔJ~旎lЇAO8ϊ$y6᪳8.EPzHu$xCL3>#΀zKZ*D7vUoCfM%}~k>RMԂJFϪݼP4o攽N墷i'sAnݮz8K d(f5t-`yt%'[ 'ZӇ/Z/IuʤV+S>mV/"tlL_ Dm12e}IhZIwX{<={V9 F /'#X20Q9 <6xB"n؅ bYLncF3&mkX}ޜ8Bu$jUqeVFG!BxhEo/´]]=ٳBx׺WbDOS)ҀܐْQ|IeG']_+e)ʱ)&Ċ\‡.fEx``Z? i *a&t]*,*n|761E1E7ߕ>}MJdt":qN nwendstream endobj 598 0 obj << /Filter /FlateDecode /Length 3818 >> stream x[Ks7K) 9ʶ]R);kAJZt 3 )&703=yZtZ?|9;j=)Wj?O*ӳˉLTBTK]T\NϖU񼘝pΊb,J5-+r3;a8(r1;/! ;FVU_M2)5(R*peŋ" IIl|{\'/g~1JN#Y$kKR܅g?Ɖt 'D’y윚.MWL)INP<'FZ+6=b?3A9l J;GfP mɯgr?bƮT M(Oș)h >0ԦOƔmF?7e,|@L&$9SkzT(V囝^r /" Ay_7Wբyz<\r_qJ)NĹN^"WV6Y 3{J c9H6<8-8WaЭB<)E}ũ. 
S#nzf>$yO(ulz4ۡݖoڏT{t+.mu-;D}Y]{1 F6#Ӂi "v8 *I>A:&Gbyzpbv@2!,qYpׯok`Y&IdeiQ4>{+a/nIpm! 7{_\ gw;31Bh:KomCp N~*z>H]4 g@ ևr$T&]rgIf;Dm%p$ aKT:De `|{1#X}U6?o? 3kw;fhBp)$Ms@LU4UJ%QIeǿURlCKHZ*hJ>TKeE#u{]?JK1kYMCpdUa$-l7]X :O}G d b~ZTQJKFa{dM}EPWu9xWґ9 )jXxՀ_Ǣsc.3AL6Icxy,cáճ!Ȳ#LkOT=jZh)2KTK:e)i&EnBr+:3 Ѿg6\:3BE`Sx0 _Ce9NJibjqˋWg(),\lv7e\0J r )簮]fU탻3m dsSBT *0tYh5u(D &<;pn>`=T}IFy#} `4 @~HޞM^jzڙmJsL"k!Ob2v|2-z; y~pfFnq3Mty~mo~|F.5/NV{i;)7T] dKoa9 aq>Op:dQRm+u.9צs kw+K4iY Njb|֔,w8JHy3Ce tgqH qgɔ!9%asJT\QǝhSPA`ahsL. M-J8 D}‘V~SpWˬ]&[Y&َ|ޣ!,LUk/`ŴE-Ń[Àl[ pE%*мkciiNyuhBu0ږ,/iG*{M=R%:oRY԰8hx3ĶZ i.b,yrn (W{[fđ YJ[`.vPn2 bf7{,HLԫ(@˧/8~JX}A .e$P9Kf=\Д<6?%2N'4Ut|/'z =R*D=׵NE3x[y3ZNGt ~Z eFJ~#)rg ^}Z ȖkRfqf:$ۇuUniK plz@ KחtJ:07ϯsܒ*-z=NGU]"H1  @\1>Wԩ/|<V7Vۏ8Hl8޲Hߩ,rxq[P7f[_ڞ^}>B\JOT!wdaXT!XYq5U^ RU>\uˬ}8 ^8bO >&$if] " IU\xg`!&$O yoBCrWFGۀm/#wI6V~{37F$m2?+EB`]v{K>R{gyNl/~PxçeN]Rz *eHe67PeZn3[>vM|kRr31شTa{3&N]O3M=|g >U\ovz\DG'X쪹W[:8>{ [$qA,}>y=p}S1)W3$+j*km^݌#yL̝wt[r p-j[ c )PƦX["à`*xrv„bvrڬI|$?/g8XptAxd!eа7}v M%ڢqG|i!#.uռ`BQJendstream endobj 599 0 obj << /Filter /FlateDecode /Length 3717 >> stream x[Kȳ/ʁG0yc)lN9WVKqa $%+>f!\INR:,̣픔tJ{|21]n'dPyܮ^NUiӫ7N5V* ӫzv U2~ `XݛY2Ez7c4𳸵85DSfx>&G )"W$Dwьf{kGIMEv9af8 Ϛ {OA$P' a.OŋT{7CSf}C-Su G7ȉD^TUj,m4][l=GT> D)1Y'!ƤpyXѾsɮ۷5& YfTd.`59yMQ9*ە3/ O^nyڊo7?4vn]59b7& D4ai,W?],u*?vK"9k"zϔ\?۳D9̉ Mh k* ʵdf]@bu.(ۀjfp=n JFvFsg?pY%lKqR(}ہ|޵7Mvm9P?]H3g_I40u(=sQps߂SO> MQt~jr8ݎzP&cY'7F/I\1R<]Éٓ^CRIGzLV lt긥@?蓵. .!tRgaKz#]z_Sz ƶD$tQlq)`."ٛW$yinkt߁lC9 :NnHDqٛs58*( [e\\L-ceI)X*.%+-Qr F^6#ŲytsLo8Բ^b9 F / H~,%Z T%HsM (S2CaR(C-YcYIzAٖbmnVgE4Qed{M*G1a`Rq[v#8a7FUxݵx>Z7% R!Xٌ\Vp6cyhĔ iH`"<,<``”53Dxqb:ei|`]C12ŏ\IC3 ^h|~\̟hdbmDJ 8)lqN=4AL.Sr-cSlzyi{.BV< ?$e { _9 . )ɱrYbd(] A5,nXT5Ɣn֕X Qd > Ooֱ9'TM( hGKmkuI_ĪK5\u@Ayd:F[`WB%W >ӂ薱) ,5Y)1R*hHLA&$H0wZ7A# 7}?a{v"m-6F~jn&FE%-U0$%w!xkRq-}`;;P]@rjOn8o3dMo9rQ]O`$Ave\_BsJ1ʌ5ě*'ZR[_@ZcTʗ_RJM]jJeO4QW+ 2u"HA h;4QbQ1_Lϙ H1Jۤ:`>DD5_?Dp, ςs$dV8asVifGBۀq䏂k'b|O<$tY dyJ%B=zTI-6.-lھ/IEŤj-c> nuy%E0f~U_$܍f v9őe5*SDMw7R] P1GQDGz AmYւ26P'~gmQzAˌeTA ^{څICӫ+X+$zkgʱ P;kD,K1]̓#y\ er-u 3˃tsD־H}]/;Ǟ.\U9hV{UPCR,PVjHmm 0I;i$9M㪂!`T$D.ZX) Ic$:9a8dKRgp`a6611Cjcs"'~n Nendstream endobj 600 0 obj << /Filter /FlateDecode /Length 1766 >> stream xXn7}7AKYI-ФEddk]i@gx%%A[~.9ø*;[Lj|50{~fH M,ח#oLTRVv.-z=:#?ߕŔsVZۢ*+5,. E-!s7Ff-igiOm'HR)DU&r&h6{/ BrNUάg(Y&$LF= KaAXemV7v8 ipm),E bTB(țod]$(_Ou{-vsrrbݟy^_FWpgtBJggr":c4О='3xpy#e'F/&X]nžHmɳd۞M@C]ٷ]ݜ6ݮ̬&?>0EsѮf\H vNS1cLnQq($bˋ")=J8!>UHʽʣ*:dhW4)2kOT0*,cKX3lGXǒ)ֿ :6fǂ0NÝ& 4:Ԋtv[h k{hȒ[{IL{bR^k`M97Ԁ>g!8Ƣ^]_`1pw*8N-1(L).5H4nol]LP)\&7aT?p%{sv+b;%9wNP c px&8#gׇ2 Џ(P}dORA< PO$]Ӂ02kg XUYb~7VJB~z!2\klx_XDAJ%ǻG>JS()~+(,?d`R3j( _wYq\>xdj3AU&MܸBJ Ry4J-_<^h4yT:GSǸWa y\e)Iq,,wG7|MldZ{A~v<̫V-XyqM;Seҕ솏ק/4)WtM˒Tj-ͱJ݀ȃ>#^Yѻendstream endobj 601 0 obj << /Filter /FlateDecode /Length 2637 >> stream xK}PmXM0I%u8a,iW%ʯc[+p-{{ӘlLl=K~7O#z[wƖ2j<]GLqRsGɺپ8@YHׯ*kK2{9hM0:ҴwhQ7̻6|/ i*i5RՔj2 RYmvKyqQdM" mR7.* "3 `l<Z\UAR [;bRHZli=R0ᔨ;&j%hkr]P * tJY|rn. KO>+Ђr3ZSD%`\*&Ns܂pQk-ʰRh*X3[\RҨIy~b<*Y[ŃPl AMB!l E ƣbk &ܷ0+)xQZBp`OEDIqHQC|a./nۭǃ9.pW<&a2Ref _dElwyͺ(5W`*ÇqYP*Т6MOuvsQQ7OnMfpBØE/XKO8!q{߬CȢ }>#wn]}6t(7jvQS3ǐj#84w`:3JݐQ{]|yrcrtS3SkVDy$ \M.ܧ+Fa.Ëjh}y'–7BH,Cǩ0m#~t]4gt'gKbSOVICߪLV|ycZ}2Vq#4F({QY1OEprdڄ [B粬uĵ|찏)꿫:uʂ|1)Ae}\G?r}fVb(or}\٪)Y^M8\XST7ᱧt15D_.x. Z_H!xd EʁX&hb*Tl6#J^#;+$ XZ 6jP_E ֺ+ˢO&=2GYI &Q <҉#,]"mCSgl@֚vgs&9gה/=[:ϯ0R}ϓw8.l}[0>k bށx6t(B ZO0^.SHx$:u*('8*˦Q/ ,8]P[ؙ#ϥ`Kbއc3> @>Cp'5p(U!$#>~~?7zendstream endobj 602 0 obj << /Filter /FlateDecode /Length 529 >> stream x]1n@D{7R8A AEnQ"Qiu|y.}֟zs۲SwYӵu~Z7#N>}\9<|4?:/5L">Z`'sP-uG CtH i/Eݙ(m2;g4`OC`s`Iї5Z/> 4}:gf9oVg\a7sfgWBdP߄`D07! 
MFC}P߄`D07! MF;:2]\G+יt:s\g#ӕudrEXѵsIG#W+W+9cTװau5sXoO%qho7 XCAk7` y! o5 xo7ŕqxByG֛tyΗخ?A?iendstream endobj 603 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7797 >> stream xz tSe ʮL$(: ˾mi6i&z'ޝt_HY"*ˌ3 .9á&7}~oy:V\+O^*M͚9}mBRnZl6Y C#R>mDj'R0b艇=^2PMgs2Ҥdy3gΞ>|.j2jጨeq9Ҩ،e3VΈz-3(3!96-1*31j} ^Y.jUV{|Ə_͑&Ʀe%%K 7;cd潔ya梬/^~5G$wi^~rʸ%$NZV.e}ꆴ 5"ɃSdj5ZM=BRSԣ:1j=zH=Am6S '-"j:zAm^^S%Rj6zZNZA͡VRR ,DP*Q#vj!p \hj>Jۇ3Y- |W ?|ψFR#kGՍ11~zCLI'<3;Q;OurL|k{G>ry SQ}Zh{)_ЗP-@Jp7qk}jpLzC %EќڢPbFf7s@PZܖ:Rw o#F-? %t&.5aS:rTx2R#G(M(<=$@]|ZJS)]yk! [1]fm]./`v/QJV. ֐xH&UAUe0܊phnwۡ鄽\ >X5P^V~('_~u") MEkZ9*`O )B{I ֢4\Ys ߪ~ >-G0P7`+J`poěuƃdQ=a_hģiu؜x?/xloۘ=hKh&g &u6 /ւzV RȈ%B4 O t1) J%]h~UŢ Q<$>)sy hriX41|H~v{k;:b]δ")&tU6v"0ǙIn:Wޯ8҄l˵~y=g ۂv v SkY%. ]<#G}Ջ,eAOg pI؝e4*tzr15-PʟzV"hTss9`6e`Hz`jcK#d&̿@4٨d~2=PujC$[%ᑢˢf*~~l6=$v׆ӂ"1*]6Š Now$"~ p,EHG-œEjow?tL p,kh Aȼ $wۨ!JR Qx{7QᮻYb*☢uűe{FD|{ego)o%(Hű7w!H6xĶ~(h lTYU ^0-E'k%wm9{E,FxĚ&Q hbOF[&1Aw:~"[A@W(Bf@.7G1#)ƫ*4 4w~nW^vYqi d/Ih5AYC\N 57Q,oM>@5f&%jq3mdD#P. 8Ә 96j:T&y\d_zZ,cyml?.յ]ծ]V$G+c@L0/X ">ѐQgRw?p;{i$`sn^!{6Iݟn|gi5t" ufh,Hˎ%zԘ,ّD("MiVUgؼ~7ѐdHH#AҳPHB~$sA3xڠ-'j2~HϷ-/plF=/? r$Р2f.̟ͤ+~/1b^Y ͚*η`U7jnl2\ /VoRo(&d x^ěҀ lrS n9xE6qň\7:3oYG{Uvm9}rIh=ťYK U9ڮ;_Of74 #w0&[w0HnHjQ fb ,V _j #ğO%qϓUlQj]b?=֪p[v?z8- ĩ$~p𴾑e<1} GPM=;AvmCv dv ' )&Ug7+CvQSB9-1q%9Aջb/ _gO(k,#79NvSɴ^A#^tY|PZ?c18v x*vi2T *Z^o aF=D=QtcIkxoV?g؃w ׷]/W,/b)14zTߵ*5^^[6p8Cmi;л??(WM@"6zN7O˳{[N5[/Mڐ&ur#QćeTntmOoy0Qۭqcxlc, CoRWWexD9wQLc?FPhz_)mqhem*(d +<1ܓ,cK5&|;>hGcfDÅVKV rVM$?UDŽͺ;(=tSQ).*geɲ55͌?;)WrҚs޿kxjNJXuLƔȊS ޖ|0˃0HlŗrDRiT^r<]R6AۏbT @IgW8(s`mX̶Û{G{vb٩=$rBcjVcuX[ IiVG&j- 0GO_vI蕠1ūa@E?C"“HI*rš&)rVNӧc۷L]{\^ԝ;t$Х ]@mv;쌼#oߒa){6)j@ q|`X7bXHip>ET}%^a#tNmxm!S& $I$H54ڠcK4A[Pb $e$6ݍW&Mu4_e7Xɬ/l2~6.ʹt-awKBf"bGQ:*Ms-5Gz? .mL ,66NAf3w.b q ,\{tյq+WbVm77wĐ^ǵ~?#%A'','gz EX߬ XAZ+f֥o4\}7޹w?=\/nޘX\[fmTg6̂ ӱͯݳ'={y1o>Sfb$bЦHLZ)i-VӋ ` Mpm,od9&Mpg9o_ RTJ+$>>ʧ7ͨ#iX4D6D=9ddq9l;~Y'QQD߲|N?0p^ g}1.uFPK@1a8){~>s=!_ v2qdXSnM jDSh.t?X`j}h֌/XO3z|}J͹0rŋf`Uwz aO5ygT\6FhR=ak?[7@+x~ A&Lwz3KH>DNc9م 1}Eb er &`%ġ:yDS{l6kdx\Nh^P:8`Xxap:nAJk]' L /Geh-/ ~'",BD6g;rKL{8+= HHs:w~8hnLSH;,Xdl.A؞%S:Z_DkU\(`cu;qy[bkdTOJ2bN y2H0i 56]in ꮭonڍOZ 2YnHKІP.eO̪^c٬|,E/%JIaԑ̢NG GMy}c/ʩPYiBÀ۴)9?=D>6$N &ʯhq7̨0yߨ}"z=gYi,#y7߹AŠϿy N⠾{6 zڈAf(8&|IwfCp;: n2A-׉qnP (˽~AԹo<\.)A!!k y^H¿8#@71os!'vcsx\90܀GԩPzZQ 6{;L!B21iXB\4#Ec]6C)qބFqpP xfjH} ]o sKJN,۱rzMBO.$gE8AXqwv""tԵ|Vi6Uّ7'aQℭ1dk:S%;؞D=ڌڜ&fTn(04w83l#CVĵRkuĈ:kamH?%Pendstream endobj 604 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2274 >> stream x]{PSwo^5bö[-j}*5`x@0 'Ĉ* GTK:O"ns6cGR".ybżkab6YUQ*JEHw~mMQL=u.,1<$/s^@QkuT8AP+?Q~T.Q(oʗR2SpyʓRSOEZ#M,\8%RI@1oq5ceD9Jg E&3ED?G{(!۾9&+C\CGBKWΚ^8 ]vUcy=lf,kޥ'u@ ә"C j[o>RO1yS]M ]FĨn_5 SE{8@%.TE &\BD?DR .ʩ>)2A?V#?]?d+ xRP. dСc<Ρ~)aX(ߠϩj℔|ka3XpFbQ./O$4B;\y=iK8X LVw*(clT{P7<<,zoS[ȷo]}-1[R_@.WTQl& <GgC~>gWX`=97BU2Ҵ\?7|RǚRrrYšWѬJ#)gd4;#iQ?fp _ aţ+tGaʣ~lHO}Pbn*UV ' قs_#rn2 uuZ^?=DI&L 2z97Mm<T *Sɰ}ʮ4Mf亂Dzt}h0[[q"nMf8(pŲTY" E2  h>| wc1ؠ.Q#z˝):ٞR&.NCE/⽋bW#ík)[lr%zsAQgLF} tv-&g.ꊫ%8 H̰2Dbn $D2|!W,yg7r{=~5S$wd';k'om"^AtJ}㢠nѮ1%fY$ G_96^+O[LYPrMW8sk)jV 1 wFW .k$ æB"&R N1UNgUΝ? 
?13rZFPpԉqUy\s~9t&y"#<=R'xɋ.vmAvᛏ>cǹm~xwhfu+-违euZjڒ*\kC!ÌwrmiMQfr!/޿n:ϞVnX\3;yV=iv怇Q~^ZbCmZ{ȇ_X|}[e_j;܆endstream endobj 605 0 obj << /Filter /FlateDecode /Length 412 >> stream x]=n@{7R}mE#`a"0rb|'><˭=Zo^?{Xߖ?ҳ[sz9lnł:kxޛ>M:Տm(uַ\BȗyM]1c>ҏ]|1e8{cB8 .+!Dq ɱ 'TY>qIEFd7Ψ{֬+q ~&<(T2dB% &L,bV I;ehD,4-6-p5} &_k5|9S!t :\NAGAWIgIK)x!|).CݥwظGCqOo[>7ͼfo]7j ӷendstream endobj 606 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5880 >> stream xXtW 0="If !$I}$$``clc\$EVo#e[WM/@ $Mgmfߕ ɾsѱs̤U+^N\ q٪WLOi[ g?7-ASlKv^AEbIkE2,]+rVn7k`LD,#^ $fD"1XA$VG5Z b1FL' I)b!4xXL,!< Fy,B"Ņ&XAAE|z!UCc<-QG_+7b܍{2co7O8D4Y^T=7#{߇Ѻpdu  [iv70ԋZDeW_. MWg1j $cQ+\Cc_h)+uA-]Py#DMn`yYEJJWzvXX7uJFdp>@7kP7Gf^d{KzPZuJNP6e)z\:coWt͛f]6JwbC{dADU^@jF5:)ߡ7(*.3jshܣ }E_}!=;q`C dW4ee/]G+r@ 2@ovx?.n/a^⦡+IhJ}H'Ag`6X @eZiUZ-c2 n7[oX=Vu:jZo RJfFr?TvK4qa.\,e|HiVzZ1~_ҥ PUG: v5DH\n ߂q=&nۙfԁK":7"ssX_Dp Ȍf3 }LvJ#C4 NG MH3qw B.JWp>Q8eqYPmn[1&ݞ!p:%\5XPGm[M d8ƢC&6 d7ZŸhdJW8nWh0m*H,]jn8~aS`V<rQB=ȋڅNruwްY U$џpaޱ/c{{_6SoF^ƿn( Nr\$BkÇA6dP1TGq=PiDBh }v\=:q6VmÀ!@Hj|ܬey(WA}TU0H@68%bQlܠ\6Dr\:`ج>} jLY} x}=ѳ`sq|녫7@,~JDU` Vvph]*_a/tnc t6Y04M2JU׳Hn8w/Gs<nil),R♏ЏO_~nytT)/]tV zɞ~G9g?VM:! a-Q .^`UfXM-⦤KY+ ?/|d0q0>FCZ1udvA3@>?.E\?%5A)t\]eX;~]PBWϿ ck5^ &ـ;wWPboR+EoHG ֧HK ~{-w:;\Th~X#uF͉4a`a LLؘK+`)-ZkMtݝ |38nņ_BiFBQ*2έJK s[ΞhZ[] 7|+ZD]Ds`+(9f|@;R2*)tMTd4*^l$!~$Be2h R/B3q\ p㋺SNZ"Jtet$H駀5u|zSfrNzFa*R֒c6[f2xK@CnQ =75jkq$pz= ԋӸAP䪍m}o >onܖnPlr[o?b;s浆e$l(.sWzg;-yF4$Q_ (JF>9cl0[9oʵůDz,jluVD_]SYttNn k0Ye*8wdz/_Ț}glV 6GU?a`fk'8N.vѪ[|oapu ̡f%Vn @0e\LB!&w"nDPj}]7Fm}_9e8kU!:rX;T[Jz\tSyjȷ۱9{lz.'_EE>a锊gW97=o7w:'~W2Ve}"eB*/kex~%ږΨ׆|HY9g&|IT_A3h!MF~я @axrʊ̩Gw^ YhKmhps$;)y ;NQ3fJ4b3J@(6uX(P2AKS@)&*% iRyB 4UKض| (g_x7|}sRM:He޹o_Ult/D|̧-Ί`9ى*,5(sg7>ΤѨ$(Th_(fcs8g ӶVop(#t>ViZԟM&R) sUM'phrmvҢTީ@E '~fڤhjSF(1Psؿciie^2Þuy'3#{\X:_" v3pC!q^a~aiGs ՝'.SD/k :9 dO#g~oXu H65_Cdvx }/L'] ֩wy-*u=Cc oeZXb6ҜVɨQ:z(&Co eQ_b-X \~6"-dJ~/gm~#&YRf^#dvKQߋEeFJRj &?X8*1:]8}?Ǹԫt'w]fCb"@?Hu^:|ޙ۟_>$f-IUTrtrs &"iy8Epk8-=|n_1j{{z^J󸸊er]!ɴA΂crT{ɳWb{Ldi&>20?mfp25T&4wED&pyoΟ/USj OhnEaXzޥwq~7MYQ_2H&}F$g7(r-k,qأ)iSU^{Zz{q?Yo%u©Ukspl 'uBšރp#;sejM9e끜rbІFƠu@Sa+ cdr|Nk]EBȀ摌T~l4R~G锓Jm P`,sQ@a9-.h<(vLeͱ(*x[oÖP~w}0 KPFdӺNﯢ~Kpeҟ*JxoFb#̨T)E3 #RxJcӋhsni^EvhU ]+B'cLTpn'WR T)M!+jN8=-Umz [\JwmQ;ׁ鼌bG4&繩t?~Sq\!Jb4ė(\Gk)%8?CPl,pBБ_[+.ܖ\^BDy^TPovi85l7ba->vŵ=u[=nA/_endstream endobj 607 0 obj << /Filter /FlateDecode /Length 236 >> stream x]=n0 FwB70Mr Z%Cd aЭ6.5&jLa9*˿ߥs* X]PXGVÝg:n*Wn&Q "<1NYsf\f/:| _=/tmʮyj^Z7U+uendstream endobj 608 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1487 >> stream xmS LSWk}F`Fnh'9_U Thb\ o@)q*>6Ѩ3sƹeͅKqYnKi99l9'# qAe4"~H&5,pة ;l&]IE==#cƴi>WŦMW/l-3Ze:ưM$}Y=UuIFzTS^6WjתW[X^T|UAћOߨd2ZJk˵z3 :MchM:s^xlЖh,:\10Zՠ5!{{t"x{hV @zI IXOl 6>O 1D!&4,iQCa VqD+h2(q"BU&`1WG#xD0w>2eA8} =@-6]tD"uPTZ ?읳m*lB Vbnwv_CĹ5Xx^LS."1ZɞVQ/ =$eGCD $B"<4EЋz?l@alLd,"47Aʦ*/ UFN1qYӗAwB:[g(+~?t= z 0JK`G|c"QwإzNp 秵 . M/ltm#i\sϰӾ& B5xi쐂ʊ;meZwr4zc)Y zt' TQB$$xw T0R* L#:Y bB#pkPP0DTx.hY㯂:( Ⱥn,buZY`^R_''::r7/Vq T>'Q@JRNjAS|mFPڀ-mS⁣:w+#lGRqRB(g z,ѺVMYl>徦-*ybnb"\8<Վj,^*] H  qc"jBՙjƃZyJǑ4Z Vp PZ?%X Ӛq*%Y;N4W?%( PJn<L8Ēcdњ׳elp2Ykpz'BHendstream endobj 609 0 obj << /Filter /FlateDecode /Length 283 >> stream x]1n0 EwB7L9r\%C.<6gI5E xEJd}:?qs:w0N*fq!׏y1[5-U}zN"2IW:jm)mIY4}JuL#e5І ҩ|o@jӲA[Н  Ѐ*jdÇ`2m/`EGi|0vlP؀jJsu6m1*fs9xI~G̋r .endstream endobj 610 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2321 >> stream x}U tU!d(h$kpX(X~E%&%it7$7wR mQ!ʯuYQW袻gUIѣG7of{;$Q0 IR^[Wn߱u8|MsGڐn!It \{dlYU8z= %P,SEӄ9? 
+7SIr{ys~e.yG|XKuJzqVmWS7G$ YzjfIoR=٠_|ZU͚spb7MlШ5ͮuEӪѵmzCVSZqA]MҢ1',&Nաvj FqYklj-jŠ5litA7/]=CV1;7j \R}T61c@}%֠:`Q `Chix#-\:8-|u<{IxAty)Cc }̢=@} GC'?r`O0З߬ƷF\aBh{h%ڈ(e: I:E\O%h=$ޯ2%cN"9Vl-CaDX U]6@y<^[7?_)k93ʜ~b\FT"\0ҏ .^!.q;w%s+?evqK(#]r9Q_ڣqp|/X 6#DyF8z\2)3t3Yj2Jy*ʒ'>Elˢ_#=L42x>Uyrn*JQh;^Hx&.Ux *7JTAMG3\S,+~/,u4 # *` f1bLA;n9W%.+WdT xː%> ϮO.~o\ɇ+#l&gP̈puDӠe{8wMB-l}v雫>u3~ac|x C'589f~/~oת;ǺMs{$d;azf|PP.{W ,ñp axM G`gz ʅ(+`O^w"&Xp@O&u;""CHxVY~ ѿsDjL",y K44k0<$4h{E}Q-"Qw,Zظuo736?x/Q!WO:O9!TaQ0Ou_irxVJWɏJ#"!h醌CCL]eHHrN"Tc p"M]x34IۛoQAhކ}wz 9JnQa0,Vk[[HaOB2< wQ)< MG[p?t*|: Nay&dr"Sl2XR .oڗt9q_]PKpєǭ% L`2זH` s+Ȣl~cY>-Q17|(wHq{QU~S N%\He8-;v:q6'JvXUo8i>Xgt ]]7IH^䥷C d2ïqt,LG2Ik v^'T}֔M`9 56[F;<OlK@P,`n< Vf3j\%cO43+^T3  dBȪk-&rI}ӒrEOǛPχĆ0Qnu ^Ci؄K6MxGHP}h_!Hł/;(V> stream x]1n0 EwB7e \%Cd 4DgKRI3E;Ov}cʡmWzK :$?Lo|tcwws+ӖVա#(Lmc#e+'"M_Y-=  ڍ34u Ձ@:zHVԒZk[Jym˼R^2{Vk[Yy:5?[^+]BSƿ*[-M_;endstream endobj 612 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2534 >> stream xV{xU۴A l֠C],bRP@>n!M4iwSʜI7}Զbڂp}KY?W>]eY`YNzӲ7mZcs~ߙ;bB[iHXe2uO/oFZ$^L?>d\919?|e=? |~iFE2ؙff?cN1go h*z!(YQڏz 4OO0h,X8o}/Dejk^ޝ;QѼ4|t3QDN$ich"ViV6gf?(=ukk[2Dcu8!ebkXFfLJ4yT[0#.Z3K Xt uO$o }`֍[60H@DJrT*QF5Eu5fԂV@m5ny3L+s AkQޢc)bϏǒt~ak2Dz1XwvΥB笢Z'W0VI{zB|n>wO[U(h̴@v<2$pï}iDN]YnnW=;yf s,%@!wMԂb7U+ $Wg<(.UdcfG,k$# k!&kMv[UeL(\ !!OFk˙BKQ 4YLp|ڽ]ݧU}8qg˯v8ЉkɆ+"zWDv cd,EԙɆm>`mr@;ѐIr4#<$Uu~?_‰Xz M],#]m7z]ŒmE顋2%o,U~x'^ij臆smUbܻ@bcnc\OZkQ͙ \j-uw@ 3$"p_L֐ 8s5YZuX="~l@8ϓ^dtS1?Rd?=p+U1nsA ٥}Sݐ/C/{S^GW6)dN* uC.UH?TVYif;眦]ne`s[۳Od b LP u '(] (fMRN ӼEC{(5//er|Cz" ҿPc9ଢ3d= q_Za=fcќgS5ҬrI]c NV; |';bhU٫*S__0!jendstream endobj 613 0 obj << /Filter /FlateDecode /Length 5897 >> stream x\Kq}'W[X$VBM9S˙aO>|pfP}9ԩr SBr64W[*w5wp! Y Sz<`ioKS-"*yp/VʷkAeoRYcV8#b[#WT:YQ Z+&$|*o6t˥u>yeJ13BXF _|Q1檟/+.Llv8# ֭67ZU+ЏIgFoE@b:cF7ө0~+aۋQ ?8_j[&pZc;u|H}yÕ񥤁_tG:nTބWͧ0TT.[kEgu,OS-2omV#eCb=aQ09;Rå ٯӡ0(g)Pl+Wa{#3fG .6'@jd8X[ ƨ5hЄhzs-GCKK$`7A5JadhSqfob19PmC hI"t(ls}lhLJtE|1kzm7=N/vgC6gc kpXJ&[\`YgԟAan`L.EYXqh@-`?^ШA/hՎFE @sZt\oF'la /m8;(y+il#5Ui83qdW?)N7`A![(#:6hDbk*mEJuv?j]\ QGLuNk@םAC("LkMZQ`(,/&3Q KGr,' ڀ, }V Io(:/`&J_Do4йRz|~߆e?߰;@W5uAAwE j\)Phf0/Aj%rVK2?/MC swx+~kR$ꆾp[0߮}XfsXzTܪuZ+nO7HYl+9)@ōBX },ʡY3ٌk'm:IrJJlݻX z#>=4 + pCh/*:q UiuJcŬo|wH[3!LwȊ;:5Рͺ|r}ֶJ"P5c1t]O0s}*Z:Ѽb y|9÷YLԯ'xyMYCKosYk,֒'ڠfiT.-G0?' @VI$i#Y` `VJ8Z k~: F ,F\(>O'riȗ Ͷ&'ƒا5ÃHqewJ]jIs[u=-$Lݧ1uд:/a{:  bITgggUOVL]d(eq G.#U5scV[M1n(9MdπohU6[yA(̕ RLydP2jy2 {~9 -45k:9s{5s5@Y>D$PV^~dGo h(`J0oou!jFG=TLa i뼜wjA3PuÏa'+h$Qw9AMejDdz\&^P^Rh:iDLc!p%J>KG 8aYUWY,ʹ4p]Ӭ^,!jMA9fhRXbQOм7yX9`QHH&NJF @# Vdclٗ3Bsf~t~JqmLr) J`)<[dtTJK47]!ԡTa`TxNu+I>\ky'(_c|,j\֒*ٗMU-Hd9 Ү m!g@hsr>Qʷte^734j+t鿚Sn:EO:{ NB5u؋Ӿh^| 6 1X /o:0 쀁v uR$?6Ib@4{OK,Vh]!F}Gw }/-8 t(UL!TM{dxw (` +HH8X=˨kzj _x#q--ht vf BZZ/<>Zl8RT<,ÿ *='x3CIi <:Ki|"c."v ,Hǂarh|. Or9`wSBňuQS\u7(h?q3T1fY}!ڞȧMf֕{ynMa!k)rP5"K6=:M9K>wٕncT~E 3t!ª4cKov82 :ԧM*˘ֵ?lEq /J{9CB}H~P8xFC" -v"cCЙ "d"oz+SzO;SmKZSR8D a;)?<”P+]=֌`!oCqRb!4+f'n7h!EU]rgsrV~QZ jېe@ ~7 {rf?=iMt;x. ZUHyd+ {# zP=4ǭpdC$LI!@L*0hp1Qs{(*G^6n?Qh*Z'ݾ&TDK1ڥ$HMըhLr0HRȺ^ie?>޶ [q3@OyD['./E]]&'kmXrtEк 'hB*VV !J=T?4JGV0 ĕ?iSob*|R}a~Ww_9~/SS@ow9J!lMc5e {ކcٰ1d&&A'kv|T U_SHKʘǍBCd=](^cHf5X#dQhԣ+v|:"{-V88N,VDAG ?-f/Xݧ 3'?aE =e+p '$0Ɩ+A*~i%yd$0y6uDpd$8qq Z%-T#z(_ݤ{o"g7Z47tLug~GtYdjâˤR&]# +]tk'o8dWapX( N -{yg1M\dPtR! Ѷ<_pa%u0 c3pMAC]{^mW>~_(-P'Ğsm}Xw^4]P^L\<ɎGt{P:ˈ 3+vnjβ<8=xm;xH -:j&O:@endstream endobj 614 0 obj << /Filter /FlateDecode /Length 182 >> stream x]M @M1iԍ Q/а(J^Z.K>7l^e)dj7 & T<87B׳7ߟm|3oO]k$d\ }A٭8gxA)PeE%P=55%5> stream xz |010.Ⱦ Ȧdk,P(Phٺk7M4&}lʾ0BXB+W;NRJ[fy<9954j5BwǾ:kzН™Wy'~ q~w@Xp0? 
pY|HtmJx~;"21:( 0vY̘A~/+qҙcwƄg53ǬAc&G3LOc6nX~ØakgQq~1A~3cc^(j[ގܺ,jy􊘕;Iص:q=kX>hCgMa^^y_4a7ޜ4dkCMpWGsn7ΉYwgQF]⬒N=?/_zqڋRKƸ[JF{sf|FL0݉?MZ4m'ϛ{vr;S2_SwO; 30ϸZ`YԌ۟4uj̨A \Tm(C2a\*Ă2;] `~K&)phmRPJTaP*\S?61Gnh:ӆ6?G!)DqtJi<8ɍh*dl&xqY {BC"hhB肨`Z_my3Z,6Ā )bv,X[ Z['0ݾ%m&@_a(B/qbCM$"CPgAvRtx̀ŀ]sa= ӉC1Sg7qbEWx^E_k G,e= ;j0Bߟjُ .E/݆v<(o r$̊QRjZB.o^;Rvr%pdik=7Άm)R#rP\t__f I 㳤osf~&(b; & dw`g*ӗrꄬVCC|Cy㙏R4JMDT edZl66wt !jFgu赵h*wp@0Ԉ}hW4%NBfMh7X"Lݴ:bC"[MM6u r3"6LO\BؙٛS(Xk vת}qv,õO9[Q?FKGVp[~~Cm{70i4IM])otv։eZ—$^ ɯԙz,v|G,o3٥yQ@GTNf(֖p:l8cjDs}s %v| RCt(~&XU&ՠ?rX{!V)M!O Y|PXNC;c0W4j16qaգPE*+$!61T#gPW'xd›`>ea\')H ;pkHvޣwau7@rW٫$/1{w4{OݕH:p NM ;9˅ܔLԪ"ѽZx@etSHbgͬB3G|agѝSU A")C\|U=?]}Cy޳gU!YPr.izJD?<@"ew)pþ|E !W>6 %`"Ж;hKC~ATO-OtKT'z;>ʡ$e%-}5q'o;ݟO !bDa-*'" #4yA>5NX_TH NwX{}L4wrrl=}aN%J ]9LŹA\#~;Y|AQߋBkaLjsN.W-j |GCӗjmIưS?åSM ZrLϥޮ<:6+~g"0idV~h$QK7ѫY+K AmC^V$Hs8،˸èLTjԍf?}",&TEθ<ꆟ`sJ$A"4GwvIz@^$V< m5?S >߻h.z M u0k"Sw.$s~'ixh76:u9Bhw8G`A]tV$" j$k#r|ؤfŷ$H9 rrhQBg{Mϴ rCCIRɡf޾;OA}CZk彭I;ktw7(*_*dl"Դ& }]FRdv0a0"xgh='Eyghr̺PT!4څ=<&YQu#8SJ 3= .HH@XJbEFLm ̃w}{(\HB\XZM(>]a~>etrQ_҅VTLriCPq,z7Q. 8-ތ=ui?ȇ(+4L .zXGgu͆OŖDÕ4t\חH?ڟQ=rZh?xTV'=itI_/+B9_v`>W17U1ij#w;DJgoNͥ570evꁧu1۶A1Me}/>  rϧd%Qi?r<? k"V#Aq^+[>#DZbM-H@oG,'gI1ruFB p_3$]Alj/Cx!9må_NDboh܆Ɗq]{٫p Gg:@y1t(yCőh PWGN2+5ˤg:>Yx|6bޕ#UZ-h< ]ekFdK ;gĖ,yS_o&~YHMz{4(Bx8N=Zp2M~OCM˶V:[X xWqo@eDަMh0B ?Ngڿl/=u~ѶU6}v)¨0CWi2IGarIlHHϻ`9X;x0ɉ}jل!m`]DY|Vb'B)*0a$\s1/̭f45uZ=2#g=bslC"LǺ%|PXA|(ztblcha!yA7J!mG'h0hfhC5qeyԣq]LJ l'b?Y2O?S~&%|c11Xǰ~uCR]4k+\Xv>CaH{'TH]UY+ίxٽyW< ЍRu hZ6Ga%.&hhއD]٥lr#C۽[.]za/5&;KN5G >LCε'1גx71=[JcmBc@Q[7d\eMFr3tm~ڵޖ&8ΰj*TEιtH\%J~qS+"6r|$f{teE6$#{%[Ǹzydl8Us]}]MC)qG y/G#Σ3χv*|719Sf~GUvx(Zݟtgm ɆM!o [ۦ)QK閽pq g+gAQ;K'KhK?"/':l6=`"CR#F]Bz=p%}[HK6{у.U$s߃SdO7u/p}zz-6].YL?~r4k5$ud/UDC>LE[9 R CoKY#8ۨ+ӝ)p}z0QRk!$f 9PZaSZ!! 9neV"ptcN 3@$K;q?$]ˈsZPfP)UJ:1+HZK"^c3S ,< >qƣ/P"R羵mpVeZWhh; |V- 7o82! ݟD/;UF4KYjGh U:G|ԉ_O6ȉwf74. SG $ \3*8KJoxBn5t1V6|&T\pkÅ_֝9n>WxBO1.N](_߾VX#5y9h$+|kZ ;̧><܆r/&Va>996t,Vۥo>Yao( M#g,7r)_Ob[j(!$v+\pak}}$>mӅzΗa 1#ْSC3'O#&&PRE & 1-4h<ق2ײַ a傛wAulv)!}`JE&~t gII(x7}N'B@ l#9\w≤:#4*ub^R4An!/W'>2H&XXrPh$rC#QqN{TmAX*T_ pf|O a*O>%=kE{etu: Pom1c1w)PȊV'+8,J{뭴%Y1BWd1HSfU: jx+9Eo$m7#/l-#mI:p^ȿ`=+ d?O'PUk ” zK:tԺs߯'x+Kd9؂8t$d&Y2)4̬@'W iD UR"i؄yۙWzhسl܀Madg_- Uk'&$Oܑ!o5&6&>0s{[moBhsc8x%%T+eF|ҒŽ(|fqMOt NJ ؠ8{Zk>T[IjN*Yxx&U*.XA~)Cra%x^^c w:Gyg< ?$-=[DF"y"eN. d{0HnJI6{ @''nVkgYꄼLYbtIVlIz1)mnb*$1(F\7uEJCr>e ܖ<ДoKr "me8xf)jݖK?P L-˵&ѐo"OC!dfF5\{ҤwKxꬤ *;E(q!c\vҊ/;8C29)D$3v`14 R(X Rm,E "=qPZbh?J?x"Wᕏos&ʼn S7 B>(Ͱc,<{ah]оgǙf]y Kdlm\ɚB4WS/ sLfSK`\5 b6(JNT,|CHS*R&K~9VJwSk8-Z򱮶#(dU\η>Mf:>NMh 5d3^~uy _i%o'gj4N/*;t91xU Dv%U܌m|r, " /5b5d/R~5dߐEْ%S'R6{wtl1=qXNq?w! ٺ=lZ-k=kb<3^ /[T^$As?B/7 )<ݽE1=Fdd=^ .#YLub~R9K~+8K5,DMh"4mkB#_ k?ǣo 4=׈ܙUyhhp \&~&܀Sb$ x ф޴Tr-Gj*Z5Ps[SGv&CDXNd]ZȜleUDʫ2@◼'e&̊QUk,PAZ1/Ta>GŞQGO:V4\͆v}/qId+ҽmI֖&B;DUDC=^33Fhp†1ņڧ_CKPE0XN_9['\8as <f.H P.̩0,?*:; {]V »͚U2֮Z=pk^Q1פ/T? 
I}wh4t=yXW^ 2P2Aɩ(̲ ؔ8ubvr~2y^䐵 9V6זTTK zRoЯe.JMI_,M*sM''HDL|fK%-ghhE4Y\ԕ"Z^\ G׌sHѧDTv_\Ƒ;h4_z@R{ꢅN5 gMFs -&B=/t7e]GGI5-Uc5$el=uL~Rs: w:l!dz_<> stream xcd`ab`ddd v541H3a!3#,,CŎ|<<,+~d }=B19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8SB10001030=gZ}?˺D׳}]3zqʴI}݋9˱͝=wr뤦^QgWL<ss|7slݕU')=%rv\&ͩnjG{Ţ7޽@-Mrպr#T*8~~]н䃭~T6-˻peڽy.w6?ٻ &Wr|ZtROY {g*_~'Mg̵[|> <<@8endstream endobj 617 0 obj << /Filter /FlateDecode /Length 2805 >> stream xZIs7́KWt2J8),R< )sA\A@~oi~_-GzOlGfuDwmS8”g#5W* rDx9}ٲfóP VK7Mҕ!mTa\1N E)yEKCfս}Þcs/p]m)f1'U &4y9 !TdhWvT+M6mosx'3 > VJ2E_Ѣ4^= {P#-5 Ľ#iW8Ҏ>{i|y3MRcRamb #:vE+xrm*ER_]saцV'1w2pA4MuhZ8{[]y!)/<[lH2*)1+马uyb#B>H@4 w& =CIwͅB{Jhm6p|jX2X/>wd/VWlCx~ Uq*v]\87gC` և-˫v?_q=SrQ$hy~$QV@ 8$q;dg=+%o"eG~U(N}>f,?ձyQh?vDRН<:vϫ Zzm"^{ zS/\"JI S@ C s EvI$ MR^YfHTؐ pƁ֓J]*# L%Z{וAOލCCixpnԁbȂai'ʮQ~i@h>&mzM!?ԩ UG:#{j@Z&H)vPj4In2!_F/ʢdzsh15W=aN~r x֬ϐ[yHlχ)ppOxbʛj>H&$n_N~8KI+k @(]J85=?Td796oT|7|om#X [4:"r˝bDhّendstream endobj 618 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5393 >> stream xY XSg>1ҪMKC[kO^uwAİ$wH "֪SULgw:3838<}߻}Ç<oe3,ʊIK 8}>;}m1!%9ⱹ?s1跣1qKKr)IY3OyyNDlnܩcRŲԔSMX.S"&3"bc#ĉQ kV:bA5+WO LĔw3IxO~悬ًrbKs/OX2)92e5iN!ċJbODKD1XCL"u\b=1Bl S{4}b:A,$/W%,b)1XF,'҉0b*.'1-s|Cc& #Ə~$=,l\ᱭ}hc93v(O<5)~Z$g 5eإ?N ̩zPA3Q[yZ0EiQzYc{(tPd6(udj 5_gK}|>$f~k٦-)cW͐;T9䉅HMa*?Y}:( \Pf0EU>wU:cAXl+VK j!7IwMQ*~E~{?0Ipu7777䅨srw@ݠЁѤu *htAC#: N@qO &k+cܯ/̚<=&03Xle\YMBagQY>C|ƍ&rGk>Aah zM a[&;?Z dLkѰtٞV {6Ea94?MHX7 @j-ԓ Amᒹ,NŽMAcUu+tYmpSoNr7iߝ=HX[MY|Z=Bb`Z?eyy3|ր:6{%Z=>TPcȧ4.DJ5Sqt4An(w1nvKw%wp˦47[$L~/V22VF{N8ՉkWpe14[ yw..spa-@*;BX%f-E[ƕ70Nv١D x[g_ ^u366Q$)(?u6af{z[iƜ` EC`oh}^Vb* ךiD*:躾@O5Ƃ\lV\Y,t7HL^=M' =!9\ؤ3RJH:tÔR`dofU 2`1F P'vdT&I KZej{ VGyT:APGFe& 0ߠp x «TC72f {8ě%R%6UcCe֒. *PSo烔̩ДW՗ X@?xl.~"vXMXP:A|xsCq:QI!z,2+!R틁 |}!Sm1טE Ǝg?~}-9g@ӄps~FĞ.}Td0T: ʪqmKs\*RQ߯ڥ)(R27,i}Y:d;sxC,X?u9~Tg'3~@ʄNElTGb97_'W5MD%B% A>\C݂SHj;@Tw{GضZ`/M-@:658]"j A }3 `nkxʉ^>u֘tUyl_D2`*/r5RLTNiI/$&orTY+./ّٶaz Nc-QΆ;YgJU;rVѩ3 !q2pgivK-%'$%gCǾGn-P9 ϱfv:U`H7A"ā*PyJAVuNK` [nS` 4T'zѣ7}T#+͕KYj'&ܱ+FF4}q|C赃u=-VB?nh@1w47U,N)`'uў]*Ovx1S;Z|Te+SY) +q`Ngca++L u Qi|Jֲٔȗs"YLRQL.:tHO1KIc9- C6Z(! 
y;dْ;k(gfݖS@~ۃD`UBЋ$ؔ2ec "j8+pi%uxYm\DP&YOM蟤NPTBt"uRpQmu[V@e%uYP 7jfTMe=MrvYv " i@c8:,ye@gvJ2sۼ= Vf nvQ XJiV 5jkQtrtꖤdTJ=q쇧˕e2miUtop N7Ai $8VW fR r3$7NzR8[1AgDϳ}}V T9ȆHEo 7<5pv{-h%1]m+_  5Hrz[w>q N_pMB4~6 (n3Ss nMn8Qf[>nU7Piz:A}γ@~~~G%9mXrSƓ\A\᳑'_H'ST Yz/$'E+Ljr2hS@V:l PYw1h}romA#zusDVNJR^{G^6Y3Xs8)4_BzJjq8vGCk[W mhY5R5N ' !./d(@6c0huAQ:Q u+7=CX$E@4bZ-~jpktMQA!ʁŗ"k;Or_PHm@yԀBW )*ҝ`CZ픒 ADˊk;kDhm BB>^U+1v~y`qt>G?m\O8zV٪l57\:PP\x\]oJϠGN4JdK3t@&'=#~u,ZZA-ҤmCmW>wAۊK[G}ÃO)JN']p**()bz,v{5ɰ<7ʴTG>B% T3ń61ʋ>J`/endstream endobj 619 0 obj << /Filter /FlateDecode /Length 3683 >> stream xn#@ >*VNC t 嵕%GsHь.>g<<+Өb&<5}F_Mgq -iǃ2 'JId or ~<[v=ٌh/C`tz`yy Ѽi,tY|-7:{.w1h$-^_s E`f)ֳ{UѱVrÿ ud%Õ \d쌙gh j xi@l|WC`[|p`/x[uZf {2M5$۴A7M x؇2=Yw )cvŇLN7nVl{SB˱)!!Uiuw=.Zit,-Xn [\{X#|3FU۳?\d9Y^:~G2r˺^9a6 CŒ;@3@fPΡln3..ٓN˴D.!}m)~DՊEtݍTOO:d{04!g1j]QzRHKR(UWE @ݬ敞'~mVo.;ùJwN׺3}%hU6D5 76ѶR]q*6i?^GR]),;>HR5*%PhM}L˻I@̑`q1Khv4$޵Bqhؼ~dmWh8_ocRu<ʾY"E@J4>k'o-m }s };qk㚋ݮמ.35U(!ow2.ʕXxhHbzZo+rfݯo&b\0p@6 ]VMWO\B*7b~uڪDPhAuKg<앧dzr=@TeVBX$)g2*'SJ^#atsէa~љvZwѨŒ([$E\|O絡n Es">M$#sчy>Jc V՚)pQ`~%7U)ZYȄ`F1T;7On 7ԑP@#3vlZ0R@U\L5FP~p ua `82]Ee'M$8F ꨴrΠ~P[JцX,bDžXzjL$)0 DO` .u\zZNZG2"ux9?xpló~Y_1:iQ!5Nl &Ծcy:Obb!nNOoV(U]EMT𪣍qpH6v * #䉣݂RRj(2;sfsҭScIjUBnQRތy[Rc[Lz`EI¼EcUgqw \bh{l큵GM)q.r{TU {Q(˴'1ud5^97 yȲUPBsˋtEVzOAnK(R";,k@ve͸_I_pԘ%Fe#P:Y=.T8E?zLe^+RK-6sS+GAZ?͠V{yɁu辉!P uetJM[:kn4CFB4ȰBv?=N#7U$ʯ~8h>_Gi}> g`RV1нER Rf?4Q-Ue mx*y@ī8oɟCP'71 B&jHܴs4(ybǑ2)I$XUZ6ᖑrL:FmTv;F/_ݳmtnQ9gsCՇ+n-$ $\ qB+ 0q'5aA-ZS@bpbDqTܠT77y>`4SޞPIT2HӛxU{+_]EAeDaGv-~O[wpch^pëJ ]`lD+YI'n]=3I}p/IN/3#* kv<8 endstream endobj 620 0 obj << /Filter /FlateDecode /Length 2486 >> stream xYn}7b7vwcNZǜHD|A7? $žTWW:UE~w_L?fͿ]i`(3ms*X&s3WRej7$ϾKYf #<%W47ndPdXdc͌!:)ҒX=Ddy^?M4nYNӳo~ "9'e](s%RX,f.Shߨ}_[ ilpL& }X}5xR:_reoKjHew_te=ErX"gy:;Fuo!r/j@M)Ϥ0lq:=}hOJNYvx]/ٔ]W&,W3#yƢXRV9dHySmM y>({բ(${oX/D|XHI:ևJMkʝ0LćաJ[JW ౗vݺ0d5G@ѮjַRb%#Վrڟ 8ۗ͡ꪶFTihц3eE?vb U~@fi'n탐Zi ,򈬂FpҞON9.$UX])jy%+xK#S!|`e# Ytt*͋L1],4Yu=.˺mHIܸ>z Yw-'eȃ |5NqMGrP5.{My_w֯ S(@ׯ#g1&1{:<)Գ!];tRiImzC ]rpEȵ}O>&H{Įݕ9hB&=`=@e)8`- S3+ ˶x@. 
(dQ$([LqIx%mq`ͪZ.icڟ1RD٫ݶ:Ba ܎ba}o+_{-ʤ*(V ۮsPEpxq2 QCVA?2cfv-B6I-p0T{_*χuUe2;1; `M@Gഺjyʵxϱ^p=D΃62QҖ,<"fm ?Nwy&/ABs\1pO3\ϼ80sX97byoPD/{B8j yL$fh ͳup(4r!VR n c.CKncڟn"|4EnBFi 4̢9`rT2C)F&ᦄtfJfNGFn^/Kb}jz+Ӟ'hJ͑^OB㖸lMI*"UҸSN- @%3ii0%ao~L iS$ lwk["=QrOjO^M6Wіf\ؓ}\ ǎs-H`p+ IQ} /DԐ{"cnFDະ &IfT  as5)V.h*PXBF]J6Z.R>1ϟ8%œkm(lʞ"A~3?Tc|c2%* S0]1F ؜ȼ' ס/.bS$~FB㯰WX~ym0OCxu$Dl> stream xYKs7eoyT%[d3uT<JeΘ,)׻n$eǩLL_-~wKXE<.~ZP;_6()~eFZpT JKIX(+6ŖTyy`U iS!85a&䚦e=~}1W%\0T*j-LTCVJf4Si_s&zg 3 7ΐdI^$5><>585I N53@8SB7rX{;%d3JoF%ɽ s.462(k Cp %#j(uT+; N#MT`*óC^_MLWh'TFy۝C Ήq5HkL c^9y+剄U?nI{xY6(o8.9Xx(JUO&OPr dTsR{snZK&]-Xh~C$eEzѺ8٘/mIG?G&np ~Ӥױ@35Gnnn~)ڦ}?js$BU[zM?Xv6~y0~Z41ݼym?Ioz޷NmTUq 8ǗEhoxR T=P+pLj[^1V"@XC aQ8Ht޻x#W2l8yݕ,r(8F̨/ć+j& yw*@.{bPqZeJtŗ~y6*ޭbf(A=yMa 0&Nfi[S(q=:[syIlX;,܃aAwς&\A$<5ijKw@bر0 J}RNRRc2|{+bR`pgşwQck]::| Q4PY/p*rm|ڪѴ:wx Ҕ<>#U$t^J?# <2u>:Pϲh|L!=.І5ʕĐײ +oǪ u)Еzp3dfLWun ,ȿ6oRώ2#IŴ~l.p_-ņCp鼔!UxT95RfBe>t33ñ5;wp9O^Lt1k*d\ SAz{ŏh5rnyGF3׸ifs'w6Tӷuz N+D + B|m5OYd&Iw?m]`M7F`全D8,w+C3Q?.HXendstream endobj 622 0 obj << /Filter /FlateDecode /Length 4714 >> stream x[IGvo02;"f Ҁ$^t9EidchbmY7 |o""6]k/wκ~u2W^?/8[j]_ܬt]1n5pv#GǼ!9̸Z=v9*o_?4V.m^Df?>s<ɏy;K9^%\(g!ј4\fl/eT,2]ϟxy3S_0a4p3i)lGS0(//VRs]CiLuMɣk!*ghʌ)ӕEȮwT< i} Ύ5@B!)$1s@H1f\@fV .!R@se\rͣw)pVNU l1Ybe@B}Ig) J䆐Te{-nByUɗ3,8Č^֍ "PDLE8Hbo91(S R$+1YEilraSqsR csS]XLR3)IR)n\ V\)1VRěb/s1yn%57#.kԎ9GW&G욛5an &alnr )S]^}X#w+.6R;BW Hq).$rBF c:rQ6z R;;P yLmVV )0$Pi*Q'A{9(6Auhe /'|ϣiwN*X{ +?X$c)9$d5jvH"-@FeФ =Ge%x/8qvUTu^@PBրh/ ũ<1(WB$|ϣ$He*a2I.dž=,XyS R4ڴjb {=- .&AV 'b HݰFאeD4'&}U{R+ /Z6VE2gՄzH Pю)YӀ)B)FskbY3E >&!ݏ̟6=d"VH*-@ etÞ%$4Z0%XǍ ~ !IȩÌh;Bv$7Զ1 E-ZQ*h}QzЂwM s1vAwsȂRFeIl*< !O!b4B[J'rMaD mKpɺP|#ign+r3ұyyzzS&ii罖GMuh{UjY攤\ G݁hѵ.<:  I;QIX&OOFeWE˔>viK60YCKǡآ *Z3|·7?y q}zϖwyq$$+6-g0g}'<pr6 ~gY?xb/Ye(~Jg:qk?^ Vk&H2&eT%.h+KlqWqz{.WGV jdhF om v~Qr@"ʜ Џ"=xir?#Px|r#b+熃3:e= G{ƌNE1V8RyKfW&H4u?APm&`/F[%m% v izrhBО1Gt 'LPi)c,FgW&g+ 8}k.pG۾yWKin) G{\MT4ȑn]QGdz P8&ϼB.2m\V#]xba` ˋAE{ƌNDt',HleWF*V9 gٵʏr_,^ɂ SVURnōT|q]FlS5]ͫ!xfl;x^afENyr_ou\5˻a޼iPWSٽ]@Ǝ)"1yWyx:D[_LJa8}s]v CwTҺ,zզYO@RnQVZ_ɼNPL&lxo~~wД 3 />H*#N =!zXdCFM 8wygJj4a{>5n7 a.BUϿy,?l ">!]euٳ@&a*Sy}K)vZa?yF oG쟟K!Jlm?|#N~϶O^<4ˇ#jy|5y~̽@ly`ћ7w})aw\Qv(8C&?ώlԾ/iݽ_M: SJa6rN#~T1&;"%.j2z7}G/othxb)S;8^n^Sk_bNv}P4^|G`qljkw,܆Y(I3!IVzu#}x:~eXoCZWߣ9o, NDlNF*/Bc#חcJ^=9F{Ph;~x?on@wD2gM }g=3?2gd &GgE_fϦ.lqCendstream endobj 623 0 obj << /Filter /FlateDecode /Length 2173 >> stream xYr}oÔ^qiF$Uĩ$ޔ;UDg9Ԯw8eeA$4ݧOwC oiV_f ßq ( -,v3͙TvV8#_RNݢ<.'lKnE ٸ9pkING "+JsHZ%w${DЂeӯ " ա:f Qq|of|3HᩘM2r"liȇ=BMnto?,5ЖJf_mfRJ6_ EMY;Z_Ȓh̞ѽJea)}'(K?,]SWEtaT2(wGۅ ;}:,U;\ǃpXe#Ef~&qf3!['4o GqlүÅgφ6A+I nxhBE$J@j$"#vmإ Kn9yYrnZ2ƨ2X2A{g_kY 8 %^.t@@y:ɝ<e^ˀ/R=$>am*% `!53lSmHy$ z0"3/ClUmݝ `B0B^c2c 잚uq|3\ f)bh/K%Ѥq[b(@\/4!B[ڧ3 @r–q1Gk 'DRnf%h'swwފ+"0ȻA/(8Hʘ.>/i$&Nr>zf3Nau=` Iy{Fbżl  Dx&FDO !'C8 ! Z3V;p\Ig .Nfo@HC4HO' XwU`RSUO1]0d#h[p{*y]FN Im骖-WE㞡乆:AEvPYK.~, S~PbˁݫAO-|w<^ôQaC1b2IyDу+#9*L Rrzs%iZ6a%O&ӓ1$i:bOrU`9p|1h+T+F|Fv͝0q{8Fp>bQ12nX0X!(Nq%ދt { 쇙Լ{}}RQ\j&B a(3DUpQ^S]M#pZg ᑩ|0w@k Ia^2@$tx Zdž|K-+_e3I%'dG`XG T~ȍc[Ťߜy(hteo dT%ij}hCJ_jj=7>q>D,r,WJ(;"t@ P =p"zgk6 S x|k@~V=W3I7C+8UY-}ϐZ Jvq)@N=Tuuc ;'Dme㉇p8)y8OyzCh-Ɂ.McǺg„Se4S_啯ŨB%r i՛2#(\pIeR~cXٗ4_'\|z9*I+SJS&"27u2`1x/VJwhìZFoߡQumz$΋XZ]%o+A;??u {аW3L̯;tBo&Mu_ yӗ]O_?z!5?~ܒendstream endobj 624 0 obj << /Filter /FlateDecode /Length 1822 >> stream xXY6~0%ta+M-i-@}̓|kKdPHHzgC}/wo((ߏ>9p "/xtlXK\aj2Na-jtKve8aetAaiTV,;N 0ɳ\rMUrXhży{H~=\OfB,YE~iFҮ8Ln11@ ^b #5NINRks&yxL3ϸ̌R?6`(- BayUXa3ZL o!A$@,$Thp748#;094+dh`4wDd*Tyأ ESJIӾ益k fQ.mUk\^9Jv3PIOR ;vkdZ#,48%p! 
&KA?(F>fFLZ c}$CLvHB_ڏcٮmU<{>MڲvV5..)rSѺ+u(|7+ 4㩢w hc ߼!'8Ӱ-ƍ}ӗV$ϭ6I$g :O>H(8&,8zU |*?(E/`ot]>^ Ч> Ӕ .T(}5oVb36Dשh8\LAB(6>Р˔ ;ޝm' 0n:Cs[Fٶ& #&yMR7:jQX4C 99N cB+ί#t! (79=LqBnW9t kMttј P}cVPYz*@O@PBS%m {ТJ>$ͣzh6)>1 ϔy}}mwNӸtJy;Syj}]Φ s5|>e_Sihy#Y iǭXN~#WQOl7eVx-4!M/̋zӫGu[W8jk$ =7ċ(r78Wp{k8$D*WTusfm>we_ON:hͣrHy,40T%9vȻHgYFrح#ch[3jPy’ޞc=P|a ?xWw w.䖧Yq= 37HIΤOFMڞz }\@jIx`$\6mgR3LGakˏ`}ݽ;kGo +zr%NŖ?bfpLu"3΍Z'o^9%>Bb17˽fkWKN~;;ӯƈpY&c,vB燷(!I\uU & uQPAiA݄ 5m(E g4E$j[zл{fJ7P_Xb>ʽ?zBTendstream endobj 625 0 obj << /Filter /FlateDecode /Length 2260 >> stream xXn}W[` n;rh,_P@!Q?oէ"/},8Y. pOؾ cH4~ eڕٮu>֓7|!J7ZbI#pR ܔzS!ZP*l 9$=^7Yxv~&)bivB%dS( .ۄ5pSa5v#پtGxb]'˜yO; i+Y9>9 Y#ᆴ*<@` ٫ ܈3sh.DAd6"&F^vW] f6)Hobqoȃ!D~3>MdĩbYAhޔZS#bBbϟV@s%T#R`s:始-F^}C$sjhK 7 "A",<j!leIzSրfM䢺IUZ2\fgCyx+CPFTc@C6Mt@=_BA§Ŧ2Vz3g7}<s0?$Vict.t+%4+& 0f[!swmr?juE&y&B"9`bxB I}U{Jȗqxv2U^,YTn.;T>A2fAܲ[|4wCb5M׏R8w%eZ⧚/ Hw|,]o[o=9?mO0]ɐ6i)$3pX`fJ*׆ O M6q.|j $"pAheNx'tU:̀~2s,` IAv(tU]=Oï'Dbv怗ٷ|Y\Ķ\BF>7Y`{ȟhbk#呶-Va_:<(q 2nLY@ӣ6j(ϰTX{a*0TݷUAn6uT2?}RSdi9y~ MF᧯(q)$"WZ/eގ8@":XK0b"fe5E~em K@YJ@,&Pb%wxG8G?Q_ùӨ[]NiG x̐3#w L8$i5C7-ހI8&RZǕ_ #\M ty _Hm"GKmk`UX+>y !5C`n\-hmZ9'@3:t\6pnaQLCa ǹP{ P&p P8] u(W΂4{cK$qt'i # J_r("x{Ec0 tq@Q0ceh8wAɘ`s0k>c1:ӄ7W%_(Y!7ކ2&$ +(` d,D_j׻ӆ ӧu&Lm CW8~. ӆIb8?ACendstream endobj 626 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 987 >> stream xm_L[u-VA͘m&a3F݃.[3lL̖(R,綗z[12]B,|6苉/{9R)AΞSNVu8`۟mjBRtiI}Y8~ JP?z{^ l|) 9aM'zm|k:}rME4Чm.94ѯڬtGomt>r~٦5K!Tv(BGPG ;"Z'.?+(~(7iſnr#`7 IR>`G:62o<3YEf̱ ]\`]iX8Ŧ>qRJ$3f1%L}1Wx#YvAWC( / o.k]}ď*󞬳oB@-a&R3@zFsLnƥy.%̐箟;t^ Qœ1jis ڤ}#VސװfvH$ 7d#5-X,Ae4K/]NJA2ϳ=ظd1aLJ}Fl,qD2Z?1;#oagX]1ɟdEXC)#b<8F{NeDnћw|&]v:a: l0HڪY8Q{Oְ\4u1# 1~/ kH8Mɯ{% {a ύfRܕ4H a+ '2yjM3&~Vf岵`1ڭվҒ@)ܗ%칵ڣjwzo1FXf&Nrj˲8ƒ(Yendstream endobj 627 0 obj << /Filter /FlateDecode /Length 2194 >> stream xYYo#~o歹;} #AkyȈԊKrH}PKiHzPLu|]U`:%/_n'}C^&dz=84[nX)+lŻIXLNr9]l'dDbB4,X&h[?,2ڢ͘hqjzF0\SbQU?sl~1霛|qSCu}5'E}5 ThK?KjA'9<6B 0hn6hJkeAKY {p`pS͙NI(to6jJ`O9Ռc**ePOBm(bg*4uNa\o$Cn|cgb52W?K \(^4w˵cR+ܽ$u. ౡMS˨&>8'bbDmm`TxS-j._-5cv2 k~ u\|L l8/5i]"4 GצpѠlbI ]A(ǹ܊_=ٜ Ŋ w"Q8>I]?/4Y/T82g/m5\3f" r#4y#&'R 5ʢH>="!RStY̌K`}ScI!]"l!%; t,SNSb`ZΨi1Oa(?Y["w3)bI 'CĊ`bO)#iq(DN4ԈF'}][yJ_u\s<\-H ڱKzтc7{V0ѯY|a {O6 >KVE@+<朸_>y&O_G)!qi@-4)$Ayo~ ]a rdRjTQ~:?`SuИe#Ѕ,[*'&\O5G DO1Tjv{ 9 _F> stream xWn6}7AX*2H 4m[EaoŗDHJb?(iEc șÙ̙CRW讙avևf"eb7Y$*Q2jْ_8Sk9)NiDRK~Fm2dd֒bm5XF !&eqTMtzC_bB|&mTW a6t 6䩷RANhBmgvn}:0hFӞE&5b1UhxL>8$!v&!*Ud5G&FvFWXij:/*M.}sNҘWcaH|\%rU"=TǼ(]F7rMbmK*mNwu~خ@fEWU;s<ȑ3@g߇\sEs U3W8"&i;ۢܬy>!@"; s9=jn+ⵜ̩¿|"r.*~}N޶u(4\8I럷y{'t tjkjZH9jJLM2PJ%G%&S@ ~j ॡ hS NWo#/MȪ8Xq`euLhZ)43/R&Ǧ<ͼBK$lDѸ7y)Q^:L'Aȍ?9|yzrGi ˬ^[.5y|wnoM!# ?JJAbFypL݈3icac%8V0 SrlM;gy[eMbp8ۣ90$P!#yoRg4kB;78%XޝA. iwo ڍXK s҆Zz1>9endstream endobj 629 0 obj << /Filter /FlateDecode /Length 53975 >> stream xɮvYr6ϧY^Ѐ-@R(HIDJޫ}*,rTB *ov;'}O˿̥}H?~o|D_;gk˟1ïms_)52GYǿMJΜosF:okuv_Zi7ZJi_WۿW^cw>Z?1os~оR1?=~]Ε?kzיJS>o>?Aι=ЎXuHzZHzӑs3H͏+HZWZ&$~Mj_$޷U}w:"2d~5^Uy:xӱ;t>gA$ ]uw$-Wܑwv2$s |wUy, >=3(h;|TxI^9Cwg<կ2Ij.* }rn3=I^X>Ej kbyLZ!g,|k̳W{O_S!}j^̑Y_+CD ~<R$ޒL>H4嬶ݴcϟ6'9j,|?,vꄣ6SqTF_sߝdLlΣ)VrGsZ]7ܟ7[,(#YK}Hf`@:HC>㬺-FCx@n]uя58*gt˞b><,c"XʛN:< tUj,c=%ajU7 ^b<&C|‘ ]5ߒ#T5_G3B#lճwϠM#{  a 7#< _r+p{Eu¼J49|[4ٝίz+{%XTm ϗ` ԒGLDPxuXMI͹QѶm4 ]6Baઍf8N* qAH8G@h$<%jcdҮ% 3G sR #i[8p $/pPa"`v]+xAz :! 
`躍,Gϟxl 8TUG!uW!r`j[=`Y`%H(3F 4x2#>GPں>찢8#X~ ߟ8` Y Ϛ4~鹲;l̪DyQs㤦%2x̢~IrNޖGeߜa)GE;ފ󍵿'z'22 kn~W[y c9gqg;P4CK#D-k J l(EL%+3/ 1=ˠ^+n˗c,;*j^շ|VΌw'3lE|/2#"@ؒI%"/waUFGmrJCHVW:&FaM*X])v`L7,`ȱIB*qNa#wlYojW(ɀp9ir.x I7DJڙu|S=R?Zi5j1;ߠ _sq}=o(sGҳc~/HgjqJ)dyv#LU:IN9w?Dp=9rfӒgp’/wg?zQ^flCe~ ɌQg~V+SwpkWy~ /R{RTHuR7}O߿C"1nU_[@HCfj"9A|o⺖s!#>qc d \ᗸ;| LO<6Y w_?ۿ9Rs#/DŽOF.w$Z0PVp*K?_ ?f-ao9e VWog;F{ R`3z?g/g0<ʼθy cFpTΠrpoH eJ:%$]%i"$G_m,$oz6CdJXedQh&H&%PǮEŽgH*%KUJ u$甑9#$GKIG I1Jؑ&ߐhNւ|t&;x!i .j 9d' Zp?Ha91 7,[IpDP~oC3^p(3ޑ @!!hM ױ* r(8˝N/H` |ޑT#(!9  Ⱥ >ivȀAU_,ĉ%˱5Qm|w*UF ø{:J8^h!?\urz5X cR'ؠ̢{e G '}w+4E$[_=0$gz?"k7n>ך+{gYnG>(L$Uj3-M9Z&*==[hͻGkX . OH4=K kX9<_~Z(dB#C gg"ouNɲA"w9by$b+ͻCO) voZ˂AkMkc!0{=-5T6őg}^wخ^[f늰-ʆ/%x.R 0h Vh%cnS)京l |4 ՝c c.}S=b9-:ԇUNMlWg:^3+$CwǒPV NQ).E7%):׈zv{:(zz[:$mE Y_KW#iw Ceߙfؼ+Pf3޼{[-ivIh ̂t -0WD&gYԻɚm5HT.(Qg-Y >wꤹt,vWg!zN _: G8Fz* c y4;5玝^G~ܑf>|ppαaUqQy(: pPJH6GA=0+0atO;SJt骿<Ǧ)9\Z!sŵd09Ƹx~]'(}z.ɞNΡ OaA"G.OLÙUvyHߜ  HIl@݊<IE š S[egh".E;V0Uv*$a/!G(4E{j Af0]|i;Cu8R$51U A w!)Sa./PDBa2|+\̥p#Q(,bDTJ" IQdw5X{^i(yΙ naš[!|H~*݉ʖ8QaJ̨婋< @l8DM?D*+L$mq)Pm)Ez ̎mfE|Bsަ;;1׵P]ቮŷiTE߷g2YEhuywJZa]ꯌJ$zsD= Pv^aFCE &SAA#ādB ͂& F&+uo@ٷI8 u䘠 Y }x$>9{* T;-]&=~ĭV|kHY@v*9W l]pb&'_Uni=tPhi{8i Noe9 dN @h*p(Z q*F5=5l%D k.Ŕ383ι+e)7=lD^uqY 9!1] G@v9ʣxVX',Å\klM~;[&H`XH&as-][?$ .LKN1`*_ԭ5OTP8obPGZDburdE!&4~quK"'RK䮒8Q=ˌ)YJLg[=%b/|k%Q%9OETv&QHzUGUëboy]?CܸX)qV؞iHAR2)G='y)LJ%;֛s%]9 HM4z7ayF:w.G0}JPJQK]o2YyȰ%Iw'[UDK%e*48^ʮ3'ԥ;iMq;GeM'5ISHL)%o7=>"Dziw5"=5ozlY8yS{tu7a"!O OjWM-"t5$7laG"7m҂lwwma,!V40|%pGk8:"mwWmfj c#N }IY;V6M$,`aٞ^#- kC+2܂ 3|' ͂-ak%=cC wf;,tyS# \G ~@ZX~SVwep8BYP'A7h XĀwp3Ǡ% ~DgDbMIpl-nEp>e@_|QMvӅ e#p8ٮ=µZBt ~v7+bsz^Y ͪïѫGbIqࢭsj5QvP BBŎ g5?*}AXD @P}͋Pü3d]ucid79 jRB;tb@ i !ȲCxFћry+S}d!a(G{Q}әros&ߟ.hUwJLB?h\y7vd Io?&Gˍ*%XKR?ITn%2TrnJ7˒-_#)K(V@b/K#Su#I_"Ze9-ǝRNi%겔g}Bne{pT pA2LnCJGU-ktok6+ʭhaUSu?U% b-x)IP@R"Q~$M"P };Tkp˦YRw9l ͒~XydW|:#`i,v '*Eݏ|0@|n*zb)L*cY=v&, oI"s!/'ö@A<&VZBG!MK8ͬ`}"6˛ywjK:"-ZRfr!O:ڟ%ѐ&hO.S8UZ|+,j߱Θ+@S\ eft()VujǽA1j+Q -[8KV?&qU2=ٶD2ӮWevʗ:p"i%/oza IUrxw9 ]-\珲xufksZT#* Fֱ,X1{cʿ\Vuh`xf|^>h]AiuT!!hE5y# *xS:s 9xxnb`Ǫ/Iuw@Œ;"bշgW/0 G9Uh t5* *Kw$d7uŊU<7Ҏ-TG!64jeK'NQ ܒh!WI=L* n 1;ksZtZ~a-HJgIfː]V(:5e,8Z2]\pnŃovi|JQjYN)׊cg-|\,:kq1-ؤղPR p]Ivn]ӡʪ-[d// iyG֒ RZe CoMTXXeV-flRK&H&yR Zi=Ki%at@0'WꧾDs*q+|B[XxO>hի~هq){lI$:L/-m{Y m:f,]DRޏ ORL6mg2f8%Fug%%) X$iOm6 %n\=Z}dt*T_ӝ$3a5~1))ki_W Nۤ[V[GtXi1ԝQݝ f<8Y:#bK$0YD vD灕ђA4GR;U ~шU>16riD>m=Ayey ȠWgJwOEp|t!xMu"c񤪁>X@-J%*"pFo0y?l&IW[SnOT,Z#l(TarΰcQ7՝&: 3NZd$Ro<ǭIQsp 4k|"t?nh/cMP)@-\:gyvp.yp̙yTg2mcL@ &Hı./[옲Q:LҢc9ccT01$Kpabʚj;*LHTn5i/!cni[LE&2xv/[Q[ա^Eg3}վwA3 lzh_SnHNU&+B/* V}F5E^pWw뙳^Pwgz+\'Ih8R?#7"||& 1uc'Pa ,r rI CrÀH'p\I''4TE $Il%E:٤ Z© 2lsΕucx;\}&FÎ`Xcc"== M5Y@0:g'|!zr<ΗSJUymH²}N,ْPРn\A.Sn&JlvY 2NuvRa+ "FE)SR;.Ia"@80YO_hxzAHR%A(90(R)(8d g`@SaH]^9\ [pQu=)UU}@T Dނ!@BUqmtSUGu)jUJV_{@0,a|aedm9Z),RvIAAN e p $\JX!Q3شWaVpwVB)әGiAyщ w)-l|1 bZb9g`At8g o@-Dt~R*h6G_^,+9T:WzW*/ޫ m۽*YzD͑8LP(>nx-}Wu2Yºc\dĮmv< rΚo 5˓ ($YQIY^|Ms/4HµılTTm,4& #xƾG2#N6bJܨNw>CAkl+%+PȠ\&.(h2'02&V̝44V.WJX }@r*3V;NH I 6 4**H3<+;? jUp0d= ?%8$f.T#T+ @d ޲YUԕcʮc#(T$<} } (X  (C/ M}.oIX:$U WNf-ڛ ;Yn*,d08& ~?Sڱt눭b yl?2l<@vTH-$J( %/@aco+ }h)J|><@U)& ͯ=} -YaE Z 8`ײUVr8p2/v#690.4pdPI\%HD1>W-K@4A:_,ordJҬ4@HGv*s #&LR#igLoE[g+JBDml?G0K"`~̋m̤>HuxJ/U*CuX 8B=NBƏ i6z&w˸ZgT%'f&)D|qf6aDD C]RoAaqĖox6e R2!בЌ<d- xqpu.'pMbpyȺٸ! 
3.ptv3@OBÒx#aP⛄׍uX=Sw`F&_|Q륚7 _4,!_0^M-che `>}ʮF|21vLrKvL3,ɐ/R`sWHlHbXu23Va$ 9Kl( !62BZoPEtDQCH 'R9$uĬCbtp7a: #:,oxm-@0nt[P2-)hCخ-b rh\N;e)K SG/~r IĎ1&O0$A}ަGmu^<ȿ 8ŷ] e.l};$;5++HVhci >qGw/ы@"V?_༕+D_;: ]`bsP<փTkHPM9C4긾Ci dUoQonb?u3P]^F8iuY20 I"goU;xJdдʉҚ/yTh8B"xm J)UkW"дE#(b,Mil]c*[n+ȶfH;@.*()VxzPDpD fۭi:lx锰#HU@ITKbDSBƮ M$Q#>,b$G}2|1k:%o _ҳ#Hot ˚N K05]4WSBF`M5#.6,OS¶-.m:%d tVô锐0hӹ<ȎҦsQ8¼\wd tJH\3oo<\3o}\trst&N4}AŒ@mr_oGe~;}_o2"Y,wG%S ' tHJ'q:"^to:+T<$D>o^t\ZmKf$ t6fmNNS O]v@lGۡ޾!MnfF6Cf#M=7YGqzPn$%#UssU8~h tv|Yo{K~8]˛.2^3"8u* ~d3ojMWl^Cf75ན.CΎ*%UwtLw]ƫj|WR%Nw<,l:7w syYIMg]'CJJL~ޗ6,V1f)xo:CβU f_u-$6eCkV;P_|l%(pqoZLCN6W ^FoqC_]˧֞y[<67#Z~a]0Hf%GR<Ï$k;$^㻄Wa_6xN #ؔyxr~&1k3o5tT'o xU]o^5jZE˚.FyX/de"{mU\el`Mge*Sqey[YC7@RTźt4]!yHӅʟ4]yXӅڟ5R>k:csRK6cMgp}SJŦꙇ6­.m:=b>B~t^MWvty}IF#Kh\tE}t?;137 Mz6@3ty!NWa}te31b#>𐕛o:3)نGb*yyә᳃7q_MWjfXc:hoK䚺ӛ7]i;>@q |xyәBhwʇ 7]iP&yVsLr,$y7^tB,Ꝿ,%N c:Ӊ l5=La$ƒ;NsJ xW[2>˜<޼6G+)=X)a$XEh̼PD˒NI |˒N ; w_tI #bC4 2hd B)$ťId,d3X)ad2Xg?r,=Nv7g`S%Kzmx%w_E, phjgY lumöȊ6TFCth[aqq`]&?*q^PN}~]EWt Ydsɺ؈Mpk?2_֎}n_ܟ#`n!}oV'[-曳#9n`^^ݘERo+PZ};CAr=<:&ICǠ(o!S'.-ch=e_An1}KȐ"\orvX %&>ɺAF hf#vno<>` 71l><4o`L.Mųt!J b  #9Mq1BS*o[~$2cZ]}9Ԇn ʕ6w"H? \'De&)egD+cK$ڱ `l^4#W3-FBY nJX>$zF!( 2u zE52oI)4o+P=$89:\z.B.xr%!"KάpC"v=/C0-@m~ 5ڊMfZN"<<5 N4Z2ݿÇxfD AHQrWyJٽ_};Yf5DVΪ&ÅhuR&V&*KYq؊6~寚׍(I lށ̅ګ9-,O>>p dA( _oMjj mj6rGri~;tP&Ft>ԃEof|d/]r%>?U"Pl{$7q>E$wk&,ܻ;+5<$RX.D.t8%EKIktO} ҹ.َh֫GX`-0#W7H+?;MnUVG턋sJ'D \wI?"%$e!OWSD'5CG20JIZ2[4kA7g } U AxaU|}Ŏ`2Y?G^赓FjV6HF/Dv~[ mMіT?m nY~[(V&M;am\KC$5RmqoZUt6 5h|jՖnl| \L;QId((#{]Xe  ˑ8|$$o5&oU o-!JY/*Q(e/4jOEٵ/;- (8V@;ojG{$7{`0F IH.ο8u$\jM-_4D|I9|P-CPjN Jss8d &Qd|j1 $F kXv#FW`S゜? 2e8@QxӀE2 paڢ0[%`F S:^fȚ#'M8PȻWQິc-},0ͱtNjdO<+h>\l29J'kKw0mƤIyñ) pHU $b 5$h*Q}h2鹐)\c4ĝb4Ji"e SV oS./C فN7?@;h#X"~^I м+E ,R" U! 4I&S-sy$߰F2h\aЏ&dl0]a-.5 WGUR>(l;vH>!hQ!2C>t X{!./f7t/;%{nFm>[l Y?l؍gLD笮lc0qur:oF˭4t闆4:{XjDe_ ":\RPϕVBg NN~Re0; ҧB"4c|GVBv~++[ =Ӏ'>`3Nd'%zMq6hCZ9eڨ驑$,M%bV)]>KniBy w=NŦ($ vd E`amCj E3ԳG[vYFJ7sTeH3tp|dxrA%R>1H&0,/>oK弫3Fj$h9h:dtqjH7ނ 1Ar)4_ncNIj֜zD s](HҜrt[oQG̽Q:SRn5'e#n?3žz?ɨ97Yi 2f< 8c :O%SfTS S^9#OG4y-G! KT. 
, G{D\B_X󦫺.D,v!}&UOe|_Efvb髊 q*zTeE_>6ۿn&q$T[MB׺uᢎ5%hi)U|}2,".EY!!wA[,MBA% t ip֌ܑeY)B(`A$]{6[Դičv2C^V 'o]~Dtip Ӧܲ^mwZBglimW*<Ho Xcv2nIqLO؄tea?mILCGHA"KC^ Bg,7RDCwh.ohǗn4 rR“ډ `їx5Q$h,Ѯ9Lq+jq~;oSHKlRΨC}瀧1+$v$Tun8n1 (\J*q;),)* r7#RuBQA14 %4vf8<`?R#~X`C"so%@5ȲgFїpA:]teB#r(gH㱡N\joaMweCp#6P= |RJ\JJXY-*H:iA` $jRJc5Y`f.eTJLL-)O5f(ވ Zpj؞!D&RCUaɰW"t-YD\@\byQwʽ ̵m$ĉ6!R.1WUͅF0f(E%% 8YG,1MhYP;*餕L@|tf, ,VbjʹĺiWrQ6ԑ#f`1)tW0TLz)ΖȎ)oXXZJ DL0Fx<3TvUdAP[ݬ TW#pTە{"Afۢ$RW%pl!CՓ A%Y/ρ!=~kzIXr`ȇ2ɵwf*Cg=:c_{1 ^v);3o_*zd_ wSХ?sPsEq] j f['&A^G7/uTˁw)P!p}\KѕZ ZS;CbAUEŲ/~aǻ61ֺ/YZgDhZ #M֌^(fcw[d_[,*bnF ,rw(f/rX2Kl0r GU͆gSׇ+XHGpRIpwr xϗwaJ*s<]Pw,nRvt)߷oySxxobR8ѾmffݥNMavzv6 os?ë[܎0G4 d˾XV'LelvGP ؍Xmk=eޖ&6(ng2~$~]6~.]ME&av]=Lҕ2!X7q3B_gv1PU)3?$M%SRd=#d$()42|$O)I-$¢!ӈV@*v 'FZ tAG} J xX!UPx[IBְQjk66kpyB:*]BjK7ZQ{ןTU<\#axNk!\1 n`0BּV5J["~O_pL^`+KYoLS$eA^̝bj{SU`|o9ȂӶynmyDݪ~e K H ۡ!qSۓ7 e߇Fn2X0wWkT zՈV@f6 ) ,bG P"4¦6:"+bSkv Cuo@-3B2ȭh5# 1'./거; )qaVg#VZB[oDXVYg5)I=iVTUӘgeٌ%7,9vP\gE8CXVJ]b>)PXM3 \ɦjEtePC,]UTPB<%.D_* ƻ&JF/)ƥQSTZn&kGT7 S$^G/gry$jDU$Ro 2/ G-6T&A{$-B{YPLtv˘Kb Kp-X6I.ki{5Vl֧*F<7ͅ^`mǰb}wUR %}[GJ4TDmTHE>LU_W/-ȑTDhUWQ{{]v%=/_bR&S4:Hƌa#C2A}Tu{ 7&g=MŪdJr;ji,5T8Xpϝˮۦt cC/'v.7u8q))\vΰebPtMG]Yƒvћ]V-mǤ6GDQنhYK OZلu;Gd& -1*y@<12䰝Wޕ6mAGK&9}8C F,=V͕UWXSѱE@"gNJA ~v%ᡟ!mzPV'^$gNI@H1eVW;=zzpQq@>>8Ёr-E>\]3Qhߧ$aP@dZ[nNjDܨ>luΘkKV\- NŠʄks''[Oiyğ:o"ţ"V>b8 iUgd01T0?Bq׮B'EiDBkZgv+uiAr#-`$vO.G>"B-NY:iCakd-[H\:]}>*qYw~&v5Ba{7޼_y6RAYGdhaMR->/ )CMih.܆ԖY/Hgũqhޭ;AY 5ˍses ][yN+Co^.+q h-@?8QJop]yޛn&:Ylac?S| G%es7+J;CLIDx&v{ "+G&蚡4*WEm H!Hwy_]#<>4!Sʪ/'c*$䦑)Z0"8$ xpN(É> pwKgD_-fS-v?Q¦MxkY "7-`䮄-QtהYs#㟐鈞}sH+Ht0/s5-W}q3UcKnqA㔩A>bU"SsND \5LS3S|CQM򜳣zejGf<ãs~a^FRs`ofslN/gjBKK^|TZb~iV DX\CpL2pGBȰd7s)a ʈtY@^-LJ֌eRh[Q4ө<'!9:k8OnSC(NG|8*\^)Dw~h9{YQo[BYVyۇ+ؗs.7n >NaޕoUr Z}(0ʹri(E'8$_J~24qRo.aw VT:ߍ V_W>YWt<cO', _#r>L8E9w(`s;}C]vƕT xD+C9n銕5@|NٳTS.ObO ىǔ29B`L]G9Ы*FJA7Rڲp(rS<Hn6ٞ^WlI\oղU޺3B& Of|^UP]qepQ6N!qqd.wC٢WKl^Y˛.I穦ߓFݿ70 .Nԕ0ˉ~|/<gqww)U]0tbp +tsU@Pm&eT*lF&Ftg@Ig2eXxK:NߩGb#KnZPOAKNQIw+8V@';U ҍcbхlu+ ޕndH[\櫓ETZ"bƩW})e3mgǣѣ (˓g;GqR4cwm`7|jc,G ;Ns܊/zum@?;W cCMPwP%^%_hTǓ,-BK]ܿچx]+z_ lqY.o;{)\nOt?$S?ݺŒ6ӿ>=hH˂A+^C) 9'\Ŗ@*i)64t~d{eVQ8iåkhSC9^Pq2}-mD1q9[)7F1myvC g;˸gvEzkLVq@:YiBk*0N(.o%}TF6ZKX138ʨxl [8lvNAvnX*J?RP(V"9y ߝ{Dfz;WǙ!\}<~WrpȬHVL*tջ.aPhI)SF ީ,"zLm)P*玳L+SD^1Cᆐ\hwQ@{4c0HaeZuiw!‘o;~Y }Olu=ys:c02=[ڻWiidUrVVKad2;4Gf-UkZ1)3=N$V ]@u^w`ox>ߗ@t0;0!c l1wΗ:e]EKcj ;z6,oo6mnzn +}m'/G4N\L2ZNFٻ/8zsF>`eɞ_gګxwm߶ f+-8Adq?B v;~n!6=+A Q o;GVfC[92U+׎8C0ٯ| }t[9RvF|-+ @; )t וiel% C9A+;wIFͫ_kKTFT< A vXw!ǽc+ZdT{b# _C# /q&4*ϖ*\$Im{v>`L9AeylޟDEnŌ>}Qqsva)I%xs[BUR*N JJŭM N)%E>(/beVQg*d̓|RqYUR*#`GvTv@;(mt.F*Npu۠+Wk3ҲX/_roKJs?U<E#Wϋj-)jvgm1iz,^=Wf\@郖 ajΤ1ؕqt *%ft K([,( Ny#*(uef(FQT✱n[Y G ϲhQ:6$ PТ~|oJRq)( zعt3(k=ώٸo*.)KJv<GV叩,.L+13FFV<_Z1'AWVVAggm.[\޶ʳ&m1>JRd{Mb 7N|eh]yzkQZJ爋\lD*F=8_ {[O8(%_ٹc*@ݟko)Ꙋ;BO6 NmJ]q@kv}堻@@1<} V XD-{p`T]N}N''-ӬW jsV"UWZ_P|*.M[kWsw#|vF& `M4F"mKOZums~_[#- +[#c g 7( R\gsnT'PPx^P0RøK} {5 t(L\vzHrWm5PA!y"gr:O`PX=Ύx3:KEB`fG> ٽ'Rn`%,ץk81UP;QIrF~ fTZ ^EǙT^(I2ؗfPw%1Hx2\>ܢNk[!ӯiS'‚rx܄r=A gX!MTU$۞QФnw%z R70630dݒԭ]0ڗ+W@A [0ƻM AMC3}3ǁ@c*^m-d۔7ݻi|7 vTl hF?WFVrj|hAi?^P b:J{Tb|)s>R( IUS)}%ފHo'!Q'.Bߊi{"~Q-؄ع& $tky};4ӑ*bx$U[gڛ+4uX"`W@HvڄDg#>;N2=@¯ א?0AY8_p;j:mcj㼢\>Kl;BD_CǙPiky<< GH?w٘9?,8m>$4d*=SU~ills^c Y)3U{cƫsZήw-g͖["1ɻ"ܷ5[jugnC :8z%QޗavVmoofKK3=[mԵլۊ.~J-ls-}ymD5YX6`ϣJM9u[c57"^M%j|b GapZpH VI ݝߓdAFa4٪ .Vn|LcX\F >}No#P=.*9w4\`O,V?3kr*ciI)BrӨaYj[J[ : ^ cx ?{]J b?uy:3YlJ GCw'J Q(y;Ot.qKzyBs+N'j} V^fD]iju::(̌_ UW& J:^g]7+y$^ :3M+O-* >ksk(%]-ª_M~:N,%T[ %T:M%PZyZu$|6i GǶ 9?\gtuYgg"VӉ=1D,9kN*U;pdDj.O$ @5Ԫ(TW+?"VBߛgP ^Y ͕Bʐ=CufHuaP\QM&i0qekc~+ 
lߚm5k{sF7Og{A!D3G:5`4fxbkVQMgcysAe8!{2գ -i+yQxaJ͝b?9`FE`(5wW6Uؐ T崗dž+_b̞ō̜K1}RL2M rϳT^g}yW ֻ.~nw$/R ./0> wĎR a{"p:.ۆa{~U:@%ʹױ"QT1WUrgҘ5%Q!kg[bFU咷섷Y o',{ic¦6:Yb./( pe+hyG4W*Pu >{gtLevipSp%DUW[C%@׭|fΕ|+i- iYڿce5=ʼ: 2/mV5UE̫'B8 ϨIhI5̚[Wiݍe yAw?oMȵykvިPb@5&lzKr{t#JqN& * e`?)R~@'"ѽ$7̱W!I{6A H&Q)II<Un@B嗘HIlkX`3gފ+Ykx[eL4_Bɷ+YFwŞE(#v}FJu \6Fct?Fct?F~~ݏѥ11V>Fct?Fct?Fct?F~ݏݏ?}~~~~F?~\~$sUo~3 WO_#׽OW_/~n?*%`b `|Kq *d<~+ǥ!(TX&&4_Rac1VkbTX \ ,)ǫ@DE;א(tӢBjGueLV8il*$8T\(5mRUQa1P*tx)f V\ ^(:p0pE Kre[BMFv2iRqTBc?T-srZLފ "Ș20Kz0%0V3J3ܷa[vK] >!@h QWt/vx+U%TY-'o1d %=!Pv88PV~zb#]x M.(Fj{=2= k>r ~D#F CO]14*6NƖ}h]x!J B+-Б? @ТoF3 vh O?=lḧcGc+㇪2+z~B^A6aE)jEsп+ \xumoG|IɅ D CIX4ַ wk/$kzfWJSc!尥h0ܥE rZRF?ozwoW E2٤t+v< )ۃY/+).a)> t+ ذ-)Պ,oe1 XтhaEN.?GiGѢ ^z{J?^KkW6DIz{aikQZϏ0qj6ckiD[ʰRml1Omgp,hP/IhBL:J`8ڥEb fYt~5|ϴ02L)3 /?qN MSp7\a b>&R-3(#2SG_C3躔Eڦ\5ك5م/ /nXRͰߏ\޸ErPO)E.qxUjt y9@Ur[Mx<[}K0c\ /om Cؠ|Wie/H#"G=ؙF؎d;hZ-8&1j?vTDeH"^"cP~ш_iTi{4a%A``"W aUq0~w*iCiť$w~Mjɺ;(T/ijH5/mXu#nӻk>)(DXl9a酥[xzN  --D9Jԇۡ>!iP5"($ukkWnONrR;bVX6N(\.m={)pgM9 sĺv-ـi eB0rLǎwg /e֘G J* 5s㈭hO'Ih̔>VaJO9I ([XC\{d*87iZT vg9 |١pe;jd3J(bQ$vڣ~ r8Z-9 H_DxxH1Y]\8dϊ(\zv,eG^s]xJ_S⹥jŰ=AүJwo)A(/ȄX4ڢP#~QsBqJ׎[L̝~2(XŔ+cyXXr<`{ly).Pl(Ns2АB& )#t*hcřaVdI+3G"q TY+C92CeE:yJ_ĉqHD>atٌWg9&ƭ/?@T&0°=Q^??,T c3{U[[aNu.ñ`gS ͬ,7;RASrP\Ba?xeK=G[GʈB#)Uj1fs&Uyz%~ij/9É~ڙpJ\P.S~䡊>h/RϒHs Z Ekv( [cc qPE4Q>(BRܶ4`)ellز{}i*zkD*R)VԘPēF2<U"֊-P8h ԅIOu_l[dGjG'ĪU_xT> B kCO{s[m5 ΨWBvA8@Q:Rj!3sĢ<3U{@4sd?}kPJ 454cJkOp697m{_qp?371>Fct?Fct?Fct?Fct11>Fct?Fctѵ1111111+ct?Fct?Fct?Fct?Fctp@W݉hj/8Me=+p*HRt@P(U}j D]j_)5(MX"]Q0:y<\+ˆq\TzZ)B}ѡ\_i_X;wub./P¿}'rVH5Gl1wO-XIJIeuS 2 dN,nUD*(~(~(\YB;*aB1%;Q6AXvQF2U$ԫa<8Ιχ+1IO']빰333{9n=\FQF5ͭ3PbrMTH6Ѥ8?[=ZQ=U^63=JyiƊ8}I>~?xyoў.JKxnOX4  A{DweoAf[һnˎA*z3v-ʽ׆K~i3CQW8u%oQw~1dv/SSU},#nPwykv]RQ&(%]{GbCx`r̛;D0IJբ.k^+7R:Kro?X:1~*mb&M֗;0mFxg?,h>n?Rzҿ߸l(ZCJ;BԼ;`̐}H0 mTəzEx$|/+Wғ>_ M *IRL_%[#PsGCIF ,J@I^bb 8&UзҹY3Dڭ(VN+b_J1~lh"SMy淲!K+J^3j,֏*-舮d嫳8W~uz?O0jHi.ǔ&g#aC (dO;߮Cb((LdpT #cN} zϩC" t\?.EM/)UΙ%:=E)CE) uKiI{Cَs׿FU;xw==AEǻ߫#VPh'p3٘ g23Վ[XȰkX,r쨼-{m\"9/k6Rkb@\c8`&x5#^sWÌT7qɕkz[[Pۈ\ٽ-l mY%tb-жw85=Ƹ'6FxW͖5ӼR1%,]s",~.ytF# 2=]Ҥ=aC-.ݙկ;A'X 8V)+Pn"eYJnB9 8pqBUj>qb} ?57O5<O2kгPzB[j dX  `e+.g._E^xvb$X㉤} ьi>pS cv'Mm1;in[7yw6/O[{1U1 >IٟL ?9טQɡ\@ {='mg_T}r5|ߕ=2^ܕ6|y;ih u%28{CO1 27 OgUkL NѠAUPX eDu* 3ZO%1TҒRI4dOSI4| v$%|ŜSI">e[_13Ok{쌮~0>l2alV=i01 xϵUb585mi ʯt$-n8LQ@}4O@17UGಮj9|5?DZE\D*Qv[֙UheM34 邰lk5{oN(@!_S6H/;Qf?~̭&isI΃C:oyL ~cV$+Y:pH\ڳF߶ ~'XO1O"o{@pC@" K8r4& MV8#!uJ^S^'(hb'mK v˽nr,%V%2pޕ#KJCHݖ9ow̭hQ D@JZW;POjq Z w+ybo%[^+۠2-%{Vʀ| bvVE`h왕# o~FHmȶ.:0@)I4ԑ}΋#;C~J%1^LD'^t 6,r?,r?,r?,r?,ׇ~X~X~X岱p^.oR>"#r?"#r?"G~DGRP>"#r?"#r?"#r?"#r?"#r?"#r?"#r?"G~DG~DG~DG_ocE=(~a09׿o_mOQ.ۅ\iPBPz.2琝?w.[wMVJA%(F4kjne'RYTb*Pp+nChx䕨 *U=2nhv+ U< JeRx`,P*$24 RᇚF xZQh E)l?p)2hKkʊ @oe*j2 -inљSӔq L-ʥ` oePh+[k񕭵Jt2)+ѭp+E vf=BۭmmF%{”n%0|ùVjaw+E=~-0R>rTx%x+Lʌ E/V8n)+n]僨l~m𥞋zV4'jY,n׭㞋vt+C+[aV&<*Ϸm2J[ŭD z.=m>^TNa6V(Nq޳ӻ+|+e̷PFV&[ Hq•˷B7u+:zQ%V8Ŕr{_bk! aFVη2lG[{> o|WCMq0*Tn(vAE3->Գ ~=i1=`KBHPK2a[8!X^RlJ#M;3g!s/P*ޛrw{սyJ%ʭaKm8Rto)H lZtEYRoPR;, V7B–T![Ѣ\ u˖bs;ޥxm;Ζv.joJx\RykJєQ0m)voQeR<_RVdL!px)EN1Ft 2z-.T-)X)ϧoj`uܦʰŹGuz8[aq22^KJRx26y.c qeq x5Nc!3Bk&J5]tɸxiwae^NKr^9&[[J&rè*| 9{O^zIv1 {AqHbQm9(Xt)G?S;xx$r+]W`29]ۑ4[~V]k[8Pݮiг!M3nח[r)=Zh/q ״9}SbKVHՔ$Ÿx·:!VW0ފwq-HSBFI9z{>`X]tA?/,n-<=A܊̗Dԁحt oCOP* _|zQt+K599g{ VB*q&fYc[i4;B˻3<hy^%c[Yr.%l` jg;nOۭ`^⺩vq+E˂[PjEi㉡l?夶_.98)z<TĪY2Ɋ[q[Q few.msEE{ׇ C"jR,_ V )vY}Q35C9]YOjh'.ne0gSb0YTTPTnEʿwv&~Mι}Ps41| |QBVsH-\ 8/s(m( plP6ޡ9w+ۯJJinSS,Jgfj!tX8Jqqahʨyиz(c+9V(Lzve-Ẉ-U>JR*#z8i(c/5sԥtj[\چn䙆mXR~sW4wb`=^ oY®Vs<(zƸ,Xaj>? 
    Via Firenze 11 Paderno Dugnano 20037 Italy
    email: \email{spedygiorgio@gmail.com}
    url: www.statisticaladvisor.com
  - name: "Tae Seung Kang"
    affiliation: Ph.D student, Computer \& Information Science \& Engineering
    address: >
      University of Florida
      Gainesville, FL, USA
    email: \email{tskang3@gmail.com}
  - name: "Sai Bhargav Yalamanchi"
    affiliation: B-Tech student, Electrical Engineering
    address: >
      Indian Institute of Technology, Bombay
      Mumbai - 400 076, India
    email: \email{bhargavcoolboy@gmail.com}
  - name: "Deepak Yadav"
    affiliation: B-Tech student, Computer Science and Engineering
    address: >
      Indian Institute of Technology, Varanasi
      Uttar Pradesh - 221 005, India
    email: \email{deepakyadav.iitbhu@gmail.com}
  - name: "Ignacio Cordón"
    affiliation: Software Engineer
    address: >
      Madrid (Madrid), Spain
    email: \email{nacho.cordon.castillo@gmail.com}
preamble: >
  \author{\small{Giorgio Alfredo Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi, Deepak Yadav, Ignacio Cordón}}
  \Plainauthor{G.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón}
  \usepackage{graphicx}
  \usepackage{amsmath}
  \usepackage{longtable}
  \usepackage{booktabs}
  \setkeys{Gin}{width=0.8\textwidth}
  \usepackage{amsfonts}
abstract: |
  The \pkg{markovchain} package aims to fill a gap within the \proglang{R} framework, providing S4 classes and methods for easily handling discrete time Markov chains, homogeneous and simple inhomogeneous ones, as well as continuous time Markov chains. The S4 classes for handling and analysing discrete and continuous time Markov chains are presented, as well as functions and methods for performing probabilistic and statistical analysis. Finally, some examples in which the package's functions are applied to Economics, Finance and Natural Sciences topics are shown.
output: if (rmarkdown::pandoc_version() < "2.2") function(...) {
    rmarkdown::pdf_document(template = "./template.tex", ...)
  } else function(...) {
    bookdown::pdf_book(base_format = rticles::jss_article, ...)
  }
vignette: >
  %\VignetteIndexEntry{An introduction to markovchain package}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
keywords:
  plain: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions]
  formatted: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions]
documentclass: jss
classoption: nojss
bibliography: markovchainBiblio.bib
pkgdown:
  as_is: true
  extension: pdf
---

```{r global_options, include=FALSE}
knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%")
set.seed(123)
library(knitr)
hook_output <- knit_hooks$get("output")
knit_hooks$set(output = function(x, options) {
  lines <- options$output.lines
  if (is.null(lines)) {
    return(hook_output(x, options))  # pass to default hook
  }
  x <- unlist(strsplit(x, "\n"))
  more <- "..."
  if (length(lines) == 1) {   # first n lines
    if (length(x) > lines) {
      # truncate the output, but add ....
      x <- c(head(x, lines), more)
    }
  } else {
    x <- c(more, x[lines], more)
  }
  # paste these lines together
  x <- paste(c(x, ""), collapse = "\n")
  hook_output(x, options)
})
```

# Introduction

Markov chains represent a class of stochastic processes of great interest for a wide spectrum of practical applications. In particular, discrete time Markov chains (DTMC) make it possible to model the transition probabilities between discrete states by means of matrices. Various \proglang{R} packages deal with models that are based on Markov chains:

* \pkg{msm} [@msmR] handles Multi-State Models for panel data.
* \pkg{mcmcR} [@mcmcR] implements the Monte Carlo Markov Chain approach.
* \pkg{hmm} [@hmmR] fits hidden Markov models with covariates.
* \pkg{mstate} fits Multi-State Models based on Markov chains for survival analysis [@mstateR].

Nevertheless, the \proglang{R} statistical environment [@rSoftware] seems to lack a simple package that coherently defines S4 classes for discrete Markov chains and allows one to perform probabilistic analysis, statistical inference and applications. For the sake of completeness, \pkg{markovchain} is the second package specifically dedicated to DTMC analysis, \pkg{DTMCPack} [@DTMCPackR] being the first one. Notwithstanding, the \pkg{markovchain} package [@pkg:markovchain] aims to offer more flexibility in handling DTMC than other existing solutions, providing S4 classes for both homogeneous and semi-homogeneous Markov chains as well as methods suited to perform statistical and probabilistic analysis. The \pkg{markovchain} package depends on the following \proglang{R} packages: \pkg{expm} [@expmR] to perform efficient matrix powers; \pkg{igraph} [@pkg:igraph] to perform pretty plotting of `markovchain` objects and \pkg{matlab} [@pkg:matlab], which contains functions for matrix management and calculations that emulate those within the \proglang{MATLAB} environment. Moreover, other scientific software provides functions specifically designed to analyze DTMC, such as \proglang{Mathematica} 9 [@mathematica9].
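As a quick illustration of why \pkg{expm} appears among the dependencies, the following minimal sketch raises a small transition matrix to a power with the `%^%` operator exported by \pkg{expm}; the matrix values, the object name and the chunk label are illustrative choices made here and are not taken from the package data.

```{r expmPowerSketch}
library(expm)
# illustrative 2 x 2 transition matrix (made-up values)
toyMatrix <- matrix(c(0.9, 0.1,
                      0.5, 0.5), nrow = 2, byrow = TRUE)
toyMatrix %^% 10  # ten-step transition probabilities via an efficient matrix power
```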
The paper is structured as follows: Section \@ref(sec:mathematics) briefly reviews mathematics and definitions regarding DTMC, Section \@ref(sec:structure) discusses how to handle and manage Markov chain objects within the package, Section \@ref(sec:probability) and Section \@ref(sec:statistics) show how to perform probabilistic and statistical modelling, while Section \@ref(sec:applications) presents some applied examples from various fields analyzed by means of the \pkg{markovchain} package.

# Review of core mathematical concepts {#sec:mathematics}

## General Definitions

A DTMC is a sequence of random variables $X_{1},\: X_{2}\: ,\ldots,\:X_{n},\ldots$ characterized by the Markov property (also known as the memoryless property, see Equation \ref{eq:markovProp}). The Markov property states that the distribution of the forthcoming state $X_{n+1}$ depends only on the current state $X_{n}$ and doesn't depend on the previous ones $X_{n-1},\: X_{n-2},\ldots,\: X_{1}$.

\begin{equation}
Pr\left(X_{n+1}=x_{n+1}\left|X_{1}=x_{1},X_{2}=x_{2},...,X_{n}=x_{n}\right.\right)=Pr\left(X_{n+1}=x_{n+1}\left|X_{n}=x_{n}\right.\right).
\label{eq:markovProp}
\end{equation}

The set of possible states $S=\left\{ s_{1},s_{2},...,s_{r}\right\}$ of $X_{n}$ can be finite or countable and it is named the state space of the chain. The chain moves from one state to another (this change is named either 'transition' or 'step') and the probability $p_{ij}$ of moving from state $s_{i}$ to state $s_{j}$ in one step is named the transition probability:

\begin{equation}
p_{ij}=Pr\left(X_{1}=s_{j}\left|X_{0}=s_{i}\right.\right).
\label{eq:trProp}
\end{equation}

The probability of moving from state $i$ to $j$ in $n$ steps is denoted by $p_{ij}^{(n)}=Pr\left(X_{n}=s_{j}\left|X_{0}=s_{i}\right.\right)$. A DTMC is called time-homogeneous if the property shown in Equation \ref{eq:mcHom} holds. Time homogeneity implies no change in the underlying transition probabilities as time goes on.

\begin{equation}
Pr\left(X_{n+1}=s_{j}\left|X_{n}=s_{i}\right.\right)=Pr\left(X_{n}=s_{j}\left|X_{n-1}=s_{i}\right.\right).
\label{eq:mcHom}
\end{equation}

If the Markov chain is time-homogeneous, then $p_{ij}=Pr\left(X_{k+1}=s_{j}\left|X_{k}=s_{i}\right.\right)$ and \newline $p_{ij}^{(n)}=Pr\left(X_{n+k}=s_{j}\left|X_{k}=s_{i}\right.\right)$, where $k>0$. The probability distribution of transitions from one state to another can be represented by a transition matrix $P=(p_{ij})_{i,j}$, where each element in position $(i,j)$ represents the transition probability $p_{ij}$. E.g., if $r=3$ the transition matrix $P$ is shown in Equation \ref{eq:trPropEx}

\begin{equation}
P=\left[\begin{array}{ccc}
p_{11} & p_{12} & p_{13}\\
p_{21} & p_{22} & p_{23}\\
p_{31} & p_{32} & p_{33}
\end{array}\right].
\label{eq:trPropEx}
\end{equation}

The distribution over the states can be written in the form of a stochastic row vector $x$ (the term stochastic means that $\sum_{i}x_{i}=1, x_{i} \geq 0$): e.g., if the current state of $x$ is $s_{2}$, $x=\left(0\:1\:0\right)$. As a consequence, the relation between $x^{(1)}$ and $x^{(0)}$ is $x^{(1)}=x^{(0)}P$ and, recursively, we get $x^{(2)}=x^{(0)}P^{2}$ and $x^{(n)}=x^{(0)}P^{n},\, n>0$.

DTMCs are explained in most theory books on stochastic processes, see \cite{bremaud1999discrete} and \cite{dobrow2016introduction} for example. Valuable references available online are: \cite{konstantopoulos2009markov}, \cite{probBook} and \cite{bardPpt}.
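The $n$-step relation $x^{(n)}=x^{(0)}P^{n}$ recalled above can be checked directly with base \proglang{R} matrix algebra. The following is a minimal sketch; the transition matrix, the starting vector and the chunk label are illustrative values chosen for this example and are not used elsewhere in the paper.

```{r nStepSketch}
# illustrative transition matrix and starting distribution (made-up values)
exampleMatrix <- matrix(c(0.5, 0.3, 0.2,
                          0.1, 0.6, 0.3,
                          0.2, 0.2, 0.6), nrow = 3, byrow = TRUE)
startVector <- c(1, 0, 0)                     # chain starts in the first state
startVector %*% exampleMatrix                 # x^(1) = x^(0) P
startVector %*% exampleMatrix %*% exampleMatrix  # x^(2) = x^(0) P^2
```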
## Properties and classification of states {#sec:properties}

A state $s_{j}$ is said to be accessible from state $s_{i}$ (written $s_{i}\rightarrow s_{j}$) if a system starting in state $s_{i}$ has a positive probability of reaching state $s_{j}$ at some point, i.e., $\exists n>0:\: p_{ij}^{n}>0$. If both $s_{i}\rightarrow s_{j}$ and $s_{j}\rightarrow s_{i}$, then $s_{i}$ and $s_{j}$ are said to communicate.

A communicating class is defined to be a set of states that communicate. A DTMC can be composed of one or more communicating classes. If the DTMC is composed of only one communicating class (i.e., if all states in the chain communicate), then it is said to be irreducible. A communicating class is said to be closed if no states outside of the class can be reached from any state inside it.

If $p_{ii}=1$, $s_{i}$ is defined as an absorbing state: an absorbing state corresponds to a closed communicating class composed of one state only.

The canonical form of a DTMC transition matrix is a matrix having a block form, where the closed communicating classes are shown at the beginning of the diagonal matrix.

A state $s_{i}$ has period $k_{i}$ if any return to state $s_{i}$ must occur in multiples of $k_{i}$ steps, that is $k_{i}=gcd\left\{ n:Pr\left(X_{n}=s_{i}\left|X_{0}=s_{i}\right.\right)>0\right\}$, where $gcd$ is the greatest common divisor. If $k_{i}=1$ the state $s_{i}$ is said to be aperiodic, else if $k_{i}>1$ the state $s_{i}$ is periodic with period $k_{i}$. Loosely speaking, $s_{i}$ is periodic if it can only return to itself after a fixed number of transitions $k_{i}>1$ (or a multiple of $k_{i}$), else it is aperiodic.

If states $s_{i}$ and $s_{j}$ belong to the same communicating class, then they have the same period $k_{i}$. As a consequence, all the states of an irreducible DTMC share the same periodicity. This periodicity is also considered the DTMC periodicity.

It is also possible to classify states according to their recurrence. Let $T^{x\rightarrow x}$ be the number of steps needed to return to state $x$, given that the chain starts in $x$.

* A state $x$ is recurrent if $P(T^{x\rightarrow x}<+\infty)=1$ (equivalently $P(T^{x\rightarrow x}=+\infty)=0$). In addition:
    1. A state $x$ is null recurrent if $E(T^{x\rightarrow x})=+\infty$.
    2. A state $x$ is positive recurrent if $E(T^{x\rightarrow x})<+\infty$.
    3. A state $x$ is absorbing if $P(T^{x\rightarrow x}=1)=1$.
* A state $x$ is transient if $P(T^{x\rightarrow x}<+\infty)<1$ (equivalently $P(T^{x\rightarrow x}=+\infty)>0$).

It is possible to analyze the time needed to reach a certain state. The first passage time (or hitting time) from state $s_{i}$ to state $s_{j}$ is the number $T_{ij}$ of steps taken by the chain until it arrives for the first time in state $s_{j}$, given that $X_{0} = s_{i}$. The probability distribution of $T_{ij}$ is defined by Equation \ref{eq:fpt1}

\begin{equation}
{h_{ij}}^{\left( n \right)} = Pr\left( {T_{ij} = n} \right) = Pr\left( X_n = s_j,X_{n - 1} \ne s_{j}, \ldots ,X_1 \ne s_j |X_0 = s_i \right)
\label{eq:fpt1}
\end{equation}

and can be found recursively using Equation \ref{eq:ftp2}, given that ${h_{ij}}^{\left( 1 \right)} = p_{ij}$.

\begin{equation}
{h_{ij}}^{\left( n \right)} = \sum\limits_{k \in S - \left\{ s_{j} \right\}}^{} {{p_{ik}}{h_{kj}}^{\left( {n - 1} \right)}}.
\label{eq:ftp2}
\end{equation}

A commonly used quantity related to $h$ is its average value, i.e.
the \emph{mean first passage time} (also expected hitting time), namely $\bar h_{ij}= \sum_{n=1}^{\infty} n \,h_{ij}^{(n)}$.

If in the definition of the first passage time we let $s_{i}=s_{j}$, we obtain the first recurrence time $T_{i}=\inf \{ n\geq1:X_{n}=s_{i}|X_{0}=s_{i} \}$. We could also ask ourselves what the *mean recurrence time* is, an average of the mean first recurrence times:

\[ r_i = \sum_{k = 1}^{\infty} k \cdot P(T_i = k) \]

Revisiting the definition of recurrence and transience: a state $s_{i}$ is said to be recurrent if it is visited infinitely often, i.e., $Pr(T_{i}<+\infty|X_{0}=s_{i})=1$. Conversely, $s_{i}$ is called transient if there is a positive probability that the chain will never return to $s_{i}$, i.e., $Pr(T_{i}=+\infty|X_{0}=s_{i})>0$.

Given a time homogeneous Markov chain with transition matrix \emph{P}, a stationary distribution \emph{z} is a stochastic row vector such that $z=z\cdot P$, where $0\leq z_{j}\leq 1 \: \forall j$ and $\sum_{j}z_{j}=1$.

If a DTMC $\{X_{n}\}$ is irreducible and aperiodic, then it has a limit distribution and this distribution is stationary. As a consequence, if $P$ is the $k\times k$ transition matrix of the chain and $z=\left(z_{1},...,z_{k}\right)$ is the unique eigenvector of $P$ such that $\sum_{i=1}^{k}z_{i}=1$, then we get

\begin{equation}
\lim_{n\rightarrow\infty}P^{n}=Z,
\label{eq:limMc}
\end{equation}

where $Z$ is the matrix having all rows equal to $z$. The stationary distribution of $\{X_{n}\}$ is represented by $z$.

A matrix $A$ is called primitive if all of its entries are strictly positive, and we write it $A > 0$. If the transition matrix $P$ for a DTMC has some primitive power, i.e. there exists $m > 0$ such that $P^m > 0$, then the DTMC is said to be regular. In fact, being regular is equivalent to being irreducible and aperiodic. All regular DTMCs are irreducible; the converse is not true.

Given two absorbing states $s_A$ (source) and $s_B$ (sink), the \emph{committor probability} $q_j^{(AB)}$ is the probability that a process starting in state $s_j$ is absorbed in state $s_B$ (rather than $s_A$) [@noe_constructing_2009]. It can be computed via

\begin{equation}
q_j^{(AB)} = \sum_{k} P_{jk}q_k^{(AB)} \quad \mbox{for } j \notin \{A, B\}, \quad \mbox{with} \quad q_A^{(AB)} = 0 \quad \mbox{and} \quad q_B^{(AB)} = 1
\end{equation}

Note we can also define the hitting probability from $i$ to $j$ as the probability of ever reaching the state $j$ if our initial state is $i$:

\begin{equation}
h_{i,j} = Pr(T_{ij} < \infty) = \sum_{n = 0}^{\infty} h_{ij}^{(n)}
\label{eq:hitting-probs}
\end{equation}

In a DTMC with a finite set of states, we know that a transient state communicates with at least one recurrent state. If the chain starts in a transient state, once it hits a recurrent state it gets caught in that state's recurrent class, and we cannot expect it to go back to the initial state. Given a transient state $i$ we can define the *absorption probability* to the recurrent state $j$ as the probability that the first recurrent state that the Markov chain visits (and therefore the recurrent class it gets absorbed by) is $j$, $f^{*}_{ij}$. We can also define the *mean absorption time* as the mean number of steps the chain, started in the transient state $i$, takes until it hits any recurrent state, $b_i$.

## A short example

Consider the following numerical example. Suppose we have a DTMC with a set of 3 possible states $S=\{s_{1}, s_{2}, s_{3}\}$.
Let the transition matrix be:

\begin{equation}
P=\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right].
\label{eq:trPropExEx1}
\end{equation}

In $P$, $p_{11}=0.5$ is the probability that $X_{1}=s_{1}$ given that $X_{0}=s_{1}$ was observed, and so on. It is easy to see that the chain is irreducible since all the states communicate (it is made of one communicating class only).

Suppose that the current state of the chain is $X_{0}=s_{2}$, i.e., $x^{(0)}=(0\:1\:0)$, then the probability distribution of states after 1 and 2 steps can be computed as shown in Equations \@ref(eq:trPropExEx2) and \@ref(eq:trPropExEx3).

\begin{equation}
x^{(1)}=\left(0\:1\:0\right)\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right]=\left(0.15\:0.45\:0.4\right).
\label{eq:trPropExEx2}
\end{equation}

\begin{equation}
x^{(n)}=x^{(n-1)}P \to \left(0.15\:0.45\:0.4\right)\left[\begin{array}{ccc}
0.5 & 0.2 & 0.3\\
0.15 & 0.45 & 0.4\\
0.25 & 0.35 & 0.4
\end{array}\right]=\left(0.2425\:0.3725\:0.385\right).
\label{eq:trPropExEx3}
\end{equation}

If we were interested in the probability of being in the state $s_{3}$ in the second step, then $Pr\left(X_{2}=s_{3}\left|X_{0}=s_{2}\right.\right)=0.385$.

\newpage

# The structure of the package {#sec:structure}

## Creating markovchain objects

The package is loaded within the \proglang{R} command line as follows:

```{r, load, results='hide', message=FALSE}
library("markovchain")
```

The `markovchain` and `markovchainList` S4 classes [@chambers] are defined within the \pkg{markovchain} package as displayed:

```{r, showClass, echo=FALSE}
showClass("markovchain")
showClass("markovchainList")
```

The first class has been designed to handle homogeneous Markov chain processes, while the latter (which is itself a list of `markovchain` objects) has been designed to handle semi-homogeneous Markov chain processes.

Any element of the `markovchain` class comprises the following slots:

1. `states`: a character vector, listing the states for which transition probabilities are defined.
2. `byrow`: a logical element, indicating whether transition probabilities are shown by row or by column.
3. `transitionMatrix`: the probabilities of the transition matrix.
4. `name`: optional character element to name the DTMC.

The `markovchainList` objects are defined by the following slots:

1. `markovchains`: a list of `markovchain` objects.
2. `name`: optional character element to name the DTMC.

The `markovchain` objects can be created either in a long way, as the following code shows

```{r mcInitLong}
weatherStates <- c("sunny", "cloudy", "rain")
byRow <- TRUE
weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1,
                                 0.3, 0.4, 0.3,
                                 0.2, 0.45, 0.35), byrow = byRow, nrow = 3,
                        dimnames = list(weatherStates, weatherStates))
mcWeather <- new("markovchain", states = weatherStates, byrow = byRow,
                 transitionMatrix = weatherMatrix, name = "Weather")
```

or in a shorter way, displayed below

```{r mcInitShort}
mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"),
                 transitionMatrix = matrix(data = c(0.70, 0.2, 0.1,
                                                    0.3, 0.4, 0.3,
                                                    0.2, 0.45, 0.35),
                                           byrow = byRow, nrow = 3),
                 name = "Weather")
```

When `new("markovchain")` is called alone, a default Markov chain is created.
```{r defaultMc} defaultMc <- new("markovchain") ``` The quicker way to create `markovchain` objects is made possible thanks to the implemented `initialize` S4 method that checks that: * the `transitionMatrix` to be a transition matrix, i.e., all entries to be probabilities and either all rows or all columns to sum up to one. * the columns and rows names of `transitionMatrix` to be defined and to coincide with `states` vector slot. The `markovchain` objects can be collected in a list within `markovchainList` S4 objects as following example shows. ```{r intromcList} mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains") ``` ## Handling markovchain objects Table \@ref(tab:methodsToHandleMc) lists which methods handle and manipulate `markovchain` objects. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Purpose \\ \hline \hline \code{*} & Direct multiplication for transition matrices.\\ \code{\textasciicircum{}} & Compute the power \code{markovchain} of a given one.\\ \code{[} & Direct access to the elements of the transition matrix.\\ \code{==} & Equality operator between two transition matrices.\\ \code{!=} & Inequality operator between two transition matrices.\\ \code{as} & Operator to convert \code{markovchain} objects into \code{data.frame} and\\ & \code{table} object.\\ \code{dim} & Dimension of the transition matrix.\\ \code{names} & Equal to \code{states}.\\ \code{names<-} & Change the \code{states} name.\\ \code{name} & Get the name of \code{markovchain object}.\\ \code{name<-} & Change the name of \code{markovchain object}.\\ \code{plot} & \code{plot} method for \code{markovchain} objects.\\ \code{print} & \code{print} method for \code{markovchain} objects.\\ \code{show} & \code{show} method for \code{markovchain} objects.\\ \code{sort} & \code{sort} method for \code{markovchain} objects, in terms of their states.\\ \code{states} & Name of the transition states.\\ \code{t} & Transposition operator (which switches \code{byrow} `slot value and modifies \\ & the transition matrix coherently).\\ \hline \end{tabular} \caption{\pkg{markovchain} methods for handling \code{markovchain} objects.} \label{tab:methodsToHandleMc} \end{table} The examples that follow shows how operations on `markovchain` objects can be easily performed. For example, using the previously defined matrix we can find what is the probability distribution of expected weather states in two and seven days, given the actual state to be cloudy. ```{r operations} initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ``` A similar answer could have been obtained defining the vector of probabilities as a column vector. A column - defined probability matrix could be set up either creating a new matrix or transposing an existing `markovchain` object thanks to the `t` method. 
```{r operations2} initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ``` The initial state vector previously shown can not necessarily be a probability vector, as the code that follows shows: ```{r fval} fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ``` Basic methods have been defined for `markovchain` objects to quickly get states and transition matrix dimension. ```{r otherMethods} states(mcWeather) names(mcWeather) dim(mcWeather) ``` Methods are available to set and get the name of `markovchain` object. ```{r otherMethods2} name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ``` Also it is possible to alphabetically sort the transition matrix: ```{r sortMethod} markovchain:::sort(mcWeather) ``` A direct access to transition probabilities is provided both by `transitionProbability` method and `"["` method. ```{r transProb} transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ``` The transition matrix of a `markovchain` object can be displayed using `print` or `show` methods (the latter being less verbose). Similarly, the underlying transition probability diagram can be plotted by the use of `plot` method (as shown in Figure \@ref(fig:mcPlot)) which is based on \pkg{igraph} package [@pkg:igraph]. `plot` method for `markovchain` objects is a wrapper of `plot.igraph` for `igraph` S4 objects defined within the \pkg{igraph} package. Additional parameters can be passed to `plot` function to control the network graph layout. There are also \pkg{diagram} and \pkg{DiagrammeR} ways available for plotting as shown in Figure \@ref(fig:mcPlotdiagram). The `plot` function also uses `communicatingClasses` function to separate out states of different communicating classes. All states that belong to one class have same color. ```{r printAndShow} print(mcWeather) show(mcWeather) ``` ```{r mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"} if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ``` ```{r mcPlotdiagram, echo=FALSE, fig.cap="Weather example. Markov chain plot with diagram"} if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ``` Import and export from some specific classes is possible, as shown in Figure \@ref(fig:fromAndTo) and in the following code. 
```{r exportImport1} mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ``` ```{r exportImport2} if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ``` from etm (now archived as of September 2020): ```{r exporImport3} if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, "markovchain") etm2mc } else { message("etm unavailable") } ``` ```{r fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"} library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ``` Coerce from `matrix` method, as the code below shows, represents another approach to create a `markovchain` method starting from a given squared probability matrix. ```{r exportImport4} myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ``` Semi-homogeneous Markov chains can be created with the aid of `markovchainList` object. The example that follows arises from health insurance, where the costs associated to patients in a Continuous Care Health Community (CCHC) are modeled by a semi-homogeneous Markov Chain, since the transition probabilities change by year. Methods explicitly written for `markovchainList` objects are: `print`, `show`, `dim` and `[`. ```{r cchcMcList} stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ``` It is possible to perform direct access to `markovchainList` elements, as well as to determine the number of `markovchain` objects by which a `markovchainList` object is composed. ```{r cchcMcList2} mcCCRC[[1]] dim(mcCCRC) ``` The `markovchain` package contains some data found in the literature related to DTMC models (see Section \@ref(sec:applications). Table \@ref(tab:datasets) lists datasets and tables included within the current release of the package. 
\begin{table}[h] \centering \begin{tabular}{p{0.2\textwidth}p{0.75\textwidth}} \hline Dataset & Description \\ \hline \hline \code{blanden} & Mobility across income quartiles, \cite{blandenEtAlii}.\\ \code{craigsendi} & CD4 cells, \cite{craigSendi}.\\ \code{kullback} & raw transition matrices for testing homogeneity, \cite{kullback1962tests}.\\ \code{preproglucacon} & Preproglucacon DNA basis, \cite{averyHenderson}.\\ \code{rain} & Alofi Island rains, \cite{averyHenderson}.\\ \code{holson} & Individual states trajectories.\\ \code{sales} & Sales of six beverages in Hong Kong \cite{ching2008higher}. \\ \hline \end{tabular} \caption{The \pkg{markovchain} \code{data.frame} and \code{table}.} \label{tab:datasets} \end{table} Finally, Table \@ref(tab:demos) lists the demos included in the demo directory of the package. \begin{table}[h] \centering \begin{tabular}{lll} \hline R Code File & Description \\ \hline \hline \code{bard.R} & Structural analysis of Markov chains from Bard PPT.\\ \code{examples.R} & Notable Markov chains, e.g., The Gambler Ruin chain.\\ \code{quickStart.R} & Generic examples.\\ \code{extractMatrices.R} & Generic examples.\\ \hline \end{tabular} \caption{The \pkg{markovchain} demos.} \label{tab:demos} \end{table} # Probability with markovchain objects {#sec:probability} The \pkg{markovchain} package contains functions to analyse DTMC from a probabilistic perspective. For example, the package provides methods to find stationary distributions and identifying absorbing and transient states. Many of these methods come from \proglang{MATLAB} listings that have been ported into \proglang{R}. For a full description of the underlying theory and algorithm the interested reader can overview the original \proglang{MATLAB} listings, \cite{renaldoMatlab} and \cite{montgomery}. Table \@ref(tab:methodsToStats) shows methods that can be applied on `markovchain` objects to perform probabilistic analysis. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Returns \\ \hline \hline \code{absorbingStates} & the absorbing states of the transition matrix, if any.\\ \code{steadyStates} & the vector(s) of steady state(s) in matrix form. \\ \code{meanFirstPassageTime} & matrix or vector of mean first passage times. \\ \code{meanRecurrenceTime} & vector of mean number of steps to return to each recurrent state \\ \code{hittingProbabilities} & matrix of hitting probabilities for a Markov chain. \\ \code{meanAbsorptionTime} & expected number of steps for a transient state to be \\ & absorbed by any recurrent class \\ \code{absorptionProbabilities} & probabilities of transient states of being \\ & absorbed by each recurrent state \\ \code{committorAB} & committor probabilities \\ \code{communicatingClasses} & list of communicating classes. \\ & $s_{j}$, given actual state $s_{i}$. \\ \code{canonicForm} & the transition matrix into canonic form. \\ \code{is.accessible} & checks whether a state j is reachable from state i. \\ \code{is.irreducible} & checks whether a DTMC is irreducible. \\ \code{is.regular} & checks whether a DTMC is regular. \\ \code{period} & the period of an irreducible DTMC. \\ \code{recurrentClasses} & list of recurrent communicating classes. \\ \code{transientClasses} & list of transient communicating classes. \\ \code{recurrentStates} & the recurrent states of the transition matrix. \\ \code{transientStates} & the transient states of the transition matrix, if any. \\ \code{summary} & DTMC summary. 
\\ \hline \end{tabular} \caption{\pkg{markovchain} methods: statistical operations.} \label{tab:methodsToStats} \end{table}

## Conditional distributions

The conditional distribution of weather states, given that the current day's weather is sunny, is given by the following code.

```{r conditionalDistr}
conditionalDistribution(mcWeather, "sunny")
```

## Stationary states

A stationary (steady state, or equilibrium) vector is a probability vector such that Equation \ref{eq:steadystat2} holds
\begin{equation}
\begin{matrix}
0\leq \pi_j \leq 1\\
\sum_{j \in S} \pi_j = 1\\
\pi \cdot P = \pi
\end{matrix}
\label{eq:steadystat2}
\end{equation}

Steady states are associated with the eigenvalue of $P$ equal to one. We could be tempted to compute them by solving for the eigenvalues / eigenvectors of the matrix, taking the real parts (since if $u + iv$ is an eigenvector of the matrix $P$, then $Re(u + iv) = u$ and $Im(u + iv) = v$ are eigenvectors) and normalizing by the vector sum; however, this approach raises some concerns:

1. If $u, v \in \mathbb{R}^n$ are linearly independent eigenvectors associated with the eigenvalue $1$, then $u + iv$ and $u + iu$ are also linearly independent eigenvectors, and their real parts coincide. Clearly, if we took real parts we would be losing an eigenvector, because we cannot know in advance whether the underlying algorithm used to compute the eigenvectors is going to output something similar to what we described. We should be agnostic to the underlying eigenvector computation algorithm.
2. Imagine the identity $P$ of dimensions $2 \times 2$. Its eigenvectors associated with the eigenvalue $1$ are $u = (1, 0)$ and $v = (0, 1)$. However, the underlying algorithm could return $(1, -2)$ and $(-2, 1)$ instead, which are linear combinations of the aforementioned ones and therefore eigenvectors too. Normalizing by their sum, we would get $(-1, 2)$ and $(2, -1)$, which obviously are not probability measures. Again, we should be agnostic to the underlying eigen computation algorithm.
3. Algorithms to compute eigenvalues / eigenvectors are computationally expensive: they are iterative, and we cannot predict a fixed number of iterations for them. Moreover, each iteration takes $\mathcal{O}(m^2)$ or $\mathcal{O}(m^3)$ algorithmic complexity, with $m$ the number of states.

We are going to use the fact that every irreducible DTMC has a unique steady state, that is, if $M$ is the matrix for an irreducible DTMC (all states communicate with each other), then there exists a unique $v \in \mathbb{R}^m$ such that:
\[
v \cdot M = v, \qquad \sum_{i = 1}^m v_i = 1
\]

Also, we will use the fact that a steady state for a DTMC assigns $0$ to the transient states. The canonical form of a (by row) stochastic matrix looks like:
\[
\left(\begin{array}{c|c|c|c|c}
M_1 & 0 & 0 & \ldots & 0 \\ \hline
0 & M_2 & 0 & \ldots & 0 \\ \hline
0 & 0 & M_3 & \ldots & 0 \\ \hline
\vdots & \vdots & \vdots & \ddots & \vdots \\ \hline
A_1 & A_2 & A_3 & \ldots & R
\end{array}\right)
\]
where the $M_i$ correspond to irreducible sub-chains, the blocks $A_i$ correspond to the transitions from transient states to each of the recurrent classes, and $R$ collects the transitions from the transient states to themselves.

Also, we should note that a Markov chain has exactly the same number of steady states as recurrent classes. Therefore, we have coded the following algorithm ^[We would like to thank Prof. Christophe Dutang for his contributions to the development of this method.
He coded a first improvement of the original `steadyStates` method and we could not have reached the current correctness without his previous work]:

1. Identify the recurrent classes $[C_1, \ldots, C_l]$ with the \texttt{recurrentClasses} function.
2. Take each class $C_i$ and compute the sub-matrix $M_i$ corresponding to it.
3. Solve the system $v \cdot C_i = v, \, \sum_{j = 1}^{|C_i|} v_j = 1$, which has a unique solution, for each $i = 1, \ldots, l$.
4. Map each state $v_i$ to the original order in $P$ and assign a $0$ to the slots corresponding to transient states in the matrix.

The result is returned in matrix form.

```{r steadyStates}
steadyStates(mcWeather)
```

It is possible for a Markov chain to have more than one stationary distribution, as the gambler ruin example shows.

```{r gamblerRuin}
gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) {
  m <- markovchain:::zeros(moneyMax + 1)
  m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1
  states <- as.character(0:moneyMax)
  rownames(m) <- colnames(m) <- states

  for(i in 2:moneyMax){
    m[i,i-1] <- 1 - prob
    m[i, i + 1] <- prob
  }

  new("markovchain", transitionMatrix = m,
      name = paste("Gambler ruin", moneyMax, "dim", sep = " "))
}

mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5)
steadyStates(mcGR4)
```

## Classification of states

Absorbing states are determined by means of the `absorbingStates` method.

```{r absorbingStates}
absorbingStates(mcGR4)
absorbingStates(mcWeather)
```

The key function in methods which need knowledge about communicating classes, recurrent states and transient states is `.commclassKernel`, which is a modification of Tarjan's algorithm from \cite{Tarjan}. This `.commclassKernel` method gets a transition matrix of dimension $n$ and returns a list of two items:

1. `classes`, a matrix whose $(i, j)$ entry is `TRUE` if $s_i$ and $s_j$ are in the same communicating class.
2. `closed`, a vector whose $i$-th entry indicates whether the communicating class to which $i$ belongs is closed.

These functions are used by two other internal functions on which the `summary` method for `markovchain` objects works. The example matrix used in \cite{renaldoMatlab} well exemplifies the purpose of the function.

```{r renaldoMatrix1}
P <- markovchain:::zeros(10)
P[1, c(1, 3)] <- 1/2;
P[2, 2] <- 1/3; P[2,7] <- 2/3;
P[3, 1] <- 1;
P[4, 5] <- 1;
P[5, c(4, 5, 9)] <- 1/3;
P[6, 6] <- 1;
P[7, 7] <- 1/4; P[7,9] <- 3/4;
P[8, c(3, 4, 8, 10)] <- 1/4;
P[9, 2] <- 1;
P[10, c(2, 5, 10)] <- 1/3;
rownames(P) <- letters[1:10]
colnames(P) <- letters[1:10]
probMc <- new("markovchain", transitionMatrix = P,
              name = "Probability MC")
summary(probMc)
```

All states that belong to a transient class are named "transient" and a specific method has been written to elicit them.

```{r transientStates}
transientStates(probMc)
```

The `canonicForm` method turns a Markov chain into its canonic form, reordering the states so that the recurrent classes come first, followed by the transient states.

```{r probMc2Canonic}
probMcCanonic <- canonicForm(probMc)
probMc
probMcCanonic
```

The function `is.accessible` permits investigating whether a state $s_{j}$ is accessible from state $s_{i}$, that is, whether the probability to eventually reach $s_j$ starting from $s_{i}$ is greater than zero.

```{r isAccessible}
is.accessible(object = probMc, from = "a", to = "c")
is.accessible(object = probMc, from = "g", to = "c")
```

In Section \@ref(sec:properties) we observed that, if a DTMC is irreducible, all its states share the same periodicity.
Then, the `period` function returns the periodicity of the DTMC, provided that it is irreducible. The example that follows shows how to find if a DTMC is reducible or irreducible by means of the function `is.irreducible` and, in the latter case, the method `period` is used to compute the periodicity of the chain. ```{r periodicity} E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ``` The example Markov chain found in \proglang{Mathematica} web site \citep{mathematica9MarkovChain} has been used, and is plotted in Figure \@ref(fig:mcMathematics). ```{r mathematica9Mc} mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ``` ```{r mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. Markov chain plot."} plot(mathematicaMc, layout = layout.fruchterman.reingold) ``` ```{r mathematica9MC, echo=FALSE} summary(mathematicaMc) ``` ## First passage time distributions and means \cite{renaldoMatlab} provides code to compute first passage time (within $1,2,\ldots, n$ steps) given the initial state to be $i$. The \proglang{MATLAB} listings translated into \proglang{R} on which the `firstPassage` function is based are: ```{r fpTime1, eval=FALSE} .firstpassageKernel <- function(P, i, n){ G <- P H <- P[i,] E <- 1 - diag(size(P)[2]) for (m in 2:n) { G <- P %*% (G * E) H <- rbind(H, G[i,]) } return(H) } ``` We conclude that the probability for the *first* rainy day to be the third one, given that the current state is sunny, is given by: ```{r fpTime2} firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ``` To compute the *mean* first passage times, i.e. the expected number of days before it rains given that today is sunny, we can use the `meanFirstPassageTime` function: ```{r mfpt1} meanFirstPassageTime(mcWeather) ``` indicating e.g. that the average number of days of sun or cloud before rain is 6.67 if we start counting from a sunny day, and 5 if we start from a cloudy day. Note that we can also specify one or more destination states: ```{r mfpt2} meanFirstPassageTime(mcWeather,"rain") ``` The implementation follows the matrix solutions by [@GrinsteadSnell]. We can check the result by averaging the first passage probability density function: ```{r mfpt3} firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ``` ## Mean recurrence time The `meanRecurrenceTime` method gives the first mean recurrence time (expected number of steps to go back to a state if it was the initial one) for each recurrent state in the transition probabilities matrix for a DTMC. Let's see an example: ```{r mrt-weather} meanRecurrenceTime(mcWeather) ``` Another example, with not all of its states being recurrent: ```{r mrt-probMc} recurrentStates(probMc) meanRecurrenceTime(probMc) ``` ## Absorption probabilities and mean absorption time We are going to use the Drunkard’s random walk from [@GrinsteadSnell]. 
We have a drunk person walking through the street. Each move the person does, if they have not arrived to either home (corner 1) or to the bar (corner 5) could be to the left corner or to the right one, with equal probability. In case of arrival to the bar or to home, the person stays there. ```{r data-drunkard} drunkProbs <- markovchain:::zeros(5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ``` Recurrent (in fact absorbing states) are: ```{r rs-drunkard} recurrentStates(drunkMc) ``` Transient states are the rest: ```{r ts-drunkard} transientStates(drunkMc) ``` The probability of either being absorbed by the bar or by the sofa at home are: ```{r ap-drunkard} absorptionProbabilities(drunkMc) ``` which means that the probability of arriving home / bar is inversely proportional to the distance to each one. But we also would like to know how much time does the person take to arrive there, which can be done with `meanAbsorptionTime`: ```{r at-drunkard} meanAbsorptionTime(drunkMc) ``` So it would take `3` steps to arrive to the destiny if the person is either in the second or fourth corner, and `4` steps in case of being at the same distance from home than to the bar. ## Committor probability The committor probability tells us the probability to reach a given state before another given. Suppose that we start in a cloudy day, the probabilities of experiencing a rainy day before a sunny one is 0.5: ```{r} committorAB(mcWeather,3,1) ``` ## Hitting probabilities Rewriting the system \eqref{eq:hitting-probs} as: \begin{equation*} A = \left(\begin{array}{c|c|c|c} A_1 & 0 & \ldots & 0 \\ \hline 0 & A_2 & \ldots & 0 \\ \hline \vdots & \vdots & \ddots & 0 \\ \hline 0 & 0 & \ldots & A_n \end{array}\right) \end{equation*} \begin{eqnarray*} A_1 &= \left(\begin{matrix} -1 & p_{1,2} & p_{1,3} & \ldots & p_{1,n} \\ 0 & (p_{2,2} - 1) & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & p_{n, 2} & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ A_2 &= \left(\begin{matrix} (p_{1,1} - 1) & 0 & p_{1,3} & \ldots & p_{1,n} \\ p_{2,1} & -1 & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & 0 & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ \vdots & \vdots\\ A_n &= \left(\begin{matrix} (p_{1,1} - 1) & p_{1,2} & p_{1,3} & \ldots & 0 \\ p_{2,1} & (p_{2,2} -1) & p_{2,3} & \ldots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & p_{n,2} & p_{n,3} & \ldots & -1 \end{matrix}\right)\\ \end{eqnarray*} \begin{equation*} \begin{array}{lr} X_j = \left(\begin{array}{c} h_{1,j} \\ h_{2,j} \\ \vdots \\ h_{n,j} \end{array}\right) & C_j = - \left(\begin{array}{c} p_{1,j} \\ p_{2,j} \\ \vdots \\ p_{n,j} \end{array}\right) \end{array} \end{equation*} we end up having to solve the block systems: \begin{equation} A_j \cdot X_j = C_j \end{equation} Let us imagine the $i$ -th state has transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then that same row would turn into $(0,0, \ldots, 0)$ for some block, thus obtaining a singular matrix. Another case which may give us problems could be: state $i$ has the following transition probabilities: $(0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0)$ and the state $j$ has the following transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. 
Then when building some blocks we will end up with rows: \begin{eqnarray*} (0, \ldots, 0, \underset{i)}{-1}, 0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0) \\ (0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0, \underset{j)}{-1}, 0, \ldots, 0) \end{eqnarray*} which are linearly dependent. Our hypothesis is that if we treat the closed communicating classes differently, we *might* delete the linearity in the system. If we have a closed communicating class $C_u$, then $h_{i,j} = 1$ for all $i,j \in C_u$ and $h_{k,j} = 0$ for all $k\not\in C_u$. Then we can set $X_u$ appropriately and solve the other $X_v$ using those values. The method in charge of that in `markovchain` package is `hittingProbabilities`, which receives a Markov chain and computes the matrix $(h_{ij})_{i,j = 1,\ldots, n}$ where $S = \{s_1, \ldots, s_n\}$ is the set of all states of the chain. For the following chain: ```{r hitting-data} M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ``` we want to compute the hitting probabilities. That can be done with: ```{r hitting-probabilities} hittingProbabilities(hittingTest) ``` In the case of the `mcWeather` Markov chain we would obtain a matrix with all its elements set to $1$. That makes sense (and is desirable) since if today is sunny, we expect it would be sunny again at certain point in the time, and the same with rainy weather (that way we assure good harvests): ```{r hitting-weather} hittingProbabilities(mcWeather) ``` # Statistical analysis {#sec:statistics} Table \@ref(tab:funs4Stats) lists the functions and methods implemented within the package which help to fit, simulate and predict DTMC. \begin{table}[h] \centering \begin{tabular}{lll} \hline Function & Purpose \\ \hline \hline \code{markovchainFit} & Function to return fitted Markov chain for a given sequence.\\ \code{predict} & Method to calculate predictions from \code{markovchain} or \\ & \code{markovchainList} objects.\\ \code{rmarkovchain} & Function to sample from \code{markovchain} or \code{markovchainList} objects.\\ \hline \end{tabular} \caption{The \pkg{markovchain} statistical functions.} \label{tab:funs4Stats} \end{table} ## Simulation Simulating a random sequence from an underlying DTMC is quite easy thanks to the function `rmarkovchain`. The following code generates a year of weather states according to `mcWeather` underlying stochastic process. ```{r simulatingAMarkovChain} weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ``` Similarly, it is possible to simulate one or more sequences from a semi-homogeneous Markov chain, as the following code (applied on CCHC example) exemplifies. ```{r simulatingAListOfMarkovChain} patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ``` Two advance parameters are available to the `rmarkovchain` method which helps you decide which implementation to use. There are four options available : \proglang{R}, \proglang{R} in parallel, \proglang{C++} and \proglang{C++} in parallel. Two boolean parameters `useRcpp` and `parallel` will decide which implementation will be used. Default is \code{useRcpp = TRUE} and \code{parallel = FALSE} i.e. \proglang{C++} implementation. The \proglang{C++} implementation is generally faster than the `R` implementation. 
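For instance, the implementation can be selected explicitly when sampling; the minimal sketch below (not evaluated here, and with timings that will of course depend on the machine) simply toggles the two parameters described above.

```{r rmarkovchainImplementations, eval=FALSE}
# a sketch: explicitly choosing the rmarkovchain implementation
rmarkovchain(n = 365, object = mcWeather, t0 = "sunny", useRcpp = FALSE) # pure R
rmarkovchain(n = 365, object = mcWeather, t0 = "sunny", useRcpp = TRUE)  # C++ (default)
rmarkovchain(n = 365, object = mcWeather, t0 = "sunny",
             useRcpp = TRUE, parallel = TRUE)                            # C++ in parallel
```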
If you have multicore processors then you can take advantage of `parallel` parameter by setting it to `TRUE`. When both `Rcpp=TRUE` and `parallel=TRUE` the parallelization has been carried out using \pkg{RcppParallel} package \citep{pkg:RcppParallel}. ## Estimation A time homogeneous Markov chain can be fit from given data. Four methods have been implemented within current version of \pkg{markovchain} package: maximum likelihood, maximum likelihood with Laplace smoothing, Bootstrap approach, maximum a posteriori. Equation \ref{eq:MLE} shows the maximum likelihood estimator (MLE) of the $p_{ij}$ entry, where the $n_{ij}$ element consists in the number sequences $\left( X_{t}=s_{i}, X_{t+1}=s_{j}\right)$ found in the sample, that is \begin{equation} {\hat p^{MLE}}_{ij} = \frac{n_{ij}}{\sum\limits_{u = 1}^k {n_{iu}}}. \label{eq:MLE} \end{equation} Equation \@ref(eq:SE) shows the `standardError` of the MLE \citep{MSkuriat}. \begin{equation} SE_{ij} = \frac{ {\hat p^{MLE}}_{ij} }{\sqrt{n_{ij}}} \label{eq:SE} \end{equation} ```{r fitMcbyMLE2} weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ``` The Laplace smoothing approach is a variation of the MLE, where the $n_{ij}$ is substituted by $n_{ij}+\alpha$ (see Equation \ref{eq:LAPLACE}), being $\alpha$ an arbitrary positive stabilizing parameter. \begin{equation} {\hat p^{LS}}_{ij} = \frac{{{n_{ij}} + \alpha }}{{\sum\limits_{u = 1}^k {\left( {{n_{iu}} + \alpha } \right)} }} \label{eq:LAPLACE} \end{equation} ```{r fitMcbyLAPLACE} weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace", laplacian = 0.01, name = "Weather LAPLACE") weatherFittedLAPLACE$estimate ``` (NOTE: The Confidence Interval option is enabled by default. Remove this option to fasten computations.) Both MLE and Laplace approach are based on the `createSequenceMatrix` functions that returns the raw counts transition matrix. ```{r fitSequenceMatrix} createSequenceMatrix(stringchar = weathersOfDays) ``` `stringchar` could contain `NA` values, and the transitions containing `NA` would be ignored. An issue occurs when the sample contains only one realization of a state (say $X_{\beta}$) which is located at the end of the data sequence, since it yields to a row of zero (no sample to estimate the conditional distribution of the transition). In this case the estimated transition matrix is corrected assuming $p_{\beta,j}=1/k$, being $k$ the possible states. Create sequence matrix can also be used to obtain raw count transition matrices from a given $n*2$ matrix as the following example shows: ```{r fitSequenceMatrix2} myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2) createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE) ``` A bootstrap estimation approach has been developed within the package in order to provide an indication of the variability of ${\hat p}_{ij}$ estimates. The bootstrap approach implemented within the \pkg{markovchain} package follows these steps: 1. bootstrap the data sequences following the conditional distributions of states estimated from the original one. The default bootstrap samples is 10, as specified in `nboot` parameter of `markovchainFit` function. 2. apply MLE estimation on bootstrapped data sequences that are saved in `bootStrapSamples` slot of the returned list. 3. the ${p^{BOOTSTRAP}}_{ij}$ is the average of all ${p^{MLE}}_{ij}$ across the `bootStrapSamples` list, normalized by row. 
A `standardError` of the $\hat{{p^{MLE}}_{ij}}$ estimate is provided as well.

```{r fitMcbyBootStrap1}
weatherFittedBOOT <- markovchainFit(data = weathersOfDays,
                                    method = "bootstrap", nboot = 20)
weatherFittedBOOT$estimate
weatherFittedBOOT$standardError
```

The bootstrapping process can be done in parallel thanks to the \pkg{RcppParallel} package \citep{pkg:RcppParallel}. The parallelized implementation is definitely recommended when the data sample size or the required number of bootstrap runs is high.

```{r fitMcbyBootStrap2, eval=FALSE}
weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays,
                                            method = "bootstrap", nboot = 200,
                                            parallel = TRUE)
weatherFittedBOOTParallel$estimate
weatherFittedBOOTParallel$standardError
```

The parallel bootstrapping uses all the available cores on a machine by default. However, it is also possible to tune the number of threads used. Note that this should be done in R before calling the `markovchainFit` function. For example, the following code sets the number of threads to 2.

```{r fitMcbyBootStrap3, eval=FALSE}
RcppParallel::setNumThreads(2)
```

For more details, please refer to the \pkg{RcppParallel} web site.

For all the fitting methods, the `logLikelihood` \citep{MSkuriat} defined in Equation \ref{eq:LLH} is provided.

\begin{equation}
LLH = \sum_{i,j} n_{ij} \cdot \log (p_{ij})
\label{eq:LLH}
\end{equation}

where $n_{ij}$ is an entry of the frequency matrix and $p_{ij}$ is an entry of the transition probability matrix.

```{r fitMcbyMLE1}
weatherFittedMLE$logLikelihood
weatherFittedBOOT$logLikelihood
```

Confidence matrices of estimated parameters (parametric for MLE, non-parametric for bootstrap) are available as well. The `confidenceInterval` is provided with two matrices: `lowerEndpointMatrix` and `upperEndpointMatrix`. The confidence level (CL) is 0.95 by default and can be given as an argument of the function `markovchainFit`. It is used to obtain the standard score (z-score): from classical inference theory, if $ci$ is the required level of confidence and a normal distribution is assumed, then $zscore(ci)$ solves $\Phi\left(zscore(ci)\right) = 1-\frac{1-ci}{2}$. Equations \ref{eq:CIL} and \ref{eq:CIU} \citep{MSkuriat} show the `confidenceInterval` of a fitting. Note that each entry of the matrices is bounded between 0 and 1.

\begin{align}
LowerEndpoint_{ij} = p_{ij} - zscore (CL) * SE_{ij} \label{eq:CIL} \\
UpperEndpoint_{ij} = p_{ij} + zscore (CL) * SE_{ij} \label{eq:CIU}
\end{align}

```{r confint}
weatherFittedMLE$confidenceInterval
weatherFittedBOOT$confidenceInterval
```

A special function, `multinomialConfidenceIntervals`, has been written in order to obtain multinomial-wise confidence intervals. The code is based on an \pkg{Rcpp} translation of the \pkg{MultinomialCI} package's functions \cite{pkg:MultinomialCI}, which were themselves based on the \cite{sison1995simultaneous} paper.

```{r multinomial}
multinomialConfidenceIntervals(transitionMatrix =
        weatherFittedMLE$estimate@transitionMatrix,
        countsTransitionMatrix = createSequenceMatrix(weathersOfDays))
```

The functions for fitting DTMC have mostly been rewritten in \proglang{C++} using \pkg{Rcpp} \cite{RcppR} since version 0.2.

It is also possible to fit a DTMC object from `matrix` or `data.frame` objects as shown in the following code.

```{r fitMclists}
data(holson)
singleMc<-markovchainFit(data=holson[,2:12],name="holson")
```

The same applies for `markovchainList` (output length has been limited).
```{r fitMclistsFit1, output.lines=20}
mcListFit<-markovchainListFit(data=holson[,2:6],name="holson")
mcListFit$estimate
```

Finally, given a `list` object, it is possible to fit a `markovchain` object or to obtain the raw transition matrix.

```{r fitMclistsFit2}
c1<-c("a","b","a","a","c","c","a")
c2<-c("b")
c3<-c("c","a","a","c")
c4<-c("b","a","b","a","a","c","b")
c5<-c("a","a","c",NA)
c6<-c("b","c","b","c","a")
mylist<-list(c1,c2,c3,c4,c5,c6)
mylistMc<-markovchainFit(data=mylist)
mylistMc
```

The same works for `markovchainListFit`.

```{r fitAMarkovChainListfromAlist, output.lines=15}
markovchainListFit(data=mylist)
```

If any transition contains `NA`, it will be ignored in the results, as the above example showed.

## Prediction

The $n$-step forward predictions can be obtained using the `predict` methods explicitly written for `markovchain` and `markovchainList` objects. The prediction is the mode of the conditional distribution of $X_{t+1}$ given $X_{t}=s_{j}$, being $s_{j}$ the last realization of the DTMC (homogeneous or semi-homogeneous).

### Predicting from a markovchain object

The 3-days forward predictions from a `markovchain` object can be generated as follows, assuming that the last two days were respectively "cloudy" and "sunny".

```{r markovchainPredict}
predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"),
        n.ahead = 3)
```

### Predicting from a markovchainList object

Given an initial two years health status, the 5-year ahead prediction of any CCRC guest is

```{r markovchainListPredict}
predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5)
```

The prediction has stopped at the end of the time sequence, since the underlying semi-homogeneous Markov chain has a length of four. In order to continue five years ahead, the `continue=TRUE` parameter setting makes the `predict` method keep using the last `markovchain` in the sequence list.

```{r markovchainListPredict2}
predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE)
```

## Statistical Tests

In this section, we describe the statistical tests for assessing the Markov property (`verifyMarkovProperty`), the order (`assessOrder`) and the stationarity (`assessStationarity`) of a Markov chain sequence, as well as the divergence tests for empirically estimated transition matrices (`verifyEmpiricalToTheoretical` and `verifyHomogeneity`). Most of such tests are based on the $\chi ^2$ statistics. Relevant references are \cite{kullback1962tests} and \cite{anderson1957statistical}.

All such tests have been designed for small samples, since it is easy to detect departures from the Markov property as the sample size increases. In addition, the accuracy of the statistical inference functions has been questioned and will be thoroughly investigated in future versions of the package.

### Assessing the Markov property of a Markov chain sequence

The `verifyMarkovProperty` function verifies whether the Markov property holds for the given chain. The test implemented in the package looks at triplets of successive observations. If $x_1, x_2, \ldots, x_N$ is a set of observations and $n_{ijk}$ is the number of times $t$ $\left(1 \le t \le N-2 \right)$ such that $x_t=i, x_{t+1}=j, x_{t+2}=k$, then if the Markov property holds $n_{ijk}$ follows a Binomial distribution with parameters $n_{ij}$ and $p_{jk}$. A classical $\chi^2$ test can check this distributional assumption, since $\sum_{i}\sum_{j}\sum_{k}\frac{(n_{ijk}-n_{ij}\hat{p_{jk}})^2}{n_{ij}\hat{p_{jk}}}\sim \chi^2\left(q \right )$ where $q$ is the number of degrees of freedom.
The number of degrees of freedom $q$ of the $\chi^2$ distribution is given by the formula $r-q+s-1$, where:

* $s$ denotes the number of states $i$ in the state space such that $n_{i} > 0$;
* $q$ denotes the number of pairs $(i, j)$ for which $n_{ij} > 0$; and
* $r$ denotes the number of triplets $(i, j, k)$ for which $n_{ij}n_{jk} > 0$.

```{r test1}
sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b",
                   "a", "b", "a", "a", "b", "b", "b", "a")
verifyMarkovProperty(sample_sequence)
```

### Assessing the order of a Markov chain sequence

The `assessOrder` function checks whether the given chain is of first order or of second order. For each possible present state, we construct a contingency table of the frequency of the future state for each past to present state transition, as shown in Table \ref{tab:order}.

\begin{table}[h]
\centering
\begin{tabular}{l | l | l | l}
\hline
past & present & future & future \\
 & & a & b \\
\hline \hline
a & a & 2 & 2\\
b & a & 2 & 2\\
\hline
\end{tabular}
\caption{Contingency table to assess the order for the present state a.}
\label{tab:order}
\end{table}

Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is of first order.

```{r test2}
data(rain)
assessOrder(rain$rain)
```

### Assessing the stationarity of a Markov chain sequence

The `assessStationarity` function assesses whether the transition probabilities of the given chain change over time. To be more specific, the chain is stationary if the following condition is met.

\begin{equation}
p_{ij}(t) = p_{ij} ~\textrm{ for all }~t
\label{eq:stationarity}
\end{equation}

For each possible state, we construct a contingency table of the estimated transition probabilities over time, as shown in Table \ref{tab:stationarity}.

\begin{table}[h]
\centering
\begin{tabular}{l | l | l}
\hline
time (t) & probability of transition to a & probability of transition to b \\
\hline \hline
1 & 0 & 1\\
2 & 0 & 1\\
. & . & . \\
. & . & . \\
. & . & . \\
16 & 0.44 & 0.56\\
\hline
\end{tabular}
\caption{Contingency table to assess the stationarity of the state a.}
\label{tab:stationarity}
\end{table}

Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is stationary.

```{r test3}
assessStationarity(rain$rain, 10)
```

### Divergence tests for empirically estimated transition matrices

This section discusses tests developed to verify whether:

1. An empirical transition matrix is consistent with a theoretical one.
2. Two or more empirical transition matrices belong to the same DTMC.

The first test is implemented by the `verifyEmpiricalToTheoretical` function. Letting $f_{ij}$ be the raw transition count, \cite{kullback1962tests} shows that $2*\sum_{i=1}^{r}\sum_{j=1}^{r}f_{ij}\ln\frac{f_{ij}}{f_{i.}P\left( E_j | E_i\right)} \sim \chi^2\left ( r*(r-1) \right )$.
The following example is taken from \cite{kullback1962tests}: ```{r divergence1} sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ``` The second one is implemented by the `verifyHomogeneity` function, inspired by \cite[section~9]{kullback1962tests}. Assuming that $i=1,2, \ldots, s$ DTMC samples are available and that the cardinality of the state space is $r$ it verifies whether the $s$ chains belongs to the same unknown one. \cite{kullback1962tests} shows that its test statistics follows a chi-square law, $2*\sum_{i=1}^{s}\sum_{j=1}^{r}\sum_{k=1}^{r}f_{ijk}\ln\frac{n*f_{ijk}}{f_{i..}f_{.jk}} \sim \chi^2\left ( r*(r-1) \right )$. Also the following example is taken from \cite{kullback1962tests}: ```{r divergence2} data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ``` ## Continuous Times Markov Chains ### Intro The \pkg{markovchain} package provides functionality for continuous time Markov chains (CTMCs). CTMCs are a generalization of discrete time Markov chains (DTMCs) in that we allow time to be continuous. We assume a finite state space $S$ (for an infinite state space wouldn't fit in memory). We can think of CTMCs as Markov chains in which state transitions can happen at any time. More formally, we would like our CTMCs to satisfy the following two properties: * The Markov property - let $F_{X(s)}$ denote the information about $X$ up to time $s$. Let $j \in S$ and $s \leq t$. Then, $P(X(t) = j|F_{X(s)}) = P(X(t) = j|X(s))$. * Time homogeneity - $P(X(t) = j|X(s) = k) = P(X(t-s) = j|X(0) = k)$. If both the above properties are satisfied, it is referred to as a time-homogeneous CTMC. If a transition occurs at time $t$, then $X(t)$ denotes the new state and $X(t)\neq X(t-)$. Now, let $X(0)=x$ and let $T_x$ be the time a transition occurs from this state. We are interested in the distribution of $T_x$. For $s,t \geq 0$, it can be shown that $ P(T_x > s+t | T_x > s) = P(T_x > t) $ This is the memory less property that only the exponential random variable exhibits. Therefore, this is the sought distribution, and each state $s \in S$ has an exponential holding parameter $\lambda(s)$. Since $\mathrm{E}T_x = \frac{1}{\lambda(x)}$, higher the rate $\lambda(x)$, smaller the expected time of transitioning out of the state $x$. However, specifying this parameter alone for each state would only paint an incomplete picture of our CTMC. To see why, consider a state $x$ that may transition to either state $y$ or $z$. The holding parameter enables us to predict when a transition may occur if we start off in state $x$, but tells us nothing about which state will be next. To this end, we also need transition probabilities associated with the process, defined as follows (for $y \neq x$) - $p_{xy} = P(X(T_s) = y | X(0) = x)$. Note that $\sum_{y \neq x} p_{xy} = 1$. Let $Q$ denote this transition matrix ($Q_{ij} = p_{ij}$). What is key here is that $T_x$ and the state $y$ are independent random variables. Let's define $\lambda(x, y) = \lambda(x) p_{xy}$ We now look at Kolmogorov's backward equation. 
Let's define $P_{ij}(t) = P(X(t) = j | X(0) = i)$ for $i, j \in S$. The backward equation is given by (it can be proved) $P_{ij}(t) = \delta_{ij}e^{-\lambda(i)t} + \int_{0}^{t}\lambda(i)e^{-\lambda(i)t} \sum_{k \neq i} Q_{ik} P_{kj}(t-s) ds$. Basically, the first term is non-zero if and only if $i=j$ and represents the probability that the first transition from state $i$ occurs after time $t$. This would mean that at $t$, the state is still $i$. The second term accounts for any transitions that may occur before time $t$ and denotes the probability that at time $t$, when the smoke clears, we are in state $j$. This equation can be represented compactly as follows $P'(t) = AP(t)$ where $A$ is the *generator* matrix. \[ A(i, j) = \begin{cases} \lambda(i, j) & \mbox{if } i \neq j \\ -\lambda(i) & \mbox{else.} \end{cases} \] Observe that the sum of each row is 0. A CTMC can be completely specified by the generator matrix. ### Stationary Distributions The following theorem guarantees the existence of a unique stationary distribution for CTMCs. Note that $X(t)$ being irreducible and recurrent is the same as $X_n(t)$ being irreducible and recurrent. Suppose that $X(t)$ is irreducible and recurrent. Then $X(t)$ has an invariant measure $\eta$, which is unique up to multiplicative factors. Moreover, for each $k \in S$, we have \[\eta_k = \frac{\pi_k}{\lambda(k)}\] where $\pi$ is the unique invariant measure of the embedded discrete time Markov chain $Xn$. Finally, $\eta$ satisfies \[0 < \eta_j < \infty, \forall j \in S\] and if $\sum_i \eta_i < \infty$ then $\eta$ can be normalized to get a stationary distribution. ### Estimation Let the data set be $D = \{(s_0, t_0), (s_1, t_1), ..., (s_{N-1}, t_{N-1})\}$ where $N=|D|$. Each $s_i$ is a state from the state space $S$ and during the time $[t_i,t_{i+1}]$ the chain is in state $s_i$. Let the parameters be represented by $\theta = \{\lambda, P\}$ where $\lambda$ is the vector of holding parameters for each state and $P$ the transition matrix of the embedded discrete time Markov chain. Then the probability is given by \[ {Pr(D | \theta) \propto \lambda(s_0)e^{-\lambda(s_0)(t_1-t_0)}Pr(s_1|s_0) \cdot\ldots\cdot \lambda(s_{N-2})e^{-\lambda(s_{N-2})(t_{N-1}-t_{N-2})}Pr(s_{N-1}|s_{N-2})} \] Let $n(j|i)$ denote the number of $i$->$j$ transitions in $D$, and $n(i)$ the number of times $s_i$ occurs in $D$. Let $t(s_i)$ denote the total time the chain spends in state $s_i$. Then the MLEs are given by \[ \hat{\lambda(s)} = \frac{n(s)}{t(s)},\hat{Pr(j|i)}=\frac{n(j|i)}{n(i)} \] ### Expected Hitting Time The package provides a function `ExpectedTime` to calculate average hitting time from one state to another. Let the final state be j, then for every state $i \in S$, where $S$ is the set of all states and holding time $q_{i} > 0$ for every $i \neq j$. Assuming the conditions to be true, expected hitting time is equal to minimal non-negative solution vector $p$ to the system of linear equations: \begin{equation} \begin{cases} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{k} = 1 & k \neq j \end{cases} \label{eq:EHT} \end{equation} ### Probability at time t The package provides a function `probabilityatT` to calculate probability of every state according to given `ctmc` object. Here we use Kolmogorov's backward equation $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. Here $P(t)$ is the transition function at time t. The value $P(t)[i][j]$ at time $P(t)$ describes the probability of the state at time $t$ to be equal to j if it was equal to i at time $t=0$. 
It takes care of the case when `ctmc` object has a generator represented by columns. If initial state is not provided, the function returns the whole transition matrix $P(t)$. ### Examples To create a CTMC object, you need to provide a valid generator matrix, say $Q$. The CTMC object has the following slots - states, generator, by row, name (look at the documentation object for further details). Consider the following example in which we aim to model the transition of a molecule from the $\sigma$ state to the $\sigma^*$ state. When in the former state, if it absorbs sufficient energy, it can make the jump to the latter state and remains there for some time before transitioning back to the original state. Let us model this by a CTMC: ```{r rCtmcInit} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ``` To generate random CTMC transitions, we provide an initial distribution of the states. This must be in the same order as the dimnames of the generator. The output can be returned either as a list or a data frame. ```{r rctmcRandom0} statesDist <- c(0.8, 0.2) rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df", include.T0 = FALSE) ``` $n$ represents the number of samples to generate. There is an optional argument $T$ for `rctmc`. It represents the time of termination of the simulation. To use this feature, set $n$ to a very high value, say `Inf` (since we do not know the number of transitions before hand) and set $T$ accordingly. ```{r ctmcRandom1} statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2) ``` To obtain the stationary distribution simply invoke the `steadyStates` function ```{r rctmcSteadyStates} steadyStates(molecularCTMC) ``` For fitting, use the `ctmcFit` function. It returns the MLE values for the parameters along with the confidence intervals. ```{r rctmcFitting} data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) ``` One approach to obtain the generator matrix is to apply the `logm` function from the \pkg{expm} package on a transition matrix. Numeric issues arise, see \cite{israel2001finding}. For example, applying the standard `method` ('Higham08') on `mcWeather` raises an error, whilst the alternative method (eigenvalue decomposition) is OK. The following code estimates the generator matrix of the `mcWeather` transition matrix. ```{r mcWeatherQ} mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen') mcWeatherQ ``` Therefore, the "half - day" transition probability for mcWeather DTMC is ```{r mcWeatherHalfDay} mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5) mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,name="Half Day Weather Transition Matrix") mcWeatherHalfDay ``` The \pkg{ctmcd} package \citep{pkg:ctmcd} provides various functions to estimate the generator matrix (GM) of a CTMC process using different methods. The following code provides a way to join \pkg{markovchain} and \pkg{ctmcd} computations. ```{r ctmcd1} if(requireNamespace(package='ctmcd', quietly = TRUE)) { require(ctmcd) require(expm) #defines a function to transform a GM into a TM gm_to_markovchain<-function(object, t=1) { if(!(class(object) %in% c("gm","matrix","Matrix"))) stop("Error! 
Expecting either a matrix or a gm object")
  if ( class(object) %in% c("matrix","Matrix")) generator_matrix<-object else generator_matrix<-as.matrix(object[["par"]])
  #must add importClassesFrom("markovchain",markovchain) in the NAMESPACE
  #must add importFrom(expm, "expm")
  transitionMatrix<-expm(generator_matrix*t)
  out<-as(transitionMatrix,"markovchain")
  return(out)
}
#loading ctmcd dataset
data(tm_abs)
gm0=matrix(1,8,8) #initializing
diag(gm0)=0
diag(gm0)=-rowSums(gm0)
gm0[8,]=0
gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) #estimating GM
mc_at_2=gm_to_markovchain(object=gmem, t=2) #converting to TM at time 2
} else {
  warning('package ctmcd unavailable')
}
```

## Pseudo - Bayesian Estimation

\cite{Hu2002} shows an empirical quasi-Bayesian method to estimate transition matrices, given an empirical $\hat{P}$ transition matrix (estimated using the classical approach) and an a - priori estimate $Q$. In particular, each row of the matrix is estimated using the linear combination $\alpha \cdot Q+\left(1-\alpha\right) \cdot \hat{P}$, where $\alpha$ is defined for each row as Equation \ref{eq:pseudobayes} shows

\begin{equation}
\left\{\begin{matrix}
\hat{\alpha_i}=\frac{\hat{K_i}}{v\left(i \right )+\hat{K_i}}\\
\hat{K_i}=\frac{v\left(i \right)^2 - \sum_{j}Y_{ij}^2}{\sum_{j}(Y_{ij}-v\left(i \right)*q_{ij})^2}
\end{matrix}\right.
\label{eq:pseudobayes}
\end{equation}

The following code returns the pseudo Bayesian estimate of the transition matrix:

```{r pseudobayes}
pseudoBayesEstimator <- function(raw, apriori){
  v_i <- rowSums(raw)
  K_i <- numeric(nrow(raw))
  sumSquaredY <- rowSums(raw^2)
  #get numerator
  K_i_num <- v_i^2-sumSquaredY
  #get denominator
  VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori))
  for (i in 1:nrow(VQ)) {
    VQ[i,]<-v_i[i]*apriori[i,]
  }

  K_i_den<-rowSums((raw - VQ)^2)

  K_i <- K_i_num/K_i_den

  #get the alpha vector
  alpha <- K_i / (v_i+K_i)

  #empirical transition matrix
  Emp<-raw/rowSums(raw)

  #get the estimate
  out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw))
  for (i in 1:nrow(out)) {
    out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,]
  }
  return(out)
}
```

We then apply it to a simple simulated two-state example:

```{r pseudobayes2}
trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain")
aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain")

smallSample<-rmarkovchain(n=20,object = trueMc)
smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample)
pseudoBayesEstimator(
  raw = smallSampleRawTransitions,
  apriori = aprioriMc@transitionMatrix
) - trueMc@transitionMatrix

biggerSample<-rmarkovchain(n=100,object = trueMc)
biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample)
pseudoBayesEstimator(
  raw = biggerSampleRawTransitions,
  apriori = aprioriMc@transitionMatrix
) - trueMc@transitionMatrix

bigSample<-rmarkovchain(n=1000,object = trueMc)
bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample)
pseudoBayesEstimator(
  raw = bigSampleRawTransitions,
  apriori = aprioriMc@transitionMatrix
) - trueMc@transitionMatrix
```

## Bayesian Estimation

The \pkg{markovchain} package provides functionality for maximum a posteriori (MAP) estimation of the chain parameters (at the time of writing this document, only first order models are supported) by Bayesian inference. It also computes the probability of observing a new data set, given a (different) data set. This vignette provides the mathematical description for the methods employed by the package.

### Notation and set-up

The data is denoted by $D$, the model parameters (transition matrix) by $\theta$.
The object of interest is $P(\theta | D)$ (posterior density). $\mathcal{A}$ represents an alphabet class, each of whose members represents a state of the chain. Therefore
\[D = s_0 s_1 ... s_{N-1}, s_t \in \mathcal{A}\]
where $N$ is the length of the data set. Also,
\[\theta = \{p(s|u), s \in \mathcal{A}, u \in \mathcal{A} \}\]
where $\sum_{s \in \mathcal{A}} p(s|u) = 1$ for each $u \in \mathcal{A}$. Our objective is to find $\theta$ which maximizes the posterior. That is, if our solution is denoted by $\hat{\theta}$, then
\[\hat{\theta} = \underset{\theta}{\text{argmax}}\, P(\theta | D)\]
where the search space is the set of right stochastic matrices of dimension $|\mathcal{A}| \times |\mathcal{A}|$.

$n(u, s)$ denotes the number of times the word $us$ occurs in $D$ and $n(u)=\sum_{s \in \mathcal{A}}n(u, s)$. The hyper-parameters are similarly denoted by $\alpha(u, s)$ and $\alpha(u)$ respectively.

### Methods

Given $D$, its likelihood conditioned on the observed initial state in $D$ is given by
\[P(D|\theta) = \prod_{s \in \mathcal{A}} \prod_{u \in \mathcal{A}} p(s|u)^{n(u, s)}\]

Conjugate priors are used to model the prior $P(\theta)$. The reasons are twofold:

1. Exact expressions can be derived for the MAP estimates, expectations and even variances
2. Model order selection/comparison can be implemented easily (available in a future release of the package)

The hyper-parameters determine the form of the prior distribution, which is a product of Dirichlet distributions
\[P(\theta) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{\alpha(u, s) - 1} \Big\}\]
where $\Gamma(.)$ is the Gamma function. The hyper-parameters are specified using the `hyperparam` argument in the `markovchainFit` function. If this argument is not specified, then a default value of 1 is assigned to each hyper-parameter, resulting in the prior distribution of each chain parameter being uniform over $[0,1]$.

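
To make the structure of this prior concrete, the following minimal sketch draws one row of the transition matrix from its Dirichlet prior through the standard Gamma representation; the helper `rdirichletRow` is not part of the package and is shown only for illustration.

```{r dirichletPriorSketch, eval=FALSE}
# Sketch only: sample one row p(.|u) from its Dirichlet prior with
# hyper-parameters alpha(u, .), using the Gamma representation of the Dirichlet.
rdirichletRow <- function(alpha) {
  g <- rgamma(length(alpha), shape = alpha, rate = 1)
  g / sum(g)
}
# With all hyper-parameters equal to 1 the sampled row is uniform on the simplex,
# consistent with the default prior described above.
rdirichletRow(c(1, 1, 1))
```
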
Given the likelihood and the prior as described above, the evidence $P(D)$ is simply given by
\[P(D) = \int P(D|\theta) P(\theta) d\theta\]
which simplifies to
\[
P(D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u))} \Big\}
\]

Using Bayes' theorem, the posterior now becomes (thanks to the choice of conjugate priors)
\[
P(\theta | D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(n(u) + \alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{n(u, s) + \alpha(u, s) - 1} \Big\}
\]

Since this is again a product of Dirichlet distributions, the marginal distribution of a particular parameter $p(s|u)$ of our chain is given by
\[
p(s|u) \sim \mathrm{Beta}(n(u, s) + \alpha(u, s), n(u) + \alpha(u) - n(u, s) - \alpha(u, s))
\]

Thus, the MAP estimate $\hat{\theta}$ is given by
\[
\hat{\theta} = \Big\{ \frac{n(u, s) + \alpha(u, s) - 1}{n(u) + \alpha(u) - |\mathcal{A}|}, s \in \mathcal{A}, u \in \mathcal{A} \Big\}
\]

The function also returns the expected value, given by
\[
\text{E}_{\text{post}} p(s|u) = \Big\{ \frac{n(u, s) + \alpha(u, s)}{n(u) + \alpha(u)}, s \in \mathcal{A}, u \in \mathcal{A} \Big\}
\]

The variance is given by
\[
\text{Var}_{\text{post}} p(s|u) = \frac{n(u, s) + \alpha(u, s)}{(n(u) + \alpha(u))^2} \frac{n(u) + \alpha(u) - n(u, s) - \alpha(u, s)}{n(u) + \alpha(u) + 1}
\]

The square root of this quantity is the standard error, which is returned by the function. The confidence intervals are constructed by computing the inverse of the beta integral.

### Predictive distribution

Given the old data set, the probability of observing new data is $P(D'|D)$ where $D'$ is the new data set. Let $m(u, s), m(u)$ denote the corresponding counts for the new data. Then,
\[
P(D'|D) = \int P(D' | \theta) P(\theta | D) d\theta
\]
We already know the expressions for both quantities in the integral, and it turns out to be similar to evaluating the evidence
\[
P(D'|D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + m(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u) + m(u))} \Big\}
\]

### Choosing the hyper-parameters

The hyper-parameters model the shape of the parameters' prior distribution. These must be provided by the user. The package offers functionality to translate a given prior belief transition matrix into the hyper-parameter matrix. It is assumed that this belief matrix corresponds to the mean value of the parameters. Since the relation
\[
\text{E}_{\text{prior}} p(s | u) = \frac{\alpha(u, s)}{\alpha(u)}
\]
holds, the function accepts as input the belief matrix as well as a scaling vector (which serves as a proxy for $\alpha(.)$) and proceeds to compute $\alpha(., .)$.

Alternatively, the function accepts a data sample and infers the hyper-parameters from it. Since the mode of a parameter (with respect to the prior distribution) is proportional to one less than the corresponding hyper-parameter, we set
\[
\alpha(u, s) - 1 = m(u, s)
\]
where $m(u, s)$ is the $u\rightarrow s$ transition count in the data sample. This is regarded as a 'fake count' which helps $\alpha(u, s)$ to reflect knowledge of the data sample.

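
Before turning to the package functions, the closed form of the MAP estimate can be checked directly on a small sample. The helper `mapEstimate` below is a hypothetical illustration (not the package implementation): it computes the estimate row by row from a transition count matrix and a hyper-parameter matrix, following the expression given above.

```{r mapByHandSketch, eval=FALSE}
# Sketch only: MAP estimate from counts n(u, s) and hyper-parameters alpha(u, s).
mapEstimate <- function(n, alpha) {
  A <- ncol(n)                            # |A|, the number of states
  num <- n + alpha - 1                    # n(u, s) + alpha(u, s) - 1
  den <- rowSums(n) + rowSums(alpha) - A  # n(u) + alpha(u) - |A|
  sweep(num, 1, den, "/")                 # divide each row by its denominator
}
counts <- createSequenceMatrix(c("a", "b", "a", "a", "b", "b", "a"))
mapEstimate(counts, alpha = matrix(1, 2, 2))  # all-ones prior: reduces to the MLE
```

With an all-ones hyper-parameter matrix the estimate coincides with the maximum likelihood estimate, which mirrors the behaviour of `markovchainFit` with `method = "map"` when no `hyperparam` is supplied.
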

### Usage and examples

```{r loadAndDoExample}
weatherStates <- c("sunny", "cloudy", "rain")
byRow <- TRUE
weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1,
                                 0.3, 0.4, 0.3,
                                 0.2, 0.4, 0.4),
                        byrow = byRow, nrow = 3,
                        dimnames = list(weatherStates, weatherStates))
mcWeather <- new("markovchain", states = weatherStates,
                 byrow = byRow, transitionMatrix = weatherMatrix,
                 name = "Weather")
weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny")
```

For the purpose of this section, we shall continue to use the weather of days example introduced in the main vignette of the package (reproduced above for convenience).

Let us invoke the fit function to estimate the MAP parameters with 92\% confidence bounds and hyper-parameters as shown below, based on the first 200 days of the weather data. Additionally, let us find out what the probability is of observing the weather data for the next 165 days. The usage would be as follows:

```{r MAPFit}
hyperMatrix <- matrix(c(1, 1, 2,
                        3, 2, 1,
                        2, 2, 3),
                      nrow = 3, byrow = TRUE,
                      dimnames = list(weatherStates, weatherStates))
markovchainFit(weathersOfDays[1:200], method = "map",
               confidencelevel = 0.92, hyperparam = hyperMatrix)

predictiveDistribution(weathersOfDays[1:200],
                       weathersOfDays[201:365], hyperparam = hyperMatrix)
```

The results should not change after permuting the dimensions of the matrix.

```{r MAPFit2}
hyperMatrix2 <- hyperMatrix[c(2, 3, 1), c(2, 3, 1)]
markovchainFit(weathersOfDays[1:200], method = "map",
               confidencelevel = 0.92, hyperparam = hyperMatrix2)

predictiveDistribution(weathersOfDays[1:200],
                       weathersOfDays[201:365], hyperparam = hyperMatrix2)
```

Note that the predictive probability is very small. However, this can be useful when comparing model orders. Suppose we have an idea of the (prior) transition matrix corresponding to the expected value of the parameters, and have a data set from which we want to deduce the MAP estimates. We can infer the hyper-parameters from this known transition matrix itself, and use this to obtain our MAP estimates.

```{r inferHyperparam}
inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10))
```

Alternatively, we can use a data sample to infer the hyper-parameters.

```{r inferHyperparam2}
inferHyperparam(data = weathersOfDays[1:15])
```

In order to use the inferred hyper-parameter matrices, we do:

```{r inferHyperparam3}
hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10))
hyperMatrix3 <- hyperMatrix3$scaledInference

hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15])
hyperMatrix4 <- hyperMatrix4$dataInference
```

Now we can safely use `hyperMatrix3` and `hyperMatrix4` with `markovchainFit` (in the `hyperparam` argument).

If we do not provide any hyper-parameters, the prior is uniform and the MAP estimate coincides with the maximum likelihood estimate.

```{r MAPandMLE}
data(preproglucacon)
preproglucacon <- preproglucacon[[2]]
MLEest <- markovchainFit(preproglucacon, method = "mle")
MAPest <- markovchainFit(preproglucacon, method = "map")
MLEest$estimate
MAPest$estimate
```

# Applications {#sec:applications}

This section shows applications of DTMC in various fields.

## Weather forecasting {#app:weather}

Markov chains provide a simple model to predict the next day's weather given the current meteorological condition. The first application herewith shown is the "Land of Oz example" from \cite{landOfOz}, the second is the "Alofi Island Rainfall" from \cite{averyHenderson}.


### Land of Oz {#sec:wfLandOfOz}

The Land of Oz is acknowledged not to have ideal weather conditions at all: the weather is snowy or rainy very often and, moreover, there are never two nice days in a row. Consider three weather states: rainy, nice and snowy. Let the transition matrix be as in the following:

```{r weatPred1}
mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"),
            transitionMatrix = matrix(c(0.5, 0.25, 0.25,
                                        0.5, 0, 0.5,
                                        0.25, 0.25, 0.5),
                                      byrow = T, nrow = 3))
```

Given that today it is a nice day, the corresponding stochastic row vector is $w_{0}=(0, 1, 0)$ and the forecasts after 1, 2 and 3 days are given by

```{r weatPred2}
W0 <- t(as.matrix(c(0, 1, 0)))
W1 <- W0 * mcWP; W1

W2 <- W0 * (mcWP ^ 2); W2

W3 <- W0 * (mcWP ^ 3); W3
```

As can be seen from $w_{1}$, if today is a nice day in the Land of Oz, tomorrow it will rain or snow with probability 1. One week later, the prediction can be computed as

```{r weatPred3}
W7 <- W0 * (mcWP ^ 7)
W7
```

The steady state of the chain can be computed by means of the `steadyStates` method.

```{r weatPred4}
q <- steadyStates(mcWP)
q
```

Note that, from the seventh day on, the predicted probabilities are substantially equal to the steady state of the chain and they do not depend on the starting point, as the following code shows.

```{r weatPred5}
R0 <- t(as.matrix(c(1, 0, 0)))
R7 <- R0 * (mcWP ^ 7); R7

S0 <- t(as.matrix(c(0, 0, 1)))
S7 <- S0 * (mcWP ^ 7); S7
```

### Alofi Island Rainfall {#sec:wfAlofi}

Alofi Island daily rainfall data were recorded from January 1st, 1987 until December 31st, 1989 and classified into three states: "0" (no rain), "1-5" (non-zero up to 5 mm) and "6+" (more than 5 mm). The corresponding dataset is provided within the \pkg{markovchain} package.

```{r Alofi1}
data("rain", package = "markovchain")
table(rain$rain)
```

The underlying transition matrix is estimated as follows.

```{r Alofi2}
mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate
mcAlofi
```

The long-term daily rainfall distribution is obtained by means of the `steadyStates` method.

```{r Alofi3}
steadyStates(mcAlofi)
```

## Finance and Economics {#app:fin}

Other relevant applications of DTMC can be found in Finance and Economics.

### Finance {#fin:fin}

Credit ratings transitions have been successfully modeled with discrete time Markov chains. Some rating agencies publish transition matrices that show the empirical transition probabilities across credit ratings. The example that follows comes from the \pkg{CreditMetrics} \proglang{R} package \citep{CreditMetricsR}, carrying Standard \& Poor's published data.

```{r ratings1}
rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D")
creditMatrix <- matrix(
  c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01,
    0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01,
    0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06,
    0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18,
    0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06,
    0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20,
    0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79,
    0, 0, 0, 0, 0, 0, 0, 100
  )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE)
```

It is easy to convert such matrices into `markovchain` objects and to perform some analyses.

```{r ratings2}
creditMc <- new("markovchain", transitionMatrix = creditMatrix,
                name = "S&P Matrix")
absorbingStates(creditMc)
```

### Economics {#fin:ec}

For a recent application of \pkg{markovchain} in Economics, see \cite{manchesterR}.

A dynamic system generates two kinds of economic effects \citep{bardPpt}:


1. those incurred when the system is in a specified state, and
2. those incurred when the system makes a transition from one state to another.

Let the monetary amount of being in a particular state be represented as an m-dimensional column vector $c^{\rm{S}}$, and let the monetary amount of a transition be embodied in a $C^{\rm{R}}$ matrix in which each component specifies the monetary amount of going from state $i$ to state $j$ in a single step. Hence, Equation \@ref(eq:cost) represents the monetary amount of being in state $i$.

\begin{equation}
{c_i} = c_i^{\rm{S}} + \sum\limits_{j = 1}^m {C_{ij}^{\rm{R}}} {p_{ij}}.
\label{eq:cost}
\end{equation}

Let $\bar c = \left[ c_i \right]$ and let $e_i$ be the row vector with value 1 in the $i$-th (initial) state and 0 in all others; then, if $f_n$ is the random variable representing the economic return associated with the stochastic process at time $n$, Equation \@ref(eq:return) holds:

\begin{equation}
E\left[ {{f_n}\left( {{X_n}} \right)|{X_0} = i} \right] = {e_i}{P^n}\bar c.
\label{eq:return}
\end{equation}

The following example assumes that a telephone company models the transition probabilities between customer/non-customer status by matrix $P$ and the costs associated with the states by matrix $M$.

```{r economicAnalysis1}
statesNames <- c("customer", "non customer")
P <- markovchain:::zeros(2)
P[1, 1] <- .9; P[1, 2] <- .1
P[2, 2] <- .95; P[2, 1] <- .05
rownames(P) <- statesNames; colnames(P) <- statesNames
mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company")
M <- markovchain:::zeros(2)
M[1, 1] <- -20; M[1, 2] <- -30
M[2, 1] <- -40; M[2, 2] <- 0
```

If the average revenue for an existing customer is +100, the cost per state is computed as follows.

```{r economicAnalysis2}
c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,]
c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,]
```

For an existing customer, the expected gain (loss) at the fifth year is given by the following code.

```{r economicAnalysis3}
as.numeric((c(1, 0) * mcP ^ 5) %*% (as.vector(c(c1, c2))))
```

## Actuarial science {#app:act}

Markov chains are widely applied in the field of actuarial science. Two classical applications are policyholders' distribution across Bonus Malus classes in Motor Third Party Liability (MTPL) insurance (Section \@ref(sec:bm)) and health insurance pricing and reserving (Section \@ref(sec:hi)).

### MTPL Bonus Malus {#sec:bm}

Bonus Malus (BM) contracts grant the policyholder a discount (or penalty) as a function of the number of claims in the experience period. The discount (or penalty) is applied to a premium that already allows for known (a priori) policyholder characteristics \citep{denuit2007actuarial} and it usually depends on vehicle, territory, the demographic profile of the policyholder, and policy coverage depth (deductible and policy limits).

Since the proposed BM level depends on the claims in the previous period, it can be modeled by a discrete Markov chain. A very simplified example follows. Assume a BM scale from 1 to 5, where 4 is the starting level. The evolution rules are shown in Equation \ref{eq:BM}:

\begin{equation}
bm_{t + 1} = \max \left( {1,bm_{t} - 1} \right)*\left( {\tilde N = 0} \right) + \min \left( {5,bm_{t} + 2*\tilde N} \right)*\left( {\tilde N \ge 1} \right).
\label{eq:BM}
\end{equation}

The number of claims $\tilde N$ is a random variable that is assumed to be Poisson distributed.

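
The rule in Equation \ref{eq:BM} can be made explicit with a small helper; `nextBmLevel` below is a hypothetical illustration, not a package function.

```{r bonusMalusRule, eval=FALSE}
# Sketch only: next BM level given the current level bm (1 to 5) and the
# number of claims N observed in the period, following Equation (eq:BM).
nextBmLevel <- function(bm, N) {
  if (N == 0) max(1, bm - 1) else min(5, bm + 2 * N)
}
nextBmLevel(4, 0)  # no claims: moves down one level, to 3
nextBmLevel(4, 1)  # one claim: moves up to level 5, the worst class
```

The full one-step transition matrix over the five levels, with $\tilde N$ Poisson distributed, is built by the function below.
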

```{r bonusMalus1}
getBonusMalusMarkovChain <- function(lambda) {
  bmMatr <- markovchain:::zeros(5)
  bmMatr[1, 1] <- dpois(x = 0, lambda)
  bmMatr[1, 3] <- dpois(x = 1, lambda)
  bmMatr[1, 5] <- 1 - ppois(q = 1, lambda)

  bmMatr[2, 1] <- dpois(x = 0, lambda)
  bmMatr[2, 4] <- dpois(x = 1, lambda)
  bmMatr[2, 5] <- 1 - ppois(q = 1, lambda)

  bmMatr[3, 2] <- dpois(x = 0, lambda)
  bmMatr[3, 5] <- 1 - dpois(x = 0, lambda)

  bmMatr[4, 3] <- dpois(x = 0, lambda)
  bmMatr[4, 5] <- 1 - dpois(x = 0, lambda)

  bmMatr[5, 4] <- dpois(x = 0, lambda)
  bmMatr[5, 5] <- 1 - dpois(x = 0, lambda)

  stateNames <- as.character(1:5)
  out <- new("markovchain", transitionMatrix = bmMatr,
             states = stateNames, name = "BM Matrix")
  return(out)
}
```

Assuming that the a-priori claim frequency per car-year is 0.05 in the class (the class being the group of policyholders that share the same characteristics), the underlying BM transition matrix and its steady state are as follows.

```{r bonusMalus2}
bmMc <- getBonusMalusMarkovChain(0.05)
as.numeric(steadyStates(bmMc))
```

If the underlying BM coefficients of the class are 0.5, 0.7, 0.9, 1.0, 1.25, this means that the average BM coefficient applied in the long run to the class is given by

```{r bonusMalus3}
sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25))
```

This means that the average premium paid by policyholders in the portfolio almost halves in the long run.

### Health insurance example {#sec:hi}

Actuaries quantify the risk inherent in insurance contracts by evaluating the premium of the insurance contracts to be sold (therefore covering future risk) and by evaluating the actuarial reserves of existing portfolios (the liabilities in terms of benefits or claim payments due to policyholders arising from previously sold contracts); see \cite{deshmukh2012multiple} for details.

An applied example can be performed using the data from \cite{de2016assicurazioni} that has been saved in the `extdata` folder.

```{r healthIns6}
ltcDemoPath <- system.file("extdata", "ltdItaData.txt",
                           package = "markovchain")
ltcDemo <- read.table(file = ltcDemoPath, header = TRUE,
                      sep = ";", dec = ".")
head(ltcDemo)
```

The data shows the transition probabilities between the states (A)ctive, (I)ll and (D)ead. It is easy to complete the transition matrix.

```{r healthIns7}
ltcDemo <- transform(ltcDemo,
                     pIA = 0,
                     pII = 1 - pID,
                     pDD = 1,
                     pDA = 0,
                     pDI = 0)
```

Now we build a function that returns the transition matrix during the $(t+1)$-th year, assuming that the subject has attained age $t$.

```{r healthIns8}
possibleStates <- c("A", "I", "D")
getMc4Age <- function(age) {
  transitionsAtAge <- ltcDemo[ltcDemo$age == age, ]

  myTransMatr <- matrix(0, nrow = 3, ncol = 3,
                        dimnames = list(possibleStates, possibleStates))
  myTransMatr[1, 1] <- transitionsAtAge$pAA[1]
  myTransMatr[1, 2] <- transitionsAtAge$pAI[1]
  myTransMatr[1, 3] <- transitionsAtAge$pAD[1]
  myTransMatr[2, 2] <- transitionsAtAge$pII[1]
  myTransMatr[2, 3] <- transitionsAtAge$pID[1]
  myTransMatr[3, 3] <- 1

  myMc <- new("markovchain", transitionMatrix = myTransMatr,
              states = possibleStates,
              name = paste("Age", age, "transition matrix"))
  return(myMc)
}
```

Because transitions are not homogeneous across ages, we use a `markovchainList` object to describe the transition probabilities for a subject starting at age 100.


```{r healthIns8-prob}
getFullTransitionTable <- function(age){
  ageSequence <- seq(from = age, to = 120)
  k <- 1
  myList <- list()
  for (i in ageSequence) {
    mc_age_i <- getMc4Age(age = i)
    myList[[k]] <- mc_age_i
    k <- k + 1
  }
  myMarkovChainList <- new("markovchainList", markovchains = myList,
                           name = paste("TransitionsSinceAge", age, sep = ""))
  return(myMarkovChainList)
}
transitionsSince100 <- getFullTransitionTable(age = 100)
```

We can use such transitions to simulate ten life trajectories for a subject that begins "active" (A) aged 100:

```{r healthIns9}
rmarkovchain(n = 10, object = transitionsSince100,
             what = "matrix", t0 = "A", include.t0 = TRUE)
```

Let us consider 1000 simulated life trajectories for a healthy subject aged 80. We can compute the expected time the subject will be disabled, starting active at age 80.

```{r healthIns10}
transitionsSince80 <- getFullTransitionTable(age = 80)
lifeTrajectories <- rmarkovchain(n = 1e3, object = transitionsSince80,
                                 what = "matrix", t0 = "A", include.t0 = TRUE)
temp <- matrix(0, nrow = nrow(lifeTrajectories), ncol = ncol(lifeTrajectories))
temp[lifeTrajectories == "I"] <- 1
expected_period_disabled <- mean(rowSums((temp)))
expected_period_disabled
```

Assuming that the health insurance will pay a benefit of 12000 per year of disability and that the real interest rate is 0.02, we can compute the lump-sum premium at age 80.

```{r healthIns11}
mean(rowMeans(12000 * temp %*% (matrix((1 + 0.02)^-seq(from = 0, to = ncol(temp) - 1)))))
```

## Sociology {#app:sociology}

Markov chains have been actively used to model progressions and regressions between social classes. The first study was performed by \cite{glassHall}, while a more recent application can be found in \cite{blandenEtAlii}. The table that follows shows the income quartile of the father when the son was 16 (in 1984) and the income quartile of the son when aged 30 (in 2000) for the 1970 cohort.

```{r blandenEtAlii}
data("blanden")
mobilityMc <- as(blanden, "markovchain")
mobilityMc
```

The underlying transition graph is plotted in Figure \@ref(fig:mobility).

```{r mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."}
plot(mobilityMc, main = '1970 mobility', vertex.label.cex = 2,
     layout = layout.fruchterman.reingold)
```

The steady state distribution is computed as follows. Since the states are income quartiles, the steady-state probabilities are approximately evenly spread at 0.25 each.

```{r blandenEtAlii3}
round(steadyStates(mobilityMc), 2)
```

## Genetics and Medicine {#sec:gen}

This section contains two examples: the first shows the use of Markov chain models in genetics, the second shows an application of Markov chains in modelling disease dynamics.

### Genetics {#sec:genetics}

\cite{averyHenderson} discusses the use of Markov chains in modelling the preproglucacon gene base sequence. The corresponding data is available in the `preproglucacon` dataset shipped with the \pkg{markovchain} package.

```{r preproglucacon1}
data("preproglucacon", package = "markovchain")
```

It is possible to model the transition probabilities between bases as shown in the following code.

```{r preproglucacon2}
mcProtein <- markovchainFit(preproglucacon$preproglucacon,
                            name = "Preproglucacon MC")$estimate
mcProtein
```

### Medicine {#sec:medicine}

Discrete-time Markov chains are also employed to study the progression of chronic diseases. The following example is taken from \cite{craigSendi}. Starting from six-month follow-up data, the maximum likelihood estimation of the monthly transition matrix is obtained.
This transition matrix aims to describe the monthly progression of CD4-cell counts of HIV-infected subjects.

```{r epid1}
craigSendiMatr <- matrix(c(682, 33, 25,
                           154, 64, 47,
                           19, 19, 43), byrow = T, nrow = 3)
hivStates <- c("0-49", "50-74", "75-UP")
rownames(craigSendiMatr) <- hivStates
colnames(craigSendiMatr) <- hivStates
craigSendiTable <- as.table(craigSendiMatr)
mcM6 <- as(craigSendiTable, "markovchain")
mcM6@name <- "Zero-Six month CD4 cells transition"
mcM6
```

As shown in the paper, the second step consists in the decomposition of $M_{6}=V \cdot D \cdot V^{-1}$ in order to obtain $M_{1}$ as $M_{1}=V \cdot D^{1/6} \cdot V^{-1}$.

```{r epid2}
eig <- eigen(mcM6@transitionMatrix)
D <- diag(eig$values)
```

```{r epid3}
V <- eig$vectors
V %*% D %*% solve(V)
d <- D ^ (1/6)
M <- V %*% d %*% solve(V)
mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates)
```

# Discussion, issues and future plans

The \pkg{markovchain} package has been designed in order to provide easy handling of DTMCs and communication with alternative packages. The package has seen several improvements in recent years: many functions have been added, the software has been ported to \pkg{Rcpp} \citep{RcppR}, and many methodological improvements have increased the software reliability.

# Acknowledgments {#sec:aknowledgements}

The package was selected for Google Summer of Code 2015 support. The authors wish to thank Michael Cole, Tobi Gutman and Mildenberger Thoralf for their suggestions and bug checks. A final thanks also to Dr. Simona C. Minotti and Dr. Mirko Signorelli for their support in drafting this version of the vignettes.

\clearpage

# References

markovchain/inst/doc/gsoc_2017_additions.pdf0000644000176200001440000031632214503773062020451 0ustar liggesusers
markovchain/inst/doc/gsoc_2017_additions.Rmd0000644000176200001440000005216414430771726020426 0ustar liggesusers
---
title: "Google Summer of Code 2017 Additions"
author: "Vandit Jain"
date: "August 2017"
output: rmarkdown::pdf_document
bibliography: markovchainBiblio.bib
vignette: >
  %\VignetteIndexEntry{Google Summer of Code 2017 Additions}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
pkgdown:
  as_is: true
  extension: pdf
---

# Expected Hitting Time using CTMC

The package provides the `ExpectedTime` function to calculate the average hitting time from one state to another. Let the final state be $j$; then, for every state $i \in I$, where $I$ is the set of all possible states, assume the holding rate $q_{i} > 0$ for every $i \neq j$. Under these conditions, the expected hitting time is equal to the minimal non-negative solution vector $p$ of the system of linear equations [@NorrisBook]:

\begin{equation}
\begin{array}{lcr}
p_{k} = 0 & k = j \\
-\sum_{l \in I} q_{kl}p_{l} = 1 & k \neq j 
\end{array}
\label{eq:EHT}
\end{equation}

For example, consider the following continuous-time markovchain:

``` {r,message = FALSE}
library(markovchain)
states <- c("a","b","c","d")
byRow <- TRUE
gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0),
              nrow = 4, byrow = byRow, dimnames = list(states,states))
ctmc <- new("ctmc", states = states, byrow = byRow, generator = gen, name = "testctmc")
```

The generator matrix of the ctmc is:

\[
M = \left(\begin{array}{cccc}
-1 & 1/2 & 1/2 & 0\\
1/4 & -1/2 & 0 & 1/4\\
1/6 & 0 & -1/3 & 1/6\\
0 & 0 & 0 & 0
\end{array}\right)
\]

Now, to calculate the expected time the process will take to hit state $d$ if we start from $a$, we apply the `ExpectedTime` function. `ExpectedTime` takes four inputs, namely a `ctmc`-class object, the initial state $i$, the final state $j$ for which the expected hitting time is computed, and a logical parameter indicating whether to use the Rcpp implementation. By default the Rcpp implementation is used, as it is faster.

``` {r}
ExpectedTime(ctmc,1,4)
```

We find that the expected hitting time for the process to reach state $d$ is 7 time units in this case.

# Calculating Probability at time T using ctmc

The package provides the function `probabilityatT` to calculate the probability of every state at time $t$ for a given `ctmc` object. Kolmogorov's backward equation relates the transition matrix at any time $t$ to the generator matrix [@dobrow2016introduction]:

\begin{equation}
P'(t) = QP(t)
\end{equation}

Here we use the solution of this differential equation, $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. In this equation, $P(t)$ is the transition function at time $t$. The entry $P(t)[i][j]$ describes the conditional probability that the state at time $t$ equals $j$, given that it was equal to $i$ at time $t=0$. The function takes care of the case when the `ctmc` object has a generator represented by columns. If the initial state is not provided, the function returns the whole transition matrix $P(t)$.

The function also has an Rcpp implementation, which reduces computation time and is used by default.

Next, we consider both the case where the initial state is not given and the case where it is. In the first case, the function takes two inputs: an object of the S4 class `ctmc` and the final time $t$.
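Before calling `probabilityatT`, note that the solution $P(t) = e^{tQ}$ can also be evaluated directly with the $expm$ package (imported by `markovchain`, so it should already be available); the following sketch is only a cross-check of the matrix-exponential solution, not the package's internal code:

``` {r}
# A cross-check (not the package's internal code): evaluate P(t) = exp(tQ)
# directly for the generator 'gen' defined above, at t = 1.
library(expm)
expm::expm(gen * 1)
```

The package function should agree with this direct computation: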
``` {r}
probabilityatT(ctmc,1)
```

Here we get the output in the form of a transition matrix. If we take the second case, i.e. providing an initial state:

``` {r}
probabilityatT(ctmc,1,1)
```

In this case we get the probabilities corresponding to every state. This also includes the probability that the process is again in state $a$ after time $t=1$.

# Plotting generator matrix of continuous-time markovchains

The package provides a `plot` function for plotting a generator matrix $Q$ in the form of a directed graph, where every possible state is assigned a node. Edges connecting these nodes are weighted: the weight of the edge going from state $i$ to state $j$ is equal to the value $Q_{ij}$. This gives a picture of the generator matrix. For example, we build a ctmc-class object to plot it.

``` {r}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates,
                     byrow = byRow, generator = gen,
                     name = "Molecular Transition Model")
```

Now if we plot this object we get the following graph:

``` {r}
plot(molecularCTMC)
```

The figure shown is built using the $igraph$ package. The package also provides the option of plotting the graph using the $diagram$ and $DiagrammeR$ packages. A plot using these packages can be built with the following commands:

``` {r}
if(requireNamespace(package='diagram', quietly = TRUE)) {
plot(molecularCTMC,package = "diagram")
} else {
print("diagram package unavailable")
}
```

Similarly, one can easily replace the $diagram$ package with $DiagrammeR$.

# Imprecise Continuous-Time Markov chains

Continuous-time Markov chains are mathematical models that are used to describe the state-evolution of dynamical systems under stochastic uncertainty. However, building models using continuous-time markovchains takes into consideration a number of assumptions which may not be realistic for the domain of application; in particular, the ability to provide exact numerical parameter assessments, and the applicability of time-homogeneity and the eponymous Markov property. Hence we take ICTMCs into consideration.

More technically, an ICTMC is a set of “precise” continuous-time finite-state stochastic processes, and rather than computing expected values of functions, we seek to compute lower expectations, which are tight lower bounds on the expectations that correspond to such a set of “precise” models.

## Types of ICTMCs

For any non-empty bounded set of rate matrices $L$, and any non-empty set $M$ of probability mass functions on $X$, we define the following three sets of stochastic processes that are jointly consistent with $L$ and $M$:

* $P^{W}_{L,M}$ is the consistent set of all well-behaved stochastic processes;
* $P^{WM}_{L,M}$ is the consistent set of all well-behaved Markov chains;
* $P^{WHM}_{L,M}$ is the consistent set of all well-behaved homogeneous Markov chains[@ictmcpaper].

From a practical point of view, after having specified a (precise) stochastic process, one is typically interested in the expected value of some function of interest, or the probability of some event. Similarly, in this work, our main objects of consideration will be the lower probabilities that correspond to the ICTMCs.

## Lower Transition Rate Operators for ICTMCs

A map $Q_{l}$ from $L(X)$ to $L(X)$ is called a lower transition rate operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all constant $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]:

1. $[Q_{l}\mu](x) = 0$
2. $[Q_{l}I_{y}](x) \geq 0$ for all $y \in X$ such that $x \neq y$
3. $[Q_{l}(f + g)](x) \geq [Q_{l}f](x) + [Q_{l}g](x)$
4. $[Q_{l}(\lambda f)](x) = \lambda [Q_{l}f](x)$

## Lower Transition Operators

A map $T_{l}$ from $L(X)$ to $L(X)$ is called a lower transition operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]:

1. $[T_{l} f](x) \geq \min(f(y) : y \in X)$
2. $[T_{l}(f + g)](x) \geq [T_{l} f](x)+[T_{l}g](x)$
3. $[T_{l}(\lambda f)](x) = \lambda [T_{l} f](x)$

## impreciseProbabilityatT function

Now we come to the practical purpose of using ICTMC classes. ICTMC classes in this package are defined to represent a generator in which every row, corresponding to a state of the process, is governed by a separate variable. As defined earlier, an imprecise continuous-time markovchain is a set of many precise CTMCs. Hence this representation of a set of precise CTMCs can be used to calculate the transition probability at some time in the future. This can be seen as an analogue of the `probabilityatT` function, which calculates the transition function at some later time $t$ using the generator matrix.

For every generator matrix, we have a corresponding transition function. Similarly, for every lower transition rate operator of an ICTMC, we have a corresponding lower transition operator, denoted by $L_{t}^{s}$. Here $t$ is the initial time and $s$ is the final time. Now we mention a proposition[@ictmcpaper] which states that:

Let $Q_{l}$ be a lower transition rate operator, choose any two times $t$ and $s$, both greater than 0, such that $t \leq s$, and let $L_{t}^{s}$ be the lower transition operator corresponding to $Q_{l}$. Then for any $f \in L(X)$ and $\epsilon \in R_{>0}$, if we choose any $n \in N$ such that:

\[n \geq \max((s-t)||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\]

with $||f||_{v} := \max f - \min f$, we are guaranteed that[@ictmcpaper]

\[ ||L_{t}^{s} - \prod_{i=1}^{n}(I + \Delta Q_{l}) || \leq \epsilon \]

with $\Delta := \frac{s-t}{n}$.

Simply put, this proposition tells us that if we can compute $Q_{l}g$ for all $g \in L(X)$, then we can also approximate the quantity $L_{t}^{s}f$ to arbitrary precision, for any given $f \in L(X)$.

To explain this approximate calculation, we take a detailed example of a process with two states, healthy and sick, hence $X = (healthy, sick)$. If we represent it in the form of an ICTMC, we get:

\[
Q = \left(\begin{array}{cc}
-a & a \\
b & -b
\end{array}\right)
\]

for some $a,b \in R_{\geq 0}$. The parameter $a$ here is the rate at which a healthy person becomes sick. Technically, this means that if a person is healthy at time $t$, the probability that he or she will be sick at time $t +\Delta$, for small $\Delta$, is very close to $\Delta a$. More intuitively, if we take the time unit to be one week, it means that he or she will, on average, become sick after $\frac{1}{a}$ weeks. The parameter $b$ is the rate at which a sick person becomes healthy again, and has a similar interpretation.

Now, to completely represent the ICTMC, we take an example and write the generator as:

\[
Q = \left(\begin{array}{cc}
-a & a \\
b & -b
\end{array}\right) : a \in [\frac{1}{52},\frac{3}{52}], b \in [\frac{1}{2},2]
\]

Now suppose we know the initial state of the patient to be sick; this is represented in the form of a function by:

\[
I_{s} = \left(\begin{matrix}
0 \\
1
\end{matrix}\right)
\]

We observe that $||I_{s}|| = 1$.
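To make the lower transition rate operator concrete for this example, the sketch below evaluates $[Q_{l}g](x)$, i.e. the minimum of $[Qg](x)$ over all rate matrices $Q$ in the set above. This is an illustration only: the helper `lowerQ` is hypothetical and is not part of the package. Since $[Qg](x)$ is linear in $a$ (respectively $b$), the minimum over each interval is attained at one of its endpoints.

``` {r}
# Hypothetical helper (not part of the package): lower transition rate
# operator for the healthy/sick example, with a in [1/52, 3/52] and
# b in [1/2, 2]. Because [Qg](x) is linear in a (resp. b), the minimum
# over each interval is attained at an endpoint.
lowerQ <- function(g, aRange = c(1/52, 3/52), bRange = c(1/2, 2)) {
  qHealthy <- min(aRange * (g[2] - g[1]))  # [Qg](healthy) = a * (g(sick) - g(healthy))
  qSick    <- min(bRange * (g[1] - g[2]))  # [Qg](sick)    = b * (g(healthy) - g(sick))
  c(healthy = qHealthy, sick = qSick)
}
lowerQ(c(0, 1))  # applied to the indicator function I_s of the sick state
```

A single step of the approximation described next is then simply $g + \Delta\, Q_{l}g$, i.e. `g + Delta * lowerQ(g)`.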
Now, to use the proposition mentioned above, we use the definition to calculate the lower transition rate operator $Q_{l}$. Next we calculate the norm of the lower transition rate operator and use it in the proposition. Also, we take the value of $\epsilon$ to be 0.001.

Using the proposition we can come up with an algorithm for calculating the probability at any time $s$, given the state at initial time $t$ and an ICTMC generator[@ictmcpaper]. The algorithm is as follows:

**Input**: A lower transition rate operator $Q$, two time points $t,s$ such that $t \leq s$, a function $f \in L(X)$ and a maximum numerical error $\epsilon \in R_{>0}$.

**Algorithm**:

1. $n = \max((s-t)||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)$
2. $\Delta = \frac{s-t}{n}$
3. $g_{0} = f$
4. for $i \in (1,\ldots,n)$ do $g_{i} = g_{i-1} + \Delta Q_{l}g_{i-1}$
5. end for
6. return $g_{n}$

**Output**: The conditional probability vector at time $s$, with numerical error at most $\epsilon$.

Hence, after applying the algorithm to the above example we get the following result: $g_{n} = 0.0083$ if the final state is $healthy$ and $g_{n} = 0.141$ if the final state is $sick$. The probability calculated has an error of at most $\epsilon$, i.e. $0.001$.

Now we run the algorithm on the example through R code.

``` {r}
states <- c("n","y")
Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states))
range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2)
name <- "testictmc"
ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name)
impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE)
```

The probabilities we get have an error of at most $10^{-3}$.

# Continuous-time markovchain generator using a frequency matrix

The package provides the `freq2Generator` function. It takes a matrix of relative frequency values, along with the elapsed time, and returns a continuous-time markovchain generator matrix. Here, the frequency matrix is a square matrix with dimension equal to the number of possible states, describing the number of transitions from a state $i$ to $j$ in time $t$, which is another parameter to be provided to the function. The function also allows choosing among three methods for the calculation of the generator matrix [@freqArticle]. It requires the [@pkg:ctmcd] package. The three methods are as follows:

1. Quasi Optimization - "QO"
2. Diagonal Adjustment - "DA"
3. Weighted Adjustment - "WA"

See the reference for details about the methods. Here is an example matrix on which the `freq2Generator` function is run:

``` {r}
if(requireNamespace(package='ctmcd', quietly = TRUE)) {
sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE)
sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1))
freq2Generator(sample_rel,1)
} else {
print('ctmcd unavailable')
}
```

# Committor of a markovchain

Consider two sets of states $A$ and $B$ comprising states from a markovchain with transition matrix $P$. The committor vector of a markovchain with respect to sets $A$ and $B$ gives the probability that the process will hit a state from set $A$ before any state from set $B$. The committor vector $u$ can be calculated by solving the following system of linear equations[@committorlink]:

$$
\begin{array}{l}
Lu(x) = 0, x \notin A \cup B \\
u(x) = 1, x \in A \\
u(x) = 0, x \in B
\end{array}
$$

where $L = P - I$.
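Since $u$ is known on $A$ and $B$, the system above reduces to a linear solve on the remaining states $C = S \setminus (A \cup B)$: $(I - P_{CC})u_{C} = P_{CA}\mathbf{1}$. The sketch below solves this directly in base R for the five-state chain used in the example that follows; it is an illustration of the equations, not the package's implementation:

``` {r}
# A sketch (not the package's implementation): solve the committor system
# directly for the 5-state chain used in the committorAB() example below,
# with A = {"e"} and B = {"c"}.
P <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),
            nrow = 5, dimnames = list(c("a","b","c","d","e"), c("a","b","c","d","e")))
A <- "e"; B <- "c"
C <- setdiff(rownames(P), c(A, B))
uC <- solve(diag(length(C)) - P[C, C], P[C, A, drop = FALSE] %*% rep(1, length(A)))
u <- setNames(numeric(nrow(P)), rownames(P))
u[A] <- 1
u[C] <- uC
u
```

The resulting vector should agree with the output of `committorAB` in the example below.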
Now we apply the method to an example:

``` {r eval=FALSE}
transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5)
object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc")
committorAB(object,c(5),c(3))
```

Here we get the probability that the process will hit state "e" before state "c", for each possible initial state.

# First Passage probability for a set of states

Computation of the first passage time for individual states has already been implemented in the package. The `firstPassageMultiple` function provides a method to get the first passage probability for a given set of states. Consider this example markovchain object:

``` {r}
statesNames <- c("a", "b", "c")
testmarkov <- new("markovchain", states = statesNames, transitionMatrix =
                    matrix(c(0.2, 0.5, 0.3,
                             0.5, 0.1, 0.4,
                             0.1, 0.8, 0.1), nrow = 3, byrow = TRUE,
                           dimnames = list(statesNames, statesNames)
                    ))
```

Now we apply the `firstPassageMultiple` function to calculate the first passage probabilities for the set of states $"b", "c"$ when the initial state is $"a"$.

``` {r}
firstPassageMultiple(testmarkov,"a",c("b","c"),4)
```

This shows us the probability that the process will hit any of the states from the set after $n$ steps. For instance, as shown, the probability that the process hits any of the states among $"b", "c"$ after $2$ steps is $0.6000$.

# Joint PDF of number of visits to the various states of a markovchain

The package provides a function `noofVisitsDist` that returns the PDF of the number of visits to the various states of the discrete-time markovchain during the first $N$ steps, given the initial state of the process. We will take an example to see how to use the function on a `markovchain-class` object:

``` {r}
transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"),
              transitionMatrix=transMatr,
              name="simpleMc")
noofVisitsDist(simpleMc,5,"a")
```

The output clearly shows the probabilities related to the various states of the process.

# Expected Rewards for a markovchain

The package provides a function `expectedRewards` that returns a vector of expected rewards for different initial states. The user provides the reward values: a vector $r$ of size equal to the number of states, with a value corresponding to every state. Given a transition matrix $[P]$, we get the vector of expected rewards $v$ after $n$ transitions according to the following equation[@GallagerBook]:

$v[n] = r + [P]*v[n-1]$

Applying this equation to a markovchain-class object:

``` {r}
transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr)
expectedRewards(simpleMc,1,c(0,1))
```

## Expected Rewards for a set of states in a markovchain process

The package provides a function `expectedRewardsBeforeHittingA` that returns the expected first passage rewards $E$, given rewards corresponding to every state and an initial state. This means the function returns the expected reward for a given initial state $s_{0}$, number of transitions $n$ and set of states $A$, under the constraint that the process does not hit any of the states that belong to $A$. $S$ is the set of all possible states. The function uses the following equation:

$$E = \sum_{i=1}^{n}{1_{s_{0}}P_{S-A}^{i}R_{S-A}}$$

Here $1_{s_{0}} = [0,0,...,0,1,0,...,0,0,0]$, with the 1 in the position of $s_{0}$, and $R_{S-A}$ is the rewards vector restricted to the states in $S-A$; a direct evaluation of this formula is sketched below.
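As a sketch of this formula (an illustration only, not the package's internal code), the sum can be evaluated with plain matrix algebra for the two-state chain `simpleMc` defined above, taking $A = \{b\}$, $s_{0} = a$, $n = 5$ and a hypothetical reward of 1 for state $a$:

``` {r}
# Direct evaluation of E = sum_{i=1}^{n} 1_{s0} P_{S-A}^i R_{S-A} (a sketch,
# not the package's internal code). Uses 'simpleMc' from the previous chunk;
# A = {"b"}, s0 = "a", n = 5, and a hypothetical reward of 1 for state "a".
P <- simpleMc@transitionMatrix
keep <- setdiff(states(simpleMc), "b")   # the states in S - A
PsubA <- P[keep, keep, drop = FALSE]
RsubA <- c(1)                            # rewards for the states in S - A
oneS0 <- c(1)                            # indicator of s0 = "a" within S - A
n <- 5
E <- 0
Pi <- diag(length(keep))
for (i in seq_len(n)) {
  Pi <- Pi %*% PsubA
  E  <- E + as.numeric(oneS0 %*% Pi %*% RsubA)
}
E
```

The same quantity is what `expectedRewardsBeforeHittingA` is designed to return; see its help page for the exact argument order.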
# Checking Irreducibility of a CTMC

The package provides a function `is.CTMCirreducible` that returns a Boolean value stating whether the ctmc object is irreducible. We know that a continuous-time markovchain is irreducible if and only if its embedded chain is irreducible[@Sigman].

We demonstrate an example running the function:

``` {r}
energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates,
                     byrow = byRow, generator = gen,
                     name = "Molecular Transition Model")
is.CTMCirreducible(molecularCTMC)
```

# Simulation of Higher Order Multivariate Markovchains

The package provides the `predictHommc` function. This function provides a simulation system for higher order multivariate markovchains. The function assumes that the state probability distribution of the $j$th sequence at time $r+1$ depends on the state probability distribution of all the sequences at the $n$ previous moments of time, i.e. $t = r$ to $t = r-n+1$. Hence the proposed model takes the following mathematical form[@ching2008higher]:

$$ X_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^n{\lambda_{jk}^{(h)}P_{h}^{(jk)}X_{r-h+1}^{(k)}}, \ \ \ j = 1,2,\ldots,s, \ \ r = n-1,n,... $$

with initial values $X_{0}^{(k)},X_{1}^{(k)},\ldots,X_{n-1}^{(k)} \ (k = 1,2,\ldots,s)$. Here, $\lambda_{jk}^{(h)} \geq 0, \ 1 \leq j,k \leq s, \ 1 \leq h \leq n$ and $\sum_{k=1}^{s}\sum_{h=1}^{n}{\lambda_{jk}^{(h)} = 1}, \ \ \ j = 1,2,\ldots,s.$

Now we run an example on a sample hommc object, simulating the next 3 steps using the `predictHommc` function. The function provides a choice of entering initial states according to the hommc object. In case the user does not enter initial states, the function takes all initial states to be the first state from the set of states.

``` {r}
if (requireNamespace("Rsolnp", quietly = TRUE)) {
statesName <- c("a", "b")
P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName))
P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2)
P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2)
P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2)
P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2)
Lambda <- c(0.8, 0.2, 0.3, 0.7)
ob <- new("hommc", order = 1, states = statesName, P = P,
          Lambda = Lambda, byrow = FALSE, name = "FOMMC")
predictHommc(ob,3)
} else {
print("Rsolnp unavailable")
}
```

# Check Time Reversibility of Continuous-time markovchains

A continuous-time markovchain with generator $Q$ and stationary distribution $\pi$ is said to be time reversible if[@dobrow2016introduction]:

$$ \pi_{i}q_{ij} = \pi_{j}q_{ji} $$

Intuitively, a continuous-time Markov chain is time reversible if the process in forward time is indistinguishable from the process in reversed time. A consequence is that for all states $i$ and $j$, the long-term forward transition rate from $i$ to $j$ is equal to the long-term backward rate from $j$ to $i$.

The package provides the `is.TimeReversible` function to check whether a `ctmc` object is time-reversible. We first verify the condition by hand with the sketch below, and then follow with an example run on a `ctmc` object.
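The manual check (a sketch, independent of the package) computes the stationary distribution $\pi$ from $\pi Q = 0$ together with $\sum_{i}\pi_{i} = 1$, and compares $\pi_{i}q_{ij}$ with $\pi_{j}q_{ji}$ for the molecular example:

``` {r}
# Manual detailed-balance check (a sketch, not the package's code) for the
# two-state molecular example.
Q <- matrix(c(-3, 3, 1, -1), nrow = 2, byrow = TRUE,
            dimnames = list(c("sigma", "sigma_star"), c("sigma", "sigma_star")))
# solve pi %*% Q = 0 together with sum(pi) = 1; the augmented system is
# consistent, so the least-squares solution is exact
piVec <- qr.solve(rbind(t(Q), rep(1, 2)), c(0, 0, 1))
piVec
# time reversibility requires pi_i * q_ij == pi_j * q_ji
all.equal(unname(piVec[1] * Q[1, 2]), unname(piVec[2] * Q[2, 1]))
```

`is.TimeReversible` should therefore return `TRUE` for this generator: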
``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) ``` # References markovchain/inst/extdata/0000755000176200001440000000000013762012756015174 5ustar liggesusersmarkovchain/inst/extdata/ltdItaData.txt0000644000176200001440000001754413762012756017763 0ustar liggesusersage;pAD;pID;pAI;pAA 20;0.000461600217312132;0.0108336431456553;0.000176246694342926;0.999362153088345 21;0.000482488764261525;0.0107971909886194;0.000171057735612955;0.999346453500126 22;0.000494993811957668;0.0117707568980395;0.00015923328063064;0.999345772907412 23;0.00050429345591411;0.0115939415352431;0.00016057305104192;0.999335133493044 24;0.000507419256318309;0.0126057434590121;0.000160650439141657;0.99933193030454 25;0.000515426695613004;0.0152636383159172;0.000164360322575267;0.999320212981812 26;0.000525595649643117;0.0179897694669832;0.000161660453870113;0.999312743896487 27;0.000534823584740014;0.0166832171238458;0.000160135991146609;0.999305040424113 28;0.000537542563101855;0.0170682636815637;0.000159910891995352;0.999302546544903 29;0.00054010571047368;0.0189681430662499;0.000154068989936519;0.99930582529959 30;0.000540223049860296;0.0211937258794527;0.000157160647241465;0.999302616302898 31;0.000543650098790082;0.0273283049097408;0.000166920507275799;0.999289429393934 32;0.000550657084106076;0.0297242338721561;0.000176149326345398;0.999273193589549 33;0.000567666524533948;0.0312031127015988;0.000174959290838917;0.999257374184627 34;0.000589564479997331;0.0362614421255913;0.000181142949631881;0.999229292570371 35;0.00061806212917393;0.0400308116670889;0.000191872923626047;0.9991900649472 36;0.000650632511832817;0.0418389766775716;0.000199643253754878;0.999149724234412 37;0.000689474293782214;0.0467358834902955;0.000211977632924897;0.999098548073293 38;0.000731138219342785;0.0523964586730976;0.000221795302027688;0.99904706647863 39;0.000793533578699302;0.055177966832089;0.000240416543812767;0.998966049877488 40;0.000859207185781173;0.0585124067163057;0.000264674910015306;0.998876117904203 41;0.000920725621476301;0.0679656015534935;0.000282355739283444;0.99879691863924 42;0.000984991230447042;0.0757310812483226;0.000314702144743578;0.998700306624809 43;0.00105939521363715;0.0811703192912012;0.000349094911367465;0.998591509874995 44;0.00115292666392253;0.086792620615223;0.000376750745815673;0.998470322590262 45;0.00124967505305504;0.0951282991008114;0.000410031980278301;0.998340292966667 46;0.00134617679388282;0.104306046855522;0.000454381385459602;0.998199441820658 47;0.00147744439982092;0.108863822493192;0.000510090216360853;0.998012465383818 48;0.00160678108915558;0.115727419192654;0.000572265035829771;0.997820953875015 49;0.00175231797005106;0.122620733316479;0.000653974571998273;0.997593707457951 50;0.00192647416205334;0.128264864604654;0.000724389338373027;0.997349136499574 51;0.00211873152640448;0.132487558117908;0.00080347791835564;0.99707779055524 52;0.00231715656887456;0.140903426576919;0.000897377523639647;0.996785465907486 53;0.00257005954245349;0.148308971347905;0.000988973589213584;0.996440966868333 54;0.00282270426395954;0.152694601283312;0.00110175953341175;0.996075536202629 55;0.00308892793456527;0.156014449299285;0.00121842271667517;0.99569264934876 56;0.00334491994186366;0.160354906530207;0.00135221310972041;0.995302866948416 
57;0.0036680988581455;0.163584432824716;0.0015021237857599;0.994829777356095 58;0.00404383426827132;0.166621852151551;0.00164389926056965;0.994312266471159 59;0.00446410949795472;0.172995362892957;0.00181867939206979;0.993717211109975 60;0.00491071415431949;0.175114162729557;0.00203795753783481;0.993051328307846 61;0.00541897972205418;0.176836610548664;0.00223079594105024;0.992350224336896 62;0.0058872972394518;0.179065380547063;0.00243327615598675;0.991679426604561 63;0.00635426205223726;0.18328905395475;0.00268594485354049;0.990959793094222 64;0.00684249456592532;0.186309865207791;0.00299675785270434;0.99016074758137 65;0.00745741108087491;0.18514690618557;0.0034276558099294;0.989114933109196 66;0.00813029947746983;0.186388327973927;0.00401498471329922;0.987854715809231 67;0.00894480989820999;0.189665426363304;0.00467257950797381;0.986382610593816 68;0.0097208540778663;0.190052370707222;0.00533807252107788;0.984941073401056 69;0.0106145155277237;0.188697039510498;0.0060901494087728;0.983295335063503 70;0.0115815669321884;0.190655935136829;0.00698624429053202;0.98143218877728 71;0.0126738410165432;0.193198718680596;0.0080193455961521;0.979306813387305 72;0.0137706103972485;0.194509663180914;0.00912917559527522;0.977100214007476 73;0.0151133228338427;0.197263891809617;0.0104517574376933;0.974434919728464 74;0.0167023100407502;0.19981346841012;0.011975363077487;0.971322326881763 75;0.0185015234683037;0.202602461969921;0.0136406055721243;0.967857870959572 76;0.020543474835099;0.20567144487397;0.0155521514540999;0.963904373710801 77;0.0226990867632567;0.209583024789031;0.0176773816301192;0.959623531606624 78;0.025242012724978;0.216536798215654;0.0199840051708029;0.954773982104219 79;0.0280714682899232;0.222812914664102;0.0225820740028015;0.949346457707275 80;0.0313087874273656;0.229026086379995;0.0254147939549998;0.943276418617635 81;0.0347623037317666;0.237135642759773;0.0283027615785079;0.936934934689726 82;0.0377325549166625;0.246569096453296;0.0313008704863735;0.930966574596964 83;0.0415709360291018;0.255103424222878;0.0350322493312845;0.923396814639614 84;0.0466891630019649;0.263805059072913;0.0392249475621001;0.914085889435935 85;0.0523809285174003;0.275827261187502;0.0435063497564212;0.904112721726179 86;0.0595353395950259;0.287154077419303;0.0477809616106315;0.892683698794343 87;0.0667672618557459;0.296736437668921;0.0520193008104549;0.881213437333799 88;0.0742284842880654;0.307394243456317;0.0567867940499367;0.868984721661998 89;0.08102260498543;0.318807087201621;0.0617441024832344;0.857233292531336 90;0.0879638443512971;0.330335659703562;0.0647086526386771;0.847327503010026 91;0.0956534410840304;0.34429171728002;0.0667471331812722;0.837599425734697 92;0.104629865442421;0.360343939150447;0.0679974373329284;0.827372697224651 93;0.114342159249409;0.376714600886086;0.0692694237579942;0.816388416992597 94;0.124830307842353;0.39337161823954;0.0705634029519849;0.804606289205662 95;0.136132619794602;0.410280336624487;0.0718796873808002;0.791987692824598 96;0.148284870965249;0.427403785267262;0.0732185913907422;0.778496537644009 97;0.16131934887796;0.44470297533411;0.0745804311136826;0.764100220008357 98;0.175263808419464;0.46213723752253;0.0759655243672633;0.748770667213273 99;0.190140356683496;0.479664593311152;0.0773741905500132;0.732485452766491 100;0.205964292416731;0.49724215292386;0.0788067505312643;0.715228957052005 101;0.222742933595137;0.514826532138116;0.0802635265357542;0.696993539869109 102;0.240474474656817;0.532374279403284;0.0817448420228012;0.677780683320382 
103;0.259146922168122;0.549842304367436;0.0832510215599426;0.657602056271935 104;0.278737163401518;0.567188298860823;0.0847823906909263;0.636480445907556 105;0.299210225581725;0.584371141653351;0.0863392757979522;0.614450498620323 106;0.320518783551316;0.601351278877663;0.087922003958059;0.591559212490625 107;0.342602969587468;0.618091072857778;0.089530902793558;0.567866127618974 108;0.365390530594804;0.634555113161069;0.0911663003164189;0.543443169088777 109;0.388797364816074;0.650710484943252;0.0928285247665155;0.51837411041741 110;0.412728452936279;0.666526991020167;0.0945179044436453;0.492753642620076 111;0.437079177930066;0.681977325511973;0.0962347675332406;0.466686054536693 112;0.461737005643692;0.69703719830248;0.0979794419256961;0.440283552430612 113;0.486583475732349;0.711685410881454;0.099752255029241;0.41366426923841 114;0.511496432215896;0.725903885342153;0.101553533576292;0.386950034207812 115;0.536352406554738;0.739677649351817;0.103383603423233;0.360263990022029 116;0.561029055458654;0.752994780772872;0.105242789343564;0.333728155197782 117;0.585407551755433;0.765846316272406;0.107131414814384;0.307461033430183 118;0.609374829980436;0.778226128713706;0.109049801796177;0.281575368223387 119;0.632825598544515;0.790130778382749;0.110998270505863;0.256176130949622 120;0.655664046315679;0.801559343179304;0.112977139183108;0.231358814501213 121;1;1;0;0 markovchain/inst/CITATION0000644000176200001440000000063414425444544014703 0ustar liggesusersnote <- sprintf("R package version %s", meta$Version) bibentry( bibtype = "Article", title = "Discrete Time Markov Chains with R", author = person(given="Giorgio Alfredo", family="Spedicato"), journal = "The R Journal", year = "2017", volume = "9", number = "2", pages = "84--104", url = "https://journal.r-project.org/archive/2017/RJ-2017-036/index.html" )